| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| pasztorpisti/json-cfg | src/jsoncfg/parser_listener.py | Python | mit | 5,570 | 0.005027 |
from kwonly_args import kwonly_defaults
from .parser import ParserListener
class ObjectBuilderParams(object):
# By creating a subclass of this class and overriding these
# attributes you can change the default values of __init__().
default_object_creator = None
default_array_creator = None
default_string_to_scalar_converter = None
@kwonly_defaults
def __init__(self, object_creator=None, array_creator=None, string_to_scalar_converter=None):
"""
:param object_creator: A callable with signature object_creator(listener) that has to
return a tuple: (json_object, insert_function). You can access line/column information
by accessing listener.parser.line and listener.parser.column.
The returned json_object will be used as a json object (dict) in the hierarchy returned by
this loads() function. The insert_function(key, value) will be used to insert items.
Besides this json_object has to support the 'in' operator (by implementing __contains__).
:param array_creator: A callable with signature array_creator(listener) that has to
return a tuple: (json_array, append_function).
The returned json_array will be used as a json array (list) in the hierarchy returned by
this loads() function. The append_function(item) will be used to add items.
:param string_to_scalar_converter: This is a callable with signature
string_to_scalar_converter(listener, scalar_str, scalar_str_quoted).
While parsing, this function receives every json value that is not an
object or an array. This includes quoted strings and all other non-quoted stuff
(like the null, True, False literals and numbers/strings). Note that scalar_str is always a
string and scalar_str_quoted is a boolean that indicates whether scalar_str was quoted or
not in the input json string. The parser interprets every scalar as a quoted or non-quoted
string.
If scalar_str_quoted is True then scalar_str contains the unescaped string. If
scalar_str_quoted is False then it may contain "null", "true", "false" or the string
representation of anything else (eg: a number: "1.564") and it's up to you how to interpret
it. You can define your own constant scalar literals if you want like interpreting
the unquoted "yes" and "no" literals as boolean values.
In case of conversion error you should call listener.error() with an error message and this
raises an exception with information about the error location, etc...
"""
def get_default(name):
# We use type(self).__dict__['X'] because these class attributes are often simple
# functions and we don't want to convert them to instance methods by retrieving them
# with self.X statements.
return type(self).__dict__[name]
self.object_creator = object_creator or get_default('default_object_creator')
self.array_creator = array_creator or get_default('default_array_creator')
self.string_to_scalar_converter = string_to_scalar_converter or\
get_default('default_string_to_scalar_converter')
class ObjectBuilderParserListener(ParserListener):
""" A configurable parser listener implementation that can be configured to
build a json tree using the user supplied object/array factories and scalar converter. """
def __init__(self, params):
super(ObjectBuilderParserListener, self).__init__()
self.params = params
self._object_key = None
# The lambda function could actually be a None but that way we get a warning in
# self._new_value() that the insert_function isn't callable...
self._container_stack = [(None, None, lambda *args: None)]
self._result = None
@property
def result(self):
""" This property holds the parsed object or array after a successful parsing. """
return self._result
class ContainerType(object):
object = 0
array = 1
@property
def _state(self):
return self._container_stack[-1]
def _new_value(self, value):
container_type, _, insert_function = self._state
if container_type == self.ContainerType.object:
insert_function(self._object_key, value)
self._object_key = None
elif container_type == self.ContainerType.array:
insert_function(value)
def _pop_container_stack(self):
if len(self._container_stack) == 2:
self._result = self._container_stack[-1][1]
self._container_stack.pop()
def begin_object(self):
obj, insert_function = self.params.object_creator(self)
self._new_value(obj)
self._container_stack.append((self.ContainerType.object, obj, insert_function))
def end_object(self):
self._pop_container_stack()
def begin_object_item(self, key, key_quoted):
if key in self._state[1]:
self.error('Duplicate key: "%s"' % (key,))
self._object_key = key
def begin_array(self):
arr, append_function = self.params.array_creator(self)
self._new_value(arr)
self._container_stack.append((self.ContainerType.array, arr, append_function))
def end_array(self):
self._pop_container_stack()
def scalar(self, scalar_str, scalar_str_quoted):
value = self.params.string_to_scalar_converter(self.parser, scalar_str, scalar_str_quoted)
self._new_value(value)
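# Not part of the original module: a minimal sketch of the string_to_scalar_converter hook
# described in the ObjectBuilderParams docstring above, mapping the unquoted "yes"/"no"
# literals to booleans as the docstring suggests. The names below are illustrative only.
def yes_no_scalar_converter(listener, scalar_str, scalar_str_quoted):
    if scalar_str_quoted:
        return scalar_str                    # quoted values stay plain strings
    if scalar_str == 'null':
        return None
    if scalar_str in ('true', 'yes'):
        return True
    if scalar_str in ('false', 'no'):
        return False
    try:
        return float(scalar_str)             # eg: "1.564"
    except ValueError:
        listener.error('Invalid scalar: %r' % (scalar_str,))
# This would be wired in together with object/array creators supplied by the caller, e.g.:
# params = ObjectBuilderParams(object_creator=..., array_creator=...,
#                              string_to_scalar_converter=yes_no_scalar_converter)
# listener = ObjectBuilderParserListener(params)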
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/subnet_paged.py | Python | mit | 922 | 0.001085 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class SubnetPaged(Paged):
"""
A paging container for iterating over a list of :class:`Subnet <azure.mgmt.network.v2017_06_01.models.Subnet>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Subnet]'}
}
def __init__(self, *args, **kwargs):
super(SubnetPaged, self).__init__(*args, **kwargs)
| charles-dyfis-net/koan | koan/xencreate.py | Python | gpl-2.0 | 5,855 | 0.008027 |
"""
Virtualization installation functions.
Currently somewhat Xen/paravirt specific, will evolve later.
Copyright 2006-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
Original version based on virtguest-install
Jeremy Katz <katzj@redhat.com>
Option handling added by Andrew Puch <apuch@redhat.com>
Simplified for use as library by koan, Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import tempfile
import random
import exceptions
import errno
import re
import virtinst
import app as koan
try:
import virtinst.DistroManager as DistroManager
except:
# older virtinst, this is probably ok
# but we know we can't do Xen fullvirt installs
pass
import traceback
def random_mac():
"""
from xend/server/netif.py
Generate a random MAC address.
Uses OUI 00-16-3E, allocated to
Xensource, Inc. Last 3 fields are random.
return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
def start_install(name=None, ram=None, disks=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None, arch=None, no_gfx=False, fullvirt=False, bridge=None):
if profile_data.has_key("file"):
raise koan.InfoException("Xen does not work with --image yet")
if fullvirt:
# FIXME: add error handling here to explain when it's not supported
guest = virtinst.FullVirtGuest(installer=DistroManager.PXEInstaller())
else:
guest = virtinst.ParaVirtGuest()
extra = extra.replace("&","&amp;")
if not fullvirt:
guest.set_boot((profile_data["kernel_local"], profile_data["initrd_local"]))
# fullvirt OS's will get this from the PXE config (managed by Cobbler)
guest.extraargs = extra
else:
print "- fullvirt mode"
if profile_data.has_key("breed"):
breed = profile_data["breed"]
if breed != "other" and breed != "":
if breed in [ "debian", "suse", "redhat" ]:
guest.set_os_type("linux")
elif breed in [ "windows" ]:
guest.set_os_type("windows")
else:
guest.set_os_type("unix")
if profile_data.has_key("os_version"):
# FIXME: when os_version is not defined and it's linux, do we use generic24/generic26 ?
version = profile_data["os_version"]
if version != "other" and version != "":
try:
guest.set_os_variant(version)
except:
print "- virtinst library does not understand variant %s, treating as generic" % version
pass
guest.set_name(name)
guest.set_memory(ram)
guest.set_vcpus(vcpus)
if not no_gfx:
guest.set_graphics("vnc")
else:
guest.set_graphics(False)
if uuid is not None:
guest.set_uuid(uuid)
for d in disks:
if d[1] != 0:
guest.disks.append(virtinst.XenDisk(d[0], size=d[1]))
counter = 0
if profile_data.has_key("interfaces"):
interfaces = profile_data["interfaces"].keys()
interfaces.sort()
counter = -1
for iname in interfaces:
counter = counter + 1
intf = profile_data["interfaces"][iname]
mac = intf["mac_address"]
if mac == "":
mac = random_mac()
if not bridge:
profile_bridge = profile_data["virt_bridge"]
intf_bridge = intf["virt_bridge"]
if intf_bridge == "":
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
intf_bridge = profile_bridge
else:
if bridge.find(",") == -1:
intf_bridge = bridge
else:
bridges = bridge.split(",")
intf_bridge = bridges[counter]
nic_obj = virtinst.XenNetworkInterface(macaddr=mac, bridge=intf_bridge)
guest.nics.append(nic_obj)
counter = counter + 1
else:
# for --profile you just get one NIC, go define a system if you want more.
# FIXME: can mac still be sent on command line in this case?
if bridge is None:
profile_bridge = profile_data["virt_bridge"]
else:
profile_bridge = bridge
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
nic_obj = virtinst.XenNetworkInterface(macaddr=random_mac(), bridge=profile_bridge)
guest.nics.append(nic_obj)
guest.start_install()
return "use virt-manager or reconnect with virsh console %s" % name
| tobykurien/MakerDroid | assetsrc/public.mp3/skeinforge/skeinforge_utilities/skeinforge_craft.py | Python | gpl-3.0 | 7,636 | 0.043609 |
"""
Craft is a script to access the plugins which craft a gcode file.
The plugin buttons which are commonly used are bolded and the ones which are rarely used have normal font weight.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import settings
from skeinforge.skeinforge_utilities import skeinforge_analyze
from skeinforge.skeinforge_utilities import skeinforge_polyfile
from skeinforge.skeinforge_utilities import skeinforge_profile
import os
import time
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getChainText( fileName, procedure ):
"Get a crafted shape
|
file."
text = gcodec.getFileText( fileName )
procedures = getProcedures( procedure, text )
return getChainTextFromProcedures( fileName, procedures, text )
def getChainTextFromProcedures( fileName, procedures, text ):
"Get a crafted sh
|
ape file from a list of procedures."
lastProcedureTime = time.time()
for procedure in procedures:
craftModule = getCraftModule( procedure )
if craftModule != None:
text = craftModule.getCraftedText( fileName, text )
if gcodec.isProcedureDone( text, procedure ):
print( '%s procedure took %s seconds.' % ( procedure.capitalize(), int( round( time.time() - lastProcedureTime ) ) ) )
Filehandle = open ('report.txt', 'a')
Filehandle.write ('%s procedure took %s seconds.' % ( procedure.capitalize(), int( round( time.time() - lastProcedureTime ) ) )+'\n')
Filehandle.close ()
lastProcedureTime = time.time()
return text
def getCraftModule( fileName ):
"Get craft module."
return gcodec.getModuleWithDirectoryPath( getPluginsDirectoryPath(), fileName )
def getLastModule():
"Get the last tool."
craftSequence = getReadCraftSequence()
if len( craftSequence ) < 1:
return None
return getCraftModule( craftSequence[ - 1 ] )
def getNewRepository():
"Get the repository constructor."
return CraftRepository()
def getPluginsDirectoryPath():
"Get the plugins directory path."
return gcodec.getAbsoluteFolderPath( os.path.dirname( __file__ ), os.path.join( 'skeinforge_tools', 'craft_plugins' ) )
def getPluginFileNames():
"Get craft plugin fileNames."
craftSequence = getReadCraftSequence()
craftSequence.sort()
return craftSequence
def getProcedures( procedure, text ):
"Get the procedures up to and including the given procedure."
craftSequence = getReadCraftSequence()
sequenceIndexPlusOneFromText = getSequenceIndexPlusOneFromText( text )
sequenceIndexFromProcedure = getSequenceIndexFromProcedure( procedure )
return craftSequence[ sequenceIndexPlusOneFromText : sequenceIndexFromProcedure + 1 ]
def getReadCraftSequence():
"Get profile sequence."
return skeinforge_profile.getCraftTypePluginModule().getCraftSequence()
def getSequenceIndexFromProcedure( procedure ):
"Get the profile sequence index of the procedure. Return None if the procedure is not in the sequence"
craftSequence = getReadCraftSequence()
if procedure not in craftSequence:
return 0
return craftSequence.index( procedure )
def getSequenceIndexPlusOneFromText( fileText ):
"Get the profile sequence index of the file plus one. Return zero if the procedure is not in the file"
craftSequence = getReadCraftSequence()
for craftSequenceIndex in xrange( len( craftSequence ) - 1, - 1, - 1 ):
procedure = craftSequence[ craftSequenceIndex ]
if gcodec.isProcedureDone( fileText, procedure ):
return craftSequenceIndex + 1
return 0
def writeChainTextWithNounMessage( fileName, procedure ):
"Get and write a crafted shape file."
print( '' )
print( 'The %s tool is parsing the file:' % procedure )
print( os.path.basename( fileName ) )
print( '' )
startTime = time.time()
suffixFileName = fileName[ : fileName.rfind( '.' ) ] + '_' + procedure + '.gcode'
craftText = getChainText( fileName, procedure )
if craftText == '':
return
gcodec.writeFileText( suffixFileName, craftText )
print( '' )
print( 'The %s tool has created the file:' % procedure )
print( suffixFileName )
print( '' )
print( 'It took %s seconds to craft the file.' % int( time.time() - startTime ) )
skeinforge_analyze.writeOutput( suffixFileName, craftText )
def writeOutput( fileName ):
"Craft a gcode file with the last module."
pluginModule = getLastModule()
if pluginModule != None:
pluginModule.writeOutput( fileName )
class CraftRadioButtonsSaveListener:
"A class to update the craft radio buttons."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
euclidean.addElementToListTableIfNotThere( self, self.repository.repositoryDialog, settings.globalProfileSaveListenerListTable )
self.gridPosition = gridPosition.getCopy()
self.gridPosition.row = gridPosition.rowStart
self.gridPosition.increment()
self.setRadioButtons()
def getFromRadioPlugins( self, radioPlugins, repository ):
"Initialize."
self.name = 'CraftRadioButtonsSaveListener'
self.radioPlugins = radioPlugins
self.repository = repository
repository.displayEntities.append( self )
return self
def save( self ):
"Profile has been saved and craft radio plugins should be updated."
self.setRadioButtons()
def setRadioButtons( self ):
"Profile has been saved and craft radio plugins should be updated."
craftSequence = skeinforge_profile.getCraftTypePluginModule().getCraftSequence()
craftSequence.append( 'bfb' )
craftSequence.remove( 'home' )
craftSequence.remove( 'chamber' )
craftSequence.remove( 'lash' )
craftSequence.remove( 'oozebane' )
craftSequence.remove( 'splodge' )
craftSequence.remove( 'unpause' )
craftSequence.remove( 'wipe' )
gridPosition = self.gridPosition.getCopy()
maximumValue = False
activeRadioPlugins = []
for radioPlugin in self.radioPlugins:
if radioPlugin.name in craftSequence:
activeRadioPlugins.append( radioPlugin )
radioPlugin.incrementGridPosition( gridPosition )
maximumValue = max( radioPlugin.value, maximumValue )
else:
radioPlugin.radiobutton.grid_remove()
if not maximumValue:
selectedRadioPlugin = settings.getSelectedRadioPlugin( self.repository.importantFileNames + [ activeRadioPlugins[ 0 ].name ], activeRadioPlugins ).setSelect()
self.repository.pluginFrame.update()
class CraftRepository:
"A class to handle the craft settings."
def __init__( self ):
"Set the default settings, execute title & settings fileName."
settings.addListsToRepository( 'skeinforge.skeinforge_utilities.skeinforge_craft.html', '', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Craft', self, '' )
self.importantFileNames = ['bfb', 'carve', 'chop', 'feed', 'flow', 'lift', 'raft', 'speed' ]
allCraftNames = gcodec.getPluginFileNamesFromDirectoryPath( getPluginsDirectoryPath() )
radioPlugins = settings.getRadioPluginsAddPluginFrame( getPluginsDirectoryPath(), self.importantFileNames, allCraftNames, self )
CraftRadioButtonsSaveListener().getFromRadioPlugins( radioPlugins, self )
self.executeTitle = 'Craft'
def execute( self ):
"Craft button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, [], self.fileNameInput.wasCancelled )
for fileName in fileNames:
writeOutput( fileName )
| CollabQ/CollabQ | administration/views.py | Python | apache-2.0 | 8,027 | 0.017815 |
# Copyright 2010 http://www.collabq.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
from django.shortcuts import render_to_response
from administration import helper as admin_helper
from common import api
from common import clean
from common import decorator
from common import models
from common import exception
from common import util
from common import validate
from common import views as common_views
ITEMS_BY_PAGE = 20
@decorator.gae_admin_required
def install(request):
try:
root_user = api.actor_get(api.ROOT, settings.ROOT_NICK)
if root_user:
return util.RedirectFlash('/', 'Already Installed')
except:
root_user = None
post_name = util.get_metadata('POST_NAME')
default_channel = util.get_metadata('DEFAULT_CHANNEL')
if request.POST:
site_name = request.POST.get('site_name', None)
tagline = request.POST.get('tagline', None)
post_name = request.POST.get('post_name', None)
root_mail = request.POST.get('root_mail', None)
password = request.POST.get('password', None)
confirm = request.POST.get('confirm', None)
default_channel = request.POST.get('default_channel', None)
try:
logging.info('saving values')
validate.nonce(request, 'install')
validate.email(root_mail)
validate.password(password)
validate.password_and_confirm(password, confirm)
channel = clean.channel(default_channel)
admin_helper.validate_and_save_sitesettings(site_name, tagline, post_name)
root_user = api.user_create_root(api.ROOT, password=password)
api.email_associate(api.ROOT, root_user.nick, root_mail)
channel_ref = api.channel_create(api.ROOT, nick=api.ROOT.nick, channel=channel, tags=[],
type='', description='Support Channel')
util.set_metadata('DEFAULT_CHANNEL', default_channel)
logging.info('Installed and Redirecting to front')
return util.RedirectFlash('/', 'Installed Successfully')
except:
exception.handle_exception(request)
redirect_to = '/'
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/install.html', c)
@decorator.gae_admin_required
def admin(request):
page = 'admin'
group_menuitem = 'admin'
title = 'Administration'
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/admin.html', c)
@decorator.gae_admin_required
def admin_site(request):
page = 'site'
title = 'Site Settings'
site_name = util.get_metadata('SITE_NAME')
tagline = util.get_metadata('TAGLINE')
post_name = util.get_metadata('POST_NAME')
if request.POST:
site_name = request.POST.get('site_name', None)
tagline = request.POST.get('tagline', None)
post_name = request.POST.get('post_name', None)
site_description = request.POST.get('site_description', None)
try:
validate.nonce(request, 'site')
admin_helper.validate_and_save_sitesettings(site_name, tagline, post_name, site_description)
except exception.ValidationError:
exception.handle_exception(request)
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/site.html', c)
@decorator.gae_admin_required
def admin_channel(request):
page = 'channel'
title = 'Channels Settings'
enable_channels = util.get_metadata('ENABLE_CHANNELS')
enable_channel_types = util.get_metadata('ENABLE_CHANNEL_TYPES')
if request.POST:
enable_channels = request.POST.get('enable_channels', False)
enable_channel_types = request.POST.get('enable_channel_types', False)
try:
validate.nonce(request, 'admin_channel')
validate.bool_checkbox(enable_channels)
validate.bool_checkbox(enable_channel_types)
util.set_metadata('ENABLE_CHANNELS', str(enable_channels), 0, {'type':'bool'})
util.set_metadata('ENABLE_CHANNEL_TYPES', str(enable_channel_types), 0, {'type':'bool'})
except exception.ValidationError:
exception.handle_exception(request)
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel.html', c)
@decorator.gae_admin_required
def admin_channel_list(request):
page = 'channel_list'
title = 'Channels'
page = util.paging_get_page(request)
offset = util.paging_get_offset(page, ITEMS_BY_PAGE)
filter = request.GET.get('filter', 'all')
#owner = api.actor_lookup_nick(request.user, util.get_owner(request))
new_link = '/admin/channels/new'
size, items = api.admin_get_channels(api.ROOT, ITEMS_BY_PAGE, offset, filter)
start, end, next, prev, first, last = util.paging(page, ITEMS_BY_PAGE, size)
base_url = '/admin/channels?'
if filter is not None:
filter_url = '&filter=%s' % filter
group_menuitem = 'channel'
menuitem = 'channel-list'
channel_types = api.get_config_values(api.ROOT, 'channel_type')
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel_list.html', c)
@decorator.gae_admin_required
def admin_channel_new(request):
page = 'channel_new'
title = 'Create a Channel'
if request.method == 'POST':
params = {
'nick': api.ROOT.nick,
'channel': request.POST.get('channel'),
'description': request.POST.get('description', ''),
'type':request.POST.get('type'),
'tags': request.POST.getlist('tags[]'),
}
channel_ref = api.channel_create(api.ROOT, **params)
if channel_ref is not None:
logging.info('Channel created %s' % channel_ref)
return util.RedirectFlash('/admin/channels', "Channel created successfully")
group_menuitem = 'channel'
menuitem = 'channel-new'
channel_types = api.get_config_values(api.ROOT, 'channel_type')
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel_new.html', c)
@decorator.gae_admin_required
def admin_channel_enable(request, nick):
logging.info("admin_channel_enable")
nick = clean.channel(nick)
channel = api.channel_get_safe(api.ROOT, nick)
channel.enabled = True
channel.put()
logging.info("Channel %s" % channel.nick)
logging.info("Is enabled? %s" % channel.is_enabled())
return util.RedirectFlash('/admin/channels', "Channel has been enabled successfully")
@decorator.gae_admin_required
def admin_channel_disable(request, nick):
logging.info("admin_channel_disable")
nick = clean.channel(nick)
channel = api.channel_get_safe(api.ROOT, nick)
channel.enabled = False
channel.put()
logging.info("Channel %s" % channel.nick)
logging.info("Is enabled? %s" % channel.is_enabled())
return util.RedirectFlash('/admin/channels', "Channel has been disabled successfully")
@decorator.gae_admin_required
def admin_auto(request, action):
page = util.paging_get_page(request)
offset = util.paging_get_offset(page, ITEMS_BY_PAGE)
next = str(int(page)+1)
redirect_url = 'admin/auto/%s?page=%s' % (action, next)
action = "administration.actions.%s" % action
__import__(action)
action_call = sys.modules[action]
redirect, output = action_call.process(page, ITEMS_BY_PAGE, offset)
c = template.RequestContext(request, locals())
t = loader.get_template('administration/templates/auto.html')
return http.HttpResponse(t.render(c))
| Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/modules/extensions_framework/validate.py | Python | gpl-3.0 | 6,509 | 0.002612 |
# -*- coding: utf-8 -*-
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# --------------------------------------------------------------------------
# Blender 2.5 Extensions Framework
# --------------------------------------------------------------------------
#
# Authors:
# Doug Hammond
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
#
"""
Pure logic and validation class.
By using a Subject object, and a dict of described logic tests, it
is possible to arrive at a True or False result for various purposes:
1. Data validation
2. UI control visibility
A Subject can be any object whose members are readable with getattr() :
class Subject(object):
a = 0
b = 1
c = 'foo'
d = True
e = False
f = 8
g = 'bar'
Tests are described thus:
Use the special list types Logic_AND and Logic_OR to describe
combinations of values and other members. Use Logic_Operator for
numerical comparison.
With regards to Subject, each of these evaluate to True:
TESTA = {
'a': 0,
'c': Logic_OR([ 'foo', 'bar' ]),
'd': Logic_AND([True, True]),
'f': Logic_AND([8, {'b': 1}]),
'e': {'b': Logic_Operator({'gte':1, 'lt':3}) },
'g': Logic_OR([ 'baz', Logic_AND([{'b': 1}, {'f': 8}]) ])
}
With regards to Subject, each of these evaluate to False:
TESTB = {
'a': 'foo',
'c': Logic_OR([ 'bar', 'baz' ]),
'd': Logic_AND([ True, 'foo' ]),
'f': Logic_AND([9, {'b': 1}]),
'e': {'b': Logic_Operator({'gte':-10, 'lt': 1}) },
'g': Logic_OR([ 'baz', Logic_AND([{'b':0}, {'f': 8}]) ])
}
With regards to Subject, this test is invalid
TESTC = {
'n': 0
}
Tests are executed thus:
S = Subject()
L = Logician(S)
L.execute(TESTA)
"""
class Logic_AND(list):
pass
class Logic_OR(list):
pass
class Logic_Operator(dict):
pass
class Logician(object):
"""Given a subject and a dict that describes tests to perform on
its members, this class will evaluate True or False results for
each member/test pair. See the examples below for test syntax.
"""
subject = None
def __init__(self, subject):
self.subject = subject
def get_member(self, member_name):
"""Get a member value from the subject object. Raise exception
if subject is None or member not found.
"""
if self.subject is None:
raise Exception('Cannot run tests on a subject which is None')
return getattr(self.subject, member_name)
def test_logic(self, member, logic, operator='eq'):
"""Find the type of test to run on member, and perform that test"""
if type(logic) is dict:
return self.test_dict(member, logic)
elif type(logic) is Logic_AND:
return self.test_and(member, logic)
elif type(logic) is Logic_OR:
return self.test_or(member, logic)
elif type(logic) is Logic_Operator:
return self.test_operator(member, logic)
else:
# compare the value, I think using Logic_Operator() here
# allows completeness in test_operator(), but I can't put
# my finger on why for the minute
return self.test_operator(member,
Logic_Operator({operator: logic}))
def test_operator(self, member, value):
"""Execute the operators contained within value and expect that
ALL operators are True
"""
# something in this method is incomplete, what if operand is
# a dict, Logic_AND, Logic_OR or another Logic_Operator ?
# Do those constructs even make any sense ?
result = True
for operator, operand in value.items():
operator = operator.lower().strip()
if operator in ['eq', '==']:
result &= member==operand
if operator in ['not', '!=']:
result &= member!=operand
if operator in ['lt', '<']:
result &= member<operand
if operator in ['lte', '<=']:
result &= member<=operand
if operator in ['gt', '>']:
result &= member>operand
if operator in ['gte', '>=']:
result &= member>=operand
if operator in ['and', '&']:
result &= member&operand
if operator in ['or', '|']:
result &= member|operand
if operator in ['len']:
result &= len(member)==operand
# I can think of some more, but they're probably not useful.
return result
def test_or(self, member, logic):
"""Member is a value, logic is a set of values, ANY of which
can be True
"""
result = False
for test in logic:
result |= self.test_logic(member, test)
return result
def test_and(self, member, logic):
"""Member is a value, logic is a list of values, ALL of which
must be True
"""
result = True
for test in logic:
result &= self.test_logic(member, test)
return result
def test_dict(self, member, logic):
"""Member is a value, logic is a dict of other members to
compare to. All other member tests must be True
"""
result = True
for other_member, test in logic.items():
result &= self.test_logic(self.get_member(other_member), test)
return result
def execute(self, test):
"""Subject is an object, test is a dict of {member: test} pairs
to perform on subject's members. Each key in test is a member
of subject.
"""
for member_name, logic in test.items():
result = self.test_logic(self.get_member(member_name), logic)
print('member %s is %s' % (member_name, result))
# A couple of name aliases
class Validation(Logician):
pass
class Visibility(Logician):
pass
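# Not part of the original module: a small runnable check of the examples given in the module
# docstring, reusing the Subject and TESTA definitions from there. Logician.execute() prints a
# "member X is True/False" line for every tested member; all of TESTA should come out True.
if __name__ == '__main__':
    class Subject(object):
        a = 0
        b = 1
        c = 'foo'
        d = True
        e = False
        f = 8
        g = 'bar'
    TESTA = {
        'a': 0,
        'c': Logic_OR(['foo', 'bar']),
        'd': Logic_AND([True, True]),
        'f': Logic_AND([8, {'b': 1}]),
        'e': {'b': Logic_Operator({'gte': 1, 'lt': 3})},
        'g': Logic_OR(['baz', Logic_AND([{'b': 1}, {'f': 8}])]),
    }
    Logician(Subject()).execute(TESTA)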
| Acehaidrey/incubator-airflow | tests/providers/amazon/aws/operators/test_glue_crawler.py | Python | apache-2.0 | 3,458 | 0.001735 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.amazon.aws.operators.glue_crawler import GlueCrawlerOperator
mock_crawler_name = 'test-crawler'
mock_role_name = 'test-role'
mock_config = {
'Name': mock_crawler_name,
'Description': 'Test glue crawler from Airflow',
'DatabaseName': 'test_db',
'Role': mock_role_name,
'Targets': {
'S3Targets': [
{
'Path': 's3://test-glue-crawler/foo/',
'Exclusions': [
's3://test-glue-crawler/bar/',
],
'ConnectionName': 'test-s3-conn',
}
],
'JdbcTargets': [
{
'ConnectionName': 'test-jdbc-conn',
'Path': 'test_db/test_table>',
'Exclusions': [
'string',
],
}
],
'MongoDBTargets': [
{'ConnectionName': 'test-mongo-conn', 'Path': 'test_db/test_collection', 'ScanAll': True}
],
'DynamoDBTargets': [{'Path': 'test_db/test_table', 'scanAll': True, 'scanRate': 123.0}],
'CatalogTargets': [
{
'DatabaseName': 'test_glue_db',
'Tables': [
'test',
],
}
],
},
'Classifiers': ['test-classifier'],
'TablePrefix': 'test',
'SchemaChangePolicy': {
'UpdateBehavior': 'UPDATE_IN_DATABASE',
'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
},
'RecrawlPolicy': {'RecrawlBehavior': 'CRAWL_EVERYTHING'},
'LineageConfiguration': 'ENABLE',
'Configuration': """
{
"Version": 1.0,
"CrawlerOutput": {
"Partitions": { "AddOrUpdateBehavior": "InheritFromTable" }
}
}
""",
'SecurityConfiguration': 'test',
'Tags': {'test': 'foo'},
}
class TestGlueCrawlerOperator(unittest.TestCase):
def setUp(self):
self.glue = GlueCrawlerOperator(task_id='test_glue_crawler_operator', config=mock_config)
@mock.patch('airflow.providers.amazon.aws.operators.glue_crawler.GlueCrawlerHook')
def test_execute_without_failure(self, mock_hook):
mock_hook.return_value.has_crawler.return_value = True
self.glue.execute({})
mock_hook.assert_has_calls(
[
mock.call('aws_default'),
mock.call().has_crawler('test-crawler'),
mock.call().update_crawler(**mock_config),
mock.call().start_crawler(mock_crawler_name),
mock.call().wait_for_crawler_completion(crawler_name=mock_crawler_name, poll_interval=5),
]
)
| Dwolla/arbalest | test/pipeline/test_s3_sorted_data_sources.py | Python | mit | 5,292 | 0.000189 |
import json
import unittest
from boto.s3.key import Key
from mock import create_autospec, Mock, call
from arbalest.s3 import Bucket
from arbalest.pipeline import S3SortedDataSources
def mock_key(name):
return Key(Mock(), name)
class S3SortedDataSourcesShould(unittest.TestCase):
def setUp(self):
parents = ['event.entity.created/2014-11-03/',
'event.entity.created/2014-11-04/',
'event.entity.created/2014-11-05/',
'event.entity.created/2014-11-06/',
'event.entity.created/2014-11-07/']
first_children = ['event.entity.created/2014-11-04/00/',
'event.entity.created/2014-11-04/01/']
second_children = ['event.entity.created/2014-11-05/00/']
self.bucket = create_autospec(Bucket)
self.bucket.list = Mock(
side_effect=[[mock_key(key) for key in parents],
[mock_key(key) for key in first_children],
[mock_key(key) for key in second_children]])
def test_have_source_journal_key(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
self.assertEqual('/event.entity.created_source_journal.json',
source.source_journal_key)
def test_get_all_dates_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_as_sources_with_empty_dates(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
'', '')
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_after_start_date_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
'2014-11-04')
self.assertEqual(['event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_before_end_date_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
end='2014-11-05')
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_between_start_and_end_date_as_sources(
self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
start='2014-11-04/01',
end='2014-11-06')
self.assertEqual(['event.entity.created/2014-11-04/01',
'event.entity.created/2014-11-05/00',
'event.entity.created/2014-11-06'],
list(source.get()))
self.bucket.list.assert_has_calls(
[call(source.source + '/', '/'),
call('event.entity.created/2014-11-04/', '/'),
call('event.entity.created/2014-11-05/', '/'),
call('event.entity.created/2014-11-06/', '/')])
def test_committed(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
key = mock_key(source.source_journal_key)
key.exists = Mock(return_value=True)
source.bucket.get = Mock(return_value=key)
self.assertEqual(True, source.committed().exists())
def test_commit(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
committed_point = '2014-11-04'
source.commit(committed_point)
self.bucket.save.assert_called_once_with(source.source_journal_key,
json.dumps({
'committed': committed_point}))
def test_rollback(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
source.rollback()
self.bucket.delete.assert_called_once_with(source.source_journal_key)
| dbrumley/recfi | llvm-3.3/utils/lit/lit/main.py | Python | mit | 15,230 | 0.00499 |
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
import math, os, platform, random, re, sys, time, threading, traceback
import ProgressBar
import TestRunner
import Util
import LitConfig
import Test
import lit.discovery
class TestingProgressDisplay:
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
self.lock = threading.Lock()
self.progressBar = progressBar
self.completed = 0
def update(self, test):
# Avoid locking overhead in quiet mode
if self.opts.quiet and not test.result.isFailure:
self.completed += 1
return
# Output lock.
self.lock.acquire()
try:
self.handleUpdate(test)
finally:
self.lock.release()
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def handleUpdate(self, test):
self.completed += 1
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
if self.opts.succinct and not test.result.isFailure:
return
if self.progressBar:
self.progressBar.clear()
print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
self.completed, self.numTests)
if test.result.isFailure and self.opts.showOutput:
print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20)
print test.output
print "*" * 20
sys.stdout.flush()
class TestProvider:
def __init__(self, tests, maxTime):
self.maxTime = maxTime
self.iter = iter(tests)
self.lock = threading.Lock()
self.startTime = time.time()
def get(self):
# Check if we have run out of time.
if self.maxTime is not None:
if time.time() - self.startTime > self.maxTime:
return None
# Otherwise take the next test.
self.lock.acquire()
try:
item = self.iter.next()
except StopIteration:
item = None
self.lock.release()
return item
class Tester(threading.Thread):
def __init__(self, litConfig, provider, display):
threading.Thread.__init__(self)
self.litConfig = litConfig
self.provider = provider
self.display = display
def run(self):
while 1:
item = self.provider.get()
if item is None:
break
self.runTest(item)
def runTest(self, test):
result = None
startTime = time.time()
try:
result, output = test.config.test_format.execute(test,
self.litConfig)
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print '\nCtrl-C detected, goodbye.'
os.kill(0,9)
except:
if self.litConfig.debug:
raise
result = Test.UNRESOLVED
output = 'Exception during script execution:\n'
output += traceback.format_exc()
output += '\n'
elapsed = time.time() - startTime
test.setResult(result, output, elapsed)
self.display.update(test)
def runTests(numThreads, litConfig, provider, display):
# If only using one testing thread, don't use threads at all; this lets us
# profile, among other things.
if numThreads == 1:
t = Tester(litConfig, provider, display)
t.run()
return
# Otherwise spin up the testing threads and wait for them to finish.
testers = [Tester(litConfig, provider, display)
for i in range(numThreads)]
for t in testers:
t.start()
try:
for t in testers:
t.join()
except KeyboardInterrupt:
sys.exit(2)
def main(builtinParameters = {}):
# Bump the GIL check interval, its more important to get any one thread to a
# blocking operation (hopefully exec) than to try and unblock other threads.
#
# FIXME: This is a hack.
import sys
sys.setcheckinterval(1000)
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_option("", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
group = OptionGroup(parser, "Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
group.add_option("-q", "--quiet", dest="quiet",
help="Suppress no error output",
action="store_true", default=False)
group.add_option("-s", "--succinct", dest="succinct",
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
help="Show all test output",
action="store_true", default=False)
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--path", dest="path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
group.add_option("", "--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Selection")
group.add_option("", "--max-tests", dest="maxTests", metavar="N",
help="Maximum number of tests to run",
action="store", type=int, default=None)
group.add_option("", "--max-time", dest="maxTime", metavar="N",
help="Maximum time to spend testing (in seconds)",
action="store", type=float, default=None)
group.add_option("", "--shuffle", dest="shuffle",
help="Run tests in random order",
action="store_true", default=False)
group.add_option("", "--filter", dest="filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"r
| suoto/hdlcc | setup.py | Python | gpl-3.0 | 3,321 | 0.014453 |
# This file is part of HDL Checker.
#
# Copyright (c) 2015 - 2019 suoto (Andre Souto)
#
# HDL Checker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HDL Checker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDL Checker. If not, see <http://www.gnu.org/licenses/>.
"HDL Checker installation script"
import setuptools # type: ignore
import versioneer
LONG_DESCRIPTION = open("README.md", "rb").read().decode(encoding='utf8', errors='replace')
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: GNU General Public License v3 (GPLv3)
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Topic :: Software Development
Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)
Topic :: Text Editors :: Integrated Development Environments (IDE)
"""
setuptools.setup(
name = 'hdl_checker',
version = versioneer.get_version(),
description = 'HDL code checker',
long_description = LONG_DESCRIPTION,
long_description_content_type = "text/markdown",
author = 'Andre Souto',
author_email = 'andre820@gmail.com',
url = 'https://github.com/suoto/hdl_checker',
license = 'GPLv3',
keywords = 'VHDL Verilog SystemVerilog linter LSP language server protocol vimhdl vim-hdl',
platforms = 'any',
packages = setuptools.find_packages(),
install_requires = ['argcomplete',
'argparse',
'backports.functools_lru_cache; python_version<"3.2"',
'bottle>=0.12.9',
'enum34>=1.1.6; python_version<"3.3"',
'future>=0.14.0',
'futures; python_version<"3.2"',
'prettytable>=0.7.2',
'pygls==0.9.1',
'requests>=2.20.0',
'six>=1.10.0',
'tabulate>=0.8.5',
'typing>=3.7.4',
'waitress>=0.9.0', ],
cmdclass = versioneer.get_cmdclass(),
entry_points = {
'console_scripts' : ['hdl_checker=hdl_checker.server:main', ]
},
classifiers=CLASSIFIERS.splitlines(),
)
| TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/src/reportlab/graphics/barcode/test.py | Python | gpl-3.0 | 9,268 | 0.007769 |
#!/usr/pkg/bin/python
import os, sys, time
from reportlab.graphics.barcode.common import *
from reportlab.graphics.barcode.code39 import *
from reportlab.graphics.barcode.code93 import *
from reportlab.graphics.barcode.code128 import *
from reportlab.graphics.barcode.usps import *
from reportlab.graphics.barcode.usps4s import USPS_4State
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, Preformatted, PageBreak
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.platypus.flowables import XBox, KeepTogether
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.barcode import getCodes, getCodeNames, createBarcodeDrawing, createBarcodeImageInMemory
def run():
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
story = []
#for codeNames in code
story.append(Paragraph('I2of5', styleN))
story.append(I2of5(1234, barWidth = inch*0.02, checksum=0))
story.append(Paragraph('MSI', styleN))
story.append(MSI(1234))
story.append(Paragraph('Codabar', styleN))
story.append(Codabar("A012345B", barWidth = inch*0.02))
story.append(Paragraph('Code 11', styleN))
story.append(Code11("01234545634563"))
story.append(Paragraph('Code 39', styleN))
story.append(Standard39("A012345B%R"))
story.append(Paragraph('Extended Code 39', styleN))
story.append(Extended39("A012345B}"))
story.append(Paragraph('Code93', styleN))
story.append(Standard93("CODE 93"))
story.append(Paragraph('Extended Code93', styleN))
story.append(Extended93("L@@K! Code 93 :-)")) #, barWidth=0.005 * inch))
story.append(Paragraph('Code 128', styleN))
c=Code128("AB-12345678") #, barWidth=0.005 * inch)
#print 'WIDTH =', (c.width / inch), 'barWidth =', (c.barWidth / inch)
#print 'LQ =', (c.lquiet / inch), 'RQ =', (c.rquiet / inch)
story.append(c)
story.append(Paragraph('USPS FIM', styleN))
story.append(FIM("A"))
story.append(Paragraph('USPS POSTNET', styleN))
story.append(POSTNET('78247-1043'))
story.append(Paragraph('USPS 4 State', styleN))
story.append(USPS_4State('01234567094987654321','01234567891'))
from reportlab.graphics.barcode import createBarcodeDrawing
story.append(Paragraph('EAN13', styleN))
bcd = createBarcodeDrawing('EAN13', value='123456789012')
story.append(bcd)
story.append(Paragraph('EAN8', styleN))
bcd = createBarcodeDrawing('EAN8', value='1234567')
story.append(bcd)
story.append(Paragraph('UPCA', styleN))
bcd = createBarcodeDrawing('UPCA', value='03600029145')
story.append(bcd)
story.append(Paragraph('USPS_4State', styleN))
bcd = createBarcodeDrawing('USPS_4State', value='01234567094987654321',routing='01234567891')
story.append(bcd)
story.append(Paragraph('Label Size', styleN))
story.append(XBox((2.0 + 5.0/8.0)*inch, 1 * inch, '1x2-5/8"'))
story.append(Paragraph('Label Size', styleN))
story.append(XBox((1.75)*inch, .5 * inch, '1/2x1-3/4"'))
c = Canvas('out.pdf')
f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
f.addFromList(story, c)
c.save()
print 'saved out.pdf'
def fullTest(fileName="test_full.pdf"):
"""Creates large-ish test document with a variety of parameters"""
story = []
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
styleH2 = styles['Heading2']
story = []
story.append(Paragraph('ReportLab Barcode Test Suite - full output', styleH))
story.append(Paragraph('Generated on %s' % time.ctime(time.time()), styleN))
story.append(Paragraph('', styleN))
story.append(Paragraph('Repository information for this build:', styleN))
#see if we can figure out where it was built, if we're running in source
if os.path.split(os.getcwd())[-1] == 'barcode' and os.path.isdir('.svn'):
#runnning in a filesystem svn copy
infoLines = os.popen('svn info').read()
story.append(Preformatted(infoLines, styles["Code"]))
story.append(Paragraph('About this document', styleH2))
story.append(Paragraph('History and Status', styleH2))
story.append(Paragraph("""
This is the test suite and documentation for the ReportLab open source barcode API,
being re-released as part of the forthcoming ReportLab 2.0 release.
""", styleN))
story.append(Paragraph("""
Several years ago Ty Sarna contributed a barcode module to the ReportLab community.
Several of the codes were used by him in his work and to the best of our knowledge
this was correct. These were written as flowable objects and were available in PDFs,
but not in our graphics framework. However, we had no knowledge of barcodes ourselves
and did not advertise or extend the package.
""", styleN))
story.append(Paragraph("""
We "wrapped" the barcodes to be usable within our graphics framework; they are now available
as Drawing objects which can be rendered to EPS files or bitmaps. For the last 2 years this
has been available in our Diagra and Report Markup Language products. However, we did not
charge separately and use was on an "as is" basis.
""", styleN))
story.append(Paragraph("""
A major licensee of our technology has kindly agreed to part-fund proper productisation
of this code on an open source basis in Q1 2006. This has involved addition of EAN codes
as well as a proper testing program. Henceforth we intend to publicise the code more widely,
gather feedback, accept contributions of code and treat it as "supported".
""", styleN))
story.append(Paragraph("""
This involved making available both downloads and testing resources. This PDF document
is the output of the current test suite. It contains codes you can scan (if you use a nice sharp
laser printer!), and will be extended over coming weeks to include usage examples and notes on
each barcode and how widely tested they are. This is being done through documentation strings in
the barcode objects themselves so should always be up to date.
""", styleN))
story.append(Paragraph('Usage examples', styleH2))
story.append(Paragraph("""
To be completed
""", styleN))
story.append(Paragraph('The codes', styleH2))
story.append(Paragraph("""
Below we show a scannable code from each barcode, with and without human-readable text.
These are magnified about 2x from the natural size done by the original author to aid
inspection. This will be expanded to include several test cases per code, and to add
explanations of checksums. Be aware that (a) if you enter numeric codes which are too
short they may be prefixed for you (e.g. "123" for an 8-digit code becomes "00000123"),
and that the scanned results and readable text will generally include extra checksums
at the end.
""", styleN))
codeNames = getCodeNames()
from reportlab.lib.utils import flatten
width = [float(x[8:]) for x in sys.argv if x.startswith('--width=')]
height = [float(x[9:]) for x in sys.argv if x.startswith('--height=')]
isoScale = [int(x[11:]) for x in sys.argv if x.startswith('--isoscale=')]
options = {}
if width: options['width'] = width[0]
if height: options['height'] = height[0]
if isoScale: options['isoScale'] = isoScale[0]
scales = [x[8:].split(',') for x in sys.argv if x.startswith('--scale=')]
scales = map(float,scales and flatten(scales) or [1])
for scale in scales:
story.append(PageBreak())
story.append(Paragraph('Scale = %.1f'%scale, styleH2))
story.append(Spacer(36, 12))
for codeName in codeNames:
s = [Paragra
| allotory/basilinna | app/main/upload_file.py | Python | mit | 1,433 | 0.007774 |
# -*- coding: utf-8 -*-
' Check whether the file extension is allowed '
__author__ = 'Ellery'
from app import app
import datetime, random
from PIL import Image
import os
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config.get('ALLOWED_EXTENSIONS')
def unique_name():
now_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
random_num = random.randint(0, 100)
if random_num <= 10:
random_num = str(0) + str(random_num)
unique_num = str(now_time) + str(random_num)
return unique_num
def image_thumbnail(filename):
filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
im = Image.open(filepath)
w, h = im.size
if w > h:
im.thumbnail((106, 106*h/w))
else:
im.thumbnail((106*w/h, 106))
im.save(os.path.join(app.config.get('UPLOAD_FOLDER'),
os.path.splitext(filename)[0] + '_thumbnail' + os.path.splitext(filename)[1]))
def image_delete(filename):
thumbnail_filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
filepath = thumbnail_filepath.replace('_thumbnail', '')
os.remove(filepath)
os.remove(thumbnail_filepath)
def cut_image(filename, box):
filepath = os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename)
im = Image.open(filepath)
new_im = im.crop(box)
new_im.save(os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename))
| dhowland/EasyAVR | keymapper/easykeymap/legacy.py | Python | gpl-2.0 | 6,758 | 0.001776 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2018 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module contains code to load legacy user save data."""
import pickle
import re
from .build import NUM_MACROS, NULL_SYMBOL, key_mode_map, led_modes, led_assignments
from .scancodes import s
|
cancodes
from .userdata import Map
legacy_layers = ["Default", "Layer 1", "Layer 2", "Layer 3", "Layer 4",
"Layer 5", "Layer 6", "Layer 7", "Layer 8", "Layer 9"]
class LegacySaveFileException(Exception):
"""Raised when an error is encountered while loading a legacy layout file."""
pass
def load_legacy(user_data, datfile):
"""Load the legacy .dat save file from the path given by `datfile` and populate
the
|
UserData object given by `user_data`.
"""
legacy_data = open_legacy(datfile)
convert_legacy(user_data, legacy_data)
def open_legacy(datfile):
"""Opens and decodes the pickled data in a legacy .dat save file. `datfile`
is a path to the file. The function returns a dictionary with an item for each
component of the legacy file.
"""
with open(datfile, 'rb') as fdin:
data = pickle.load(fdin)
if len(data) < 12:
raise LegacySaveFileException("The .dat file is either broken or too old.")
unique_id = data[1]
maps = data[2]
macros = data[3]
actions = data[4]
modes = data[5]
wmods = data[6]
layout_mod = data[8]
leds = data[9]
if len(data) > 11:
advancedleds = data[11]
useadvancedleds = data[12]
else:
advancedleds = [(255, 0)] * len(led_assignments)
useadvancedleds = False
if len(data) > 13:
ledlayers = data[13]
else:
ledlayers = [0, 0, 0, 0, 0]
# fixes for older versions (renamed layers)
for kmap in (maps, actions, modes, wmods):
if 'Fn' in kmap:
kmap['Layer 1'] = kmap['Fn']
del kmap['Fn']
# fixes for older versions (renamed/removed scancodes)
for layer in maps:
for row in maps[layer]:
for i, k in enumerate(row):
if k == "SCANCODE_DEBUG":
row[i] = "SCANCODE_CONFIG"
elif k == "SCANCODE_LOCKINGCAPS":
row[i] = "HID_KEYBOARD_SC_LOCKING_CAPS_LOCK"
elif k == "SCANCODE_FN":
row[i] = "SCANCODE_FN1"
elif k not in scancodes:
row[i] = NULL_SYMBOL
# fixes for older versions (renamed leds)
leds = ['Any Fn Active' if (x == 'Fn Lock') else x for x in leds]
leds = ['Fn1 Active' if (x == 'Fn Active') else x for x in leds]
# fixes for older versions (added macros)
extention = NUM_MACROS - len(macros)
if extention > 0:
macros.extend([''] * extention)
return {
'unique_id': unique_id,
'layout_mod': layout_mod,
'maps': maps,
'actions': actions,
'modes': modes,
'wmods': wmods,
'macros': macros,
'leds': leds,
'advancedleds': advancedleds,
'useadvancedleds': useadvancedleds,
'ledlayers': ledlayers,
}
def convert_legacy(user_data, legacy_data):
"""Converts the data from a legacy save file into a `user_data` object. `user_data`
should be a fresh instance of UserData and `legacy_data` is the output from a
successful call to open_legacy().
"""
# can't save to legacy file
user_data.path = None
# get good defaults to start from
user_data.new(legacy_data['unique_id'], legacy_data['layout_mod'])
# transmogrify the keymap data
for li, layer in enumerate(legacy_layers):
for ri, rowdef in enumerate(user_data.config.keyboard_definition):
if isinstance(rowdef, int):
continue
for ci, keydef in enumerate(rowdef):
keydim, matrix, _ = keydef
if user_data.layout_mod:
mod_map = user_data.config.alt_layouts[user_data.layout_mod]
keydim = mod_map.get((ri, ci), keydim)
if isinstance(keydim, tuple) and isinstance(matrix, tuple):
row, col = matrix
map = Map(legacy_data['maps'][layer][ri][ci],
key_mode_map[legacy_data['modes'][layer][ri][ci]],
legacy_data['actions'][layer][ri][ci],
legacy_data['wmods'][layer][ri][ci])
user_data.keymap[li][row][col] = map
# translate the macro data
user_data.macros = [translate_macro(macro) for macro in legacy_data['macros']]
# adapt the led data
user_data.led_modes = []
for old_assignment in legacy_data['leds']:
if old_assignment == 'Backlight':
user_data.led_modes.append(led_modes.index('Backlight'))
elif old_assignment in led_assignments:
user_data.led_modes.append(led_modes.index('Indicator'))
else:
user_data.led_modes.append(led_modes.index('Disabled'))
if legacy_data['useadvancedleds']:
for i, func in enumerate(legacy_data['advancedleds']):
led_id, _ = func
if led_id < len(user_data.led_modes):
user_data.led_modes[led_id] = led_modes.index('Indicator')
user_data.led_funcs[i] = func
# copy the rest
user_data.led_layers = legacy_data['ledlayers']
def translate_macro(input):
"""Translate the escape sequences in the original macro mini-language into
the equivalent representations in the new macro mini-language.
"""
# remove the special characters
input = input.replace("\\\\,", "\\")
input = input.replace("\\n,", "\n")
input = input.replace("\\t,", "\t")
# escape any $ symbols
input = input.replace("$", "$$")
# convert keyword format
input = re.sub(r'\\([A-Z0-9_]+\()', r'$\1', input)
# convert function/mod format
input = re.sub(r'\\([A-Z0-9_]+),', r'${\1}', input)
return input
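# Hedged illustration of the rules above (the macro text is an invented example):
# a legacy macro such as "hello\LSHIFT,a\n," would come out as "hello${LSHIFT}a"
# followed by a literal newline, since "\X," becomes "${X}" and "\n," becomes "\n".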
|
mjenrungrot/competitive_programming
|
UVa Online Judge/v104/10433.py
|
Python
|
mit
| 619
| 0
|
# =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 10433.py
# Description: UVa Online Judge - 10433
# =============================================================================
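# (Illustrative note, not from the original header: an automorphic number is one
#  whose square ends in the number itself, e.g. 5*5 = 25 and 76*76 = 5776.)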
while True:
try
|
:
str_N = input()
except EOFError:
break
N = int(str_N)
N2 = N * N
str_N2 = str(N2)
len_N = len(str_N)
if str_N2[-len_N:] == st
|
r_N:
print("Automorphic number of {}-digit.".format(len_N))
else:
print("Not an Automorphic number.")
|
MattNolanLab/ei-attractor
|
grid_cell_model/simulations/007_noise/figures/cosyne2015-abstract/figure_gamma.py
|
Python
|
gpl-3.0
| 492
| 0
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import copy
import matplotlib
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvi
|
ronment
import config
parser = flagparse.FlagParser()
parser.add_flag('--gammaSweep')
args = parser.parse_args()
env = NoiseEnvironment(user_config=config.get_config())
if args.gammaSweep or args.all:
env.register_plotte
|
r(noisefigs.plotters.GammaSweepsPlotter)
env.plot()
|
uclouvain/OSIS-Louvain
|
base/tests/views/learning_units/external/test_update.py
|
Python
|
agpl-3.0
| 4,921
| 0.001829
|
############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.messages import get_messages, SUCCESS
from django.test import TestC
|
ase
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from waffle.testutils import override_flag
from base.models.enums.entity_type import FACULTY
from base.models.enums.learning_container_year_types import EXTERNAL
from base.m
|
odels.enums.organization_type import MAIN
from base.tests.factories.academic_calendar import generate_learning_unit_edition_calendars
from base.tests.factories.academic_year import create_current_academic_year
from base.tests.factories.entity import EntityWithVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFullFactory
from base.tests.factories.person import PersonFactory
from base.tests.forms.test_external_learning_unit import get_valid_external_learning_unit_form_data
from base.views.learning_units.update import update_learning_unit
from learning_unit.tests.factories.central_manager import CentralManagerFactory
@override_flag('learning_unit_update', active=True)
class TestUpdateExternalLearningUnitView(TestCase):
@classmethod
def setUpTestData(cls):
cls.entity = EntityWithVersionFactory(organization__type=MAIN, version__entity_type=FACULTY)
cls.manager = CentralManagerFactory(entity=cls.entity, with_child=True)
cls.person = cls.manager.person
cls.academic_year = create_current_academic_year()
generate_learning_unit_edition_calendars([cls.academic_year])
cls.luy = LearningUnitYearFullFactory(
academic_year=cls.academic_year,
internship_subtype=None,
acronym="EFAC1000",
learning_container_year__container_type=EXTERNAL,
learning_container_year__requirement_entity=cls.entity,
learning_container_year__allocation_entity=cls.entity,
)
cls.data = get_valid_external_learning_unit_form_data(cls.academic_year, cls.luy, cls.entity)
cls.url = reverse(update_learning_unit, args=[cls.luy.pk])
def setUp(self):
self.external = ExternalLearningUnitYearFactory(learning_unit_year=self.luy)
self.client.force_login(self.person.user)
def test_update_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_update_get_permission_denied(self):
self.client.force_login(PersonFactory().user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_update_post(self):
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.level for m in get_messages(response.wsgi_request)]
self.assertEqual(messages, [SUCCESS])
def test_update_message_with_report(self):
self.data['postponement'] = "1"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (with report)."))
def test_update_message_without_report(self):
self.data['postponement'] = "0"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (without report)."))
|
translate/amagama
|
amagama/application.py
|
Python
|
gpl-3.0
| 1,780
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2014 Zuza Software Foundation
#
# This file is part of amaGama.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A translation memory server using tmdb for storage, communicates
with clients using JSON over HTTP."""
from flask import Flask
from amagama import tmdb
from amagama.views import api
class AmagamaServer(Flask):
def __init__(self, settings, *args, **kwargs):
super(AmagamaServer, self).__init__(*args, **kwargs)
self.config.from_pyfile(settings)
self.config.from_envvar('AMAGAMA_CONFIG', silent=True)
self.tmdb = tmdb.TMDB(
|
self)
def amagama_server_factory():
app = AmagamaServer("settings.py", __name__)
app
|
.register_blueprint(api.read_api, url_prefix='/tmserver')
app.register_blueprint(api.read_api, url_prefix='/api/v1')
if app.config['ENABLE_DATA_ALTERING_API']:
app.register_blueprint(api.write_api, url_prefix='/tmserver')
app.register_blueprint(api.write_api, url_prefix='/api/v1')
if app.config['ENABLE_WEB_UI']:
from amagama.views import web
app.register_blueprint(web.web_ui, url_prefix='')
return app
|
dimven/SpringNodes
|
py/String.ReplaceIllegalChars.py
|
Python
|
mit
| 557
| 0.028725
|
# Copyright(c) 2017, Dimitar Venkov
# @5devene, dimita
|
r.ven@gmail.com
# www.badmonkeys.net
from itertools import imap, repeat
import System
badChars = set(System.
|
IO.Path.GetInvalidFileNameChars() )
def tolist(x):
if hasattr(x,'__iter__'): return x
else : return [x]
def fixName(n, rep="", badChars=badChars):
n1 = (c if c not in badChars else rep for c in iter(n) )
return ''.join(n1)
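# Illustrative (assumes '<' and '>' are among the invalid filename characters
# reported by this OS): fixName('a<b>.txt', rep='_') would yield 'a_b_.txt'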
names = tolist(IN[0])
replacement = str(IN[1])
other = tolist(IN[2])
badChars.update(other)
OUT = map(fixName, imap(str, names), repeat(replacement, len(names) ) )
|
codeforamerica/mdc-inspectors
|
manage.py
|
Python
|
bsd-3-clause
| 1,026
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask_script import Manager, Shell, Server
from flask_script.commands import Clean, ShowUrls
from flask_migrate import MigrateCommand
from inspectors.app import create_app
from inspectors.settings import DevConfig, ProdConfig
from inspectors.database import db
app = create_app()
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = M
|
anager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, and db.
""
|
"
return {'app': app, 'db': db}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command("urls", ShowUrls())
manager.add_command("clean", Clean())
if __name__ == '__main__':
manager.run()
|
taedori81/shoop
|
shoop_tests/notify/test_log_entries.py
|
Python
|
agpl-3.0
| 1,388
| 0.002161
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shoop.notify import Context
from shoop_tests.notify.fixtures import get_initialized_test_event
@pytest.mark.django_db
def test_log_entries():
event = get_initialized_test_event()
ctx = Context.from_event(event)
order = ctx.get("order")
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
order.add_log_entry("blep")
assert ctx.log_entry_queryset.count() == n_log_entries + 2 # they got added
assert order.log_entries.last().message == "blep" # it's what we added
assert ctx.log_entry_queryset.last().message == "blep" # from this perspective too
@pytest.mark.django_db
@pytest.mark.parametrize("target_obj", (None, object()))
def test_log_entry_on_unloggable_o
|
bject(target_obj):
event = get_initialized_test_event()
event.variable_values["order"] = target_obj # invalidate log target _before_ creating context
ctx = Context.from_event(event)
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
assert ctx.log_entry_queryset.count() == n_log_entri
|
es # couldn't add :(
|
ucbvislab/radiotool
|
radiotool/algorithms/constraints.py
|
Python
|
isc
| 27,809
| 0.000467
|
# constraints should be multiplicative so we can do them in any order
# ok, maybe not multiplicative. not sure yet.
# want to avoid plateaus in the space.
import copy
import numpy as np
from scipy.special import binom
import scipy
import librosa_analysis
import novelty
BEAT_DUR_KEY = "med_beat_duration"
class ConstraintPipeline(object):
def __init__(self, constraints=None):
if constraints is None:
self.constraints = []
else:
self.constraints = constraints
def add_constraint(self, constraint):
self.constraints.append(constraint)
def apply(self, song, target_n_length):
n_beats = len(song.analysis["beats"])
beat_names = copy.copy(song.analysis["beats"])
transition_cost = np.zeros((n_beats, n_beats))
penalty = np.zeros((n_beats, target_n_length))
for constraint in self.constraints:
# print constraint
transition_cost, penalty, beat_names = constraint.apply(
transition_cost, penalty, song, beat_names)
return transition_cost, penalty, beat_names
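# Hedged usage sketch (the constraint choices and the "song" object are
# illustrative, not taken from this file):
#
#   pipeline = ConstraintPipeline()
#   pipeline.add_constraint(MinimumLoopConstraint(8))
#   pipeline.add_constraint(RandomJitterConstraint())
#   trans_cost, penalty, beat_names = pipeline.apply(song, target_n_length=64)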
class Constraint(object):
def __init__(self):
pass
def apply(self, transition_cost, penalty, song, beat_names):
return transition_cost, penalty, beat_names
class RandomJitterConstraint(Constraint):
def __init__(self, jitter_max=.001):
self.jitter = jitter_max
def apply(self, transition_cost, penalty, song, beat_names):
return (
transition_cost + self.jitter * np.random.rand(
transition_cost.shape[0], transition_cost.shape[1]),
penalty + self.jitter * np.random.rand(
penalty.shape[0], penalty.shape[1]),
beat_names)
class Timbr
|
ePitchConstraint(Constraint):
def __init__(self, timbre_weight=1, chroma_weight=1,
context=0):
self.tw = timbre_weight
self.cw = chroma_weight
self.m = context
def apply(self, transition_cost, penalty, song, beat_names)
|
:
timbre_dist = librosa_analysis.structure(
np.array(song.analysis['timbres']).T)
chroma_dist = librosa_analysis.structure(
np.array(song.analysis['chroma']).T)
dists = self.tw * timbre_dist + self.cw * chroma_dist
if self.m > 0:
new_dists = np.copy(dists)
coefs = [binom(self.m * 2, i) for i in range(self.m * 2 + 1)]
coefs = np.array(coefs) / np.sum(coefs)
for beat_i in xrange(self.m, dists.shape[0] - self.m):
for beat_j in xrange(self.m, dists.shape[1] - self.m):
new_dists[beat_i, beat_j] = 0.0
for i, c in enumerate(coefs):
t = i - self.m
new_dists[beat_i, beat_j] +=\
c * dists[beat_i + t, beat_j + t]
dists = new_dists
# dists = np.copy(song.analysis["dense_dist"])
# shift it over
dists[:-1, :] = dists[1:, :]
dists[-1, :] = np.inf
# don't use the final beat
dists[:, -1] = np.inf
transition_cost[:dists.shape[0], :dists.shape[1]] += dists
return transition_cost, penalty, beat_names
def __repr__(self):
return "TimbrePitchConstraint:" +\
"%f(timbre) + %f(chroma), %f(context)" % (self.tw, self.cw, self.m)
class RhythmConstraint(Constraint):
def __init__(self, beats_per_measure, penalty):
self.p = penalty
self.time = beats_per_measure
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
for i in range(self.time):
for j in set(range(self.time)) - set([(i + 1) % self.time]):
transition_cost[i:n_beats:self.time][j:n_beats:self.time] +=\
self.p
return transition_cost, penalty, beat_names
class MinimumLoopConstraint(Constraint):
def __init__(self, min_loop):
self.min_loop = min_loop
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
for i in range(n_beats):
for j in range(-(self.min_loop - 1), 1):
if 0 <= i + j < n_beats:
transition_cost[i, i + j] += np.inf
return transition_cost, penalty, beat_names
def __repr__(self):
return "MinimumLoopConstraint: min_loop(%d)" % self.min_loop
class LabelConstraint(Constraint):
def __init__(self, in_labels, target_labels, penalty, penalty_window=0):
self.in_labels = copy.copy(in_labels)
self.out_labels = target_labels
self.penalty = penalty
self.window = penalty_window
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
# extend in_labels to work with pauses that we may have added
if n_beats < transition_cost.shape[0]:
self.in_labels.extend(
[None] * (transition_cost.shape[0] - n_beats))
new_pen = np.ones(penalty.shape) * np.array(self.penalty)
# new_pen = np.ones((n_beats, len(self.penalty))) *\
# np.array(self.penalty)
n_target = penalty.shape[1]
for n_i in xrange(transition_cost.shape[0]):
node_label = self.in_labels[n_i]
for l in xrange(1, n_target - 1):
prev_target = self.out_labels[l - 1]
next_target = self.out_labels[l + 1]
target_label = self.out_labels[l]
if node_label == target_label or target_label is None:
new_pen[n_i, l] = 0.0
elif node_label is None:
# should this have a penalty?
new_pen[n_i, l] = 0.0
if self.window > 0:
if target_label != prev_target:
# reduce penalty for beats prior
span = min(self.window, l)
new_pen[n_i, l - span:l] =\
np.linspace(1.0, 0.01, num=span)
if target_label != next_target:
# reduce penalty for beats later
span = min(self.window, len(self.out_labels) - l - 1)
new_pen[n_i, l + 1:l + span + 1] =\
np.linspace(0.01, 1.0, num=span)
for l in [0, n_target - 1]:
target_label = self.out_labels[l]
if node_label == target_label or target_label is None:
new_pen[n_i, l] = 0.0
elif node_label is None:
new_pen[n_i, l] = 0.0
penalty += new_pen
return transition_cost, penalty, beat_names
def __repr__(self):
return "LabelConstraint"
class ValenceArousalConstraint(Constraint):
def __init__(self, in_va, target_va, penalty, penalty_window=0):
self.in_va = np.copy(in_va)
self.target_va = np.copy(target_va)
self.penalty = penalty
self.window = penalty_window
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
# extend in_va to work with pauses that have been added
if n_beats < transition_cost.shape[0]:
n_pauses = transition_cost.shape[0] - n_beats
extra_va = np.zeros((n_pauses, 2))
self.in_va = np.r_[self.in_va, extra_va]
new_pen = np.ones(penalty.shape) * np.array(self.penalty)
n_target = penalty.shape[1]
for n_i in xrange(transition_cost.shape[0]):
node_va = self.in_va[n_i]
for l in xrange(n_target):
if n_i < n_beats:
new_pen[n_i, l] *=\
np.linalg.norm(self.target_va[l] - node_va)
else:
# pauses have no penalty here
new_pen[n_i, l] *= 0
penalty += new_pen
return transition_cost, penalty, beat_names
def __repr__(self):
return "ValenceArousalConstraint"
class GenericTimeSensitivePena
|
dmsurti/mayavi
|
mayavi/filters/user_defined.py
|
Python
|
bsd-3-clause
| 3,082
| 0.001622
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` bu
|
t allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
|
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
"""Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
|
c1728p9/mbed-os
|
tools/host_tests/host_tests_plugins/module_copy_mbed.py
|
Python
|
apache-2.0
| 2,899
| 0.001725
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from shutil import copy
from .host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_Mbed(HostTestPluginBase):
def generic_mbed_copy(self, image_path, destination_disk):
""" Generic mbed copy method for "mbed enabled" devices.
            It uses the standard python shutil function to copy
image_file (target specific binary) to device's disk.
"""
result = True
if not destination_disk.endswith('/') and not destination_disk.endswith('\\'):
destination_disk += '/'
try:
copy(image_path, destination_disk)
except Exception as e:
self.print_plugin_error("shutil.copy('%s', '%s')"% (image_path, destination_disk))
self.print_plugin_error("Error: %s"% str(e))
result = False
return result
# Plugin interface
name = 'HostTestPluginCopyMethod_Mbed'
type = 'CopyMethod'
stable = True
capabilities = ['shutil', 'default']
required_parameters = ['image_path', 'destination_disk', 'program_cycle_s']
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
""" Executes capability by name.
|
Each capability may directly just call some command line
program or execute building pythonic function
"""
result = False
if self.check_parameters(capability, *
|
args, **kwargs) is True:
# Capability 'default' is a dummy capability
if capability == 'shutil':
image_path = kwargs['image_path']
destination_disk = kwargs['destination_disk']
program_cycle_s = kwargs['program_cycle_s']
# Wait for mount point to be ready
self.check_mount_point_ready(destination_disk) # Blocking
result = self.generic_mbed_copy(image_path, destination_disk)
# Allow mbed to cycle
sleep(program_cycle_s)
return result
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Mbed()
|
jiasir/openstack-trove
|
lib/charmhelpers/contrib/openstack/neutron.py
|
Python
|
mit
| 7,812
| 0.000256
|
# Various utilies for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
|
if kernel_ver
|
sion() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
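# Hedged example (the plugin and attribute values are illustrative): with
# network-manager set to "neutron", a call like
#   neutron_plugin_attribute('ovs', 'server_packages')
# would return the ['neutron-server', ...] list defined in neutron_plugins() above.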
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (e.g., deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
|
fams/rabbitmq-tutorials
|
python-puka/receive_logs_direct.py
|
Python
|
apache-2.0
| 902
| 0
|
#!/usr/bin/env python
import puka
import sys
client = p
|
uka.Client("amqp://localhost/")
promise = client.connect()
client.wait(promise)
promise = client.exchange_declare(exchange='direct_logs', type='direct')
client.wait(promise)
promise = client.queue_declare(exclusive=True)
queue_name = client.wait(promise)['queue']
severities = sys.argv[1:]
if not severities:
print >> sys.stderr, "Usage: %s [info] [warning] [error]" % (sys.argv[0],)
sys.exit(1)
for severity in severitie
|
s:
promise = client.queue_bind(exchange='direct_logs', queue=queue_name,
routing_key=severity)
client.wait(promise)
print ' [*] Waiting for logs. To exit press CTRL+C'
consume_promise = client.basic_consume(queue=queue_name, no_ack=True)
while True:
msg_result = client.wait(consume_promise)
print " [x] %r:%r" % (msg_result['routing_key'], msg_result['body'])
|
PainNarrativesLab/IOMNarratives
|
IomDataModels.py
|
Python
|
mit
| 13,627
| 0.001614
|
"""
Contains the database connection tools and sqlalchemy models for iom database
Created by adam on 11/11/15
In order to use this, import the module and
create a sqlalchemy engine named 'engine' then do:
# connect to db
from sqlalchemy.orm import sessionmaker
# ORM's handle to database at global level
Session = sessionmaker(bind=engine)
Finally when ready to make queries, do:
#connect to db: Local object
session = Session()
The local session object is then used to make queries like:
s = session.query(Testimony).all() # All testimony objects
s1 = session.query(Testimony).order_by(Testimony.quoteID)[0:99] # First 100 vignettes
"""
__author__ = 'adam'
import os
import sys
import xml.etree.ElementTree as ET
# sqlalchemy tools
import sqlalchemy
from sqlalchemy import Table, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# connecting to db
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class Connection(object):
"""
Parent class for creating sqlalchemy engines, session objects,
and other db interaction stuff behind the scenes from a file
holding credentials
Attributes:
engine: sqlalchemy engine instance
session: sqlalchemy local session object. This is the property that should do most work
_credential_file: String path to file with db connection info
_username: String db username
_password: String db password
_server: String db server
_port: db port
_db_name: String name of db
"""
def __init__(self, credential_file=None):
"""
Loads db connection credentials from file and returns a mysql sqlalchemy engine
Args:
:param credential_file: String path to the credential file to use
Returns:
:return: sqlalchemy.create_engine Engine instance
"""
self._credential_file = credential_file
self._load_credentials()
self._make_engine()
def _load_credentials(self):
"""
Opens the credentials file and loads the attributes
"""
if self._credential_file is not None:
credentials = ET.parse(self._credential_file)
self._server = credentials.find('db_host').text
self._port = credentials.find('db_port').text
if self._port is not None:
self._port = int(self._port)
self._username = credentials.f
|
ind('db_user').text
self._db_name = credentials.find('db_name').text
self._password = credentials.find('db_password').text
def _make_engine(self):
"""
Creates the sqlalchemy engine and stores it in self.engine
"""
raise NotImplementedError
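# Hedged sketch of the credential file layout implied by _load_credentials()
# above (the root element name and all values are placeholders, not taken from
# this module):
#
#   <credentials>
#     <db_host>localhost</db_host>
#     <db_port>3306</db_port>
#     <db_user>iom_user</db_user>
#     <db_password>secret</db_password>
#     <db_name>iom</db_name>
#   </credentials>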
class MySqlConnection(
|
Connection):
"""
Uses the MySQL-Connector-Python driver (pip install MySQL-Connector-Python driver)
"""
def __init__(self, credential_file):
self._driver = '+mysqlconnector'
super().__init__(credential_file)
def _make_engine(self):
if self._port:
server = "%s:%s" % (self._server, self._port)
else:
server = self._server
self._dsn = "mysql%s://%s:%s@%s/%s" % (self._driver, self._username, self._password, server, self._db_name)
self.engine = create_engine(self._dsn)
class SqliteConnection(Connection):
"""
Makes a connection to an in memory sqlite database.
    Note that this does not actually populate the database; that
    requires a call such as Base.metadata.create_all(connection.engine)
"""
def __init__(self):
super().__init__()
def _make_engine(self):
self.engine = create_engine('sqlite:///:memory:', echo=True)
class BaseDAO(object):
"""
Parent class for database interactions.
The parent will hold the single global connection (i.e. sqlalchemy Session)
to the db.
Instance classes will have their own session instances
Attributes:
global_session: (class attribute) A sqlalchemy configurable sessionmaker factory (sqlalchemy.orm.session.sessionmaker)
bound to the engine. Is not itself a session. Instead, it needs to be instantiated: DAO.global_session()
engine: sqlalchemy.engine.base.Engine instance
"""
global_session = None
def __init__(self, engine):
assert(isinstance(engine, sqlalchemy.engine.base.Engine))
self.engine = engine
if BaseDAO.global_session is None:
BaseDAO._create_session(engine)
@staticmethod
def _create_session(engine):
"""
Instantiates the sessionmaker factory into the global_session attribute
"""
BaseDAO.global_session = sqlalchemy.orm.sessionmaker(bind=engine)
class DAO(BaseDAO):
"""
example instance. Need to use metaclass to ensure that
all instances of DAO do this
"""
def __init__(self, engine):
assert(isinstance(engine, sqlalchemy.engine.base.Engine))
super().__init__(engine)
self.session = BaseDAO.global_session()
#######################################
# Database models #
#######################################
# Base class that maintains the catalog of tables and classes in db
Base = declarative_base()
condition_testimony_table = Table('iom_conditionsXtestimony', Base.metadata,
Column('quote_id', Integer, ForeignKey('iom_testimony.quote_id')),
Column('condition_id', Integer, ForeignKey('iom_conditions.condition_id'))
)
class Testimony(Base):
"""
Properties:
condition_ids: Tuple of condition ids identified in vignette
condition_names: Tuple of condition names identified in vignette
"""
__tablename__ = "iom_testimony"
quote_id = Column(Integer, primary_key=True)
respondent_id = Column(Integer)
question_number = Column(Integer)
quote_text = Column(String)
# many to many Testimony<->Condition
conditions = relationship('Condition', secondary=condition_testimony_table, backref="iom_testimony")
def get_condition_ids(self):
"""
Returns a tuple of unique condition ids identified for
the vignette
"""
self.condition_ids = []
        [self.condition_ids.append(c.condition_id) for c in self.conditions]
self.condition_ids = tuple(set(self.condition_ids))
return self.condition_ids
def get_condition_names(self):
"""
Returns a tuple of any condition names identified for
the vignette
"""
self.condition_names = []
        [self.condition_names.append(c.condition_name) for c in self.conditions]
self.condition_names = tuple(set(self.condition_names))
return self.condition_names
def get_id(self):
"""
Getter for quote_id
Returns:
Integer representation of the id of the vignette
"""
return self.quote_id
class Condition(Base):
"""
Properties:
quote_ids: List of associated vignette ids
respondent_ids: List of associated respondent ids
"""
__tablename__ = 'iom_conditions'
condition_id = Column(Integer, primary_key=True)
condition_name = Column(String)
# many to many Condition<->Alias
# aliases = relationship('Alias', backref='iom_conditions')
# many to many Testimony<->Condition
testimony = relationship('Testimony', secondary=condition_testimony_table, backref="iom_conditions")
def get_vignette_ids(self):
"""
Returns a tuple of quote ids wherein the condition is mentioned
"""
self.quote_ids = []
[self.quote_ids.append(t.quote_id) for t in self.testimony]
return tuple(self.quote_ids)
def get_respondent_ids(self):
"""
Returns a tuple of ids of respondents who mentioned the condition
Also sets attribute respondent_ids
"""
self.respondent_ids = []
        [self.respondent_ids.append(t.respondent_id) for t in self.testimony]
        return tuple(self.respondent_ids)
|
lcc755/WorkTerm1
|
5Array/2Temp/Code/average.py
|
Python
|
apache-2.0
| 897
| 0.020067
|
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
#Connect to Arduino. Automatically finds serial port.
connection = SerialManager()
a = ArduinoApi(connection = connection)
sensor = 14 #Analog pin 0
a.pinMode(sensor, a.INPUT) #Setup sensor
while
|
True:
    readings = [0.0] * 24  #Array to hold this pass's 24 temperature readings
    total = 0 #Each set of readings starts with a total of 0
    #Get all the readings:
for i in range(0, 24):
reading
|
= a.analogRead(sensor) #get reading
vol = (reading*(5.0/1024)) #relative voltage
temp = ((vol-0.5)*100) #find temp
readings[i] = temp #Place temp reading in i space of array
sleep(0.1) #Time between readings
#Add the readings:
for i in range(0, 24):
total += readings[i]
#Find the average and print:
average = total/24
print("The average temp is ")
print(average)
|
akashlevy/Lyff
|
lyff_lambda/boto/s3/key.py
|
Python
|
mit
| 83,034
| 0.000313
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANT
|
IES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import b
|
oto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdiges
|
OpenTransitTools/otp_client_py
|
ott/otp_client/otp_to_ott.py
|
Python
|
mpl-2.0
| 36,580
| 0.004265
|
""" Convert an OpenTripPlanner json itinerary response into something that's more suitable for rendering via a webpage
"""
import re
import sys
import math
from decimal import *
import datetime
from datetime import timedelta
import simplejson as json
from ott.utils import object_utils
from ott.utils import date_utils
from ott.utils import json_utils
import logging
log = logging.getLogger(__file__)
def remove_agency_from_id(id):
""" OTP 1.0 has TriMet:1 for trip and route ids
"""
ret_val = id
if id and ":" in id:
v = id.split(":")
if v and len(v) > 1 and len(v[1]) > 0:
ret_val = v[1].strip()
return ret_val
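# Illustrative (the id value below is an assumption): remove_agency_from_id("TriMet:141")
# returns "141", while an id without a ":" separator is returned unchanged.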
class Error(object):
def __init__(self, jsn, params=None):
self.id = jsn['id']
self.msg = jsn['msg']
class DateInfo(object):
def __init__(self, jsn):
# import pdb; pdb.set_trace()
self.start_time_ms = jsn['startTime']
self.end_time_ms = jsn['endTime']
start = datetime.datetime.fromtimestamp(self.start_time_ms / 1000)
end = datetime.datetime.fromtimestamp(self.end_time_ms / 1000)
self.start_date = "{}/{}/{}".format(start.month, start.day, start.year) # 2/29/2012
self.end_date = "{}/{}/{}".format(end.month, end.day, end.year) # 2/29/2012
self.start_time = start.strftime(" %I:%M%p").lower().replace(' 0','') # "3:40pm" -- note, keep pre-space
self.end_time = end.strftime(" %I:%M%p").lower().replace(' 0','') # "3:44pm" -- note, keep pre-space
# service_date is important to link off to proper stop timetables
# in OTP 1.0, we have: <serviceDate>20161123</serviceDate>
# in older versions of OTP, there's no such date so set it to start_date
if 'serviceDate' in jsn and len(jsn['serviceDate']) == 8:
syear = jsn['serviceDate'][0:4]
smonth = jsn['serviceDate'][4:6].lstrip('0')
sday = jsn['serviceDate'][6:].lstrip('0')
self.service_date = "{}/{}/{}".format(smonth, sday, syear) # 2/29/2012
else:
self.service_date = self.estimate_service_date(start)
# OTP 1.0 has seconds not millisecs for duration
durr = int(jsn['duration'])
if durr < 60000:
durr = durr * 1000
self.duration_ms = durr
self.duration = ms_to_minutes(self.duration_ms, is_pretty=True, show_hours=True)
self.date = "%d/%d/%d" % (start.month, start.day, start.year) # 2/29/2012
self.pretty_date = start.strftime("%A, %B %d, %Y").replace(' 0',' ') # "Monday, March 4, 2013"
self.day = start.day
self.month = start.month
self.year = start.year
def estimate_service_date(self, start):
""" in OTP 1.0, we are provided a service_date that's very important to linking to proper schedules, etc...
        but in prior versions we are missing service_date, so this routine is going to calculate the service date
        this way: if the hour is earlier than 3am, then use 'yesterday' as the service date.  This is a hack that
works for agencies like TriMet, which do not have Owl service.
NOTE: there are often instances in parsing OTP 1.0 (non Legs) that also don't have a service_date attribute,
so this routine will also be called. (Service date is mostly used for linking a transit leg
to a stop schedule, so...)
"""
d = start
if start.hour < 3:
""" yesterday calculation for times less than 3am """
d = start - timedelta(days=1)
ret_val = "{}/{}/{}".format(d.month, d.day, d.year) # 2/29/2012
return ret_val
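# Illustrative (dates assumed, not from this file): a leg starting at 1:30am on
# 3/5/2013 would get a service_date of "3/4/2013", while one starting at 7:00am
# would keep "3/5/2013".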
class DateInfoExtended(DateInfo):
"""
"""
def __init__(self, jsn):
super(DateInfoExtended, self).__init__(jsn)
self.extended = True
# step 1: get data
walk = get_element(jsn, 'walkTime', 0)
tran = get_element(jsn, 'transitTime', 0)
wait = get_element(jsn, 'waitingTime', 0)
tot = walk + tran + wait
# step 2: trip length
h,m = seconds_to_hours_minutes(tot)
self.total_time_hours = h
self.total_time_mins = m
self.duration_min = int(round(tot / 60))
# step 3: transit info
h,m = seconds_to_hours_minutes(tran)
self.transit_time_hours = h
self.transit_time_mins = m
self.start_transit = "TODO"
self.end_transit = "TODO"
# step 4: bike / walk length
self.bike_time_hours = None
self.bike_time_mins = None
self.walk_time_hours = None
self.walk_time_mins = None
if 'mode' in jsn and jsn['mode'] == 'BICYCLE':
h,m = seconds_to_hours_minutes(walk)
self.bike_time_hours = h
self.bike_time_mins = m
else:
h,m = seconds_to_hours_minutes(walk)
self.walk_time_hours = h
self.walk_time_mins = m
# step 5: wait time
h,m = seconds_to_hours_minutes(wait)
self.wait_time_hours = h
self.wait_time_mins = m
# step 5: drive time...unused as of now...
self.drive_time_hours = None
self.drive_time_mins = None
self.text = self.get_text()
def get_text(self):
"""
"""
ret_val = ''
tot = hour_min_string(self.total_time_hours, self.total_time_mins)
walk = hour_min_string(self.walk_time_hours, self.walk_time_mins)
bike = hour_min_string(self.bike_time_hours, self.bike_time_mins)
wait = hour_min_string(self.wait_time_hours, self.wait_time_mins)
return ret_val
class Elevation(object):
def __init__(self, steps):
self.points = None
self.points_array = None
self.distance = None
self.start_ft = None
self.end_ft = None
self.high_ft = None
self.low_ft = None
self.rise_ft = None
self.fall_ft = None
self.grade = None
self.distance = self.make_distance(steps)
self.points_array, self.points = self.make_points(steps)
self.grade = self.find_max_grade(steps)
self.set_marks()
@classmethod
def make_distance(cls, steps):
""" loop through distance
"""
ret_val = None
try:
dist = 0
for s in steps:
dist += s['distance']
ret_val = dist
except Exception as ex:
log.warning(ex)
return ret_val
@classmethod
def make_point_string(cls, points, max_len=50):
"""
"""
points_array = points
if len(points) > (max_len * 1.15):
# reduce the point array down to something around the size of max_len (or smaller)
points_array = []
# slice the array up into chunks
# @see http://stackoverflow.com/questions/1335392/iteration-over-list-slices (thank you Nadia)
slice_size = int(round(len(points) / max_len))
if slice_size == 1:
slice_size = 2
list_of_slices = zip(*(iter(points),) * slice_size)
# average up the slices
for s in list_of_slices:
avg = sum(s) / len(s)
points_array.append(avg)
points_string = ','.join(["{0:.2f}".format(p) for p in points_array])
return points_string
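    # Illustrative (point count assumed): with max_len=50, a 200-point elevation
    # profile gives slice_size = 4, so each chunk of 4 points is replaced by its
    # average, leaving roughly 50 comma-separated values in the string.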
@classmethod
def make_points(cls, steps):
""" parse leg for list of elevation points and distances
"""
points_array = None
points_string = None
try:
points = []
for s in steps:
for e in s['elevation']:
elev = e
|
['second']
dist = e['first']
points.append(round(elev, 2))
if len(points) > 0:
points_array = points
points_string = cls.make_point_string(points)
except Exception as e:
log.warning(e)
return points_array, points_string
@classmethod
def find_max_grade(cls, steps):
""" parse leg
|
for list of elevation poi
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models39351.py
|
Python
|
gpl-3.0
| 17,562
| 0.025111
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((1187.5, 11664.8, 3272.4), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1770.89, 9961.76, 3299.8), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3687.55, 9634.71, 3057.58), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2648.01, 11291.3, 1826.19), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((4044.92, 11971.5, 1372.47), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((6062.7, 11004.3, 2417.93), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((7263.91, 10793.8, 3615.86), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((6953.99, 11521.2, 3125.77), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((7907.5, 9894.78, 5077.31), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8303.41, 10606.5, 6543.54), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((8648.15, 8917.55, 7280.2), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7188.16, 8586, 7361.06), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((5928.71, 7709.66, 7700.76), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5062.69, 8985.9, 7445.77), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((3197.44, 8804, 8744.82), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((744.469, 6958.43, 8463.76), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1208.16, 5977.81, 6850.56), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((2088.62, 5295.02, 7713.93), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3550.58, 6112.9, 7907.4), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4377.06, 6492.76, 9085.32), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6007.56, 6982.35, 7340.01), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((4495.74, 6025.06, 8338.37), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4671.59, 5400.4, 7990.14), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4094.31, 4289.1, 7702.68), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((2855.56, 4857.38, 7343.13), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((1278.3, 4876.89, 7731.96), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2784.44, 5323.8, 7802.94), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4288.76, 6780.35, 7064.9), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5238.01, 5659.52, 7410.52), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((6351.12, 6173.15, 7332.57), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
 s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
 mark=s.place_marker((6084.66, 5685.12, 6880.48), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6675.68, 7281.04, 7241.44), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
 s= marker_sets["particle_32 geometry"]
|
coyote240/zeroecks.com
|
zeroecks/handlers/base_handler.py
|
Python
|
mit
| 1,495
| 0
|
import redis
import psycopg2
from tornado.options import options
import tornadobase.handlers
class BaseHandler(tornadobase.handlers.BaseHandler):
def initialize(self):
        self.dbref = psycopg2.connect(dbname=options.dbname,
                                      user=options.dbuser,
password=options.dbpass)
self.dbref.autocommit = True
self.redis = redis.StrictRedis(host='localhost',
port=6379,
db=0,
decode_responses=True)
@property
def session(self):
        session_id = self.get_secure_cookie('session')
if session_id is not None:
return self.redis.hgetall(session_id)
return None
def get_current_user(self):
session_id = self.get_secure_cookie('__id')
if session_id is not None:
self.redis.expire(session_id, options.session_timeout)
return self.redis.hget(session_id, 'userid')
return None
def write_error(self, status_code, **kwargs):
(http_error, error, stacktrace) = kwargs['exc_info']
if not hasattr(error, 'reason'):
reason = 'Something went wrong.'
else:
reason = error.reason
self.render('errors/general.html',
status_code=status_code,
reason=reason)
def on_finish(self):
self.dbref.close()
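# --- Hedged usage sketch (illustrative, not from the original repo): wiring BaseHandler into
# --- a Tornado app. Assumes the project's options (dbname/dbuser/dbpass/session_timeout) are
# --- already defined and that local Postgres/Redis instances are reachable.
import tornado.ioloop
import tornado.web

class PingHandler(BaseHandler):
    def get(self):
        # get_current_user() reads the '__id' secure cookie and refreshes the Redis session TTL
        self.write({'ok': True, 'user': self.get_current_user()})

if __name__ == '__main__':
    app = tornado.web.Application([(r'/ping', PingHandler)], cookie_secret='change-me')
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()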
|
ZakDoesGaming/OregonTrail
|
lib/afflictionBox.py
|
Python
|
mit
| 523
| 0.032505
|
from pygame import Rect
class AfflictionBox():
def __init__(self, affliction, font, rectPosition = (0, 0)):
self.affliction = affliction
self.rectPosition = rectPosition
        self.name = self.affliction.name
self.font = font
self.textSize = self.font.size(self.name)
self.textRect = Rect(self.rectPosition, self.textSize)
    def update(self, rectPosition):
self.rectPosition = rectPosition
self.textRect.centerx = rectPosition[0] + self.textSize[0]
self.textRect.centery = rectPosition[1] + self.textSize[1]
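# --- Hedged usage sketch (illustrative, not from the original repo): the Affliction stand-in
# --- below is a placeholder; in the game the real affliction object comes from other modules.
if __name__ == '__main__':
    import collections
    import pygame
    pygame.font.init()
    Affliction = collections.namedtuple('Affliction', 'name')
    box = AfflictionBox(Affliction('Dysentery'), pygame.font.SysFont(None, 24), rectPosition = (10, 10))
    box.update((40, 60))   # recenters the label rect relative to the new position
    print(box.name, box.textRect)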
|
tao12345666333/Talk-Is-Cheap
|
ansible/plugins/callback/test.py
|
Python
|
mit
| 3,110
| 0.000646
|
#!/usr/bin/env python
# coding=utf-8
import requests
import time
import json
"""
Ansible run result callback.
"""
class CallbackModule(object):
def v2_runner_item_on_ok(self, *args, **kwargs):
# time.sleep(10)
# print args
for i in dir(args[0]):
if not i.startswith('__'):
print i
print '======'
# print args[0]._result
print json.dumps(args[0]._result, indent=4)
print args[0]._task
print 'runner item on ok'
def v2_runner_item_on_failed(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner item on failed'
# print args[0]._result
print json.dumps(args[0]._result, indent=4)
print args[0]._task
print '======'
def v2_runner_item_on_skipped(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner item on skipped'
def v2_runner_retry(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner on retry'
def v2_runner_on_ok(self, *args, **kwargs):
print 'runner on ok'
# # print args
# print dir(args[0])
for i in dir(args[0]):
if not i.startswith('__'):
print i
print json.dumps(args[0]._result, indent=4)
print args[0]._task
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)
# print type(args[0]._task), 'task type'
# print args[0]._host
# print kwargs
def v2_runner_on_unreachable(self, *args, **kwargs):
print 'runner on unreacheable'
# # print args
print dir(args[0])
# print args[0]._result
# print args[0]._task
# print args[0]._host
# print kwargs
def v2_runner_on_failed(self, *args, **kwargs):
# # print args
print dir(args[0])
# print args[0]._result
# print args[0]._task
# print args[0]._host
# print kwargs
print 'runner on failed'
print json.dumps(args[0]._result, indent=4)
print args[0]._task
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._task)
print args[0].is_failed(), '-*/***********'
print '------'
def v2_runner_on_skipped(self, *args, **kwargs):
print 'runner on skipped'
    def v2_playbook_on_stats(self, *args, **kwargs):
# print args
# print dir(args[0])
for i in dir(args[0]):
if not i.startswith('__'):
print i
# print args[0].changed, 'changed'
# print args[0].ok, 'ok'
# print args[0].dark, 'dark'
        print args[0].failures, 'failures'
# print args[0].increment, 'increment'
# print args[0].processed, 'processed'
# print args[0].skipped, 'skipped'
# print args[0].summarize, 'summarize'
# print kwargs
print 'on stats'
if __name__ == '__main__':
print 'callback'
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_30/ar_/test_artificial_1024_Quantization_PolyTrend_30__20.py
|
Python
|
bsd-3-clause
| 269
| 0.085502
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);
|
ikasumi/chainer
|
tests/cupy_tests/test_ndarray_elementwise_op.py
|
Python
|
mit
| 8,059
| 0
|
import operator
import unittest
import numpy
import six
from cupy import testing
@testing.gpu
class TestArrayElementwiseOp(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_scalar_op(self, op, xp, dtype, swap=False):
a = testing.shaped_arange((2, 3), xp, dtype)
if swap:
return op(dtype(2), a)
else:
return op(a, dtype(2))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub)
def test_rsub_scalar(self):
        self.check_array_scalar_op(operator.sub, swap=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.itruediv)
def test_div_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div)
def test_rdiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div, swap=True)
def test_idiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.idiv)
def test_floordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv)
def test_rfloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv, swap=True)
def test_ifloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.ifloordiv)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
def test_ipow_scalar(self):
self.check_array_scalar_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_array_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return op(a, b)
    def test_add_array(self):
        self.check_array_array_op(operator.add)
    def test_iadd_array(self):
        self.check_array_array_op(operator.iadd)
    def test_sub_array(self):
        self.check_array_array_op(operator.sub)
    def test_isub_array(self):
        self.check_array_array_op(operator.isub)
    def test_mul_array(self):
        self.check_array_array_op(operator.mul)
    def test_imul_array(self):
        self.check_array_array_op(operator.imul)
    def test_truediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.truediv)
    def test_itruediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.itruediv)
    def test_div_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.div)
    def test_idiv_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.idiv)
    def test_floordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.floordiv)
    def test_ifloordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.ifloordiv)
    def test_pow_array(self):
        self.check_array_array_op(operator.pow)
    def test_ipow_array(self):
        self.check_array_array_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_broadcasted_op(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        b = testing.shaped_arange((2, 1), xp, dtype)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
self.check_array_broadcasted_op(operator.sub)
def test_broadcasted_isub(self):
self.check_array_broadcasted_op(operator.isub)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.div)
def test_broadcasted_idiv(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.idiv)
def test_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.floordiv)
def test_broadcasted_ifloordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.ifloordiv)
def test_broadcasted_pow(self):
self.check_array_broadcasted_op(operator.pow)
def test_broadcasted_ipow(self):
self.check_array_broadcasted_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_doubly_broadcasted_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 1, 3), xp, dtype)
b = testing.shaped_arange((3, 1), xp, dtype)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
numpy.seterr(divide='ignore', invalid='ignore')
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.floordiv)
def test_doubly_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.div)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, dtype):
a = testing.shaped_arange((5,), xp, dtype)
return op(a, a[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
|
harveyr/omni-api
|
omni_api/hackpad.py
|
Python
|
mit
| 1,372
| 0
|
from omni_api.base import ClientBase, DataItem
class HackpadClient(ClientBase):
def __init__(self, client_id, secret):
self.auth = self.get_oauth_token(client_id, secret)
    def get_url(self, url, **kwargs):
kwargs['auth'] = self.auth
return super(HackpadClient, self).get_url(
url,
load_json=True,
**kwargs
)
def search(self, query):
url = 'https://hackpad.com/api/1.0/search'
params = {
'q': query,
}
result = self.get_url(url, params=params)
return [HackPad(i) for i in result]
def all_pads(self):
"""Returns list of pad ids."""
# Stupid hack until the necessary endpoint exists
return self.search('a')
class HackPad(DataItem):
@property
def id(self):
return self.data['id']
@property
def creator_id(self):
return self.data['creatorId']
@property
def domain_id(self):
return self.data['domainId']
@property
def last_edited(self):
return self.parse_date(self.data['lastEditedDate'])
@property
def last_editor_id(self):
return self.data['lastEditorId']
@property
def snippet(self):
"""Markup"""
return self.data['snippet']
@property
def title(self):
return self.data['title']
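# --- Hedged usage sketch (illustrative, not from the original repo): CLIENT_ID and SECRET are
# --- placeholders for real Hackpad OAuth credentials.
if __name__ == '__main__':
    client = HackpadClient('CLIENT_ID', 'SECRET')
    for pad in client.search('roadmap'):          # each hit is wrapped in a HackPad DataItem
        print(pad.id, pad.title, pad.last_edited)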
|
ivandeex/dz
|
dz/tests/views.py
|
Python
|
mit
| 3,249
| 0.003078
|
class ListViewTestsMixin(object):
def test_admin_users_can_access_all_tables(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip', 'crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_access=True)
def test_admin_users_can_crawl_news_and_tips(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_crawl=True)
def test_admin_users_can_delete_rows_in_tables(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip', 'crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_use_row_actions=True)
def test_admin_users_can_export_news_and_tips(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_export=True)
def test_admin_users_cannot_export_other_models(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_export=False)
def test_simple_users_can_export_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_export=True)
def test_simple_users_cannot_crawl_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_crawl=False)
def test_simple_users_cannot_delete_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_use_row_actions=False)
def test_simple_users_cannot_access_privileged_tables(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_access=False)
    def _page_should_load_custom_js_css(self, response, info, target, skin):
msg = 'the page must load custom js/css' + info
for bundle in ('common', skin):
if target == 'prod':
bundle += '.min'
link_css = ('<link type="text/css" href="/static/%s/dz-%s.css?hash='
% (target, bundle))
self.assertContains(response, link_css, msg_prefix=msg)
link_js = ('<script type="text/javascript" src="/static/%s/dz-%s.js?hash='
% (target, bundle))
self.assertContains(response, link_js, msg_prefix=msg)
|
hada2/bingrep
|
bingrep_dump.py
|
Python
|
bsd-3-clause
| 8,471
| 0.007909
|
# BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg)
return cfgs
def hash_label(marks):
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
def get_marks(block, gamma):
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx",
|
"dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
            elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
    parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='')
|
tbielawa/Taboot
|
taboot/tasks/httpd.py
|
Python
|
gpl-3.0
| 805
| 0
|
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""TODO: Decide what to do with this file"""
|
KhronosGroup/COLLADA-CTS
|
StandardDataSets/collada/library_geometries/geometry/mesh/polygons/one_geometry_one_polygons/one_geometry_one_polygons.py
|
Python
|
mit
| 5,053
| 0.008114
|
# Copyright (C) 2007 - 2009 Khronos Group
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
#
# This sample judging object does the following:
#
# JudgeBaseline: verifies that app did not crash, the required steps have been performed,
# the rendered images match, and the required element(s) has been preserved
# JudgeExemplary: returns Baseline status.
# JudgeSuperior: returns Baseline status.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['library_geometries', 'geometry', 'mesh', 'triangles'], ['library_geometries', 'geometry', 'mesh', 'polygons'], ['library_geometries', 'geometry', 'mesh', 'polylist']]
attrName = 'count'
attrVal = '6'
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
# Check for preservation of element
self.__assistant.ElementTransformed(context, self.tagList)
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.status_exemplary = False
if (self.__assistant.ElementPreserved(context, self.tagList[1], False)):
context.Log("PASSED: Geometry preserved as " + self.tagList[1][len(self.tagList[1])-1] + ".")
if (self.__assistant.AttributeCheck(context, self.tagList[1], self.attrName, self.attrVal)):
self.status_exemplary = True
elif (self.__assistant.ElementPreserved(context, self.tagList[2], False)):
context.Log("PASSED: Geometry preserved as " + self.tagList[2][len(self.tagList[2])-1] + ".")
if (self.__assistant.AttributeCheck(context, self.tagList[2], self.attrName, self.attrVal)):
self.status_exemplary = True
else:
context.Log("FAILED: Geometry is not preserved as " + self.tagList[1][len(self.tagList[1])-1] + " or " + self.tagList[2][len(self.tagList[2])-1] + ".")
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
ismailsunni/healthsites
|
django_project/core/settings/dev_dodobas.py
|
Python
|
bsd-2-clause
| 1,782
| 0
|
# -*- coding: utf-8 -*-
from .dev import * # noqa
INSTALLED_APPS += (
'django_extensions',
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'healthsites_dev',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
# Set to empty string for default.
'PORT': '',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
    'formatters': {
# define output formats
'verbose': {
'format': (
'%(levelname)s %(name)s %(asctime)s %(module)s %(process)d '
                '%(thread)d %(message)s')
},
'simple': {
'format': (
'%(name)s %(levelname)s %(filename)s L%(lineno)s: '
'%(message)s')
},
},
'handlers': {
# console output
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
'level': 'DEBUG',
},
# 'logfile': {
# 'class': 'logging.FileHandler',
# 'filename': '/tmp/app-dev.log',
# 'formatter': 'simple',
# 'level': 'DEBUG',
# }
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO', # switch to DEBUG to show actual SQL
},
# example app logger
'localities': {
'level': 'DEBUG',
'handlers': ['console'],
# propagate is True by default, which proppagates logs upstream
'propagate': False
}
},
# root logger
# non handled logs will propagate to the root logger
'root': {
'handlers': ['console'],
'level': 'WARNING'
}
}
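# --- Hedged sketch (illustrative, not from the original settings file): applying LOGGING by hand
# --- and using the 'localities' logger it configures; the messages are made up.
if __name__ == '__main__':
    import logging.config
    logging.config.dictConfig(LOGGING)
    log = logging.getLogger('localities')
    log.debug('imported %d facilities', 42)   # console handler, 'simple' format, DEBUG level
    log.info('sync finished')                 # propagation to the root logger is disabled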
|
alfredfrancis/ai-chatbot-framework
|
app/entities/controllers.py
|
Python
|
mit
| 1,963
| 0
|
from bson.json_util import dumps, loads
from bson.objectid import ObjectId
from flask import Blueprint, request, Response
from app.commons import build_response
from app.commons.utils import update_document
from app.entities.models import Entity
entities_blueprint = Blueprint('entities_blueprint', __name__,
url_prefix='/entities')
@entities_blueprint.route('/', methods=['POST'])
def create_entity():
"""
Create a story from the provided json
:return:
"""
content = request.get_json(silent=True)
entity = Entity()
entity.name = content.get("name")
entity.entity_values = []
try:
entity_id = entity.save()
except Exception as e:
return build_response.build_json({"error": str(e)})
return build_response.build_json({
"_id": str(entity_id.id)
})
@entities_blueprint.route('/')
def read_entities():
"""
    find list of entities
:return:
"""
intents = Entity.objects.only('name', 'id')
return build_response.sent_json(intents.to_json())
@entities_blueprint.route('/<id>')
def read_entity(id):
"""
Find details for the given entity name
:param id:
:return:
"""
return Response(
response=dumps(Entity.objects.get(
id=ObjectId(id)).to_mongo().to_dict()),
status=200, mimetype="application/json")
@entities_blueprint.route('/<id>', methods=['PUT'])
def update_entity(id):
"""
Update a story from the provided json
:param id:
:return:
"""
json_data = loads(request.get_data())
entity = Entity.objects.get(id=ObjectId(id))
entity = update_document(entity, json_data)
entity.save()
return build_response.sent_ok()
@entities_blueprint.route('/<id>', methods=['DELETE'])
def delete_entity(id):
"""
Delete a intent
:param id:
:return:
"""
Entity.objects.get(id=ObjectId(id)).delete()
return build_response.sent_ok()
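# --- Hedged usage sketch (illustrative, not from the original repo): exercising the blueprint
# --- over HTTP with 'requests'. The base URL and entity name are placeholders, and the
# --- blueprint must be registered on a running Flask app.
if __name__ == '__main__':
    import requests
    base = 'http://localhost:8080/entities'
    new_id = requests.post(base + '/', json={'name': 'city'}).json()['_id']
    print(requests.get('{0}/{1}'.format(base, new_id)).json())
    requests.delete('{0}/{1}'.format(base, new_id))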
|
google-research/google-research
|
cascaded_networks/models/densenet.py
|
Python
|
apache-2.0
| 6,633
| 0.004975
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Densnet handler.
Adapted from
https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
"""
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from cascaded_networks.models import custom_ops
from cascaded_networks.models import dense_blocks
from cascaded_networks.models import model_utils
class DenseNet(nn.Module):
"""Densenet."""
def __init__(self,
name,
block,
block_arch,
growth_rate=12,
reduction=0.5,
num_classes=10,
**kwargs):
"""Initialize dense net."""
super(DenseNet, self).__init__()
self.name = name
self.growth_rate = growth_rate
self._cascaded = kwargs['cascaded']
    self.block_arch = block_arch
self._norm_layer_op = self._setup_bn_op(**kwargs)
self._build_net(block, block_arch, growth_rate,
reduction, num_classes, **kwargs)
def _build_net(self,
block,
block_arch,
growth_rate,
reduction,
num_classes,
**kwargs):
self.layers = []
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes,
block_arch[0], **kwargs)
num_planes += block_arch[0] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans1 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes,
block_arch[1], **kwargs)
num_planes += block_arch[1] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans2 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes,
block_arch[2], **kwargs)
num_planes += block_arch[2] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans3 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes,
block_arch[3], **kwargs)
num_planes += block_arch[3] * growth_rate
self.bn = self._norm_layer_op(num_planes)
self.fc = nn.Linear(num_planes, num_classes)
self.layers.append(self.trans1)
self.layers.append(self.trans2)
self.layers.append(self.trans3)
def _make_dense_layers(self, block, in_planes, n_blocks, **kwargs):
layers = []
for _ in range(n_blocks):
block_i = block(in_planes,
self.growth_rate,
norm_layer=self._norm_layer_op,
**kwargs)
self.layers.append(block_i)
layers.append(block_i)
in_planes += self.growth_rate
return nn.Sequential(*layers)
@property
def timesteps(self):
return sum(self.block_arch) + 1
def _setup_bn_op(self, **kwargs):
if self._cascaded:
self._norm_layer = custom_ops.BatchNorm2d
# Setup batchnorm opts
self.bn_opts = kwargs.get('bn_opts', {
'affine': False,
'standardize': False
})
self.bn_opts['n_timesteps'] = self.timesteps
norm_layer_op = functools.partial(self._norm_layer, self.bn_opts)
else:
self._norm_layer = nn.BatchNorm2d
norm_layer_op = self._norm_layer
return norm_layer_op
def _set_time(self, t):
for block in self.layers:
block.set_time(t)
def forward(self, x, t=0):
# Set time on all blocks
if self._cascaded:
self._set_time(t)
# Feature extraction
out = self.conv1(x)
out = self.dense1(out)
out = self.trans1(out)
out = self.dense2(out)
out = self.trans2(out)
out = self.dense3(out)
out = self.trans3(out)
out = self.dense4(out)
# Classifier
out = self.bn(out) if not self._cascaded else self.bn(out, t)
out = F.avg_pool2d(F.relu(out), 4)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def make_densenet(name, block, layers, pretrained, growth_rate, **kwargs):
model = DenseNet(name, block, layers, growth_rate=growth_rate, **kwargs)
if pretrained:
kwargs['model_name'] = name
model = model_utils.load_model(model, kwargs)
return model
def densenet121(pretrained=False, **kwargs):
return make_densenet('densenet121', dense_blocks.Bottleneck, [6, 12, 24, 16],
pretrained, growth_rate=32, **kwargs)
def densenet161(pretrained=False, **kwargs):
return make_densenet('densenet161', dense_blocks.Bottleneck, [6, 12, 36, 24],
pretrained, growth_rate=48, **kwargs)
def densenet169(pretrained=False, **kwargs):
return make_densenet('densenet169', dense_blocks.Bottleneck, [6, 12, 32, 32],
pretrained, growth_rate=32, **kwargs)
def densenet201(pretrained=False, **kwargs):
return make_densenet('densenet201', dense_blocks.Bottleneck, [6, 12, 48, 32],
pretrained, growth_rate=32, **kwargs)
def densenet_cifar(pretrained=False, **kwargs):
block_arch = [6, 12, 24, 16]
growth_rate = 16
return make_densenet('densenet121_cifar', dense_blocks.Bottleneck, block_arch,
pretrained, growth_rate=growth_rate, **kwargs)
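# --- Hedged usage sketch (illustrative, not from the original file): one forward pass through the
# --- CIFAR-sized variant above. Assumes torch is installed; the non-cascaded path, batch size and
# --- input shape are made up for the example.
if __name__ == '__main__':
  import torch
  model = densenet_cifar(pretrained=False, cascaded=False)
  x = torch.randn(2, 3, 32, 32)        # two 32x32 RGB images
  logits = model(x)                    # forward pass at the default timestep t=0
  print(logits.shape)                  # expected: torch.Size([2, 10])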
|
dichenko/kpk2016
|
Diff/dra.py
|
Python
|
gpl-3.0
| 442
| 0.011312
|
#http://informatics.mccme.ru/mod/statements/view3.php?id=22783&chapterid=113362#1
n = int(input())
def sum_kv_cifr(x):
su = 0
for i in str(x):
su += int(i)*int(i)
return su
maxi_power = 0
for i in range(1, n//2+1):
print('______',i)
for k in range(n//i, 0, -1):
power = sum_kv_cifr(i * k)
print('_', k, power)
if power > maxi_power:
maxi_power = power
print(maxi_power)
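# --- Hedged check (illustrative, not from the original script): sum_kv_cifr sums the squared
# --- decimal digits, e.g. 34 -> 3*3 + 4*4 = 25 and 999 -> 3 * 81 = 243.
assert sum_kv_cifr(34) == 25
assert sum_kv_cifr(999) == 243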
|
lexrupy/gmate-editor
|
GMATE/status_position.py
|
Python
|
mit
| 2,729
| 0.002566
|
# -*- coding: utf-8 -*-
# GMate - Plugin Based Programmer's Text Editor
# Copyright © 2008-2009 Alexandre da Silva
#
# This file is part of Gmate.
#
# See LICENTE.TXT for licence information
import gtk
from GMATE import i18n as i
from GMATE.status_widget import StatusWidget
class StatusPosition(StatusWidget):
"""
This box holds the current line number
"""
def initialize(self):
        self.buff = None
self.document = None
self.__changed_id = None
self.__mark_set_id = None
self.line_title_label = gtk.Label(i.status_line)
self.line_position_number = gtk.Label('0')
self.line_position_number.set_size_request(40, -1)
self.line_position_number.set_alignment(0.01, 0.5)
self.column_title_label = gtk.Label(i.status_column)
self.column_position_number = gtk.Label('0')
self.column_position_number.set_size_request(25,-1)
self.column_position_number.set_alignment(0.01, 0.5)
self.pack_start(self.line_title_label, False, False)
self.pack_start(self.line_position_number, False, False)
sep = gtk.VSeparator()
self.pack_start(sep,False, False, 5)
self.pack_start(self.column_title_label, False, False)
self.pack_start(self.column_position_number, False, False)
self.show_all()
def on_disconnect(self):
        if self.buff:
if self.__changed_id:
self.buff.disconnect(self.__changed_id)
self.__changed_id = None
if self.__mark_set_id:
self.buff.disconnect(self.__mark_set_id)
self.__mark_set_id = None
def on_set_document(self, doc):
self.on_disconnect()
self.buff = doc.View.get_buffer()
self.document = doc.View
self.__changed_id = self.buff.connect("changed", self.__changed_cb)
self.__mark_set_id = self.buff.connect("mark-set", self.__mark_set_cb)
self.__changed_cb(self.buff)
def __changed_cb(self, buff):
tabwidth = self.document.get_tab_width()
iter = buff.get_iter_at_mark(buff.get_insert())
row = iter.get_line() + 1
col_offset = iter.get_line_offset()
iter.set_line_offset(0)
col = 0
while not col_offset == iter.get_line_offset():
if iter.get_char() == '\t':
col += (tabwidth - (col % tabwidth))
else:
col += 1
iter.forward_char()
self.line_position_number.set_text(str(row))
self.column_position_number.set_text(str(col+1))
return False
    def __mark_set_cb(self, buff, cursoriter, mark):
self.__changed_cb(buff)
return False
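# --- Hedged standalone sketch (illustrative, not from the original plugin) of the tab-aware
# --- column arithmetic used in __changed_cb above; the sample line and tab width are made up.
def visual_column(line, offset, tabwidth=8):
    col = 0
    for ch in line[:offset]:
        if ch == '\t':
            col += tabwidth - (col % tabwidth)   # advance to the next tab stop
        else:
            col += 1
    return col

if __name__ == '__main__':
    print(visual_column('\tdef foo():', 5, tabwidth=4))   # tab expands to 4 columns, then 4 chars -> 8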
|
AntonKhorev/BudgetSpb
|
main.py
|
Python
|
bsd-2-clause
| 1,072
| 0.041295
|
#!/usr/bin/env python3
from linker import Linker
import htmlPage
import content.index,content.db,content.fincom
# TODO put into config
spbBudgetXlsPath='../spb-budget-xls'
if __name__=='__main__':
linker=Linker('filelists',{
'csv':['csv'],
'xls':['xls'],
'db':['zip','sql','xlsx'],
})
htmlPage.HtmlPage('index.html','Данные бюджета Санкт-Петербурга',content.index.content,linker).write('output/index.html')
htmlPage.HtmlPage('xls.html','Ведомственная структура расходов бюджета Санкт-Петербурга в csv и xls',htmlPage.importContent(spbBudgetXlsPath+'/index.html'),linker).write('output/xls.html')
htmlPage.HtmlPage('db.html','БД и таблицы расходов бюджета Санкт-Петербурга из разных источников',content.db.content,linker).write('output/db.html')
 htmlPage.HtmlPage('fincom.html','Что можно найти на сайте Комитета финансов',content.fincom.content,linker).write('output/fincom.html')
|
edoburu/django-fluent-blogs
|
setup.py
|
Python
|
apache-2.0
| 3,060
| 0.002614
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
os.chdir('fluent_blogs')
try:
from django.core import management
management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
except ImportError:
if 'sdist' in sys.argv:
raise
finally:
os.chdir('..')
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-fluent-blogs',
version=find_version('fluent_blogs', '__init__.py'),
license='Apache 2.0',
install_requires=[
'django-fluent-contents>=2.0.2',
'django-fluent-utils>=2.0',
'django-categories-i18n>=1.1',
'django-parler>=1.9.1',
'django-slug-preview>=1.0.4',
'django-tag-parser>=3.1',
],
requires=[
'Django (>=1.10)',
],
extras_require = {
'tests': [
'django-fluent-pages>=2.0.1',
'django-wysiwyg>=0.7.1',
],
'blogpage': ['django-fluent-pages>=2.0.1'],
'taggit': ['taggit', 'taggit-autosuggest'],
},
description='A blog engine with flexible block contents (based on django-fluent-contents).',
long_description=read('README.rst'),
author='Diederik van der Boor',
author_email='opensource@edoburu.nl',
url='https://github.com/edoburu/django-fluent-blogs',
download_url='https://github.com/edoburu/django-fluent-blogs/zipball/master',
packages=find_packages(),
include_package_data=True,
test_suite = 'runtests',
zip_safe=False,
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
maxwward/SCOPEBak
|
setup.py
|
Python
|
gpl-3.0
| 5,541
| 0.009024
|
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import sys
#NOTE: if you want to develop askbot
#you might want to install django-debug-toolbar as well
import askbot
setup(
name = "askbot",
version = askbot.get_version(),#remember to manually set this correctly
description = 'Exercise and Problem forum, like StackOverflow, written in python and Django',
packages = find_packages(),
author = 'Evgeny.Fadeev',
author_email = 'evgeny.fadeev@gmail.com',
license = 'GPLv3',
keywords = 'forum, community, wiki, Q&A',
entry_points = {
'console_scripts' : [
'askbot-setup = askbot.deployment:askbot_setup',
]
},
url = 'http://askbot.org',
include_package_data = True,
install_requires = askbot.REQUIREMENTS.values(),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Natural Language :: Finnish',
'Natural Language :: German',
'Natural Language :: Russian',
'Natural Language :: Serbian',
'Natural Language :: Turkish',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: JavaScript',
'Topic :: Communications :: Usenet News',
'Topic :: Communications :: Email :: Mailing List Servers',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
],
long_description = """Askbot will work alone or with other django apps (with some limitations, please see below), Django 1.1.1 - 1.2.3(*), MySQL(**) and PostgresQL(recommended) (>=8.3).
Exercises? Suggestions? Found a bug? -> please post at http://askbot.org/
Features
========
* standard Q&A functionalities including votes, reputation system, etc.
* user levels: admin, moderator, regular, suspended, blocked
* per-user inbox for responses & flagged items (for moderators)
* email alerts - instant and delayed, optionally tag filtered
* search by full text and a set of tags simultaneously
* can import data from stackexchange database file
Installation
============
The general steps are:
* install the code
* if there is no database yet - create one
* create a new or configure existing django site for askbot
* create/update the database tables
Methods to install code
-----------------------
* **pip install askbot**
* **easy_install askbot**
* **download .tar.gz** file from the bottom of this page, then run **python setup.py install**
* clone code from the github **git clone git://github.com/ASKBOT/askbot-devel.git**, and then **python setup.py develop**
Create/configure django site
----------------------------
Either run command **askbot-setup** or merge contents of directory **askbot/setup_templates** in the source code into your project directory.
Create/update database tables
-----------------------------
Back up your database if it is not blank, then two commands:
* python manage.py syncdb
* python manage.py migrate
There are two apps to migrate - askbot and django_authopenid (a forked version of the original, included within askbot), so you can as well migrate them separately
Limitations
===========
There are some limitations that will be removed in the future. If any of these cause issues - please do not hesitate to contact admin@askbot.org.
Askbot patches `auth_user` table. The migration script will automatically add missing columns, however it will not overwrite any existing columns. Please do back up your database before adding askbot to an existing site.
Included into askbot there are two forked apps: `django_authopenid` and `livesettings`. If you have these apps on your site, you may have trouble installing askbot.
User registration and login system is bundled with Askbot. It is quite good though, it allows logging in with password and many authentication service providers, including popular social services and recover account by email.
If there are any other collisions, askbot will simply fail to install, it will not damage your data.
Background Information
======================
Askbot is based on CNPROG project by Mike Chen and Sailing Cai, project which was originally inspired by StackOverflow and Yahoo Problems.
Footnotes
=========
(*) - If you want to install with django 1.2.x a dependency "Coffin-0.3" needs to be replaced with "Coffin-0.3.3" - this will be automated in the future versions of the setup script.
(**) - With MySQL you have to use MyISAM data backend, because it's the only one that supports Full Text Search."""
)
print """**************************************************************
* *
* Thanks for installing Askbot. *
* *
* To start deploying type: askbot-setup *
* Please take a look at the manual askbot/doc/INSTALL *
* And please do not hesitate to ask your questions at *
* at http://askbot.org *
* *
**************************************************************"""
|
guolivar/totus-niwa
|
service/thirdparty/featureserver/FeatureServer/WebFeatureService/FilterEncoding/LogicalOperators/LogicalOperator.py
|
Python
|
gpl-3.0
| 1,243
| 0.008045
|
'''
Created on Apr 20, 2011
@author: michel
'''
import os
from lxml import etree
from FeatureServer.WebFeatureService.FilterEncoding.Operator import Operator
class LogicalOperator(Operator):
def __init__(self, node):
super(LogicalOperator, self).__init__(node)
self.type = 'LogicalOperator'
    def createStatement(self, datasource, operatorList):
logical = self.addOperators(operatorList)
        xslt = etree.parse(os.path.dirname(os.path.abspath(__file__))+"/../../../../resources/filterencoding/logical_operators.xsl")
transform = etree.XSLT(xslt)
result = transform(logical, datasource="'"+datasource.type+"'", operationType="'"+str(self.node.xpath('local-name()'))+"'")
elements = result.xpath("//Statement")
if len(elements) > 0:
self.setStatement(str(elements[0].text).strip())
return
self.setStatement(None)
def addOperators(self, operatorList):
logical = etree.Element(self.node.tag)
for operator in operatorList:
element = etree.Element("Operator")
element.text = operator.stmt
logical.append(element)
return logical
|
centrofermi/e3analysis
|
upgoingmu/beta_sanity_check.py
|
Python
|
lgpl-3.0
| 3,115
| 0.000642
|
import os
import ROOT
from __common__ import *
OUTPUT_FILE_PATH = 'beta_sanity_check.root'
print 'Opening output file %s...' % OUTPUT_FILE_PATH
outputFile = ROOT.TFile(OUTPUT_FILE_PATH, 'RECREATE')
for station in STATIONS:
filePath = os.path.join(DATA_FOLDER, '%s_full_upgoingmu.root' % station)
f = ROOT.TFile(filePath)
t = f.Get('Events')
outputFile.cd()
# Beta (and beta^-1) distributions.
hname = 'hbeta_%s' % station
hbeta = ROOT.TH1F(hname, station, 200, 0, 3)
hbeta.SetXTitle('#beta')
hbeta.SetYTitle('Entries/bin')
t.Project(hname, BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbeta.Write()
print 'Done.'
hname = 'hbetainv_%s' % station
hbetainv = ROOT.TH1F(hname, station, 200, 0, 3)
hbetainv.SetXTitle('1/#beta')
hbetainv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetainv.Write()
print 'Done.'
    # Beta (and beta^-1) distributions for straight tracks.
hname = 'hbetastraight_%s' % station
hbeta = ROOT.TH1F(hname, station, 200, 0, 3)
hbeta.SetXTitle('#beta')
hbeta.SetYTitle('Entries/bin')
    t.Project(hname, BETA_EXPR, STRAIGHT_CUT_EXPR)
print 'Writing %s...' % hname
hbeta.Write()
    print 'Done.'
hname = 'hbetastraightinv_%s' % station
hbetainv = ROOT.TH1F(hname, station, 200, 0, 3)
hbetainv.SetXTitle('1/#beta')
hbetainv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, STRAIGHT_CUT_EXPR)
print 'Writing %s...' % hname
hbetainv.Write()
print 'Done.'
# Beta (and beta^-1) distributions as a function of theta.
hname = 'hbetazdir_%s' % station
hbetazdir = ROOT.TH2F(hname, station, 100, 0, 1, 100, 0, 3)
hbetazdir.SetXTitle('cos(#theta)')
hbetazdir.SetYTitle('#beta')
t.Project(hname, '(%s):ZDir' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetazdir.Write()
print 'Done.'
hname = 'hbetazdirinv_%s' % station
hbetazdirinv = ROOT.TH2F(hname, station, 100, 0, 1, 100, 0, 3)
hbetazdirinv.SetXTitle('cos(#theta)')
hbetazdirinv.SetYTitle('1/#beta')
t.Project(hname, '1./(%s):ZDir' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetazdirinv.Write()
print 'Done.'
# Beta (and beta^-1) distributions for upward-going particles from muon
# decay.
hname = 'hbetadaughters_%s' % station
hbetadaughters = ROOT.TH1F(hname, station, 200, -3, 0)
hbetadaughters.SetXTitle('#beta')
hbetadaughters.SetYTitle('Entries/bin')
t.Project(hname, BETA_EXPR, DAUGHTER_CUT_EXPR)
print 'Writing %s...' % hname
hbetadaughters.Write()
print 'Done.'
hname = 'hbetadaughtersinv_%s' % station
hbetadaughtersinv = ROOT.TH1F(hname, station, 200, -3, 0)
hbetadaughtersinv.SetXTitle('1/#beta')
hbetadaughtersinv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, DAUGHTER_CUT_EXPR)
print 'Writing %s...' % hname
hbetadaughtersinv.Write()
print 'Done.'
f.Close()
print 'Closing output file...'
outputFile.Close()
print 'Done, bye.'
|
phase-dev/phase
|
libphase/tabs/vulnerabilities.py
|
Python
|
gpl-3.0
| 6,578
| 0.03907
|
"""
Copyright 2014
This file is part of Phase.
Phase is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Phase is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Phase. If not, see <http://www.gnu.org/licenses/>.
"""
from libphase.tabs import tab
import plugins.vulnerabilities
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import GdkPixbuf
from libphase import gtk
import pkgutil
import importlib
import traceback
import sys
import xml.etree.cElementTree as ET
import os
import threading
import Queue
import time
class Vulnerabilities(tab.Tab):
plugins=[]
def __init__(self,shared_objects):
tab.Tab.__init__(self,shared_objects)
icon_dir=os.path.abspath(os.path.dirname(sys.argv[0]))+os.sep+"resources"+os.sep+"icons"+os.sep
self.info_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"info.png", 15, 15)
self.low_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"low.png", 15, 15)
self.medium_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"medium.png", 15, 15)
self.high_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"high.png", 15, 15)
self.critical_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"critical.png", 15, 15)
self.view=self.builder.get_object("treeviewVulnerabilities")
treeview_vulnerabilities_cell_1 = Gtk.CellRendererPixbuf()
treeview_vulnerabilities_column_1 = Gtk.TreeViewColumn("Risk", treeview_vulnerabilities_cell_1)
treeview_vulnerabilities_column_1.set_cell_data_func(treeview_vulnerabilities_cell_1,self.data_function)
self.view.append_column(treeview_vulnerabilities_column_1)
treeview_vulnerabilities_cell_2 = Gtk.CellRendererText()
treeview_vulnerabilities_column_2 = Gtk.TreeViewColumn("Title", treeview_vulnerabilities_cell_2, text=0)
self.view.append_column(treeview_vulnerabilities_column_2)
self.store=gtk.TreeStore(str,int,str,str,str)
self.view.set_model(self.store)
self.view.connect("cursor-changed",self.handler_treeview_vulnerabilities_cursor_changed)
for importer, modname, ispkg in pkgutil.iter_modules(plugins.vulnerabilities.__path__):
if modname != "base":
try:
module = importlib.import_module("plugins.vulnerabilities."+modname)
plugin=module.Plugin()
self.plugins.append(plugin)
except:
print "Failed to import ",modname
print traceback.format_exc()
self.processing_queue=Queue.Queue()
self.finish_processing=False
self.processing_thread=threading.Thread(target=self.process_thread)
self.processing_thread.start()
def data_function(self,column,cell,model,iter,user_data):
cell_contents=model.get_value(iter,1)
if cell_contents==5:
cell.set_property('pixbuf',self.critical_icon)
if cell_contents==4:
cell.set_property('pixbuf',self.high_icon)
if cell_contents==3:
cell.set_property('pixbuf',self.medium_icon)
if cell_contents==2:
cell.set_property('pixbuf',self.low_icon)
if cell_contents==1:
cell.set_property('pixbuf',self.info_icon)
def stop(self):
self.finish_processing=True
def process(self,flow):
if self.builder.get_object("checkbuttonVulnerabilitesDetect").get_active():
self.processing_queue.put(flow)
def process_thread(self):
while not self.finish_processing:
try:
flow=self.processing_queue.get_nowait()
for plugin in self.plugins:
vulnerabilities=plugin.process(flow.copy())
self.add(vulnerabilities)
except Queue.Empty:
time.sleep(0.1)
def load(self,xml):
for parent_node in xml.phase.vulnerabilities.children:
parent=self.store.append(None,[parent_node["title"],int(parent_node["risk"]),parent_node["description"],parent_node["recommendation"],""])
for child_node in parent_node.children:
self.store.append(parent,[child_node["url"],int(parent_node["risk"]),parent_node["description"],parent_node["recommendation"],child_node["value"]])
def save(self,root):
vulnerabilities_node = ET.SubElement(root, "vulnerabilities")
for parent in self.store.get_children(None):
vuln_node=ET.SubElement(vulnerabilities_node, "vulnerability")
vuln_node.set("title",self.store.get_value(parent,0))
vuln_node.set("risk",str(self.store.get_value(parent,1)))
vuln_node.set("description",self.store.get_value(parent,2))
vuln_node.set("recommendation",self.store.get_value(parent,3))
for child in self.store.get_children(parent):
child_node=ET.SubElement(vuln_node, "affected_url")
child_node.set("url",self.store.get_value(child,0))
child_node.set("value",self.store.get_value(child,4))
def clear(self):
self.store.clear()
def handler_treeview_vulnerabilities_cursor_changed(self,treeview):
model,iter=self.view.get_selection().get_selected()
path=model.get_path(iter)
if len(path) == 1:
self.builder.get_object("textviewVulnerabilitiesDescription").get_buffer().set_text(treeview.get_model().get_value(iter,2))
self.builder.get_object("textviewVulnerabilitiesRecommendation").get_buffer().set_text(treeview.get_model().get_value(iter,3))
self.builder.get_object("textviewVulnerabilitiesValue").get_buffer().set_text("")
else:
self.builder.get_object("textviewVulnerabilitiesDescription").get_buffer().set_text(treeview.get_model().get_value(iter,2))
self.builder.get_object("textviewVulnerabilitiesRecommendation").get_buffer().set_text(treeview.get_model().get_value(iter,3))
			self.builder.get_object("textviewVulnerabilitiesValue").get_buffer().set_text(treeview.get_model().get_value(iter,4))
def add(self,vulnerabilities):
for vulnerability in vulnerabilities:
parent=self.store.contains(None,[(vulnerability.title,0)])
if parent == None:
				parent=self.store.append(None,[vulnerability.title,vulnerability.risk,vulnerability.description,vulnerability.recommendation,""])
self.store.append(parent,[vulnerability.url,vulnerability.risk,vulnerability.description,vulnerability.recommendation,vulnerability.value])
else:
if self.store.contains(parent,[(vulnerability.url,0)]) == None:
self.store.append(parent,[vulnerability.url,vulnerability.risk,vulnerability.description,vulnerability.recommendation,vulnerability.value])
|
spseol/DOD-2014
|
NXTornadoWServer/NXTornadoWServer/ClientsHandler.py
|
Python
|
lgpl-2.1
| 775
| 0.003871
|
import logging
from tornado.web import RequestHandler
from NXTornadoWServer.ControlSocketHandler import ControlSocketHandler
class ClientsHandler(RequestHandler):
def get(self):
try:
controller_i = ControlSocketHandler.clients.index(ControlSocketHandler.client_controller)
except ValueError:
controller_i = 0
return self.render('../static/clients.html', clients=ControlSocketHandler.clients, controller_i=controller_i)
def post(self):
        args = {k: ''.join(v) for k, v in self.request.arguments.iteritems()}
try:
ControlSocketHandler.client_controller = ControlSocketHandler.clients[int(args['i'])]
except IndexError:
pass
ControlSocketHandler.refresh_clients()
|
SalemAmeen/neon
|
tests/test_linear_layer.py
|
Python
|
apache-2.0
| 6,132
| 0
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the mlp/linear layer
'''
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.initializers.initializer import Uniform
from neon.layers.layer import Linear
def pytest_generate_tests(metafunc):
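    # Build the parameter grids for the fixtures used below; the 'all' config
    # option switches from the small default ranges to the exhaustive ones.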
if metafunc.config.option.all:
bsz_rng = [16, 32, 64]
else:
bsz_rng = [128]
if 'basic_linargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
nin_rng = [1, 2, 1023, 1024, 1025]
nout_rng = [1, 4, 1023, 1024, 1025]
else:
nin_rng = [4, 32]
nout_rng = [3, 33]
fargs = itt.product(nin_rng, nout_rng, bsz_rng)
metafunc.parametrize('basic_linargs', fargs)
if 'allrand_args' in metafunc.fixturenames:
fargs = []
eps = np.finfo(np.float32).eps
# weight ranges
w_rng = [[0.0, 1.0], [-1.0, 0.0], [-1.0, 1.0]]
if metafunc.config.option.all:
rng_max = [eps, eps*10, 1.0, 2048.0, 1.0e6, 1.0e10]
else:
rng_max = [eps, 1.0, 1.0e10]
fargs = itt.product(w_rng, rng_max)
metafunc.parametrize('allrand_args', fargs)
def test_linear_zeros(backend, basic_linargs):
# basic sanity check with 0 weights random inputs
nin, nout, batch_size = basic_linargs
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
dtypeu = np.float32
init_unif = Uniform(low=0.0, high=0.0)
layer = Linear(nout=nout, init=init_unif)
inp = layer.be.array(dtypeu(np.random.random((nin, batch_size))))
out = layer.fprop(inp).get()
assert np.min(out) == 0.0 and np.max(out) == 0.0
err = dtypeu(np.zeros((nout, batch_size)))
deltas = layer.bprop(layer.be.array(err)).asnumpyarray()
assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0
dw = layer.dW.asnumpyarray()
assert np.min(dw) == 0.0 and np.max(dw) == 0.0
return
def test_linear_ones(backend, basic_linargs):
# basic sanity check with all ones on the inputs
# and weights, check that each row in output
# is the sum of the weights for that output
# this check will confirm that the correct number
# of operations is being run
nin, nout, batch_size = basic_linargs
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
dtypeu = np.float32
init_unif = Uniform(low=1.0, high=1.0)
layer = Linear(nout=nout, init=init_unif)
inp = layer.be.array(dtypeu(np.ones((nin, batch_size))))
out = layer.fprop(inp).asnumpyarray()
w = layer.W.asnumpyarray()
sums = np.sum(w, 1).reshape((nout, 1))*np.ones((1, batch_size))
# for larger layers need to estimate numerical precision
# atol = est_mm_prec(w, inp.asnumpyarray())
    assert np.allclose(sums, out, atol=0.0, rtol=0.0), \
        '%e' % np.max(np.abs(out-sums))
return
def test_all_rand(backend, allrand_args):
# test with random weights and random inputs
dtypeu = np.float32
w_rng, rngmax = allrand_args
inp_rng = [0.0, rngmax]
nin = 1024
nout = 2048
batch_size = 16
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
init_unif = Uniform(low=w_rng[0], high=w_rng[1])
layer = Linear(nout=nout, init=init_unif)
inp = np.random.random((nin, batch_size))
inp *= inp_rng[1] - inp_rng[0]
inp += inp_rng[0]
inp = inp.astype(dtypeu)
out = layer.fprop(layer.be.array(inp)).asnumpyarray()
w = layer.W.asnumpyarray()
# the expected output using numpy
out_exp = np.dot(w, inp)
# for larger layers need to estimate numerical precision
atol = 2 * est_mm_prec(w, inp, ntrials=1)
    assert np.allclose(out_exp, out, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(out - out_exp)), atol)
err = np.random.random((nout, batch_size))
err = err * (inp_rng[1] - inp_rng[0]) + inp_rng[0]
err = err.astype(dtypeu)
deltas = layer.bprop(layer.be.array(err)).asnumpyarray()
dw = layer.dW.asnumpyarray()
deltas_exp = np.dot(w.T, err)
atol = 2 * est_mm_prec(w.T, err, ntrials=1)
    assert np.allclose(deltas_exp, deltas, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(deltas_exp - deltas)), atol)
dw_exp = np.dot(err, inp.T)
atol = 2 * est_mm_prec(err, inp.T, ntrials=1)
    assert np.allclose(dw_exp, dw, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(dw_exp - dw)), atol)
|
return
# permute mm indices to change order of computations
# to estimate numerical precision
# this is a rough estimate
def est_mm_prec(A, B, ntrials=1):
A64 = np.float64(A)
B64 = np.float64(B)
gt = np.dot(A64, B64)
max_err = -1.0
    for trial in range(ntrials):
inds = np.random.permutation(A.shape[1])
# this method gives better estimate of precision tolerances
# but takes too long to run
# for i in range(A.shape[0]):
# for j in range(B.shape[1]):
# c = np.sum(np.multiply(A[i,inds], B[inds,j]))
# max_err = max( max_err, np.abs(c-gt[i,j]))
# need to scale this by 10 for comparison
C = np.dot(A[:, inds], B[inds, :])
dd = np.float32(gt - C)
# just save the worst case from each iteration
max_err = max(max_err, np.max(np.abs(dd)))
# need to scale the np.dot results by 10 to
# match the np.sum(np.multiply()) values
max_err *= 10.0
return max_err
|
marcos-sb/hacker-rank
|
artificial-intelligence/statistics-and-machine-learning/battery/Solution.py
|
Python
|
apache-2.0
| 639
| 0
|
# import matplotlib.pyplot as plt
# import numpy as np
# from scipy import stats
# import sys
#
# c = list()
# b = list()
# for line in sys.stdin:
# linesp = list(map(float, line.strip().split(",")))
# if(linesp[0] < 4):
# c.append(linesp[0])
#         b.append(linesp[1])
#
# carr = np.array(c)
# barr = np.array(b)
#
# slope, intercept, r_value, p_value, std_err = stats.linregress(c,b)
#
# plt.figure()
# plt.plot(carr,barr, 's')
# plt.show()
# print(slope,intercept,r_value,p_value,std_err)
# 2.0 0.0 1.0 0.0 0.0
import sys
for line in sys.stdin:
x = float(line.strip())
    y = 2*x if x < 4 else 8  # conditional expression; the and/or idiom would return 8 when 2*x == 0
print(y)
|
zhouhoo/conceptNet_55_client
|
app/query.py
|
Python
|
apache-2.0
| 5,682
| 0.001562
|
import os
from hanziconv import HanziConv  # Simplified/Traditional Chinese conversion
from app.ResultParse import ResultParse
from app.statistics import analysis
from interface.api import LookUp, Search, Association
from tools.parse_and_filter import parse_sentence
from tools.translate import Translate
class Query:
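    """Collects ConceptNet commonsense edges for the conceptions parsed from a
    sentence, using the LookUp/Search/Association interfaces."""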
def __init__(self, debug=False):
self.translator = Translate()
self.analysis = analysis()
self.conceptions = []
self.related_conceptions = list(tuple())
self.commonsense = set()
self.debug = debug
@staticmethod
def base_lookup(conception, Type='c', lang='zh', limit=100, s_to_t=True):
lookup = LookUp(lang=lang, Type=Type, limit=limit)
if s_to_t:
data = lookup.search_concept(HanziConv.toTraditional(conception))
else:
data = lookup.search_concept(conception)
r = ResultParse(data)
if r.get_num_found() > 0:
return [(edge.surfaceText, edge.start, edge.end, edge.rel)
for edge in r.parse_all_edges()]
return None
def concept_lookup(self):
print('find only one conception,so get its commonsense at most 10')
        # Search in Chinese first
local_commonsense = Query.base_lookup(HanziConv.toTraditional(self.conceptions))
if not local_commonsense:
            # If nothing is found, translate to English and search again
local_commonsense = Query.base_lookup(self.translator.zh_to_en(self.conceptions))
self.commonsense = set(local_commonsense)
@staticmethod
def base_search(conceptions, lang='zh'):
res = list()
for i in range(len(conceptions)):
conception = '/c/' + lang + '/' + conceptions[i]
s = Search(node=conception) # can add more key-value
data = s.search()
r = ResultParse(data)
if r.get_num_found() > 0:
tmp = [(edge.surfaceText, edge.start.split('/')[3], edge.end.split('/')[3],
edge.rel)
for edge in r.parse_all_edges()]
res.extend(tmp)
        return res
def concept_search(self, to_traditional=True):
# print('looking for conceptions all related commonsense')
if not self.conceptions:
return
if to_traditional:
translated_conceptions = HanziConv.toTraditional(' '.join(self.conceptions))
conceptions = translated_conceptions.split()
else:
conceptions = self.conceptions
if self.debug:
print("关键词:" + ''.join(conceptions))
        data = Query.base_search(conceptions)
# if not data:
# translated_conceptions = Translate.zh_to_en(self.conceptions)
#
# data = Query.base_search(translated_conceptions, lang='en')
if data:
self.commonsense = self.commonsense.union(set(data))
@staticmethod
def base_association(terms, lang='zh', limit=100):
a = Association(lang=lang, limit=limit)
raw_data = a.get_similar_concepts_by_term_list(terms)
r = ResultParse(raw_data)
return r.parse_all_similarity()
def conception_association(self):
translated_conception = HanziConv.toTraditional(' '.join(self.conceptions))
if self.debug:
print(translated_conception)
self.related_conceptions = Query.base_association(translated_conception.split())
def tranlate_to_simple(self):
for item in self.commonsense.copy():
text = HanziConv.toSimplified(item[0]) if item[0] else 'No text'
s = HanziConv.toSimplified(item[1])
e = HanziConv.toSimplified(item[2])
self.commonsense.remove(item)
self.commonsense.add((text, s, e, item[3]))
def commonsense_query(self, sentences):
self.conceptions = parse_sentence(sentences)
self.concept_search(False)
# self.conception_association()
# self.tranlate_to_simple()
return self.commonsense
def stastics(self):
len_conceptiton = len(self.conceptions)
self.analysis.write_commonsense(self.commonsense)
self.analysis.write_all_relations()
self.analysis.print_comparation(len_conceptiton)
if __name__ == "__main__":
query = Query(debug=False)
# sentences = ["找寻新的利润增长点成为摆在各行面前的一大课题。在资产荒的背景下,个人房贷成为各家银行争抢的“香饽饽”,但随着多地推出楼市调控政策,按揭贷款或将从11月开始有所回落。",
# "精准医疗的目的是进行个体化定制治疗。因为每个人都存在着个体化差异,就算患上同一种疾病,在病理表现上也是不同的,可以表现为基因水平和蛋白水平上的差异",
# "全国人大常委会表决通过网络安全法,特别增加了惩治网络诈骗的有关规定,对个人信息保护做出规定,要求网络运营者应当采取技术措施和其他必要措施,确保其收集的个人信息安全,防止信息泄露、毁损、丢失"]
files = ["../data/" + f for f in os.listdir("../data/")]
for file in files:
print(file)
with open(file, encoding='utf-8') as f:
data = f.readlines()
data_filtered = [s.replace(' ', '') for s in data if not s.isspace() and '---' not in s]
sentences = ''.join(data_filtered).split("。")
for sentence in sentences:
# print("句子是"+sentence)
query.commonsense_query(sentence)
query.stastics()
|
joergsimon/gesture-analysis
|
main.py
|
Python
|
apache-2.0
| 1,322
| 0.008321
|
from dataingestion.initial_input import InitialInput
from const.constants import Constants
from dataingestion.preprocessing import preprocess_basic
from dataingestion.window import get_windows
from dataingestion.cache_control import *
from analysis.preparation import permutate
from analysis.preparation import split_test_train
from analysis.feature_selection import feature_selection
from utils.header_tools import create_headers
def main():
const = Constants()
init_input = InitialInput(const)
data = None
if not has_preprocess_basic_cache(const):
data = init_input.read_all_data_init()
const.remove_stripped_headers()
data = preprocess_basic(data, const)
data, labels = get_windows(data, const)
create_headers(const)
print("flex const
|
index trace info / main:")
|
print(len(const.feature_indices['flex']['row_1']))
print(len(const.feature_indices['flex']['row_2']))
r1 = []
for i in const.feature_indices['flex']['row_1']:
r1.append(const.feature_headers[i])
print(r1)
# permutate the data
data, labels = permutate(data, labels)
# split train and testset
train_data,train_labels,test_data,test_labels = split_test_train(data,labels,0.7)
feature_selection(train_data,train_labels,const)
if __name__ == '__main__':
main()
|
alexei-matveev/ase-local
|
ase/gui/calculator.py
|
Python
|
gpl-2.0
| 84,895
| 0.003028
|
# encoding: utf-8
"""calculator.py - module for choosing a calculator."""
import gtk
from gettext import gettext as _
import os
import numpy as np
from copy import copy
from ase.gui.setupwindow import SetupWindow
from ase.gui.progress import DefaultProgressIndicator, GpawProgressIndicator
from ase.gui.widgets import pack, oops, cancel_apply_ok
from ase import Atoms
from ase.data import chemical_symbols
import ase
# Asap and GPAW may be imported if selected.
introtext = _("""\
To make most calculations on the atoms, a Calculator object must first
be associated with it. ASE supports a number of calculators, supporting
different elements, and implementing different physical models for the
interatomic interactions.\
""")
# Informational text about the calculators
lj_info_txt = _("""\
The Lennard-Jones pair potential is one of the simplest
possible models for interatomic interactions, mostly
suitable for noble gases and model systems.
Interactions are described by an interaction length and an
interaction strength.\
""")
emt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystallizing
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au, the Al potential is however not suitable for materials
science application, as the stacking fault energy is wrong.
A number of parameter sets are provided.
<b>Default parameters:</b>
The default EMT parameters, as published in K. W. Jacobsen,
P. Stoltze and J. K. Nørskov, <i>Surf. Sci.</i> <b>366</b>, 394 (1996).
<b>Alternative Cu, Ag and Au:</b>
An alternative set of parameters for Cu, Ag and Au,
reoptimized to experimental data including the stacking
fault energies by Torben Rasmussen (partly unpublished).
<b>Ruthenium:</b>
Parameters for Ruthenium, as published in J. Gavnholt and
J. Schiøtz, <i>Phys. Rev. B</i> <b>77</b>, 035404 (2008).
<b>Metallic glasses:</b>
Parameters for MgCu and CuZr metallic glasses. MgCu
parameters are in N. P. Bailey, J. Schiøtz and
K. W. Jacobsen, <i>Phys. Rev. B</i> <b>69</b>, 144205 (2004).
CuZr in A. Paduraru, A. Kenoufi, N. P. Bailey and
J. Schiøtz, <i>Adv. Eng. Mater.</i> <b>9</b>, 505 (2007).
""")
aseemt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystallizing
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au. In addition, this implementation allows for the use of
H, N, O and C adatoms, although the description of these is
most likely not very good.
<b>This is the ASE implementation of EMT.</b> For large
simulations the ASAP implementation is more suitable; this
implementation is mainly to make EMT available when ASAP is
not installed.
""")
eam_info_txt = _("""\
The EAM/ADP potential is a many-body potential
implementation of the Embedded Atom Method and
equipotential plus the Angular Dependent Potential,
which is an extension of the EAM to include
directional bonds. EAM is suited for FCC metallic
bonding while the ADP is suited for metallic bonds
with some degree of directionality.
For EAM see M.S. Daw and M.I. Baskes,
Phys. Rev. Letters 50 (1983) 1285.
For ADP see Y. Mishin, M.J. Mehl, and
D.A. Papaconstantopoulos, Acta Materialia 53 2005
4029--4041.
Data for the potential is contained in a file in either LAMMPS Alloy
or ADP format which need to be loaded before use. The Interatomic
Potentials Repository Project at http://www.ctcms.nist.gov/potentials/
contains many suitable potential files.
For large simulations the LAMMPS calculator is more
suitable; this implementation is mainly to make EAM
available when LAMMPS is not installed or to develop
new EAM/ADP potentials by matching results using ab
initio.
""")
brenner_info_txt = _("""\
The Brenner potential is a reactive bond-order potential for
carbon and hydrocarbons. As a bond-order potential, it takes
into account that carbon orbitals can hybridize in different
ways, and that carbon can form single, double and triple
bonds. That the potential is reactive means that it can
handle gradual changes in the bond order as chemical bonds
are formed or broken.
The Brenner potential is implemented in Asap, based on a
C implementation published at http://www.rahul.net/pcm/brenner/ .
The potential is documented here:
Donald W Brenner, Olga A Shenderova, Judith A Harrison,
Steven J Stuart, Boris Ni and Susan B Sinnott:
"A second-generation reactive empirical bond order (REBO)
potential energy expression for hydrocarbons",
J. Phys.: Condens. Matter 14 (2002) 783-802.
doi: 10.1088/0953-8984/14/4/312
""")
gpaw_info_txt = _("""\
GPAW implements Density Functional Theory using a
<b>G</b>rid-based real-space representation of the wave
functions, and the <b>P</b>rojector <b>A</b>ugmented <b>W</b>ave
method for handling the core regions.
""")
aims_info_txt = _("""\
FHI-aims is an external package implementing density
functional theory and quantum chemical methods using
all-electron methods and a numeric local orbital basis set.
For full details, see http://www.fhi-berlin.mpg.de/aims/
or Comp. Phys. Comm. v180 2175 (2009). The ASE
documentation contains information on the keywords and
functionalities available within this interface.
""")
aims_pbc_warning_text = _("""\
WARNING:
Your system seems to have more than zero but less than
three periodic dimensions. Please check that this is
really what you want to compute. Assuming full
3D periodicity for this calculator.""")
vasp_info_txt = _("""\
VASP is an external package implementing density
functional functional theory using pseudopotentials
or the projector-augmented wave method together
with a plane wave basis set. For full details, see
http://cms.mpi.univie.ac.at/vasp/vasp/
""")
emt_parameters = (
(_("Default (Al, Ni, Cu, Pd, Ag, Pt, Au)"), None),
(_("Alternative Cu, Ag and Au"), "EMTRasmussenParameters"),
(_("Ruthenium"), "EMThcpParameters"),
(_("CuMg and CuZr metallic glass"), "EMTMetalGlassParameters")
)
class SetCalculator(SetupWindow):
"Window for selecting a calculator."
# List the names of the radio button attributes
radios = ("none", "lj", "emt", "aseemt", "eam", "brenner",
"gpaw", "aims", "vasp")
# List the names of the parameter dictionaries
paramdicts = ("lj_parameters", "eam_parameters", "gpaw_parameters",
"aims_parameters",)
# The name used to store parameters on the gui object
classname = "SetCalculator"
    def __init__(self, gui):
SetupWindow.__init__(self)
self.set_title(_("Select calculator"))
vbox = gtk.VBox()
        # Introductory text
self.packtext(vbox, introtext)
pack(vbox, [gtk.Label(_("Calculator:"))])
# No calculator (the default)
self.none_radio = gtk.RadioButton(None, _("None"))
pack(vbox, [self.none_radio])
|
# Lennard-Jones
self.lj_radio = gtk.RadioButton(self.none_radio,
_("Lennard-Jones (ASAP)"))
self.lj_setup = gtk.Button(_("Setup"))
self.lj_info = InfoButton(lj_info_txt)
self.lj_setup.connect("clicked", self.lj_setup_window)
self.pack_line(vbox, self.lj_radio, self.lj_setup, self.lj_info)
# EMT
self.emt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASAP)"))
self.emt_setup = gtk.combo_box_new_text()
self.emt_param_info = {}
for p in emt_parameters:
self.emt_setup.append_text(p[0])
self.emt_param_info[p[0]] = p[1]
self.emt_setup.set_active(0)
self.emt_info = InfoButton(emt_info_txt)
self.pack_line(vbox, self.emt_radio, self.emt_setup, self.emt_info)
# EMT (ASE implementation)
self.aseemt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASE)"))
self.aseemt_info = InfoButton(aseemt_info_txt)
self.pack_line(vbox, self.aseemt_radio, None, self.aseemt_info)
# EA
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/IPython/utils/daemonize.py
|
Python
|
bsd-2-clause
| 162
| 0.012346
|
from warnings import warn
warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize", stacklevel=2)
from ipyparallel.apps.daemonize import daemonize
|
bfontaine/RemindMe
|
remindme/providers/free.py
|
Python
|
mit
| 841
| 0
|
# -*- coding: UTF-8 -*-
from freesms import FreeClient
from base import BaseProvider, MissingConfigParameter, ServerError
class FreeProvider(BaseProvider):
"""
    This is a provider class for the French telco 'Free'.
>>> f = FreeProvider({'api_id': '12345678', 'api_key':'xyz'})
>>> f.send('Hello, World!')
True
"""
def required_keys(self):
return ['api_id', 'api_key']
def send(self, msg):
params = {
'user': self.params['api_id'],
'passwd': self.params['api_key']
}
f = FreeClient(**params)
res = f.send_sms(msg)
if res.status_code == 200:
return True
        if res.status_code == 400:
raise MissingConfigParameter()
if res.status_code == 500:
raise ServerError()
return False
|
ledtvavs/repository.ledtv
|
script.tvguide.Vader/resources/playwith/playwithchannel.py
|
Python
|
gpl-3.0
| 2,435
| 0.009035
|
import sys
import xbmc,xbmcaddon,xbmcvfs
import sqlite3
from subprocess import Popen
import datetime,time
channel = sys.argv[1]
start = sys.argv[2]
ADDON = xbmcaddon.Addon(id='script.tvguide.Vader')
def adapt_datetime(ts):
    # http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter('timestamp', convert_datetime)
path = xbmc.translatePath('special://profile/addon_data/script.tvguide.Vader/source.db')
try:
conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
except Exception as detail:
xbmc.log("EXCEPTION: (script.tvguide.Vader) %s" % detail, xbmc.LOGERROR)
c = conn.cursor()
startDate = datetime.datetime.fromtimestamp(float(start))
c.execute('SELECT DISTINCT * FROM programs WHERE channel=? AND start_date = ?', [channel,startDate])
for row in c:
title = row["title"]
endDate = row["end_date"]
duration = endDate - startDate
before = int(ADDON.getSetting('autoplaywiths.before'))
after = int(ADDON.getSetting('autoplaywiths.after'))
extra = (before + after) * 60
#TODO start from now
#seconds = duration.seconds + extra
#if seconds > (3600*4):
seconds = 3600*4
break
# Find the channel's stream url
c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
row = c.fetchone()
url = ""
if row:
url = row[0]
if not url:
quit()
# Find the actual url used to play the stream
#core = "dummy"
#xbmc.executebuiltin('PlayWith(%s)' % core)
player = xbmc.Player()
player.play(url)
count = 30
url = ""
while count:
count = count - 1
time.sleep(1)
if player.isPlaying():
url = player.getPlayingFile()
break
player.stop()
# Play with your own preferred player and paths
if url:
name = "%s = %s = %s" % (start,channel,title)
name = name.encode("cp1252")
filename = xbmc.translatePath("special://temp/%s.ts" % name)
#filename = "/storage/recordings/%s.ts" % name
ffmpeg = r"c:\utils\ffmpeg.exe"
ffmpeg = r"/usr/bin/ffmpeg"
cmd = [ffmpeg, "-y", "-i", url, "-c", "copy", "-t", str(seconds), filename]
p = Popen(cmd,shell=True)
#p = Popen(cmd,shell=False)
|
goldmann/cct
|
cct/__init__.py
|
Python
|
mit
| 696
| 0.001437
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import logging
def setup_logging(name="cct", level=logging.DEBUG):
# create logger
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(level)
# create console handler and set level to debug
    ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
|
hhucn/webvulnscan
|
webvulnscan/__main__.py
|
Python
|
mit
| 428
| 0
|
#!/usr/bin/env python
# Execute with
# $ python webvulnscan/__main__.py (2.6+)
# $ python -m webvulnscan (2.7+)
import sys
if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(path)))
import webvulnscan
if __name__ == '__main__':
    webvulnscan.main()
|
twilio/twilio-python
|
tests/integration/preview/sync/service/test_sync_list.py
|
Python
|
mit
| 8,183
| 0.003666
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SyncListTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permission
|
s": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.create()
self.holodeck.assert_has_request(Request(
'post',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permissions": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.create()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"lists": [],
"meta": {
"first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
"key": "lists",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"lists": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permissions": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
"key": "lists",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
        self.assertIsNotNone(actual)
|
valkyriesavage/invenio
|
modules/bibauthorid/lib/bibauthorid_tables_utils.py
|
Python
|
gpl-2.0
| 54,551
| 0.00275
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
bibauthorid_tables_utils
Bibauthorid's DB handler
"""
import sys
import re
import random
import bibauthorid_config as bconfig
import bibauthorid_structs as dat
from search_engine import get_record
from bibrank_citation_searcher import get_citation_dict
from dbquery import run_sql
from dbquery import OperationalError, ProgrammingError
from bibauthorid_utils import split_name_parts, create_normalized_name
from bibauthorid_utils import clean_name_string
from bibauthorid_authorname_utils import update_doclist
def get_papers_recently_modified(date=''):
'''
Returns the bibrecs with modification date more recent then date, or all
the bibrecs if no date is specified.
@param date: date
'''
papers = run_sql("select id from bibrec where modification_date > %s",
(str(date),))
if papers:
bibrecs = [i[0] for i in papers]
bibrecs.append(-1)
min_date = run_sql("select max(modification_date) from bibrec where "
"id in %s", (tuple(bibrecs),))
else:
min_date = run_sql("select now()")
return papers, min_date
def populate_authornames_bibrefs_from_authornames():
'''
Populates aidAUTHORNAMESBIBREFS.
For each entry in aidAUTHORNAMES creates a corresponding entry in aidA.B. so it's possible to search
by bibrec/bibref at a reasonable speed as well and not only by name.
'''
nids = run_sql("select id,bibrefs from aidAUTHORNAMES")
for nid in nids:
for bibref in nid[1].split(','):
if bconfig.TABLES_UTILS_DEBUG:
print ('populate_authornames_bibrefs_from_authornames: Adding: '
' %s %s' % (str(nid[0]), str(bibref)))
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id, bibref) "
"values (%s,%s)", (str(nid[0]), str(bibref)))
def authornames_tables_gc(bunch_size=50):
'''
Performs garbage collecting on the authornames tables.
Potentially really slow.
'''
bunch_start = run_sql("select min(id) from aidAUTHORNAMESBIBREFS")
if len(bunch_start) >= 1:
bunch_start = int(bunch_start[0][0])
else:
return
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s, %s"
, (str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
while len(abfs_ids_bunch) >= 1:
bib100list = []
bib700list = []
for i in abfs_ids_bunch:
if i[2].split(':')[0] == '100':
bib100list.append(i[2].split(':')[1])
elif i[2].split(':')[0] == '700':
bib700list.append(i[2].split(':')[1])
bib100liststr = '( '
for i in bib100list:
bib100liststr += "'" + str(i) + "',"
bib100liststr = bib100liststr[0:len(bib100liststr) - 1] + " )"
bib700liststr = '( '
for i in bib700list:
bib700liststr += "'" + str(i) + "',"
bib700liststr = bib700liststr[0:len(bib700liststr) - 1] + " )"
if len(bib100list) >= 1:
bib10xids = run_sql("select id from bib10x where id in %s"
% bib100liststr)
else:
bib10xids = []
if len(bib700list) >= 1:
bib70xids = run_sql("select id from bib70x where id in %s"
% bib700liststr)
else:
bib70xids = []
bib10xlist = []
bib70xlist = []
for i in bib10xids:
bib10xlist.append(str(i[0]))
for i in bib70xids:
bib70xlist.append(str(i[0]))
bib100junk = set(bib100list).difference(set(bib10xlist))
bib700junk = set(bib700list).difference(set(bib70xlist))
idsdict = {}
for i in abfs_ids_bunch:
idsdict[i[2]] = [i[0], i[1]]
junklist = []
for i in bib100junk:
junklist.append('100:' + i)
for i in bib700junk:
junklist.append('700:' + i)
for junkref in junklist:
try:
id_to_remove = idsdict[junkref]
run_sql("delete from aidAUTHORNAMESBIBREFS where id=%s",
(str(id_to_remove[0]),))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: idAUTHORNAMESBIBREFS deleting row " + str(id_to_remove)
authrow = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
if len(authrow[0][2].split(',')) == 1:
run_sql("delete from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
                    if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES deleting " + str(authrow)
else:
bibreflist = ''
for ref in authrow[0][2].split(','):
if ref != junkref:
bibreflist += ref + ','
                    bibreflist = bibreflist[0:len(bibreflist) - 1]
run_sql("update aidAUTHORNAMES set bibrefs=%s where id=%s",
(bibreflist, id_to_remove[1]))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES updating " + str(authrow) + ' with ' + str(bibreflist)
except:
pass
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s,%s" ,
(str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
def update_authornames_tables_from_paper(papers_list=[]):
"""
Updates the authornames tables with the names on the given papers list
@param papers_list: list of the papers which have been updated (bibrecs) ((1,),)
For each paper of the list gathers all names, bibrefs and bibrecs to be added to aidAUTHORNAMES
table, taking care of updating aidA.B. as well
NOTE: update_authornames_tables_from_paper: just to remember: get record would be faster but
we don't have the bibref there,
maybe there is a way to rethink everything not to use bibrefs? How to address
authors then?
"""
def update_authornames_tables(name, bibref):
'''
        Update the tables for one bibref,name tuple
'''
authornames_row = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where db_name like %s",
(str(name),))
authornames_bibrefs_row = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS "
"where bibref like %s", (str(bibref),))
#@XXX: update_authornames_tables: if i'm not wrong there should always be only one result; will be checked further on
if ((len(authornames_row) > 1) or (len(authornames_bibrefs_row) > 1) or
(len(authornames_row) < len(authornames_bibrefs_row))):
if bconfig.TABLES_UTILS_DEBUG:
print "update_authornames_tables: More then one result or missing authornames?? Something is wrong, not updating" + str(authornames_row) + str(authornames_bibrefs_row)
return
if len(authornames_row) == 1:
# we have an hit for the name string; check if there is the 'new' bibref ass
|
teamllamauk/ScopeDriver
|
plugins/debris.py
|
Python
|
gpl-3.0
| 6,763
| 0.000591
|
import random
import time
from dot3k.menu import MenuOption
class Debris(MenuOption):
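  """Small 'dodge the debris' game for the dot3k display: debris scrolls left on
  every update and the game ends when a rock reaches the player's position."""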
  def __init__(self, backlight=None):
if backlight is None:
import dot3k.backlight
self.backlight = dot3k.backlight
else:
self.backlight = backlight
|
self.debug = False
self.star_seed = 'thestarsmydestination'
self.debris_seed = 'piratemonkeyrobotninja'
self.debris = []
self.stars = []
self.running = False
self.max_debris = 10
self.max_stars = 10
self.last_update = 0
self.time_start = 0
self.sprites = [
[14, 31, 31, 14, 0, 0, 0, 0], # 0: Debris top of char
[0, 0, 0, 0, 14, 31, 31, 14], # 1: Debris bottom of char
[30, 5, 7, 30, 0, 0, 0, 0], # 2: Ship top of char
[0, 0, 0, 0, 30, 5, 7, 30], # 3: Ship bottom of char
[30, 5, 7, 30, 14, 31, 31, 14], # 4: Ship above debris
[14, 31, 31, 14, 30, 5, 7, 30], # 5: Ship below debris
[0, 14, 31, 31, 31, 31, 14, 0] # 6: Giant debris
]
self.width = 16
self.height = 5 # Two rows per char
self.player_x = 1 # Player horizontal position
self.player_y = 3 # Player vertical position
self.current_player_x = None
self.current_player_y = None
self.current_player_pos = None
self.fill_debris()
MenuOption.__init__(self)
def begin(self):
self.running = False
self.reset()
self.backlight.hue(0.0)
def reset(self):
self.player_x = 1
self.player_y = 3
self.fill_debris()
self.fill_stars()
self.running = True
self.time_start = 0
self.last_update = 0
def fill_stars(self):
random.seed(self.star_seed)
self.stars = []
while len(self.stars) < self.max_stars:
new = (random.randint(0, 15), random.randint(0, 2))
if not new in self.stars:
self.stars.append(new)
def fill_debris(self):
random.seed(self.debris_seed)
self.debris = []
while len(self.debris) < self.max_debris:
new = (random.randint(5, 15), random.randint(0, self.height))
if not new in self.debris:
self.debris.append(new)
print(self.debris)
def left(self):
if not self.running:
r = int(self.get_option('Backlight', 'r'))
g = int(self.get_option('Backlight', 'g'))
b = int(self.get_option('Backlight', 'b'))
self.backlight.rgb(r, g, b)
return False
self.player_x -= 1
if self.player_x < 0:
self.player_x = 0
return True
def right(self):
if not self.running:
self.reset()
return True
self.player_x += 1
if self.player_x > 15:
self.player_x = 15
return True
def up(self):
self.player_y -= 1
if self.player_y < 0:
self.player_y = 0
if self.debug:
print("Player up", self.player_y)
return True
def down(self):
self.player_y += 1
if self.player_y > self.height:
self.player_y = self.height - 1
if self.debug:
print("Player down", self.player_y)
return True
def update(self, menu):
if self.time_start == 0:
for idx, sprite in enumerate(self.sprites):
menu.lcd.create_char(idx, sprite)
menu.clear_row(0)
menu.clear_row(1)
menu.clear_row(2)
for x in range(3):
menu.lcd.set_cursor_position(5, 1)
menu.lcd.write(' 0' + str(3 - x) + '! ')
time.sleep(0.5)
self.backlight.hue(0.5)
self.time_start = self.millis()
# Move all stars left
for idx, star in enumerate(self.stars):
self.stars[idx] = (star[0] - 0.5, star[1])
# Move all debris left 1 place
for idx, rock in enumerate(self.debris):
self.debris[idx] = (rock[0] - 1, rock[1])
debris_x = int(rock[0])
debris_y = int(rock[1])
if debris_x < 0:
continue
if debris_x == self.player_x and debris_y == self.player_y:
# Boom!
menu.lcd.set_cursor_position(5, 1)
menu.lcd.write(' BOOM!')
if self.debug:
print(debris_x, debris_y)
print(self.player_x,
self.player_y)
exit()
self.running = False
return False
# Remove off-screen debris
self.debris = list(filter(lambda x: x[0] > -1, self.debris))
# Remove off-screen stars
self.stars = list(filter(lambda x: x[0] > -1, self.stars))
# Create new debris to replace the removed ones
while len(self.debris) < self.max_debris:
self.debris.append((15, random.randint(0, self.height)))
while len(self.stars) < self.max_stars:
self.stars.append((15, random.randint(0, 2)))
return True
def redraw(self, menu):
if not self.running:
return False
if self.millis() - self.last_update >= 250:
if not self.update(menu):
return False
self.last_update = self.millis()
game_time = str(int((self.millis() - self.time_start) / 1000)).zfill(3)
self.backlight.sweep(((self.millis() - self.time_start) / 500 % 360) / 359.0)
buffer = []
for i in range(3):
buffer.append([' '] * 16)
for idx, rock in enumerate(self.stars):
buffer[rock[1]][int(rock[0])] = '.'
player_v = (self.player_y % 2)
buffer[int(self.player_y / 2)][self.player_x] = chr(2 + player_v)
for idx, rock in enumerate(self.debris):
debris_x = int(rock[0])
debris_y = int(rock[1])
debris_v = (debris_y % 2)
debris_sprite = debris_v
if int(debris_y / 2) == int(self.player_y / 2) and debris_x == self.player_x and debris_v != player_v:
debris_sprite = 4 + player_v
current = buffer[int(debris_y / 2)][debris_x]
if current == chr(0) or current == chr(1):
debris_sprite = 6 # Giant Debris!
buffer[int(debris_y / 2)][debris_x] = chr(debris_sprite)
# Draw elapsed seconds
buffer[0][16 - len(game_time):len(game_time)] = game_time
for idx, row in enumerate(buffer):
menu.write_row(idx, ''.join(row))
|
hbiyik/tribler
|
src/tribler-core/tribler_core/restapi/base_api_test.py
|
Python
|
lgpl-3.0
| 2,844
| 0.003516
|
import json
from aiohttp import ClientSession
from tribler_core.restapi import get_param
from tribler_core.tests.tools.test_as_server import TestAsServer
from tribler_core.utilities.path_util import Path
from tribler_core.version import version_id
def path_to_str(obj):
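    """Recursively convert Path values inside dicts and lists to plain strings."""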
if isinstance(obj, dict):
return {path_to_str(k):path_to_str(v) for k, v in obj.items()}
if isinstance(obj, list):
return [path_to_str(i) for i in obj]
if isinstance(obj, Path):
return str(obj)
return obj
class AbstractBaseApiTest(TestAsServer):
"""
Tests for the Tribler HTTP API should create a subclass of this class.
"""
def setUpPreSession(self):
super(AbstractBaseApiTest, self).setUpPreSession()
self.config.set_http_api_enabled(True)
self.config.set_http_api_retry_port(True)
        self.config.set_tunnel_community_enabled(False)
self.config.set_trustchain_enabled(False)
# Make sure we select a random port for the HTTP API
self.config.set_http_api_port(self.get_port())
async def do_request(self, endpoint, req_type, data, headers, json_response):
url = 'http://localhost:%d/%s' % (self.session.config.get_http_api_port(), endpoint)
        headers = headers or {'User-Agent': 'Tribler ' + version_id}
async with ClientSession() as session:
async with session.request(req_type, url, data=data, headers=headers) as response:
return response.status, (await response.json(content_type=None)
if json_response else await response.read())
class AbstractApiTest(AbstractBaseApiTest):
"""
This class contains some helper methods to perform requests and to check the right response code/
response json returned.
"""
async def do_request(self, endpoint, expected_code=200, expected_json=None,
request_type='GET', post_data={}, headers=None, json_response=True):
data = json.dumps(path_to_str(post_data)) if isinstance(post_data, (dict, list)) else post_data
status, response = await super(AbstractApiTest, self).do_request(endpoint, request_type,
data, headers, json_response)
self.assertEqual(status, expected_code, response)
if response is not None and expected_json is not None:
self.assertDictEqual(expected_json, response)
return response
class TestBaseApi(TestAsServer):
"""
Test some basic functionality of the restful API
"""
def test_get_parameters(self):
"""
Test the get_parameters method
"""
parameters = {'abc': [3]}
self.assertIsNone(get_param(parameters, 'abcd'))
self.assertIsNotNone(get_param(parameters, 'abc'))
|
danfairs/django-dfk
|
dfk/models.py
|
Python
|
bsd-3-clause
| 176
| 0
|
class DeferredForeignKey(object):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.args = args
self.kwargs = kwargs
| |
gauthiier/mailinglists
|
www/archives.py
|
Python
|
gpl-3.0
| 1,896
| 0.030063
|
import logging, os, json
import search.archive
class Singleton(type):
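    """Metaclass that creates each class's instance only once and returns the
    cached instance on every subsequent call."""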
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
logging.info('**** new Singleton instance')
return cls._instances[cls]
class Archives(metaclass=Singleton):
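    """Loads all mailing-list archives found under archives_dir once and keeps
    them in memory (instantiated as a singleton, see above)."""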
def __init__(self, archives_dir=None):
if archives_dir==None:
from www import config
self.archives_dir = config.ARCHIVES_PATH
else:
self.archives_dir = archives_dir
self.data = {}
self.loaded = False
logging.info('loading archives...')
self.load()
logging.info('done.')
def load(self):
if self.loaded:
return
if not os.path.isdir(self.archives_dir):
logging.error("Archives:: the path - " + self.archives_dir + " - is not a valid directory. Aborting.")
logging.error(" -- current cwd is: " + os.getcwd())
return
arch = [d for d in os.listdir(self.archives_dir) if os.path.isdir(os.path.join(self.archives_dir, d))]
self.data = {}
for a in arch:
logging.info("loading " + a)
            # archive_path = os.path.join(self.archives_dir, a)
self.data[a] = self.load_archive(self.archives_dir, a)
logging.info("done.")
self.loaded = True
def load_archive(self, archive_dir, archive_name):
if not os.path.isdir(archive_dir):
logging.error("Archives:: the path - " + archive_dir + " - is not a valid directory. Aborting.")
return
archive = search.archive.Archive(archive_dir)
        archive.load(archive_name)
return archive
    # # -- should use Archive in search module here....
# files = [f for f in os.listdir(archive_dir) if f.endswith('.json')]
# arch = {}
# for f in files:
# file_path = os.path.join(archive_dir, f)
# with open(file_path) as fdata:
# arch[f.replace('.json', '')] = json.load(fdata)
# return arch
|
joakim-hove/ert
|
test-data/local/snake_oil_structure/snake_oil/jobs/snake_oil_npv.py
|
Python
|
gpl-3.0
| 2,757
| 0
|
#!/usr/bin/env python
from ecl.summary import EclSum
OIL_PRICES = {
"2010-01-01": 78.33,
"2010-02-01": 76.39,
"2010-03-01": 81.20,
"2010-04-01": 84.29,
"2010-05-01": 73.74,
"2010-06-01": 75.34,
"2010-07-01": 76.32,
|
"2010-08-01": 76.60,
"2010-09-01": 75.24,
"2010-10-01": 81.89,
"2010-11-01": 84.25,
"2010-12-01": 89.15,
"2011-01-01": 89.17,
"2011-02-01": 88.58,
"2011-03-01": 102.86,
"2011-04-01": 109.53,
"2011-05-01": 100.90,
"2011-06-01": 96.26,
"2011-07-01": 97.30,
"2011-08-01": 86.33,
"2011-09-01": 85.52,
"2011-10-01": 86.32,
"2011-11-01": 97.16,
"
|
2011-12-01": 98.56,
"2012-01-01": 100.27,
"2012-02-01": 102.20,
"2012-03-01": 106.16,
"2012-04-01": 103.32,
"2012-05-01": 94.65,
"2012-06-01": 82.30,
"2012-07-01": 87.90,
"2012-08-01": 94.13,
"2012-09-01": 94.51,
"2012-10-01": 89.49,
"2012-11-01": 86.53,
"2012-12-01": 87.86,
"2013-01-01": 94.76,
"2013-02-01": 95.31,
"2013-03-01": 92.94,
"2013-04-01": 92.02,
"2013-05-01": 94.51,
"2013-06-01": 95.77,
"2013-07-01": 104.67,
"2013-08-01": 106.57,
"2013-09-01": 106.29,
"2013-10-01": 100.54,
"2013-11-01": 93.86,
"2013-12-01": 97.63,
"2014-01-01": 94.62,
"2014-02-01": 100.82,
"2014-03-01": 100.80,
"2014-04-01": 102.07,
"2014-05-01": 102.18,
"2014-06-01": 105.79,
"2014-07-01": 103.59,
"2014-08-01": 96.54,
"2014-09-01": 93.21,
"2014-10-01": 84.40,
"2014-11-01": 75.79,
"2014-12-01": 59.29,
"2015-01-01": 47.22,
"2015-02-01": 50.58,
"2015-03-01": 47.82,
"2015-04-01": 54.45,
"2015-05-01": 59.27,
"2015-06-01": 59.82,
"2015-07-01": 50.90,
"2015-08-01": 42.87,
"2015-09-01": 45.48,
}
if __name__ == "__main__":
ecl_sum = EclSum("SNAKE_OIL_FIELD")
start_time = ecl_sum.getStartTime()
date_ranges = ecl_sum.timeRange(start_time, interval="1M")
production_sums = ecl_sum.blockedProduction("FOPT", date_ranges)
npv = 0.0
for index in range(0, len(date_ranges) - 1):
date = date_ranges[index + 1] # end of period
production_sum = production_sums[index]
oil_price = OIL_PRICES[date.date().strftime("%Y-%m-%d")]
production_value = oil_price * production_sum
npv += production_value
with open("snake_oil_npv.txt", "w") as output_file:
output_file.write("NPV %s\n" % npv)
if npv < 80000:
rating = "POOR"
elif 80000 <= npv < 100000:
rating = "AVERAGE"
elif 100000 <= npv < 120000:
rating = "GOOD"
else:
rating = "EXCELLENT"
output_file.write("RATING %s\n" % rating)
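# Worked example of the rating thresholds above (editor's illustration with hypothetical values):
#   npv = 95000.0   -> 80000 <= npv < 100000  -> rating == "AVERAGE"
#   npv = 125000.0  -> npv >= 120000          -> rating == "EXCELLENT"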
|
GafferHQ/gaffer
|
.github/workflows/main/setBuildVars.py
|
Python
|
bsd-3-clause
| 6,394
| 0.021896
|
#!/usr/bin/env python
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Cinesite VFX Ltd. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import datetime
import github
import os
import re
import sys
import json
# GitHub Action workflow variables can be populated at run-time by echoing special
# strings to an env file. The following allows vars to be set:
#
# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files
#
# echo var=value >> $GITHUB_ENV
#
# We make use of this mechanism to allow custom logic to define the build name
# as well as determine the correct commit hash depending on the nature of the
# trigger. These variables can be referenced in a pipeline yaml file downstream
# of the step that runs this script.
# Actions is somewhat sparse in what information is available via the GITHUB_*
# env vars (from the github context). There is however a veritable treasure
# trove in the .json file pointed to by GITHUB_EVENT_PATH. "The Internets" seem
# to suggest this is the most reliable way of determining information about the
# triggering commit. Some of the official context vars may vary after a retry,
# etc. too.
#
# The contents of this file are based on the webhook payload, so they should be
# relatively stable as the payload is part of that public API.
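# A minimal sketch of the mechanism described above (editor's illustration; MY_VAR is a
# hypothetical variable, not one used by this workflow):
#   with open( os.environ["GITHUB_ENV"], "a" ) as env_file :
#       env_file.write( "MY_VAR=value\n" )   # MY_VAR becomes visible to later workflow steps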
with open( os.environ["GITHUB_EVENT_PATH"] ) as f :
eventData = json.load( f )
## Source Branches
buildBranch = os.environ.get( "GITHUB_BASE_REF", "" )
sourceBranch = os.environ.get( "GITHUB_HEAD_REF", buildBranch )
## Source Commit Hash
commit = os.environ["GITHUB_SHA"]
## Pull Request builds
# Actions merges the branch into its target in PR builds, so GITHUB_SHA isn't
# correct as it references the ephemeral merge. We also want to extract the
# pull request number for the build name.
pullRequest = ""
if os.environ.get( "GITHUB_EVENT_NAME" ) == "pull_request" :
commit = eventData["pull_request"]["head"]["sha"]
pullRequest = eventData["pull_request"]["number"]
## Tag builds
# A variety of build types may be in service of a tag (ie: release publish
# or manual retry for a specific tag).
tag = ""
if "/tags" in os.environ["GITHUB_REF"] :
tag = os.environ["GITHUB_REF"].replace( "refs/tags/", "" )
## Release ID
# To allow builds to be published to a release, we need to lookup the ID of any
# release that matches the tag we're building, if there is one.
releaseId = ""
if tag :
githubClient = github.Github( os.environ.get( 'GITHUB_ACCESS_TOKEN' ) )
repo = githubClient.get_repo( os.environ.get( 'GITHUB_REPOSITORY' ) )
for r in repo.get_releases() :
if r.tag_name == tag :
releaseId = r.id
break
if releaseId :
# Check that the version specified by the SConstruct matches the
# version in the tag.
versions = {}
versionRe = re.compile( r"^gaffer(.*)Version = (\d+)")
with open( "SConstruct" ) as sconstruct :
for line in sconstruct.readlines() :
versionMatch = versionRe.match( line )
if versionMatch :
versions[versionMatch.group( 1 )] = versionMatch.group( 2 )
version = "{Milestone}.{Major}.{Minor}.{Patch}".format( **versions )
if version != tag :
sys.stderr.write( "Tag \"{}\" does not match SConstruct version \"{}\"\n".format( tag, version ) )
sys.exit( 1 )
## Build Name
# We have a couple of naming conventions for builds, depending on the nature of the trigger.
formatVars = {
"variant" : os.environ["GAFFER_BUILD_VARIANT"],
"timestamp" : datetime.datetime.now().strftime( "%Y_%m_%d_%H%M" ),
"pullRequest" : pullRequest,
"shortCommit" : commit[:8],
"tag" : tag,
"branch" : re.sub( r"[^a-zA-Z0-9_]", "", sourceBranch )
}
nameFormats = {
"default" : "gaffer-{timestamp}-{shortCommit}-{variant}",
"pull_request" : "gaffer-pr{pullRequest}-{branch}-{timestamp}-{shortCommit}-{variant}",
"release" : "gaffer-{tag}-{variant}"
}
trigger = os.environ.get( 'GITHUB_EVENT_NAME', '' )
# If we have a releaseID (and tag) then we always use release naming convention
# to allow manual re-runs of release builds that fail for <reasons>.
if tag and releaseId :
print( "Have Release ID %s for tag %s, using release naming." % ( releaseId, tag ) )
trigger = "release"
buildName = nameFormats.get( trigger, nameFormats['default'] ).format( **formatVars )
## Set vars in the downstream workflow environment
with open( os.environ["GITHUB_ENV"], "a" ) as f :
print( "Setting $GAFFER_BUILD_NAME to '%s'" % buildName )
f.write( 'GAFFER_BUILD_NAME=%s\n' % buildName )
print( "Setting $GAFFER_SOURCE_COMMIT to '%s'" % commit )
f.write( 'GAFFER_SOURCE_COMMIT=%s\n' % commit )
print( "Setting $GAFFER_GITHUB_RELEASEID to '%s'" % releaseId )
f.write( 'GAFFER_GITHUB_RELEASEID=%s\n' % releaseId )
|
vericred/vericred-python
|
test/test_county_bulk.py
|
Python
|
apache-2.0
| 9,989
| 0.002903
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::=
|
BambooHR/rapid
|
rapid/release/data/models.py
|
Python
|
apache-2.0
| 4,322
| 0.003702
|
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=no-member,too-few-public-methods
import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Column, String, ForeignKey, Integer, DateTime, Boolean, Text, Enum
from rapid.lib import get_declarative_base
from rapid.master.data.database.models.base.base_model import BaseModel
from rapid.lib.constants import VcsReleaseStepType
Base = get_declarative_base()
class Release(BaseModel, Base):
name = Column(String(255), nullable=False, index=True)
date_created = Column(DateTime(), nullable=False, default=datetime.datetime.utcnow, index=True)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False, index=True)
commit_id = Column(Integer, ForeignKey('commits.id'), nullable=False, index=True)
integration_id = Column(Integer, ForeignKey('integrations.id'), index=True)
status = relationship('Status')
integration = relationship('Integration')
commit = relationship('Commit', backref=backref('release', uselist=False))
details = relationship('ReleaseDetail', uselist=False, backref=backref('release'))
class ReleaseDetail(BaseModel, Base):
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False, index=True)
details = Column(Text)
class StepIntegration(BaseModel, Base):
step_id = Column(Integer, ForeignKey('steps.id'), nullable=False, index=True)
integration_id = Column(Integer, ForeignKey('integrations.id'), nullable=False, index=True)
class Step(BaseModel, Base):
name = Column(String(100), nullable=False)
custom_id = Column(String(25), nullable=False)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False, index=True)
user_required = Column(Boolean, default=False, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False, index=True)
sort_order = Column(Integer, default=0)
release = relationship("Release", lazy='subquery', backref="steps")
status = relationship('Status')
integrations = relationship("Integration", secondary="step_integrations")
class StepUser(BaseModel, Base):
step_id = Column(Integer, ForeignKey('steps.id'), nullable=False, index=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False, index=True)
date_created = Column(DateTime(), nullable=False, default=datetime.datetime.utcnow)
class StepUserComment(BaseModel, Base):
step_user_id = Column(Integer, ForeignKey('step_users.id'), nullable=False)
comment = Column(Text)
class User(BaseModel, Base):
name = Column(String(150), nullable=False)
username = Column(String(150), nullable=False)
active = Column(Boolean, default=True, nullable=False)
class VcsRelease(BaseModel, Base):
search_filter = Column(String(500), nullable=False)
notification_id = Column(String(250), nullable=False)
vcs_id = Column(Integer, ForeignKey('vcs.id'), nullable=False, index=True)
auto_release = Column(Boolean, nullable=False, default=False)
vcs = relationship('Vcs', lazy='subquery', backref='product_release')
steps = relationship("VcsReleaseStep", backref='vcs_release')
class VcsReleaseStep(BaseModel, Base):
name = Column(String(250), nullable=False)
custom_id = Column(String(250), nullable=False)
user_required = Column(Boolean, default=False, nullable=False)
sort_order = Column(Integer, default=0)
type = Column(Enum(*list(map(lambda x: x.name, VcsReleaseStepType))), nullable=False, default='PRE')
vcs_release_id = Column(Integer, ForeignKey('vcs_releases.id'), nullable=False, index=True)
__all__ = ['Release', 'StepIntegration', 'Step', 'StepUser', 'StepUserComment', 'StepIntegration', 'User', 'VcsRelease', 'VcsReleaseStep']
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py
|
Python
|
mit
| 5,129
| 0.003509
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_business_cards.py
DESCRIPTION:
This sample demonstrates how to recognize fields on business cards.
See fields found on a business card here:
https://aka.ms/formrecognizer/businesscardfields
USAGE:
python sample_recognize_business_cards.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeBusinessCardSample(object):
def recognize_business_card(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "..", "./sample_forms/business_cards/business-card-english.jpg"))
# [START recognize_business_cards]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_business_cards(business_card=f, locale="en-US")
business_cards = poller.result()
for idx, business_card in enumerate(business_cards):
print("--------Recognizing business card #{}--------".format(idx+1))
contact_names = business_card.fields.get("ContactNames")
if contact_names:
for contact_name in contact_names.value:
print("Contact First Name: {} has confidence: {}".format(
contact_name.value["FirstName"].value, contact_name.value["FirstName"].confidence
))
print("Contact Last Name: {} has confidence: {}".format(
contact_name.value["LastName"].value, contact_name.value["LastName"].confidence
))
company_names = business_card.fields.get("CompanyNames")
if company_names:
for company_name in company_names.value:
print("Company Name: {} has confidence: {}".format(company_name.value, company_name.confidence))
departments = business_card.fields.get("Departments")
if departments:
for department in departments.value:
print("Department: {} has confidence: {}".format(department.value, department.confidence))
job_titles = business_card.fields.get("JobTitles")
if job_titles:
for job_title in job_titles.value:
print("Job Title: {} has confidence: {}".format(job_title.value, job_title.confidence))
emails = business_card.fields.get("Emails")
if emails:
for email in emails.value:
print("Email: {} has confidence: {}".format(email.value, email.confidence))
websites = business_card.fields.get("Websites")
if websites:
for website in websites.value:
print("Website: {} has confidence: {}".format(website.value, website.confidence))
addresses = business_card.fields.get("Addresses")
if addresses:
for address in addresses.value:
print("Address: {} has confidence: {}".format(address.value, address.confidence))
mobile_phones = business_card.fields.get("MobilePhones")
if mobile_phones:
for phone in mobile_phones.value:
print("Mobile phone number: {} has confidence: {}".format(phone.value, phone.confidence))
faxes = business_card.fields.get("Faxes")
if faxes:
for fax in faxes.value:
print("Fax number: {} has confidence: {}".format(fax.value, fax.confidence))
work_phones = business_card.fields.get("WorkPhones")
if work_phones:
for work_phone in work_phones.value:
print("Work phone number: {} has confidence: {}".format(work_phone.value, work_phone.confidence))
other_phones = business_card.fields.get("OtherPhones")
if other_phones:
for other_phone in other_phones.value:
print("Other phone number: {} has confidence: {}".format(other_phone.value, other_phone.confidence))
# [END recognize_business_cards]
if __name__ == '__main__':
sample = RecognizeBusinessCardSample()
sample.recognize_business_card()
|
zeeshanali/blaze
|
blaze/compute/air/passes/ckernel_impls.py
|
Python
|
bsd-3-clause
| 1,537
| 0.003904
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
transform(CKernelImplementations(), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def op_kernel(self, op):
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = monosig.argtypes
if function.matches('ckernel', argtypes):
overload = function.best_match('ckernel', argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
return op
|
Robpol86/sphinxcontrib-versioning
|
sphinxcontrib/versioning/lib.py
|
Python
|
mit
| 5,599
| 0.0025
|
"""Common objects used throughout the project."""
import atexit
import functools
import logging
import os
import shutil
import tempfile
import weakref
import click
class Config(object):
"""The global configuration and state of the running program."""
def __init__(self):
"""Constructor."""
self._already_set = set()
self._program_state = dict()
# Booleans.
self.banner_greatest_tag = False
self.banner_recent_tag = False
self.greatest_tag = False
self.invert = False
self.no_colors = False
self.no_local_conf = False
self.recent_tag = False
self.show_banner = False
# Strings.
self.banner_main_ref = 'master'
self.chdir = None
self.git_root = None
self.local_conf = None
self.priority = None
self.push_remote = 'origin'
self.root_ref = 'master'
# Tuples.
self.grm_exclude = tuple()
self.overflow = tuple()
self.sort = tuple()
self.whitelist_branches = tuple()
self.whitelist_tags = tuple()
# Integers.
self.verbose = 0
def __contains__(self, item):
"""Implement 'key in Config'.
:param str item: Key to search for.
:return: If item in self._program_state.
:rtype: bool
"""
return item in self._program_state
def __iter__(self):
"""Yield names and current values of attributes that can be set from Sphinx config files."""
for name in (n for n in dir(self) if not n.startswith('_') and not callable(getattr(self, n))):
yield name, getattr(self, name)
def __repr__(self):
"""Class representation."""
attributes = ('_program_state', 'verbose', 'root_ref', 'overflow')
key_value_attrs = ', '.join('{}={}'.format(a, repr(getattr(self, a))) for a in attributes)
return '<{}.{} {}>'.format(self.__class__.__module__, self.__class__.__name__, key_value_attrs)
def __setitem__(self, key, value):
"""Implement Config[key] = value, updates self._program_state.
:param str key: Key to set in self._program_state.
:param value: Value to set in self._program_state.
"""
self._program_state[key] = value
@classmethod
def from_context(cls):
"""Retrieve this class' instance from the current Click context.
:return: Instance of this class.
:rtype: Config
"""
try:
ctx = click.get_current_context()
except RuntimeError:
return cls()
return ctx.find_object(cls)
def pop(self, *args):
"""Pop item from self._program_state.
:param iter args: Passed to self._program_state.
:return: Object from self._program_state.pop().
"""
return self._program_state.pop(*args)
def update(self, params, ignore_set=False, overwrite=False):
"""Set instance values from dictionary.
:param dict params: Click context params.
:param bool ignore_set: Skip already-set values instead of raising AttributeError.
:param bool overwrite: Allow overwriting already-set values.
"""
log = logging.getLogger(__name__)
valid = {i[0] for i in self}
for key, value in params.items():
if not hasattr(self, key):
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, key))
if key not in valid:
message = "'{}' object does not support item assignment on '{}'"
raise AttributeError(message.format(self.__class__.__name__, key))
if key in self._already_set:
if ignore_set:
log.debug('%s already set in config, skipping.', key)
continue
if not overwrite:
message = "'{}' object does not support item re-assignment on '{}'"
raise AttributeError(message.format(self.__class__.__name__, key))
setattr(self, key, value)
self._already_set.add(key)
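# Hedged usage sketch of the Config API above (editor's illustration; keys and values are
# hypothetical):
#   config = Config.from_context()        # falls back to a fresh Config() outside a Click context
#   config.update({'root_ref': 'main'})   # attribute assignment, tracked in _already_set
#   config['cli_args'] = ['--help']       # arbitrary program state via __setitem__
#   assert 'cli_args' in config           # __contains__ checks _program_state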
class HandledError(click.ClickException):
"""Abort the program."""
def __init__(self):
"""Constructor."""
super(HandledError, self).__init__(None)
def show(self, **_):
"""Error messages should be logged before raising this exception."""
logging.critical('Failure.')
class TempDir(object):
"""Similar to TemporaryDirectory in Python 3.x but with tuned weakref implementation."""
def __init__(self, defer_atexit=False):
"""Constructor.
:param bool defer_atexit: cleanup() to atexit instead of after garbage collection.
"""
self.name = tempfile.mkdtemp('sphinxcontrib_versioning')
if defer_atexit:
atexit.register(shutil.rmtree, self.name, True)
return
try:
weakref.finalize(self, shutil.rmtree, self.name, True)
except AttributeError:
weakref.proxy(self, functools.partial(shutil.rmtree, self.name, True))
def __enter__(self):
"""Return directory path."""
return self.name
def __exit__(self, *_):
"""Cleanup when exiting context."""
self.cleanup()
def cleanup(self):
"""Recursively delete directory."""
shutil.rmtree(self.name, onerror=lambda *a: os.chmod(a[1], __import__('stat').S_IWRITE) or os.unlink(a[1]))
if os.path.exists(self.name):
raise IOError(17, "File exists: '{}'".format(self.name))
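# Hedged usage sketch of TempDir (editor's illustration):
#   with TempDir() as path:
#       ...                                  # work inside the temporary directory
#   # the directory is removed when the context exits
#   keep = TempDir(defer_atexit=True)        # cleaned up at interpreter exit instead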
|
stephanepechard/projy
|
projy/templates/DjangoProjectTemplate.py
|
Python
|
gpl-3.0
| 4,869
| 0.00801
|
# -*- coding: utf-8 -*-
""" Projy template for PythonPackage. """
# system
from datetime import date
from os import mkdir, rmdir
from shutil import move
from subprocess import call
# parent class
from projy.templates.ProjyTemplate import ProjyTemplate
# collectors
from projy.collectors.AuthorCollector import AuthorCollector
from projy.collectors.AuthorMailCollector import AuthorMailCollector
class DjangoProjectTemplate(ProjyTemplate):
""" Projy template class for PythonPackage. """
def __init__(self):
ProjyTemplate.__init__(self)
def directories(self):
""" Return the names of directories to be created. """
directories_description = [
self.project_name,
self.project_name + '/conf',
self.project_name + '/static',
]
return directories_description
def files(self):
""" Return the names of files to be created. """
files_description = [
# configuration
[ self.project_name,
'Makefile',
'DjangoMakefileTemplate' ],
[ self.project_name + '/conf',
'requirements_base.txt',
'DjangoRequirementsBaseTemplate' ],
[ self.project_name + '/conf',
'requirements_dev.txt',
'DjangoRequirementsDevTemplate' ],
[ self.project_name + '/conf',
'requirements_production.txt',
'DjangoRequirementsProdTemplate' ],
[ self.project_name + '/conf',
'nginx.conf',
'DjangoNginxConfTemplate' ],
[ self.project_name + '/conf',
'supervisord.conf',
'DjangoSupervisorConfTemplate' ],
[ self.project_name,
'fabfile.py',
'DjangoFabfileTemplate' ],
[ self.project_name,
'CHANGES.txt',
'PythonPackageCHANGESFileTemplate' ],
[ self.project_name,
'LICENSE.txt',
'GPL3FileTemplate' ],
[ self.project_name,
'README.txt',
'READMEReSTFileTemplate' ],
[ self.project_name,
'.gitignore',
'DjangoGitignoreTemplate' ],
# django files
[ self.project_name,
'dev.py',
'DjangoSettingsDevTemplate' ],
[ self.project_name,
'prod.py',
'DjangoSettingsProdTemplate' ],
]
return files_description
def substitutes(self):
""" Return the substitutions for the templating replacements. """
author_collector = AuthorCollector()
mail_collector = AuthorMailCollector()
substitute_dict = {
'project': self.project_name,
'project_lower': self.project_name.lower(),
'date': date.today().isoformat(),
'author': author_collector.collect(),
'author_email': mail_collector.collect(),
}
return substitute_dict
def posthook(self):
# build the virtualenv
call(['make'])
# create the Django project
call(['./venv/bin/django-admin.py', 'startproject', self.project_name])
# transform original settings files into 3 files for different env
mkdir('{p}/settings'.format(p=self.project_name))
self.touch('{p}/settings/__init__.py'.format(p=self.project_name))
move('dev.py', '{p}/settings'.format(p=self.project_name))
move('prod.py', '{p}/settings'.format(p=self.project_name))
move('{p}/{p}/settings.py'.format(p=self.project_name), '{p}/settings/base.py'.format(p=self.project_name))
# organize files nicely
mkdir('{p}/templates'.format(p=self.project_name))
move('{p}/manage.py'.format(p=self.project_name), 'manage.py')
move('{p}/{p}/__init__.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/urls.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/wsgi.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
rmdir('{p}/{p}'.format(p=self.project_name))
# create empty git repo
call(['git', 'init'])
# replace some lines
self.replace_in_file('{p}/wsgi.py'.format(p=self.project_name),
'"{p}.settings"'.format(p=self.project_name),
'"{p}.settings.production"'.format(p=self.project_name))
self.replace_in_file('{p}/settings/base.py'.format(p=self.project_name),
u" # ('Your Name', 'your_email@example.com'),",
u" ('{}', '{}'),".format(self.substitutes()['author'],
self.substitutes()['author_email']))
|
tjsavage/rototutor_djangononrel
|
django/forms/fields.py
|
Python
|
bsd-3-clause
| 31,759
| 0.026575
|
"""
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, \
ClearableFileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, \
DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget, \
FILE_INPUT_CONTRADICTION
__all__ = (
'Field', 'CharField', 'IntegerField',
'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField', 'TimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
DeprecationWarning
)
return getattr(formats, name)
DEFAULT_DATE_INPUT_FORMATS = lazy(lambda: en_format('DATE_INPUT_FORMATS'), tuple, list)()
DEFAULT_TIME_INPUT_FORMATS = lazy(lambda: en_format('TIME_INPUT_FORMATS'), tuple, list)()
DEFAULT_DATETIME_INPUT_FORMATS = lazy(lambda: en_format('DATETIME_INPUT_FORMATS'), tuple, list)()
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(max_length))
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the inpu
|
mitsuhiko/django
|
tests/regressiontests/forms/localflavor/cz.py
|
Python
|
bsd-3-clause
| 3,835
| 0.001825
|
import warnings
from django.contrib.localflavor.cz.forms import (CZPostalCodeField,
CZRegionSelect, CZBirthNumberField, CZICNumberField)
from django.core.exceptions import ValidationError
from utils import LocalFlavorTestCase
class CZLocalFlavorTests(LocalFlavorTestCase):
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
module='django.contrib.localflavor.cz.forms'
)
def tearDown(self):
self.restore_warnings_state()
def test_CZRegionSelect(self):
f = CZRegionSelect()
out = u'''<select name="regions">
<option value="PR">Prague</option>
<option value="CE">Central Bohemian Region</option>
<option value="SO">South Bohemian Region</option>
<option value="PI">Pilsen Region</option>
<option value="CA">Carlsbad Region</option>
<option value="US">Usti Region</option>
<option value="LB">Liberec Region</option>
<option value="HK">Hradec Region</option>
<option value="PA">Pardubice Region</option>
<option value="VY">Vysocina Region</option>
<option value="SM">South Moravian Region</option>
<option value="OL">Olomouc Region</option>
<option value="ZL">Zlin Region</option>
<option value="MS">Moravian-Silesian Region</option>
</select>'''
self.assertEqual(f.render('regions', 'TT'), out)
def test_CZPostalCodeField(self):
error_format = [u'Enter a postal code in the format XXXXX or XXX XX.']
valid = {
'91909': '91909',
'917 01': '91701',
'12345': '12345',
}
invalid = {
'84545x': error_format,
'123456': error_format,
'1234': error_format,
'123 4': error_format,
}
self.assertFieldOutput(CZPostalCodeField, valid, invalid)
def test_CZBirthNumberField(self):
error_format = [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
error_invalid = [u'Enter a valid birth number.']
valid = {
'880523/1237': '880523/1237',
'8805231237': '8805231237',
'880523/000': '880523/000',
'880523000': '880523000',
'882101/0011': '882101/0011',
}
invalid = {
'123456/12': error_format,
'123456/12345': error_format,
'12345612': error_format,
'12345612345': error_format,
'880523/1239': error_invalid,
'8805231239': error_invalid,
'990101/0011': error_invalid,
}
self.assertFieldOutput(CZBirthNumberField, valid, invalid)
# These tests should go away in 1.4.
# http://code.djangoproject.com/ticket/14593
f = CZBirthNumberField()
self.assertEqual(f.clean('880523/1237', 'm'), '880523/1237'),
self.assertEqual(f.clean('885523/1231', 'f'), '885523/1231')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '881523/0000', 'm')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '885223/0000', 'm')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '881523/0000', 'f')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '885223/0000', 'f')
def test_CZICNumberField(self):
error_invalid = [u'Enter a valid IC number.']
valid = {
'12345679': '12345679',
'12345601': '12345601',
'12345661': '12345661',
'12345610': '12345610',
}
invalid = {
'1234567': error_invalid,
'12345660': error_invalid,
'12345600': error_invalid,
}
self.assertFieldOutput(CZICNumberField, valid, invalid)
|
KhronosGroup/COLLADA-CTS
|
StandardDataSets/1_5/collada/library_kinematics_model/kinematics_model/asset/unit/unit.py
|
Python
|
mit
| 3,954
| 0.006829
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_kinematics_models', 'kinematics_model', 'asset', 'unit']
attrName = 'meter'
attrVal = '1'
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeKinematicsBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeKinematicsSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeKinematicsExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
# IS UNIT ALLOWED TO TRANSFORM? IF SO, WHAT TO TEST FOR?
self.__assistant.AttributeCheck(context, self.tagList, self.attrName, self.attrVal, True, False)
self.status_exemplary = self.__assistant.DeferJudgement(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
alshedivat/tensorflow
|
tensorflow/python/kernel_tests/py_func_test.py
|
Python
|
apache-2.0
| 22,732
| 0.012234
|
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import re
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def np_func(x, y):
return np.sinh(x) + np.cosh(y)
def matmul(x, y):
return math_ops.matmul(x, y)
class PyFuncTest(test.TestCase):
"""Encapsulates tests for py_func and eager_py_func."""
# ----- Tests for py_func -----
def testRealDataTypes(self):
def sum_func(x, y):
return x + y
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.uint8, dtypes.int8, dtypes.uint16, dtypes.int16,
dtypes.int32, dtypes.int64]:
with self.cached_session():
x = constant_op.constant(1, dtype=dtype)
y = constant_op.constant(2, dtype=dtype)
z = self.evaluate(script_ops.py_func(sum_func, [x, y], dtype))
self.assertEqual(z, 3)
def testComplexDataTypes(self):
def sub_func(x, y):
return x - y
for dtype in [dtypes.complex64, dtypes.complex128]:
with self.cached_session():
x = constant_op.constant(1 + 1j, dtype=dtype)
y = constant_op.constant(2 - 2j, dtype=dtype)
z = self.evaluate(script_ops.py_func(sub_func, [x, y], dtype))
self.assertEqual(z, -1 + 3j)
def testBoolDataTypes(self):
def and_func(x, y):
return x and y
dtype = dtypes.bool
with self.cached_session():
x = constant_op.constant(True, dtype=dtype)
y = constant_op.constant(False, dtype=dtype)
z = self.evaluate(script_ops.py_func(and_func, [x, y], dtype))
self.assertEqual(z, False)
def testSingleType(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.float32))
self.assertEqual(z, np_func(1.0, 2.0).astype(np.float32))
def testScalar(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(
script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
def testArray(self):
with self.cached_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64)
y = constant_op.constant([2.0, 3.0], dtypes.float64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
self.assertAllEqual(z[0],
np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
def testComplexType(self):
with self.cached_session():
x = constant_op.constant(1 + 2j, dtypes.complex64)
y = constant_op.constant(3 + 4j, dtypes.complex64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.complex64))
self.assertAllClose(z, np_func(1 + 2j, 3 + 4j))
def testRFFT(self):
with self.cached_session():
x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y = self.evaluate(script_ops.py_func(rfft, [x], dtypes.complex64))
self.assertAllClose(y, np.fft.rfft([1., 2., 3., 4.]))
def testPythonLiteral(self):
with self.cached_session():
def literal(x):
return 1.0 if float(x) == 0.0 else 0.0
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(script_ops.py_func(literal, [x], dtypes.float64))
self.assertAllClose(y, 1.0)
def testList(self):
with self.cached_session():
def list_func(x):
return [x, x + 1]
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(list_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
def testTuple(self):
# returns a tuple
with self.cached_session():
def tuple_func(x):
return x, x + 1
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
# returns a tuple, Tout and inp a tuple
with self.cached_session():
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, (x,),
(dtypes.float64, dtypes.float64)))
self.assertAllClose(y, [0.0, 1.0])
def testStrings(self):
def read_fixed_length_numpy_strings():
return np.array([b" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant([b"hello", b"hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
def testStringsAreConvertedToBytes(self):
def read_fixed_length_numpy_strings():
return np.array([" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
def testObjectArraysAreConvertedToBytes(self):
def read_object_array():
return np.array([b" there", u" ya"], dtype=np.object)
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y, = script_ops.py_func(read_object_array, [],
[dtypes.string])
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def testStringPaddingAreConvertedToBytes(self):
inp = ["this", "is", "a", "test"]
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def
|
luckiestlindy/osproject
|
booker/migrations/0026_remove_event_expected_guests.py
|
Python
|
gpl-3.0
| 403
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 05:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('booker', '0025_event_wedding_options'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='expected_guests',
),
]
|
A425/django-nadmin
|
nadmin/plugins/aggregation.py
|
Python
|
mit
| 2,408
| 0.002076
|
from django.db.models import FieldDoesNotExist, Avg, Max, Min, Count, Sum
from django.utils.translation import ugettext as _
from nadmin.sites import site
from nadmin.views import BaseAdminPlugin, ListAdminView
from nadmin.views.list import ResultRow, ResultItem
from nadmin.util import display_for_field
AGGREGATE_METHODS = {
'min': Min, 'max': Max, 'avg': Avg, 'sum': Sum, 'count': Count
}
AGGREGATE_TITLE = {
'min': _('Min'), 'max': _('Max'), 'avg': _('Avg'), 'sum': _('Sum'), 'count': _('Count')
}
class AggregationPlugin(BaseAdminPlugin):
aggregate_fields = {}
def init_request(self, *args, **kwargs):
return bool(self.aggregate_fields)
def _get_field_aggregate(self, field_name, obj, row):
item = ResultItem(field_name, row)
item.classes = ['aggregate', ]
if field_name not in self.aggregate_fields:
item.text = ""
else:
try:
f = self.opts.get_field(field_name)
agg_method = self.aggregate_fields[field_name]
key = '%s__%s' % (field_name, agg_method)
if key not in obj:
item.text = ""
else:
item.text = display_for_field(obj[key], f)
item.wraps.append('%%s<span class="aggregate_title label label-info">%s</span>' % AGGREGATE_TITLE[agg_method])
item.classes.append(agg_method)
except FieldDoesNotExist:
item.text = ""
return item
def _get_aggregate_row(self):
queryset = self.admin_view.list_queryset._clone()
obj = queryset.aggregate(*[AGGREGATE_METHODS[method](field_name) for field_name, method in
                                           self.aggregate_fields.items() if method in AGGREGATE_METHODS])
row = ResultRow()
row['is_display_first'] = False
row.cells = [self._get_field_aggregate(field_name, obj, row) for field_name in self.admin_view.list_display]
row.css_class = 'info aggregate'
return row
def results(self, rows):
if rows:
rows.append(self._get_aggregate_row())
return rows
# Media
def get_media(self, media):
        media.add_css({'screen': [self.static(
'nadmin/css/nadmin.plugin.aggregation.css'), ]})
return media
site.register_plugin(AggregationPlugin, ListAdminView)
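# Hypothetical usage sketch (not part of django-nadmin): an admin options class
# enables the plugin simply by declaring aggregate_fields, mapping column names
# to one of the methods in AGGREGATE_METHODS. The model and field names below
# are made up.
#
#     class OrderAdmin(object):
#         list_display = ('name', 'price', 'quantity')
#         aggregate_fields = {'price': 'avg', 'quantity': 'sum'}
#
#     site.register(Order, OrderAdmin)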
|
edx/edx-platform
|
common/lib/xmodule/xmodule/contentstore/utils.py
|
Python
|
agpl-3.0
| 1,571
| 0.001273
|
# lint-amnesty, pylint: disable=missing-module-docstring
from xmodule.contentstore.content import StaticContent
from .django import contentstore
def empty_asset_trashcan(course_locs):
'''
This method will hard delete all assets (optionally within a course_id) from the trashcan
'''
store = contentstore('trashcan')
for course_loc in course_locs:
# first delete all of the thumbnails
thumbs = store.get_all_content_thumbnails_for_course(course_loc)
for thumb in thumbs:
print(f"Deleting {thumb}...")
store.delete(thumb['asset_key'])
# then delete all of the assets
assets, __ = store.get_all_content_for_course(course_loc)
for asset in assets:
print(f"Deleting {asset}...")
store.delete(asset['asset_key'])
def restore_asset_from_trashcan(location):
'''
This method will restore an asset which got soft deleted and put back in the original course
'''
trash = contentstore('trashcan')
store = contentstore()
loc = StaticContent.get_location_from_path(location)
content = trash.find(loc)
# ok, save the content into the courseware
store.save(content)
# see if there is a thumbnail as well, if so move that as well
if content.thumbnail_location is not None:
try:
            thumbnail_content = trash.find(content.thumbnail_location)
store.save(thumbnail_content)
except Exception: # lint-amnesty, pylint: disable=broad-except
pass # OK if this is left dangling
|
yasushiyy/awr2csv
|
awrtext2csv102.py
|
Python
|
mit
| 21,131
| 0.007668
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Create CSV from AWR text reports
#
# Tested Versions
# 10.2.0.1 RAC
# 10.2.0.4 RAC
#
# Note
# extracts values specified with "===" sequence.
#
######################################################################################################
t = {}
t['Load Profile'] = ('load_profile.csv', 'Name,Per Second,Per Transaction', '=========================== =============== ===============')
t['Buffer_Hit'] = ('buffer_hit.csv', 'Buffer Hit %', ' =======')
t['Events_TopN'] = ('events_topn.csv', 'Event,Waits,Time(s),Avg Wait(ms),% Total Call Time,Wait Class', '============================== ============ =========== ====== ====== ==========')
t['Inst_Stats'] = ('inst_stats.csv', 'Statistic,Total,per Second,per Trans', '================================ ================== ============== =============')
t['PGA_Aggr'] = ('pga_aggr_stats.csv', 'B or E,PGA Aggr Target(M),Auto PGA Target(M),PGA Mem Alloc(M),W/A PGA Used(M),%PGA W/A Mem,%Auto W/A Mem,%Man W/A Mem,Global Mem Bound(K)', '= ========== ========== ========== ========== ====== ====== ====== ==========')
t['GlobalLP'] = ('load_profile_g.csv', 'Name,Per Second,Per Transaction', '============================== =============== ===============')
t['GlobalEP'] = ('efficiency_g.csv', 'Name,Value', '============================== =======')
t['SQL_Elapsed'] = ('sql_elapsed.csv', 'Elapsed Time (s),CPU Time (s),Executions,Elap per Exec (s),%Total,SQL Id,SQL Module', '========== ========== ============ ========== ======= =============')
t['SQL_CPU'] = ('sql_cpu.csv', 'CPU Time (s),Elapsed Time (s),Executions,CPU per Exec (s),%Total,SQL Id,SQL Module', '========== ========== ============ =========== ======= =============')
t['SQL_Gets'] = ('sql_gets.csv', 'Buffer Gets,Executions,Gets per Exec,%Total,CPU Time (s),Elapsed Time (s),SQL Id,SQL Module', '============== ============ ============ ====== ======== ========= =============')
t['SQL_Reads'] = ('sql_reads.csv', 'Physical Reads,Executions,Reads per Exec,%Total,CPU Time (s),Elapsed Time (s),SQL Id,SQL Module', '============== =========== ============= ====== ======== ========= =============')
t['SQL_Cluster'] = ('sql_cluster.csv', 'Cluster Wait Time (s),CWT % of Elapsed Time,Elapsed Time (s),CPU Time (s),Executions,SQL Id,SQL Module', '============ =========== =========== =========== ============== =============')
#####################################################################################################
import codecs
import glob
import os
import re
import sys
from datetime import datetime
##### extract
##### ===== ====== ======
##### aaaaa 123 12.3 4,567 -> ['aaaaa', '12.3', '4567']
def line2list(line, mask):
ret = []
re_eq = re.compile(r'=+')
for x in re_eq.finditer(mask):
(b, e) = x.span()
text = line[b:e].strip().replace(',', '')
text = re.sub(r'\s+', ' ', text)
ret.append(text)
return ret
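##### worked example (hypothetical values): each run of '=' in the mask selects
##### a column span of the data line, and commas are stripped from the text
#####   mask = '=====  ======='
#####   line = 'aaaaa  1,234.5'
#####   line2list(line, mask)  ->  ['aaaaa', '1234.5']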
##### parse files
def parse(filelist):
##### common header
h_base = 'DB_NAME,DB_ID,INSTANCE_NAME,INST_NUM,B_Y,B_MO,B_D,B_H,B_MI,B_S,E_Y,E_MO,E_D,E_H,E_MI,E_S,'
##### DB name, Snaptime, SQL Module extract helper
m_dbname = '============ =========== ============ ======== ==========='
m_snaptm = ' ==================='
m_module = ' ========================================================================'
##### output
output = {}
for section in t:
(csvname, header, mask) = t[section]
output[csvname] = [h_base + header]
##### iterate over files
for filename in filelist:
print('Processing {0}...'.format(filename))
        db_ver = ''    # DB Version
section = '' # section Name
l_base = [] # report-specific info (list)
d_base = '' # report-specific info (string)
b_data = False # begin data
l_data = [] # section-specific data (list)
##### iterate over lines
for line in open(filename, 'r'):
if section in t:
(csvname, header, mask) = t[section]
##### DB Name
# ============ =========== ============ ======== ===========
# DB Name DB Id Instance Inst Num Release RAC Host
# ------------ ----------- ------------ -------- ----------- --- ------------
# DB0 9901230123 DB01 1 10.2.0.1.0 YES host1
#
if line.startswith('DB Name'):
section = 'DB Name'
elif section == 'DB Name':
if not line.startswith('---'):
|
l_line = line2list(line, m_dbname)
l_base = l_line[:4]
db_ver = l_line[4]
print(' DB Version: ' + db_ver)
section = ''
##### Snap Time
# ===================
# Snap Id Snap Time Sessions Curs/Sess
# --------- ------------------- -------- ---------
            # Begin Snap:      3726 16-2月 -13 05:00:50       640        .1
# End Snap: 3727 16-2月 -13 06:00:16 672 .2
# Elapsed: 59.43 (mins)
# DB Time: 25.21 (mins)
#
elif line.startswith('Begin Snap:') or line.startswith(' End Snap:'):
dt = datetime.strptime(line2list(line, m_snaptm)[0], '%d-%b-%y %H:%M:%S')
l_base.extend(str(x) for x in (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second))
d_base = ','.join(l_base) + ','
##### Load Profile
#
# =========================== =============== ===============
# Load Profile
# ~~~~~~~~~~~~ Per Second Per Transaction
# --------------- ---------------
# Redo size: 68,225.00 12,794.53
# Logical reads: 19,994.77 3,749.71
# Block changes: 222.80 41.78
# Physical reads: 11.35 2.13
#
# <EOS>
#
#
elif line.startswith('Load Profile'):
section = 'Load Profile'
elif section == 'Load Profile':
##### blank line => section end
if len(line.strip()) == 0:
section = ''
b_data = False
l_data = []
##### begin data
elif line.startswith(' ---------------'):
b_data = True
##### extract data
elif b_data:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
##### Instance Efficiency Percentages
#
# =======
# Instance Efficiency Percentages (Target 100%)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Buffer Nowait %: 100.00 Redo NoWait %: 100.00
# Buffer Hit %: 48.43 In-memory Sort %: 100.00
# Library Hit %: 97.00 Soft Parse %: 94.66
# Execute to Parse %: 78.61 Latch Hit %: 99.99
# Parse CPU to Parse Elapsd %: 26.97 % Non-Parse CPU: 97.16
#
elif line.startswith(' Buffer Hit %'):
section = 'Buffer_Hit'
|
iansprice/wagtail
|
wagtail/wagtailredirects/tests.py
|
Python
|
bsd-3-clause
| 23,927
| 0.003679
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, Site
from wagtail.wagtailredirects import models
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'test.example.com', 'other.example.com'])
class TestRedirects(TestCase):
fixtures = ['test.json']
def test_path_normalisation(self):
# Shortcut to normalise function (to keep things tidy)
normalise_path = models.Redirect.normalise_path
# Create a path
path = normalise_path('/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2')
        # Test against equivalent paths
self.assertEqual(path, normalise_path( # The exact same URL
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Scheme, hostname and port ignored
'http://mywebsite.com:8000/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Leading slash can be omitted
'Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Trailing slashes are ignored
'Hello/world.html/;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Fragments are ignored
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2#cool'
))
self.assertEqual(path, normalise_path( # Order of query string parameters is ignored
'/Hello/world.html;fizz=three;buzz=five?Baz=quux2&foo=Bar'
))
self.assertEqual(path, normalise_path( # Order of parameters is ignored
'/Hello/world.html;buzz=five;fizz=three?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Leading whitespace
' /Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Trailing whitespace
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2 '
))
# Test against different paths
self.assertNotEqual(path, normalise_path( # 'hello' is lowercase
'/hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # No '.html'
'/Hello/world;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Query string parameter value has wrong case
'/Hello/world.html;fizz=three;buzz=five?foo=bar&Baz=Quux2'
))
self.assertNotEqual(path, normalise_path( # Query string parameter name has wrong case
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Parameter value has wrong case
'/Hello/world.html;fizz=three;buzz=Five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Parameter name has wrong case
'/Hello/world.html;Fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Missing params
'/Hello/world.html?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # 'WORLD' is uppercase
'/Hello/WORLD.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # '.htm' is not the same as '.html'
'/Hello/world.htm;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual('/', normalise_path('/')) # '/' should stay '/'
# Normalise some rubbish to make sure it doesn't crash
normalise_path('This is not a URL')
normalise_path('//////hello/world')
normalise_path('!#@%$*')
normalise_path('C:\\Program Files (x86)\\Some random program\\file.txt')
def test_unicode_path_normalisation(self):
normalise_path = models.Redirect.normalise_path
self.assertEqual(
'/here/tésting-ünicode', # stays the same
normalise_path('/here/tésting-ünicode')
)
self.assertNotEqual( # Doesn't remove unicode characters
'/here/testing-unicode',
normalise_path('/here/tésting-ünicode')
)
def test_basic_redirect(self):
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto')
redirect.save()
# Navigate to it
response = self.client.get('/redirectme/')
# Check that we were redirected
self.assertRedirects(response, '/redirectto', status_code=301, fetch_redirect_response=False)
def test_temporary_redirect(self):
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', is_permanent=False)
redirect.save()
# Navigate to it
response = self.client.get('/redirectme/')
# Check that we were redirected temporarily
self.assertRedirects(response, '/redirectto', status_code=302, fetch_redirect_response=False)
def test_redirect_stripping_query_string(self):
# Create a redirect which includes a query string
redirect_with_query_string = models.Redirect(
old_path='/redirectme?foo=Bar', redirect_link='/with-query-string-only'
)
redirect_with_query_string.save()
# ... and another redirect without the query string
redirect_without_query_string = models.Redirect(old_path='/redirectme', redirect_link='/without-query-string')
redirect_without_query_string.save()
# Navigate to the redirect with the query string
r_matching_qs = self.client.get('/redirectme/?foo=Bar')
self.assertRedirects(r_matching_qs, '/with-query-string-only', status_code=301, fetch_redirect_response=False)
# Navigate to the redirect with a different query string
# This should strip out the query string and match redirect_without_query_string
r_no_qs = self.client.get('/redirectme/?utm_source=irrelevant')
self.assertRedirects(r_no_qs, '/without-query-string', status_code=301, fetch_redirect_response=False)
def test_redirect_to_page(self):
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
        models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page)
response = self.client.get('/xmas/', HTTP_HOST='test.example.com')
self.assertRedirects(response, 'http://test.example.com/events/christmas/', status_code=301, fetch_redirect_response=False)
    def test_redirect_from_any_site(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page)
# no site was specified on the redirect, so it should redirect regardless of hostname
response = self.client.get('/xmas/', HTTP_HOST='localhost')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
response = self.client.get('/xmas/', HTTP_HOST='other.example.com')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
def test_redirect_from_specific_site(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
other_site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page, site=other_site)
# redirect should only respond when site is other_site
|
talflon/rkivas-python
|
rkivas/config.py
|
Python
|
gpl-3.0
| 3,679
| 0.000272
|
# rkivas file backupper
# Copyright (C) 2016 Daniel Getz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import logging.config
import sys
from configparser import RawConfigParser
from io import StringIO
DEFAULT_CONFIG_FILE = '/etc/rkivas.conf'
DEFAULTS = """
[sources]
[backup]
filename-format = {source}/{timestamp:%Y-%m}/{source}-{timestamp:%Y%m%d_%H%M%S}-{hash}
hash-algorithm = md5
hash-length = 8
[backup-no-timestamp]
filename-format = {source}/unknown/{source}-{hash}
hash-algorithm = md5
hash-length = 16
[extension-map]
jpeg = jpg
tiff = tif
[extension-handlers]
jpg = exif
tif = exif
"""
class ConfigParser(RawConfigParser):
def optionxform(self, optionstr):
return optionstr
def load_config_files(opts):
cfg = ConfigParser()
cfg.read_file(StringIO(DEFAULTS))
cfg.read(opts.config_file)
return cfg
def add_default_opts(parser):
parser.add_argument(
'--config-file', default=DEFAULT_CONFIG_FILE,
help='load a particular configuration file',
metavar='FILE')
parser.add_argument(
'-L', '--logging',
choices=['DEBUG', 'WARN', 'WARNING', 'INFO', 'ERROR',
'CRITICAL', 'FATAL'],
help='log to stderr with the given LEVEL', metavar='LEVEL')
parser.add_argument(
'--debug-config', action='store_true',
help='instead of running, output the combined configuration')
parser.add_argument(
'--dry-run', action='store_true',
help="don't affect filesystem, just log what would have been done")
def config_logging(opts, cfg):
if opts.logging:
level = getattr(logging, opts.logging)
logging.basicConfig(
level=level,
format='%(asctime)s %(levelname)s %(name)s - %(message)s',
)
elif (cfg.has_section('formatters') or
cfg.has_section('handlers') or
cfg.has_section('loggers') or
cfg.has_section('logger_root')):
tmp = StringIO()
cfg.write(tmp)
tmp.seek(0)
logging.config.fileConfig(tmp, disable_existing_loggers=False)
else:
logging.basicConfig(
level=logging.WARNING,
format='%(levelname)s %(name)s - %(message)s',
)
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(True)
def load_opts_into_cfg(opts, cfg, which):
for section, options in which.items():
for cfg_key, opt_key in options.items():
value = getattr(opts, opt_key, None)
if value is not None:
cfg.set(section, cfg_key, str(value))
def load_common_config(opts):
cfg = load_config_files(opts)
if not opts.debug_config:
config_logging(opts, cfg)
load_opts_into_cfg(opts, cfg, {
'backup': {
'dry-run': 'dry_run',
}
})
return cfg
def load_config(opts, opts_spec=None):
cfg = load_common_config(opts)
if opts_spec:
load_opts_into_cfg(opts, cfg, opts_spec)
if opts.debug_config:
cfg.write(sys.stdout)
sys.exit(0)
return cfg
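# A minimal usage sketch, assuming an argparse-based command-line entry point
# (this helper is illustrative only and not part of the original module):
def example_main(argv=None):
    """Wire the helpers above together: build a parser, add the standard
    options, parse the command line, and load the merged configuration."""
    import argparse
    parser = argparse.ArgumentParser(description='rkivas example entry point')
    add_default_opts(parser)
    opts = parser.parse_args(argv)
    # merges DEFAULTS, the config file, and any command-line overrides
    return load_config(opts)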
|
cloudbase/neutron
|
neutron/tests/fullstack/test_qos.py
|
Python
|
apache-2.0
| 11,734
| 0
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.agent.linux import tc_lib
from neutron.common import utils
from neutron.services.qos import qos_consts
from neutron.tests.common.agents import l2_extensions
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.fullstack import utils as fullstack_utils
from neutron.tests.unit import testlib_api
from neutron.conf.plugins.ml2.drivers import linuxbridge as \
linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
mech_openvswitch as mech_ovs
load_tests = testlib_api.module_load_tests
BANDWIDTH_BURST = 100
BANDWIDTH_LIMIT = 500
DSCP_MARK = 16
class BaseQoSRuleTestCase(object):
of_interface = None
ovsdb_interface = None
def setUp(self):
host_desc = [environment.HostDescription(
l3_agent=False,
of_interface=self.of_interface,
ovsdb_interface=self.ovsdb_interface,
l2_agent_type=self.l2_agent_type)]
env_desc = environment.EnvironmentDescription(qos=True)
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _create_qos_policy(self):
return self.safe_client.create_qos_policy(
self.tenant_id, 'fs_policy', 'Fullstack testing policy',
shared='False')
def _prepare_vm_with_qos_policy(self, rule_add_functions):
qos_policy = self._create_qos_policy()
qos_policy_id = qos_policy['id']
port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[0].hostname,
qos_policy_id)
for rule_add in rule_add_functions:
rule_add(qos_policy)
vm = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=port))
return vm, qos_policy
class _TestBwLimitQoS(BaseQoSRuleTestCase):
def _wait_for_bw_rule_removed(self, vm):
# No values are provided when port doesn't have qos policy
self._wait_for_bw_rule_applied(vm, None, None)
def _add_bw_limit_rule(self, limit, burst, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, limit, burst)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_bw_limit_qos_policy_rule_lifecycle(self):
new_limit = BANDWIDTH_LIMIT + 100
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST)])
bw_rule = qos_policy['rules'][0]
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
qos_policy_id = qos_policy['id']
self.client.delete_bandwidth_limit_rule(bw_rule['id'], qos_policy_id)
self._wait_for_bw_rule_removed(vm)
# Create new rule with no given burst value, in such case ovs and lb
# agent should apply burst value as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
new_expected_burst = int(
new_limit * qos_consts.DEFAULT_BURST_RATE
)
new_rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, new_limit)
self._wait_for_bw_rule_applied(vm, new_limit, new_expected_burst)
# Update qos policy rule id
self.client.update_bandwidth_limit_rule(
new_rule['id'], qos_policy_id,
body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
'max_burst_kbps': BANDWIDTH_BURST}})
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_bw_rule_removed(vm)
class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_OVS
scenarios = fullstack_utils.get_ovs_interface_scenarios()
def _wait_for_bw_rule_applied(self, vm, limit, burst):
utils.wait_until_true(
lambda: vm.bridge.get_egress_bw_limit_for_port(
vm.port.name) == (limit, burst))
class TestBwLimitQoSLinuxbridge(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE
def _wait_for_bw_rule_applied(self, vm, limit, burst):
port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
vm.neutron_port['id'])
tc = tc_lib.TcCommand(
port_name,
linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
namespace=vm.host.host_namespace
)
utils.wait_until_true(
lambda: tc.get_filters_bw_limits() == (limit, burst))
class TestDscpMarkingQoSOvs(BaseQoSRuleTestCase, base.BaseFullStackTestCase):
scenarios = fullstack_utils.get_ovs_interface_scenarios()
l2_agent_type = constants.AGENT_TYPE_OVS
    def setUp(self):
host_desc = [
environment.HostDescription(
l3_agent=False,
of_interface=self.of_interface,
ovsdb_interface=self.ovsdb_interface,
l2_agent_type=self.l2_agent_type
) for _ in range(2)]
env_desc = environment.EnvironmentDescription(
qos=True)
|
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark):
l2_extensions.wait_until_dscp_marking_rule_applied(
vm.bridge, vm.port.name, dscp_mark)
def _wait_for_dscp_marking_rule_removed(self, vm):
self._wait_for_dscp_marking_rule_applied(vm, None)
def _add_dscp_rule(self, dscp_mark, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, dscp_mark)
# Make it consistent with GET re
|
kawamon/hue
|
desktop/core/ext-py/pyasn1-modules-0.2.6/tests/test_rfc5649.py
|
Python
|
apache-2.0
| 1,730
| 0.001156
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5649
try:
import unittest2 as unittest
except ImportError:
import unittest
class AESKeyWrapTestCase(unittest.TestCase):
kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
def setUp(self):
self.asn1Spec = rfc5649.AlgorithmIdentifier()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert asn1Object[0] == rfc5649.id_aes256_wrap
assert der_encoder.encode(asn1Object) == substrate
class AESKeyWrapWithPadTestCase(unittest.TestCase):
kw_pad_alg_id_pem_text = "MAsGCWCGSAFlAwQBMA=="
    def setUp(self):
self.asn1Spec = rfc5649.AlgorithmIdentifier()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.kw_pad_alg_id_pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert asn1Object[0] == rfc5649.id_aes256_wrap_pad
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
import sys
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
alphagov/notifications-delivery
|
wsgi.py
|
Python
|
mit
| 322
| 0.003106
|
import os
from notifications_delivery.app import create_app
from credstash import getAllSecrets
# on aws get secrets and export to env
secrets = getAllSecrets(region="eu-west-1")
for key, val in secrets.items():
os.environ[key] = val
application = create_app()
if __name__ == "__main__":
    application.run()
|
Tutakamimearitomomei/Kongcoin
|
contrib/wallettools/walletunlock.py
|
Python
|
mit
| 159
| 0
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:12644")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
hzlf/openbroadcast
|
website/_notification/atomformat.py
|
Python
|
gpl-3.0
| 22,948
| 0.009936
|
#
# django-atompub by James Tauber <http://jtauber.com/>
# http://code.google.com/p/django-atompub/
# An implementation of the Atom format and protocol for Django
#
# For instructions on how to use this module to generate Atom feeds,
# see http://code.google.com/p/django-atompub/wiki/UserGuide
#
#
# Copyright (c) 2007, James Tauber
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from xml.sax.saxutils import XMLGenerator
from datetime import datetime
import urlparse
GENERATOR_TEXT = 'django-atompub'
GENERATOR_ATTR = {
'uri': 'http://code.google.com/p/django-atompub/',
'version': 'r33'
}
## based on django.utils.xmlutils.SimplerXMLGenerator
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None: attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
## based on django.utils.feedgenerator.rfc3339_date
def rfc3339_date(date):
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
## based on django.utils.feedgenerator.get_tag_uri
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2004/05/28/howto-atom-id"
parts = urlparse.urlparse(url)
date_part = ""
if date is not None:
date_part = ",%s:" % date.strftime("%Y-%m-%d")
return "tag:%s%s%s/%s" % (
|
parts.hostname,
date_part,
parts.path,
parts.fragment,
)
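# A worked illustration of the tag URI scheme above (hypothetical URL and date,
# not part of django-atompub):
def _example_tag_uri():
    # For url='http://example.com/blog/post' and date=datetime(2007, 5, 28) the
    # result is 'tag:example.com,2007-05-28:/blog/post/' (empty fragment).
    return get_tag_uri('http://example.com/blog/post', datetime(2007, 5, 28))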
## based on django.contrib.syndication.feeds.Feed
class Feed(object):
VALIDATE = True
def __init__(self, slug, feed_url):
# @@@ slug and feed_url are not used yet
pass
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
        if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def get_feed(self, extra_params=None):
if extra_params:
try:
obj = self.get_object(extra_params.split('/'))
except (AttributeError, LookupError):
raise LookupError('Feed does not exist')
else:
obj = None
feed = AtomFeed(
atom_id = self.__get_dynamic_attr('feed_id', obj),
title = self.__get_dynamic_attr('feed_title', obj),
updated = self.__get_dynamic_attr('feed_updated', obj),
icon = self.__get_dynamic_attr('feed_icon', obj),
logo = self.__get_dynamic_attr('feed_logo', obj),
rights = self.__get_dynamic_attr('feed_rights', obj),
subtitle = self.__get_dynamic_attr('feed_subtitle', obj),
authors = self.__get_dynamic_attr('feed_authors', obj, default=[]),
categories = self.__get_dynamic_attr('feed_categories', obj, default=[]),
contributors = self.__get_dynamic_attr('feed_contributors', obj, default=[]),
links = self.__get_dynamic_attr('feed_links', obj, default=[]),
extra_attrs = self.__get_dynamic_attr('feed_extra_attrs', obj),
hide_generator = self.__get_dynamic_attr('hide_generator', obj, default=False)
)
items = self.__get_dynamic_attr('items', obj)
if items is None:
raise LookupError('Feed has no items field')
for item in items:
feed.add_item(
atom_id = self.__get_dynamic_attr('item_id', item),
title = self.__get_dynamic_attr('item_title', item),
updated = self.__get_dynamic_attr('item_updated', item),
content = self.__get_dynamic_attr('item_content', item),
published = self.__get_dynamic_attr('item_published', item),
rights = self.__get_dynamic_attr('item_rights', item),
source = self.__get_dynamic_attr('item_source', item),
summary = self.__get_dynamic_attr('item_summary', item),
authors = self.__get_dynamic_attr('item_authors', item, default=[]),
categories = self.__get_dynamic_attr('item_categories', item, default=[]),
contributors = self.__get_dynamic_attr('item_contributors', item, default=[]),
links = self.__get_dynamic_attr('item_links', item, default=[]),
extra_attrs = self.__get_dynamic_attr('item_extra_attrs', None, default={}),
)
if self.VALIDATE:
feed.validate()
return feed
class ValidationError(Exception):
pass
## based on django.utils.feedgenerator.SyndicationFeed and django.utils.feedgenerator.Atom1Feed
class AtomFeed(object):
mime_type = 'application/atom+xml'
ns = u'http://www.w3.org/2005/Atom'
def __init__(self, atom_id, title, updated=None, icon=None, logo=None, rights=None, subtitle=None,
authors=[], categories=[], contributors=[], links=[], extra_attrs={}, hide_generator=False):
if atom_id is None:
raise LookupError('Feed has no feed_id field')
if title is None:
raise LookupError('Feed has no feed_title field')
# if updated == None, we'll calculate it
self.feed = {
'id': atom_id,
'title': title,
'updated': updated,
'icon': icon,
'logo': logo,
'rights': rights,
'subtitle': subtitle,
'authors': authors,
'categories': categories,
'contributors': contributors,
'links': links,
'extra_attrs': extra_attrs,
'hide_generator': hide_generator,
}
self.items = []
def add_item(self, atom_id, title, updated, content=None, published=None, rights=None, source=None, summary=None,
authors=[], categories=[], contributors=[], links=[], extra_attrs={}):
if atom_id is None:
raise LookupError('Feed has no item_id method')
if title is None:
raise LookupError('Feed has no item_title method')
if updated is None:
raise LookupError('Feed has no item_updated method')
self.items.append({
'id': atom_id,
'title': title,
'updated': updated,
'content': content,
'published': published,
'rights': rights,
'source': source,
'summary': summary,
'authors': authors,
|
steveb/heat
|
heat/tests/openstack/heat/test_resource_group.py
|
Python
|
apache-2.0
| 58,016
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine.resources.openstack.heat import resource_group
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as stackm
from heat.tests import common
from heat.tests import utils
template = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::Reso
|
urceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
}
}
template2 = {
"heat_template_version": "2013-05-23",
"resources": {
"dummy": {
"type": "OverwrittenFnGetRefIdType",
|
"properties": {
"Foo": "baz"
}
},
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": {"get_attr": ["dummy", "Foo"]}
}
}
}
}
}
}
template_repl = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_%index%",
"listprop": [
"%index%_0",
"%index%_1",
"%index%_2"
]
}
}
}
}
}
}
template_attr = {
"heat_template_version": "2014-10-16",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithComplexAttributesType",
"properties": {
}
}
}
}
},
"outputs": {
"nested_strings": {
"value": {"get_attr": ["group1", "nested_dict", "string"]}
}
}
}
class ResourceGroupTest(common.HeatTestCase):
def setUp(self):
common.HeatTestCase.setUp(self)
self.m.StubOutWithMock(stackm.Stack, 'validate')
def test_assemble_nested(self):
"""Tests nested stack creation based on props.
Tests that the nested stack that implements the group is created
appropriately based on properties.
"""
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
templ = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"2": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
self.assertEqual(templ, resg._assemble_nested(['0', '1', '2']).t)
def test_assemble_nested_include(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
expect['resources']["0"]['properties'] = {"Foo": None}
self.assertEqual(
expect, resg._assemble_nested(['0'], include_all=True).t)
def test_assemble_nested_include_zero(self):
templ = copy.deepcopy(template)
templ['resources']['group1']['properties']['count'] = 0
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
}
self.assertEqual(expect, resg._assemble_nested([]).t)
def test_assemble_nested_with_metadata(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
res_def['metadata'] = {
'priority': 'low',
'role': 'webserver'
}
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {},
"metadata": {
'priority': 'low',
'role': 'webserver'
}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
def test_assemble_nested_rolling_update(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "baz"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
def test_assemble_nested_rolling_update_none(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
|
yuroller/pvaurora
|
src/config-dist.py
|
Python
|
gpl-3.0
| 152
| 0.006579
|
#!/usr/bin/env python
# encoding: utf-8
'''
pvaurora configuration file
'''
LATITUDE = 42.6
LONGITUDE = 12.9
API_KEY = "api_key_value"
SYSTEM_ID = -1
|
gautelinga/BERNAISE
|
utilities/mesh_scripts/barbell_capillary.py
|
Python
|
mit
| 1,501
| 0
|
""" barbell_capilar script. """
from common import info
import dolfin as df
import mshr
import os
from generate_mesh import MESHES_DIR, store_mesh_HDF5
import matplotlib.pyplot as plt
def description(**kwargs):
info("Generates mesh for a barbell capillary.")
def method(res=50, diameter=1., length=5., show=False, **kwargs):
'''
    Function that generates a mesh for a barbell capillary.
    Meshing method is mshr.
    Note: The generated mesh is stored in "BERNAISE/meshes/".
'''
info("Generating mesh using the mshr tool.")
inletdiameter = diameter*5.
inletlength = diameter*4.
# Define coners of "capilar"
a = df.Point(-diameter/2., -length/2-inletlength/2.)
b = df.Point(d
|
iameter/2., length/2+inletlength/2.)
capilar = mshr.Rectangle(a, b)
# Define coners of "leftbell
c = df.Point(-inletdiameter/2., -length/2-inletlength)
d = df.Point(inletdiameter/2., -length/2)
leftbell = mshr.Rectangle(c, d)
# Define coners of "rightbell"
e = df.Point(-inletdiameter/2., length/2)
f = df.Point(inletdiameter/2., length/2+inletlength)
rightbell = mshr.Rectangle(e, f)
    domain = capilar + leftbell + rightbell
mesh = mshr.generate_mesh(domain, res)
meshpath = os.path.join(MESHES_DIR,
"BarbellCapilarDolfin_d" + str(diameter) + "_l" +
str(length) + "_res" + str(res))
store_mesh_HDF5(mesh, meshpath)
if show:
df.plot(mesh)
plt.show()
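if __name__ == "__main__":
    # Hypothetical entry point (not part of the original BERNAISE script):
    # generate a coarse test mesh and display it interactively.
    method(res=20, diameter=1.0, length=5.0, show=True)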
|
tomkralidis/geonode
|
geonode/tasks/tasks.py
|
Python
|
gpl-3.0
| 4,983
| 0.000201
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db import transaction
from django.core.mail import send_mail
from celery.utils.log import get_task_logger
from geonode.celery_app import app
try:
import pylibmc
import sherlock
from sherlock import MCLock as Lock
sherlock.configure(
expire=settings.MEMCACHED_LOCK_EXPIRE,
timeout=settings.MEMCACHED_LOCK_TIMEOUT)
memcache_client = pylibmc.Client(
[settings.MEMCACHED_LOCATION],
|
binary=True)
lock_type = "MEMCACHED"
except Exception:
from django.core.cache import cache
from contextlib import contextmanager
lock_type = "MEMCACHED-LOCAL-CONTEXT"
memcache_client = None
"""
ref.
    http://docs.celeryproject.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time
"""
class Lock(object):
def __init__(self, lock_id, *args, **kwargs):
self.lock_id = lock_id
self.client = kwargs.get('client', None)
@contextmanager
def acquire(self, blocking=True):
if not blocking:
logger.warning("Non-blocking lock not currently available!")
# cache.add fails if the key already exists
from geonode.celery_app import app
status = cache.add(self.lock_id, app.oid, None)
try:
yield status
finally:
# memcache delete is very slow, but we have to use it to take
# advantage of using add() for atomic locking
if status:
# don't release the lock if we didn't acquire it
cache.delete(self.lock_id)
logger = get_task_logger(__name__)
def memcache_lock(lock_id):
logger.info(f"Using '{lock_type}' lock type.")
lock = Lock(lock_id, client=memcache_client)
return lock
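# A minimal usage sketch of the lock helper above, assuming the fallback Lock
# class defined in this file (illustrative only; not called anywhere in this
# module):
def _example_exclusive_section(lock_id='example-lock'):
    lock = memcache_lock(lock_id)
    with lock.acquire(blocking=True) as acquired:
        if acquired:
            logger.info("lock '%s' acquired; running the critical section once", lock_id)
        else:
            logger.info("lock '%s' already held elsewhere; skipping", lock_id)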
@app.task(
bind=True,
name='geonode.tasks.email.send_mail',
queue='email',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def send_email(self, *args, **kwargs):
"""
Sends an email using django's send_mail functionality.
"""
send_mail(*args, **kwargs)
@app.task(
bind=True,
name='geonode.tasks.notifications.send_queued_notifications',
queue='email',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def send_queued_notifications(self, *args):
"""Sends queued notifications.
settings.PINAX_NOTIFICATIONS_QUEUE_ALL needs to be true in order to take
advantage of this.
"""
from importlib import import_module
notifications = getattr(settings, 'NOTIFICATIONS_MODULE', None)
if notifications:
engine = import_module(f"{notifications}.engine")
send_all = getattr(engine, 'send_all')
# Make sure application can write to location where lock files are stored
if not args and getattr(settings, 'NOTIFICATION_LOCK_LOCATION', None):
send_all(settings.NOTIFICATION_LOCK_LOCATION)
else:
send_all(*args)
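# Hypothetical settings sketch for the behaviour described in the docstring
# above (values are made up): queue notifications instead of sending them
# inline, and point the engine at a writable lock directory.
#
#     PINAX_NOTIFICATIONS_QUEUE_ALL = True
#     NOTIFICATION_LOCK_LOCATION = '/var/lock/geonode-notifications'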
@app.task(
bind=True,
name='geonode.tasks.layers.set_permissions',
queue='update',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def set_permissions(self, permissions_names, resources_names,
users_usernames, groups_names, delete_flag):
from geonode.layers.utils import set_layers_permissions
with transaction.atomic():
for permissions_name in permissions_names:
set_layers_permissions(
permissions_name,
resources_names,
users_usernames,
groups_names,
delete_flag,
verbose=True
)
|
octopus-platform/bjoern
|
python/bjoern-tools/bjoern/plugins/vsa.py
|
Python
|
gpl-3.0
| 414
| 0
|
from octopus.plugins.plugin import OctopusPlugin
class VSA(OctopusPlugin):
def __init__(self, executor):
        super().__init__(executor)
self._pluginname = 'vsa.jar'
self._classname = 'bjoern.plugins.vsa.VSAPlugin'
def __setattr__(self, key, value):
if key == "project":
|
self._settings["database"] = value
else:
super().__setattr__(key, value)
|
qsnake/gpaw
|
doc/exercises/surface/surface.agts.py
|
Python
|
gpl-3.0
| 180
| 0.005556
|
def agts(queue):
    al = queue.add('surface.agts.py')
queue.add('work_function.py', ncpus=1, deps=[al])
if __name__ == '__main__':
execfile('Al100.py', {'k': 6, 'N': 5})
|
SlideAtlas/SlideAtlas-Server
|
slideatlas/models/common/multiple_database_model_document.py
|
Python
|
apache-2.0
| 5,469
| 0.002377
|
# coding=utf-8
from flask import g
from mongoengine.connection import get_db
from .model_document import ModelDocument, ModelQuerySet
################################################################################
__all__ = ('MultipleDatabaseModelDocument',)
################################################################################
class MultipleDatabaseModelQuerySet(ModelQuerySet):
def __init__(self, document, collection):
# make a local copy of the Document class for this QuerySet, to prevent
# database, so that new attributes can be set on it
new_document = self._copy_class(document)
# this copies what may be class-level attributes from 'document',
# to instance-level attributes on 'new_document', freezing them
current_db_alias = document._get_db_alias()
new_document._get_db_alias = staticmethod(lambda: current_db_alias)
current_collection = document._get_collection()
new_document._get_collection = staticmethod(lambda: current_collection)
super(MultipleDatabaseModelQuerySet, self).__init__(new_document, collection)
@staticmethod
def _copy_class(cls):
# TODO: move this to a common utils
new_cls_dict = dict(cls.__dict__)
new_cls_dict['meta'] = new_cls_dict.pop('_meta')
return type(cls.__name__, cls.__bases__, new_cls_dict)
class MultipleDatabaseModelDocument(ModelDocument):
"""
An abstract class for documents that may reside in one of multiple databases.
"""
# TODO: prevent this class from being instantiated directly
meta = {
'abstract': True,
'allow_inheritance': False,
'db_alias': None, # this shouldn't actually be used
'queryset_class': MultipleDatabaseModelQuerySet,
'auto_create_index': False, # don't change; see '_get_collection' for why this is set
}
@property
def database(self):
# the import is required here to prevent circular imports
# TODO: remove this import statement
from ..image_store import MultipleDatabaseImageStore
return MultipleDatabaseImageStore.objects.with_id(self._db_alias)
@classmethod
def _get_db_alias(cls):
"""
Helper method to provide the current database, as set by a
MultipleDatabaseImageStore context manager.
This would be better as a property, but Python has poor support for
classmethod descriptors, particularly with mutators.
"""
try:
return g.multiple_database_connection_aliases[-1]
except (AttributeError, IndexError):
            raise NotImplementedError('A "%s" must be used inside a "MultipleDatabaseImageStoreMixin" context (\'with\' statement).' % cls.__name__)
@classmethod
def _get_db(cls):
"""
Overrides the Document._get_collection classmethod.
This will only be called on class instances, as instantiated objects
have this method patched by 'self.switch_db'.
"""
return get_db(cls._get_db_alias())
@classmethod
def _get_collection(cls):
"""
Overrides the 'Document._get_collection' classmethod.
This method attempts to provide some degree of caching, preventing a
new collection from having to be created on every access, while still
allowing the database to change.
Unlike for databases, MongoEngine doesn't store an internal cache for
multiple collections per class, so one is created here, and used
instead of the single '_collection' cache.
This will only be called on class instances, as instantiated objects
have this method patched by 'self.switch_db'.
"""
if issubclass(MultipleDatabaseModelDocument, cls):
# setting the '_collections' property on one of the common base
# classes would prevent the derived classes from having their own
# seperate instances of the property
raise NotImplementedError('"_get_collection" should only be called on concrete model classes.')
if not hasattr(cls, '_collections'):
cls._collections = dict()
db_alias = cls._get_db_alias()
try:
cls._collection = cls._collections[db_alias]
except KeyError:
cls._collection = None
# 'cls._collection' is set as a side effect of the superclass
# '_get_collection'
cls._collections[db_alias] = super(MultipleDatabaseModelDocument, cls)._get_collection()
# unless meta['auto_create_index'] is false, the superclass
# '_get_collection' will attempt to call 'ensure_indexes', which
# in turn calls '_get_collection', leading to infinite recursion
# so, wait until the necessary '_collection' / '_collections' values
# are set after the return, and only then call 'ensure_indexes'
cls.ensure_indexes()
return cls._collection
def __init__(self, *args, **kwargs):
super(MultipleDatabaseModelDocument, self).__init__(*args, **kwargs)
# make the new database persistent to this instance
# cls_db_alias = type(self)._get_db_alias()
        cls_db_alias = self._get_db_alias()
self._db_alias = cls_db_alias # save the value for use in the 'database' property
        self.switch_db(cls_db_alias)  # this patches over 'self._get_db'
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/test/functional/wrong_import_position.py
|
Python
|
apache-2.0
| 777
| 0.006435
|
"""Checks im
|
port order rule"""
# pylint: disable=unused-import,relative-import,ungrouped-imports,wrong-import-order
# pylint: disable=import-error, too-few-public-methods, missing-docstring,using-constant-test
import os.path
if True:
from astroid import are_exclusive
try:
import sys
except ImportError:
class Myclass(object):
"""docstring"""
if sys.version_info[0] == 3:
from collections import OrderedDict
else:
class OrderedDict(object):
|
"""Nothing to see here."""
def some_func(self):
pass
import six # [wrong-import-position]
CONSTANT = True
import datetime # [wrong-import-position]
VAR = 0
for i in range(10):
VAR += i
import scipy # [wrong-import-position]
import astroid # [wrong-import-position]
|
hoechenberger/psychopy
|
psychopy/experiment/utils.py
|
Python
|
gpl-3.0
| 1,018
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
"""Utility functions to support Experiment classes
"""
import re
# this needs to be accessed from __str__ method of Param
scriptTarget = "PsychoPy"
# predefine some regex's; deepcopy complains if do in NameSpace.__init__()
unescapedDollarSign_re = re.compile(r"^\$|[^\\]\$") # detect "code wanted"
valid_var_re = re.compile(r"^[a-zA-Z_][\w]*$") # filter for legal var names
nonalphanumeric_re = re.compile(r'\W') # will match all bad var name chars
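# Hypothetical illustrations of the filters above (not part of PsychoPy):
#   unescapedDollarSign_re.search("$height")    -> match ("code wanted")
#   unescapedDollarSign_re.search("\\$5.00")    -> None (the "$" is escaped)
#   valid_var_re.match("trial_2")               -> match (legal variable name)
#   nonalphanumeric_re.sub("_", "my var!")      -> "my_var_"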
class CodeGenerationException(Exception):
"""
Exception thrown by a component when it is unable to generate its code.
"""
def __init__(self, source, message=""):
super(CodeGenerationException, self).__init__()
self.source = source
self.message = message
def __str__(self):
return "{}: ".format(self.source, self.message)
|