repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
hblancolo/ptavi-p4 | server.py | Python | gpl-2.0 | 2,750 | 0.000728 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Clase (y programa principal) para un servidor de eco en UDP simple
"""
import sys
import socketserver
import time
import json
class SIPRegisterHandler(socketserver.DatagramRequestHandler):
"""
SIP registrar handler: serves REGISTER requests over UDP and keeps a
registry of users (built on the simple UDP echo-server skeleton).
"""
dic = {} # maps username -> [{'address': ip}, {'expires': timestamp}] on REGISTER
def check_expires(self):
    """
    Scan the user registry (self.dic) and drop every entry whose
    'expires' timestamp is already in the past.
    """
    # Collect first, delete after: the dict must not change size
    # while it is being iterated.  The timestamp is re-read per user,
    # exactly as the original loop did.
    caducados = [
        usuario for usuario in self.dic
        if time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(time.time()))
        >= self.dic[usuario][1]['expires']
    ]
    for usuario in caducados:
        del self.dic[usuario]
def register2json(self):
    """
    Dump the user registry (self.dic) to 'registered.json' so it
    survives across requests/restarts.
    """
    # 'with' guarantees the file is closed even if serialization or
    # the write fails (the original leaked the handle on error).
    with open('registered.json', 'w') as fich_json:
        json.dump(self.dic, fich_json)
def json2registered(self):
    """
    If a 'registered.json' file with a stored registry exists, load it
    into self.dic; otherwise keep the current in-memory registry.
    """
    try:
        with open('registered.json', 'r') as fich_json:
            self.dic = json.load(fich_json)
    except (OSError, ValueError):
        # Missing/unreadable file or corrupt JSON: deliberately
        # best-effort — start from the registry we already have.
        # (Replaces a bare 'except:' that hid real bugs and an
        # unclosed file handle.)
        pass
def handle(self):
    """
    Per-datagram request handler.  Only acts on REGISTER requests:
    records the sender in self.dic, answers 200 OK, purges expired
    users and persists the registry.
    """
    line_str = self.rfile.read().decode('utf-8')
    list_linecontent = line_str.split()
    self.json2registered()
    if list_linecontent[0] == 'REGISTER':
        # 'REGISTER sip:user@host SIP/2.0' -> keep the part after ':'
        user = list_linecontent[1].split(':')[-1]
        ip_user = self.client_address[0]
        if list_linecontent[3] == 'Expires:':
            # Absolute expiry = now + Expires header (seconds).
            expires = time.strftime('%Y-%m-%d %H:%M:%S',
                                    time.gmtime(time.time() +
                                                int(list_linecontent[4])))
            # NOTE(review): the source dump lost indentation — the
            # registration/response are assumed to belong to the
            # Expires branch; confirm against the original repo.
            self.dic[user] = [{'address': ip_user}, {'expires': expires}]
            self.wfile.write(b"SIP/2.0 200 OK\r\n\r\n")
    self.check_expires()
    self.register2json()
# Entry point: serve SIP REGISTER requests on the UDP port given as
# argv[1].  NOTE(review): no argument validation — a missing or
# non-numeric port raises IndexError/ValueError here.
if __name__ == "__main__":
serv = socketserver.UDPServer(('', int(sys.argv[1])), SIPRegisterHandler)
print("Lanzando servidor UDP de eco...")
try:
serv.serve_forever()
except KeyboardInterrupt:
print("Finalizado servidor")
|
class Solution:
    def splitArraySameAverage(self, A: List[int]) -> bool:
        """
        Return True iff A can be split into two non-empty parts with
        the same average.

        A subset B of size i has the same average as A iff
        sum(B) == total * i / n, so it suffices to check subset sums
        of sizes 1..n//2 (the smaller half).
        """
        total = sum(A)
        n = len(A)
        # Early exit: if no feasible size i makes total*i divisible
        # by n, no valid split exists.  (The range must start at 1 —
        # including i=0 made the all() always False, disabling this
        # optimization without affecting correctness.)
        if all(total * i % n for i in range(1, n // 2 + 1)):
            return False
        # sums[i] = set of sums achievable with subsets of size i.
        sums = [set() for _ in range(1 + n // 2)]
        sums[0].add(0)
        for num in A:
            # Iterate sizes downwards so each element is used once.
            for i in range(n // 2, 0, -1):
                for s in sums[i - 1]:
                    sums[i].add(s + num)
        return any(total * i % n == 0 and total * i // n in sums[i]
                   for i in range(1, 1 + n // 2))
|
viswimmer1/PythonGenerator | data/python_files/34574373/cmss.py | Python | gpl-2.0 | 2,623 | 0.014487 | import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes
user32 = ctypes.windll.user32
# Map of key codes (as decimal strings) to Windows virtual-key codes,
# used by make_input_key to translate received bytes into console key
# events.  Naming suggests it was borrowed from the Conque project —
# TODO confirm provenance.
CONQUE_WINDOWS_VK = {
'3' : win32con.VK_CANCEL,
'8' : win32con.VK_BACK,
'9' : win32con.VK_TAB,
'12' : win32con.VK_CLEAR,
'13' : win32con.VK_RETURN,
'17' : win32con.VK_CONTROL,
'20' : win32con.VK_CAPITAL,
'27' : win32con.VK_ESCAPE,
'28' : win32con.VK_CONVERT,
'35' : win32con.VK_END,
'36' : win32con.VK_HOME,
'37' : win32con.VK_LEFT,
'38' : win32con.VK_UP,
'39' : win32con.VK_RIGHT,
'40' : win32con.VK_DOWN,
'45' : win32con.VK_INSERT,
'46' : win32con.VK_DELETE,
'47' : win32con.VK_HELP
}
def make_input_key(c, control_key_state=None):
"""Build a key-down PyINPUT_RECORDType event for character c.

For Ctrl-C (ord(c) == 3) a console CTRL_C_EVENT is generated instead
and None is returned.  NOTE(review): the caller wraps the result in a
list and only checks the list's truthiness, so a None entry can still
reach WriteConsoleInput — confirm intended.  control_key_state is
currently unused.
"""
kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT)
kc.KeyDown = True
kc.RepeatCount = 1
cnum = ord(c)
if cnum == 3:
pid_list = win32console.GetConsoleProcessList()  # unused — TODO confirm why fetched
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
return
else:
kc.Char = unicode(c)  # Python 2 only: 'unicode' builtin
if str(cnum) in CONQUE_WINDOWS_VK:
kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)]
else:
# Fall back to asking Windows for the virtual-key code.
kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum)
#kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96)
#kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED
return kc
#win32console.AttachConsole()
coord = win32console.PyCOORDType
con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW
(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, | None, 0, flags, None, '.', si)
time.sleep(1)
#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
# for i in xrange(0, size.Bottom):
# f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
# f.write("\n")
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
s.bind((HOST, PORT))
s.listen(1)
(sc, scname) = s.accept()
while True:
msg = sc.recv(1)
if ord(msg) == 0:
break
keys = [make_input_key(msg)]
if keys:
con_stdin.WriteConsoleInput(keys)
win32process.TerminateProcess(handle1, 0) |
openbmc/openbmc-test-automation | bin/validate_plug_ins.py | Python | apache-2.0 | 3,734 | 0.004017 | #!/usr/bin/env python3
import sys
try:
import __builtin__
except ImportError:
import builtins as __builtin__
import os
# python puts the program's directory path in sys.path[0]. In other words, the user ordinarily has no way
# to override python's choice of a module from its own dir. We want to have that ability i | n our environment.
# However, we don't want to break any established python modules that depend on this behavior. So, we'll
# save the value from sys.path[0], delete it, import our modules and then restore sys.path to its original
# value.
save_path_0 = sys.path[0]
del sys.path[0]
from gen_print import *
| from gen_arg import *
from gen_plug_in import *
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
# Create parser object to process command line parameters and args.
# Create parser object.
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS] [PLUG_IN_DIR_PATHS]',
description="%(prog)s will validate the plug-in packages passed to it."
+ " It will also print a list of the absolute plug-in"
+ " directory paths for use by the calling program.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
# Create arguments.
parser.add_argument(
'plug_in_dir_paths',
nargs='?',
default="",
help=plug_in_dir_paths_help_text + default_string)
parser.add_argument(
'--mch_class',
default="obmc",
help=mch_class_help_text + default_string)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def exit_function(signal_number=0,
frame=None):
r"""
Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
"""
# Trace the call and triggering signal, then print the standard
# program footer (dprint_*/qprint_* come from gen_print's wildcard import).
dprint_executing()
dprint_var(signal_number)
qprint_pgm_footer()
def signal_handler(signal_number, frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, our program would terminate immediately
with return code 143 and without calling our exit_function.
"""
# Our convention is to set up exit_function with atexit.register() so there is no need to explicitly call
# exit_function from here.
dprint_executing()
# Calling exit prevents us from returning to the code that was running when we received the signal.
exit(0)
def validate_parms():
r"""
Validate program parameters, etc. Return True or False accordingly.
"""
gen_post_validation(exit_function, signal_handler)
# NOTE(review): always returns True — gen_post_validation presumably
# raises/exits on failure; confirm in gen_arg.
return True
def main():
r"""
This is the "main" function. The advantage of having this function vs just doing this in the true
mainline is that you can:
- Declare local variables
- Use "return" instead of "exit".
- Indent 4 chars like you would in any function.
This makes coding more consistent, i.e. it's easy to move code from here into a function and vice versa.
"""
# gen_get_options parses the command line using the module-level
# 'parser' and 'stock_list'.  NOTE(review): it presumably injects each
# parm (plug_in_dir_paths, mch_class) as a module global — confirm in gen_arg.
if not gen_get_options(parser, stock_list):
return False
if not validate_parms():
return False
qprint_pgm_header()
# Access program parameter globals.
global plug_in_dir_paths
global mch_class
plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
mch_class)
qprint_var(plug_in_packages_list)
# As stated in the help text, this program must print the full paths of each selected plug in.
for plug_in_dir_path in plug_in_packages_list:
print(plug_in_dir_path)
return True
# Main
if not main():
exit(1)
|
antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_7/ar_/test_artificial_128_RelativeDifference_LinearTrend_7__20.py | Python | bsd-3-clause | 275 | 0.083636 | import pyaf.Bench.TS_datasets as tsds
import tes | ts.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = | 20, ar_order = 0); |
abhijithmannath/ml-ceeri | geo/views.py | Python | gpl-3.0 | 594 | 0.035354 | from django.shortcuts import render
from . | models import GeoLocation
from django.http import HttpResponse
def log_location(request):
    """
    Log a GPS coordinate sent by a client.

    :params
    :lat - latitude
    :lon - longitude
    :user_agent - useful for IOT applications that needs to log the client
    that send the location

    Returns HTTP 200 with body "1" on success, HTTP 500 with body "0"
    on failure.  NOTE(review): non-GET requests fall through and return
    None (a Django error), as in the original — confirm intended.
    """
    if request.method == 'GET':
        user_agent = request.GET.get('user_agent', 'test')
        try:
            lat = request.GET['lat']
            lon = request.GET['lon']
            GeoLocation.objects.create(user_agent=user_agent, lat=lat, lon=lon)
        except Exception:
            # Missing lat/lon (KeyError) or a DB error.  Was a bare
            # 'except:'; kept broad deliberately so DB failures still
            # map to a 500 response.
            return HttpResponse(0, status=500)
        return HttpResponse(1, status=200)
|
PHB-CS123/graphene | graphene/storage/base/relationship_store.py | Python | apache-2.0 | 7,198 | 0 | import struct
from graphene.storage.base.general_store import *
from graphene.storage.base.relationship import *
class RelationshipStore(GeneralStore):
"""
Handles storage of relationships to a file. It stores relationships using
the format:
(inUse_direction, firstNode, secondNode, relType, firstPrevRelId,
firstNextRelId, secondPrevRelId, secondNextRelId, nextPropId)
"""
class InUseAndDir(Enum):
undefined = 0
inUse_rightDir = 1
inUse_leftDir = 2
notInUse_rightDir = 3
notInUse_leftDir = 4
# Format string used to compact these values
# '=': native byte order representation, standard size, no alignment
# 'B': unsigned char
# 'I': unsigned int
STRUCT_FORMAT_STR = "= B I I I I I I I I"
''':type: str'''
# Size of an individual record (bytes)
RECORD_SIZE = struct.calcsize(STRUCT_FORMAT_STR)
''':type: int'''
# Name of RelationshipStore File
FILE_NAME = "graphenestore.relationshipstore.db"
''':type: str'''
# Type stored by this class
STORAGE_TYPE = Relationship
def __init__(self):
"""
Creates a RelationshipStore instance which handles reading/writing to
the file containing relationship values
:return: RelationshipStore instance for handling relationship records
:rtype: RelationshipStore
"""
# Initialize using generic base class
# (GeneralStore does the actual file management for FILE_NAME with
# fixed-size records of RECORD_SIZE bytes).
super(RelationshipStore, self).__init__(self.FILE_NAME,
self.RECORD_SIZE)
def item_from_packed_data(self, index, packed_data):
    """
    Decode one on-disk record into a Relationship.

    :param index: Index of the relationship the packed data belongs to
    :type index: int
    :param packed_data: Packed binary data
    :type packed_data: bytes
    :return: Relationship, or None if the record is empty (deleted)
    :rtype: Relationship
    """
    record = struct.Struct(self.STRUCT_FORMAT_STR).unpack(packed_data)
    # An all-zero record is a deleted/never-written slot.  (Header
    # byte 0 is the 'undefined' enum, i.e. not-in-use + undefined
    # direction, so this is equivalent to the field-by-field check.)
    if not any(record):
        return None
    (header, first_node_id, second_node_id, rel_type,
     first_prev_rel_id, first_next_rel_id,
     second_prev_rel_id, second_next_rel_id, prop_id) = record
    # Split the combined header byte back into (in_use, direction).
    in_use, direction = self.in_use_dir_from_enum(self.InUseAndDir(header))
    return Relationship(index, in_use, direction, first_node_id,
                        second_node_id, rel_type, first_prev_rel_id,
                        first_next_rel_id, second_prev_rel_id,
                        second_next_rel_id, prop_id)
def packed_data_from_item(self, relationship):
    """
    Creates packed data with Relationship structure to be written to a file
    :param relationship: Relationship to convert into packed data
    :type relationship: Relationship
    :return: Packed data
    """
    # Pack the relationship into a struct with the on-disk order:
    # (inUse_direction, firstNode, secondNode, relType, firstPrevRelId,
    #  firstNextRelId, secondPrevRelId, secondNextRelId, nextPropId)
    relationship_struct = struct.Struct(self.STRUCT_FORMAT_STR)
    # Fold inUse + direction into the single header byte
    enum = self.enum_from_in_use_dir(relationship.inUse,
                                     relationship.direction)
    packed_data = relationship_struct.pack(enum.value,
                                           relationship.firstNodeId,
                                           relationship.secondNodeId,
                                           relationship.relType,
                                           relationship.firstPrevRelId,
                                           relationship.firstNextRelId,
                                           relationship.secondPrevRelId,
                                           relationship.secondNextRelId,
                                           relationship.propId)
    return packed_data
def empty_struct_data(self):
    """
    Build the packed representation of a wiped record: all nine
    fields zero.
    :return: Packed class struct of 0s
    """
    return struct.Struct(self.STRUCT_FORMAT_STR).pack(*(0,) * 9)
@classmethod
def in_use_dir_from_enum(cls, enum):
    """
    Split a combined InUseAndDir header value into its parts.
    :param enum: Enum containing both values
    :type enum: InUseAndDir
    :return: Tuple (in_use, direction)
    :rtype: tuple
    :raises TypeError: if enum is not an InUseAndDir member
    """
    decode = {
        cls.InUseAndDir.inUse_leftDir: (True, Relationship.Direction.left),
        cls.InUseAndDir.inUse_rightDir: (True, Relationship.Direction.right),
        cls.InUseAndDir.notInUse_leftDir: (False, Relationship.Direction.left),
        cls.InUseAndDir.notInUse_rightDir: (False, Relationship.Direction.right),
        cls.InUseAndDir.undefined: (False, Relationship.Direction.undefined),
    }
    try:
        return decode[enum]
    except KeyError:
        raise TypeError("Enum is not of type InUseAndDir")
@classmethod
def enum_from_in_use_dir(cls, in_use, direction):
    """
    Combine an in-use flag and a direction into one InUseAndDir value.
    :param in_use: Whether the relationship is being used
    :type in_use: bool
    :param direction: Left or right
    :type direction: Direction
    :return: Enum containing both values
    :rtype: InUseAndDir
    :raises TypeError: if direction is not a Direction member
    """
    d = Relationship.Direction
    if direction == d.left:
        return (cls.InUseAndDir.inUse_leftDir if in_use
                else cls.InUseAndDir.notInUse_leftDir)
    if direction == d.right:
        return (cls.InUseAndDir.inUse_rightDir if in_use
                else cls.InUseAndDir.notInUse_rightDir)
    if direction == d.undefined:
        return cls.InUseAndDir.undefined
    raise TypeError("Given direction is not of type Direction")
|
s0undt3ch/Deluge | deluge/plugins/Execute/deluge/plugins/execute/core.py | Python | gpl-3.0 | 5,677 | 0.002114 | #
# core.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import os
import time
import hashlib
import logging
from subprocess import Popen, PIPE
from deluge.plugins.pluginbase import CorePluginBase
import deluge.component as component
from deluge.configmanager import ConfigManager
from deluge.core.rpcserver import export
from deluge.event import DelugeEvent
log = logging.getLogger(__name__)
DEFAULT_CONFIG = {
"commands": []
}
EXECUTE_ID = 0
EXECUTE_EVENT = 1
EXECUTE_COMMAND = 2
EVENT_MAP = {
"complete": "TorrentFinishedEvent",
"added": "TorrentAddedEvent"
}
class ExecuteCommandAddedEvent(DelugeEvent):
"""
Emitted when a new command is added.
"""
def __init__(self, command_id, event, command):
# _args is the positional payload DelugeEvent delivers to listeners.
self._args = [command_id, event, command]
class ExecuteCommandRemovedEvent(DelugeEvent):
    """
    Emitted when a command is removed.
    """
    def __init__(self, command_id):
        # _args is the positional payload DelugeEvent delivers to listeners.
        self._args = [command_id]
class Core(CorePluginBase):
    """
    Execute plugin core: runs user-configured shell commands when the
    torrent events in EVENT_MAP ("added", "complete") fire.
    """

    def enable(self):
        """Load the config and register one handler per distinct event."""
        self.config = ConfigManager("execute.conf", DEFAULT_CONFIG)
        event_manager = component.get("EventManager")
        self.registered_events = {}

        # Go through the commands list and register event handlers
        for command in self.config["commands"]:
            event = command[EXECUTE_EVENT]
            if event in self.registered_events:
                continue

            # Factory so each handler closes over its own 'event'
            # value rather than the shared loop variable.
            def create_event_handler(event):
                def event_handler(torrent_id):
                    self.execute_commands(torrent_id, event)
                return event_handler
            event_handler = create_event_handler(event)
            event_manager.register_event_handler(EVENT_MAP[event],
                                                 event_handler)
            self.registered_events[event] = event_handler

        log.debug("Execute core plugin enabled!")

    def execute_commands(self, torrent_id, event):
        """Run every configured command bound to *event*.

        Each command is invoked as: command torrent_id torrent_name save_path
        """
        torrent = component.get("TorrentManager").torrents[torrent_id]
        info = torrent.get_status(["name", "save_path", "move_on_completed",
                                   "move_on_completed_path"])

        # Grab the torrent name and save path
        torrent_name = info["name"]
        if event == "complete":
            save_path = info["move_on_completed_path"] \
                if info["move_on_completed"] else info["save_path"]
        else:
            save_path = info["save_path"]

        log.debug("[execute] Running commands for %s", event)

        # Go through and execute all the commands
        for command in self.config["commands"]:
            if command[EXECUTE_EVENT] == event:
                command = os.path.expandvars(command[EXECUTE_COMMAND])
                command = os.path.expanduser(command)
                log.debug("[execute] running %s", command)
                p = Popen([command, torrent_id, torrent_name, save_path],
                          stdin=PIPE, stdout=PIPE, stderr=PIPE)
                if p.wait() != 0:
                    log.warn("Execute command failed with exit code %d",
                             p.returncode)

    def disable(self):
        """Save the config and detach all registered event handlers."""
        self.config.save()
        event_manager = component.get("EventManager")
        for event, handler in self.registered_events.iteritems():
            # Bug fix: handlers were registered under the Deluge event
            # name (EVENT_MAP[event]) but deregistered under the short
            # config key ("complete"/"added"), so they stayed attached
            # after disable.  Deregister under the same name used to
            # register.
            event_manager.deregister_event_handler(EVENT_MAP[event], handler)
        log.debug("Execute core plugin disabled!")

    ### Exported RPC methods ###
    @export
    def add_command(self, event, command):
        """Store a new command for *event* and announce it.

        NOTE(review): a handler for a previously-unused event is only
        registered on the next enable() — confirm whether add_command
        should also register it immediately.
        """
        command_id = hashlib.sha1(str(time.time())).hexdigest()
        self.config["commands"].append((command_id, event, command))
        self.config.save()
        component.get("EventManager").emit(
            ExecuteCommandAddedEvent(command_id, event, command))

    @export
    def get_commands(self):
        """Return the configured (id, event, command) tuples."""
        return self.config["commands"]

    @export
    def remove_command(self, command_id):
        """Remove the command with *command_id* (no-op if unknown)."""
        for command in self.config["commands"]:
            if command[EXECUTE_ID] == command_id:
                self.config["commands"].remove(command)
                component.get("EventManager").emit(
                    ExecuteCommandRemovedEvent(command_id))
                break
        self.config.save()

    @export
    def save_command(self, command_id, event, cmd):
        """Replace the event/command of an existing command id."""
        for i, command in enumerate(self.config["commands"]):
            if command[EXECUTE_ID] == command_id:
                self.config["commands"][i] = (command_id, event, cmd)
                break
        self.config.save()
|
team-xue/xue | xue/tutor/admin.py | Python | bsd-3-clause | 1,563 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
from xue.tutor.models import *
from django.contrib import admin
class TutorProjectAdmin(admin.ModelAdmin):
    """Admin options for TutorProject."""
    # Columns shown in the change list.
    list_display = (
        'get_teacher_name',
        'teacher',
        'name',
        'year',
    )
    # Sidebar filters.
    list_filter = (
        'year',
    )
class StudentProjectAdmin(admin.ModelAdmin):
# Columns shown in the change list.
list_display = (
'get_student_realname',
'student',
'status',
'get_project_year',
'project',
'get_student_klass',
'get_project_teacher_realname',
'fail_count',
)
# Sidebar filters.
list_filter = (
'status',
'student__central_info__klass__major',
'student__central_info__klass',
)
class StudentApplicationAdmin(admin.ModelAdmin):
# Columns shown in the change list.
list_display = (
'get_student_realname',
'student',
'status',
'get_student_year',
'get_student_major',
'get_student_klass',
'get_student_political',
)
# Sidebar filters.
list_filter = (
'status',
'student__central_info__klass__major',
'student__central_info__political',
'student__central_info__klass',
)
admin.site.register(TutorProject, TutorProjectAdmin)
admin.site.register(StudentProject, StudentProjectAdmin)
admin.site.register(StudentApplication, StudentApplicationAdmin)
# vim:ai:et:ts=4:sw=4:sts=4:fenc=utf-8
|
OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/plugins/v3/extended_volumes.py | Python | apache-2.0 | 9,702 | 0.000103 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import extended_volumes
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import volume
ALIAS = "os-extended-volumes"
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
authorize_attach = extensions.extension_authorizer('compute',
'v3:%s:attach' % ALIAS)
authorize_detach = extensions.extension_authorizer('compute',
'v3:%s:detach' % ALIAS)
authorize_swap = extensions.extension_authorizer('compute',
'v3:%s:swap' % ALIAS)
class ExtendedVolumesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
# Controller talks to both the compute and volume service APIs.
super(ExtendedVolumesController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.volume_api = volume.API()
def _extend_server(self, context, server, instance):
    """Attach the instance's attached-volume id list to the server dict
    under the '<alias>:volumes_attached' key."""
    bdms = self.compute_api.get_instance_bdms(context, instance)
    attached = [{'id': bdm['volume_id']}
                for bdm in bdms if bdm['volume_id']]
    server["%s:volumes_attached" % ExtendedVolumes.alias] = attached
@extensions.expected_errors((400, 404, 409))
@wsgi.action('swap_volume_attachment')
@validation.schema(extended_volumes.swap_volume_attachment)
def swap(self, req, id, body):
"""Swap one attached volume for another on server *id*.

202 on success; 404 if server/volume/attachment is missing; 409 on
lock/state conflicts; 400 for an invalid volume.
"""
context = req.environ['nova.context']
authorize_swap(context)
old_volume_id = body['swap_volume_attachment']['old_volume_id']
new_volume_id = body['swap_volume_attachment']['new_volume_id']
try:
old_volume = self.volume_api.get(context, old_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
instance = self.compute_api.get(context, id,
want_objects=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = self.compute_api.get_instance_bdms(context, instance)
found = False
try:
# Locate the block-device mapping for the old volume; only then
# attempt the swap.
for bdm in bdms:
if bdm['volume_id'] != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
# NOTE(review): other raises pass explanation=; this passes the
# message positionally — consider explanation= for consistency.
raise exc.HTTPNotFound("The volume was either invalid or not "
"attached to the instance.")
else:
return webob.Response(status_int=202)
@wsgi.extends
def show(self, req, resp_obj, id):
"""Extend the 'show server' response with attached-volume ids."""
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
"""Extend each server in a 'detail' listing with attached-volume ids."""
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('attach')
@validation.schema(extended_volumes.attach)
def attach(self, req, id, body):
"""Attach volume body['attach']['volume_id'] to server *id*.

202 on success; 404 for missing server/volume; 409 on lock/state
conflicts; 400 for invalid volume or device path.
"""
server_id = id
context = req.environ['nova.context']
authorize_attach(context)
volume_id = body['attach']['volume_id']
device = body['attach'].get('device')
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
try:
instance = self.compute_api.get(context, server_id)
self.compute_api.attach_volume(context, instance,
volume_id, device)
except (exception.InstanceNotFound, exception.VolumeNotFound) as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'attach_volume')
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InvalidDevicePath as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('detach')
@validation.schema(extended_volumes.detach)
def detach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_detach(context)
volum | e_id = body['detach']['volume_id']
LOG.audit(_("Detach volume %(volume_id)s from "
"instance | %(server_id)s"),
{"volume_id": volume_id,
"server_id": id,
"context": context})
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
LOG.debug(msg)
raise exc.HTTPN |
danya02/raspi-gpio-glyphs | bin/all-leds-blink-single.py | Python | mit | 345 | 0.049275 | #!/usr/bin/python
pos="/home/pi/gpio-glyphs/bin/"
import RPi.GPIO as gpio
import json
import time
flag=False
gpio.setwarnings | (False)
delay=1
gpio.setmode(gpio.BOARD)
conv=json.load(open(pos+"conv.json"))
for i in conv:
gpio.setup(conv | [i],gpio.OUT)
for j in range(0,2):
flag=not flag
for i in conv:gpio.output(conv[i],flag)
time.sleep(delay)
|
AdamISZ/joinmarket_core | joinmarket_core/slowaes.py | Python | gpl-3.0 | 28,009 | 0.000428 | #!/usr/bin/python
#
# aes.py: implements AES - Advanced Encryption Standard
# from the SlowAES project, http://code.google.com/p/slowaes/
#
# Copyright (c) 2008 Josh Davis ( http://www.josh-davis.org ),
# Alex Martelli ( http://www.aleax.it )
#
# Ported from C code written by Laurent Haan ( http://www.progressive-coding.com )
#
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/
#
import math
import os
def append_PKCS7_padding(s):
    """Pad *s* to a multiple of 16 bytes with PKCS#7 padding: append
    N copies of chr(N), where N = 16 - len(s) % 16 (1..16)."""
    pad_len = 16 - len(s) % 16
    return s + chr(pad_len) * pad_len
def strip_PKCS7_padding(s):
    """Return s stripped of PKCS7 padding.

    :raises ValueError: if s is empty, not a multiple of 16 bytes, or
        carries an invalid pad.
    """
    if len(s) % 16 or not s:
        raise ValueError("String of len %d can't be PCKS7-padded" % len(s))
    numpads = ord(s[-1])
    # Valid PKCS#7 pad lengths are 1..16.  The original only rejected
    # numpads > 16: a trailing '\x00' (numpads == 0) slipped through
    # and s[:-0] == s[:0] silently returned '' instead of raising.
    if not 1 <= numpads <= 16:
        raise ValueError("String ending with %r can't be PCKS7-padded" % s[-1])
    if not all(numpads == x for x in map(ord, s[-numpads:-1])):
        raise ValueError("Invalid PKCS7 padding")
    return s[:-numpads]
class AES(object):
# valid key sizes
| keySize = dict(SIZE_128=16, SIZE_192=24, SIZE_256=32)
# Rijndael S-box
sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67,
0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59,
0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7,
0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1,
0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05,
0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83,
0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29,
0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa,
0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc,
0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19,
0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,
0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49,
0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4,
0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70,
0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e,
0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1,
0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0,
0x54, 0xbb, 0x16]
# Rijndael Inverted S-box
rsbox = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3,
0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f,
0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54,
0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b,
0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24,
0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8,
0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d,
0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab,
0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3,
0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1,
0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6,
0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9,
0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d,
0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0,
0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07,
0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60,
0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f,
0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5,
0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b,
0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55,
0x21, 0x0c, 0x7d]
def getSBoxValue(self, num):
"""Retrieves a given S-Box Value"""
return self.sbox[num]
def getSBoxInvert(self, num):
"""Retrieves a given Inverted S-Box Value"""
return self.rsbox[num]
@staticmethod
def rotate(word):
""" Rijndael's key schedule rotate operation.
Rotate a word eight bits to the left: eg, rotate(1d2c3a4f) == 2c3a4f1d
Word is an char list of size 4 (32 bits overall).
"""
return word[1:] + word[:1]
# Rijndael Rcon
Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97,
0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72,
0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66,
0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d,
0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61,
0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc,
0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5,
0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a,
0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d,
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c,
0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4,
0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08,
0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d,
0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2,
0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74,
0xe8, 0xcb]
def getRconValue(self, num):
"""Retrieves a given Rcon Value"""
return self.Rcon[num]
def core(self, word, iteration):
"""Key schedule core."""
# rotate the 32-bit word 8 bits to the left
word = self.rotate(word)
# apply S-Box substitution on all 4 parts of the 32-bit word
for i in range(4):
word[i] = self.getSBoxValue(word[i])
# XOR the output of the rcon operation with i to the first part
# (leftmost) only
word[0] = word[0] ^ self.getRconValue(iteration)
return word
def expandKey(self, key, size, expandedKeySize):
"""Rijndael's key expansion.
Expands an 128,192,256 key into an 176,208,240 bytes key
expandedKey is a char list of large enough size,
key is the non-expanded key.
"""
# current expanded keySize, in bytes
currentSize = 0
rconIteration = 1
|
pyphrb/myweb | app/plugin/nmap/libnmap/objects/os.py | Python | apache-2.0 | 13,080 | 0.000076 | # -*- coding: utf-8 -*-
import warnings
from libnmap.objects.cpe import CPE
class OSFPPortUsed(object):
    """
    Represents one <portused> entry collected while nmap performed an OS
    fingerprint scan, giving the NmapOSFingerprint user uniform read-only
    access to the state, protocol and port number nmap relied on.
    """
    def __init__(self, port_used_dict):
        """
        :param port_used_dict: dict holding 'state', 'proto' and 'portid'
        :raises Exception: when any required key is absent
        """
        try:
            self._state, self._proto, self._portid = (
                port_used_dict['state'],
                port_used_dict['proto'],
                port_used_dict['portid'],
            )
        except KeyError:
            raise Exception("Cannot create OSFPPortUsed: missing required key")

    @property
    def state(self):
        """State of the port used for fingerprinting (closed, open,...)."""
        return self._state

    @property
    def proto(self):
        """Layer-4 protocol of the port used (tcp, udp,...)."""
        return self._proto

    @property
    def portid(self):
        """Number of the referenced port used during the scan."""
        return self._portid
class NmapOSMatch(object):
    """
    NmapOSMatch is an internal class used for offering results
    from an nmap os fingerprint. This common interface provides
    compatibility between old nmap xml (<1.04) and new nmap
    xml versions (used in nmapv6 for instance).

    In previous xml versions, osclass tags from nmap fingerprints
    were not directly mapped to an osmatch. In new xml versions,
    osclass could be embedded in an osmatch tag.

    The approach to solve this is to create a common class
    which will, for older xml versions, match based on the accuracy
    osclass to an osmatch. If no match, an osmatch will be made up
    from a concat of os class attributes: vendor and osfamily.
    Unmatched osclass will have a line attribute of -1.

    More info, see issue #26 or http://seclists.org/nmap-dev/2012/q2/252
    """
    def __init__(self, osmatch_dict):
        """
        :param osmatch_dict: dict holding an 'osmatch' sub-dict (required
            keys: name, line, accuracy) and, for nmap xml >= 1.04, an
            optional 'osclasses' list of osclass dicts.
        :raises Exception: if a required key is missing or an embedded
            osclass dict cannot be parsed.
        """
        _osmatch_dict = osmatch_dict['osmatch']
        if('name' not in _osmatch_dict or
           'line' not in _osmatch_dict or
           'accuracy' not in _osmatch_dict):
            # BUGFIX: message previously said "NmapOSClass" (copy-paste
            # from NmapOSClass.__init__), misreporting the failing class.
            raise Exception("Cannot create NmapOSMatch: missing required key")

        self._name = _osmatch_dict['name']
        self._line = _osmatch_dict['line']
        self._accuracy = _osmatch_dict['accuracy']

        # create osclass list; 'osclasses' is absent on older nmap xml,
        # in which case the KeyError below makes this a no-op
        self._osclasses = []
        try:
            for _osclass in osmatch_dict['osclasses']:
                try:
                    _osclassobj = NmapOSClass(_osclass)
                except Exception:
                    # BUGFIX: was a bare "except:"; narrowed so that
                    # SystemExit/KeyboardInterrupt are not swallowed.
                    raise Exception("Could not create NmapOSClass object")
                self._osclasses.append(_osclassobj)
        except KeyError:
            pass

    def add_osclass(self, osclass_obj):
        """
        Add a NmapOSClass object to the OSMatch object. This method is
        useful to implement compatibility with older versions of NMAP
        by providing a common interface to access os fingerprint data.
        """
        self._osclasses.append(osclass_obj)

    @property
    def osclasses(self):
        """
        Accessor for all NmapOSClass objects matching with this OS Match
        """
        return self._osclasses

    @property
    def name(self):
        """
        Accessor for name attribute (e.g.: Linux 2.4.26 (Slackware 10.0.0))
        """
        return self._name

    @property
    def line(self):
        """
        Accessor for line attribute as integer. value equals -1 if this
        osmatch holds orphans NmapOSClass objects. This could happen with
        older version of nmap xml engine (<1.04 (e.g: nmapv6)).

        :return: int
        """
        return int(self._line)

    @property
    def accuracy(self):
        """
        Accessor for accuracy

        :return: int
        """
        return int(self._accuracy)

    def get_cpe(self):
        """
        This method returns a list of cpe strings and not CPE objects as
        the NmapOSClass.cpelist property. This method is a helper to
        simplify data management.

        For more advanced handling of CPE data, use NmapOSClass.cpelist
        and use the methods from CPE class
        """
        return [cpe.cpestring
                for osc in self.osclasses
                for cpe in osc.cpelist]

    def __repr__(self):
        rval = "{0}: {1}".format(self.name, self.accuracy)
        for _osclass in self._osclasses:
            rval += "\r\n |__ os class: {0}".format(str(_osclass))
        return rval
class NmapOSClass(object):
    """
    NmapOSClass offers a unified API to access data from an analysed
    osclass tag. As implemented in libnmap and newer versions of nmap,
    osclass objects will always be embedded in a NmapOSMatch.
    Unmatched NmapOSClass will be stored in "dummy" NmapOSMatch objects
    which will have the particularity of having a line attribute of -1.
    On top of this, NmapOSClass will have optional CPE objects
    embedded.
    """
    def __init__(self, osclass_dict):
        """
        :param osclass_dict: dict holding an 'osclass' sub-dict (required
            keys: vendor, osfamily, accuracy; optional: osgen, type) and
            a 'cpe' list of cpe dicts.
        :raises Exception: if a required key is missing
        """
        _osclass = osclass_dict['osclass']
        if('vendor' not in _osclass or
           'osfamily' not in _osclass or
           'accuracy' not in _osclass):
            raise Exception("Wrong osclass structure: missing required key")

        self._vendor = _osclass['vendor']
        self._osfamily = _osclass['osfamily']
        self._accuracy = _osclass['accuracy']

        # optional data: default to empty strings when nmap did not
        # report a generation or a device type (dict.get replaces the
        # previous sentinel-then-overwrite pattern)
        self._osgen = _osclass.get('osgen', '')
        self._type = _osclass.get('type', '')

        # list of optional CPE objects matching this os class
        self._cpelist = [CPE(_cpe) for _cpe in osclass_dict['cpe']]

    @property
    def cpelist(self):
        """
        Returns a list of CPE Objects matching with this os class

        :return: list of CPE objects
        :rtype: Array
        """
        return self._cpelist

    @property
    def vendor(self):
        """
        Accessor for vendor information (Microsoft, Linux,...)

        :return: string
        """
        return self._vendor

    @property
    def osfamily(self):
        """
        Accessor for OS family information (Windows, Linux,...)

        :return: string
        """
        return self._osfamily

    @property
    def accuracy(self):
        """
        Accessor for OS class detection accuracy (int)

        :return: int
        """
        return int(self._accuracy)

    @property
    def osgen(self):
        """
        Accessor for OS class generation (7, 8, 2.4.X,...).

        :return: string
        """
        return self._osgen

    @property
    def type(self):
        """
        Accessor for OS class type (general purpose,...)

        :return: string
        """
        return self._type

    @property
    def description(self):
        """
        Accessor helper which returns a concatenated string of
        the valuable attributes from NmapOSClass object

        :return: string
        """
        rval = "{0}: {1}, {2}".format(self.type, self.vendor, self.osfamily)
        # idiomatic truthiness test instead of "if len(...)"
        if self.osgen:
            rval += "({0})".format(self.osgen)
        return rval

    def __repr__(self):
        # reuse description instead of duplicating its formatting logic
        rval = self.description
        for _cpe in self._cpelist:
            rval += "\r\n |__ {0}".format(str(_cpe))
        return rval
class NmapOSFingerprint(object):
"""
NmapOSFingerprint is a easier API for using os fingerprinting.
Data for OS fingerprint (<os> tag) is instanciated from
a NmapOSFingerprint which is accessible in NmapHost via NmapHost.os
"""
def __init__(self, osfp_data):
self.__osmatches = []
self.__ports_used = []
self.__fingerprints = []
if 'os |
c3nav/c3nav | src/c3nav/editor/migrations/0015_changeset_last_state_update.py | Python | apache-2.0 | 1,114 | 0.002693 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-05 13:48
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations, models
import django.db.models.deletion
def forwards_func(apps, schema_editor):
    """Backfill ChangeSet.last_state_update from the newest state update."""
    changeset_model = apps.get_model('editor', 'ChangeSet')
    for cs in changeset_model.objects.all():
        state_updates = cs.updates.filter(state__isnull=False)
        try:
            cs.last_state_update = state_updates.latest()
        except ObjectDoesNotExist:
            # changesets without any state-bearing update keep the default
            pass
        cs.save()
def reverse_func(apps, schema_editor):
    """No-op: the added column is simply dropped when migrating backwards."""
    return None
class Migration(migrations.Migration):
    # Adds ChangeSet.last_state_update and backfills it via forwards_func.
    # (Reconstructed: stray dataset delimiters had garbled the class
    # statement and the "field=models.ForeignKey" line.)

    dependencies = [
        ('editor', '0014_last_update_foreign_key'),
    ]

    operations = [
        migrations.AddField(
            model_name='changeset',
            name='last_state_update',
            # nullable FK: a changeset may not have any state update yet
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='editor.ChangeSetUpdate', verbose_name='last state update'),
        ),
        # populate the new column from existing ChangeSetUpdate rows
        migrations.RunPython(forwards_func, reverse_func),
    ]
|
mmmikael/vlfeat | python/vlfeat/demo/vl_demo_sift_edge.py | Python | gpl-2.0 | 733 | 0.051842 | import Image
import numpy
import pylab
import vlfeat
from vlfeat.plotop.vl_plotframe import vl_plotframe
if __name__ == '__main__':
""" VL_DEMO_SIFT_EDGE Demo: SIFT: edge treshold
"""
I = numpy.zeros([100, 500])
for i in 10 * (1+numpy.arange(9)):
d = numpy.round(i / 3.0)
I[50-d-1:50+d-1, i * 5-1] = 1
I = numpy.array(I | , 'f', order='F')
I = 2 * numpy.pi * 8 ** 2 * vlfeat.vl_imsmooth(I, 8)
I = 255 * I
I = numpy.array(I, 'f', order='F')
print 'sift_edge_0'
ter = [3.5, 5, 7.5, 10]
for te in ter:
f, d = vlfeat.vl_sift(I, peak_thresh=0.0, edge_thresh=te)
pylab.figure()
pylab.gray()
| pylab.imshow(I)
vl_plotframe(f, color='k', linewidth=3)
vl_plotframe(f, color='y', linewidth=2)
pylab.show() |
satish-avninetworks/murano | murano/tests/functional/engine/test_deployment.py | Python | apache-2.0 | 5,629 | 0 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nose.plugins.attrib import attr as tag
import murano.tests.functional.engine.manager as core
class MuranoDeploymentTest(core.MuranoTestsCore):
    """Functional deployment scenarios for the Murano engine.

    Uploads a set of throw-away test packages once per class, exercises
    environment deployment flows against them, then purges all
    environments and uploaded packages.
    """

    @classmethod
    def setUpClass(cls):
        super(MuranoDeploymentTest, cls).setUpClass()

        cls.linux = core.CONF.murano.linux_image
        cls.flavor = core.CONF.murano.standard_flavor
        cls.upload_app('io.murano.apps.test.UpdateExecutor',
                       'UpdateExecutor',
                       {"categories": ["Web"], "tags": ["tag"]})
        cls.upload_app('io.murano.apps.test.Lighttpd',
                       'Lighttpd',
                       {"categories": ["Web"], "tags": ["tag"]})
        cls.upload_app('io.murano.apps.test.ApacheHttpServerCustom',
                       'Apache HTTP Server Custom',
                       {"categories": ["Web"], "tags": ["test"]})

    @classmethod
    def tearDownClass(cls):
        super(MuranoDeploymentTest, cls).tearDownClass()

        cls.purge_environments()
        cls.purge_uploaded_packages()

    @tag('gate', 'all', 'coverage')
    def test_app_deployment(self):
        """Deploy a single test app into a fresh environment."""
        post_body = self.get_test_app()
        environment_name = self.rand_name('dummyMurano')
        environment = self.create_environment(name=environment_name)
        session = self.create_session(environment)
        self.add_service(environment, post_body, session)
        self.deploy_environment(environment, session)

    @tag('gate', 'all', 'coverage')
    def test_resources_deallocation(self):
        """Removing one of two apps must drop its Heat stack resources."""
        app_1 = self.get_test_app()
        app_2 = self.get_test_app()
        environment_name = self.rand_name('dummyMurano')
        environment = self.create_environment(name=environment_name)
        session = self.create_session(environment)
        self.add_service(environment, app_1, session)
        self.add_service(environment, app_2, session)
        self.deploy_environment(environment, session)

        environment = self.get_environment(environment)
        app_for_remove = self.get_service(environment, app_1['name'],
                                          to_dict=False)
        session = self.create_session(environment)
        environment = self.delete_service(environment, session, app_for_remove)
        self.deploy_environment(environment, session)

        instance_name = app_1['instance']['name']
        stack = self._get_stack(environment.id)
        template = self.get_stack_template(stack)
        ip_addresses = '{0}-assigned-ip'.format(instance_name)
        floating_ip = '{0}-FloatingIPaddress'.format(instance_name)

        self.assertNotIn(ip_addresses, template['outputs'])
        self.assertNotIn(floating_ip, template['outputs'])
        self.assertNotIn(instance_name, template['resources'])

    @tag('gate', 'all', 'coverage')
    def test_dependent_apps(self):
        """Deploy an app wired to another app via the 'updater' property.

        (Reconstructed: stray dataset delimiters had garbled the
        add_service call and the "?" dict literal.)
        """
        post_body = self.get_test_app()
        environment_name = self.rand_name('dummyMurano')
        environment = self.create_environment(name=environment_name)
        session = self.create_session(environment)
        updater = self.add_service(environment, post_body, session,
                                   to_dict=True)
        post_body = {
            "name": self.rand_name("lighttest"),
            "updater": updater,
            "?": {
                "type": "io.murano.apps.test.Lighttpd",
                "id": str(uuid.uuid4())
            }
        }
        self.add_service(environment, post_body, session)
        self.deploy_environment(environment, session)
        self.status_check(environment,
                          [[updater['instance']['name'], 22, 80]])

    @tag('gate', 'all', 'coverage')
    def test_simple_software_configuration(self):
        """Deploy an Apache app and verify the configured user shows up."""
        post_body = {
            "instance": {
                "flavor": self.flavor,
                "image": self.linux,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": self.rand_name("mrn-test"),
            },
            "name": self.rand_name("ssc-test"),
            "userName": self.rand_name("user"),
            "?": {
                "type": "io.murano.apps.test.ApacheHttpServerCustom",
                "id": str(uuid.uuid4())
            }
        }
        username = post_body["userName"]
        environment_name = self.rand_name('SSC-murano')
        environment = self.create_environment(name=environment_name)
        session = self.create_session(environment)
        self.add_service(environment, post_body, session, to_dict=True)
        self.deploy_environment(environment, session)
        self.status_check(environment,
                          [[post_body['instance']['name'], 22, 80]])
        resp = self.check_path(environment, '', post_body['instance']['name'])
        self.assertIn(username, resp.text, "Required information not found in "
                                           "response from server")
astrofrog/reducer | reducer/image_browser.py | Python | bsd-3-clause | 12,267 | 0.000326 | from __future__ import (division, print_function, absolute_import,
unicode_literals)
from collections import OrderedDict
import os
from io import BytesIO
import numpy as np
from astropy.io import fits
import msumastro
from .notebook_dir import get_data_path
from .ipython_version_helper import ipython_version_as_string
# Public API of this module.
__all__ = [
    'ImageTree',
    'FitsViewer',
    'ImageBrowser',
    'ndarray_to_png',
]

ipython_version = ipython_version_as_string()
# IPython 3.x still bundles the widget machinery; from 4.0 on it lives in
# the separate ipywidgets/traitlets packages.
if ipython_version.startswith('3'):
    from IPython.html import widgets
    from IPython.utils.traitlets import Unicode
else:
    import ipywidgets as widgets
    from traitlets import Unicode

# Versions up to 4.0.2 need a patched Accordion front-end view (custom js
# shipped as an nbextension); later versions work with the stock widget.
if (ipython_version.startswith('3') or
        (int(ipython_version) <= 402)):
    # Use my accordion js
    class Accordion(widgets.Accordion):
        # sync=True mirrors these traits to the browser-side view
        _view_module = Unicode("nbextensions/accordion_replacement/accordion_patch", sync=True)
        _view_name = Unicode("AccordionView", sync=True)
else:
    class Accordion(widgets.Accordion):
        pass
class ImageTree(object):
    """
    Create a tree view of a collection of images.

    Parameters
    ----------
    tree : `msumastro.TableTree`
        Tree of images, arranged by metadata.
    """
    def __init__(self, tree):
        if not isinstance(tree, msumastro.TableTree):
            raise ValueError("argument must be a TableTree")
        self._tree = tree
        self._gui_objects = OrderedDict()
        self._top = None
        self._create_gui()
        self._set_titles()

    def _id_string(self, parts):
        """Join tree-path components into the key used in ``_gui_objects``.

        An empty path maps to the empty string (the root key).
        """
        # BUGFIX(idiom): was a lambda assigned in __init__ (PEP 8 E731);
        # a real method behaves identically for all internal callers.
        return os.path.join(*[str(s) for s in parts]) if parts else ''

    @property
    def top(self):
        """
        Widget at the top of the tree.
        """
        return self._top

    def _get_index_in_children(self, widget):
        # Returns the position of *widget* among its parent's children,
        # or None (implicitly) when it is not found.
        parent = widget.parent
        for idx, wid in enumerate(parent.children):
            if widget is wid:
                return idx

    def _replace_child(self, parent, old=None, new=None):
        """
        Replace old child with new.

        Parameters
        ----------
        parent : IPython widget
            String that identifies parent in gui
        old : IPython widget
            Child to be replaced
        new : IPython widget or None
            Replacement child (or None)

        Notes
        -----
        Children are stored as a tuple so they are immutable.
        """
        current_children = list(parent.children)
        for idx, child in enumerate(current_children):
            if child is old:
                current_children[idx] = new
        parent.children = current_children

    def _create_gui(self):
        """
        Create the tree gui elements.

        Notes
        -----
        Each node of the tree is either an
        `IPython.html.widgets.Accordion`, if the node has child nodes,
        or a `IPython.html.widgets.Select`, if the node has a list.

        Note well this does **not** allow for the case of child nodes and
        a list, so this does not really suffice as a file browser.

        List nodes monkey with their parents by editing the description to
        include the number of list items in the node.
        """
        for parents, children, index in self._tree.walk():
            if children and index:
                # This should be impossible...
                raise RuntimeError("What the ???")
            parent_string = self._id_string(parents)
            depth = len(parents)
            try:
                key = self._tree.tree_keys[depth]
            except IndexError:
                key = ''
            if depth == 0:
                self._top = Accordion(description=key)
                self._top.selected_index = -1
                self._gui_objects[parent_string] = self._top
            parent = self._gui_objects[parent_string]
            # Do I have children? If so, add them as sub-accordions
            if children:
                child_objects = []
                for child in children:
                    desc = ": ".join([key, str(child)])
                    child_container = Accordion(description=desc)
                    # Make sure all panels start out closed.
                    child_container.selected_index = -1
                    child_container.parent = self._gui_objects[parent_string]
                    child_string = os.path.join(parent_string, str(child))
                    self._gui_objects[child_string] = child_container
                    child_objects.append(child_container)
                parent.children = child_objects
            # Do I have only a list? Populate a select box with those...
            if index:
                new_text = widgets.Select(options=index)
                index_string = self._id_string([parent_string, 'files'])
                self._gui_objects[index_string] = new_text
                # On the last pass an Accordion will have been created for
                # this item. We need to replace that Accordion with a Select.
                # The Select should be inside a box so that we can set a
                # description on the box that won't be displayed on the
                # Select. When titles are built for the image viewer tree
                # later on they are based on the description of the Accordions
                # and their immediate children.
                old_parent = parent
                grandparent = old_parent.parent
                desc = old_parent.description
                s_or_not = ['', 's']
                n_files = len(index)
                desc += " ({0} image{1})".format(n_files,
                                                 s_or_not[n_files > 1])
                # Place the box between the Select and the parent Accordion
                parent = widgets.Box(description=desc)
                parent.children = [new_text]
                parent.parent = grandparent
                self._replace_child(grandparent, old=old_parent, new=parent)

    def display(self):
        """
        Display and format this widget.
        """
        from IPython.display import display
        display(self._top)

    def _set_titles(self):
        """
        Set titles for accordions.

        This should apparently be done *before* the widget is displayed.
        """
        # BUGFIX: dict.iteritems() does not exist on Python 3, which is
        # exactly where the ipywidgets code path above runs; values()
        # works on both 2 and 3 (the key was unused anyway).
        for obj in self._gui_objects.values():
            if isinstance(obj, Accordion):
                for idx, child in enumerate(obj.children):
                    if not isinstance(child, widgets.Select):
                        obj.set_title(idx, child.description)

    def format(self):
        """
        This gets called by the ImageBrowser so don't delete it.
        """
        pass
def ndarray_to_png(x):
    """Render a 2-D ndarray as a contrast-stretched PNG byte string.

    Returns None for non-2-D input. The image is resized to a fixed
    width, sigma-clipped at +/-3 standard deviations, rescaled to
    0..255 and PNG-encoded in memory.
    """
    from PIL import Image
    shape = np.array(x.shape)
    # Reverse order for reasons I do not understand...
    shape = shape[::-1]
    if len(shape) != 2:
        return
    aspect = shape[1]/shape[0]
    width = 600  # pixels
    new_shape = np.asarray(width/shape[0]*aspect*shape, dtype='int')
    x = np.asarray(Image.fromarray(x).resize(new_shape))
    # standardize, then clip extreme pixels to +/-3 sigma
    x = (x - x.mean()) / x.std()
    x[x >= 3] = 2.99
    x[x < -3] = -3.0
    # rescale to [0, ~0.9]; the 1.1 factor keeps the max below 255 after
    # the *256 below. (Reconstructed: a stray dataset delimiter had
    # garbled the x.max() call.)
    x = (x - x.min()) / (1.1*x.max() - x.min())
    img = Image.fromarray((x*256).astype('uint8'))
    img_buffer = BytesIO()
    img.save(img_buffer, format='png')
    return img_buffer.getvalue()
class FitsViewer(object):
"""
Display the image and header from a single FITS file.
"""
def __init__(self):
self._top = widgets.Tab(visible=False)
self._data = None # hd | u.data
self._png_image = None # ndarray_to_png(self._data)
self._header = ''
self._image = widgets.Image()
self._header_display = widgets.Textarea(disabled=True)
self._top.children = [self._image, self._header_display]
@property
def top(self):
return self._top
def display(self):
"""
Display and format this widget.
"""
from IPython.display import display
display(self._top)
self.format()
def format(self):
"""
Format widget.
Must be called after the widget is |
ratnania/pyccel | tests/codegen/scripts/tuples.py | Python | mit | 288 | 0.013889 | ai = (1 | ,4,5)
# NOTE(review): this is a pyccel codegen test script -- pyccel appears to
# map homogeneous tuples to mutable arrays, so the element assignments
# and slice writes below are intentional even though plain CPython would
# raise TypeError on them. Confirm against pyccel's tuple semantics.

# integer tuple: element update, element reads, arithmetic on elements
ai[0] = 2
bi = ai[0]
ci = 2 * ai[0]
di = 2 * ai[0] + 3 * ai[1]
ei = 2 * ai[0] + bi * ai[1]
fi = ai
gi = (0,)*2
gi[:] = ai[1:]

# the same exercises repeated with a float tuple
ad = (1.,4.,5.)
ad[0] = 2.
bd = ad[0]
cd = 2. * ad[0]
dd = 2. * ad[0] + 3. * ad[1]
ed = 2. * ad[0] + bd * ad[1]
fd = ad
gd = (0.,)*2
gd[:] = ad[1:]
dddTESTxx/Gym-Final | src/data/tests.py | Python | mit | 268 | 0.003731 | import unittest
# (Reconstructed: a stray dataset delimiter had garbled the click.testing
# import below.)
from click.testing import CliRunner
from make_dataset import main


class TestMain(unittest.TestCase):
    """Smoke test for the make_dataset command line interface."""

    def test_main_runs(self):
        # Invoke the CLI with input and output both pointing at the
        # current directory; success is a clean (zero) exit code.
        runner = CliRunner()
        result = runner.invoke(main, ['.', '.'])
        assert result.exit_code == 0
| |
shazamengineering/gearman-geodis | setup.py | Python | apache-2.0 | 570 | 0.019298 | from distutils.core import setup
# Packages shipped by this distribution.
PKGLIST = ['gearman_geodis']

# (Reconstructed: stray dataset delimiters had garbled the author_email
# line and the stdin_geodis_worker.py script path.)
setup(name='gearman-geodis',
      version='1.0.0',
      description='Geolocation Gearman worker powered by Geodis',
      author_email='engineering@shazamteam.com',
      license='Apache License, Version 2.0',
      packages=PKGLIST,
      scripts=['gearman_geodis/geodis_worker.py', 'gearman_geodis/gearman_geodisd.py', 'gearman_geodis/stdin_geodis_worker.py'],
      # sysconfig defaults and an init script installed system-wide
      data_files=[('/etc/sysconfig/', ['support/gearman_geodis.sysconfig']),
                  ('/etc/init.d/', ['support/gearman_geodis'])]
      )
|
WorldBank-Transport/DRIVER | app/data/migrations/0011_copy_ashlar_tables.py | Python | gpl-3.0 | 2,467 | 0.00527 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-10 18:20
from __future__ import unicode_literals
from django.db import migrations
from django.db import connection
def table_exists(tablename):
    """Return True when a table named *tablename* exists.

    PostgreSQL-specific: queries the pg_tables catalog.
    """
    cursor = connection.cursor()
    # Parameterized query instead of str.format interpolation: safer
    # (no SQL injection surface) and quotes odd table names correctly.
    cursor.execute(
        """SELECT EXISTS (SELECT * FROM pg_tables WHERE tablename = %s);""",
        [tablename]
    )
    return cursor.fetchone()[0]
# (ashlar_table, grout_table) rename pairs handled uniformly by the
# forward/reverse functions below; ashlar_record is treated specially.
tables = [
    ('ashlar_boundary', 'grout_boundary'),
    ('ashlar_boundarypolygon', 'grout_boundarypolygon'),
    ('ashlar_recordtype', 'grout_recordtype'),
    ('ashlar_recordschema', 'grout_recordschema')
]
def forward(apps, schema_editor):
    """Rename/copy legacy Ashlar tables to their Grout equivalents."""
    cursor = connection.cursor()
    for ashlar_table, grout_table in tables:
        # Only rename the table if it exists and the corresponding grout table doesn't exist. If the
        # corresponding grout table exists, that means that migrations have been run from the
        # beginning without Ashlar having been installed, such as when testing, and we shouldn't do
        # anything.
        if table_exists(ashlar_table) and not table_exists(grout_table):
            cursor.execute('ALTER TABLE {a} RENAME TO {g};'.format(a=ashlar_table, g=grout_table))

    # The Record table needs special handling because we need to preserve some of the old Ashlar
    # data in DRIVER after Grout drops those columns, so we need to copy the table rather than
    # simply renaming it, so that we can pull in the dropped columns later from the old Ashlar
    # tables.
    # (Reconstructed: a stray dataset delimiter had garbled the
    # "grout_record" identifier in the CREATE TABLE statement.)
    if table_exists('ashlar_record') and not table_exists('grout_record'):
        cursor.execute('CREATE TABLE grout_record (LIKE ashlar_record INCLUDING ALL);')
        cursor.execute('INSERT INTO grout_record SELECT * FROM ashlar_record;')
def reverse(apps, schema_editor):
    """Undo forward(): rename Grout tables back and drop the record copy."""
    cursor = connection.cursor()
    # The only time we would reverse is if we know that we previously used Ashlar, which we can only
    # know for sure if the `ashlar_record` table exists (since all other tables get renamed rather
    # than copied).
    # (Reconstructed: a stray dataset delimiter had garbled the
    # "ashlar_table" loop variable below.)
    if table_exists('ashlar_record'):
        for ashlar_table, grout_table in tables:
            cursor.execute('ALTER TABLE {g} RENAME TO {a};'.format(a=ashlar_table, g=grout_table))
        cursor.execute('DROP TABLE grout_record;')
class Migration(migrations.Migration):
    # Copies/renames legacy Ashlar tables into Grout via raw SQL above.
    dependencies = [
        ('data', '0010_auto_20160527_2236'),
    ]
    operations = [
        migrations.RunPython(forward, reverse)
    ]
|
badock/nova | nova/virt/libvirt/volume.py | Python | apache-2.0 | 47,624 | 0.000294 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import glob
import os
import time
import urllib2
from oslo.config import cfg
from oslo.utils import strutils
import six
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
# Configuration options for the libvirt volume drivers, registered under
# the [libvirt] group. (Reconstructed: stray dataset delimiters had
# garbled the two nfs_mount_* option names.)
volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=5,
               help='Number of times to rescan iSCSI target to find volume'),
    cfg.IntOpt('num_iser_scan_tries',
               default=5,
               help='Number of times to rescan iSER target to find volume'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes'),
    # BUGFIX: the concatenated help strings previously rendered as
    # "rbd_uservolumes" (missing space between literals).
    cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the NFS volume is mounted on the'
                    ' compute node'),
    # BUGFIX: help text said "passedf" instead of "passed".
    cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See section '
                    'of the nfs man page for details'),
    cfg.IntOpt('num_aoe_discover_tries',
               default=3,
               help='Number of times to rediscover AoE target to find volume'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the glusterfs volume is mounted on the '
                    'compute node'),
    cfg.BoolOpt('iscsi_use_multipath',
                default=False,
                help='Use multipath connection of the iSCSI volume'),
    cfg.BoolOpt('iser_use_multipath',
                default=False,
                help='Use multipath connection of the iSER volume'),
    cfg.StrOpt('scality_sofs_config',
               help='Path or URL to Scality SOFS configuration file'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted'),
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]')
]

CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Base class for volume drivers.

    Builds the common libvirt <disk> guest config from Cinder-style
    connection_info plus nova disk_info; subclasses override get_config
    to fill in source-specific attributes.
    """
    def __init__(self, connection, is_block_dev):
        # connection: the libvirt driver instance (used for hypervisor
        # version lookup); is_block_dev: whether the backing store is a
        # block device (influences the driver name picked below).
        self.connection = connection
        self.is_block_dev = is_block_dev

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        :param connection_info: dict from the volume driver; an optional
            'data' sub-dict may carry block size, qos_specs and
            access_mode tuning values.
        :param disk_info: dict with 'type', 'dev' and 'bus' keys.
        :returns: LibvirtConfigGuestDisk ready for XML serialization.
        :raises exception.InvalidVolumeAccessMode: for an access_mode
            other than 'ro' or 'rw'.
        """
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = libvirt_utils.pick_disk_driver_name(
            self.connection._get_hypervisor_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')

        # Support for block size tuning
        data = {}
        if 'data' in connection_info:
            data = connection_info['data']
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']

        # Extract rate_limit control parameters
        if 'qos_specs' in data and data['qos_specs']:
            tune_opts = ['total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec']
            specs = data['qos_specs']
            if isinstance(specs, dict):
                # NOTE(review): iteritems() is py2-only; fine here since
                # this file imports urllib2 (py2), but would need items()
                # on py3.
                for k, v in specs.iteritems():
                    if k in tune_opts:
                        # map e.g. total_bytes_sec -> conf.disk_total_bytes_sec
                        new_key = 'disk_' + k
                        setattr(conf, new_key, v)
            else:
                LOG.warn(_LW('Unknown content in connection_info/'
                             'qos_specs: %s'), specs)

        # Extract access_mode control parameters
        if 'access_mode' in data and data['access_mode']:
            access_mode = data['access_mode']
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                LOG.error(_LE('Unknown content in '
                              'connection_info/access_mode: %s'),
                          access_mode)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)

        return conf

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        # Base implementation has no host-side attach work to do.
        return self.get_config(connection_info, disk_info)

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        # No-op by default; subclasses with host state override this.
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver for volumes backed by a local block device/file path."""

    def __init__(self, connection):
        # Local volumes are always presented as block devices.
        super(LibvirtVolumeDriver, self).__init__(
            connection, is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Build the libvirt disk config for a block-backed volume."""
        conf = super(LibvirtVolumeDriver, self).get_config(
            connection_info, disk_info)
        device_path = connection_info['data']['device_path']
        conf.source_type = "block"
        conf.source_path = device_path
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver that attaches a dummy 'fake' network volume (testing)."""

    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(
            connection, is_block_dev=True)

    def get_config(self, connection_info, disk_info):
        """Build the libvirt disk config for the fake network protocol."""
        conf = super(LibvirtFakeVolumeDriver, self).get_config(
            connection_info, disk_info)
        # Both protocol and name are the literal placeholder "fake".
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret |
macopedia/hr | __unported__/hr_language/hr_language.py | Python | agpl-3.0 | 2,057 | 0 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import tools
from openerp.osv import fields, orm
class hr_language(orm.Model):
_name = 'hr.language'
_columns = {
'name': fields.selection(
tools.scan_languages(),
'Language',
required=True,
),
'description': fields.char(
'Description',
size=64,
required=True,
translate=True,
),
'employee_id': fields.many2one(
'hr.employee',
' | Employee',
required=True,
),
'read': fields.boolean(
'Read',
),
'write': fields.boolean(
'Write',
),
| 'speak': fields.boolean(
'Speak',
),
}
_defaults = {
'read': True,
'write': True,
'speak': True,
}
class hr_employee(orm.Model):
_inherit = 'hr.employee'
_columns = {
'language_ids': fields.one2many(
'hr.language',
'employee_id',
'Languages',
),
}
|
lawzou/shoop | shoop_setup_utils/__init__.py | Python | agpl-3.0 | 780 | 0 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from .commands import COMMANDS
from .excludes import set_exclude_patters, get_exclude_patterns
from .finding import find_packages
from .parsing import get_long_description, get_test_requirements_from_tox_ini
from .resource_building import build_resources
from .versions import get_version, write_version_to_file
__all__ = [
'COMMANDS',
| 'build_resources',
'find_packages',
'get_exclude_patterns',
'get_long_description',
'get_test_requirements_from_tox_ini',
'get_version',
'set_exclude_patters',
'write_v | ersion_to_file',
]
|
USGSDenverPychron/pychron | pychron/dashboard/tasks/client/preferences.py | Python | apache-2.0 | 1,997 | 0.001002 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Bool, Str, Int
from traitsui.api import View, Item, VGroup
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper
class ExperimentDashboardClientPreferences(BasePreferencesHelper):
preferences_path = "pychron.dashboard.experiment"
use_dashboard_client = Bool
class ExperimentDashboardClientPreferencesPane(PreferencesPane):
model_factory = ExperimentDashboardClientPreferences
category = "Experiment"
def traits_view(self):
v = View(Item("use_dashboard_cli | ent"))
return v
class DashboardClientPreferences(BasePreferencesHelper):
preferences_path = "pychron.dashboard.client"
host = Str
port = Int
class DashboardClientPreferencesPane(PreferencesPane):
model_factory = DashboardClientPreferences
category = "Dashboard"
def traits_view(self):
v = View(
VGroup(
Item("host"), Item("port"), show_border=True, label="Dashboard Server"
) |
)
return v
# ============= EOF =============================================
|
dogebuild/dogebuild-hello | dogebuild_hello/loader.py | Python | mit | 74 | 0.013514 | from dogebuild_hello.h | ello import Hello
def get() | :
return Hello()
|
oxc/Flexget | flexget/plugins/cli/try_regexp.py | Python | mit | 2,592 | 0.001929 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from flexget import options, plugin
from flexget.event import event
from flexget.terminal import console
log = logging.getLogger('try_regexp')
class PluginTryRegexp(object):
"""
This plugin allows user to test regexps for a task.
"""
def __init__(self):
self.abort = False
def matches(self, entry, regexp):
"""Return True if any of the entry string fields match given regexp"""
import re
for field, value in entry.items():
if not isinstance(value, basestring):
continue
if re.search(regexp, value, re.IGNORECASE | re.UNICODE):
return (True, field)
return (False, None)
def on_task_filter(self, task, config):
if not task.options.try_regexp:
return
if self.abort:
return
console('-' * 79)
console('Hi there, welcome to try regexps in realtime!')
console('Press ^D or type \'exit\' to continue. Type \'continue\' to continue non-interactive execution.')
console('Task \'%s\' has %s entries, enter regexp to see what matches it.' % (task.name, len(task.entries)))
while (True):
try:
s = input('--> ')
if s == 'exit':
break
if s == 'abort' or s == 'continue':
self.abort = True
break
except EOFError:
break
count = 0
for entry in task.entries:
try:
match, field = self.matches(entry, s)
if match:
console('Title: %-40s URL: %-30s From: %s' % (entry['title'], entry['url'], field))
count += 1
except re.error:
console('Invalid regular expression')
break
console('%s of %s entries matched' % ( | count, len(task.entries)))
console('Bye!')
@event('plugin.register')
def register_plugin():
plugin.register(PluginTryRegexp, '--try-regexp', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
opt | ions.get_parser('execute').add_argument('--try-regexp', action='store_true', dest='try_regexp', default=False,
help='try regular expressions interactively')
|
OmnesRes/pan_cancer | paper/tables/S1/table_creation.py | Python | mit | 5,368 | 0.062221 | ##A script for creating tables for each cancer, with the data sorted
def compare(first,second):
if float(first[-2])>float(second[-2]):
return 1
elif float(first[-2])<float(second[-2]):
return -1
else:
return 0
## Load necessary modules
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
##need to get the gene ids from a RNA-SEQV2 file, any file will work
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna','unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results'))
f.readline()
id_to_gene={}
data=[i.split()[0] for i in f]
for i in data:
id_to_gene[i.split('|')[1]]=i.split('|')[0]
##load the data that will be in the table for each cancer and add ids
f=open(os.path.join(BASE_DIR,'cox_regression','BLCA','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
BLCA=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','BRCA','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
BRCA=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
CESC=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','COAD','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
COAD=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','GBM','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
GBM=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','HNSC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
HNSC=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
KIRC=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','KIRP','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
KIRP=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','LAML','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
LAML=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
LGG=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
LIHC=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','LUAD','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
LUAD=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','LUSC','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() fo | r i in f]
i | ds,coeffs,normalized,pvalues,adjusted=zip(*data)
LUSC=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','OV','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
OV=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','SKCM','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
SKCM=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
f=open(os.path.join(BASE_DIR,'cox_regression','STAD','coeffs_normalized_pvalues_adjusted.txt'))
data=[i.strip().split() for i in f]
ids,coeffs,normalized,pvalues,adjusted=zip(*data)
STAD=zip(ids,[id_to_gene[i] for i in ids],coeffs,normalized,pvalues,adjusted)
all_cancers=[BLCA,LGG,BRCA,CESC,COAD,GBM,HNSC,KIRC,KIRP,LAML,LIHC,LUAD,LUSC,SKCM,OV,STAD]
names=['BLCA','LGG','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LIHC','LUAD','LUSC','SKCM','OV','STAD']
for i,j in zip(names,all_cancers):
f=open(i+'.txt','w')
for k in sorted(j,cmp=compare):
for l in k:
f.write(l)
f.write('\t')
f.write('\n')
f.close()
|
nkripper/pynet | week2/snmp_helper.py | Python | apache-2.0 | 6,852 | 0.002043 | # Requires the pysnmp4 library
# Example usage (SNMPv1/SNMPv2c):
# >>> from snmp_helper import snmp_get_oid,snmp_extract
# >>>
# >>> COMMUNITY_STRING = '<COMMUNITY>'
# >>> SNMP_PORT = 161
# >>> a_device = ('1.1.1.1', COMMUNITY_STRING, SNMP_PORT)
# Use the MIB-2 sysDescr as a test
# >>> snmp_data = snmp_get_oid(a_device, oid='.1.3.6.1.2.1.1.1.0', display_errors=True)
# >>> snmp_data
# [(MibVariable(ObjectName(1.3.6.1.2.1.1.1.0)), DisplayString(hexValue='436973636f
# 20494f5320536f6674776172652c204338383020536f667477617265202843383830444154412d55
# 4e4956455253414c4b392d4d292c2056657273696f6e2031352e302831294d342c2052454c454153
# 4520534f4654574152452028666331290d0a546563686e6963616c20537570706f72743a20687474
# 703a2f2f7777772e636973636f2e636f6d2f74656368737570706f72740d0a436f70797269676874
# 2028632920313938362d3230313020627920436973636f2053797374656d732c20496e632e0d0a43
# 6f6d70696c6564204672692032392d4f63742d31302030303a30322062792070726f645f72656c5f
# 7465616d'))]
# >>> output = snmp_extract(snmp_data)
# >>> print output
# Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2010 by Cisco Systems, Inc.
# Compiled Fri 29-Oct-10 00:02 by prod_rel_team
# Example usage (SNMPv3):
# >>> from snmp_helper import snmp_get_oid_v3,snmp_extract
# >>>
# >>> snmp_device = ('10.10.10.10', 161)
# >>> a_user = <snmpv3_user>
# >>> auth_key = <snmpv3_auth_key>
# >>> encrypt_key = <snmpv3_encrypt_key>
# >>> snmp_user = (a_user, auth_key, encrypt_key)
# OID to query
# >>> sys_descr = '1.3.6.1.2.1.1.1.0'
# Defaults to using AES128 and SHA1
# >> snmp_data = snmp_get_oid_v3(snmp_device, snmp_user, oid=sys_descr)
# >>> output = snmp_extract(snmp_data)
# >>> print output
# Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.4(2)T1, RELEASE SOFTWARE (fc3)
# Technical Support: http://www.cisco.com/techsupport
# Copyright (c) 1986-2014 by Cisco Systems, Inc.
# Compiled Thu 26-Jun-14 14:15 by prod_rel_team
from __future__ import print_function
from pysnmp.entity.rfc3413.oneliner import cmdgen
def snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha',
encrypt_proto='aes128', display_errors=True):
'''
Retrieve the given OID
Default OID is MIB2, sysDescr
snmp_device is a tuple = (hostname_or_IP, snmp_port)
snmp_user is a tuple = (user_name, auth_key, encrypt_key)
Defaults to SHA1-AES128 for authentication + encryption
auth_proto can be 'sha' or 'md5' or 'none'
encrypt_proto can be 'aes128', 'aes192', 'aes256', '3des', 'des', or 'none'
From PySNMP manuals: http://pysnmp.sourceforge.net/docs/current/security-configuration.html
Optional authProtocol parameter may be used to specify non-default hash function algorithm.
Possible values include:
usmHMACMD5AuthProtocol -- MD5-based authentication protocol
usmHMACSHAAuthProtocol -- SHA-based authentication protocol
usmNoAuthProtocol -- no authentication to use (default)
Optional privProtocol parameter may be used to specify non-default ciphering algorithm.
Possible values include:
usmDESPrivProtocol -- DES-based encryption protocol
usmAesCfb128Protocol -- AES128-based encryption protocol (RFC3826)
usm3DESEDEPrivProtocol -- triple DES-based encryption protocol (Extended Security Options)
usmAesCfb192Protocol -- AES192-based encryption protocol (Extended Security Options)
usmAesCfb256Protocol -- AES256-based encryption protocol (Extended Security Options)
usmNoPrivProtocol -- no encryption to use (default)
'''
# unpack snmp_user
a_user, auth_key, encrypt_key = snmp_user
auth_proto_map = {
'sha': cmdgen.usmHMACSHAAuthProtocol,
'md5': cmdgen.usmHMACMD5AuthProtocol,
'none': cmdgen.usmNoAuthProtocol
}
if auth_proto in auth_proto_map.keys():
auth_protocol = auth_proto_map[auth_proto]
else:
raise ValueError("Invalid authentication protocol specified: %s" % auth_proto)
encrypt_proto_map = {
'des': cmdgen.usmDESPrivProtocol,
'3des': cmdgen.usm3DESEDEPrivProtocol,
'aes128': cmdgen.usmAesCfb128Protocol,
'aes192': cmdgen.usmAesCfb192Protocol,
'aes256': cmdgen.usmAesCfb256Protocol,
'none': cmdgen.usmNoPrivProtocol,
}
if encrypt_proto in encrypt_proto_ma | p.keys():
encrypt_protocol = encrypt_proto_map[encrypt_proto]
else:
raise ValueError("Invalid encryption protocol specified: %s" % encrypt_proto)
# Create a PYSNMP cmdgen object
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(a_user, auth_key, | encrypt_key,
authProtocol=auth_protocol,
privProtocol=encrypt_protocol, ),
cmdgen.UdpTransportTarget(snmp_device),
oid,
lookupNames=True, lookupValues=True
)
if not error_detected:
return snmp_data
else:
if display_errors:
print('ERROR DETECTED: ')
print(' %-16s %-60s' % ('error_message', error_detected))
print(' %-16s %-60s' % ('error_status', error_status))
print(' %-16s %-60s' % ('error_index', error_index))
return None
def snmp_get_oid(a_device, oid='.1.3.6.1.2.1.1.1.0', display_errors=False):
'''
Retrieve the given OID
Default OID is MIB2, sysDescr
a_device is a tuple = (a_host, community_string, snmp_port)
'''
a_host, community_string, snmp_port = a_device
snmp_target = (a_host, snmp_port)
# Create a PYSNMP cmdgen object
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.CommunityData(community_string),
cmdgen.UdpTransportTarget(snmp_target),
oid,
lookupNames=True, lookupValues=True
)
if not error_detected:
return snmp_data
else:
if display_errors:
print('ERROR DETECTED: ')
print(' %-16s %-60s' % ('error_message', error_detected))
print(' %-16s %-60s' % ('error_status', error_status))
print(' %-16s %-60s' % ('error_index', error_index))
return None
def snmp_extract(snmp_data):
'''
Unwrap the SNMP response data and return in a readable format
Assumes only a single list element is returned
'''
if len(snmp_data) > 1:
raise ValueError("snmp_extract only allows a single element")
if len(snmp_data) == 0:
return None
else:
# Unwrap the data which is returned as a tuple wrapped in a list
return snmp_data[0][1].prettyPrint()
|
LLNL/spack | var/spack/repos/builtin/packages/openfoam-org/package.py | Python | lgpl-2.1 | 16,158 | 0.001176 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Original Author: Mark Olesen <mark.olesen@esi-group.com>
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - The openfoam-org package is a modified version of the openfoam package.
# If changes are needed here, consider if they should also be applied there.
#
# - mpi handling: WM_MPLIB=SYSTEMMPI and populate prefs.{csh,sh} with values
# from spack.
#
# - Building with boost/cgal is not included, since some of the logic is not
# entirely clear and thus untested.
# - Resolution of flex, zlib needs more attention (within OpenFOAM)
#
# Known issues
# - Combining +zoltan with +int64 has not been tested, but probably won't work.
# - Combining +mgridgen with +int64 or +float32 probably won't work.
#
##############################################################################
import glob
import os
import re
import llnl.util.tty as tty
from spack import *
from spack.pkg.builtin.openfoam import (
OpenfoamArch,
add_extra_files,
mplib_content,
rewrite_environ_files,
write_environ,
)
from spack.util.environment import EnvironmentModifications
class OpenfoamOrg(Package):
"""OpenFOAM is a GPL-opensource C++ CFD-toolbox.
The openfoam.org release is managed by the OpenFOAM Foundation Ltd as
a licensee of the OPENFOAM trademark.
This offering is not approved or endorsed by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
"""
homepage = "https://www.openfoam.org/"
baseurl = "https://github.com/OpenFOAM"
url = "https://github.com/OpenFOAM/OpenFOAM-4.x/archive/version-4.1.tar.gz"
git = "https://github.com/OpenFOAM/OpenFOAM-dev.git"
version('develop', branch='master')
version('8', sha256='94ba11cbaaa12fbb5b356e01758df403ac8832d69da309a5d79f76f42eb008fc',
url=baseurl + '/OpenFOAM-8/archive/version-8.tar.gz')
version('7', sha256='12389cf092dc032372617785822a597aee434a50a62db2a520ab35ba5a7548b5',
url=baseurl + '/OpenFOAM-7/archive/version-7.tar.gz')
version('6', sha256='32a6af4120e691ca2df29c5b9bd7bc7a3e11208947f9bccf6087cfff5492f025',
url=baseurl + '/OpenFOAM-6/archive/version-6.tar.gz')
version('5.0', sha256='9057d6a8bb9fa18802881feba215215699065e0b3c5cdd0c0e84cb29c9916c89',
url=baseurl + '/OpenFOAM-5.x/archive/version-5.0.tar.gz')
version('4.1', sha256='2de18de64e7abdb1b649ad8e9d2d58b77a2b188fb5bcb6f7c2a038282081fd31',
url=baseurl + '/OpenFOAM-4.x/archive/version-4.1.tar.gz')
version('2.4.0', sha256='9529aa7441b64210c400c019dcb2e0410fcfd62a6f62d23b6c5994c4753c4465',
url=baseurl + '/OpenFOAM-2.4.x/archive/version-2.4.0.tar.gz')
version('2.3.1', sha256='2bbcf4d5932397c2087a9b6d7eeee6d2b1350c8ea4f455415f05e7cd94d9e5ba',
url='http://downloads.sourceforge.net/foam/OpenFOAM-2.3.1.tgz')
variant('int64', default=False,
description='Compile with 64-bit label')
variant('float32', default=False,
description='Compile with 32-bit scalar (single-precision)')
variant('source', default=True,
description='Install library/application sources and tutorials')
variant('metis', default=False,
description='With metis decomposition')
depends_on('mpi')
depends_on('zlib')
depends_on('flex')
depends_on('cmake', type='build')
# Require scotch with ptscotch - corresponds to standard OpenFOAM setup
depends_on('scotch~metis+mpi~int64', when='~int64')
depends_on('scotch~metis+mpi+int64', when='+int64')
depends_on('metis@5:', when='+metis')
depends_on('metis+int64', when='+metis+int64')
# General patches - foamEtcFile as per openfoam.com (robuster)
common = ['spack-Allwmake', 'README-spack']
assets = ['bin/foamEtcFile']
# Version-specific patches
patch('https://github.com/OpenFOAM/OpenFOAM-7/commit/ef33cf38ac9b811072a8970c71fbda35a90f6641.patch',
| sha256='73103e6b1bdbf3b1e0d517cbbd11562e98c6e9464df5f43e5125e9a5b457d1c5', when='@7')
patch('50-etc.patch', when='@5.0:5.9')
patch('41-etc.patch', when='@4 | .1')
patch('41-site.patch', when='@4.1:')
patch('240-etc.patch', when='@:2.4.0')
patch('isnan.patch', when='@:2.4.0')
# Add support for SYSTEMMPI
patch('https://github.com/OpenFOAM/OpenFOAM-2.3.x/commit/ae9a670c99472787f3a5446ac2b522bf3519b796.patch',
sha256='6c4c535baca3ce64035d512265c4ce8effd39de7602c923c5e19985db68b632a', when='@:2.3.1')
# The openfoam architecture, compiler information etc
_foam_arch = None
# Content for etc/prefs.{csh,sh}
etc_prefs = {}
# Content for etc/config.{csh,sh}/ files
etc_config = {}
phases = ['configure', 'build', 'install']
build_script = './spack-Allwmake' # <- Added by patch() method.
#
# - End of definitions / setup -
#
# Some user config settings
@property
def config(self):
settings = {
# Use SYSTEMMPI since openfoam-org doesn't have USERMPI
'mplib': 'SYSTEMMPI',
# Add links into bin/, lib/ (eg, for other applications)
'link': False,
}
# OpenFOAM v2.4 and earlier lacks WM_LABEL_OPTION
if self.spec.satisfies('@:2.4'):
settings['label-size'] = False
return settings
def setup_run_environment(self, env):
bashrc = self.prefix.etc.bashrc
try:
env.extend(EnvironmentModifications.from_sourcing_file(
bashrc, clean=True
))
except Exception as e:
msg = 'unexpected error when sourcing OpenFOAM bashrc [{0}]'
tty.warn(msg.format(str(e)))
def setup_dependent_build_environment(self, env, dependent_spec):
"""Location of the OpenFOAM project directory.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
env.set('FOAM_PROJECT_DIR', self.projectdir)
def setup_dependent_run_environment(self, env, dependent_spec):
"""Location of the OpenFOAM project directory.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
env.set('FOAM_PROJECT_DIR', self.projectdir)
@property
def projectdir(self):
"""Absolute location of project directory: WM_PROJECT_DIR/"""
return self.prefix # <- install directly under prefix
@property
def foam_arch(self):
if not self._foam_arch:
self._foam_arch = OpenfoamOrgArch(self.spec, **self.config)
return self._foam_arch
@property
def archbin(self):
"""Relative location of architecture-specific executables"""
return join_path('platforms', self.foam_arch, 'bin')
@property
def archlib(self):
"""Relative location of architecture-specific libraries"""
return join_path('platforms', self.foam_arch, 'lib')
def rename_source(self):
"""This is fairly horrible.
The github tarfiles have weird names that do not correspond to the
canonical name. We need to rename these, but leave a symlink for
spack to work with.
"""
# Note that this particular OpenFOAM requires absolute directories
# to build correctly!
parent = os.path. |
leapcode/soledad | src/leap/soledad/client/_db/blobs/sync.py | Python | gpl-3.0 | 7,065 | 0 | # -*- coding: utf-8 -*-
# sync.py
# Copyright (C) 2017 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Synchronization between blobs client/server
"""
from collections import defaultdict
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.internet import error
from .sql import SyncStatus
from .errors import RetriableTransferError
logger = Logger()
def sleep(seconds):
d = defer.Deferred()
reactor.callLater(seconds, d.callback, None)
return d
MAX_WAIT = 60 # In seconds. Max time between retries
@def | er.inlineCallbacks
def with_retry(func, *args, **kwargs):
"""
Run func repeatedly until success, as long as the exception raised is
a "retriable error". If an exception of another kind is raised by func,
the retrying stops and that exception is propagated up the stack.
"""
retry_wai | t = 1
retriable_errors = (error.ConnectError, error.ConnectionClosed,
RetriableTransferError,)
while True:
try:
yield func(*args, **kwargs)
break
except retriable_errors:
yield sleep(retry_wait)
retry_wait = min(retry_wait + 10, MAX_WAIT)
class BlobsSynchronizer(object):
def __init__(self):
self.locks = defaultdict(defer.DeferredLock)
@defer.inlineCallbacks
def refresh_sync_status_from_server(self, namespace=''):
d1 = self.remote_list(namespace=namespace)
d2 = self.local_list(namespace=namespace)
remote_list, local_list = yield defer.gatherResults([d1, d2])
pending_download_ids = tuple(set(remote_list) - set(local_list))
pending_upload_ids = tuple(set(local_list) - set(remote_list))
yield self.local.update_batch_sync_status(
pending_download_ids,
SyncStatus.PENDING_DOWNLOAD,
namespace=namespace)
yield self.local.update_batch_sync_status(
pending_upload_ids,
SyncStatus.PENDING_UPLOAD,
namespace=namespace)
@defer.inlineCallbacks
def _apply_deletions_from_server(self, namespace=''):
remote_deletions = self.remote_list(namespace=namespace, deleted=True)
remote_deletions = yield remote_deletions
yield self.local.batch_delete(remote_deletions)
yield self.local.update_batch_sync_status(
remote_deletions,
SyncStatus.SYNCED,
namespace=namespace)
def send_missing(self, namespace=''):
"""
Compare local and remote blobs and send what's missing in server.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all local blobs were sent to
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['send_missing']
d = lock.run(self._send_missing, namespace)
return d
@defer.inlineCallbacks
def _send_missing(self, namespace):
# the list of priorities must be refreshed every time a new blob will
# be transferred. To do that, we use a semaphore and get a new ordered
# list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._send_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _send_next(self, namespace, scheduled):
status = SyncStatus.PENDING_UPLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob uploads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Sending blob: %s" % (blob_id,))
yield with_retry(self._send, blob_id, namespace)
defer.returnValue(True)
def fetch_missing(self, namespace=''):
"""
Compare local and remote blobs and fetch what's missing in local
storage.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all remote blobs were received from
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['fetch_missing']
d = lock.run(self._fetch_missing, namespace)
return d
@defer.inlineCallbacks
def _fetch_missing(self, namespace=''):
# the list of priorities must be refreshed every time a new blob will
# be transferred. To do that, we use a semaphore and get a new ordered
# list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._fetch_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _fetch_next(self, namespace, scheduled):
status = SyncStatus.PENDING_DOWNLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob downloads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Fetching blob: %s" % (blob_id,))
yield with_retry(self._fetch, blob_id, namespace)
defer.returnValue(True)
@defer.inlineCallbacks
def sync(self, namespace=''):
    """Run a full blob synchronization cycle for ``namespace``: apply
    remote deletions, refresh sync status, then fetch and send what is
    missing on either side."""
    try:
        yield self._apply_deletions_from_server(namespace)
        yield self.refresh_sync_status_from_server(namespace)
        yield self.fetch_missing(namespace)
        yield self.send_missing(namespace)
    except defer.FirstError as e:
        # unwrap the first sub-failure of a gathered deferred so callers
        # see the underlying exception rather than FirstError
        e.subFailure.raiseException()
@property
def sync_progress(self):
    """Current synchronization progress, as tracked by local storage."""
    return self.local.get_sync_progress()
|
agrawalabhishek/NAOS | python/plotEllipsoidSurfaceAccelerationLatLong.py | Python | mit | 4,657 | 0.016964 | '''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
from scipy | .interpolate import griddata
import math
# System
import sys
import time
from tqdm import tqdm
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
| from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.basemap import Basemap
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.tri as tri
# Print the startup banner (Python 2 print statements).
print ""
print "---------------------------------------------------------------------------------"
print "                                 NAOS                                            "
print "                                                                                 "
print "         Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com)        "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
## Operations
# Read data in csv file. data returned as a panda series.
data = pd.read_csv( '../data/ellipsoidSurfaceAcceleration.csv' )
# data = pd.read_csv( '../data/nondimensionalellipsoidSurfaceAcceleration.csv' )
# Gravitational acceleration components (Ux, Uy, Uz), magnitude (U) and
# the surface coordinates at which they were computed.
Ux = data['Ux'].values
Uy = data['Uy'].values
Uz = data['Uz'].values
U = data['U'].values
print U
latitude = data['latitude'].values
longitude = data['longitude'].values
## Plot magnitude of acceleration due to gravity on surface of an ellipsoid using scatter map
fig = plt.figure()
ax1 = fig.add_subplot(111)
# NOTE(review): x data is `latitude` but the x-axis label below says
# 'Longitude' (and vice versa) -- the axes appear swapped; confirm intent
# against the contour plot further down before changing anything.
scatterPlotHeat = ax1.scatter( latitude, longitude, c=U )
cbar = plt.colorbar( scatterPlotHeat, cmap = cm.jet )
cbar.ax.set_ylabel( 'Gravitational Acceleration [m/s^2]' )
ax1.set_xlim( latitude.min(), latitude.max() )
ax1.set_ylim( longitude.min(), longitude.max() )
# plain (non-offset) tick labels on both axes
formatter = matplotlib.ticker.ScalarFormatter( useOffset=False )
ax1.xaxis.set_major_formatter( formatter )
ax1.yaxis.set_major_formatter( formatter )
ax1.get_yaxis().set_tick_params( direction='out' )
ax1.get_xaxis().set_tick_params( direction='out' )
ax1.set_ylabel( 'Latitude [deg]' )
ax1.set_xlabel( 'Longitude [deg]' )
ax1.set_title( 'Gravitational acceleration at ellipsoid surface (Eros)' )
# plt.ticklabel_format( style = 'sci', axis = 'x', scilimits = ( 0, 0 ) )
plt.grid()
## Plot magnitude of acceleration due to gravity on surface of an ellipsoid using contourf
fig = plt.figure()
ax1 = fig.add_subplot(111)
# find number of unique latitudes and longitudes
numberOfLatitudes = len( data['latitude'].unique() )
numberOfLongitudes = len( data['longitude'].unique() )
# make 2D arrays without changing data
# assumes rows are ordered as a full latitude x longitude grid -- TODO confirm
y = latitude.reshape( numberOfLatitudes, numberOfLongitudes )
x = longitude.reshape( numberOfLatitudes, numberOfLongitudes )
z = U.reshape( numberOfLatitudes, numberOfLongitudes )
contourHeatPlot = plt.contourf( y, x, z, cmap=cm.jet )
cbar = plt.colorbar( contourHeatPlot, cmap=cm.jet )
cbar.ax.set_ylabel( 'Gravitational Acceleration [m/s^2]' )
ax1.set_xlim( latitude.min(), latitude.max() )
ax1.set_ylim( longitude.min(), longitude.max() )
formatter = matplotlib.ticker.ScalarFormatter( useOffset=False )
ax1.xaxis.set_major_formatter( formatter )
ax1.yaxis.set_major_formatter( formatter )
ax1.get_yaxis().set_tick_params( direction='out' )
ax1.get_xaxis().set_tick_params( direction='out' )
ax1.set_ylabel( 'Latitude [deg]' )
ax1.set_xlabel( 'Longitude [deg]' )
ax1.set_title( 'Gravitational acceleration at ellipsoid surface (Eros)' )
# plt.ticklabel_format( style = 'sci', axis = 'x', scilimits = ( 0, 0 ) )
plt.grid()
plt.show()
# Alternative renderings, kept for reference:
# fig = plt.figure()
# ax1 = fig.add_subplot(111)
# heatmap = plt.pcolor( x, y, z, cmap=cm.jet )
# plt.show()
# fig = plt.figure()
# ax1 = fig.gca( projection = '3d' )
# heatmap = ax1.plot_surface( y, x, z, cmap=cm.jet )
# plt.show()
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print "             Exited successfully!                                 "
print "------------------------------------------------------------------"
print ""
|
zygh0st/terminator | terminatorlib/util.py | Python | gpl-2.0 | 10,216 | 0.003328 | #!/usr/bin/python
# Terminator.util - misc utility functions
# Copyright (C) 2006-2010 cmsj@tenshu.net
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Terminator.util - misc utility functions
>>> a = {'foo': 'bar', 'baz': 'bjonk'}
>>> b = {'foo': 'far', 'baz': 'bjonk'}
>>> dict_diff(a, b)
{'foo': 'far'}
"""
import sys
import gtk
import os
import pwd
import inspect
import uuid
import subprocess
# set this to True to enable debugging output on stderr
DEBUG = False
# set this to True to additionally show file name and line number in debugging
DEBUGFILES = False
# list of classes to show debugging for. empty list means show all classes
DEBUGCLASSES = []
# list of methods to show debugging for. empty list means show all methods
DEBUGMETHODS = []
def dbg(log = ""):
    """Print a message to stderr if debugging is enabled.

    The caller's class and method names are recovered by stack
    introspection so output can be filtered via DEBUGCLASSES and
    DEBUGMETHODS, and optionally tagged with file/line when DEBUGFILES
    is set.
    """
    if DEBUG:
        stackitem = inspect.stack()[1]
        parent_frame = stackitem[0]
        method = parent_frame.f_code.co_name
        names, varargs, keywords, local_vars = inspect.getargvalues(parent_frame)
        try:
            # by convention the first argument is 'self'; use its class name
            self_name = names[0]
            classname = local_vars[self_name].__class__.__name__
        except IndexError:
            # caller is a plain function, not a bound method
            classname = "noclass"
        if DEBUGFILES:
            line = stackitem[2]
            filename = parent_frame.f_code.co_filename
            extra = " (%s:%s)" % (filename, line)
        else:
            extra = ""
        if DEBUGCLASSES != [] and classname not in DEBUGCLASSES:
            return
        if DEBUGMETHODS != [] and method not in DEBUGMETHODS:
            return
        try:
            print >> sys.stderr, "%s::%s: %s%s" % (classname, method, log, extra)
        except IOError:
            # stderr may be closed (broken pipe); drop the message silently
            pass
def err(log = ""):
    """Print an error message to stderr, ignoring broken-pipe errors."""
    try:
        print >> sys.stderr, log
    except IOError:
        pass
def gerr(message = None):
    """Display a graphical error. This should only be used for serious
    errors as it will halt execution"""
    # modal dialog: blocks until the user dismisses it
    dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
                               gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, message)
    dialog.run()
    dialog.destroy()
def has_ancestor(widget, wtype):
    """Return True if any ancestor of widget is an instance of wtype.

    Walks the widget's parent chain upwards; the widget itself is not
    checked.
    """
    current = widget
    while current:
        current = current.get_parent()
        if isinstance(current, wtype):
            return True
    return False
def path_lookup(command):
    """Find a command in our path.

    Absolute paths are returned as-is when the file exists; './'-relative
    paths are resolved against the current directory; otherwise each
    directory in $PATH (or a built-in fallback list) is searched.
    Returns the resolved path, or None when the command cannot be found.
    """
    if os.path.isabs(command):
        if os.path.isfile(command):
            return(command)
        else:
            return(None)
    elif command[:2] == './' and os.path.isfile(command):
        dbg('path_lookup: Relative filename %s found in cwd' % command)
        return(command)

    try:
        paths = os.environ['PATH'].split(':')
        if len(paths[0]) == 0:
            raise(ValueError)
    except (KeyError, ValueError, NameError):
        # KeyError: $PATH is missing entirely; ValueError: $PATH is empty.
        # Previously only ValueError/NameError were caught, so an unset
        # PATH raised an uncaught KeyError instead of using the fallbacks
        # this message promises.
        dbg('path_lookup: PATH not set in environment, using fallbacks')
        paths = ['/usr/local/bin', '/usr/bin', '/bin']

    dbg('path_lookup: Using %d paths: %s' % (len(paths), paths))

    for path in paths:
        target = os.path.join(path, command)
        if os.path.isfile(target):
            dbg('path_lookup: found %s' % target)
            return(target)

    dbg('path_lookup: Unable to locate %s' % command)
def shell_lookup():
    """Return a usable shell for the current user.

    The user's passwd-database shell is tried first, followed by a list
    of common shells; bare names are resolved through path_lookup().
    """
    try:
        usershell = pwd.getpwuid(os.getuid())[6]
    except KeyError:
        usershell = None

    for candidate in [usershell, 'bash', 'zsh', 'tcsh', 'ksh', 'csh', 'sh']:
        if candidate is None:
            continue
        if os.path.isfile(candidate):
            return(candidate)
        resolved = path_lookup(candidate)
        if resolved is not None:
            dbg('shell_lookup: Found %s at %s' % (candidate, resolved))
            return(resolved)
    dbg('shell_lookup: Unable to locate a shell')
def widget_pixbuf(widget, maxsize=None):
    """Generate a pixbuf snapshot of a widget, optionally scaled down.

    maxsize caps the longest edge of the result while preserving aspect
    ratio; the image is never scaled up. Returns None on GTK < 2.14
    (no get_snapshot support).
    """
    if gtk.gtk_version < (2, 14):
        return(None)
    pixmap = widget.get_snapshot()
    # NOTE(review): the original text was garbled here ("(w | idth" and
    # "max | ("); the obvious identifiers have been reconstructed --
    # confirm against upstream terminator.
    (width, height) = pixmap.get_size()
    pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
    pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(), 0, 0, 0, 0, width,
                             height)
    longest = max(width, height)
    if maxsize is not None:
        factor = float(maxsize) / float(longest)
    if not maxsize or (width * factor) > width or (height * factor) > height:
        # no cap requested, or scaling would enlarge: keep original size
        factor = 1
    scaledpixbuf = pixbuf.scale_simple(int(width * factor), int(height * factor), gtk.gdk.INTERP_BILINEAR)
    return(scaledpixbuf)
def get_config_dir():
    """Expand all the messy nonsense for finding where
    ~/.config/terminator really is, honouring $XDG_CONFIG_HOME."""
    configdir = os.environ.get('XDG_CONFIG_HOME')
    if configdir is None:
        configdir = os.path.join(os.path.expanduser('~'), '.config')
    dbg('Found config dir: %s' % configdir)
    return(os.path.join(configdir, 'terminator'))
def dict_diff(reference, working):
    """Return the entries of working whose values differ from the
    corresponding entries in reference.

    >>> a = {'foo': 'bar', 'baz': 'bjonk'}
    >>> b = {'foo': 'far', 'baz': 'bjonk'}
    >>> dict_diff(a, b)
    {'foo': 'far'}
    """
    return({key: working[key] for key in reference
            if reference[key] != working[key]})
# Helper functions for directional navigation
def get_edge(allocation, direction):
    """Return the edge coordinate of allocation that matters for
    directional navigation in the given direction."""
    if direction == 'left':
        return(allocation.x)
    if direction == 'up':
        return(allocation.y)
    if direction == 'right':
        return(allocation.x + allocation.width)
    if direction == 'down':
        return(allocation.y + allocation.height)
    raise ValueError('unknown direction %s' % direction)
def get_nav_possible(edge, allocation, direction):
    """Return True if allocation lies on the far side of edge in the
    given direction."""
    if direction == 'left':
        return(allocation.x + allocation.width <= edge)
    if direction == 'right':
        return(allocation.x >= edge)
    if direction == 'up':
        return(allocation.y + allocation.height <= edge)
    if direction == 'down':
        return(allocation.y >= edge)
    raise ValueError('Unknown direction: %s' % direction)
def get_nav_offset(edge, allocation, direction):
    """Work out how far edge is from a particular point on the allocation
    rectangle, in the given direction. Smaller values mean closer."""
    if direction == 'left':
        return(edge - (allocation.x + allocation.width))
    elif direction == 'right':
        return(edge + allocation.x)
    elif direction == 'up':
        # Fixed: measure from the allocation's bottom edge (y + height),
        # mirroring the 'left' case. The previous 'y - height' mis-ranked
        # candidates of differing heights.
        return(edge - (allocation.y + allocation.height))
    elif direction == 'down':
        return(edge + allocation.y)
    else:
        raise ValueError('Unknown direction: %s' % direction)
def get_nav_tiebreak(direction, cursor_x, cursor_y, rect):
    """We have multiple candidate terminals. Pick the closest by cursor
    position"""
    if direction in ('left', 'right'):
        return(rect.y <= cursor_y <= (rect.y + rect.height))
    if direction in ('up', 'down'):
        return(rect.x <= cursor_x <= (rect.x + rect.width))
    raise ValueError('Unknown direction: %s' % direction)
def enumerate_descendants(parent):
"""Walk all our children and build up a list of containers and
terminals"""
# FIXME: Does ha |
klmitch/python-jenkins | tests/test_plugins.py | Python | bsd-3-clause | 13,759 | 0.000145 | # Software License Agreement (BSD License)
#
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from mock import patch
from testscenarios.scenarios import multiply_scenarios
import jenkins
from jenkins import plugins
from tests.base import JenkinsTestBase
class JenkinsPluginsBase(JenkinsTestBase):
    """Shared canned fixtures for the plugin-manager test cases below."""

    # Mimics the JSON returned by Jenkins' pluginManager API for a
    # single installed plugin (the mailer plugin at version 1.5).
    plugin_info_json = {
        u"plugins":
        [
            {
                u"active": u'true',
                u"backupVersion": u'null',
                u"bundled": u'true',
                u"deleted": u'false',
                u"dependencies": [],
                u"downgradable": u'false',
                u"enabled": u'true',
                u"hasUpdate": u'true',
                u"longName": u"Jenkins Mailer Plugin",
                u"pinned": u'false',
                u"shortName": u"mailer",
                u"supportsDynamicLoad": u"MAYBE",
                u"url": u"http://wiki.jenkins-ci.org/display/JENKINS/Mailer",
                u"version": u"1.5"
            }
        ]
    }

    # Same plugin list, but with the mailer plugin bumped to 1.6 --
    # used to exercise cache-refresh behaviour.
    updated_plugin_info_json = {
        u"plugins":
        [
            dict(plugin_info_json[u"plugins"][0],
                 **{u"version": u"1.6"})
        ]
    }
class JenkinsPluginsInfoTest(JenkinsPluginsBase):
    """Tests for Jenkins.get_plugins_info() with a mocked HTTP layer."""

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_simple(self, jenkins_mock):
        jenkins_mock.return_value = json.dumps(self.plugin_info_json)

        # expected to return a list of plugins
        plugins_info = self.j.get_plugins_info()
        self.assertEqual(plugins_info, self.plugin_info_json['plugins'])
        self.assertEqual(
            jenkins_mock.call_args[0][0].get_full_url(),
            self.make_url('pluginManager/api/json?depth=2'))
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_return_none(self, jenkins_mock):
        # an empty plugin list should come back as an empty list
        empty_plugin_info_json = {u"plugins": []}
        jenkins_mock.return_value = json.dumps(empty_plugin_info_json)

        plugins_info = self.j.get_plugins_info()
        self.assertEqual(plugins_info, [])
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_depth(self, jenkins_mock):
        # a custom depth must be propagated into the request URL
        jenkins_mock.return_value = json.dumps(self.plugin_info_json)

        self.j.get_plugins_info(depth=1)
        self.assertEqual(
            jenkins_mock.call_args[0][0].get_full_url(),
            self.make_url('pluginManager/api/json?depth=1'))
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_raise_BadStatusLine(self, jenkins_mock):
        # low-level HTTP failures are wrapped in BadHTTPException
        jenkins_mock.side_effect = jenkins.BadStatusLine('not a valid status line')

        with self.assertRaises(jenkins.BadHTTPException) as context_manager:
            self.j.get_plugins_info()
        self.assertEqual(
            jenkins_mock.call_args[0][0].get_full_url(),
            self.make_url('pluginManager/api/json?depth=2'))
        self.assertEqual(
            str(context_manager.exception),
            'Error communicating with server[{0}/]'.format(self.base_url))
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_return_invalid_json(self, jenkins_mock):
        # unparsable responses are reported as JenkinsException
        jenkins_mock.return_value = 'not valid JSON'

        with self.assertRaises(jenkins.JenkinsException) as context_manager:
            self.j.get_plugins_info()
        self.assertEqual(
            jenkins_mock.call_args[0][0].get_full_url(),
            self.make_url('pluginManager/api/json?depth=2'))
        self.assertEqual(
            str(context_manager.exception),
            'Could not parse JSON info for server[{0}/]'.format(self.base_url))
        self._check_requests(jenkins_mock.call_args_list)

    @patch.object(jenkins.Jenkins, 'jenkins_open')
    def test_raise_HTTPError(self, jenkins_mock):
        # HTTP errors (e.g. auth failures) are wrapped in BadHTTPException
        jenkins_mock.side_effect = jenkins.HTTPError(
            self.make_url('job/pluginManager/api/json?depth=2'),
            code=401,
            msg="basic auth failed",
            hdrs=[],
            fp=None)

        with self.assertRaises(jenkins.BadHTTPException) as context_manager:
            self.j.get_plugins_info(depth=52)
        self.assertEqual(
            str(context_manager.exception),
            'Error communicating with server[{0}/]'.format(self.base_url))
        self._check_requests(jenkins_mock.call_args_list)
class JenkinsPluginInfoTest(JenkinsPluginsBase):
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_shortname(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected to return info on a single plugin
plugin_info = self.j.get_plugin_info("mailer")
self.assertEqual(plugin_info, self.plugin_info_json['plugins'][0])
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_longname(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected to return info on a single plugin
plugin_info = self.j.get_plugin_info("Jenkins Mailer Plugin")
self.assertEqual(plugin_info, | self.plugin_info_json['plugins'][0])
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_get_plugin_info_updated(self, jenkins_mock):
jenkins_mock.side_effect | = [
json.dumps(self.plugin_info_json),
json.dumps(self.updated_plugin_info_json)
]
j = jenkins.Jenkins(self.make_url(''), 'test', 'test')
plugins_info = j.get_plugins()
self.assertEqual(plugins_info["mailer"]["version"],
self.plugin_info_json['plugins'][0]["version"])
self.assertNotEqual(
plugins_info["mailer"]["version"],
self.updated_plugin_info_json['plugins'][0]["version"])
plugins_info = j.get_plugins()
self.assertEqual(
plugins_info["mailer"]["version"],
self.updated_plugin_info_json['plugins'][0]["version"])
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_return_none(self, jenkins_mock):
jenkins_mock.return_value = json.dumps(self.plugin_info_json)
# expected not to find bogus so should return None
plugin_info = self.j.get_plugin_info("bogus")
self.assertEqual(plugin_info, None)
self._check_requests(jenkins_mock.call_args_list)
@patch.object(jenkins.Jenkins, 'jenkins_open')
def test_depth(self, je |
rehassachdeva/pydsa | pydsa/queue.py | Python | bsd-3-clause | 786 | 0 | class queue(object):
"""
A queue is a container of objects (a linear collection)
that are inserted and removed according to the
first-in first-out (FIFO) principle.
>>> from | pydsa import queue
>>> q = queue()
>>> q.enqueue(5)
>>> q.enqueue(8)
>>> q.e | nqueue(19)
>>> q.dequeue()
5
"""
def __init__(self):
    # Backing store: the front of the queue is index 0 of this list.
    self.List = []
def isEmpty(self):
    """Return True if the queue holds no items."""
    return not self.List
def enqueue(self, item):
    """
    Add item to the back of the queue.
    """
    self.List.extend([item])
def dequeue(self):
    """
    Remove and return the item at the front of the queue.
    """
    front = self.List[0]
    del self.List[0]
    return front
def size(self):
    """
    Return the number of items currently in the queue.
    """
    item_count = len(self.List)
    return item_count
|
airportmarc/the416life | src/apps/library/apps.py | Python | mit | 89 | 0 | from django.apps import AppConfi | g
class LibraryConfig(AppConfig):
    """Django application configuration for the library app."""
    # Reconstructed from garbled source ("n | ame"): the app registry
    # label Django uses for this application.
    name = 'library'
|
WarrenWeckesser/scikits-image | doc/examples/plot_regional_maxima.py | Python | bsd-3-clause | 3,435 | 0.000291 | """
=========================
Filtering regional maxima
=========================
Here, we use morphological reconstruction to create a background image, which
we can subtract from the original image to isolate bright features (regional
maxima).
First we try reconstruction by dilation starting at the edges of the image. We
initialize a seed image to the minimum intensity of the image, and set its
border to be the pixel values in the original image. These maximal pixels will
get dilated in order to reconstruct the background image.
"""
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction
# Convert to float: Important for subtraction later which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image
dilated = reconstruction(seed, mask, method='dilation')
"""
Subtracting the dilated image leaves an image with just the coins and a flat,
black background, as shown below.
"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5), sharex=True, sharey=True)
ax1.imshow(image)
ax1.set_title('original image')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.set_title('dilated')
ax2.axis('off')
ax2.set_adjustable('box-forced')
ax3.imshow(image - dilated)
ax3.set_title('image - dilated')
ax3.axis('off')
ax3.set_adjustable('box-forced')
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
Although the features (i.e. the coins) are clearly isolated, the coins
surrounded by a bright background in the original image are dimmer in the
subtracted image. We can attempt to correct this using a different seed image.
Instead of creating a seed image with maxima along the image border, we can use
the features of the image itself to seed the reconstruction process. Here, the
seed image is the original image m | inus a | fixed value, ``h``.
"""
h = 0.4
seed = image - h
dilated = reconstruction(seed, mask, method='dilation')
hdome = image - dilated
"""
To get a feel for the reconstruction process, we plot the intensity of the
mask, seed, and dilated images along a slice of the image (indicated by red
line).
"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5))
yslice = 197
ax1.plot(mask[yslice], '0.5', label='mask')
ax1.plot(seed[yslice], 'k', label='seed')
ax1.plot(dilated[yslice], 'r', label='dilated')
ax1.set_ylim(-0.2, 2)
ax1.set_title('image slice')
ax1.set_xticks([])
ax1.legend()
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.axhline(yslice, color='r', alpha=0.4)
ax2.set_title('dilated')
ax2.axis('off')
ax3.imshow(hdome)
ax3.axhline(yslice, color='r', alpha=0.4)
ax3.set_title('image - dilated')
ax3.axis('off')
fig.tight_layout()
plt.show()
"""
.. image:: PLOT2RST.current_figure
As you can see in the image slice, each coin is given a different baseline
intensity in the reconstructed image; this is because we used the local
intensity (shifted by ``h``) as a seed value. As a result, the coins in the
subtracted image have similar pixel intensities. The final result is known as
the h-dome of an image since this tends to isolate regional maxima of height
``h``. This operation is particularly useful when your images are unevenly
illuminated.
"""
|
EDITD/pyinfra-kubernetes | pyinfra_kubernetes/__init__.py | Python | mit | 1,938 | 0 | from pyinfra.api import deploy
from .configure import configure_kubeconfig, configure_kubernetes_component
from .install import install_kubernetes
@deploy('Deploy Kubernetes master')
def deploy_kubernetes_master(
state, host,
etcd_nodes,
kube_apiserver_kwargs=None,
kube_scheduler_kwargs=None,
kube_controller_manager_kwargs=None,
):
# Install server components
install_kubernetes(
state, host,
components=(
'kube-apiserver', 'kube-scheduler', 'kube-controller-manager',
),
)
# Configure the API server, passing in our etcd nodes
configure_kubernetes_component(
state, host,
'kube-apiserver',
etcd_nodes=etcd_nodes,
service_kwargs=kube_apiserver_kwargs,
)
# Setup the kube-scheduler service/config
configure_kubernetes_component(
state, host,
'kube-scheduler',
| service_kwargs=kube_scheduler_kwargs,
)
# Setup the kube-controller-manager service/config
configure_kubernetes_component(
state, host,
'kube-controller-manager',
service_kwargs | =kube_controller_manager_kwargs,
)
@deploy('Deploy Kubernetes node')
def deploy_kubernetes_node(
state, host,
master_address,
kubelet_kwargs=None,
kube_proxy_kwargs=None,
):
# Install node components
install_kubernetes(
state, host,
components=(
'kubelet', 'kube-proxy',
),
)
# Setup the kubeconfig for kubelet & kube-proxy to use
configure_kubeconfig(
state, host,
master_address,
)
# Setup the kubelet service/config
configure_kubernetes_component(
state, host,
'kubelet',
service_kwargs=kubelet_kwargs,
)
# Setup the kube-proxy service/config
configure_kubernetes_component(
state, host,
'kube-proxy',
service_kwargs=kube_proxy_kwargs,
)
|
ttsui/guppy | src/guppy.py | Python | gpl-2.0 | 21,484 | 0.03612 | #!/usr/bin/env python
## guppy.py - Main program
## Copyright (C) 2005 Tony Tsui <tsui.tony@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
import os
import stat
import time
import string
import math
import gtk
import gtk.glade
import gobject
import locale
import gettext
import puppy
APP_NAME = 'guppy'
def humanReadableSize(size):
div_count = 0
new_size = size
prev_size = new_size
while new_size > 0 and new_size > 1000:
div_count += 1
prev_size = new_size
new_size = new_size/1024
if prev_size > 1000:
# Divide by float for more precise number
human_size = "%.1f" % (prev_size / 1024.0)
else:
human_size = str(prev_size)
if div_count == 1:
human_size += ' KB'
elif div_count == 2:
human_size += ' MB'
elif div_count == 3:
human_size += ' GB'
else:
human_size += ' B'
return human_size
def convertToBytes(size):
size, unit = size.split()
size = float(size)
if unit == 'GB':
size = size * math.pow(1024, 3)
elif unit == 'MB':
size = size * math.pow(1024, 2)
elif unit == 'KB':
size = size * 1024
return size
class FileSystemModel(gtk.ListStore):
TYPE_COL, ICON_COL, NAME_COL, DATE_COL, SIZE_COL = range(5)
def __init__(self):
self.current_dir = None
gtk.ListStore.__init__(self, gobject.TYPE_STRING, gobject.TYPE_STRING,
gobject.TYPE_STRING, gobject.TYPE_STRING,
gobject.TYPE_STRING)
def getCWD(self):
return self.current_dir
def sort_func(self, model, iter1, iter2, col=None):
type1 = model.get_value(iter1, FileSystemModel.TYPE_COL)
type2 = model.get_value(iter2, FileSystemModel.TYPE_COL)
if type1 == type2:
if col == FileSystemModel.NAME_COL:
return self.string_sort_func(model, iter1, iter2, col)
elif col == FileSystemModel.DATE_COL:
return self.date_sort_func(model, iter1, iter2, col)
elif col == FileSystemModel.SIZE_COL:
return self.size_sort_func(model, iter1, iter2, col)
else:
# FIXME: Raise exception here
print "ERROR: Unknown column type: ", col
return 0
elif type1 == 'f' and type2 == 'd':
return 1
else:
return -1
def string_sort_func(self, model, iter1, iter2, col=None):
string1 = string.lower(model.get_value(iter1, col))
string2 = string.lower(model.get_value(iter2, col))
if string1 == string2:
return 0;
elif string1 < string2:
return -1
else:
return 1
def date_sort_func(self, model, iter1, iter2, col=None):
# Date column is empty for '..' directory
date1_str = model.get_value(iter1, col)
if len(date1_str) == 0:
return -1
date2_str = model.get_value(iter2, col)
if len(date2_str) == 0:
return 1
format = '%a %b %d %Y'
date1 = time.strptime(date1_str, format)
date2 = time.strptime(date2_str, format)
if date1 == date2:
return self.string_sort_func(model, iter1, iter2, FileSystemModel.NAME_COL);
elif date1 < date2:
return -1
else:
return 1
def size_sort_func(self, model, iter1, iter2, col=None):
size1 = model.get_value(iter1, col).split()
size2 = model.get_value(iter2, col).split()
size1_len = len(size1)
size2_len = len(size2)
if size1_len < size2_len:
return -1
elif size1_len > size2_len:
return 1
else:
return 0
unit_weight = { 'B' : 1, 'KB' : 2, 'MB' : 3, 'GB' : 4 }
# Sizes in bytes may not have 'B' unit postfix
if len(size1) < 2:
size1[1] = unit_weight['B']
else:
size1[1] = unit_weight[size1[1]]
return 1
# Sizes in bytes may not have 'B' unit appended
if len(size2) < 2:
size2[1] = unit_weight['B']
else:
size2[1] = unit_weight[size2[1]]
if size1[1] == size2[1]:
size1[0] = float(size1[0])
size2[0] = float(size2[0])
if size1[0] == size2[0]:
return self.string_sort_func(model, iter1, iter2, FileSystemModel.NAME_COL);
elif size1[0] < size2[0]:
return -1
else:
return 1
elif size1[1] < size2[1]:
return -1
else:
return 1
class PVRFileSystemModel(FileSystemModel):
dir_sep = '\\'
def __init__(self):
FileSystemModel.__init__(self)
# FIXME: Get dir from when Guppy last exited
self.current_dir = ''
self.puppy = puppy.Puppy()
self.changeDir()
def changeDir(se | lf, dir=None):
if len(self) > 0:
self.clear()
if dir:
if dir[0] != '\\':
dir = self.current_dir + '\\' + dir
else:
dir = self.current_dir
norm_path = os.path.normpath(dir.replace('\\', '/'))
if norm_path != '.':
self.current_dir = norm_path.replace('/', '\\')
else:
self.current_dir = '\\'
pvr_files = self.puppy.listDir(self.current_dir)
for file in pvr_files:
# TODO: Set icon based on file type. Use dummy icon for now
if file[FileSystemModel.TYPE_ | COL] == 'd':
file.insert(FileSystemModel.ICON_COL, gtk.STOCK_DIRECTORY)
else:
file.insert(FileSystemModel.ICON_COL, gtk.STOCK_FILE)
file[FileSystemModel.SIZE_COL] = humanReadableSize(int(file[FileSystemModel.SIZE_COL]))
self.append(file)
def freeSpace(self):
total, free = self.puppy.getDiskSpace()
return humanReadableSize(free)
class PCFileSystemModel(FileSystemModel):
def __init__(self):
FileSystemModel.__init__(self)
# FIXME: Get dir from when Guppy last exited
self.current_dir = os.environ['HOME']
self.changeDir()
def changeDir(self, dir=None):
if dir:
if dir[0] != '/':
dir = self.current_dir + '/' + dir
else:
dir = self.current_dir
dir = os.path.normpath(dir)
if not os.access(dir, os.F_OK):
return
self.current_dir = dir
if len(self) > 0:
self.clear()
# Parent directory
self.append(['d', gtk.STOCK_DIRECTORY, '..', '', ''])
for file in os.listdir(self.current_dir):
mode = os.stat(self.current_dir + '/' + file)
if stat.S_ISDIR(mode[stat.ST_MODE]):
type = 'd'
icon = gtk.STOCK_DIRECTORY
size = ''
else:
type = 'f'
icon = gtk.STOCK_FILE
size = humanReadableSize(mode[stat.ST_SIZE])
mtime = time.strftime('%a %b %d %Y', time.localtime(mode[stat.ST_MTIME]))
entry = [ type, icon, file, mtime, size ]
self.append(entry)
def freeSpace(self):
cmd = 'df ' + self.current_dir
pipe = os.popen(cmd)
# Skip Headers
pipe.readline()
output = pipe.readline().split()
exit_stat = pipe.close()
if exit_stat != None and os.WEXITSTATUS(exit_stat) != 0:
# TODO: Raise exception
print "ERROR: Failed to get disk free space (`%s')" % (cmd)
return None
# Multiple by 1024 to convert from kilobytes to bytes
return humanReadableSize(int(output[3])*1024)
class GuppyWindow:
def __init__(self):
# Find out proper way to find glade files
guppy_glade_file = 'guppy.glade'
self.initUIManager()
gtk.glade.set_custom_handler(self.customWidgetHandler)
# Load glade file
self.glade_xml = gtk.glade.XML(guppy_glade_file, None, gettext.textdomain())
# Connect callback functions in glade file to functions
self.glade_xml.signal_autoconnect(self)
accelgroup = self.uimanager.get_accel_group()
window = self.glade_xml.get_widget('guppy_window')
window.add_accel_group(accelgroup)
self.puppy = puppy.Puppy()
self.show_hidden = False
self.transfer_dialog = self.glade_xml.get_widget('transfer_dialog')
self.pvr_total_size_label = self.glade_xml.get_widget('pvr_total_size_label')
self.pvr_free_space_label = self.glade_xml.get_widget('pvr_free_space_label')
self.pc_total_size_label = self.glade_xml.get_widget('pc_total_size_label')
self.pc_free_space_label = self.glade_xml.get_widget('p |
Qwaz/solved-hacking-problem | SCTF/2017 Quals/dfa/solver.py | Python | gpl-2.0 | 308 | 0 | from pwn import *
import base64
p = remote('dfa.eatpwnno | sleep.com', 9999)
p.recvline()
p.recvline()
p.recvline()
p.recvline()
p.recvline()
p.sendline(' | auto.c')
with open('auto.patch.c', 'r') as f:
content = f.read()
p.recvuntil('base64 : ')
p.sendline(base64.b64encode(content))
print p.recvall()
|
aprudent/caduceus | caduceus/transform/templateHtmlComment.py | Python | bsd-3-clause | 378 | 0.029101 | from caduceus.transform.templateEntity import CaduceusTemplateEntity
class CaduceusTemplateHtmlComment(CaduceusTemplateEntity):
    """Template node that renders as an HTML comment.

    The wrapped text is emitted inside ``<!-- ... -->``, followed by the
    rendered output of any child entities.  (Reconstructed: the source had
    line-split artifacts breaking the ``render`` signature and body.)
    """

    def __init__(self, text):
        # Initialise the base entity (child-node bookkeeping) first.
        CaduceusTemplateEntity.__init__(self)
        self._text = text

    def render(self, dictGlob, dictLoc, results):
        # Emit the comment text, then whatever the children render to.
        return "<!-- %s -->%s" % (self._text,
            CaduceusTemplateEntity.render(self, dictGlob, dictLoc, results))
QuantEcon/QuantEcon.py | docs/qe_apidoc.py | Python | bsd-3-clause | 10,122 | 0.001877 | """
Our version of sphinx-apidoc
@author : Spencer Lyon
@date : 2014-07-16
This file should be called from the command line. It accepts one
additional command line parameter. If we pass the parameter `single`
when running the file, this file will create a single directory named
modules where each module in quantecon will be documented. The index.rst
file will then contain a single list of all modules.
If no argument is passed or if the argument is anything other than
`single`, two directories will be created: models and tools. The models
directory will contain documentation instructions for the different
models in quantecon, whereas the tools directory will contain docs for
the tools in the package. The generated index.rst will then contain
two toctrees, one for models and one for tools.
Examples
--------
$ python qe_apidoc.py # generates the two separate directories
$ python qe_apidoc.py foo_bar # generates the two separate directories
$ python qe_apidoc.py single # generates the single directory
Notes
-----
1. This file can also be run from within ipython using the %%run magic.
To do this, use one of the commands above and replace `python` with
`%%run`
2. Models has been removed. But leaving infrastructure here for qe_apidoc
in the event we need it in the future
"""
import os
import sys
from glob import glob
######################
## String Templates ##
######################
module_template = """{mod_name}
{equals}
.. automodule:: quantecon.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
game_theory_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.game_theory.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
game_generators_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.game_theory.game_generators.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
markov_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.markov.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
optimize_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.optimize.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
random_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.random.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
util_module_template = """{mod_name}
{equals}
.. automodule:: quantecon.util.{mod_name}
:members:
:undoc-members:
:show-inheritance:
"""
all_index_template = """=======================
QuantEcon documentation
=======================
Auto-generated documentation by module:
.. toctree::
:maxdepth: 2
{generated}
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
split_index_template = """=======================
QuantEcon documentation
=======================
The `quantecon` python library consists of a number of modules which
includes game theory (game_theory), markov chains (markov), random
generation utilities (random), a collection of tools (tools),
and other utilities (util) which are
mainly used by developers internal to the package.
.. toctree::
:maxdepth: 2
game_theory
markov
optimize
random
tools
util
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
split_file_template = """{name}
{equals}
.. toctree::
:maxdepth: 2
{files}
"""
######################
## Helper functions ##
######################
def source_join(f_name):
    """Return *f_name* prefixed with the docs ``source`` directory."""
    base = "source"
    return os.path.join(base, f_name)
####################
## Main functions ##
####################
def all_auto():
    """Generate one rst stub per top-level quantecon module.

    Writes ``source/modules/<name>.rst`` for every ``../quantecon/*.py``
    module and regenerates ``source/index.rst`` with a single toctree
    listing all of them.
    """
    # Get list of module names
    mod_names = glob("../quantecon/[a-z0-9]*.py")
    mod_names = list(map(lambda x: x.split('/')[-1], mod_names))
    # Ensure source/modules directory exists
    if not os.path.exists(source_join("modules")):
        os.makedirs(source_join("modules"))
    # Write file for each module
    for mod in mod_names:
        name = mod.split(".")[0]  # drop .py ending
        new_path = os.path.join("source", "modules", name + ".rst")
        with open(new_path, "w") as f:
            # NOTE(review): `gen_module` is not defined in this part of the
            # file -- presumably a helper defined elsewhere that writes the
            # module_template for `name` into `f`; confirm it exists.
            gen_module(name, f)
    # write index.rst file to include these autogenerated files
    with open(source_join("index.rst"), "w") as index:
        generated = "\n ".join(list(map(lambda x: "modules/" + x.split(".")[0],
                                        mod_names)))
        temp = all_index_template.format(generated=generated)
        index.write(temp)
def model_tool():
# list file names with game_theory
game_theory_files = glob("../quantecon/game_theory/[a-z0-9]*.py")
game_theory = list(map(lambda x: x.split('/')[-1][:-3], game_theory_files))
# Alphabetize
game_theory.sort()
# list file names with game_theory/game_generators
game_generators_files = glob("../quantecon/game_theory/game_generators/[a-z0-9]*.py")
game_generators = list(
map(lambda x: x.split('/')[-1][:-3], game_generators_files))
# Alphabetize
game_generators.sort()
# list file names with markov
markov_files = glob("../quantecon/markov/[a-z0-9]*.py")
markov = list(map(lambda x: x.split('/')[-1][:-3], markov_files))
# Alphabetize
markov.sort()
# list file names with optimize
optimize_files = glob("../quantecon/optimize/[a-z0-9]*.py")
optimize = list(map(lambda x: x.split('/')[-1][:-3], optimize_files))
# Alphabetize
optimize.sort()
# list file names with random
random_files = glob("../quantecon/random/[a-z0-9]*.py")
random = list(map(lambda x: x.split('/')[-1][:-3], random_files))
# Alphabetize
random.sort()
# list file names of tools (base level modules)
tool_files = glob("../quantecon/[a-z0-9]*.py")
tools = list(map(lambda x: x.split('/')[-1][:-3], tool_files))
# Alphabetize
tools.remove("version")
tools.sort()
# list file names of utilities
util_files = glob("../quantecon/util/[a-z0-9]*.py")
util = list(map(lambda x: x.split('/')[-1][:-3], util_files))
# Alphabetize
util.sort()
for folder in ["game_theory", "markov", "optimize", "random", "tools", "util"]:
if not os.path.exists(source_join(folder)):
os.makedirs(source_join(folder))
# Write file for each game_theory file
for mod in game_theory:
new_path = os.path.join("source", "game_theory", mod + ".rst")
with open(new_path, "w") as f:
equals = "=" * len(mod)
f.write(game_theory_module_template.format(mod_name=mod, equals=equals))
for mod in game_generators:
new_path = os.path.join("source", "game_theory", "game_generators", mod + ".rst")
with open(new_path, "w") as f:
equals = "=" * len(mod)
f.write(game_generators_module_template.format(
mod_name=mod, equals=equals))
#Add sudirectory to flat game_theory list for index file
game_theory.append("game_generators/{}".format(mod))
# | Write file for each markov file
for mod in markov:
new_path = os.path.join("source", "markov", mod + ".rst")
with open(new_path, "w") as f:
equals = "=" * len(mod)
f.write(markov_module_template.format(mod_name=mod, equals=equals))
# Write file for each optimize file
for mod in optimize:
new_path = os.path.join("source", "optimize", mod + ".rst")
with open( | new_path, "w") as f:
equals = "=" * len(mod)
f.write(optimize_module_template.format(mod_name=mod, equals=equals))
# Write file for each random file
for mod in random:
new_path = os.path.join("source", "random", mod + ".rst")
with open(new_path, "w") as f:
equals = "=" * len(mod)
f.write(random_module_template.format(mod_name=mod, equals=equals))
# Write file for each tool (base level modules)
for mod in tools:
new_path = os.path.join("source", "tools", mod + ".rst")
with open(new_path, "w") as f:
equals = "=" * len(mod)
f.write(module_template.format( |
bakaat/xhtml2pdf | xhtml2pdf/parser.py | Python | apache-2.0 | 25,104 | 0.002948 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import types
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
    """Dict whose items can also be read as attributes.

    ``obj.name`` is equivalent to ``obj["name"]``; a missing key raises
    ``KeyError``, matching the historical behaviour.

    Fix: the original called ``dict.__getattr__``, which does not exist,
    and hid the resulting ``AttributeError`` behind a bare ``except`` --
    the net effect was always the item lookup, so do that directly.
    """

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup has already failed.
        return self[name]
def pisaGetAttributes(c, tag, attributes):
    """Normalise the HTML *attributes* of *tag* against the TAGS schema.

    Each attribute declared for the tag in TAGS is validated/converted
    (size, box, color, file, font, ...) and collected; unknown or invalid
    values fall back to the declared default.  Returns an AttrContainer.
    Warnings are routed through ``c.warning``.  (Python 2 code:
    ``iteritems`` / ``types.TupleType``.)
    """
    global TAGS
    attrs = {}
    if attributes:
        for k, v in attributes.items():
            try:
                attrs[str(k)] = str(v)  # XXX no Unicode! Reportlab fails with template names
            except:
                attrs[k] = v
    nattrs = {}
    if tag in TAGS:
        block, adef = TAGS[tag]
        # Every tag implicitly accepts an "id" attribute.
        adef["id"] = STRING
        # print block, adef
        for k, v in adef.iteritems():
            nattrs[k] = None
            # print k, v
            # defaults, if present (tuple form is (type, default))
            if type(v) == types.TupleType:
                if v[1] == MUST:
                    if k not in attrs:
                        log.warn(c.warning("Attribute '%s' must be set!", k))
                        nattrs[k] = None
                        continue
                nv = attrs.get(k, v[1])
                dfl = v[1]
                v = v[0]
            else:
                nv = attrs.get(k, None)
                dfl = None
            if nv is not None:
                # Convert/validate according to the declared attribute type.
                if type(v) == types.ListType:
                    nv = nv.strip().lower()
                    if nv not in v:
                        #~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
                        log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
                        nv = dfl
                elif v == BOOL:
                    nv = nv.strip().lower()
                    nv = nv in ("1", "y", "yes", "true", str(k))
                elif v == SIZE:
                    try:
                        nv = getSize(nv)
                    except:
                        log.warn(c.warning("Attribute '%s' expects a size value", k))
                elif v == BOX:
                    nv = getBox(nv, c.pageSize)
                elif v == POS:
                    nv = getPos(nv, c.pageSize)
                elif v == INT:
                    nv = int(nv)
                elif v == COLOR:
                    nv = getColor(nv)
                elif v == FILE:
                    nv = c.getFile(nv)
                elif v == FONT:
                    nv = c.getFontName(nv)
                nattrs[k] = nv
    return AttrContainer(nattrs)
# CSS property names collected for each DOM node (see CSSCollect).
# Fix: the original literal was corrupted by line-split artifacts
# ("border-left-w idth", "list -style-type"); reconstructed here.
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
letter-spacing
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
    """Resolve CSS attribute *attrName* for this DOM element, with caching.

    Order: per-node cache, stylesheet cascade, inline ``style`` attribute.
    An ``inherit`` value is resolved by walking up to the parent node;
    *default* (when given) is returned if inheritance is impossible.

    Fix: indentation was lost in the source and, as laid out, the final
    ``raise`` fired even after a successful parent lookup; it is restored
    to fire only when inheritance cannot be resolved.  The bare ``except``
    around the ``cssStyle`` probe is narrowed to ``AttributeError`` (the
    only exception that attribute access can raise here).
    """
    if attrName in self.cssAttrs:
        return self.cssAttrs[attrName]
    try:
        result = cssCascade.findStyleFor(self.cssElement, attrName, default)
    except LookupError:
        result = None
    # XXX Workaround for inline styles
    try:
        style = self.cssStyle
    except AttributeError:
        # Parse and memoize the inline style attribute on first access.
        style = self.cssStyle = cssCascade.parser.parseInline(
            self.cssElement.getStyleAttr() or '')[0]
    if attrName in style:
        result = style[attrName]
    if result == 'inherit':
        if hasattr(self.parentNode, 'getCSSAttr'):
            result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
        elif default is not NotImplemented:
            return default
        else:
            raise LookupError(
                "Could not find inherited CSS attribute value for '%s'" % (attrName,))
    if result is not None:
        self.cssAttrs[attrName] = result
    return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
# Create an aliasing system. Many sources use non-standard tags, because browsers allow
# them to. This allows us to map a nonstandard name to the standard one.
nonStandardAttrNames = {
'bgcolor': 'background-color',
}
def mapNonStandardAttrs(c, n, attrList):
    """Copy known non-standard HTML attributes from *attrList* into *c*
    under their standard CSS names, without overwriting existing entries.
    Returns *c*."""
    for alias in nonStandardAttrNames:
        standard = nonStandardAttrNames[alias]
        if alias in attrList and standard not in c:
            c[standard] = attrList[alias]
    return c
def getCSSAttrCacheKey(node):
    """Build the CSSAttrCache key for *node*: parent identity, lower-cased
    tag name, and the node's class/id/style attribute values."""
    parts = {'class': '', 'id': '', 'style': ''}
    for attr_name, attr_value in node.attributes.items():
        if attr_name in parts:
            parts[attr_name] = attr_value
    return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(),
                               parts['class'], parts['id'], parts['style'])
def CSSCollect(node, c):
    """Attach resolved CSS attributes to *node* as ``node.cssAttrs``.

    Results are memoized in the module-level CSSAttrCache keyed by
    getCSSAttrCacheKey (skipped for direct children of <html>).
    Returns the attribute dict.
    """
    #node.cssAttrs = {}
    #return node.cssAttrs
    if c.css:
        _key = getCSSAttrCacheKey(node)
        if hasattr(node.parentNode, "tagName"):
            if node.parentNode.tagName.lower() != "html":
                CachedCSSAttr = CSSAttrCache.get(_key, None)
                if CachedCSSAttr is not None:
                    node.cssAttrs = CachedCSSAttr
                    return CachedCSSAttr
        # Cache miss: resolve every known CSS property for this node.
        node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
        node.cssAttrs = {}
        # node.cssElement.onCSSParserVisit(c.cssCascade.parser)
        # NOTE(review): cssAttrMap is filled but never read afterwards --
        # getCSSAttr stores into node.cssAttrs itself; confirm before removal.
        cssAttrMap = {}
        for cssAttrName in attrNames:
            try:
                cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
            #except LookupError:
            #    pass
            except Exception:  # TODO: Kill this catch-all!
                log.debug("CSS error '%s'", cssAttrName, exc_info=1)
        CSSAttrCache[_key] = node.cssAttrs
    return node.cssAttrs
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = getColor(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family |
bitmazk/django-oscar-asiapay | oscar_integration_example/checkout/app.py | Python | mit | 331 | 0 | """Custom ``checkout`` app."""
from oscar.apps.checkout.app import CheckoutApplication
from oscar_custom.checkout import views
class OverriddenCheckoutApplication(CheckoutApplication):
    """Checkout app that swaps in the custom payment-details view."""
    # Specify new view for payment details
    payment_details_view = views.PaymentDetailsView


# Module-level application instance Oscar wires into the URL conf.
# (Fix: the assignment name was corrupted by a line-split artifact.)
application = OverriddenCheckoutApplication()
| |
Subarno/MachineLearningPracticePrograms | transfer.py | Python | gpl-3.0 | 565 | 0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
# Fix: "keras" and "decode_predictions" were corrupted by line-split
# artifacts in the source; reconstructed.
from keras.preprocessing import image
from keras.applications.resnet50 import *

# Build a ResNet50 and load locally stored ImageNet weights.
model = ResNet50(weights=None)
model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels.h5py')

# Classify a single image: load, batch, preprocess, predict.
img = image.load_img('data/deer.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=5)[0])
food52/thumbor | thumbor/transformer.py | Python | mit | 12,511 | 0.002398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import math
import sys
from thumbor.point import FocalPoint
from thumbor.utils import logger
trim_enabled = True
try:
from thumbor.ext.filters import _bounding_box
except ImportError:
logger.warn("Error importing bounding_box filter, trimming won't work")
trim_enabled = False
class Transformer(object):
    def __init__(self, context):
        """Bind the transformer to a request *context* and its engine."""
        self.context = context
        self.engine = self.context.request.engine
    def calculate_target_dimensions(self):
        """Compute ``self.target_width``/``self.target_height``.

        Missing dimensions are derived proportionally from the source
        image; the literal string "orig" requests the source dimension.
        """
        source_width, source_height = self.engine.size
        source_width = float(source_width)
        source_height = float(source_height)
        if not self.context.request.width and not self.context.request.height:
            # No explicit size requested: keep the source dimensions.
            self.target_width = source_width
            self.target_height = source_height
        else:
            if self.context.request.width:
                if self.context.request.width == "orig":
                    self.target_width = source_width
                else:
                    self.target_width = float(self.context.request.width)
            else:
                # Width omitted: derive it from the requested height.
                self.target_width = self.engine.get_proportional_width(self.context.request.height)
            if self.context.request.height:
                if self.context.request.height == "orig":
                    self.target_height = source_height
                else:
                    self.target_height = float(self.context.request.height)
            else:
                # Height omitted: derive it from the requested width.
                self.target_height = self.engine.get_proportional_height(self.context.request.width)
    def adjust_focal_points(self):
        """Translate focal points into crop space and focus the engine.

        Points outside a manual crop window are discarded and the rest are
        shifted by the crop offset.  Without any usable points, a single
        point derived from the requested halign/valign is used instead.
        """
        source_width, source_height = self.engine.size
        self.focal_points = None
        if self.context.request.focal_points:
            if self.context.request.should_crop:
                self.focal_points = []
                crop = self.context.request.crop
                for point in self.context.request.focal_points:
                    # Drop points that fall outside the manual crop window.
                    if point.x < crop['left'] or point.x > crop['right'] or point.y < crop['top'] or point.y > crop['bottom']:
                        continue
                    point.x -= crop['left'] or 0
                    point.y -= crop['top'] or 0
                    self.focal_points.append(point)
            else:
                self.focal_points = self.context.request.focal_points
        if not self.focal_points:
            # Fall back to a single alignment-based focal point.
            self.focal_points = [
                FocalPoint.from_alignment(self.context.request.halign,
                                          self.context.request.valign,
                                          source_width,
                                          source_height)
            ]
        self.engine.focus(self.focal_points)
    def transform(self, callback):
        """Pipeline entry point; *callback* runs when operations finish."""
        self.done_callback = callback
        if self.context.config.RESPECT_ORIENTATION:
            # Honour EXIF orientation before any geometry is computed.
            self.engine.reorientate()
        self.trim()
        self.smart_detect()
def trim(self):
is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
if self.context.request.trim is None or not trim_enabled or is_gifsicle:
return
mode, data = self.engine.image_data_as_rgb()
box = _bounding_box.apply(
mode,
self.engine.size[0],
self.engine.size[1],
self.context.request.trim_pos,
self.context.request.trim_tolerance,
data
)
if box[2] < box[0] or box[3] < box[1]:
logger.warn("Ignoring trim, there wouldn't be any image left, check the tolerance.")
return
self.engine.crop(box[0], box[1], box[2] + 1, box[3] + 1)
if self.context.request.should_crop:
self.context.request.crop['left'] -= box[0]
self.context.request.crop['top'] -= box[1]
self.context.request.crop[ | 'right'] -= box[0]
self.context.request.crop['bottom'] -= box[1]
@property
def smart_storage_key(self):
return self | .context.request.image_url
    def smart_detect(self):
        """Run smart detection when enabled, then the image operations.

        Detection errors are re-raised unless IGNORE_SMART_ERRORS is set,
        in which case they are logged (optionally through the custom error
        handler) and the pipeline continues without detection results.
        """
        is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
        if (not (self.context.modules.detectors and self.context.request.smart)) or is_gifsicle:
            # No detectors configured, smart not requested, or unsupported
            # engine: go straight to the image operations.
            self.do_image_operations()
            return
        try:
            # Beware! Boolean hell ahead.
            #
            # The `running_smart_detection` flag is needed so we can know
            # whether `after_smart_detect()` is running synchronously or not.
            #
            # If we're running it in a sync fashion it will set
            # `should_run_image_operations` to True so we can avoid running
            # image operation inside the try block.
            self.should_run_image_operations = False
            self.running_smart_detection = True
            self.do_smart_detection()
            self.running_smart_detection = False
        except Exception:
            if not self.context.config.IGNORE_SMART_ERRORS:
                raise
            logger.exception("Ignored error during smart detection")
            if self.context.config.USE_CUSTOM_ERROR_HANDLING:
                self.context.modules.importer.error_handler.handle_error(
                    context=self.context,
                    handler=self.context.request_handler,
                    exception=sys.exc_info()
                )
            # Don't cache results derived from a failed detection pass.
            self.context.request.prevent_result_storage = True
            self.context.request.detection_error = True
            self.do_image_operations()
        if self.should_run_image_operations:
            self.do_image_operations()
    def do_smart_detection(self):
        """Use stored detector data when available, else run the detectors."""
        focal_points = self.context.modules.storage.get_detector_data(self.smart_storage_key)
        if focal_points is not None:
            self.after_smart_detect(focal_points, points_from_storage=True)
        else:
            # Kick off the detector chain; it calls back when finished.
            detectors = self.context.modules.detectors
            detectors[0](self.context, index=0, detectors=detectors).detect(self.after_smart_detect)
def after_smart_detect(self, focal_points=[], points_from_storage=False):
for point in focal_points:
self.context.request.focal_points.append(FocalPoint.from_dict(point))
if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
storage = self.context.modules.storage
points = []
for point in self.context.request.focal_points:
points.append(point.to_dict())
storage.put_detector_data(self.smart_storage_key, points)
if self.running_smart_detection:
self.should_run_image_operations = True
return
self.do_image_operations()
    def do_image_operations(self):
        """Apply crop/resize/flip operations, then invoke the done callback."""
        if '.gif' == self.context.request.engine.extension and 'cover()' in self.context.request.filters:
            # GIF cover: operate on a single representative frame.
            self.extract_cover()
        self.manual_crop()
        self.calculate_target_dimensions()
        self.adjust_focal_points()
        if self.context.request.debug:
            self.debug()
        else:
            if self.context.request.fit_in:
                self.fit_in_resize()
            else:
                self.auto_crop()
                self.resize()
            self.flip()
        self.done_callback()
    def extract_cover(self):
        """Delegate GIF cover-frame extraction to the engine."""
        self.engine.extract_cover()
def manual_crop(self):
if self.context.request.should_crop:
limit = lambda dimension, maximum: min(max(dimension, 0), maximum)
source_width, source_height = self.engine.size
crop = self.context.request.crop
crop['left'] = limit(crop['left'], source_width)
crop['top'] = limit(crop['top'], source_height)
crop['right'] = limit(crop['right'], source_width)
crop['bottom'] = limit(crop['bottom'], |
kscottz/SkinnerBox | modules/__init__.py | Python | mit | 120 | 0.008333 | from Har | dwareInterface import *
from CameraInterface import *
#from ProtocolRunner import *
from DataInterface import | *
|
stephentyrone/swift | utils/gyb_sourcekit_support/UIDs.py | Python | apache-2.0 | 23,110 | 0 | class KEY(object):
    def __init__(self, internal_name, external_name):
        """Pair the internal identifier with its external 'key.*' UID string."""
        self.internalName = internal_name
        self.externalName = external_name
class REQUEST(object):
    """A sourcekitd request UID: an internal identifier paired with its
    external 'source.request.*' string."""

    def __init__(self, internal_name, external_name):
        # Store both spellings; consumers read the attributes directly.
        self.internalName, self.externalName = internal_name, external_name
class KIND(object):
    """A sourcekitd kind UID: an internal identifier paired with its
    external string form."""

    def __init__(self, internal_name, external_name):
        # Store both spellings; consumers read the attributes directly.
        self.internalName, self.externalName = internal_name, external_name
UID_KEYS = [
KEY('VersionMajor', 'key.version_major'),
KEY('VersionMinor', 'key.version_minor'),
KEY('VersionPatch', 'key.version_patch'),
KEY('Results', 'key.results'),
KEY('Request', 'key.request'),
KEY('Notification', 'key.notification'),
KEY('Kind', 'key.kind'),
KEY('AccessLevel', 'key.accessibility'),
KEY('SetterAccessLevel', 'key.setter_accessibility'),
KEY('Keyword', 'key.keyword'),
KEY('Name', 'key.name'),
KEY('USR', 'key.usr'),
KEY('OriginalUSR', 'key.original_usr'),
KEY('DefaultImplementationOf', 'key.default_implementation_of'),
KEY('InterestedUSR', 'key.interested_usr'),
KEY('GenericParams', 'key.generic_params'),
KEY('GenericRequirements', 'key.generic_requirements'),
KEY('DocFullAsXML', 'key.doc.full_as_xml'),
KEY('Line', 'key.line'),
KEY('Column', 'key.column'),
KEY('ReceiverUSR', 'key.receiver_usr'),
KEY('IsDynamic', 'key.is_dynamic'),
KEY('IsImplicit', 'key.is_implicit'),
KEY('FilePath', 'key.filepath'),
KEY('ModuleInterfaceName', 'key.module_interface_name'),
KEY('Hash', 'key.hash'),
KEY('CompilerArgs', 'key.compilerargs'),
KEY('Severity', 'key.severity'),
KEY('Offset', 'key.offset'),
KEY('Length', 'key.length'),
KEY('SourceFile', 'key.sourcefile'),
KEY('SerializedSyntaxTree', 'key.serialized_syntax_tree'),
KEY('SourceText', 'key.sourcetext'),
KEY('EnableSyntaxMap', 'key.enablesyntaxmap'),
KEY('SyntaxTreeTransferMode', 'key.syntaxtreetransfermode'),
KEY('SyntaxTreeSerializationFormat',
'key.syntax_tree_serialization_format'),
KEY('EnableStructure', 'key.enablesubstructure'),
KEY('Description', 'key.description'),
KEY('TypeName', 'key.typename'),
KEY('RuntimeName', 'key.runtime_name'),
KEY('SelectorName', 'key.selector_name'),
KEY('AnnotatedDecl', 'key.annotated_decl'),
KEY('FullyAnnotatedDecl', 'key.fully_annotated_decl'),
KEY('FullyAnnotatedGenericSignature',
'key.fully_annotated_generic_signature'),
KEY('DocBrief', 'key.doc.brief'),
KEY('Context', 'key.context'),
KEY('TypeRelation', 'key.typerelation'),
KEY('ModuleImportDepth', 'key.moduleimportdepth'),
KEY('NumBytesToErase', 'key.num_bytes_to_erase'),
KEY('NotRecommended', 'key.not_recommended'),
KEY('Annotations', 'key.annotations'),
KEY('DiagnosticStage', 'key.diagnostic_stage'),
KEY('SyntaxMap', 'key.syntaxmap'),
KEY('IsSystem', 'key.is_system'),
KEY('Related', 'key.related'),
KEY('Inherits', 'key.inherits'),
KEY('Conforms', 'key.conforms'),
KEY('Extends', 'key.extends'),
KEY('Dependencies', 'key.dependencies'),
KEY('Entities', 'key.entities'),
KEY('NameOffset', 'key.nameoffset'),
KEY('NameLength', 'key.namelength'),
KEY('BodyOffset', 'key.bodyoffset'),
KEY('BodyLength', 'key.bodylength'),
KEY('ThrowOffset', 'key.throwoffset'),
KEY('ThrowLength', 'key.throwlength'),
KEY('DocOffset', 'key.docoffset'),
KEY('DocLength', 'key.doclength'),
KEY('IsLocal', 'key.is_local'),
KEY('InheritedTypes', 'key.inheritedtypes'),
KEY('Attributes', 'key.attributes'),
KEY('Attribute', 'key.attribute'),
KEY('Elements', 'key.elements'),
KEY('SubStructure', 'key.substructure'),
KEY('Ranges', 'key.ranges'),
KEY('Fixits', 'key.fixits'),
KEY('Diagnostics', 'key.diagnostics'),
KEY('EducationalNotePaths', 'key.educational_note_paths'),
KEY('FormatOptions', 'key.editor.format.options'),
KEY('CodeCompleteOptions', 'key.codecomplete.options'),
KEY('FilterRules', 'key.codecomplete.filterrules'),
KEY('NextRequestStart', 'key.nextrequeststart'),
KEY('Popular', 'key.popular'),
KEY('Unpopular', 'key.unpopular'),
KEY('Hide', 'key.hide'),
KEY('Platform', 'key.platform'),
KEY('IsDeprecated', 'key.is_deprecated'),
KEY('IsUnavailable', 'key.is_unavailable'),
KEY('IsOptional', 'key.is_optional'),
KEY('Message', 'key.message'),
KEY('Introduced', 'key.introduced'),
KEY('Deprecated', 'key.deprecated'),
KEY('Obsoleted', 'key.obsoleted'),
KEY('RemoveCache', 'key.removecache'),
KEY('TypeInterface', 'key.typeinterface'),
KEY('TypeUsr', 'key.typeusr'),
KEY('ContainerTypeUsr', 'key.containertypeusr'),
KEY('ModuleGroups', 'key.modulegroups'),
KEY('BaseName', 'key.basename'),
KEY('ArgNames', 'key.argnames'),
KEY('SelectorPieces', 'key.selectorpieces'),
KEY('NameKind', 'key.namekind'),
KEY('LocalizationKey', 'key.localizat | ion_key'),
KEY('IsZeroArgSelector', 'key.is_zero_arg_selector'),
KEY('SwiftVersion', 'key.swift_version'),
KEY('Value', 'key.value'),
KEY('EnableDiagnostics', 'key.enablediagnostics' | ),
KEY('GroupName', 'key.groupname'),
KEY('ActionName', 'key.actionname'),
KEY('SynthesizedExtension', 'key.synthesizedextensions'),
KEY('UsingSwiftArgs', 'key.usingswiftargs'),
KEY('Names', 'key.names'),
KEY('UIDs', 'key.uids'),
KEY('SyntacticOnly', 'key.syntactic_only'),
KEY('ParentLoc', 'key.parent_loc'),
KEY('IsTestCandidate', 'key.is_test_candidate'),
KEY('Overrides', 'key.overrides'),
KEY('AssociatedUSRs', 'key.associated_usrs'),
KEY('ModuleName', 'key.modulename'),
KEY('RelatedDecls', 'key.related_decls'),
KEY('Simplified', 'key.simplified'),
KEY('RangeContent', 'key.rangecontent'),
KEY('CancelOnSubsequentRequest', 'key.cancel_on_subsequent_request'),
KEY('RenameLocations', 'key.renamelocations'),
KEY('Locations', 'key.locations'),
KEY('NameType', 'key.nametype'),
KEY('NewName', 'key.newname'),
KEY('CategorizedEdits', 'key.categorizededits'),
KEY('CategorizedRanges', 'key.categorizedranges'),
KEY('RangesWorthNote', 'key.rangesworthnote'),
KEY('Edits', 'key.edits'),
KEY('EndLine', 'key.endline'),
KEY('EndColumn', 'key.endcolumn'),
KEY('ArgIndex', 'key.argindex'),
KEY('Text', 'key.text'),
KEY('Category', 'key.category'),
KEY('IsFunctionLike', 'key.is_function_like'),
KEY('IsNonProtocolType', 'key.is_non_protocol_type'),
KEY('RefactorActions', 'key.refactor_actions'),
KEY('RetrieveRefactorActions', 'key.retrieve_refactor_actions'),
KEY('ActionUID', 'key.actionuid'),
KEY('ActionUnavailableReason', 'key.actionunavailablereason'),
KEY('CompileID', 'key.compileid'),
KEY('CompilerArgsString', 'key.compilerargs-string'),
KEY('ImplicitMembers', 'key.implicitmembers'),
KEY('ExpectedTypes', 'key.expectedtypes'),
KEY('Members', 'key.members'),
KEY('TypeBuffer', 'key.printedtypebuffer'),
KEY('ExpressionTypeList', 'key.expression_type_list'),
KEY('ExpressionOffset', 'key.expression_offset'),
KEY('ExpressionLength', 'key.expression_length'),
KEY('ExpressionType', 'key.expression_type'),
KEY('CanonicalizeType', 'key.canonicalize_type'),
KEY('InternalDiagnostic', "key.internal_diagnostic"),
KEY('VFSName', 'key.vfs.name'),
KEY('VFSOptions', 'key.vfs.options'),
KEY('Files', 'key.files'),
KEY('OptimizeForIDE', 'key.optimize_for_ide'),
KEY('RequiredBystanders', 'key.required_bystanders'),
KEY('ReusingASTContext', 'key.reusingastcontext'),
KEY('CompletionCheckDependencyInterval',
'key.completion_check_dependency_interval'),
KEY('AnnotatedTypename', 'key.annotated.typename'),
KEY('CompileOperation', 'key.compile_operation'),
KEY('EffectiveAccess', 'key.effective_access'),
]
UID_REQUESTS = [
REQUEST('ProtocolVersion', 'source.request.protocol_version'),
REQUEST('CompilerVersion', 'source.request.compiler_version'),
REQUEST('CrashWithExit', 'source.request.crash_exit'),
R |
wannaphongcom/flappy | flappy/events/focusevent.py | Python | mit | 1,029 | 0.009718 |
from event import Event
class FocusEvent(Event):
FOCUS_IN = "focusIn"
FOCUS_OUT = "focusOut"
KEY_FOCUS_CHANGE = "keyFocusChange"
MOUSE_FOCUS_CHANGE = "mouseFocusChange"
    def __init__(self, etype, bubbles=True, cancelable=False,
                 relatedObject=None, shiftKey=None, keyCode=0,
                 direction='none'):
        """Create a focus event of type *etype*.

        NOTE(review): `direction` is accepted but never stored on the
        instance -- confirm whether it should be kept.
        """
        Event.__init__(self, etype, bubbles, cancelable)
        self.relatedObject = relatedObject
        self.keyCode = keyCode
        self.shiftKey = shiftKey
    def clone(self):
        """Return a copy of this event with the same focus-related state."""
        return FocusEvent(self.type, self.bubbles, self.cancelable,
                          self.relatedObject, self.shiftKey, self.keyCode)
def __str__(self):
s = '[FocusEvent type=%s bubbles=%s cancelable=%s' \
'relatedObject=%s shiftKey=%s keyCode=%s]' % \
(self.type, str(self.bubbles), str(sel | f.cancelable),
str(self.relatedObject), str(self.shiftKey),
str(self. | keyCode)) |
scikit-learn/scikit-learn | sklearn/metrics/_ranking.py | Python | bsd-3-clause | 67,892 | 0.000324 | """Metrics to assess performance on classification task given scores.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Michal Karbownik <michakarbownik@gmail.com>
# License: BSD 3 clause
import warnings
from functools import partial
import numpy as np
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils.validation import _check | _sample_weight
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target |
from ..utils.extmath import stable_cumsum
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils._encode import _encode, _unique
from ._base import (
_average_binary_score,
_average_multiclass_ovo_score,
_check_pos_label_consistency,
)
def auc(x, y):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general function, given points on a curve. For computing the
    area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
    way to summarize a precision-recall curve, see
    :func:`average_precision_score`.

    Parameters
    ----------
    x : ndarray of shape (n,)
        x coordinates. These must be either monotonic increasing or monotonic
        decreasing.
    y : ndarray of shape, (n,)
        y coordinates.

    Returns
    -------
    auc : float

    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    average_precision_score : Compute average precision from prediction scores.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError(
            "At least 2 points are needed to compute area under curve, but x.shape = %s"
            % x.shape
        )

    # Determine the orientation of x: it must be monotonic in one direction.
    steps = np.diff(x)
    decreasing_somewhere = bool(np.any(steps < 0))
    if decreasing_somewhere and not np.all(steps <= 0):
        raise ValueError("x is neither increasing nor decreasing : {}.".format(x))
    direction = -1 if decreasing_somewhere else 1

    area = direction * np.trapz(y, x)
    if isinstance(area, np.memmap):
        # Reductions inside np.trapz do not collapse numpy.memmap inputs to
        # a scalar the way they do for plain ndarrays; coerce explicitly.
        area = area.dtype.type(area)
    return area
def average_precision_score(
y_true, y_score, *, average="macro", pos_label=1, sample_weight=None
):
"""Compute average precision (AP) from prediction scores.
AP summarizes a precision-recall curve as the weighted mean of precisions
achieved at each threshold, with the increase in recall from the previous
threshold used as the weight:
.. math::
\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
where :math:`P_n` and :math:`R_n` are the precision and recall at the nth
threshold [1]_. This implementation is not interpolated and is different
from computing the area under the precision-recall curve with the
trapezoidal rule, which uses linear interpolation and can be too
optimistic.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : ndarray of shape (n_samples,) or (n_samples, n_classes)
True binary labels or binary label indicators.
y_score : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by :term:`decision_function` on some classifiers).
average : {'micro', 'samples', 'weighted', 'macro'} or None, \
default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
pos_label : int or str, default=1
The label of the positive class. Only applied to binary ``y_true``.
For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
average_precision : float
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Notes
-----
.. versionchanged:: 0.19
Instead of linearly interpolating between operating points, precisions
are weighted by the change in recall since the last operating point.
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/w/index.php?title=Information_retrieval&
oldid=793358396#Average_precision>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores)
0.83...
"""
def _binary_uninterpolated_average_precision(
y_true, y_score, pos_label=1, sample_weight=None
):
precision, recall, _ = precision_recall_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
y_type = type_of_target(y_true, input_name="y_true")
if y_type == "multilabel-indicator" and pos_label != 1:
raise ValueError(
"Parameter pos_label is fixed to 1 for "
"multilabel-indicator y_true. Do not set "
"pos_label or set pos_label to 1."
)
elif y_type == "binary":
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = np.unique(y_true).tolist()
if len(present_labels) == 2 and pos_label not in present_labels:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It should be "
f"one of {present_labels}"
)
average_precision = partial(
_binary_uninterpolated_average_precision, pos_label=p |
dlakata/VocabFinder | vocabfinder/__init__.py | Python | gpl-2.0 | 535 | 0.003738 | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask.ext.mail import Mail
from flask.ext.babel import Babel
from flask.ext.migrate import Migrate

# NOTE(review): the flask.ext.* import namespace was removed in Flask 1.0;
# this presumably pins Flask < 1.0 — verify before upgrading dependencies.

# Single module-level application configured from the project config module.
app = Flask(__name__)
app.config.from_object('config')

# Extension instances bound to the app.
mail = Mail(app)
babel = Babel(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)

# Flask-Security wiring: User/Role models backed by SQLAlchemy.
from vocabfinder.models import User, Role
datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, datastore)

# Imported for side effects: registers the view routes on `app`.
import vocabfinder.views
waseem18/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_pipeline_media.py | Python | agpl-3.0 | 10,726 | 0.002238 | from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python import log as txlog
from scrapy.http import Request, Response
from scrapy.spider import BaseSpider
from scrapy.utils.request import request_fingerprint
from scrapy.contrib.pipeline.media import MediaPipeline
from scrapy.utils.test import get_crawler
from scrapy.utils.signal import disconnect_all
from scrapy import signals
from scrapy import log
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class BaseMediaPipelineTestCase(unittest.TestCase):
    """Exercise the default (pass-through) hook implementations of
    MediaPipeline. Subclasses can point ``pipeline_class`` at a subclass to
    reuse the setUp/tearDown plumbing.
    """

    # Pipeline class under test; overridden by subclasses.
    pipeline_class = MediaPipeline

    def setUp(self):
        # Build a pipeline whose download function returns canned responses
        # from request.meta instead of performing real network I/O.
        self.spider = BaseSpider('media.com')
        self.pipe = self.pipeline_class(download_func=_mocked_download_func)
        self.pipe.open_spider(self.spider)
        self.info = self.pipe.spiderinfo[self.spider]

    def tearDown(self):
        # Disconnect every public scrapy signal so handlers registered by a
        # test cannot leak into the next one.
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                disconnect_all(signal)
        self.pipe.close_spider(self.spider)

    def test_default_media_to_download(self):
        # Default hook declines to schedule anything: returns None.
        request = Request('http://url')
        assert self.pipe.media_to_download(request, self.info) is None

    def test_default_get_media_requests(self):
        # Default hook yields no media requests for an item.
        item = dict(name='name')
        assert self.pipe.get_media_requests(item, self.info) is None

    def test_default_media_downloaded(self):
        # Default hook passes the response through unchanged.
        request = Request('http://url')
        response = Response('http://url', body='')
        assert self.pipe.media_downloaded(response, request, self.info) is response

    def test_default_media_failed(self):
        # Default hook passes the failure through unchanged.
        request = Request('http://url')
        fail = Failure(Exception())
        assert self.pipe.media_failed(fail, request, self.info) is fail

    def test_default_item_completed(self):
        item = dict(name='name')
        assert self.pipe.item_completed([], item, self.info) is item

        # Check that failures are logged by default
        fail = Failure(Exception())
        results = [(True, 1), (False, fail)]
        events = []
        # Capture twisted log events emitted while completing the item.
        txlog.addObserver(events.append)
        new_item = self.pipe.item_completed(results, item, self.info)
        txlog.removeObserver(events.append)
        self.flushLoggedErrors()
        assert new_item is item
        assert len(events) == 1
        assert events[0]['logLevel'] == log.ERROR
        assert events[0]['failure'] is fail

        # disable failure logging and check again
        self.pipe.LOG_FAILED_RESULTS = False
        events = []
        txlog.addObserver(events.append)
        new_item = self.pipe.item_completed(results, item, self.info)
        txlog.removeObserver(events.append)
        self.flushLoggedErrors()
        assert new_item is item
        assert len(events) == 0

    @inlineCallbacks
    def test_default_process_item(self):
        # With no media requests, process_item returns the item untouched.
        item = dict(name='name')
        new_item = yield self.pipe.process_item(item, self.spider)
        assert new_item is item
class MockedMediaPipeline(MediaPipeline):
    """MediaPipeline subclass that records the name of every overridden hook
    as it fires, so tests can assert on invocation order via ``_mockcalled``.
    """

    def __init__(self, *args, **kwargs):
        super(MockedMediaPipeline, self).__init__(*args, **kwargs)
        self._mockcalled = []

    def _record(self, hook_name):
        # Chronological trace of hook invocations.
        self._mockcalled.append(hook_name)

    def download(self, request, info):
        self._record('download')
        return super(MockedMediaPipeline, self).download(request, info)

    def media_to_download(self, request, info):
        self._record('media_to_download')
        meta = request.meta
        if 'result' in meta:
            # Canned per-request override used by the tests.
            return meta['result']
        return super(MockedMediaPipeline, self).media_to_download(request, info)

    def get_media_requests(self, item, info):
        self._record('get_media_requests')
        return item.get('requests')

    def media_downloaded(self, response, request, info):
        self._record('media_downloaded')
        return super(MockedMediaPipeline, self).media_downloaded(
            response, request, info)

    def media_failed(self, failure, request, info):
        self._record('media_failed')
        return super(MockedMediaPipeline, self).media_failed(
            failure, request, info)

    def item_completed(self, results, item, info):
        self._record('item_completed')
        completed = super(MockedMediaPipeline, self).item_completed(
            results, item, info)
        completed['results'] = results
        return completed
class MediaPipelineTestCase(BaseMediaPipelineTestCase):
pipeline_class = MockedMediaPipeline
@inlineCallbacks
def test_result_succeed(self):
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
rsp = Response('http://url1')
req = Request('http://url1', meta=dict(response=rsp), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_downloaded', 'request_callback', 'item_completed'])
@inlineCallbacks
def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
fail = Failure(Exception())
req = Request('http://url1', meta=dict(response=fail), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(False, fail)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_failed', 'request_errback', 'item_completed'])
@inlineCallbacks
def test_mix_of_success_and_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
fail = Failure(Exception())
req2 = Request('http://url2', meta=dict(response=fail))
item = dict(requests=[req1, req2])
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp1), (False, fail)])
m = self.pipe._mockcalled
# only once
self.assertEqual(m[0], 'get_media_requests') # first hook called
self.assertEqual(m.count('get_med | ia_requests'), 1)
self.assertEqual(m.count('item_completed'), 1)
self.assertEqual(m[-1], 'item_completed') # last hook called
# twice, one per request
self. | assertEqual(m.count('media_to_download'), 2)
# one to handle success and other for failure
self.assertEqual(m.count('media_downloaded'), 1)
self.assertEqual(m.count('media_failed'), 1)
@inlineCallbacks
def test_get_media_requests(self):
# returns single Request (without callback)
req = Request('http://url')
item = dict(requests=req) # pass a single item
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req) in self.info.downloaded
# returns iterable of Requests
req1 = Request('http://url1')
req2 = Request('http://url2')
item = dict(requests=iter([req1, req2]))
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req1) in self.info.downloaded
assert request_fingerprint(req2) in self.info.downloaded
@inlineCallbacks
def test_results_are_cached_across_multiple_items(self):
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
item = dict(requests=req1)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertTrue(new_item |
honmaple/maple-bbs | forums/admin/forums.py | Python | gpl-3.0 | 1,143 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2016 jianglin
# File Name: forums.py
# Author: jianglin
# Email: xiyang0807@gmail.com
# Created: 2016-12-17 13:12:23 (CST)
# Last Update:星期五 2017-11-10 11:04:16 (CST)
# By:
# Description:
# **************************************************************************
from .views import BaseView
from forums.extension import db
from forums.api.forums.models import Board
from forums.api.tag.models import Tags
class BoardView(BaseView):
    """Admin view for forum boards."""

    # Bug fix: ('topics') is just the string 'topics' (missing trailing
    # comma). As a string, membership checks become substring checks, which
    # could wrongly exclude other columns; a one-element tuple is intended.
    form_excluded_columns = ('topics',)
class TagView(BaseView):
    """Admin view for forum tags (nodes), searchable by name."""

    column_searchable_list = ['name']
    # 'topics' and 'followers' are relationship collections; keep them out
    # of the admin edit form.
    form_excluded_columns = ('topics', 'followers')
def init_admin(admin):
    """Register the forum admin views (boards and tags) on the admin site."""
    admin.add_view(
        BoardView(
            Board,
            db.session,
            name='管理版块',
            endpoint='admin_board',
            category='管理社区'))
    admin.add_view(
        TagView(
            Tags,
            db.session,
            name='管理节点',
            endpoint='admin_tag',
            category='管理社区'))
jrjhealey/bioinfo-tools | Shannon.py | Python | gpl-3.0 | 6,290 | 0.006518 | # This script will calculate Shannon entropy from a MSA.
# Dependencies:
# Biopython, Matplotlib, Math
"""
Shannon's entropy equation (latex format):
H=-\sum_{i=1}^{M} P_i\,log_2\,P_i
Entropy is a measure of the uncertainty of a probability distribution (p1, ..... , pM)
https://stepic.org/lesson/Scoring-Motifs-157/step/7?course=Bioinformatics-Algorithms&unit=436
Where, Pi is the fraction of nucleotide bases of nucleotide base type i,
and M is the number of nucleotide base types (A, T, G or C)
H ranges from 0 (only one base/residue in present at that position) to 4.322 (all 20 residues are equally
represented in that position).
Typically, positions with H > 2.0 are considered variable, whereas those with H < 2 are considered conserved.
Highly conserved positions are those with H <1.0 (Litwin and Jores, 1992).
A minimum number of sequences is however required (~100) for H to describe the diversity of a protein family.
"""
import os
import sys
import warnings
import traceback
__author__ = "Joe R. J. Healey"
__version__ = "1.0.0"
__title__ = "ShannonMSA"
__license__ = "GPLv3"
__author_email__ = "J.R.J.Healey@warwick.ac.uk"
def parseArgs():
    """Parse command line arguments.

    :returns: the populated argparse namespace.
    :raises SystemExit: raised by argparse when required options are missing.
    """
    import argparse
    try:
        parser = argparse.ArgumentParser(
            description='Compute per base/residue Shannon entropy of a Multiple Sequence Alignment.')
        parser.add_argument('-a',
                            '--alignment',
                            action='store',
                            required=True,
                            help='The multiple sequence alignment (MSA) in any of the formats supported by Biopython\'s AlignIO.')
        parser.add_argument('-f',
                            '--alnformat',
                            action='store',
                            default='fasta',
                            help='Specify the format of the input MSA to be passed in to AlignIO.')
        parser.add_argument('-v',
                            '--verbose',
                            action='count',
                            default=0,
                            help='Verbose behaviour, printing parameters of the script.')
        parser.add_argument('-m',
                            '--runningmean',
                            action='store',
                            type=int,
                            default=0,
                            help='Return the running mean (a.k.a moving average) of the MSAs Shannon Entropy. Makes for slightly smoother plots. Providing the number of points to average over switches this on.')
        parser.add_argument('--makeplot',
                            action='store_true',
                            help='Plot the results via Matplotlib.')
    except Exception:
        # Exception (not a bare except) so Ctrl-C / SystemExit still
        # propagate; re-raise after reporting instead of falling through to
        # an unbound 'parser' NameError as the original code did.
        print("An exception occurred with argument parsing. Check your provided options.")
        traceback.print_exc()
        raise

    return parser.parse_args()
def parseMSA(msa, alnformat, verbose):
    """Parse in the MSA file using Biopython's AlignIO.

    :param msa: path to the alignment file.
    :param alnformat: format name understood by AlignIO (e.g. 'fasta').
    :param verbose: verbosity level; > 0 prints the alignment length.
    :returns: tuple of (alignment object, [alignment length], 1-based column
        index sequence). Exits the process with status 1 if the sequences
        are not all the same length.
    """
    from Bio import AlignIO
    alignment = AlignIO.read(msa, alnformat)

    # Do a little sanity checking:
    seq_lengths_list = []
    for record in alignment:
        seq_lengths_list.append(len(record))

    # A well-formed alignment has exactly one distinct sequence length.
    seq_lengths = set(seq_lengths_list)

    if verbose > 0: print("Alignment length is:" + str(list(seq_lengths)))

    if len(seq_lengths) != 1:
        sys.stderr.write("Your alignment lengths aren't equal. Check your alignment file.")
        sys.exit(1)

    # 1-based column positions, used for reporting and plotting.
    index = range(1, list(seq_lengths)[0]+1)

    return alignment, list(seq_lengths), index
##################################################################
# Function to calculate the Shannon's entropy per alignment column
# H=-\sum_{i=1}^{M} P_i\,log_2\,P_i (http://imed.med.ucm.es/Tools/svs_help.html)
# Gaps and N's are included in the calculation
##################################################################
def shannon_entropy(list_input):
    """Return Shannon's entropy H = -sum(P_i * log2(P_i)) for one alignment
    column, where P_i is the fraction of residues of type i.

    Gaps and ambiguous residues are counted like any other symbol.
    """
    import math

    column_size = float(len(list_input))
    fractions = (
        list_input.count(symbol) / column_size for symbol in set(list_input)
    )
    return -sum(p * math.log(p, 2) for p in fractions)
def shannon_entropy_list_msa(alignment):
    """Return a list with the Shannon entropy of every column of the MSA.

    :param alignment: Biopython alignment object, indexable as
        ``alignment[:, col]`` to extract a column.
    :returns: list of per-column entropy values, in column order.
    """
    # range (not the Python-2-only xrange) keeps this working on Python 3;
    # the alignment width is taken from the first record.
    return [
        shannon_entropy(list(alignment[:, col_no]))
        for col_no in range(len(list(alignment[0])))
    ]
def plot(index, sel, verbose):
    """Display the per-position entropy values as a line plot.

    Blocks until the matplotlib window is closed.
    """
    import matplotlib.pyplot as plt

    if verbose > 0:
        print("Plotting data...")

    plt.plot(index, sel)
    plt.xlabel('MSA Position Index', fontsize=16)
    plt.ylabel('Shannon Entropy', fontsize=16)
    plt.show()
def running_mean(l, N):
    """Return the running mean (moving average) of *l* over a window of *N*
    points.

    The first ``N - 1`` entries are averaged over however much of the
    prefix is available, so the result has the same length as the input.

    :param l: sequence of numbers.
    :param N: window size; values larger than ``len(l)`` are clamped so the
        whole sequence is averaged (the original code raised IndexError).
    :returns: list of running means, same length as *l*.
    """
    # Clamp the window so short inputs no longer crash.
    window = min(N, len(l))
    result = [0] * len(l)
    acc = 0  # renamed from 'sum', which shadowed the builtin
    # Prefix: average over the points seen so far.
    for i in range(window):
        acc += l[i]
        result[i] = acc / (i + 1)
    # Steady state: slide the window by dropping the oldest point.
    for i in range(window, len(l)):
        acc = acc - l[i - window] + l[i]
        result[i] = acc / window
    return result
def main():
    """Driver: parse options, compute per-column Shannon entropy of the MSA
    and print an index/entropy table (optionally smoothed and/or plotted)."""
    args = parseArgs()

    alignment, seq_lengths, index = parseMSA(
        args.alignment, args.alnformat, args.verbose)
    entropies = shannon_entropy_list_msa(alignment)

    # A positive window size switches smoothing on.
    if args.runningmean > 0:
        entropies = running_mean(entropies, args.runningmean)

    if args.makeplot is True:
        plot(index, entropies, args.verbose)

    if args.verbose > 0:
        print("Index" + '\t' + "Entropy")
    for position, value in zip(index, entropies):
        print(str(position) + '\t' + str(value))


if __name__ == '__main__':
    main()
toenuff/treadmill | lib/python/treadmill/postmortem.py | Python | apache-2.0 | 4,780 | 0.000209 | """Collect Treadmill node information after a crash.
"""
from __future__ import absolute_import
import os
import glob
import logging
import shutil
import tempfile
from treadmill import fs
from treadmill import subproc
_LOGGER = logging.getLogger(__name__)
_IFCONFIG = 'ifconfig'
_SYSCTL = 'sysctl'
_DMESG = 'dmesg'
_TAIL = 'tail'
def _safe_copy(src, dest):
    """Copy ``src`` to ``dest``, creating ``dest``'s directory tree if needed.

    Failures are logged rather than raised, so one unreadable file does not
    abort the whole postmortem collection.
    """
    parent = os.path.dirname(dest)
    if not os.path.exists(parent):
        try:
            os.makedirs(parent)
        except OSError:
            # Lost a creation race, or the tree is not creatable; copyfile
            # below will surface (and log) the real problem if any.
            pass
    try:
        shutil.copyfile(src, dest)
        _LOGGER.debug('file copied %s => %s', src, dest)
    except (IOError, OSError):
        # IOError included: on Python 2 shutil.copyfile raises IOError
        # (which is *not* an OSError subclass there) for unreadable files.
        _LOGGER.exception('unable to copy %s => %s', src, dest)
def collect(approot, archive_filename):
    """Collect node information in case of blackout.

    :param approot:
        treadmill root, usually /var/tmp/treadmill
    :type approot:
        ``str``
    :param archive_filename:
        archive path file
    :type archive_filename:
        ``str``
    :returns:
        path of the generated archive, or ``None`` when archiving failed
        (the staging directory is then kept on disk for manual inspection).
    """
    destroot = tempfile.mkdtemp()
    _LOGGER.info('save node info in %s', destroot)

    collect_init_services(approot, destroot)
    collect_running_app(approot, destroot)
    collect_sysctl(destroot)
    collect_cgroup(approot, destroot)
    collect_localdisk(approot, destroot)
    collect_network(approot, destroot)
    collect_message(destroot)

    try:
        archive_filename = fs.tar(sources=destroot,
                                  target=archive_filename,
                                  compression='gzip').name
        _LOGGER.info('node info archive file: %s', archive_filename)
        shutil.rmtree(destroot)
        return archive_filename
    except Exception:  # pylint: disable=W0703
        # 'except Exception' (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate. destroot is deliberately kept so the
        # collected files can be inspected via the path logged above.
        _LOGGER.exception('Failed to generate node info archive')
        return None
def collect_init_services(approot, destroot):
    """Copy the current log of every treadmill init service into destroot,
    mirroring the source path layout."""
    for log_path in glob.glob('%s/init/*/log/current' % approot):
        _safe_copy(log_path, '%s%s' % (destroot, log_path))
def collect_running_app(approot, destroot):
    """Copy run logs and system-service logs of running treadmill apps into
    destroot, mirroring the source path layout."""
    patterns = (
        '%s/running/*/run.*' % approot,
        '%s/running/*/sys/*/log/current' % approot,
    )
    for pattern in patterns:
        for src in glob.glob(pattern):
            _safe_copy(src, '%s%s' % (destroot, src))
def collect_sysctl(destroot):
    """Dump every kernel sysctl setting into <destroot>/sysctl."""
    with open('%s/sysctl' % destroot, 'w+') as out:
        out.write(subproc.check_output([_SYSCTL, '-a']))
def collect_cgroup(approot, destroot):
    """Snapshot treadmill cgroup information: the cgroup service directory
    plus the per-subsystem treadmill/core cgroup trees.
    """
    svc_src = "%s/cgroup_svc" % approot
    svc_dest = "%s%s" % (destroot, svc_src)
    try:
        shutil.copytree(svc_src, svc_dest)
    except (shutil.Error, OSError):
        _LOGGER.exception('fail to copy %s => %s', svc_src, svc_dest)

    for cgrp_core in glob.glob('/cgroup/*/treadmill/core'):
        core_dest = '%s%s' % (destroot, cgrp_core)
        try:
            shutil.copytree(cgrp_core, core_dest)
        except (shutil.Error, OSError):
            # Bug fix: this log line previously reported the service-dir
            # paths (src/dest) instead of the core cgroup paths.
            _LOGGER.exception('fail to copy %s => %s', cgrp_core, core_dest)
def collect_localdisk(approot, destroot):
    """Snapshot the localdisk service state directory into destroot."""
    svc_dir = '%s/localdisk_svc' % approot
    target = '%s%s' % (destroot, svc_dir)
    try:
        shutil.copytree(svc_dir, target)
    except (shutil.Error, OSError):
        _LOGGER.exception('fail to copy %s => %s', svc_dir, target)
# FIXME vgdisplay requires root
def collect_network(approot, destroot):
    """Snapshot the network service state directory and the host interface
    configuration (ifconfig output)."""
    svc_dir = '%s/network_svc' % approot
    target = '%s%s' % (destroot, svc_dir)
    try:
        shutil.copytree(svc_dir, target)
    except (shutil.Error, OSError):
        _LOGGER.exception('fail to copy %s => %s', svc_dir, target)

    with open('%s/ifconfig' % destroot, 'w') as out:
        out.write(subproc.check_output([_IFCONFIG]))
def collect_message(destroot):
    """Capture the kernel ring buffer (dmesg) and the tail of
    /var/log/messages into destroot."""
    with open('%s/dmesg' % destroot, 'w') as out:
        out.write(subproc.check_output([_DMESG]))

    messages = subproc.check_output(
        [_TAIL, '-n', '100', '/var/log/messages']
    )
    dest_messages = '%s/var/log/messages' % destroot
    try:
        # makedirs directly instead of exists()+makedirs: avoids the
        # check-then-create race present in the original code.
        os.makedirs(os.path.dirname(dest_messages))
    except OSError:
        # Directory already exists; the open below surfaces real errors.
        pass
    with open(dest_messages, 'w') as out:
        out.write(messages)
|
nvl1109/testmagic | magicserver/settings.py | Python | gpl-2.0 | 2,220 | 0 | """
Django settings for magicserver project.

For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'j0y302d%=(27u)ux)0u_7l6-la9nfw5ub=_9lrzxv7$@c=!yr='

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

# Empty is fine only while DEBUG is on; list real host names for production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'magicserver',
    'testmagic',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'magicserver.urls'

WSGI_APPLICATION = 'magicserver.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    },
    # Secondary sqlite database used by the test app.
    'test': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'newdb.sqlite3'),
    },
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
dsolimando/Hot | hot-cli/src/main/resources/examples/example.show.py | Python | gpl-3.0 | 564 | 0.046099 | s = "Hello My friend"
def printHello(request):
    # Plain-text response body.
    return s


def printJson(request):
    # Returned dict is serialized as the JSON response body.
    return {"items":{"name":"damien", "age":8}}


def onconnect(connection):
    # Per-connection websocket lifecycle callbacks.
    def ondata():
        # NOTE(review): 'data' is not defined in this scope -- presumably
        # the handler should be declared as ondata(data); verify against
        # the Hot websocket API before relying on this example.
        connection.write(data)
    def onclose():
        print 'close'
    connection.data(ondata)
    connection.close(onclose)


# Route registration: dispatch by HTTP method, path and Accept header.
show.rest.get("/pyitems").headers(["Accept: text/plain"]).then(printHello)
show.rest.get("/pyitems").headers(["Accept: application/json"]).then(printJson)
show.rest.put("/pyitem").then(printHello)
show.websocket.addHandler({'path':'/pychatroom'}).connect(onconnect)
berkmancenter/mediacloud | apps/common/src/python/mediawords/solr/query.py | Python | agpl-3.0 | 4,650 | 0.002581 | """
Functions for parsing Solr queries.
"""
import datetime
import re
from typing import Dict, Any, Optional, List
from dateutil.relativedelta import relativedelta
from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
def __get_solr_query_month_clause(topic: Dict[str, Any], month_offset: int) -> Optional[str]:
    """
    Build a Solr 'publish_day' clause covering one month of the topic's date
    range, starting 'month_offset' months after the topic start date.

    Return None when the offset start falls past the topic end date; the
    window is truncated at the topic end date.
    """
    topic = decode_object_from_bytes_if_needed(topic)
    if isinstance(month_offset, bytes):
        month_offset = decode_object_from_bytes_if_needed(month_offset)
    month_offset = int(month_offset)

    topic_start = datetime.datetime.strptime(topic['start_date'], '%Y-%m-%d')
    topic_end = datetime.datetime.strptime(topic['end_date'], '%Y-%m-%d')

    window_start = topic_start + relativedelta(months=month_offset)
    if window_start > topic_end:
        return None

    window_end = min(window_start + relativedelta(months=1), topic_end)

    # Both endpoints are inclusive: the window spans from midnight of the
    # start day through the last second of the end day.
    solr_start = window_start.strftime('%Y-%m-%d') + 'T00:00:00Z'
    solr_end = window_end.strftime('%Y-%m-%d') + 'T23:59:59Z'

    return f"publish_day:[{solr_start} TO {solr_end}]"
class McGetFullSolrQueryForTopicException(Exception):
    """Raised when a full Solr query cannot be assembled for a topic, e.g.
    when neither the seed query nor the parameters restrict the media set."""
    pass
def get_full_solr_query_for_topic(db: DatabaseHandler,
                                  topic: dict,
                                  media_ids: List[int] = None,
                                  media_tags_ids: List[int] = None,
                                  month_offset: int = 0) -> Optional[dict]:
    """
    Get the full Solr query by combining the 'solr_seed_query' with generated clauses for start and end date from
    topics and media clauses from 'topics_media_map' and 'topics_media_tags_map'.

    Only return a query for up to a month of the given a query, using the zero indexed 'month_offset' to fetch
    'month_offset' to return months after the first.

    Return None if the 'month_offset' puts the query start date beyond the topic end date. Otherwise return dictionary
    in the form of { 'q': query, 'fq': filter_query }.

    FIXME topic passed as a parameter might not even exist yet, e.g. this gets called as part of topics/create.
    """
    topic = decode_object_from_bytes_if_needed(topic)
    media_ids = decode_object_from_bytes_if_needed(media_ids)
    media_tags_ids = decode_object_from_bytes_if_needed(media_tags_ids)
    if isinstance(month_offset, bytes):
        month_offset = decode_object_from_bytes_if_needed(month_offset)

    # Normalize IDs to ints (they may arrive as strings after decoding).
    if media_ids:
        media_ids = [int(media_id) for media_id in media_ids]
    if media_tags_ids:
        media_tags_ids = [int(media_tag_id) for media_tag_id in media_tags_ids]

    date_clause = __get_solr_query_month_clause(topic=topic, month_offset=month_offset)
    if not date_clause:
        # month_offset is past the topic end date.
        return None

    solr_query = f"( {topic['solr_seed_query']} )"

    media_clauses = []
    topics_id = topic.get('topics_id', None)
    if topics_id:
        # Fall back to the topic's stored media/tag maps when the caller
        # did not pass explicit IDs.
        if not media_ids:
            media_ids = db.query("""
                SELECT media_id
                FROM topics_media_map
                WHERE topics_id = %(topics_id)s
            """, {'topics_id': topics_id}).flat()

        if not media_tags_ids:
            media_tags_ids = db.query("""
                SELECT tags_id
                FROM topics_media_tags_map
                WHERE topics_id = %(topics_id)s
            """, {'topics_id': topics_id}).flat()

    if media_ids:
        media_ids_list = ' '.join([str(_) for _ in media_ids])
        media_clauses.append(f"media_id:( {media_ids_list} )")

    if media_tags_ids:
        media_tags_ids_list = ' '.join([str(_) for _ in media_tags_ids])
        media_clauses.append(f"tags_id_media:( {media_tags_ids_list} )")

    # The query must restrict media somewhere: either in the seed query
    # itself or via at least one generated media clause.
    if not re.search(r'media_id:|tags_id_media:', topic.get('solr_seed_query', '')):
        if not media_clauses:
            raise McGetFullSolrQueryForTopicException("Query must include at least one media source or media set")

    if media_clauses:
        media_clause_list = ' or '.join(media_clauses)
        solr_query += f" and ( {media_clause_list} )"

    solr_params = {
        'q': solr_query,
        'fq': date_clause,
    }

    log.debug(f"Full Solr query: {solr_params}")

    return solr_params
|
dremdem/sfc_sdk | sfc_main.py | Python | mit | 4,144 | 0.003861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Testing module from creating product using
http://www.sendfromchina.com/default/index/webservice
"""
from flask import Flask, render_template, request
from flask_wtf.csrf import CSRFProtect
from werkzeug.datastructures import MultiDict
from forms import SFCCreateOrder, SFCCreateProduct, SFCOrderDetail, SFCASNInfo, SFCgetOrderByCode
from sfc_api import SFCAPI
# Flask application setup with CSRF protection for all form posts.
app = Flask(__name__)
csrf = CSRFProtect(app)
csrf.init_app(app)
# NOTE(review): hard-coded session secret committed to source control --
# move to an environment variable / config file before deployment.
app.secret_key = 's3cr3tasdasdasdasd'
# SFC webservice credentials sent with every SOAP request.
# NOTE(review): live-looking customerId/appToken/appKey are committed here;
# these should be externalized and rotated.
header_request = {'customerId': 'R2036',
                  'appToken': 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDK1YNcdunmWXoK1ys6hyi+LWQdPx6Vmr/9kNlKOw4cK5Q8FWA3nfGeeG49Pq2TlYKVLdSw1fr60AAJFQOuXmol6lmyn+/xwx6j21XLx9/4vdDNSTR8Hcp7oqGNNr5DlI0onhJ7sd+rAxhIOwLNnZv6T/XtVqQNuGVXTq/dX0zkaQIDAQAB',
                  'appKey': 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGmDLbsI4jELqCHgWikWqACICp299WSoiWgqghEXkQfvdEvwS5XWpdsSmdQwryR2rtg0DiS3vf74oVYBDJVHgcUdc2ov7QI5TPBqXJped7OoyrqYzaYFYshzGWgYC0wu5RCb71p2+4Z8NwDoJlvMVU4/fD9pL59PW8yYH1u3x4ewIDAQAB'}
# SOAP endpoint description; SFCAPI wraps the webservice client.
wsdl = 'http://fulfill.sendfromchina.com/default/svc/wsdl'
sfcapi = SFCAPI(p_wsdl=wsdl, p_header_request=header_request)
@app.route("/")
def hello():
    """Render the static landing page."""
    return render_template('index.html')
@app.route("/create_product", methods=['GET', 'POST'])
def create_product():
    """Show the product-creation form; on POST, submit the product to SFC.

    The special form fields (csrf token, QC fields, image URL) are folded
    into the nested structure the SFC API expects.
    """
    res = None
    if request.method != 'POST':
        return render_template('create_product.html', form=SFCCreateProduct(), res=res)
    special_fields = ('csrf_token', 'pocId', 'poValue', 'imgUrl')
    product_info = {field: value for field, value in request.form.items()
                    if field not in special_fields}
    # Image and QC info are nested objects in the SFC payload.
    product_info['image'] = {'imgUrl': request.form['imgUrl']}
    product_info['qcs'] = {'pocId': request.form['pocId'], 'poValue': request.form['poValue']}
    res = sfcapi.create_product(product_info)
    form = SFCCreateProduct(request.form)
    return render_template('create_product.html', form=form, res=res)
@app.route("/create_order", methods=['GET', 'POST'])
def create_order():
    """Show the order-creation form; on POST, create the order via SFC.

    Form fields are split between the order master (SFCCreateOrder) and the
    order detail (SFCOrderDetail) by matching the public attribute names
    declared on each form class.
    """
    res = None
    if request.method == 'POST':
        # Build each field set once instead of rebuilding the list of
        # public attributes for every submitted key (was O(keys*attrs)).
        order_fields = {i for i in SFCCreateOrder.__dict__ if i[0] != '_'}
        detail_fields = {i for i in SFCOrderDetail.__dict__ if i[0] != '_'}
        order_info = {k: v for k, v in request.form.items() if k in order_fields}
        order_detail = {k: v for k, v in request.form.items() if k in detail_fields}
        res = sfcapi.create_order(p_order_info=order_info, p_order_detail=order_detail)
        form_order = SFCCreateOrder(MultiDict(order_info))
        form_order_detail = SFCOrderDetail(MultiDict(order_detail))
    else:
        form_order = SFCCreateOrder()
        form_order_detail = SFCOrderDetail()
    return render_template('create_order.html', form_master=form_order, form_detail=form_order_detail, res=res)
@app.route("/create_asn", methods=['GET', 'POST'])
def create_asn():
    """Show the ASN (advance shipping notice) form; on POST, create it via SFC."""
    res = None
    if request.method == 'POST':
        asn_fields = [a for a in SFCASNInfo.__dict__.keys() if a[0] != '_']
        detail_fields = [a for a in SFCOrderDetail.__dict__.keys() if a[0] != '_']
        asn_info = {}
        order_detail = {}
        # Route every posted field to the form(s) that declare it.
        for key, value in request.form.items():
            if key in asn_fields:
                asn_info[key] = value
            if key in detail_fields:
                order_detail[key] = value
        form_asn = SFCASNInfo(MultiDict(asn_info))
        form_order_detail = SFCOrderDetail(MultiDict(order_detail))
        res = sfcapi.create_asn(p_asn_info=asn_info, p_order_detail=order_detail)
    else:
        form_asn = SFCASNInfo()
        form_order_detail = SFCOrderDetail()
    return render_template('create_asn.html', form_master=form_asn,
                           form_detail=form_order_detail, res=res)
@app.route("/get_order_by_code", methods=['GET', 'POST'])
def get_order():
    """Look up an existing order by its code at the requested detail level."""
    res = None
    form_get_order = SFCgetOrderByCode()
    if request.method == 'POST':
        code = request.form['ordersCode']
        level = request.form['detailLevel']
        res = sfcapi.get_order_by_code(order_code=code, detail_level=level)
        # Re-bind the form so the submitted values stay visible on the page.
        form_get_order = SFCgetOrderByCode(
            MultiDict({'ordersCode': code, 'detailLevel': level}))
    return render_template('get_order_by_code.html', form=form_get_order, res=res)
if __name__ == "__main__":
    # Flask development server; binds all interfaces with debug enabled --
    # for development use only.
    app.run(host='0.0.0.0', debug=True)
|
lizardsystem/flooding | flooding/manage.py | Python | gpl-3.0 | 228 | 0.008772 | import | os
# Fall back to the Windows settings module when the environment has not
# configured one already.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'flooding.windows'
from django.core import management
if __name__ == '__main__':
    # Standard Django management entry point (runserver, migrate, ...).
    management.execute_from_command_line()
|
maxhutch/pypif | pypif/obj/common/value.py | Python | apache-2.0 | 4,437 | 0.002705 | import numbers
from six import string_types
from pypif.obj.common.pio import Pio
from pypif.obj.common.scalar import Scalar
from pypif.obj.common.file_reference import FileReference
class Value(Pio):
    """
    Information about a scalar, vector, or matrix, or a list of one of those.
    """
    def __init__(self, name=None, scalars=None, vectors=None, matrices=None, files=None,
                 units=None, tags=None, **kwargs):
        """
        Constructor.
        :param name: String with the name of the value.
        :param scalars: One or more dictionaries, strings, numbers, or :class:`.Scalar` objects.
        :param vectors: One or more lists of dictionaries, strings, numbers, or :class:`.Scalar` objects,
                each representing a vector.
        :param matrices: One or more lists of lists of dictionaries, strings, numbers, or :class:`.Scalar` objects,
                each representing a matrix with rows as the innermost lists.
        :param files: One or more dictionaries, strings, or :class:`.FileReference` objects.
        :param units: String with the units of the value.
        :param tags: List of strings or numbers that are tags for this object.
        :param kwargs: Dictionary of fields that are not supported.
        """
        super(Value, self).__init__(tags=tags, **kwargs)
        # Backing fields are initialised to None first; assignment then goes
        # through the property setters below, which validate (and, for
        # scalars/vectors/matrices, normalise) the values.
        self._name = None
        self.name = name
        self._files = None
        self.files = files
        self._scalars = None
        if scalars is not None:
            self.scalars = scalars
        self._vectors = None
        if vectors is not None:
            self.vectors = vectors
        self._matrices = None
        if matrices is not None:
            self.matrices = matrices
        self._units = None
        self.units = units
    @property
    def name(self):
        # Human-readable name of this value.
        return self._name
    @name.setter
    def name(self, name):
        self._validate_type('name', name, string_types)
        self._name = name
    @name.deleter
    def name(self):
        self._name = None
    @property
    def scalars(self):
        return self._scalars
    @scalars.sette | r
    def scalars(self, scalars):
        # Validate, convert to Scalar objects, then normalise so the stored
        # form is always a flat list of Scalar instances.
        self._validate_list_type('scalars', scalars, dict, string_types, numbers.Number, Scalar)
        self._scalars = self._get_object(Scalar, scalars)
        self._scalars = [Scalar.normalize(x)
                         for x | in (self._scalars if isinstance(self._scalars, list) else [self._scalars])]
    @scalars.deleter
    def scalars(self):
        self._scalars = None
    @property
    def vectors(self):
        return self._vectors
    @vectors.setter
    def vectors(self, vectors):
        # NOTE(review): `self._vectors[0]` assumes a non-empty value; an empty
        # list would raise IndexError here -- confirm empty input never occurs.
        self._validate_nested_list_type('vectors', vectors, 2, dict, string_types, numbers.Number, Scalar)
        self._vectors = self._get_object(Scalar, vectors)
        self._vectors = [list(map(Scalar.normalize, x))
                         for x in (self._vectors if isinstance(self._vectors[0], list) else [self._vectors])]
    @vectors.deleter
    def vectors(self):
        self._vectors = None
    @property
    def matrices(self):
        return self._matrices
    @matrices.setter
    def matrices(self, matrices):
        # Same pattern as vectors, one level deeper: normalise to a list of
        # matrices, each a list of rows of Scalar objects.
        self._validate_nested_list_type('matrices', matrices, 3, dict, string_types, numbers.Number, Scalar)
        self._matrices = self._get_object(Scalar, matrices)
        self._matrices = [list(map(lambda z: list(map(Scalar.normalize, z)), x))
                          for x in (self._matrices if isinstance(self._matrices[0][0], list) else [self._matrices])]
    @matrices.deleter
    def matrices(self):
        self._matrices = None
    @property
    def units(self):
        return self._units
    @units.setter
    def units(self, units):
        self._validate_type('units', units, string_types)
        self._units = units
    @units.deleter
    def units(self):
        self._units = None
    @property
    def files(self):
        return self._files
    @files.setter
    def files(self, files):
        # Files are validated but not normalised into FileReference objects.
        self._validate_list_type('files', files, dict, FileReference)
        self._files = files
    @files.deleter
    def files(self):
        self._files = None
    def normalize(self):
        # Re-assigning each populated field pushes it back through its setter,
        # re-running the normalisation logic above.
        if self.scalars is not None:
            self.scalars = self.scalars
        if self.vectors is not None:
            self.vectors = self.vectors
        if self.matrices is not None:
            self.matrices = self.matrices
|
willpatterson/LogoBatch | logobatch/resources.py | Python | mit | 3,909 | 0.002558 | """ For classes managing computing and storage resources """
import os
import sys
import socket
import getpass
import paramiko
import subprocess
from collections import namedtuple
class Launcher(object):
    """Factory base class for dispatching shell commands.

    Instantiating ``Launcher`` yields a :class:`LocalLauncher` when no
    hostname is given and a :class:`RemoteLauncher` otherwise.
    """

    # (output, error) pair returned by every launch_command implementation.
    JobOut = namedtuple('JobOut', ['output', 'error'])

    def __new__(cls, hostname):
        target = LocalLauncher if hostname is None else RemoteLauncher
        return super(Launcher, cls).__new__(target)

    @staticmethod
    def create_background_command(commands):
        """Join one command, or a sequence of commands, into a single shell
        command string that runs them sequentially in the background."""
        command_list = [commands] if isinstance(commands, str) else commands
        return '({})&'.format('; '.join(command_list))

    def launch_command(self, command):
        """Abstract: subclasses run *command* and return a ``JobOut``."""
        raise NotImplementedError("Implement in subclass")
class RemoteLauncher(Launcher):
    """
    Used to launch shell commands on remote machines via ssh as well
    as deal with input file and resulting output file transfers
    """

    def __init__(self, hostname, username=None):
        """Open an SSH connection to *hostname*.

        :param hostname: remote host to connect to
        :param username: SSH user name. Defaults to None so paramiko falls
            back to the local user; a default is required because the
            ``Launcher`` factory instantiates this class with only a
            hostname, which previously raised TypeError.
        """
        # Test and setup ssh connection
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(hostname, username=username)

    def configure_remote_data(self):
        """Configures setup to input and output data between nodes and head"""
        pass

    def launch_command(self, command):
        """Launch *command* over ssh and return JobOut(stdout, stderr).

        A non-string command is joined with spaces into a single command line.
        """
        if not isinstance(command, str):
            try:
                command = ' '.join(command)
            except TypeError:
                raise TypeError('Launch job requires a string or iterable')
        _, ssh_stdout, ssh_stderr = self.ssh.exec_command(command)
        return self.JobOut(ssh_stdout, ssh_stderr)
class LocalLauncher(Launcher):
    """Used to launch shell commands on the local machine"""

    def launch_command(self, command):
        """Run *command* locally and return a JobOut(output, error) pair.

        ``communicate()`` returns an (stdout, stderr) tuple, so it must be
        unpacked into the two JobOut fields -- passing the tuple as a single
        positional argument raised TypeError (missing 'error').
        """
        process = subprocess.Popen(command,
                                   shell=False,
                                   stdout=subprocess.PIPE)
        return self.JobOut(*process.communicate())
class Resource(object):
    """
    Factory base class for computing-resource objects used to launch jobs
    on different types of remote and local computing resources.

    Instantiating ``Resource`` inspects the ``resource_type`` keyword and
    returns an instance of whichever subclass lists that name in its
    ``type_names`` set.
    """

    type_names = {}

    def __new__(cls, **kwds):
        """Creates and returns proper resource object type"""
        wanted = kwds.get('resource_type', 'local')
        match = next((sub for sub in cls.__subclasses__()
                      if wanted in sub.type_names), None)
        if match is None:
            raise TypeError("Resource doesn't exist: {}".format(wanted))
        return super(Resource, cls).__new__(match)

    def __init__(self, name, hostname=None, username=None, **kwds):
        # A None hostname makes the Launcher factory pick a LocalLauncher.
        self.name = name
        self.launcher = Launcher(hostname)
        self.username = getpass.getuser() if username is None else username
class ComputeServer(Resource):
    """
    Resource subclass for dispatching jobs to a single, logical
    unix computer
    """

    type_names = {'compute_server'}

    def __init__(self, name, hostname=None, **kwds):
        # super().__init__ is already bound to self: passing `self` again
        # shifted the positional args (name=self) and supplied `hostname`
        # twice, raising TypeError on every construction.
        super().__init__(name, hostname=hostname, **kwds)
        # Number of commands to dispatch per launch; defaults to 1.
        self.command_number = kwds.get('command_number', 1)
class SlurmCluster(ComputeServer):
    """Resource subclass for dispatching jobs to a slurm cluster"""

    type_names = {'slurm_cluster'}

    def __init__(self, name, hostname=None, **kwds):
        # Bound super() call must not receive `self` explicitly (the extra
        # argument shifted positionals and passed `hostname` twice).
        super().__init__(name, hostname=hostname, **kwds)
|
eucalyptus/se34euca | 3.3.1/se34euca/se34euca/lib/EucaUITestLib_Security_Group.py | Python | bsd-2-clause | 7,211 | 0.002774 | from se34euca.lib.EucaUITestLib_Base import *
import time
class EucaUITestLib_Security_Group(EucaUITestLib_Base):
    """Selenium UI tests for the Security Groups pages (Python 2).

    Each method drives the browser through one workflow and returns 0 on
    completion; element lookups in the inherited helpers raise on failure.
    """
    def test_ui_gotopage_security_groups(self):
        """Navigate from the dashboard to the Security Groups page."""
        print
        print "Started Test: GotoPage Security Groups"
        self.click_element_by_id("euca-logo")
        print
        print "Test: Received the Page Title -> " + self.driver.title
        self.click_element_by_id("dashboard-netsec-sgroup")
        print
        print "Test: Clicked the GoToPage Button"
        self.verify_element_by_id("table-sgroups-new")
        print
        print "Finished Test: GotoPage Security Groups"
        print
        return 0
    def test_ui_create_security_group(self):
        """Create a group named 'mywebservice' with SSH, HTTP and ICMP rules."""
        print
        print "Started Test: Create Security Group"
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page Security Group"
        self.click_element_by_css_selector("#dashboard-netsec-sgroup > span")
        self.click_element_by_id("table-sgroups-new")
        print
        print "Test: Create Security Group"
        self.set_keys_by_id("sgroup-name", "mywebservice")
        self.set_keys_by_id("sgroup-description", "Rules for my webservice. Generated by Selenium")
        print "Click: LINK_TEXT -> Rules"
        self.click_element_by_link_text("Rules")
        print "Select: ID -> sgroup-templete, TEXT -> SSH (TCP port 22, for terminal access)"
        self.select_text_by_id("sgroup-template", "SSH (TCP port 22, for terminal access)")
        self.set_keys_by_id("allow-ip", "0.0.0.0/0")
        print "Click: ID -> sgroup-add-rule"
        self.click_element_by_id("sgroup-add-rule")
        self.verify_element_by_id("sgroup-rule-number-0")
        print "Rule 0: " + self.get_text_by_css_selector("ul.sg-rules-list > li")
        print "Select: ID -> sgroup-templete, TEXT -> HTTP (TCP port 80, for web servers)"
        self.select_text_by_id("sgroup-template", "HTTP (TCP port 80, for web servers)")
        self.set_keys_by_id("allow-ip", "0.0.0.0/0")
        print "Click: ID -> sgroup-add-rule"
        self.click_element_by_id("sgroup-add-rule")
        self.verify_element_by_id("sgroup-rule-number-1")
        print "Rule 1: " + self.get_text_by_xpath("//div[@id='sgroup-rules-list']/ul/li[2]")
        print "Select: ID -> sgroup-templete, TEXT -> Custom ICMP"
        # Direct Select usage here (other rules go through select_text_by_id).
        Select(self.driver.find_element_by_id("sgroup-template")).select_by_visible_text("Custom ICMP")
        print "Select: ID -> sgroup-type. TEXT -> All"
        self.select_text_by_id("sgroup-type", "All")
        self.set_keys_by_id("allow-ip", "0.0.0.0/0")
        print "Click: ID -> sgroup-add-rule"
        self.click_element_by_id("sgroup-add-rule")
        self.verify_element_by_id("sgroup-rule-number-2")
        print "Rule 2: " + self.get_text_by_xpath("//div[@id='sgroup-rules-list']/ul/li[3]")
        self.click_element_by_id("sgroup-add-btn")
        print
        print "Finished: Create Security G | roup"
        print
        return 0
    def test_ui_create_empty_security_group(self):
        """Create a security group with no rules at all."""
        print
        print "Started Test: Create Security Group"
        self.cl | ick_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page Security Group"
        self.click_element_by_css_selector("#dashboard-netsec-sgroup > span")
        self.click_element_by_id("table-sgroups-new")
        print
        print "Test: Create Security Group"
        self.set_keys_by_id("sgroup-name", "mywebservice")
        self.set_keys_by_id("sgroup-description", "test")
        self.click_element_by_id("sgroup-add-btn")
        print
        print "Finished: Create Security Group"
        print
        return 0
    def test_ui_add_rules_to_security_group(self):
        """Add an SSH rule to the existing group titled 'test'."""
        print
        print "Started Test: Add rules to a Security Group"
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page Security Group"
        self.click_element_by_link_text("Network & Security")
        self.click_element_by_link_text("Security Groups")
        print"Test: Checkbox the security group"
        self.click_element_by_css_selector('span[title="test"]')
        print "Test: Add rules"
        self.click_element_by_id("more-actions-sgroups")
        self.click_element_by_link_text("Manage rules")
        print "Adding TCP rule"
        self.select_text_by_css_selector("div.content-sections-wrapper > div.rules.content-section > div.form-row > #sgroup-template","SSH (TCP port 22, for terminal access)")
        self.set_keys_by_css_selector("div.content-sections-wrapper > div.rules.content-section > #sgroup-more-rules > div.form-row.sg-inline-options > #allow-ip", "0.0.0.0/0")
        self.click_element_by_id("sgroup-add-btn")
        print
        print "Finished: Create Security Group"
        print
        return 0
    def test_ui_delete_security_group(self):
        """Delete the first security group in the table (checkbox + Delete)."""
        print
        print "Started Test: Delete Security Group"
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page Security Group"
        self.click_element_by_css_selector("#dashboard-netsec-sgroup > span")
        self.verify_element_by_id("table-sgroups-new")
        self.click_element_by_css_selector("td.checkbox-cell.sorting_1 > input[type=\"checkbox\"]")
        self.click_element_by_id("more-actions-sgroups")
        print
        print "Test: Delete Security Group"
        self.click_element_by_link_text("Delete")
        self.click_element_by_id("btn-sgroups-delete-delete")
        print
        print "Finished: Delete Security Group"
        print
        return 0
    def test_ui_delete_security_group_all(self):
        """Select every security group via the check-all box and delete them."""
        print
        print "Started Test: Delete Security Group All"
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print
        print "Test: Go to the Page Security Group"
        self.click_element_by_css_selector("#dashboard-netsec-sgroup > span")
        self.click_element_by_id("sgroups-check-all")
        self.click_element_by_id("more-actions-sgroups")
        print
        print "Test: Delete Security Group All"
        self.click_element_by_link_text("Delete")
        self.click_element_by_id("btn-sgroups-delete-delete")
        print
        print "Finished: Delete Security Group All"
        print
        return 0
    def test_ui_check_security_group_count(self, sg_count):
        """Verify the dashboard shows `sg_count` (a string) security groups."""
        print
        print "Started Test: Check Security Group Count"
        self.click_element_by_link_text("Dashboard")
        self.verify_element_by_link_text("Launch new instance")
        print "Verifying that Security Group Count on Dashboard is "+sg_count
        self.verify_text_displayed_by_css("#dashboard-netsec-sgroup > span",sg_count)
        print
        print "Finished Test: Check Security Group Count"
        print
        return 0
if __name__ == "__main__":
    # Discover and run the test methods above with the unittest CLI runner.
    unittest.main()
|
mattrobenolt/warehouse | warehouse/__about__.tmpl.py | Python | apache-2.0 | 1,202 | 0 | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import | unicode_literals
# This file is automatically generated, do not edit it
__all__ = [
"__title__", "__sum | mary__", "__uri__", "__version__",
"__author__", "__email__", "__license__", "__copyright__",
]
__title__ = "warehouse"
__summary__ = "Next Generation Python Package Repository"
__uri__ = "https://github.com/pypa/warehouse"
__version__ = "{version}"
__build__ = "{build}"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2013 Donald Stufft"
|
astorfi/TensorFlow-World | docs/conf.py | Python | mit | 5,320 | 0.000752 | # -*- coding: utf-8 -*-
#
# TensorFlow-World documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 28 22:26:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# project = u'TensorFlow-World'
copyright = u'2017, Amirsina Torfi'
author = u'Amirsina Torfi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'show_powered_by': False,
'github_user': 'astorfi',
'github_repo': 'TensorFlow-World',
'github_banner': True,
'show_related': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named | "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Title
html_title = 'TensorFlow World'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TensorFlow-Worlddoc'
# If true, links to the reST sources are added to the pages. |
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TensorFlow-World.tex', u'TensorFlow-World Documentation',
u'Amirsina Torfi', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorflow-world', u'TensorFlow-World Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TensorFlow-World', u'TensorFlow-World Documentation',
author, 'TensorFlow-World', 'One line description of project.',
'Miscellaneous'),
]
|
Bauble/bauble.api | test/spec/test_genus.py | Python | bsd-3-clause | 4,372 | 0.003202 | import pytest
import bauble.db as db
from bauble.model.family import Family
from bauble.model.genus import Genus, GenusSynonym, GenusNote
import test.api as api
@pytest.fixture
def setup(organization, session):
setup.organization = session.merge(organization)
setup.user = setup.organization.owners[0]
setup.session = session
db.set_session_schema(session, setup.organization.pg_schema)
return setup
def test_genus_json(setup):
session = setup.session
family = Family(family=api.get_random_name())
genus_name = api.get_random_name()
genus = Genus(family=family, genus=genus_name)
note = GenusNote(genus=genus, note="this is a test")
syn = GenusSynonym(genus=genus, synonym=genus)
session.add_all([family, genus, note, syn])
session.commit()
genus_json = genus.json()
assert 'id' in genus_json
assert genus_json['id'] == genus.id
assert 'genus' in genus_json
assert 'str' in genus_json
assert 'qualifier' in genus_json
note_json = note.json()
assert 'id' in note_json
assert 'genus_id' in note_json
assert note_json['genus_id'] == genus.id
syn_json = syn.json()
assert 'id' i | n syn_json
assert syn_json['genus_id'] == genus.id
assert syn_json['synonym_id'] == genus.id
session.delete(genus)
session.commit()
session.close()
def test_server(setup):
"""
Test the server properly handle /genus resources
"""
user = setup.user
family = api.create_resource('/family', {'family': api.get_random_name()}, user)
# create a genus
first_genus = api | .create_resource('/genus', {'genus': api.get_random_name(), 'family': family},
user)
# create another genus and use the first as a synonym
data = {'genus': api.get_random_name(),
'family': family,
'notes': [{'user': 'me', 'category': 'test', 'date': '2001-1-1',
'note': 'test note'},
{'user': 'me', 'category': 'test', 'date': '2002-2-2',
'note': 'test note2'}],
'synonyms': [first_genus]
#'synonyms': [{'synonym': first_genus}]
}
second_genus = api.create_resource('/genus', data, user)
assert 'id' in second_genus # created
# update the genus
second_genus['genus'] = api.get_random_name()
second_id = second_genus['id']
second_genus = api.update_resource('/genus/' + str(second_id), second_genus, user=user)
assert second_genus['id'] == second_id # make sure they have the same id after the update
# get the genus
first_genus = api.get_resource('/genus/' + str(first_genus['id']), user=user)
# query for genera and make sure the second genus is in the results
genera = api.query_resource('/genus', q=second_genus['genus'], user=user)
# TODO: ** shouldn't len(genera) be 1 since the name should be unique
#assert second_genus['ref'] in [genus['ref'] for genus in genera]
assert second_genus['id'] in [genus['id'] for genus in genera]
# test getting the genus relative to its family
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
# genera = api.get_resource('/family/' + str(family['id']) + "/genera", user=user)
# assert first_genus['id'] in [genus['id'] for genus in genera]
# test getting a family with its genera relations
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
#response_json = api.query_resource('/family', q=family['family'], relations="genera,notes", user=user)
#families = response_json
# TODO: *** i don't know if we still support returning relations like this...do
# we need to
# print(families[0]['genera'])
# assert first_genus['ref'] in [genus['ref'] for genus in families[0]['genera']]
# count the number of genera on a family
# TODO: ** count is temporarily disabled
# count = api.count_resource(family['ref'] + "/genera")
# assert count == "2"
# delete the created resources
api.delete_resource('/genus/' + str(first_genus['id']), user)
api.delete_resource('/genus/' + str(second_genus['id']), user)
api.delete_resource('/family/' + str(family['id']), user)
|
lxsmnv/spark | python/pyspark/streaming/tests.py | Python | apache-2.0 | 62,587 | 0.001646 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
import shutil
from functools import reduce
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version >= "3":
long = int
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.kafka import Broker, KafkaUtils, OffsetRange, TopicAndPartition
from pyspark.streaming.flume import FlumeUtils
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
from pyspark.streaming.listener import StreamingListener
class PySparkStreamingTestCase(unittest.TestCase):
    """Base class for streaming tests.

    Wires up a SparkContext shared by the whole class plus a fresh
    StreamingContext per test, and provides helpers that run a DStream
    transformation over queued RDDs and collect its output.
    """

    timeout = 30  # seconds
    duration = .5

    @classmethod
    def setUpClass(cls):
        class_name = cls.__name__
        conf = SparkConf().set("spark.default.parallelism", 1)
        cls.sc = SparkContext(appName=class_name, conf=conf)
        cls.sc.setCheckpointDir(tempfile.mkdtemp())

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
        # Clean up in the JVM just in case there has been some issues in Python API
        try:
            jSparkContextOption = SparkContext._jvm.SparkContext.get()
            if jSparkContextOption.nonEmpty():
                jSparkContextOption.get().stop()
        except Exception:
            # Best-effort cleanup. Was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; those should propagate.
            pass

    def setUp(self):
        self.ssc = StreamingContext(self.sc, self.duration)

    def tearDown(self):
        if self.ssc is not None:
            self.ssc.stop(False)
        # Clean up in the JVM just in case there has been some issues in Python API
        try:
            jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
            if jStreamingContextOption.nonEmpty():
                jStreamingContextOption.get().stop(False)
        except Exception:
            # Best-effort cleanup; never mask interpreter-level exits.
            pass

    def wait_for(self, result, n):
        """Poll until `result` holds at least `n` items or `self.timeout` elapses."""
        start_time = time.time()
        while len(result) < n and time.time() - start_time < self.timeout:
            time.sleep(0.01)
        if len(result) < n:
            print("timeout after", self.timeout)

    def _take(self, dstream, n):
        """
        Return the first `n` elements in the stream (will start and stop).
        """
        results = []

        def take(_, rdd):
            if rdd and len(results) < n:
                results.extend(rdd.take(n - len(results)))

        dstream.foreachRDD(take)

        self.ssc.start()
        self.wait_for(results, n)
        return results

    def _collect(self, dstream, n, block=True):
        """
        Collect each RDDs into the returned list.

        :return: list, which will have the collected items.
        """
        result = []

        def get_output(_, rdd):
            if rdd and len(result) < n:
                r = rdd.collect()
                if r:
                    result.append(r)

        dstream.foreachRDD(get_output)

        if not block:
            return result

        self.ssc.start()
        self.wait_for(result, n)
        return result

    def _test_func(self, input, func, expected, sort=False, input2=None):
        """
        @param input: dataset for the test. This should be list of lists.
        @param func: wrapped function. This function should return PythonDStream object.
        @param expected: expected output for this testcase.
        """
        if not isinstance(input[0], RDD):
            input = [self.sc.parallelize(d, 1) for d in input]
        input_stream = self.ssc.queueStream(input)
        if input2 and not isinstance(input2[0], RDD):
            input2 = [self.sc.parallelize(d, 1) for d in input2]
        input_stream2 = self.ssc.queueStream(input2) if input2 is not None else None

        # Apply test function to stream.
        if input2:
            stream = func(input_stream, input_stream2)
        else:
            stream = func(input_stream)

        result = self._collect(stream, len(expected))
        if sort:
            self._sort_result_based_on_key(result)
            self._sort_result_based_on_key(expected)
        self.assertEqual(expected, result)

    def _sort_result_based_on_key(self, outputs):
        """Sort the list based on first value."""
        for output in outputs:
            output.sort(key=lambda x: x[0])
class BasicOperationTests(PySparkStreamingTestCase):
def test_map(self):
"""Basic operation test for DStream.map."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.map(str)
expected = [list(map(str, x)) for x in input]
self._test_func(input, func, expected)
def test_flatMap(self):
"""Basic operation test for DStream.faltMap."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.flatMap(lambda x: (x, x * 2))
expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x))))
for x in input]
self._test_func(input, func, expected)
def test_filter(self):
"""Basic operation test for DStream.filter."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.filter(lambda x: x % 2 == 0)
expected = [[y for y in x if y % 2 == 0] for x in input]
self._test_func(input, func, expected)
def test_count(self):
"""Basic operation test for DStream.count."""
input = [range(5), range(10), range(20)]
def func(dstream):
return dstream.count()
expected = [[len(x)] for x in input]
self._test_func(input, func, expected)
def test_reduce(self):
"""Basic operation test for DStream.reduce."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.reduce(operator.add)
expected = [[reduce(operator.add, x)] for x in input]
self._test_func(input, func, expected)
def test_reduceByKey(self):
"""Basic operation test for DStream.reduceByKey."""
input = [[("a", 1), ("a", 1), (" | b", 1), ("b", 1)],
[("", 1), ("", 1), ("", 1), ("", 1)],
[(1, 1), (1, 1), | (2, 1), (2, 1), (3, 1)]]
def func(dstream):
return dstream.reduceByKey(operator.add)
expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
self._test_func(input, func, expected, sort=True)
def test_mapValues(self):
"""Basic operation test for DStream.mapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 2), (3, 3)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.mapValues(lambda x: x + 10)
expected = [[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
[(0, 14), (1, 11), (2, 12), (3, 13)],
[(1, 11), (2, 11), (3, 11), (4, 11)]]
self._test_func(input, func, expected, sort=True)
def test_flatMapValues(self):
"""Basic |
michaelkirk/QGIS | python/plugins/db_manager/db_plugins/postgis/info_model.py | Python | gpl-2.0 | 11,526 | 0.005379 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QApplication
from ..info_model import TableInfo, VectorTableInfo, RasterTableInfo
from ..html_elems import HtmlSection, HtmlParagraph, HtmlTable, HtmlTableHeader, HtmlTableCol
class PGTableInfo(TableInfo):
def __init__(self, table):
self.table = table
def generalInfo(self):
ret = []
# if the estimation is less than 100 rows, try to count them - it shouldn't take long time
if self.table.rowCount is None and self.table.estimatedRowCount < 100:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table.isView else QApplication.translate(
"DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Owner:"), self.table.owner)
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
tbl.extend([
(QApplication.translate("DBManagerPlugin", "Pages:"), self.table.pages),
(QApplication.translate("DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount )
])
# privileges
# has the user access to this | schema?
schema_priv = self.table.database().connector.getSchemaPrivileges(
self.table.schemaName()) if self.table.schema() else None
if schema_priv is None:
pass
elif not schema_priv[1]: # no usage privileges on the schema
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"),
QApplication.translate("DBManagerPlugin",
| "<warning> This user doesn't have usage privileges for this schema!") ))
else:
table_priv = self.table.database().connector.getTablePrivileges((self.table.schemaName(), self.table.name))
privileges = []
if table_priv[0]:
privileges.append("select")
if self.table.rowCount is not None or self.table.rowCount >= 0:
tbl.append((QApplication.translate("DBManagerPlugin", "Rows (counted):"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
"DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)')))
if table_priv[1]: privileges.append("insert")
if table_priv[2]: privileges.append("update")
if table_priv[3]: privileges.append("delete")
priv_string = u", ".join(privileges) if len(privileges) > 0 else QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges!')
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"), priv_string ))
ret.append(HtmlTable(tbl))
if schema_priv is not None and schema_priv[1]:
if table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> This user has read-only privileges.")))
if not self.table.isView:
if self.table.rowCount is not None:
if abs(self.table.estimatedRowCount - self.table.rowCount) > 1 and \
(self.table.estimatedRowCount > 2 * self.table.rowCount or
self.table.rowCount > 2 * self.table.estimatedRowCount):
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> There's a significant difference between estimated and real row count. "
'Consider running <a href="action:vacuumanalyze/run">VACUUM ANALYZE</a>.')))
# primary key defined?
if not self.table.isView:
if len(filter(lambda fld: fld.primaryKey, self.table.fields())) <= 0:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> No primary key defined for this table!")))
return ret
def getSpatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
(QApplication.translate("DBManagerPlugin", "Scripts:"), info[3]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if info[1] is not None and info[1] != info[2]:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> Version of installed scripts doesn't match version of released scripts!\n"
"This is probably a result of incorrect PostGIS upgrade.")))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
elif not self.db.connector.has_geometry_columns_access:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have privileges to read contents of geometry_columns table!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"),
QApplication.translate("DBManagerPlugin", "Default") )
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
char_max_len = fld.charMaxLen if fld.charMaxLen is not None and fld.charMaxLen != -1 else ""
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.nam |
csauer42/rover | controller/rover.py | Python | mit | 4,030 | 0.005211 | import pygame
from pygame.locals import *
from threading import Lock
from videoreceiver import VideoReceiver
from controlio import ControlIO
import sys
class Rover(object):
"""Primary control interface for Rover"""
FPS = 15
MLIMITLOW = 32
LLIMITLOW = 96
WIDTH = 1280
HEIGHT = 960
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
def __init__(self, ip):
self.stop = False
self.ip = ip
def setup(self):
"""Handles library initialization and additional thread setup"""
pygame.init()
pygame.joystick.init()
self.clock = pygame.time.Clock()
self.font = pygame.font.Font('freesansbold.ttf', 16)
if pygame.joystick.get_count() > 0:
self.gamepad = pygame.joystick.Joystick(0)
self.gamepad.init()
else:
print("Gamepad not found. Exiting.")
sys.exit(2)
self.screen = pygame.display.set_mode([self.WIDTH,self.HEIGHT])
pygame.display.set_caption("Rover Control")
self.video_receiver = VideoReceiver(self.ip, self.FPS, (self.WIDTH, self.HEIGHT))
self.video_receiver.start()
self.cio = ControlIO(self.ip)
self.cio.start()
def js_convert(self, pos_list):
"""Convert gamepad values from pygame format to motor control format"""
c0 = int(pos_l | ist[0] * -255)
c1 = int(pos_list[1] * -255)
c2 = int(pos_list[2] * 512)
c3 = int(pos_list[3] | * -512)
if c0 < self.MLIMITLOW and c0 > -1 * self.MLIMITLOW:
c0 = 0
if c1 < self.MLIMITLOW and c1 > -1 * self.MLIMITLOW:
c1 = 0
if c2 < self.LLIMITLOW and c2 > -1 * self.LLIMITLOW:
c2 = 0
if c3 < self.LLIMITLOW and c3 > -1 * self.LLIMITLOW:
c3 = 0
return (c0, c1, c2, c3)
def text_objects(self, text):
"""Helper function for displaying text to screen"""
textSurface = self.font.render(text, True, self.WHITE)
return textSurface, textSurface.get_rect()
def gamepad_position(self, clist):
"""Diplay gamepad analog stick positions to screen"""
TextSurf, TextRect = self.text_objects("Move [%4d,%4d] Look[%4d,%4d]" % clist)
TextRect.center = (self.WIDTH-1000, self.HEIGHT-100)
self.screen.blit(TextSurf, TextRect)
def battery_voltage(self, voltage):
"""Display battery voltage to screen"""
TextSurf, TextRect = self.text_objects("Voltage: %.2f" % voltage)
TextRect.center = (self.WIDTH-280, self.HEIGHT-100)
self.screen.blit(TextSurf, TextRect)
def run(self):
"""Main control loop for rover"""
self.setup()
while not self.stop:
m0 = self.gamepad.get_axis(1)
m1 = self.gamepad.get_axis(0)
l0 = self.gamepad.get_axis(4)
l1 = self.gamepad.get_axis(3)
values = ( m0, m1, l0, l1 )
gamepad_values = self.js_convert(values)
self.screen.fill(self.BLACK)
self.screen.blit(self.video_receiver.get_frame(), (0,0))
self.gamepad_position(gamepad_values)
self.battery_voltage(self.cio.get_voltage())
pygame.display.flip()
if self.cio.is_ready():
self.cio.send_command(gamepad_values)
for event in pygame.event.get():
if event.type == QUIT:
self.stop = True
elif event.type == JOYBUTTONDOWN:
if event.button == 0:
print("Doing snapshot.")
self.video_receiver.snapshot()
elif event.button == 1:
print("Doing video.")
self.video_receiver.toggleVideo()
self.clock.tick(self.FPS)
self.video_receiver.active = False
self.cio.active = False
self.video_receiver.join()
self.cio.join()
pygame.quit()
|
aboganas/frappe | frappe/utils/redis_wrapper.py | Python | mit | 4,952 | 0.030695 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, re
import cPickle as pickle
from frappe.utils import cstr
class RedisWrapper(redis.Redis):
"""Redis client that will automatically prefix conf.db_name"""
def make_key(self, key, user=None):
if user:
if user == True:
user = frappe.session.user
key = "user:{0}:{1}".format(user, key)
return "{0}|{1}".format(frappe.conf.db_name, key).encode('utf-8')
def set_value(self, key, val, user=None, expires_in_sec=None):
"""Sets cache value.
:param key: Cache key
:param val: Value to be cached
:param user: Prepends key with User
:param expires_in_sec: Expire value of this key in X seconds
"""
key = self.make_key(key, user)
if not expires_in_sec:
frappe.local.cache[key] = val
try:
if expires_in_sec:
self.setex(key, pickle.dumps(val), expires_in_sec)
else:
self.set(key, pickle.dumps(val))
except redis.exceptions.ConnectionError:
return None
def get_value(self, key, generator=None, user=None, expires=False):
"""Returns cache value. If not found and generator function is
given, it will call the generator.
:param key: Cache key.
:param generator: Function to be called to generate a value if `None` is returned.
:param expires: If the key is supposed to be with an expiry, don't store it in frappe.local
"""
original_key = key
key = self.make_key(key, user)
if key in frappe.local.cache:
val = frappe.local.cache[key]
else:
val = None
try:
val = self.get(key)
except redis.exceptions.ConnectionError:
pass
if val is not None:
val = pickle.loads(val)
if not expires:
if val is None and generator:
val = generator()
self.set_value(original_key, val, user=user)
else:
frappe.local.cache[key] = val
return val
def get_all(self, key):
ret = {}
for k in self.get_keys(key):
ret[key] = self.get_value(k)
return ret
def get_keys(self, key):
"""Return keys starting with `key`."""
try:
key = self.make_key(key + "*")
return self.keys(key)
except redis.exceptions.ConnectionError:
regex = re.compile(cstr(key).replace("|", "\|").replace("*", "[\w]*"))
return [k for k in frappe.local.cache.keys() if regex.match(k)]
def delete_keys(self, key):
"""Delete keys with wildcard `*`."""
try:
self.delete_value(self.get_keys(key), make_keys=False)
except redis.exceptions.ConnectionError:
pass
def delete_key(self, *args, **kwargs):
self.delete_value(*args, **kwargs)
def delete_value(self, keys, user=None, make_keys=True):
"""Delete value, list of values."""
if not isinstance(keys, (list, tuple)):
keys = (keys, )
for key in keys:
if make_keys:
key = self.make_ke | y(key)
try:
self.delete(key)
except redis.exceptions.ConnectionError:
pass
if key in frappe.local.cache:
del frappe.local.cache[key]
def lpush(self, key, value):
super(redis.Redis, self).lpush(self.make_key(key), | value)
def rpush(self, key, value):
super(redis.Redis, self).rpush(self.make_key(key), value)
def lpop(self, key):
return super(redis.Redis, self).lpop(self.make_key(key))
def llen(self, key):
return super(redis.Redis, self).llen(self.make_key(key))
def hset(self, name, key, value):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
frappe.local.cache[name][key] = value
try:
super(redis.Redis, self).hset(self.make_key(name), key, pickle.dumps(value))
except redis.exceptions.ConnectionError:
pass
def hgetall(self, name):
return {key: pickle.loads(value) for key, value in
super(redis.Redis, self).hgetall(self.make_key(name)).iteritems()}
def hget(self, name, key, generator=None):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
if key in frappe.local.cache[name]:
return frappe.local.cache[name][key]
value = None
try:
value = super(redis.Redis, self).hget(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
if value:
value = pickle.loads(value)
frappe.local.cache[name][key] = value
elif generator:
value = generator()
try:
self.hset(name, key, value)
except redis.exceptions.ConnectionError:
pass
return value
def hdel(self, name, key):
if name in frappe.local.cache:
if key in frappe.local.cache[name]:
del frappe.local.cache[name][key]
try:
super(redis.Redis, self).hdel(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
def hdel_keys(self, name_starts_with, key):
"""Delete hash names with wildcard `*` and key"""
for name in frappe.cache().get_keys(name_starts_with):
name = name.split("|", 1)[1]
self.hdel(name, key)
def hkeys(self, name):
try:
return super(redis.Redis, self).hkeys(self.make_key(name))
except redis.exceptions.ConnectionError:
return []
|
enkidulan/slidelint | src/slidelint/tests/utils.py | Python | apache-2.0 | 485 | 0 | from contextlib import contextmanager
from testfixtures import R | eplacer
import os
@contextmanager
def subprocess_context_helper(temp_dir, cmd):
config_file = os.path.join(temp_dir.path, 'tmp_file')
import subprocess
origing_popen = subprocess.Popen
with Replacer() as r:
def not_existing_program(*args, **kwargs):
return origing_popen(cmd, *args[1:], **kwargs)
r.replace('subprocess.Popen', not_existing_progr | am)
yield config_file
|
MarxMustermann/OfMiceAndMechs | src/itemFolder/logistics/__init__.py | Python | gpl-3.0 | 510 | 0.001961 | """
import os
for module in os.listdir(os.path.d | irname(__file__)):
if module == "__init__.py" or module[-3:] != ".py":
continue
__import__("src.itemFolder.logistics." + module[:-3], locals(), globals())
del module
"""
import src.itemFolder.logistics.container
import src.itemFolder.logistics.mover
import sr | c.itemFolder.logistics.pathingNode
import src.itemFolder.logistics.sorter
import src.itemFolder.logistics.typedStockpileManager
import src.itemFolder.logistics.uniformStockpileManager
|
JoyIfBam5/aws-sdk-cpp | scripts/prepare_regenerate_high_level_sdks.py | Python | apache-2.0 | 3,418 | 0.009655 | #!/usr/bin/env python
#
# Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import argparse
import re
import os
import sys
import filecmp
import fnmatch
highLevelSdkList = [
"access-management",
"identity-management",
"queues",
"transfer",
"s3-encryption",
"text-to-speech"
];
def ParseArguments():
argMap = {}
parser = argparse.ArgumentParser(description="Prepare for high level sdks' regeneration")
parser.add_argument("--highLevelSdkName", action="store")
args = vars( parser.parse_args() )
argMap[ "highLevelSdkName" ] = args[ "highLevelSdkName" ] or None
return argMap
def prepareAutopkg(highLevelSdkName):
autopkgFi | le = "aws-cpp-sdk-" + highLevelSdkName + "/nuget/" + "aws-cpp-sdk-" + highLevelSdkName + ".autopkg";
with open(autopkgFile, "rt") as ftarget:
content = ftarget.read()
"""
The following regex code is going to change content like:
version : 1.0.153;
depend | encies {
packages: {
AWSSDKCPP-Core/1.0.140,
AWSSDKCPP-S3-Encryption/1.0.20060301.142
AWSSDKCPP-sqs/2.3.20070319.141
}
}
to:
version : @RUNTIME_MAJOR_VERSION@.@RUNTIME_MINOR_VERSION@;
dependencies {
packages: {
AWSSDKCPP-Core/@RUNTIME_MAJOR_VERSION@.@RUNTIME_MINOR_VERSION@,
AWSSDKCPP-S3-Encryption/@RUNTIME_MAJOR_VERSION@.20060301.@RUNTIME_MINOR_VERSION@
AWSSDKCPP-sqs/@RUNTIME_MAJOR_VERSION@.20070319.@RUNTIME_MINOR_VERSION@
}
}
note:
RUNTIME_MAJOR_VERSION has two parts separated by '.', like 1.0, 2.1 and so on.
RUNTIME_MINOR_VERSION is a single digit string like 79, 150, 142 and so on.
AWSSDKCPP-Core dosen't have a API version string in between MAJOR and MINOR version strings.
These version releated strings are changed to special tokens so as to be replaced with actual versions during release stage in our code pipeline.
"""
newContent = re.sub(r"version : \d+\.\d+\.\d+;", "version : @RUNTIME_MAJOR_VERSION@.@RUNTIME_MINOR_VERSION@;", content);
newContent = re.sub(r"AWSSDKCPP-Core/[^,]+?(,{0,1})\n", r"AWSSDKCPP-Core/@RUNTIME_MAJOR_VERSION@.@RUNTIME_MINOR_VERSION@\1\n", newContent);
newContent = re.sub(r"(AWSSDKCPP-[a-zA-Z\-\d]+)/\d+\.\d+\.(\d+)[^,]{0,}?(,{0,1})\n", r"\1/@RUNTIME_MAJOR_VERSION@.\2.@RUNTIME_MINOR_VERSION@\3\n", newContent);
if (content == newContent):
return False;
with open(autopkgFile, "wt") as fdest:
fdest.write(newContent)
return
def Main():
arguments = ParseArguments()
if arguments['highLevelSdkName']:
print('Preparing {}.'.format(arguments['highLevelSdkName']))
prepareAutopkg(arguments['highLevelSdkName']);
else:
for svc in highLevelSdkList:
prepareAutopkg(svc);
Main()
|
bwhite/hadoop_vision | performance/bgsub_fast.py | Python | gpl-3.0 | 1,821 | 0.00659 | import numpy as np
import ctypes
# Required Types
_uint8_ptr = ctypes.POINTER(ctypes.c_uint8)
_uint32_ptr = ctypes.POINTER(ctypes.c_uint32)
_float_ptr = ctypes.POINTER(ctypes.c_float)
_int = ctypes.c_int32
# Load library
_bg = np.ctypeslib.load_library('libbgsub_fast', '.')
#void bgsub_accum(unsigned int *image, int size, float *s, float *ss) {
_bg.bgsub_accum.restype = ctypes.c_int
_bg.bgsub_accum.argtypes = [ctypes.c_char_p, _int, _float_ptr, _float_ptr]
def accum(image, s, ss):
_bg.bgsub_accum(image,
len(image),
s.ctypes.data_as(_float_ptr),
ss.ctypes.data_as(_float_ptr))
#void bgsub_mean_var(int size, float *s, float *ss, int c, float *m, float *v)
_bg.bgsub_mean_var.restype = ctypes.c_int
_bg.bgsub_mean_var.argtypes = [_int, _float_ptr, _float_ptr, _int, _float_ptr, _float_ptr]
def mean_var(s, ss, c, m, v):
_bg.bgsub_mean_var(len(s),
s.ctypes.data_as(_float_ptr),
ss.ctypes.data_as(_float_ptr),
c,
m.ctypes.data_as(_float_ptr),
v.ctypes.data_as(_float_ptr))
#void bgsub_classify(unsigned char *image, int size, float *m | , float *bgsub)
_bg.bgsub_classify.restype = ctypes.c_int
_bg.bgsub_classify.argtypes = [ctypes.c_char_p, _int, _float_ptr, _float_ptr]
def classify(image, m, v, fg):
_bg.bgsub_classify(image,
len(image),
m.ctypes.data_as(_float_ptr),
fg.ctypes.data_as(_float_ptr))
out = (fg > v).astype(np.uint8)
out *= 255
return out
if __name__ == '__main__':
image = 'abc'
# accum
s = np.zeros(len(image), dtyp | e=np.float32)
ss = np.zeros(len(image), dtype=np.float32)
accum(image, s, ss)
print(s)
print(ss)
|
aino/aino-utkik | setup.py | Python | bsd-3-clause | 1,034 | 0.001934 | from setuptools import setup, find_packages
from setuptools.command.test import test
class TestCommand(test):
def run(self):
from tests.runtests import runtests
runtests()
setup(
name='aino-utkik',
version='0.9.1',
description='Small, clean code with a lazy view dispatcher and class based views for Django.',
long_description=open('README.rst').read(),
author='Mikko Hellsing',
author_email='mikko@aino.se',
license='BSD',
url='https://github.com/aino/aino-utkik',
packa | ges=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
cmdclass={"test": TestCommand},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Ap | proved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Framework :: Django',
],
)
|
belteshassar/cartopy | lib/cartopy/tests/test_coding_standards.py | Python | gpl-3.0 | 9,853 | 0.000203 | # (C) British Crown Copyright 2012 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from datetime import datetime
from fnmatch import fnmatch
from itertools import chain
import os
import re
import subprocess
import unittest
import pep8
import cartopy
LICENSE_TEMPLATE = """
# (C) British Crown Copyright {YEARS}, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.""".strip()
LICENSE_RE_PATTERN = re.escape(LICENSE_TEMPLATE).replace('\{YEARS\}', '(.*?)')
# Add shebang possibility or C comment starter to the LICENSE_RE_PATTERN
LICENSE_RE_PATTERN = r'((\#\!.*|\/\*)\n)?' + LICENSE_RE_PATTERN
LICENSE_RE = re.compile(LICENSE_RE_PATTERN, re.MULTILINE)
# Guess cartopy repo directory of cartopy - realpath is used to mitigate
# against Python finding the cartopy package via a symlink.
CARTOPY_DIR = os.path.realpath(os.path.dirname(cartopy.__file__))
REPO_DIR = os.getenv('CARTOPY_GIT_DIR',
os.path.dirname(os.path.dirname(CARTOPY_DIR)))
class TestLicenseHeaders(unittest.TestCase):
@staticmethod
def years_of_license_in_file(fh):
"""
Using :data:`LICENSE_RE` look for the years defined in the license
header of the given file handle.
If the license cannot be found in the given fh, None will be returned,
else a tuple of (start_year, end_year) will be returned.
"""
license_matches = LICENSE_RE.match(fh.read())
if not license_matches:
# no license found in file.
return None
years = license_matches.groups()[-1]
if len(years) == 4:
start_year = end_year = int(years)
elif len(years) == 11:
start_year, end_year = int(years[:4]), int(years[7:])
else:
fname = getattr(fh, 'name', 'unknown filename')
raise ValueError("Unexpected year(s) string in {}'s copyright "
"notice: {!r}".format(fname, years))
return (start_year, end_year)
@staticmethod
def last_change_by_fname():
"""
Return a dictionary of all the files under git which maps to
the datetime of their last modification in the git history.
.. note::
This function raises a ValueError if the repo root does
not have a ".git" folder. If git is not installed on the system,
or cannot be found by subprocess, an IOError may also be raised.
"""
# Check the ".git" folder exists at the repo dir.
if not os.path.isdir(os.path.join(REPO_DIR, '.git')):
raise ValueError('{} is not a git repository.'.format(REPO_DIR))
# Call "git whatchanged" to get the details of all the files and when
# they were last changed.
output = subprocess.check_output(['git', 'ls-tree', '-r',
'--name-only', 'HEAD'],
cwd=REPO_DIR)
output = output.decode().split('\n')
res = {}
for fname in output:
dt = subprocess.check_output(['git', 'log', '-1', '--pretty=%ct',
'--', fname],
cwd=REPO_DIR)
dt = datetime.fromtimestamp(int(dt))
res[fname] = dt
return res
def test_license_headers(self):
exclude_patterns = ('build/*',
'dist/*',
'docs/build/*',
'docs/source/examples/*.py',
'docs/source/sphinxext/*.py',
'lib/cartopy/examples/*.py')
try:
last_change_by_fname = self.last_change_by_fname()
except ValueError as e:
# Caught the case where this is not a git repo.
return self.skipTest('cartopy installation did not look like a '
'git repo: ' + str(e))
failed = False
for fname, last_change in sorted(last_change_by_fname.items()):
full_fname = os.path.join(REPO_DIR, fname)
root, ext = os.path.splitext(full_fname)
if ext in ('.py', '.pyx', '.c', '.cpp', '.h') and \
os.path.isfile(full_fname) and \
not any(fnmatch(fname, pat) for pat in exclude_patterns):
with open(full_fname) as fh:
years = TestLicenseHeaders.years_of_license_in_file(fh)
| if years is None:
print('The file {} has no valid header license and '
'has not been excluded from the license header '
'test.'.format(fname))
failed = True
elif last_change.year > years[1]:
print('The | file header at {} is out of date. The last'
' commit was in {}, but the copyright states it'
' was {}.'.format(fname, last_change.year,
years[1]))
failed = True
if failed:
raise ValueError('There were license header failures. See stdout.')
class TestCodeFormat(unittest.TestCase):
def test_pep8_conformance(self):
# Tests the cartopy codebase against the "pep8" tool.
#
# Users can add their own excluded files (should files exist in the
# local directory which is not in the repository) by adding a
# ".pep8_test_exclude.txt" file in the same directory as this test.
# The file should be a line separated list of filenames/directories
# as can be passed to the "pep8" tool's exclude list.
pep8style = pep8.StyleGuide(quiet=False)
pep8style.options.exclude.extend(['trace.py', '_crs.py',
'*/cartopy/geodesic/_geodesic.py'])
# Ignore E402 module level import not at top of file
pep8style.options.ignore += ('E402', )
# allow users to add their own exclude list
extra_exclude_file = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
if os.path.exists(extra_exclude_file):
with open(extra_exclude_file, 'r') as fh:
extra_exclude = [line.strip() for line in fh if line.strip()]
pep8style.options.exclude.extend(extra_exclude)
result = pep8style.check_files([CARTOPY_DIR])
self.assertEqual(result.total_errors, 0, "Found code syntax "
"errors (and warnings).")
class TestFutureImports(unittest.TestCase):
excluded = (
'*/cartopy/examples/*.py',
'*/docs/source/examples/*.py',
'*/cartopy/_crs.p |
ITPS/oerp_gap_analysis_ITPS | utils/odf2gap/settings.py | Python | gpl-3.0 | 149 | 0 | username = 'admin' |
pwd = '123321...'
dbname = 'Prueba | '
host = '127.0.0.1'
port = '8069'
filepath = '/home/aphu/Downloads/'
filename = 'cantvnet.ods'
|
nsteinme/phy | phy/cluster/manual/view_models.py | Python | bsd-3-clause | 37,793 | 0.000397 | # -*- coding: utf-8 -*-
"""View model for clustered data."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
from six import string_types
from ...io.kwik.model import _DEFAULT_GROUPS
from ...utils.array import _unique, _spikes_in_clusters, _as_array
from ...utils.selector import Selector
from ...utils._misc import _show_shortcuts
from ...utils._types import _is_integer, _is_float
from ...utils._color import _selected_clusters_colors
from ...utils import _as_list
from ...stats.ccg import correlograms, _symmetrize_correlograms
from ...plot.ccg import CorrelogramView
from ...plot.features import FeatureView
from ...plot.waveforms import WaveformView
from ...plot.traces import TraceView
from ...gui.base import BaseViewModel, HTMLViewModel
from ...gui._utils import _read
#------------------------------------------------------------------------------
# Misc
#------------------------------------------------------------------------------
def _create_view(cls, backend=None, **kwargs):
if backend in ('pyqt4', None):
kwargs.update({'always_on_top': True})
return cls(**kwargs)
def _oddify(x):
return x if x % 2 == 1 else x + 1
#------------------------------------------------------------------------------
# Base view models
#------------------------------------------------------------------------------
class BaseClusterViewModel(BaseViewModel):
    """Interface between a view and a model."""
    _view_class = None

    def __init__(self, model=None,
                 store=None, wizard=None,
                 cluster_ids=None, **kwargs):
        assert store is not None
        self._store = store
        self._wizard = wizard
        super(BaseClusterViewModel, self).__init__(model=model, **kwargs)
        # No selection until select() is called explicitly.
        self._cluster_ids = None
        if cluster_ids is not None:
            self.select(_as_list(cluster_ids))

    @property
    def store(self):
        """The cluster store."""
        return self._store

    @property
    def wizard(self):
        """The wizard."""
        return self._wizard

    @property
    def cluster_ids(self):
        """Selected clusters."""
        return self._cluster_ids

    @property
    def n_clusters(self):
        """Number of selected clusters."""
        return len(self._cluster_ids)

    # Public methods
    #--------------------------------------------------------------------------

    def select(self, cluster_ids, **kwargs):
        """Select a list of clusters."""
        selection = _as_list(cluster_ids)
        self._cluster_ids = selection
        self.on_select(selection, **kwargs)

    # Callback methods
    #--------------------------------------------------------------------------

    def on_select(self, cluster_ids, **kwargs):
        """Update the view after a new selection has been made.
        Must be overriden."""

    def on_cluster(self, up):
        """Called when a clustering action occurs.
        May be overriden."""
def _css_cluster_colors():
    """Return a CSS string assigning a text color to each ``.cluster_<i>``
    class, cycling through the selected-cluster palette."""
    colors = _selected_clusters_colors()
    # HACK: this is the maximum number of clusters that can be displayed
    # in an HTML view. If this number is exceeded, cluster colors will be
    # wrong for the extra clusters.
    n = 32
    def _color(i):
        # Cycle through the palette and convert a float RGB triplet in
        # [0, 1] into an integer CSS ``rgb(r, g, b)`` value.
        i = i % len(colors)
        c = colors[i]
        c = (255 * c).astype(np.int32)
        return 'rgb({}, {}, {})'.format(*c)
    return ''.join(""".cluster_{i} {{
    color: {color};
    }}\n""".format(i=i, color=_color(i))
               for i in range(n))
class HTMLClusterViewModel(BaseClusterViewModel, HTMLViewModel):
    """HTML view model that displays per-cluster information."""

    def get_css(self, **kwargs):
        """Return the base CSS (per-cluster color rules).

        TODO: improve this. Currently, child classes *must* append
        some CSS to this parent's method.
        """
        return _css_cluster_colors()

    def on_select(self, cluster_ids, **kwargs):
        """Refresh the HTML contents for a new cluster selection."""
        self.update(cluster_ids=cluster_ids)

    def on_cluster(self, up):
        """Refresh the HTML contents after a clustering action."""
        self.update(up=up, cluster_ids=self._cluster_ids)
class VispyViewModel(BaseClusterViewModel):
    """Create a VisPy view from a model.
    This object uses an internal `Selector` instance to manage spike and
    cluster selection.
    """
    # Parameters read from kwargs when the view is created.
    _imported_params = ('n_spikes_max', 'excerpt_size')
    keyboard_shortcuts = {}
    scale_factor = 1.
    def __init__(self, **kwargs):
        super(VispyViewModel, self).__init__(**kwargs)
        # Call on_close() when the view is closed.
        @self._view.connect
        def on_close(e):
            self.on_close()
    def _create_view(self, **kwargs):
        # Build the spike/cluster selector and the VisPy canvas from the
        # imported parameters.
        n_spikes_max = kwargs.get('n_spikes_max', None)
        excerpt_size = kwargs.get('excerpt_size', None)
        backend = kwargs.get('backend', None)
        position = kwargs.get('position', None)
        size = kwargs.get('size', None)
        # Create the spike/cluster selector.
        self._selector = Selector(self._model.spike_clusters,
                                  n_spikes_max=n_spikes_max,
                                  excerpt_size=excerpt_size,
                                  )
        # Create the VisPy canvas.
        view = _create_view(self._view_class,
                            backend=backend,
                            position=position or (200, 200),
                            size=size or (600, 600),
                            )
        view.connect(self.on_key_press)
        return view
    @property
    def selector(self):
        """A Selector instance managing the selected spikes and clusters."""
        return self._selector
    @property
    def cluster_ids(self):
        """Selected clusters."""
        return self._selector.selected_clusters
    @property
    def spike_ids(self):
        """Selected spikes."""
        return self._selector.selected_spikes
    @property
    def n_spikes(self):
        """Number of selected spikes."""
        return self._selector.n_spikes
    def update_spike_clusters(self, spikes=None, spike_clusters=None):
        """Update the spike clusters and cluster colors."""
        # Default to the current selection when arguments are omitted.
        if spikes is None:
            spikes = self.spike_ids
        if spike_clusters is None:
            spike_clusters = self.model.spike_clusters[spikes]
        n_clusters = len(_unique(spike_clusters))
        visual = self._view.visual
        # This updates the list of unique clusters in the view.
        visual.spike_clusters = spike_clusters
        visual.cluster_colors = _selected_clusters_colors(n_clusters)
    def select(self, cluster_ids, **kwargs):
        """Select a set of clusters."""
        self._selector.selected_clusters = cluster_ids
        self.on_select(cluster_ids, **kwargs)
    def on_select(self, cluster_ids, **kwargs):
        """Update the view after a new selection has been made.
        Must be overriden.
        """
        self.update_spike_clusters()
        self._view.update()
    def on_close(self):
        """Clear the view when the model is closed."""
        self._view.visual.spike_clusters = []
        self._view.update()
    def on_key_press(self, event):
        """Called when a key is pressed."""
        # 'h' (without Ctrl) shows the merged keyboard shortcuts.
        if event.key == 'h' and 'control' not in event.modifiers:
            shortcuts = self._view.keyboard_shortcuts
            shortcuts.update(self.keyboard_shortcuts)
            _show_shortcuts(shortcuts, name=self.name)
    def update(self):
        """Update the view."""
        self.view.update()
#------------------------------------------------------------------------------
# Stats panel
#------------------------------------------------------------------------------
class StatsViewModel(HTMLClusterViewModel):
"""Display cluster statistics."""
def get_html(self, cluster_ids=None, up=None):
| """Return the HTML table with the cluster |
Distrotech/reportlab | tools/pythonpoint/styles/htu.py | Python | bsd-3-clause | 5,206 | 0.006531 | from reportlab.lib import styles
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.platypus import Preformatted, Paragraph, Frame, \
Image, Table, TableStyle, Spacer
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
def getParagraphStyles():
    """Returns a dictionary of styles to get you started.
    We will provide a way to specify a module of these. Note that
    this just includes TableStyles as well as ParagraphStyles for any
    tables you wish to use.
    """
    # Register the TrueType fonts used below; the .ttf files must be
    # discoverable by reportlab's font search path.
    pdfmetrics.registerFont(TTFont('Verdana','verdana.ttf'))
    pdfmetrics.registerFont(TTFont('Verdana-Bold','verdanab.ttf'))
    pdfmetrics.registerFont(TTFont('Verdana-Italic','verdanai.ttf'))
    pdfmetrics.registerFont(TTFont('Verdana-BoldItalic','verdanaz.ttf'))
    pdfmetrics.registerFont(TTFont('Arial Narrow','arialn.ttf'))
    pdfmetrics.registerFont(TTFont('Arial Narrow-Bold','arialnb.ttf'))
    pdfmetrics.registerFont(TTFont('Arial Narrow-Italic','arialni.ttf'))
    pdfmetrics.registerFont(TTFont('Arial Narrow-BoldItalic','arialnbi.ttf'))
    stylesheet = {}
    ParagraphStyle = styles.ParagraphStyle
    para = ParagraphStyle('Normal', None) #the ancestor of all
    para.fontName = 'Verdana'
    para.fontSize = 28
    para.leading = 32
    para.spaceAfter = 6
    stylesheet['Normal'] = para
    #This one is spaced out a bit...
    para = ParagraphStyle('BodyText', stylesheet['Normal'])
    para.spaceBefore = 12
    stylesheet['BodyText'] = para
    #Indented, for lists
    para = ParagraphStyle('Indent', stylesheet['Normal'])
    para.leftIndent = 60
    para.firstLineIndent = 0
    stylesheet['Indent'] = para
    para = ParagraphStyle('Centered', stylesheet['Normal'])
    para.alignment = TA_CENTER
    stylesheet['Centered'] = para
    para = ParagraphStyle('BigCentered', stylesheet['Normal'])
    para.fontSize = 32
    para.alignment = TA_CENTER
    para.spaceBefore = 12
    para.spaceAfter = 12
    stylesheet['BigCentered'] = para
    para = ParagraphStyle('Italic', stylesheet['BodyText'])
    para.fontName = 'Verdana-Italic'
    stylesheet['Italic'] = para
    para = ParagraphStyle('Title', stylesheet['Normal'])
    para.fontName = 'Arial Narrow-Bold'
    para.fontSize = 48
    para.leading = 58
    para.alignment = TA_CENTER
    stylesheet['Title'] = para
    para = ParagraphStyle('Heading1', stylesheet['Normal'])
    para.fontName = 'Arial Narrow-Bold'
    para.fontSize = 40
    para.leading = 44
    para.alignment = TA_CENTER
    stylesheet['Heading1'] = para
    para = ParagraphStyle('Heading2', stylesheet['Normal'])
    para.fontName = 'Verdana'
    para.fontSize = 32
    para.leading = 36
    para.spaceBefore = 32
    para.spaceAfter = 12
    stylesheet['Heading2'] = para
    para = ParagraphStyle('Heading3', stylesheet['Normal'])
    para.fontName = 'Verdana'
    para.spaceBefore = 20
    para.spaceAfter = 6
    stylesheet['Heading3'] = para
    para = ParagraphStyle('Heading4', stylesheet['Normal'])
    para.fontName = 'Verdana-BoldItalic'
    para.spaceBefore = 6
    stylesheet['Heading4'] = para
    para = ParagraphStyle('Bullet', stylesheet['Normal'])
    para.firstLineIndent = 0
    para.leftIndent = 56
    para.spaceBefore = 6
    para.bulletFontName = 'Symbol'
    para.bulletFontSize = 24
    para.bulletIndent = 20
    stylesheet['Bullet'] = para
    para = ParagraphStyle('Bullet2', stylesheet['Normal'])
    para.firstLineIndent = 0
    para.leftIndent = 80
    para.spaceBefore = 6
    para.fontSize = 24
    para.bulletFontName = 'Symbol'
    para.bulletFontSize = 20
    para.bulletIndent = 60
    stylesheet['Bullet2'] = para
    para = ParagraphStyle('Definition', stylesheet['Normal'])
    #use this for definition lists
    para.firstLineIndent = 0
    para.leftIndent = 60
    para.bulletIndent = 0
    para.bulletFontName = 'Verdana-BoldItalic'
    para.bulletFontSize = 24
    stylesheet['Definition'] = para
    para = ParagraphStyle('Code', stylesheet['Normal'])
    para.fontName = 'Courier'
    para.fontSize = 16
    para.leading = 18
    para.leftIndent = 36
    stylesheet['Code'] = para
    para = ParagraphStyle('PythonCode', stylesheet['Normal'])
    para.fontName = 'Courier'
    para.fontSize = 16
    para.leading = 18
    para.leftIndent = 36
    # BUG FIX: this style was previously stored under the key 'Code',
    # silently overwriting the 'Code' style; register it under its own name.
    stylesheet['PythonCode'] = para
    para = ParagraphStyle('Small', stylesheet['Normal'])
    para.fontSize = 12
    para.leading = 14
    stylesheet['Small'] = para
    #now for a table
    ts = TableStyle([
         ('FONT', (0,0), (-1,-1), 'Arial Narrow', 22),
         ('LINEABOVE', (0,1), (-1,1), 2, colors.green),
         ('LINEABOVE', (0,2), (-1,-1), 0.25, colors.black),
         ('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
         ('LINEBEFORE', (0,1), (-1,-1), 2, colors.black),
         ('LINEAFTER', (0,1), (-1,-1), 2, colors.black),
         ('ALIGN', (4,1), (-1,-1), 'RIGHT'), #all numeric cells right aligned
         ('TEXTCOLOR', (0,2), (0,-1), colors.black),
         ('BACKGROUND', (0,1), (-1,1), colors.Color(0,0.7,0.7))
         ])
    stylesheet['table1'] = ts
    return stylesheet
|
smclt30p/PCS | core/plugins/PluginLoader.py | Python | bsd-2-clause | 1,237 | 0.001617 | import importlib
import os
from PyQt5.QtCore import QSettings
class Continue(BaseException):
    # NOTE(review): apparently a control-flow sentinel exception (derived
    # from BaseException so blanket ``except Exception`` handlers do not
    # swallow it); it is not raised anywhere in this file's visible code.
    pass
class PluginLoader:
    """Discovers ``*_plugin`` packages under ``plugins/`` and keeps a
    process-wide cache of their instantiated ``PluginImpl`` objects."""
    loadedPlugins = []   # cached plugin instances
    loaded = False       # True once the plugins folder has been scanned
    settings = QSettings("plugins.ini", QSettings.IniFormat)
    @staticmethod
    def getLoadedPlugins():
        """
        This returns instances for all PluginImpl's from
        core.plugins.load
        :return: [] of plugins
        """
        if not PluginLoader.loaded:
            for plugin in os.listdir("plugins/."):
                # Only entries following the <name>_plugin convention
                # are treated as plugins.
                if not plugin.endswith("_plugin"):
                    continue
                # fixed: module path was corrupted by an embedded ' | ' marker
                mod = importlib.import_module("plugins." + plugin + ".PluginImpl")
                if hasattr(mod, "PluginImpl"):
                    instance = getattr(mod, "PluginImpl")()
                    instance.nativeName = plugin
                    instance.settings = PluginLoader.settings
                    PluginLoader.loadedPlugins.append(instance)
            PluginLoader.loaded = True
        return PluginLoader.loadedPlugins
    @classmethod
    def reloadPlugins(cls):
        """Drop the cached instances and re-scan the plugins folder."""
        print("Reloading plugins...")
        PluginLoader.loadedPlugins = []
        PluginLoader.loaded = False
        PluginLoader.getLoadedPlugins()
pacbard/clsipy | wsgi/clsi.py | Python | mit | 4,642 | 0.007755 | import base64
from os.path import abspath, dirname, join
import sys, os
import uuid
import shutil
import untangle
import subprocess, threading
# missing module fix
sys.path.append(os.path.join(os.environ['OPENSHIFT_REPO_DIR'], 'wsgi'))
virtenv = os.environ['APPDIR']+'/virtenv'
os.environ['PYTHON_EGG_CACHE'] = os.path.join(virtenv, 'lib/python2.6/site-packages')
virtualenv = os.path.join(virtenv, 'bin/activate_this.py')
try:
execfile(virtualenv, dict(__file__=virtualenv))
except:
pass
# env variables
TMP = os.environ['CLSI_TMP']
PUBLIC = os.environ['CLSI_PUBLIC']
DATA = os.environ['CLSI_DATA']
BIN = os.environ['CLSI_BIN']
class Command(object):
    # Runs a shell command in a worker thread and kills it after a timeout.
    # From:
    # http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    def __init__(self, cmd):
        # Shell command line to execute; the process handle is created
        # lazily in run().
        self.cmd = cmd
        self.process = None
    def run(self, timeout):
        # Execute the command, waiting at most `timeout` seconds before
        # terminating the child process.
        def target():
            print 'Thread started'
            # stderr is merged into stdout, so communicate() returns the
            # combined output and err is always None here.
            self.process = subprocess.Popen(self.cmd, shell=True
                                            , stdout=subprocess.PIPE
                                            , stderr=subprocess.STDOUT
                                            )
            print 'Thread finished'
            out, err = self.process.communicate()
            print(out)
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            print 'Terminating process'
            # NOTE(review): if the timeout elapses before Popen() returns,
            # self.process may still be None here — TODO confirm/guard.
            self.process.terminate()
            thread.join()
class clsi:
def __init__(self):
self.id = str(uuid.uuid4())
self.tmp = TMP + self.id + "/"
self.public = PUBLIC + self.id + "/"
self.format = "pdf"
self.compiler = "pdflatex"
def parse(self, data):
req = untangle.parse(data)
# Check token
self._check_token(req.compile.token.cdata)
if req.compile.options.output_format:
self.format = req.compile.options.output_format.cdata
if req.compile.options.compiler:
self.compiler = req.compile.options.compiler.cdata
root = req.compile.resources['root-resource-path']
to_compile = self.tmp + root
# Writes the files to disk
for file in req.compile.resources.resource:
self._ensure_dir(self.tmp+file['path'])
f = open(self.tmp+file['path'], 'w')
if file['encoding'] == 'base64':
f.write(base64.b64decode(file.cdata))
elif file['encoding'] == 'utf-8':
data = file.cdata
f.write(data.encode('utf-8'))
else:
print 'Error in file encoding'
return(to_compile)
def run(self, file):
dir = os.path.dirname(file)+"/"
if self.compiler == "arara":
cmd = Command("PATH=${PATH}:"+ BIN +"; cd "+ dir +"; arara "+ file)
else:
cmd = Command("PATH=${PATH}:"+ BIN +"; cd "+ dir +"; "+ self.compiler +" -output-directory="+ dir +" "+ file)
cmd.run(timeout = 30)
log, out = self._move_results(file)
self._rm_tmp()
return [log, out]
def _move_results(self, file):
dir = os.path.dirname(file)+'/'
base = os.path.basename(file)
name = os.path.splitext(base)[0]
out = dir+name+'.'+self.format
log = dir+name+'.log'
if not os.path.exists(self.public):
os.makedirs(self.public)
if os.path.exists(out):
shutil.move(out, self.public + name + '.' + self.format)
if os.path.exists(log):
shutil.move(log, self.public + name +'.log')
if os.path.exists(self.public + name + '.' + self.format):
return([self.id+'/'+name+'.log', self.id+'/'+name+'.'+self.format])
else:
return([self.id+'/'+name+'.log', None])
def _rm_tmp(self):
if os.path.isdir(self.tmp):
shutil.rmtree(self.tmp)
def _ensure_dir(self, f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def _check_token(self, token):
# Set token value using rhc cli
# r | hc env-set CLSI_TOKEN=your_token --app your_app
# clsi_token is the fallback token in calse no token is defined
if token == os.getenv('CLSI_TOKEN', 'clsi_token'):
return True
else:
print("User "+ token +" not found in database")
re | turn sys.exit()
|
thomasstreet/ballast | ballast/core.py | Python | apache-2.0 | 4,607 | 0.000651 | import logging
import threading
import time
from ballast.discovery import ServerList
from ballast.rule import Rule, RoundRobinRule
from ballast.ping import (
Ping,
SocketPing,
PingStrategy,
SerialPingStrategy
)
class LoadBalancer(object):
    """Client-side load balancer: tracks a server list, periodically pings
    the servers in a background thread and picks servers via a Rule."""
    DEFAULT_PING_INTERVAL = 30
    MAX_PING_TIME = 3
    def __init__(self, server_list, rule=None, ping_strategy=None, ping=None, ping_on_start=True):
        assert isinstance(server_list, ServerList)
        assert rule is None or isinstance(rule, Rule)
        assert ping_strategy is None or isinstance(ping_strategy, PingStrategy)
        assert ping is None or isinstance(ping, Ping)
        # some locks for thread-safety
        self._lock = threading.Lock()
        self._server_lock = threading.Lock()
        self._rule = rule \
            if rule is not None \
            else RoundRobinRule()
        self._ping_strategy = ping_strategy \
            if ping_strategy is not None \
            else SerialPingStrategy()
        self._ping = ping \
            if ping is not None \
            else SocketPing()
        self.max_ping_time = self.MAX_PING_TIME
        # fixed: the next two assignments were corrupted by embedded ' | ' markers
        self._ping_interval = self.DEFAULT_PING_INTERVAL
        self._server_list = server_list
        self._servers = set()
        self._stats = LoadBalancerStats()
        self._rule.load_balancer = self
        self._logger = logging.getLogger(self.__module__)
        # start our background worker
        # to periodically ping our servers
        self._ping_timer_running = False
        self._ping_timer = None
        if ping_on_start:
            self._start_ping_timer()
    @property
    def ping_interval(self):
        """Seconds between background ping rounds."""
        return self._ping_interval
    @ping_interval.setter
    def ping_interval(self, value):
        self._ping_interval = value
        # Restart the timer so the new interval takes effect immediately.
        if self._ping_timer_running:
            self._stop_ping_timer()
            self._start_ping_timer()
    @property
    def max_ping_time(self):
        """Maximum seconds a single ping may take (0 when no pinger is set)."""
        if self._ping is None:
            return 0
        return self._ping.max_ping_time
    @max_ping_time.setter
    def max_ping_time(self, value):
        if self._ping is not None:
            self._ping.max_ping_time = value
    @property
    def stats(self):
        """The LoadBalancerStats instance for this balancer."""
        return self._stats
    @property
    def servers(self):
        """A snapshot of all known servers."""
        with self._server_lock:
            return set(self._servers)
    @property
    def reachable_servers(self):
        """A snapshot of the servers currently marked alive."""
        with self._server_lock:
            servers = set()
            for s in self._servers:
                if s.is_alive:
                    servers.add(s)
            return servers
    def choose_server(self):
        """Pick a server via the configured rule; raises if none available."""
        # choose a server, will
        # throw if there are none
        server = self._rule.choose()
        return server
    def mark_server_down(self, server):
        """Flag `server` as dead until the next ping round revives it."""
        self._logger.debug("Marking server down: %s", server)
        server._is_alive = False
    def ping(self, server=None):
        """Ping one server, or refresh the whole server set when None."""
        if server is None:
            self._ping_all_servers()
        else:
            is_alive = self._ping.is_alive(server)
            server._is_alive = is_alive
    def ping_async(self, server=None):
        """Like ping(), but a full refresh runs in a daemon thread."""
        if server is None:
            t = threading.Thread(name='ballast-worker', target=self._ping_all_servers)
            t.daemon = True
            t.start()
        else:
            is_alive = self._ping.is_alive(server)
            server._is_alive = is_alive
    def _ping_all_servers(self):
        # Refresh the server set from the server list under the lock.
        with self._server_lock:
            results = self._ping_strategy.ping(
                self._ping,
                self._server_list
            )
            self._servers = set(results)
    def _start_ping_timer(self):
        with self._lock:
            if self._ping_timer_running:
                self._logger.debug("Background pinger already running")
                return
            self._ping_timer_running = True
            self._ping_timer = threading.Thread(name='ballast-worker', target=self._ping_loop)
            self._ping_timer.daemon = True
            self._ping_timer.start()
    def _stop_ping_timer(self):
        with self._lock:
            self._ping_timer_running = False
            self._ping_timer = None
    def _ping_loop(self):
        # NOTE(review): BaseException also swallows KeyboardInterrupt in
        # this worker thread; kept as-is to preserve behavior.
        while self._ping_timer_running:
            try:
                self._ping_all_servers()
            except BaseException as e:
                self._logger.error("There was an error pinging servers: %s", e)
            time.sleep(self._ping_interval)
class LoadBalancerStats(object):
    """Placeholder for per-server load-balancer statistics."""
    def get_server_stats(self, server):
        # No-op stub; presumably subclasses collect and return statistics
        # for `server` — nothing is implemented in the visible code.
        pass
|
afronski/grammar-generator | grammar-generator/Elements/G/Populators/AllParametersListWithOneNamePopulator.py | Python | mit | 1,335 | 0.025468 | from Elements.G.Populators.Base.IElementPopulator import IElementPopulator
class AllParametersListWithOneNamePopulator(IElementPopulator):
    """Builds a single grammar rule, named after the template, whose body
    is the '|'-separated list of every filled parameter template."""
    def __init__(self, sectionName, templates, settingsObject):
        # fixed: signature and separator assignment were corrupted by
        # embedded ' | ' markers
        super(AllParametersListWithOneNamePopulator, self).__init__(sectionName, templates, settingsObject)
        # Grammar alternatives are separated by a '|' on its own line.
        self.separator = "\n|\n"
        # e.g. a template name like "Create-Foo" yields main name "Foo".
        self.mainName = self.templateName.split('-')[1]
    def getType(self):
        return "CreateAllParametersListWithOneName"
    def openClause(self, name):
        # Rule header: "<module prefix>_<name>:".
        return self.parameters["ModulePrefix"] + "_" + name.lower() + ":\n"
    def closeClause(self):
        return "\n;\n"
    def populate(self):
        """Render the rule: header, alternatives collected from every
        '*Parameters*' parameter list, then the closing ';'."""
        constructedParameters = []
        result = self.openClause(self.mainName)
        for parameterListName, parameterList in self.parameters.iteritems():
            if parameterListName.find("Parameters") != -1:
                filledTemplatesList = []
                for parameterName, parameters in parameterList.iteritems():
                    self.handleNotImplementedTemplates([ self.passedNotImplementedTemplateName ], parameters)
                    filledTemplatesList.append(self.templateResolver.fill(self.expandParameters(parameters)))
                    self.restoreDefault()
                if len(filledTemplatesList) > 0:
                    constructedParameters.append(self.separator.join(filledTemplatesList))
        result += self.separator.join(constructedParameters)
        result += self.closeClause()
        return result
|
Spiderlover/Toontown | toontown/dna/DNAFlatBuilding.py | Python | mit | 5,943 | 0.000505 | from panda3d.core import NodePath, DecalEffect
import DNANode
import DNAWall
import random
class DNAFlatBuilding(DNANode.DNANode):
COMPONENT_CODE = 9
currentWallHeight = 0
def __init__(self, name):
DNANode.DNANode.__init__(self, name)
self.width = 0
self.hasDoor = False
def setWidth(self, width):
self.width = width
def getWidth(self):
return self.width
def setCurrentWallHeight(self, currentWallHeight):
DNAFlatBuilding.currentWallHeight = currentWallHeight
def getCurrentWallHeight(self):
return DNAFlatBuilding.currentWallHeight
def setHasDoor(self, hasDoor):
self.hasDoor = hasDoor
def getHasDoor(self):
return self.hasDoor
def makeFromDGI(self, dgi):
DNANode.DNANode.makeFromDGI(self, dgi)
self.width = dgi.getInt16() / 100.0
self.hasDoor = dgi.getBool()
def setupSuitFlatBuilding(self, nodePath, dnaStorage):
name = self.getName()
if name[:2] != 'tb':
return
name = 'sb' + name[2:]
node = nodePath.attachNewNode(name)
node.setPosHpr(self.getPos(), self.getHpr())
numCodes = dnaStorage.getNumCatalogCodes('suit_wall')
if numCodes < 1:
return
code = dnaStorage.getCatalogCode(
'suit_wall', random.randint(0, numCodes - 1))
wallNode = dnaStorage.findNode(code)
if not wallNode:
return
wallNode = wallNode.copyTo(node, 0)
wallScale = wallNode.getScale()
wallScale.setX(self.width)
wallScale.setZ(DNAFlatBuilding.currentWallHeight)
wallNode.setScale(wallScale)
if self.getHasDoor():
wallNodePath = node.find('wall_*')
doorNode = dnaStorage.findNode('suit_door')
doorNode = doorNode.copyTo(wallNodePath, 0)
doorNode.setScale(NodePath(), (1, 1, 1))
doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
wallNodePath.setEffect(DecalEffect.make())
node.flattenMedium()
node.stash()
def setupCogdoFlatBuilding(self, nodePath, dnaStorage):
name = self.getName()
if name[:2] != 'tb':
return
name = 'cb' + name[2:]
node = nodePath.attachNewNode(name)
node.setPosHpr(self.getPos(), self.getHpr())
numCodes = dnaStorage.getNumCatalogCodes('cogdo_wall')
if numCodes < 1:
return
code = dnaStorage.getCatalogCode(
'cogdo_wall', random.randint(0, numCodes - 1))
wallNode = dnaStorage.findNode(code)
if not wallNode:
return
wallNode = wallNode.copyTo(node, 0)
wallScale = wallNode.getScale()
wallScale.setX(self.width)
wallScale.setZ(DNAFlatBuilding.currentWallHeight)
wallNode.setScale(wallScale)
if self.getHasDoor():
wallNodePath = node | .find('wall_*')
doorNode = dnaStorage.findNode('suit_door')
doorNode = doorNode.copyTo(wallNodePath, 0)
doorNode.se | tScale(NodePath(), (1, 1, 1))
doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
wallNodePath.setEffect(DecalEffect.make())
node.flattenMedium()
node.stash()
def traverse(self, nodePath, dnaStorage):
DNAFlatBuilding.currentWallHeight = 0
node = nodePath.attachNewNode(self.getName())
internalNode = node.attachNewNode(self.getName() + '-internal')
scale = self.getScale()
scale.setX(self.width)
internalNode.setScale(scale)
node.setPosHpr(self.getPos(), self.getHpr())
for child in self.children:
if isinstance(child, DNAWall.DNAWall):
child.traverse(internalNode, dnaStorage)
else:
child.traverse(node, dnaStorage)
if DNAFlatBuilding.currentWallHeight == 0:
print 'empty flat building with no walls'
else:
cameraBarrier = dnaStorage.findNode('wall_camera_barrier')
if cameraBarrier is None:
raise DNAError.DNAError('DNAFlatBuilding requires that there is a wall_camera_barrier in storage')
cameraBarrier = cameraBarrier.copyTo(internalNode, 0)
cameraBarrier.setScale((1, 1, DNAFlatBuilding.currentWallHeight))
internalNode.flattenStrong()
collisionNode = node.find('**/door_*/+CollisionNode')
if not collisionNode.isEmpty():
collisionNode.setName('KnockKnockDoorSphere_' + dnaStorage.getBlock(self.getName()))
cameraBarrier.wrtReparentTo(nodePath, 0)
wallCollection = internalNode.findAllMatches('wall*')
wallHolder = node.attachNewNode('wall_holder')
wallDecal = node.attachNewNode('wall_decal')
windowCollection = internalNode.findAllMatches('**/window*')
doorCollection = internalNode.findAllMatches('**/door*')
corniceCollection = internalNode.findAllMatches('**/cornice*_d')
wallCollection.reparentTo(wallHolder)
windowCollection.reparentTo(wallDecal)
doorCollection.reparentTo(wallDecal)
corniceCollection.reparentTo(wallDecal)
for i in xrange(wallHolder.getNumChildren()):
iNode = wallHolder.getChild(i)
iNode.clearTag('DNACode')
iNode.clearTag('DNARoot')
wallHolder.flattenStrong()
wallDecal.flattenStrong()
holderChild0 = wallHolder.getChild(0)
wallDecal.getChildren().reparentTo(holderChild0)
holderChild0.reparentTo(internalNode)
holderChild0.setEffect(DecalEffect.make())
wallHolder.removeNode()
wallDecal.removeNode()
self.setupSuitFlatBuilding(nodePath, dnaStorage)
self.setupCogdoFlatBuilding(nodePath, dnaStorage)
node.flattenStrong()
|
dayatz/taiga-back | tests/integration/test_occ.py | Python | agpl-3.0 | 15,300 | 0.00183 | # -*- coding: utf-8 -*- |
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest.mock import patch
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_valid_us_creation(client):
    """A project admin can create a user story (201)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("userstories-list")
    data = {
        'project': project.id,
        'subject': 'test',
    }
    response = client.post(url, json.dumps(data), content_type="application/json")
    assert response.status_code == 201
def test_invalid_concurrent_save_for_issue(client):
    """A second PATCH reusing an already-consumed OCC `version` is rejected (400)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    # Bypass pre-save permission checks to isolate the OCC behavior.
    mock_path = "taiga.projects.issues.api.IssueViewSet.pre_conditions_on_save"
    with patch(mock_path):
        url = reverse("issues-list")
        data = {"subject": "test",
                "project": project.id,
                "status": f.IssueStatusFactory.create(project=project).id,
                "severity": f.SeverityFactory.create(project=project).id,
                "type": f.IssueTypeFactory.create(project=project).id,
                "priority": f.PriorityFactory.create(project=project).id}
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 201, response.content
        issue_id = response.data["id"]
        url = reverse("issues-detail", args=(issue_id,))
        data = {"version": 1, "subject": "test 1"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
        data = {"version": 1, "subject": "test 2"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 400
def test_valid_concurrent_save_for_issue_different_versions(client):
    """Sequential PATCHes with increasing OCC `version` values both succeed (200)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    # Bypass pre-save permission checks to isolate the OCC behavior.
    mock_path = "taiga.projects.issues.api.IssueViewSet.pre_conditions_on_save"
    with patch(mock_path):
        url = reverse("issues-list")
        data = {"subject": "test",
                "project": project.id,
                "status": f.IssueStatusFactory.create(project=project).id,
                "severity": f.SeverityFactory.create(project=project).id,
                "type": f.IssueTypeFactory.create(project=project).id,
                "priority": f.PriorityFactory.create(project=project).id}
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 201, response.content
        issue_id = response.data["id"]
        url = reverse("issues-detail", args=(issue_id,))
        data = {"version": 1, "subject": "test 1"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
        data = {"version": 2, "subject": "test 2"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
def test_valid_concurrent_save_for_issue_different_fields(client):
    """Concurrent PATCHes with the same `version` but touching different
    fields do not conflict and both succeed (200)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    # Bypass pre-save permission checks to isolate the OCC behavior.
    mock_path = "taiga.projects.issues.api.IssueViewSet.pre_conditions_on_save"
    with patch(mock_path):
        url = reverse("issues-list")
        data = {"subject": "test",
                "project": project.id,
                "status": f.IssueStatusFactory.create(project=project).id,
                "severity": f.SeverityFactory.create(project=project).id,
                "type": f.IssueTypeFactory.create(project=project).id,
                "priority": f.PriorityFactory.create(project=project).id}
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 201, response.content
        issue_id = response.data["id"]
        url = reverse("issues-detail", args=(issue_id,))
        data = {"version": 1, "subject": "test 1"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
        data = {"version": 1, "description": "test 2"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
def test_invalid_concurrent_save_for_wiki_page(client):
    """A second wiki-page PATCH reusing the same OCC `version` on the same
    field is rejected (400)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    # Bypass pre-save permission checks to isolate the OCC behavior.
    mock_path = "taiga.projects.wiki.api.WikiViewSet.pre_conditions_on_save"
    with patch(mock_path):
        url = reverse("wiki-list")
        data = {"project": project.id, "slug": "test"}
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 201, response.content
        wiki_id = response.data["id"]
        url = reverse("wiki-detail", args=(wiki_id,))
        data = {"version": 1, "content": "test 1"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
        data = {"version": 1, "content": "test 2"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 400
def test_valid_concurrent_save_for_wiki_page_different_versions(client):
    """Sequential wiki-page PATCHes with increasing OCC `version` values
    both succeed (200)."""
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory.create(project=project, user=user, is_admin=True)
    client.login(user)
    # Bypass pre-save permission checks to isolate the OCC behavior.
    mock_path = "taiga.projects.wiki.api.WikiViewSet.pre_conditions_on_save"
    with patch(mock_path):
        url = reverse("wiki-list")
        data = {"project": project.id, "slug": "test"}
        response = client.json.post(url, json.dumps(data))
        assert response.status_code == 201, response.content
        wiki_id = response.data["id"]
        url = reverse("wiki-detail", args=(wiki_id,))
        data = {"version": 1, "content": "test 1"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
        data = {"version": 2, "content": "test 2"}
        response = client.patch(url, json.dumps(data), content_type="application/json")
        assert response.status_code == 200
def test_invalid_concurrent_save_for_us(client):
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory.create(project=project, user=user, is_admin=True)
f.UserStoryFactory.create(version=10, project=project)
client.login(user)
mock_path = "taiga.projects.userstories.api.UserStoryViewSet.pre_conditions_on_save"
with patch(mock_path):
url = reverse("userstories-list")
data = {"subject": "test",
"project": project.id,
|
LethusTI/supportcenter | vendor/django/tests/regressiontests/localflavor/id/tests.py | Python | gpl-3.0 | 7,211 | 0.000277 | import warnings
from django.contrib.localflavor.id.forms import (IDPhoneNumberField,
IDPostCodeField, IDNationalIdentityNumberField, IDLicensePlateField,
IDProvinceSelect, IDLicensePlatePrefixSelect)
from django.test import SimpleTestCase
class IDLocalFlavorTests(SimpleTestCase):
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings(
"ignore",
category=Runtime | Warning,
| module='django.contrib.localflavor.id.id_choices'
)
def tearDown(self):
self.restore_warnings_state()
def test_IDProvinceSelect(self):
f = IDProvinceSelect()
out = u'''<select name="provinces">
<option value="ACE">Aceh</option>
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
self.assertHTMLEqual(f.render('provinces', 'LPG'), out)
def test_IDLicensePlatePrefixSelect(self):
f = IDLicensePlatePrefixSelect()
out = u'''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
self.assertHTMLEqual(f.render('codes', 'BE'), out)
def test_IDPhoneNumberField(self):
error_invalid = [u'Enter a valid phone number']
valid = {
'0812-3456789': u'0812-3456789',
'081234567890': u'081234567890',
'021 345 6789': u'021 345 6789',
'0213456789': u'0213456789',
'+62-21-3456789': u'+62-21-3456789',
'(021) 345 6789': u'(021) 345 6789',
}
invalid = {
'0123456789': error_invalid,
'+62-021-3456789': error_invalid,
'+62-0812-3456789': error_invalid,
'0812345678901': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDPhoneNumberField, valid, invalid)
def test_IDPostCodeField(self):
error_invalid = [u'Enter a valid post code']
valid = {
'12340': u'12340',
'25412': u'25412',
' 12340 ': u'12340',
}
invalid = {
'12 3 4 0': error_invalid,
'12345': error_invalid,
'10100': error_invalid,
'123456': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDPostCodeField, valid, invalid)
def test_IDNationalIdentityNumberField(self):
error_invalid = [u'Enter a valid NIK/KTP number']
valid = {
' 12.3456.010178 3456 ': u'12.3456.010178.3456',
'1234560101783456': u'12.3456.010178.3456',
'12.3456.010101.3456': u'12.3456.010101.3456',
}
invalid = {
'12.3456.310278.3456': error_invalid,
'00.0000.010101.0000': error_invalid,
'1234567890123456': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)
def test_IDLicensePlateField(self):
error_invalid = [u'Enter a valid vehicle license plate number']
valid = {
' b 1234 ab ': u'B 1234 AB',
'B 1234 ABC': u'B 1234 ABC',
'A 12': u'A 12',
'DK 12345 12': u'DK 12345 12',
'RI 10': u'RI 10',
'CD 12 12': u'CD 12 12',
}
invalid = {
'CD 10 12': error_invalid,
'CD 1234 12': error_invalid,
'RI 10 AB': error_invalid,
'B 12345 01': error_invalid,
'N 1234 12': error_invalid,
'A 12 XYZ': error_invalid,
'Q 1234 AB': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDLicensePlateField, valid, invalid)
|
nasfarley88/rpg-game-terminal | main_terminal.py | Python | mit | 6,631 | 0.004524 | import curses
import gspread
from google_credentials import username, password
from textwrap import fill
from multiprocessing import Pipe, Process, Event
class MainTerminal:
"""Class to manage the loading and refreshing of the main terminal."""
def _parse_menu(
self,
tmp_menu_list,
max_width,
):
"""Parses the given spreadsheet and output the menu in the form of a dict and a str"""
tmp_menu_dict = {}
tmp_menu_headers = tmp_menu_list.pop(0)
tmp_menu_headers.pop(0)
for i in tmp_menu_list:
tmp_menu_dict[i[0]] = {}
for j, k in enumerate(tmp_menu_headers, start=1):
tmp_menu_dict[i[0]][k] = i[j]
tmp_str = ""
tmp_str = fill(
tmp_menu_dict['description']['description'], width=max_width
) + "\n"
tmp_menu_options = [x for x in tmp_menu_dict.keys() if x.find('option_')!=-1]
# TODO get it to capture the number from the key
tmp_menu_options.sort()
for i, j in enumerate(tmp_menu_options, start=1):
tmp_str += str(i) + ") " + tmp_menu_dict[j]['description'] + "\n"
# Assign the new variables
return (tmp_menu_dict, tmp_str)
# dict_input_pipe.send(tmp_menu_dict)
# str_input_pipe.send(tmp_str)
# self.curr_menu_dict = tmp_menu_dict
# self.curr_menu_str = tmp_str
def menu_ss(
self,
ss_title_output_pipe,
wks_title_output_pipe,
curr_menu_dict_input_pipe,
curr_menu_str_input_pipe,
menu_ss_event,
menu_ss_kill_event,
max_width=79,
):
gc = gspread.login(username, password)
curr_ss_title = ""
curr_wks_title = ""
while True:
if (menu_ss_event.is_set() and
ss_title_output_pipe.poll() and
wks_title_output_pipe.poll()):
ss_title = ss_title_output_pipe.recv()
wks_title = wks_title_output_pipe.recv()
# If the spreadsheet title is different, load it up again
if ss_title != curr_ss_title:
curr_ss = gc.open(ss_title)
if wks_title != curr_wks_title:
curr_wks = curr_ss.worksheet(wks_title).get_all_values()
tmp_dict, tmp_str = self._parse_menu(curr_wks, max_width)
curr_menu_dict_input_pipe.send(tmp_dict)
curr_menu_str_input_pipe.send(tmp_str)
curr_ss_title = ss_title
curr_wks_title = wks_title
menu_ss_event.clear()
ss_title_output_pipe.close()
wks_title_output_pipe.close()
curr_menu_dict_input_pipe.close()
curr_menu_str_input_pipe.close()
def __init__(self,
main_term_h,
main_term_w,
main_term_y,
main_term_x,
main_menu_ss_title='terminal_menus',
main_menu_wks_title='basic_menu',
):
"""Init function TODO this docstring."""
print "trying to init the curses new window for MainTerminal."
self.main_term = curses.newwin(main_term_h, main_term_w,
main_term_y, main_term_x)
print "made the new MainTerminal window"
self.main_term_h = main_term_h
self.main_term_w = main_term_w
self.main_menu_ss_title = main_menu_ss_title
self.main_menu_wks_title = main_menu_wks_title
self.curr_ss_title = main_menu_ss_title
# self.curr_ss = self.gc.open(self.curr_ss_title)
self.curr_wks_title = self.main_menu_wks_title
self.curr_menu_dict = {}
self.curr_menu_str = ""
self.ss_title_output_pipe, se | lf.ss_title_input_pipe = Pipe()
self.wks_title_output_pipe, self.wks_title_in | put_pipe = Pipe()
self.curr_menu_dict_output_pipe, self.curr_menu_dict_input_pipe = Pipe()
self.curr_menu_str_output_pipe, self.curr_menu_str_input_pipe = Pipe()
self.menu_ss_event = Event()
self.menu_ss_kill_event = Event()
self.menu_ss_kill_event.clear()
# Now, I create a thread to manage all the loading of spreadsheets
self.menu_ss_process = Process(
target=self.menu_ss,
args=(
self.ss_title_output_pipe,
self.wks_title_output_pipe,
self.curr_menu_dict_input_pipe,
self.curr_menu_str_input_pipe,
self.menu_ss_event,
self.menu_ss_kill_event,
self.main_term_w-1,
)
)
self.menu_ss_process.start()
def parse_menu(
self,
wks_title=None,
ss_title=None,
max_width=None,
):
"""A wrapper for parsing the menu so I can farm it out to a process. """
# Wipe the menu so it won't accept commands anymore
self.curr_menu_dict = {}
max_y, max_x = self.main_term.getmaxyx()
self.curr_menu_str = "\n"*(max_y/2) + " "*((max_x)/2-7) + "Loading menu..."
# self.curr_menu_str = str(self.main_term.getmaxyx())
if wks_title == None:
wks_title = self.main_menu_wks_title
else:
self.curr_wks_title = wks_title
if ss_title == None:
ss_title=self.main_menu_ss_title
elif ss_title != self.curr_ss_title:
self.curr_ss_title = ss_title
else:
# You should never get here
pass
self.ss_title_input_pipe.send(ss_title)
self.wks_title_input_pipe.send(wks_title)
# When the pipes are full, tell the thread it's safe to proceed
self.menu_ss_event.set()
def redraw(self):
"""Erases and noutrefreshes terminal."""
if self.curr_menu_str_output_pipe.poll():
self.curr_menu_str = self.curr_menu_str_output_pipe.recv()
if self.curr_menu_dict_output_pipe.poll():
self.curr_menu_dict = self.curr_menu_dict_output_pipe.recv()
self.main_term.erase()
self.main_term.addstr(0, 0, self.curr_menu_str)
self.main_term.noutrefresh()
def kill_menu_ss_process():
"""Simple. Does what it says."""
# signal it's time to kill the thread
menu_ss_kill_event.set()
import time
time.sleep(10)
menu_ss_process.terminate()
menu_ss_process.join()
|
CorwinTanner/vmupdate | tests/test_pkgmgr.py | Python | mit | 1,900 | 0.004211 | from vmupdate.config import config
from vmupdate.errors import UpdateError
from vmupdate.pkgmgr import get_pkgmgrs, run_pkgmgr
from vmupdate.vm import VM
from tests.case import TestCase
from tests.constants import *
from tests.context import get_data_path, mock
from tests.mocks import get_mock_virtualizer, get_mock_ssh_client
class PkgMgrTestCase(TestCase):
@classmethod
def setUpClass(cls):
config.load(get_data_path('testconfig.yaml'))
def setUp(self):
self.mock_ssh = self.add_mock('vmupdate.channel.SSHClient', new_callable=get_mock_ssh_client)
self.mock_virt = get_mock_virtualizer()
def test_get_pkgmgrs(self):
vm = VM(self.mock_virt, 'Test Machine 1')
pkgmgrs = get_pkgmgrs(vm)
self.assertEqual(pkgmgrs, [('testpkgmgr', ['update', 'upgrade'])])
def test_run_pkgmgr(self):
vm = VM(self.mock_virt, 'Test Machine 4')
run_pkgmgr(vm, TEST_PKGMGR, config.pkgmgrs[TEST_OS][TEST_PKGMGR])
self.mock_ssh.return_value.exec_command.assert_has_calls([mock.call('testpkgmgr update'),
mock.call('testpkgmgr upgrade')])
def test_run_pkgmgr_as_elevated(self):
vm = VM(self.mock_virt, 'Test Machine 1')
run_pkgmgr(vm, TEST_PKGMGR, config.pkgmgrs[TEST_OS][TEST_PKGMGR])
self.mock_ssh.return_value.exec_command.assert_ha | s_calls([mock.call('sudo -S testpkgmgr update'),
mock.call('sudo -S testpkgmgr upgrade')])
def | test_run_pkgmgr_update_error(self):
mock_stdout = self.mock_ssh.return_value.exec_command.return_value[1]
mock_stdout.channel.recv_exit_status.return_value = -1
vm = VM(self.mock_virt, 'Test Machine 4')
self.assertRaises(UpdateError, run_pkgmgr, vm, TEST_PKGMGR, config.pkgmgrs[TEST_OS][TEST_PKGMGR])
|
HeavenMin/PlantImageRecognition | deepLearning/flowerNet.py | Python | apache-2.0 | 19,281 | 0.021023 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR : MIN
PURPOSE : the deep learning CNN model, similar as inception
VERSION : 0.1
DATE : 4.2017
"""
__author__ = 'Min'
import math
import time
import tensorflow as tf
from datetime import datetime
NUM_CLASSES = 50
slim = tf.contrib.slim
# 产生截断的正太分布
# produce a truncated normal distriubtion
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# 生成默认参数
# Generate default parameters
def flowerNet_arg_scope(weight_decay = 0.00004, stddev = 0.1,
batch_norm_var_collection = 'moving_vars'):
batch_norm_params = {
# 参数衰减系数
# parameter attenuation coefficient
'decay': 0.9997,
'epsilon': 0.001,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# auto assign default values
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer = slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer = tf.truncated_normal_initializer(stddev = stddev),
activation_fn = tf.nn.relu, #Activation function
normalizer_fn = slim.batch_norm,
normalizer_params = batch_norm_params) as scope:
return scope
# 生成网络的卷积 池化部分
# generate convolitonal layer and pooling layer in the CNN
def flowerNet_base(inputs, scope = None):
end_points = {}
with tf.variable_scope(scope, 'Inception', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride = 1, padding = 'VALID'):
# convolutional layer 3x3/2 32
net = slim.conv2d(inputs, 32, [3, 3], stride = 2, scope = 'conv')
# convolutional layer 3x3/1 32
net = slim.conv2d(net, 32, [3, 3], scope = 'conv_1')
# convolutional layer 3x3/1 64
net = slim.conv2d(net, 64, [3, 3], padding= 'SAME',
scope = 'conv_2')
# max pool layer 3x3/2
net = slim.max_pool2d(net, [3, 3], stride = 2, scope = 'pool')
# convolutional layer 1x1/1 80
net = slim.conv2d(net, 80, [1, 1], scope = 'conv_3')
# convolutional layer 3x3/1 192
net = slim.conv2d(net, 192, [3, 3], scope = 'conv_4')
# max pool layer 3,3/2
net = slim.max_pool2d(net, [3, 3], stride = 2, scope = 'pool_1')
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride = 1, padding = 'SAME'):
# mixed module 1
with tf.variable_scope('mixed'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 32, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 2
with tf.variable_scope('mixed_1'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 64, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 3
with tf.variable_scope('mixed_2'):
with tf.variable_scope('branch0'):
| branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
| with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 64, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 4
with tf.variable_scope('mixed_3'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 384, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 96, [3, 3], scope = 'conv1')
branch1 = slim.conv2d(branch1, 96, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.max_pool2d(net, [3, 3], stride = 2,
padding = 'VALID', scope = 'maxPool')
net = tf.concat([branch0, branch1, branch2], 3)
# mixed module 5
with tf.variable_scope('mixed_4'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 128, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 128, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 128, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 128, [7, 1], scope = 'conv1')
branch2 = slim.conv2d(branch2, 128, [1, 7], scope = 'conv2')
branch2 = slim.conv2d(branch2, 128, [7, 1], scope = 'conv3')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv4')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 6
with tf.variable_scope('mixed_5'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 160, [1 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_network_management_client_operations.py | Python | mit | 7,052 | 0.005105 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkManagementClientOperationsMixin:
async def check_dns_name_availability(
self,
location: str,
domain_name_label: str,
**kwargs: Any
) -> "_models.DnsNameAvailabilityResult":
"""Checks whether a domain name in the cloudapp.azure.com zone is available for use.
:param location: The location of the domain name.
:type location: str
:param domain_name_label: The domain name to be verified. It must conform to the following
regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
:type domain_name_label: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DnsNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.DnsNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.check_dns_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['domainNameLabel'] = self._serialize.query("domain_name_label", domain_name_label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DnsNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_dns_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability'} # type: ignore
async def supported_security_providers(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs: Any
) -> "_models.VirtualWanSecurityProviders":
"""Gives the supported security providers for the virtual wan.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which supported security providers are
needed.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualWanSecurityProviders, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.VirtualWanSecurityProviders
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualWanSecurityProviders"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.supported_security_providers.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
| map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
| deserialized = self._deserialize('VirtualWanSecurityProviders', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
supported_security_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/supportedSecurityProviders'} # type: ignore
|
ipa-led/airbus_coop | airbus_docgen/src/airbus_docgen/docgen/pkg/node/__init__.py | Python | apache-2.0 | 2,723 | 0.006978 | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from airbus_docgen.common import html
from airbus_docgen.common.html import HtmlElement
from airbus_docgen.docgen.pkg.node.description import NodeDescription
from airbus_docgen.docgen.pkg.node.ios import NodeInputOutput
from airbus_docgen.docgen.pkg.node.params import NodeParameters
class RosNode(HtmlElement):
def __init__(self):
HtmlElement.__init__(self,
tag=html.Sections.article,
attrib={"class":"node"})
def read(self, node_name, node_xml, isection, iarticle):
item_index = 0
try:
node_desc = NodeDescription()
if node_desc.read(node_name, node_xml) is True:
item_index += 1
title = HtmlElement(html.Sections.h4)
title.text = "%i.%i.%i. Description"%(isection, iarticle, item_index)
self.append(title)
self.append(node_desc)
exce | pt Exception as ex:
html.HTMLException(ex,self)
try:
node_io = NodeInputOutput()
if node_io.read(node_name, node_xml) is True:
item_index += 1
title = HtmlElement(html.Sections.h4)
title.text = "%i.%i.%i. Input/Output"%(isection, iarticle, item_index)
self.append(title)
self.append(node_io)
except Exception as ex:
html.HTMLException(ex,s | elf)
try:
node_params = NodeParameters()
if node_params.read(node_name, node_xml) is True:
item_index += 1
title = HtmlElement(html.Sections.h4)
title.text = "%i.%i.%i. Parameter(s)"%(isection, iarticle, item_index)
self.append(title)
self.append(node_params)
except Exception as ex:
html.HTMLException(ex,self)
if item_index is 0:
return False
else:
return True
|
ViralLeadership/numpy | benchmarks/benchmarks/bench_indexing.py | Python | bsd-3-clause | 1,883 | 0 | from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares_, indexes_, indexes_rand_
import sys
import six
from numpy import memmap, float32, array
import numpy as np
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': squares_,
'np': np,
'indexes_': indexes_,
'indexes_rand_': indexes_rand_}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
else:
code = "def run():\n for a in squares_.itervalues(): a[%s]%s"
code = code % (sel, op)
six.exec_(code, ns)
self.func = ns['run']
def time_op(self, indexes, | sel, op):
self.func()
class IndexingSeparate(Benchmark):
def setup(self):
self.fp = memmap('tmp.dat', dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructu | red0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
|
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django/utils/http.py | Python | agpl-3.0 | 10,279 | 0.000973 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
PROTOCOL_TO_PORT = {
'http': 80,
'https': 443,
}
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is long than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avo | id
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too | large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"% |
RoboCupULaval/TrackBots | tracker/proto/messages_robocup_ssl_wrapper_pb2.py | Python | mit | 2,860 | 0.008392 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messages_robocup_ssl_wrapper.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import messages_robocup_ssl_detection_pb2
import messages_robocup_ssl_geometry_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messages_robocup_ssl_wrapper.proto',
package='',
serialized_pb=_b('\n\"messages_robocup_ssl_wrapper.proto\x1a$messages_robocup_ssl_detection.proto\x1a#messages_robocup_ssl_geometry.proto\"`\n\x11SSL_WrapperPacket\x12&\n\tdetection\x18\x01 \x01(\x0b\x32\x13.SSL_DetectionFrame\x12#\n\x08geometry\x18\x02 \x01(\x0b\x32\x11.SSL_GeometryData')
,
dependencies=[messages_robocup_ssl_detection_pb2.DESCRIPTOR,messages_robocup_ssl_geometry_pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SSL_WRAPPERPACKET = _descriptor.Descriptor(
name='SSL_WrapperPacket',
full_name='SSL_WrapperPacket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='detection', full_name='SSL_WrapperPacket.detection', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='geometry', full_name='SSL_WrapperPacket.geometry', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=209,
)
_SSL_WRAPPERPACKET.fields_by_name['detection'].message_type = messages_robocup_ssl_detection_pb2._SSL_DETECTIONFRAME
_SSL_WRAPPERPACKET.fields_by_n | ame['geometry'].message_type = messages_robocup_ssl_geometry_pb2._SSL_GEOMETRYDATA
DESCRIPTOR.message_t | ypes_by_name['SSL_WrapperPacket'] = _SSL_WRAPPERPACKET
SSL_WrapperPacket = _reflection.GeneratedProtocolMessageType('SSL_WrapperPacket', (_message.Message,), dict(
DESCRIPTOR = _SSL_WRAPPERPACKET,
__module__ = 'messages_robocup_ssl_wrapper_pb2'
# @@protoc_insertion_point(class_scope:SSL_WrapperPacket)
))
_sym_db.RegisterMessage(SSL_WrapperPacket)
# @@protoc_insertion_point(module_scope)
|
google-research/federated | targeted_attack/attacked_fedavg.py | Python | apache-2.0 | 24,990 | 0.005082 | # Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the Targeted Attacks in Federated Learning.
This is intended to implement and simulate existing targeted attacks in
federated learning systems. Most of the implementations are based on
'tff.ressearch.basedline_fedavg'. Similar simulation scripts can be used by
replacing relevant functions and plugging-in correct parameters.
Based on the following papers:
Analyzing Federated Learning through an Adversarial Lens
Arjun Nitin Bhagoji, Supriyo Chakraborty, Prateek Mittal,
Seraphin Calo ICML 2019.
https://arxiv.org/abs/1811.12470
How To Back door Federated Learning
Eugene Bagdasaryan, Andreas Veit, Yiqing Hua, Deborah Estrin,
Vitaly Shmatikov
https://arxiv.org/abs/1807.00459
"""
import collections
from typing import Any, Callable, OrderedDict
import attr
import tensorflow as tf
import tensorflow_federated as tff
@attr.s(eq=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `weights_delta_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_unfinalized_metrics`, reflecting the
results of training on the input dataset.
- `optimizer_output`: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
weights_delta_weight = attr.ib()
model_output = attr.ib()
optimizer_output = attr.ib()
@attr.s(eq=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
- `model`: A dictionary of model's trainable variables.
- `optimizer_state`: Variables of optimizer.
"""
model = attr.ib()
optimizer_state = attr.ib()
delta_aggregate_state = attr.ib()
def _create_optimizer_vars(model, optimizer):
model_weights = _get_weights(model)
delta = tf.nest.map_structure(tf.zeros_like, model_weights.trainable)
grads_and_vars = tf.nest.map_structure(
lambda x, v: (-1.0 * x, v), tf.nest.flatten(delta),
tf.nest.flatten(model_weights.trainable))
optimizer.apply_gradients(grads_and_vars, name='server_update')
return optimizer.variables()
def _get_weights(model):
return tff.learning.framework.ModelWeights.from_model(model)
def _get_norm(weights):
"""Compute the norm of a weight matrix.
Args:
weights: a OrderedDict specifying weight matrices at different layers.
Returns:
The norm of all layer weight matrices.
"""
return tf.linalg.global_norm(tf.nest.flatten(weights))
@tf.function
def server_update(model, server_optimizer, server_optimizer_vars, server_state,
weights_delta, new_delta_aggregate_state):
"""Updates `server_state` based on `weights_delta`.
Args:
model: A `tff.learning.Model`.
server_optimizer: A `tf.keras.optimizers.Optimizer`.
server_optimizer_vars: A list of previous variables of server_optimzer.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
new_delta_aggregate_state: An update to the server state.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tf.nest.map_structure(lambda a, b: a.assign(b),
(model_weights, server_optimizer_vars),
(server_state.model, server_state.optimizer_state))
grads_and_vars = tf.nest.map_structure(
lambda x, v: (-1.0 * x, v), tf.nest.flatten(weights_delta),
tf.nest.flatten(model_weights.trainable))
server_optimizer.apply_gradients(grads_and_vars, name='server_update')
return tff.structure.update_struct(
server_state,
model=model_weights,
optimizer_state=server_optimizer_vars,
delta_aggregate_state=new_delta_aggregate_state)
class ClientExplicitBoosting:
"""Client tensorflow logic for explicit boosting."""
def __init__(self, boost_factor):
"""Specify the boosting parameter.
Args:
boost_factor: A 'tf.float32' specifying how malicious update is boosted.
"""
self.boost_factor = boost_factor
@tf.function
def __call__(self, model, optimizer, benign_dataset, malicious_dataset,
client_type, initial_weights):
"""Updates client model with client potentially being malicious.
Args:
model: A `tff.learning.Model`.
optimizer: A 'tf.keras.optimizers.Optimizer'.
benign_dataset: A 'tf.data.Dataset' consisting of benign dataset.
malicious_dataset: A 'tf.data.Dataset' consisting of malicious dataset.
client_type: A 'tf.bool' indicating whether the client is malicious; iff
`True` the client will construct its update using `malicious_dataset`,
otherwise will construct the update using `benign_dataset`.
initial_weights: A `tff.learning.Model.weights` from server.
Returns:
A 'ClientOutput`.
"""
model_weights = _get_weights(model)
@tf.function
def reduce_fn(num_examples_sum, batch):
"""Runs `tff.learning.Model.train_on_batch` on local client batch."""
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
gradients = tape.gradient(output.loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return num_examples_sum + tf.shape(output.predictions)[0]
@tf.function
def compute_benign_update():
"""compute benign update sent back to the server."""
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
num_examples_sum = benign_dataset.reduce(
initial_state=tf.constant(0), reduce_func=reduce_fn)
weights_delta_benign = tf.nest.map_structure(lambda a, b: a - b,
| model_weights.trainable,
| initial_weights.trainable)
aggregated_outputs = model.report_local_unfinalized_metrics()
return weights_delta_benign, aggregated_outputs, num_examples_sum
@tf.function
def compute_malicious_update():
"""compute malicious update sent back to the server."""
result = compute_benign_update()
weights_delta_benign, aggregated_outputs, num_examples_sum = result
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
malicious_dataset.reduce(
initial_state=tf.constant(0), reduce_func=reduce_fn)
weights_delta_malicious = tf.nest.map_structure(lambda a, b: a - b,
model_weights.trainable,
initial_weights.trainable)
weights_delta = tf.nest.map_structure(
tf.add, weights_delta_benign,
tf.nest.map_structure(lambda delta: delta * self.boost_factor,
weights_delta_malicious))
return weights_delta, aggregated_outputs, num_examples_sum
result = tf.cond(
tf.equal(client_type, True), compute_malicious_update,
compute_benign_update)
weights_delta, aggregated_outputs, num_examples_sum = result
weights_delta_weight = tf.cast(num_examples_sum, tf.float32)
weight_norm = _get_norm(weights_delta)
return ClientOutput(
weights_delta, weights_delta_weight, aggregated_outputs,
collections.OrderedDict({
|
rbramwell/pulp | client_lib/pulp/client/commands/options.py | Python | gpl-2.0 | 2,491 | 0.004014 | """
Contains CLI framework option and flag instances for options that are used
across multiple command areas. Examples include specifying a repository ID or
specifying notes on a resource.
The option instances in this module should **NEVER** be modified; changes will
be reflected across the CLI. If changes need to be made, for instance changing
the required flag, a copy of the option should be created and the copy
manipulated with the desired changes.
"""
from gettext import gettext as _
from pulp.client.extensions.extensions import PulpCliOption, PulpCliFlag
from pulp.client import parsers, validators
# General Resource
DESC_ID = _('unique identifier; only alphanumeric, -, and _ allowed')
DESC_ID_ALLOWING_DOTS = _('unique identifier; only alphanumeric, ., -, and _ allowed')
DESC_NAME = _('user-readable display name (may contain i18n characters)')
DESC_DESCRIPTION = _('user-readable description (may contain i18n characters)')
DESC_NOTE = _(
'adds/updates/deletes notes to programmatically identify the resource; '
'key-value pairs must be separated by an equal sign (e.g. key=value); multiple notes can '
'be | changed by specifying this option multiple times; notes are deleted by '
'specifying "" as the value')
DESC_ALL = _('match all records. If other filters are specified, they will be '
'applied. This option is only useful when you need to explicitly '
'request that no filters be applied.')
# General Resource
OPTION_NAME = PulpCliOption('--display-name', DESC_N | AME, required=False)
OPTION_DESCRIPTION = PulpCliOption('--description', DESC_DESCRIPTION, required=False)
OPTION_NOTES = PulpCliOption('--note', DESC_NOTE, required=False,
allow_multiple=True, parse_func=parsers.parse_notes)
# IDs
OPTION_REPO_ID = PulpCliOption('--repo-id', DESC_ID_ALLOWING_DOTS, required=True,
validate_func=validators.id_validator_allow_dots)
OPTION_GROUP_ID = PulpCliOption('--group-id', DESC_ID, required=True,
validate_func=validators.id_validator)
OPTION_CONSUMER_ID = PulpCliOption('--consumer-id', DESC_ID_ALLOWING_DOTS, required=True,
validate_func=validators.id_validator_allow_dots)
OPTION_CONTENT_SOURCE_ID = PulpCliOption('--source-id', DESC_ID, aliases=['-s'], required=False,
validate_func=validators.id_validator)
FLAG_ALL = PulpCliFlag('--all', DESC_ALL)
|
Zokol/ArduinoCNC | cnc.py | Python | mit | 2,290 | 0.049345 | import serial
import sys
from time import sleep
def move(dir):
print dir
ser.write(dir) # write a string
ser.sendBreak(0.25)
ser.flush()
sleep(1)
def ResetCoords():
dir = 'r'
print dir
ser.write(dir) # write a string
ser.sendBreak(0.25)
ser.flush()
sleep(1)
def DrawRect(dim):
print ""
print "Drawing 10-size rectangle"
out = ""
k = 2;
while(k > 1):
print "First side:"
dir = 'd'
for i in range(0, dim[0]):
move(dir)
print "Second side:"
dir = 'x'
for i in range(0, dim[1]):
move(dir)
print "Third side:"
dir = 'a'
for i in range(0, dim[0]):
move(dir)
print "Fourth side:"
dir = 'w'
for i in range(0, dim[1]):
move(dir)
print "Finished, starting over."
print "________________________"
k = k - 1
def ManualControl():
run = 1
while | run == 1:
print ""
print ""
print "___________________________"
print "Use Keypad or following keys to control motors"
print "Direction:"
print "q w e"
print "a s d"
print "z x c"
print "Drill control:"
print " Up: f"
print "Down: v"
print ""
print "Press m to exit | to menu"
print "___________________________"
select = raw_input(": ")
if select == "m":
run = 0
else:
move(select)
def DrawText():
print "This option is not ready yet"
return 0
def ClosePort():
ser.close() # close port
def OpenPort(port):
print ""
print "Initializing Com-port to device."
ser = serial.Serial(port, 9600, 8, serial.PARITY_NONE, 1, None, False, False, None, False, None)
print "" + ser.portstr + " is open"
return ser
def Menu():
print "___________________________"
print "Menu"
print "1. Manual Control"
print "2. Demonstration"
print "3. Text carving"
print "4. Quit"
print ""
select = raw_input("Select: ")
if select == "1":
ManualControl()
if select == "2":
DrawRect([5,5])
if select == "3":
DrawText()
if select == "4":
Quit()
def Quit():
ClosePort()
sys.exit()
print "Welcome to PyCNC 0.5"
print "Author: Heikki Juva @ 2011"
print ""
print "___________________________"
port = raw_input("Give CNC port name ")
ser = OpenPort(port)
print ""
while(1):
Menu() |
kylef/jsonschema-test | jsonschema_test/__init__.py | Python | bsd-2-clause | 3,350 | 0.002687 | import sys
import os
import json
from jsonschema import validate
def load_json(path):
with open(path) as fp:
return json.load(fp)
def load_json_suite(path):
schema = test_schema()
suite = load_json(path)
try:
validate(suite, schema)
except Exception as e:
print('{} is not a valid test file.'.format(path))
print(e)
exit(2)
return suite
def print_ansi(code, text):
print('\033[{}m{}\x1b[0m'.format(code, text))
print_bold = lambda text: print_ansi(1, text)
print_green = lambda text: print_ansi(32, text)
print_red = lambda text: print_ansi(31, text)
print_yellow = lambda text: print_ansi(33, text)
def test_schema():
path = os.path.join(os.path.dirname(__file__), 'schema.json')
return load_json(path)
def test(schema_path, suite_paths):
schema = load_json(schema_path)
suites = map(load_json_suite, suite_paths)
passes = 0
failures = 0
skipped = 0
for suite in suites:
for case in suite:
if case.get('skip', False):
print_yellow('-> {} (skipped)'.format(case['description']))
for test in case['tests']:
print_yellow(' -> {} (skipped)'.format(test['description']))
skipped += 1
print('')
continue
print_bold('-> {}'.format(case['description']))
fo | r test in case['tests']:
if test.get('skip', False):
skipped += 1
| print_yellow(' -> {} (skipped)'.format(test['description']))
continue
success = True
try:
validate(test['data'], schema)
except Exception as e:
if test['valid']:
success = False
else:
if not test['valid']:
success = False
if success:
passes += 1
print_green(' -> {}'.format(test['description']))
else:
failures += 1
print_red(' -> {}'.format(test['description']))
print(' Expected data to validate as: {}'.format(test['valid']))
print(' ' + json.dumps(test['data']))
print('')
print('')
if skipped > 0:
print('{} passes, {} skipped, {} failures'.format(passes, skipped, failures))
else:
print('{} passes, {} failures'.format(passes, failures))
if failures:
exit(1)
def usage():
print('Usage: {} <schema> [test suites, ...]'.format(sys.argv[0]))
def validate_json(filename):
if not os.path.isfile(filename):
print('{} does not exist.'.format(filename))
exit(2)
with open(filename) as fp:
try:
json.load(fp)
except Exception as e:
print('{} does not contain valid JSON.'.format(filename))
exit(2)
def main():
if len(sys.argv) > 1:
for filename in sys.argv[1:]:
validate_json(filename)
schema = sys.argv[1]
suites = sys.argv[2:]
if len(suites) > 0:
test(schema, suites)
else:
usage()
if __name__ == '__main__':
main()
|
pythonsingapore/pythonsingapore | website/webapps/django/myproject/myproject/urls.py | Python | mit | 1,625 | 0 | """Main urls.py for the ``pythonsingapore.com`` project."""
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from cms.sitemaps import CMSSitemap
from myproject.sitemaps import BlogSitemap
admin.autodiscover()
sitemaps = {}
sitemaps['cmspages'] = CMSSitemap()
sitemaps['news'] = BlogSitemap()
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG is False and settings.SANDBOX is True:
urlpatterns += patterns(
'',
(r'^404/$', 'django.views.defaults.page_not_found'),
(r'^500/$', 'django.views.defaults.server_error'),
url(r'^static/(?P<pat | h>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
urlpatterns += patterns(
'',
url(r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': sitemaps, }),
url(r'^captcha/', include('captcha.urls')),
url(r'^i18n/', include('django.c | onf.urls.i18n')),
url(settings.ADMIN_URL, include(admin.site.urls)),
url(r'^admin-.+/', include('admin_honeypot.urls')),
url(r'^rosetta/', include('rosetta.urls')),
url(r'^accounts/', include('registration_email.backends.default.urls')),
url(r'^u/', include('user_data.urls')),
url(r'^', include('cms.urls')),
)
|
rpbeltran/cleverbot-api | Cleverbot API/Cleverbot/Cleverbot.py | Python | mit | 6,027 | 0.011117 |
#
# Cleverbot Webscraping API for Python
#
# Made By: Ryan Beltran
#
# On: March 30th 2016
#
# Version history:
# 1.0 3/30/16
#
# Todo list:
# * Disguise traffic as human
# * Make ask() more resilient to changes in site structure
# * Add reset operation by clearing CBSTATE cookie
# * Save sessions by storing raw CBSTATE's (and perhaps SESSIONIDs; experimentation needed)
# * Filter inappropriate responces
# * Filter self advertisements from Cleverbot
# * Further optimize the ask() function
#
#
# Disclaimer
#
# Please use webscrapers responcibly and practice fair and legal discression when deciding on how and if a scraper should be used.
# Under prec | edent from previous court cases including 'Ebay V. Bidders Edge', the use of a web scraper in ways that interfere with the host's intersts may (possibly) be considered illegal trespass.
# Cleverbot does not openly grant site access to robots, or humans attempting to access the site indirectly, and infact statedly denies it.
# Writing this code is not in violation of anyone's Terms of Use. RUNNING this code may be in violation of somebody's Terms of Use.
# If cleverbot blocks you IP address | as a result of your use of any of the following code, consider it a personal problem.
# Per my undrstanding, the only authorized ways of using Cleverbot are:
# * To have a human interface their web application directly at www.cleverbot.com
# * To pay a heck of a lot of money to use their cleverscript API
# Oh also one quick note, software should be free.
#
try: # ~~~~ Aquire Dependencies ~~~~
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time, urllib
except:
print "::Error 011 - Failure to load dependency (Selenium)"
class Cleverbot:
# An automated webscraper for the cleverbot chat AI
# Required Dependencies:
# Selenium
# Phantom JS
# Firefox (for debugging only, firebug plugin recommended to view cookies)
# As well as:
# Python 2.x
# and a stable internet conection
# |---------------------------------------------|
# |---------------------------------------------|
# | - - Error Codes - - |
# |---------------------------------------------|
# |---------------------------------------------|
# | |
# | ::Error 011 : Failure to load dependency |
# | ::Error 110 : Timeout - retrying |
# | ::Error 111 : Timeout - not retrying |
# | ::Error 211 : Failure to load web resource |
# | |
# | ::Failure 111 : Failure to answer prompt |
# |---------------------------------------------|
# |---------------------------------------------|
# Disable headless only for debugging
def __init__(self, headless = True):
try:
if headless:
self.driver = webdriver.PhantomJS('./phantomjs') # Load the phantom JS headless browser
self.driver.set_window_size(1200, 600) # Needs a size, this is kinda arbitrary
else:
self.driver = webdriver.Firefox() # Load the firefox browser
except:
if headless:
print "::Error 011 - Failure to load dependency (PhantomJS)"
else:
print "::Error 011 - Failure to load dependency (Firefox)"
self.load()
def load(self):
try:
self.driver.get("http://www.cleverbot.com/") # Load the cleverbot page
except:
print "::Error 211 : Failure to load resource"
def ask(self, question, maxretries = 8):
# Send the question
self.driver.find_element_by_class_name('stimulus').send_keys(question) # Input field is named 'stimulus'
time.sleep(0.1) # Just for safety
self.driver.execute_script("cleverbot.sendAI()") # This is the JS that is run by the form's 'onsubmit()'
# Wait for a responce
for iteration in range(maxretries): # Keep trying, it can take a while sometimes
try:
WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, "snipTextIcon"))) # An icon with a pair of sizzors will pop up when it is ready
break
except:
if iteration < maxretries:
print " ::Error 110 : Timeout - retrying "
else:
print " ::Error 111 : Timeout - not retrying (recurrent timeout limit exceeded) "
print " ::Failure 111 : Failure to answer prompt"
return ""
time.sleep(0.1)
# Aquire and return responce
cookies = self.driver.get_cookies() # Get cookies
cbstate = [x[u'value'] for x in cookies if x['name'] == u'CBSTATE'][0] # Full conversation is stored in the CBSTATE cookie
return str(urllib.unquote(cbstate.replace('%20',' ').split('&')[-1])) # Extract and format conversation
def getConversation():
cookies = self.driver.get_cookies() # Get cookies
cbstate = [x[u'value'] for x in cookies if x['name'] == u'CBSTATE'][0] # Full conversation is stored in the CBSTATE cookie
return str(urllib.unquote(cbstate.replace('%20',' ').split('&')[6:])) # Extract and format conversation
def quit(self):
self.driver.quit() # Shutdown driver
# A brief testing example
def run_example(headless = True):
cb = Cleverbot(headless)
query = raw_input("Ask cleverbot: ")
while query != "end":
print cb.ask(query)
query = raw_input("\nAsk cleverbot: ")
cb.quit()
run_example()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.