| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| RagaiAhmed/PSP | src/tasks.py | Python | apache-2.0 | 1,617 | 0.005566 |
import src.game_utils.function_proxy as check
from src.basic_functions import *
"""
This file is the one you'll be working on
read the documentation of the functions to know
what it must be able to do.
"""
def move_snake():
"""
This function controls how the snake moves
    Uses an edited version of a previously implemented function in the Snake class.
"""
move_snake_head_to_next()
def grow_snake(body):
"""
This function is responsible for growing the snake when it eats food
    :param body: the snake body to grow
"""
body.append(body[-1]) # adds a cube at the last place in the body
    # where the added cube will follow the previous cube, and so on
def frame_logic():
"""
Controls Frame Logic
"""
snake = get_snake()
snake.move()
body = snake.body
    if body[0] == get_food_position():  # if the snake ate the food
food_location(body) # calls a function to change food location taking care of not spawning on snake body
increase_score()
snake.grow()
    elif body[0] in body[1:] or is_out_of_screen(body[0]):  # checks if the snake has eaten itself or gone out of the screen
game_over()
def food_location(body):
"""
:param body: Snake body to avoid
:return: None
"""
rnd_pnt = random_point()
while rnd_pnt in body:
rnd_pnt = random_point()
change_food_location(rnd_pnt)
def submit_your_functions():
check.proton_frame_logic = frame_logic
    check.proton_grow_snake = grow_snake
    check.proton_move_snake = move_snake
    check.proton_change_food_location = food_location
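# Illustration only (not part of the exercise): grow_snake duplicates the tail
# segment in place, so the new cube overlaps the old tail until the snake moves.
# The tuples below are hypothetical stand-ins for the game's cube objects.
example_body = [(5, 5), (5, 6), (5, 7)]   # head first, tail last
grow_snake(example_body)
assert example_body == [(5, 5), (5, 6), (5, 7), (5, 7)]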
| UKTradeInvestment/export-wins-data | fdi/tests/util.py | Python | gpl-3.0 | 706 | 0 |
from collections import UserDict
class PathDict(UserDict):
def __normalize_key(self, key):
tkey = key
if isinstance(key, str) and '.' in key:
tkey = tuple(key.split('.'))
return tkey
def __setitem__(self, key, value):
tkey = self.__normalize_key(key)
return super().__setitem__(tkey, value)
def __contains__(self, item):
tkey = self.__normalize_key(item)
return super().__contains__(tkey)
    def __getitem__(self, item):
tkey = self.__normalize_key(item)
return super().__getitem__(tkey)
def __delitem__(self, key):
tkey = self.__normalize_key(key)
return super().__delitem__(tkey)
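# Usage sketch (illustrative): dotted string keys are normalized to tuples,
# so the string and tuple forms address the same entry.
d = PathDict()
d['a.b.c'] = 1                     # stored under the tuple ('a', 'b', 'c')
assert 'a.b.c' in d
assert d[('a', 'b', 'c')] == 1     # the tuple form reaches the same entry
del d['a.b.c']
assert ('a', 'b', 'c') not in d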
| toymachine/venster | venster/windows.py | Python | mit | 23,583 | 0.018318 |
## Copyright (c) 2003 Henk Punt
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ctypes import *
#TODO auto ie/comctl detection
WIN32_IE = 0x0550
#TODO: auto unicode selection,
#if unicode:
# CreateWindowEx = windll.user32.CreateWindowExW
#else:
# CreateWindowEx = windll.user32.CreateWindowExA
#etc, etc
DWORD = c_ulong
HANDLE = c_ulong
UINT = c_uint
BOOL = c_int
HWND = HANDLE
HINSTANCE = HANDLE
HICON = HANDLE
HDC = HANDLE
HCURSOR = HANDLE
HBRUSH = HANDLE
HMENU = HANDLE
HBITMAP = HANDLE
HIMAGELIST = HANDLE
HGDIOBJ = HANDLE
HMETAFILE = HANDLE
ULONG = DWORD
ULONG_PTR = DWORD
UINT_PTR = DWORD
LONG_PTR = DWORD
INT = c_int
LPCTSTR = c_char_p
LPTSTR = c_char_p
PSTR = c_char_p
LPCSTR = c_char_p
LPCWSTR = c_wchar_p
LPSTR = c_char_p
LPWSTR = c_wchar_p
PVOID = c_void_p
USHORT = c_ushort
WORD = c_ushort
ATOM = WORD
SHORT = c_short
LPARAM = c_ulong
WPARAM = c_uint
LPVOID = c_void_p
LONG = c_long
BYTE = c_byte
TCHAR = c_char #TODO depends on unicode/wide conventions
DWORD_PTR = c_ulong #TODO what is this exactly?
INT_PTR = c_ulong #TODO what is this exactly?
COLORREF = c_ulong
CLIPFORMAT = WORD
FLOAT = c_float
CHAR = c_char
WCHAR = c_wchar
FXPT16DOT16 = c_long
FXPT2DOT30 = c_long
LCSCSTYPE = c_long
LCSGAMUTMATCH = c_long
COLOR16 = USHORT
LRESULT = LONG_PTR
#### Windows version detection ##############################
class OSVERSIONINFO(Structure):
_fields_ = [("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", TCHAR * 128)]
def isMajorMinor(self, major, minor):
return (self.dwMajorVersion, self.dwMinorVersion) == (major, minor)
GetVersion = windll.kernel32.GetVersionExA
versionInfo = OSVERSIONINFO()
versionInfo.dwOSVersionInfoSize = sizeof(versionInfo)
GetVersion(byref(versionInfo))
def MAKELONG(w1, w2):
return w1 | (w2 << 16)
MAKELPARAM = MAKELONG
def RGB(r,g,b):
return r | (g<<8) | (b<<16)
##### Windows Callback functions ################################
WNDPROC = WINFUNCTYPE(c_int, HWND, UINT, WPARAM, LPARAM)
DialogProc = WINFUNCTYPE(c_int, HWND, UINT, WPARAM, LPARAM)
CBTProc = WINFUNCTYPE(c_int, c_int, c_int, c_int)
MessageProc = CBTProc
EnumChildProc = WINFUNCTYPE(c_int, HWND, LPARAM)
MSGBOXCALLBACK = WINFUNCTYPE(c_int, HWND, LPARAM) #TODO look up real def
class WNDCLASSEX(Structure):
_fields_ = [("cbSize", UINT),
("style", UINT),
("lpfnWndProc", WNDPROC),
("cbClsExtra", INT),
("cbWndExtra", INT),
("hInstance", HINSTANCE),
("hIcon", HICON),
("hCursor", HCURSOR),
("hbrBackground", HBRUSH),
("lpszMenuName", LPCTSTR),
("lpszClassName", LPCTSTR),
("hIconSm", HICON)]
class POINT(Structure):
_fields_ = [("x", LONG),
("y", LONG)]
def __str__(self):
return "POINT {x: %d, y: %d}" % (self.x, self.y)
POINTL = POINT
class POINTS(Structure):
_fields_ = [("x", SHORT),
("y", SHORT)]
PtInRect = windll.user32.PtInRect
class RECT(Structure):
_fields_ = [("left", LONG),
("top", LONG),
("right", LONG),
("bottom", LONG)]
def __str__(self):
return "RECT {left: %d, top: %d, right: %d, bottom: %d}" % (self.left, self.top,
self.right, self.bottom)
def getHeight(self):
return self.bottom - self.top
height = property(getHeight, None, None, "")
def getWidth(self):
return self.right - self.left
width = property(getWidth, None, None, "")
def getSize(self):
return self.width, self.height
size = property(getSize, None, None, "")
def ContainsPoint(self, pt):
"""determines if this RECT contains the given POINT pt
returns True if pt is in this rect
"""
return bool(PtInRect(byref(self), pt))
RECTL = RECT
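# Illustration of RECT's derived properties (coordinate values are arbitrary):
example_rect = RECT(10, 20, 110, 70)    # left, top, right, bottom
assert example_rect.width == 100 and example_rect.height == 50
assert example_rect.size == (100, 50)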
class SIZE(Structure):
_fields_ = [('cx', LONG),
('cy', LONG)]
SIZEL = SIZE
##class MSG(Structure):
## _fields_ = [("hWnd", HWND),
## ("message", UINT),
## ("wParam", WPARAM),
## ("lParam", LPARAM),
## ("time", DWORD),
## ("pt", POINT)]
## def __str__(self):
## return "MSG {%d %d %d %d %d %s}" % (self.hWnd, self.message, self.wParam, self.lParam,
## self.time, str(self.pt))
#Hack: we need to use the same MSG type as ctypes uses!
from ctypes.wintypes import MSG
class ACCEL(Structure):
_fields_ = [("fVirt", BYTE),
("key", WORD),
("cmd", WORD)]
class CREATESTRUCT(Structure):
_fields_ = [("lpCreateParams", LPVOID),
("hInstance", HINSTANCE),
("hMenu", HMENU),
("hwndParent", HWND),
("cx", INT),
("cy", INT),
("x", INT),
("y", INT),
("style", LONG),
("lpszName", LPCTSTR),
("lpszClass", LPCTSTR),
("dwExStyle", DWORD)]
class NMHDR(Structure):
_fields_ = [("hwndFrom", HWND),
("idFrom", UINT),
("code", UINT)]
class PAINTSTRUCT(Structure):
_fields_ = [("hdc", HDC),
("fErase", BOOL),
("rcPaint", RECT),
("fRestore", BOOL),
("fIncUpdate", BOOL),
("rgbReserved", c_char * 32)]
class MENUITEMINFO(Structure):
_fields_ = [("cbSize", UINT),
("fMask", UINT),
("fType", UINT),
("fState", UINT),
("wID", UINT),
("hSubMenu", HMENU),
("hbmpChecked", HBITMAP),
("hbmpUnchecked", HBITMAP),
("dwItemData", ULONG_PTR),
("dwTypeData", LPTSTR),
("cch", UINT),
("hbmpItem", HBITMAP)]
class DLGTEMPLATE(Structure):
_pack_ = 2
_fields_ = [
("style", DWORD),
("exStyle", DWORD),
("cDlgItems", WORD),
("x", c_short),
("y", c_short),
("cx", c_short),
("cy", c_short)
]
class DLGITEMTEMPLATE(Structure):
_pack_ = 2
_fields_ = [
("style", DWORD),
("exStyle", DWORD),
("x", c_short),
("y", c_short),
("cx", c_short),
("cy", c_short),
("id", WORD)
]
class COPYDATASTRUCT(Structure):
    _fields_ = [
("dwData", ULONG_PTR),
("cbData", DWORD),
("lpData", PVOID)]
def LOWORD(dword):
return dword & 0x0000ffff
def HIWORD(dword):
return dword >> 16
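# Round-trip sketch for the packing helpers (values are arbitrary):
packed = MAKELONG(0x1234, 0x5678)   # low word first, high word second
assert LOWORD(packed) == 0x1234
assert HIWORD(packed) == 0x5678
assert RGB(255, 0, 0) == 0x0000FF   # red occupies the low byte of a COLORREF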
TRUE = 1
FALSE = 0
NULL = 0
IDI_APPLICATION = 32512
SW_SHOW = 5
SW_SHOWNORMAL = 1
SW_HIDE = 0
EN_CHANGE = 768
MSGS = [('WM_NULL', 0),
('WM_
| nastya/droidbot | droidbot/adapter/adb.py | Python | mit | 13,744 | 0.003783 |
# This is the interface for adb
import subprocess
import logging
import re
from adapter import Adapter
import time
import sys
import os
class ADBException(Exception):
"""
Exception in ADB connection
"""
pass
class ADB(Adapter):
"""
interface of ADB
send adb commands via this, see:
http://developer.android.com/tools/help/adb.html
"""
UP = 0
DOWN = 1
DOWN_AND_UP = 2
MODEL_PROPERTY = "ro.product.model"
VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
VERSION_RELEASE_PROPERTY = 'ro.build.version.release'
RO_SECURE_PROPERTY = 'ro.secure'
RO_DEBUGGABLE_PROPERTY = 'ro.debuggable'
def __init__(self, device=None):
"""
        initiate an ADB connection from serial no
the serial no should be in output of `adb devices`
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.cmd_prefix = ['adb', "-s", device.serial]
def run_cmd(self, extra_args):
"""
run an adb command and return the output
:return: output of adb command
@param extra_args: arguments to run in adb
"""
if isinstance(extra_args, str) or isinstance(extra_args, unicode):
extra_args = extra_args.split()
if not isinstance(extra_args, list):
msg = "invalid arguments: %s\nshould be list or str, %s given" % (extra_args, type(extra_args))
self.logger.warning(msg)
raise ADBException(msg)
args = [] + self.cmd_prefix
args += extra_args
self.logger.debug('command:')
self.logger.debug(args)
try:
r = subprocess.check_output(args).strip()
        except subprocess.CalledProcessError:  # this might mean the device/emulator crashed
            os._exit(1)
self.logger.debug('return:')
self.logger.debug(r)
return r
def shell(self, extra_args):
"""
run an `adb shell` command
@param extra_args:
@return: output of adb shell command
"""
if isinstance(extra_args, str) or isinstance(extra_args, unicode):
extra_args = extra_args.split()
if not isinstance(extra_args, list):
msg = "invalid arguments: %s\nshould be list or str, %s given" % (extra_args, type(extra_args))
self.logger.warning(msg)
raise ADBException(msg)
shell_extra_args = ['shell'] + extra_args
return self.run_cmd(shell_extra_args)
def check_connectivity(self):
"""
check if adb is connected
:return: True for connected
"""
r = self.run_cmd("get-state")
return r.startswith("device")
def connect(self):
"""
connect adb
"""
self.logger.debug("connected")
def disconnect(self):
"""
disconnect adb
"""
print "[CONNECTION] %s is disconnected" % self.__class__.__name__
def get_property(self, property):
"""
get the value of property
@param property:
@return:
"""
return self.shell(["getprop", property])
def get_model_number(self):
"""
Get device model number. e.g. SM-G935F
"""
return self.get_property(ADB.MODEL_PROPERTY)
def get_sdk_version(self):
"""
Get version of SDK, e.g. 18, 20
"""
try:
return int(self.get_property(ADB.VERSION_SDK_PROPERTY))
except ValueError:
return self.get_property(ADB.VERSION_SDK_PROPERTY)
def get_release_version(self):
"""
Get release version, e.g. 4.3, 6.0
"""
return self.get_property(ADB.VERSION_RELEASE_PROPERTY)
def get_ro_secure(self):
"""
get ro.secure value
@return: 0/1
"""
return int(self.get_property(ADB.RO_SECURE_PROPERTY))
def get_ro_debuggable(self):
"""
get ro.debuggable value
@return: 0/1
"""
return int(self.get_property(ADB.RO_DEBUGGABLE_PROPERTY))
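    # Usage sketch (hypothetical session; assumes `adb` is on the PATH and a
    # device/emulator is attached -- ADB() builds its own Device() when none
    # is given):
    #   adb = ADB()
    #   if adb.check_connectivity():
    #       print adb.get_model_number()   # e.g. "SM-G935F"
    #       print adb.get_sdk_version()    # e.g. 18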
# The following methods are originally from androidviewclient project.
# https://github.com/dtmilano/AndroidViewClient.
def get_display_info(self):
"""
Gets C{mDefaultViewport} and then C{deviceWidth} and C{deviceHeight} values from dumpsys.
This is a method to obtain display dimensions and density
"""
display_info = {}
logical_display_re = re.compile(".*DisplayViewport\{valid=true, .*orientation=(?P<orientation>\d+),"
" .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*")
dumpsys_display_result = self.shell("dumpsys display")
if dumpsys_display_result is not None:
for line in dumpsys_display_result.splitlines():
m = logical_display_re.search(line, 0)
if m:
for prop in ['width', 'height', 'orientation']:
display_info[prop] = int(m.group(prop))
if 'width' not in display_info or 'height' not in display_info:
physical_display_re = re.compile('Physical size: (?P<width>\d+)x(?P<height>\d+)')
m = physical_display_re.search(self.shell('wm size'))
if m:
for prop in ['width', 'height']:
display_info[prop] = int(m.group(prop))
if 'width' not in display_info or 'height' not in display_info:
# This could also be mSystem or mOverscanScreen
display_re = re.compile('\s*mUnrestrictedScreen=\((?P<x>\d+),(?P<y>\d+)\) (?P<width>\d+)x(?P<height>\d+)')
            # This is known to work on older versions (i.e. API 10) where mUnrestrictedScreen is not available
display_width_height_re = re.compile('\s*DisplayWidth=(?P<width>\d+) *DisplayHeight=(?P<height>\d+)')
for line in self.shell('dumpsys window').splitlines():
m = display_re.search(line, 0)
if not m:
m = display_width_height_re.search(line, 0)
if m:
for prop in ['width', 'height']:
display_info[prop] = int(m.group(prop))
if 'orientation' not in display_info:
surface_orientation_re = re.compile("SurfaceOrientation:\s+(\d+)")
output = self.shell("dumpsys input")
m = surface_orientation_re.search(output)
if m:
display_info['orientation'] = int(m.group(1))
density = None
float_re = re.compile(r"[-+]?\d*\.\d+|\d+")
d = self.get_property('ro.sf.lcd_density')
if float_re.match(d):
density = float(d)
else:
d = self.get_property('qemu.sf.lcd_density')
if float_re.match(d):
density = float(d)
else:
physicalDensityRE = re.compile('Physical density: (?P<density>[\d.]+)', re.MULTILINE)
m = physicalDensityRE.search(self.shell('wm density'))
if m:
density = float(m.group('density'))
if density is not None:
display_info['density'] = density
display_info_keys = {'width', 'height', 'orientation', 'density'}
if not display_info_keys.issuperset(display_info):
self.logger.warning("getDisplayInfo failed to get: %s" % display_info_keys)
return display_info
def get_enabled_accessibility_services(self):
"""
Get enabled accessibility services
:return: the enabled service names, each service name is in <package_name>/<service_name> format
"""
r = self.shell("settings get secure enabled_accessibility_services")
r = re.sub(r'(?m)^WARNING:.*\n?', '', r)
return r.strip().split(":") if r.strip() != '' else []
def disable_accessibility_service(self, service_name):
"""
Disable an accessibility service
:param se
| andrewjylee/omniplay | logdb/pydot.py | Python | bsd-2-clause | 60,152 | 0.01694 |
# -*- coding: Latin-1 -*-
"""Graphviz's dot language Python interface.
This module provides with a full interface to create handle modify
and process graphs in Graphviz's dot language.
References:
pydot Homepage: http://code.google.com/p/pydot/
Graphviz: http://www.graphviz.org/
DOT Language: http://www.graphviz.org/doc/info/lang.html
Programmed and tested with Graphviz 2.26.3 and Python 2.6 on OSX 10.6.4
Copyright (c) 2005-2011 Ero Carrera <ero.carrera@gmail.com>
Distributed under MIT license [http://opensource.org/licenses/mit-license.html].
"""
__revision__ = "$LastChangedRevision: 25 $"
__author__ = 'Ero Carrera'
__version__ = '1.0.%d' % int( __revision__[21:-2] )
__license__ = 'MIT'
import os
import re
import subprocess
import tempfile
import copy
try:
import dot_parser
except Exception, e:
print "Couldn't import dot_parser, loading of dot files will not be possible."
GRAPH_ATTRIBUTES = set( ['Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank' ] )
EDGE_ATTRIBUTES = set( ['URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank' ] )
NODE_ATTRIBUTES = set( ['URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes dot2tex
'texlbl', 'texmode' ] )
CLUSTER_ATTRIBUTES = set( ['K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'] )
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError, "A frozendict cannot be modified."
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.iteritems():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append( frozendict(elm) )
else:
v_.append( elm )
arg[k] = tuple(v_)
args_.append( arg )
else:
args_.append( arg )
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.iteritems())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_:,]*$')
id_re_num = re.compile('^[0-9,]+$')
id_re_with_port = re.compile('^([^:]*):([^:]*)$')
id_re_dbl_quoted = re.compile('^\".*\"$', re.S)
id_re_html = re.compile('^<.*>$', re.S)
def needs_quotes( s ):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too.
"""
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not id_re_dbl_quoted.match(s):
return True
for test in [id_re_alpha_nums, id_re_num, id_re_dbl_quoted, id_re_html]:
if test.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
def quote_if_necessary(s):
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance( s, basestring ):
return s
if needs_quotes(s):
replace = {'"' : r'\"',
"\n" : r'\n',
"\r" : r'\r'}
for (a,b) in replace.items():
s = s.replace(a, b)
return '"
|
' + s + '"'
return s
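# For instance (illustrative values):
#   quote_if_necessary('node_1')     ->  'node_1'        (valid dot ID, untouched)
#   quote_if_necessary('two words')  ->  '"two words"'   (space forces quoting)
#   quote_if_necessary(True)         ->  'True'          (bools become strings)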
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
    The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return dot_parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = file(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
| jaredlunde/cargo-orm | unit_tests/aio/AioPostgresPool.py | Python | mit | 5,680 | 0 |
#!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
"""
`Unit tests for cargo.clients.AioPostgresPool`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2016 Jared Lunde © The MIT License (MIT)
http://github.com/jaredlunde
"""
import unittest
import psycopg2
from cargo.cursors import *
from cargo.clients import AioPostgresPool, local_client
from unit_tests.aio import configure
class TestAioPostgresPool(unittest.TestCase):
@staticmethod
def setUpClass():
db = configure.db
configure.drop_schema(db, 'cargo_tests', cascade=True, if_exists=True)
configure.create_schema(db, 'cargo_tests')
configure.Plan(configure.Foo()).execute()
@staticmethod
def tearDownClass():
db = configure.db
configure.drop_schema(db, 'cargo_tests', cascade=True, if_exists=True)
local_client.clear()
'''def test_connect(self):
client = AioPostgresPool()
self.assertFalse(client.autocommit)
self.assertIsNone(client._pool)
self.assertDictEqual(client._connection_options, {})
self.assertIsNone(client._schema)
self.assertEqual(client.encoding, None)
self.assertEqual(client.cursor_factory, CNamedTupleCursor)
def test_connection(self):
client = AioPostgresPool(1, 2)
conn = client.get()
self.assertFalse(conn._connection.closed)
conn.close()
self.assertTrue(conn._connection.closed)
client = AioPostgresPool(1, 2)
conn = client.get()
self.assertFalse(conn._connection.closed)
def test_close(self):
client = AioPostgresPool(1, 2)
self.assertTrue(client.closed)
client.connect()
self.assertFalse(client.closed)
client.close()
self.assertTrue(client.closed)
def test_context_manager(self):
with AioPostgresPool(1, 2) as pool:
self.assertFalse(pool.closed)
with pool.get() as connection:
with pool.get() as connection2:
self.assertIsNot(connection, connection2)
with self.assertRaises(psycopg2.pool.PoolError):
with pool.get() as connection3:
pass
with pool.get() as connection4:
self.assertIsNot(connection, connection4)
self.assertIs(connection2.connection,
connection4.connection)
with self.assertRaises(psycopg2.pool.PoolError):
with pool.get() as connection5:
pass
self.assertTrue(pool.closed)
def test_connection_obj(self):
with AioPostgresPool(1, 2) as pool:
            with pool.get() as connection:
self.assertIs(connection.autocommit, pool.autocommit)
self.assertIs(connection._dsn, pool._dsn)
self.assertIs(connection._schema, pool._schema)
                self.assertIs(connection.encoding, pool.encoding)
self.assertIs(connection.minconn, pool.minconn)
self.assertIs(connection.maxconn, pool.maxconn)
self.assertIs(connection.cursor_factory, pool.cursor_factory)
def test_put(self):
with AioPostgresPool(1, 2) as pool:
conn = pool.get()
self.assertIsNotNone(conn._connection)
conn2 = pool.get()
self.assertIsNot(conn2, conn)
with self.assertRaises(psycopg2.pool.PoolError):
pool.get()
# Put conn obj
pool.put(conn)
conn2 = pool.get()
self.assertIsNotNone(conn2)
# Put raw conn
pool.put(conn2.connection)
conn2 = pool.get()
self.assertIsNotNone(conn2)
self.assertTrue(pool.closed)
self.assertTrue(conn.closed)
self.assertTrue(conn2.closed)
def test_commit(self):
client = AioPostgresPool(1, 2)
conn = client.get()
cur = conn.cursor()
client.apply_schema(cur, 'cargo_tests')
cur.execute("INSERT INTO foo (uid, textfield) VALUES (1, 'bar')")
self.assertIsNone(conn.commit())
cur = conn.cursor()
with self.assertRaises(psycopg2.ProgrammingError):
cur.execute(
"INSERT INTO foo (uid, textfield) VALUES (1, 'bar', 4)")
conn.commit()
with self.assertRaises(psycopg2.InternalError):
cur.execute("INSERT INTO foo (uid, textfield) VALUES (1, 'bar')")
client.put(conn)
def test_rollback(self):
client = AioPostgresPool(1, 2)
conn = client.get()
cur = conn.cursor()
client.apply_schema(cur, 'cargo_tests')
cur.execute("INSERT INTO foo (uid, textfield) VALUES (2, 'bar')")
self.assertIsNone(conn.commit())
cur = conn.cursor()
with self.assertRaises(psycopg2.ProgrammingError):
cur.execute(
"INSERT INTO foo (uid, textfield) VALUES (1, 'bar', 4)")
conn.commit()
with self.assertRaises(psycopg2.InternalError):
cur.execute("INSERT INTO foo (uid, textfield) VALUES (1, 'bar')")
conn.rollback()
cur.execute("INSERT INTO foo (uid, textfield) VALUES (3, 'bar')")
self.assertIsNone(conn.commit())
client.put(conn)
def test_minconn_maxconn(self):
client = AioPostgresPool(10, 12)
self.assertEqual(client.pool.minconn, 10)
self.assertEqual(client.pool.maxconn, 12)'''
if __name__ == '__main__':
# Unit test
unittest.main()
| akiokio/centralfitestoque | src/.pycharm_helpers/pydev/pydevd_xml.py | Python | bsd-2-clause | 6,287 | 0.009384 |
import pydev_log
import traceback
import pydevd_resolver
from pydevd_constants import * #@UnusedWildImport
from types import * #@UnusedWildImport
try:
from urllib import quote
except:
from urllib.parse import quote #@UnresolvedImport
try:
from xml.sax.saxutils import escape
def makeValidXmlValue(s):
        return escape(s, {'"': '&quot;'})
except:
#Simple replacement if it's not there.
def makeValidXmlValue(s):
        return s.replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;')
class ExceptionOnEvaluate:
def __init__(self, result):
self.result = result
#------------------------------------------------------------------------------------------------------ resolvers in map
if not sys.platform.startswith("java"):
typeMap = [
#None means that it should not be treated as a compound variable
    #isinstance does not accept a tuple on some versions of python, so we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
typeMap.append((long, None))
except:
pass #not available on all python versions
try:
typeMap.append((unicode, None))
except:
pass #not available on all python versions
try:
typeMap.append((set, pydevd_resolver.setResolver))
except:
pass #not available on all python versions
try:
typeMap.append((frozenset, pydevd_resolver.setResolver))
except:
pass #not available on all python versions
else: #platform is java
from org.python import core #@UnresolvedImport
typeMap = [
(core.PyNone, None),
(core.PyInteger, None),
(core.PyLong, None),
(core.PyFloat, None),
(core.PyComplex, None),
(core.PyString, None),
(core.PyTuple, pydevd_resolver.tupleResolver),
(core.PyList, pydevd_resolver.tupleResolver),
(core.PyDictionary, pydevd_resolver.dictResolver),
(core.PyStringMap, pydevd_resolver.dictResolver),
]
if hasattr(core, 'PyJavaInstance'):
#Jython 2.5b3 removed it.
typeMap.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
def getType(o):
""" returns a triple (typeObject, typeString, resolver
resolver != None means that variable is a container,
and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
try:
type_object = type(o)
type_name = type_object.__name__
except:
        #This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
try:
        if type_name == 'org.python.core.PyJavaInstance':
return type_object, type_name, pydevd_resolver.instanceResolver
if type_name == 'org.python.core.PyArray':
return type_object, type_name, pydevd_resolver.jyArrayResolver
for t in typeMap:
if isinstance(o, t[0]):
return type_object, type_name, t[1]
except:
traceback.print_exc()
#no match return default
return type_object, type_name, pydevd_resolver.defaultResolver
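# Illustration (CPython branch of the typeMap above; not part of the module):
#   getType([1, 2, 3])  ->  (list, 'list', pydevd_resolver.tupleResolver)
#   getType(42)         ->  (int, 'int', None)    # plain value, no resolver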
def frameVarsToXML(frame_f_locals):
""" dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/>
"""
xml = ""
keys = frame_f_locals.keys()
if hasattr(keys, 'sort'):
keys.sort() #Python 3.0 does not have it
else:
keys = sorted(keys) #Jython 2.1 does not have it
for k in keys:
try:
v = frame_f_locals[k]
xml += varToXML(v, str(k))
except Exception:
traceback.print_exc()
pydev_log.error("Unexpected error, recovered safely.\n")
return xml
def varToXML(val, name, doTrim=True):
""" single variable or dictionary to xml representation """
is_exception_on_eval = isinstance(val, ExceptionOnEvaluate)
if is_exception_on_eval:
v = val.result
else:
v = val
type, typeName, resolver = getType(v)
try:
if hasattr(v, '__class__'):
try:
cName = str(v.__class__)
if cName.find('.') != -1:
cName = cName.split('.')[-1]
elif cName.find("'") != -1: #does not have '.' (could be something like <type 'int'>)
cName = cName[cName.index("'") + 1:]
if cName.endswith("'>"):
cName = cName[:-2]
except:
cName = str(v.__class__)
value = '%s: %s' % (cName, v)
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
try:
name = quote(name, '/>_= ') #TODO: Fix PY-5834 without using quote
except:
pass
xml = '<var name="%s" type="%s"' % (makeValidXmlValue(name), makeValidXmlValue(typeName))
if value:
#cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and doTrim:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
#fix to work with unicode values
try:
if not IS_PY3K:
if isinstance(value, unicode):
value = value.encode('utf-8')
else:
if isinstance(value, bytes):
                    value = value.decode('utf-8')
except TypeError: #in java, unicode is a function
pass
xmlValue = ' value="%s"' % (makeValidXmlValue(quote(value, '/>_= ')))
else:
xmlValue = ''
if is_exception_on_eval:
xmlCont = ' isErrorOnEval="True"'
else:
if resolver is not None:
xmlCont = ' isContainer="True"'
else:
xmlCont = ''
return ''.join((xml, xmlValue, xmlCont, ' />\n'))
| jarped/QGIS | python/plugins/processing/algs/lidar/lastools/lasduplicate.py | Python | gpl-2.0 | 3,468 | 0.001442 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasduplicate.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterFile
class lasduplicate(LAStoolsAlgorithm):
LOWEST_Z = "LOWEST_Z"
    UNIQUE_XYZ = "UNIQUE_XYZ"
SINGLE_RETURNS = "SINGLE_RETURNS"
RECORD_REMOVED = "RECORD_REMOVED"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasduplicate')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterBoolean(lasduplicate.LOWEST_Z,
self.tr("keep duplicate with lowest z coordinate"), False))
self.addParameter(ParameterBoolean(lasduplicate.UNIQUE_XYZ,
self.tr("only remove duplicates in x y and z"), False))
self.addParameter(ParameterBoolean(lasduplicate.SINGLE_RETURNS,
self.tr("mark surviving duplicate as single return"), False))
self.addParameter(ParameterFile(lasduplicate.RECORD_REMOVED,
self.tr("record removed duplicates to LAS/LAZ file")))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasduplicate")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
if self.getParameterValue(lasduplicate.LOWEST_Z):
commands.append("-lowest_z")
if self.getParameterValue(lasduplicate.UNIQUE_XYZ):
commands.append("-unique_xyz")
if self.getParameterValue(lasduplicate.SINGLE_RETURNS):
commands.append("-single_returns")
record_removed = self.getParameterValue(lasduplicate.RECORD_REMOVED)
if record_removed is not None and record_removed != "":
commands.append("-record_removed")
commands.append(record_removed)
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
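# Rough shape of the assembled command (illustrative; assumes the standard
# LAStools -i/-o flags are added by the point input/output helpers, with only
# "unique xyz" checked and hypothetical paths):
#   ['/opt/LAStools/bin/lasduplicate', '-i', 'tiles/input.laz',
#    '-unique_xyz', '-o', 'tiles/output.laz']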
| rdmorganiser/rdmo | rdmo/views/migrations/0015_remove_null_true.py | Python | apache-2.0 | 4,097 | 0.003661 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-13 11:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('views', '0014_data_migration'),
]
operations = [
migrations.AlterField(
model_name='view',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this view.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='view',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this view in the primary language.', verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='view',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this view in the secondary language.', verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='view',
name='help_lang3',
field=models.TextField(blank=True, help_text='The help text for this view in the tertiary language.', verbose_name='Help (tertiary)'),
),
migrations.AlterField(
model_name='view',
name='help_lang4',
field=models.TextField(blank=True, help_text='The help text for this view in the quaternary language.', verbose_name='Help (quaternary)'),
),
migrations.AlterField(
model_name='view',
name='help_lang5',
field=models.TextField(blank=True, help_text='The help text for this view in the quinary language.', verbose_name='Help (quinary)'),
),
migrations.AlterField(
model_name='view',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this view.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='view',
name='template',
field=models.TextField(blank=True, help_text='The template for this view, written in Django template language.', verbose_name='Template'),
),
migrations.AlterField(
model_name='view',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this view in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='view',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this view in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='view',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this view in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='view',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this view in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='view',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this view in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
            model_name='view',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this view (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='view',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this view.', max_length=256, verbose_name='URI Prefix'),
),
]
| NuAge-Solutions/NW | oj.py | Python | gpl-3.0 | 292 | 0 |
#!/usr/bin/env python
import sys
from imp import load_source
from os import path
src_path = path.abspath(path.dirname(__file__))
oj_path = path.join(src_path, 'dependencies', 'oj')
sys.path.append(oj_path)
oj = load_source('oj', path.join(oj_path, 'utils', 'run.py'))
oj.run(src_path)
| dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_group_by_test.py | Python | apache-2.0 | 10,492 | 0.000096 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test functionality of group_by, including aggregation_arguments """
import unittest
import pandas as pd
import numpy as np
import math
from sparktkregtests.lib import sparktk_test
class GroupByTest(sparktk_test.SparkTKTestCase):
# Aggregates and names for non-numeric aggregates
# (some aggregates are not defined on integers)
# atk aggregates, then numpy aggregates
pd_cols_str = ['size', '<lambda>', 'max', 'min']
numpy_aggs_str = ['size',
lambda x: pd.Series.nunique(x, False),
'max',
'min']
atk_cols_str = ['_COUNT', '_COUNT_DISTINCT', '_MAX', '_MIN']
pd_cols = ['mean', 'size', '<lambda>', 'max',
'min', 'std', 'nansum', 'var']
numpy_aggs = ['mean',
'size',
lambda x: pd.Series.nunique(x, False),
'max',
'min',
'std',
np.nansum,
'var']
atk_cols = ['_AVG', '_COUNT', '_COUNT_DISTINCT', '_MAX',
'_MIN', '_STDEV', '_SUM', '_VAR']
def setUp(self):
"""Build test frame"""
super(GroupByTest, self).setUp()
# Aggregates to test on strings
self.aggs_str = [self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min]
# Aggregates for numeric columns
self.aggs = [self.context.agg.avg,
self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min,
self.context.agg.stdev,
self.context.agg.sum,
self.context.agg.var]
schema_colors = [("Int32_0_15", int),
("Int32_0_31", int),
("colors", str),
("Int64_0_15", int),
("Int64_0_31", int),
("Float32_0_15", float),
("Float32_0_31", float),
("Float64_0_15", float),
("Float64_0_31", float)]
dataset = self.get_file("colors_32_9cols_128rows.csv")
self.frame = self.context.frame.import_csv(
dataset, schema=schema_colors)
def test_stats_on_string_avg(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.avg})
def test_stats_on_string_stdev(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.stdev})
def test_stats_on_string_sum(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.sum})
def test_stats_on_string_var(self):
"""Non-numeric aggregates e
|
rror on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.var})
def test_invalid_column_name(self):
"""Aggregate on non-existant column errors"""
with self.assertRaises(Exception):
self.frame.group_by(
                'InvalidColumnName', {'colors': self.context.agg.var})
def test_group_int32_standard(self):
"""Test groupby on 1 column, int32"""
stats = self.frame.group_by(['Int32_0_15'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15'])
def test_group_float32_standard(self):
"""Test groupby on 1 column, float32"""
stats = self.frame.group_by(
['Float32_0_15'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15'])
def test_group_float64_standard(self):
"""Test groupby on 1 column, float64"""
stats = self.frame.group_by(
['Float64_0_15'], {'Float64_0_31': self.aggs})
self._validate(stats, 'Float64_0_31', ['Float64_0_15'])
def test_group_int64_standard(self):
"""Test groupby on 1 column, int64"""
stats = self.frame.group_by(['Int64_0_15'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15'])
    def test_group_by_str_standard(self):
"""Test groupby on 1 column, string"""
stats = self.frame.group_by(['colors'], {'Int32_0_31': self.aggs})
self._validate_str(stats, 'Int32_0_31', ['colors'])
def test_group_by_str_agg_str(self):
"""Test groupby on 1 column, string, aggregate is string"""
stats = self.frame.group_by(['colors'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors'])
def test_group_int32_multiple_cols(self):
"""Test groupby on multiple columns, int32"""
stats = self.frame.group_by(
['Int32_0_15', 'Int32_0_31'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15', 'Int32_0_31'])
def test_group_float32_multiple_cols(self):
"""Test groupby on multiple columns, float32"""
stats = self.frame.group_by(
['Float32_0_15', 'Float32_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15', 'Float32_0_31'])
def test_group_float64_multiple_cols(self):
"""Test groupby on multiple columns, float64"""
stats = self.frame.group_by(
            ['Float64_0_15', 'Float64_0_31'], {'Float64_0_31': self.aggs})
        self._validate(stats, 'Float64_0_31', ['Float64_0_15', 'Float64_0_31'])
def test_group_int64_multiple_cols(self):
"""Test groupby on multiple columns, int64"""
stats = self.frame.group_by(
['Int64_0_15', 'Int64_0_31'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15', 'Int64_0_31'])
def test_groupby_str_multiple_cols(self):
"""Test groupby on multiple columns, string"""
stats = self.frame.group_by(
['colors', 'Int32_0_15'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors', 'Int32_0_15'])
def test_group_int32_none(self):
"""Test groupby none, int32 aggregate"""
stats = self.frame.group_by(None, {'Int32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int32_0_31')
def test_group_float32_none(self):
"""Test groupby none, float32 aggregate"""
stats = self.frame.group_by(None, {'Float32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float32_0_31')
def test_group_float64_none(self):
"""Test groupby none, float64 aggregate"""
stats = self.frame.group_by(None, {'Float64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float64_0_31')
def test_group_int64_none(self):
"""Test groupby none, int64 aggregate"""
stats = self.frame.group_by(None, {'Int64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int64_0_31')
def _validate_single_group(self, stats, groupby_cols, aggregator):
# Validate the result of atk groupby and pandas groupby are the same
# when there is single group (none)
pd_stats = stats.to_pandas(s
| sgarrity/bedrock | bedrock/pocketfeed/api.py | Python | mpl-2.0 | 1,744 | 0.00172 |
import datetime
import re
import requests
from django.conf import settings
from django.utils.timezone import make_aware, utc
from raven.contrib.django.raven_compat.models import client as sentry_client
def get_articles_data(count=8):
payload = {
'consumer_key': settings.POCKET_CONSUMER_KEY,
'access_token': settings.POCKET_ACCESS_TOKEN,
'count': count,
'detailType': 'complete',
}
try:
resp = requests.post(settings.POCKET_API_URL, json=payload, timeout=5)
resp.raise_for_status()
return resp.json()
except Exception:
sentry_client.captureException()
return None
def complete_articles_data(articles):
for _, article in articles:
# id from API should be moved to pocket_id to not conflict w/DB's id
article['pocket_id'] = article['id']
# convert time_shared from unix timestamp to datetime
        article['time_shared'] = make_aware(datetime.datetime.fromtimestamp(int(article['time_shared'])), utc)
# remove data points we don't need
del article['comment']
del article['excerpt']
del article['id']
del article['quote']
check_article_image(article)
def check_article_image(article):
"""Determine if external image is available"""
# sanity check to make sure image provided by API actually exists and is https
if article['image_src'] and re.match(r'^https://', article['image_src'], flags=re.I):
try:
resp = requests.get(article['image_src'])
resp.raise_for_status()
except Exception:
sentry_client.captureException()
article['image_src'] = None
else:
article['image_src'] = None
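# Behavior sketch (illustrative; the rejection path needs no network, since a
# non-https URL is dropped before any request is made):
#   article = {'image_src': 'http://example.com/img.png'}   # plain http
#   check_article_image(article)
#   assert article['image_src'] is None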
| bellowsj/aiopogo | aiopogo/pogoprotos/networking/requests/messages/evolve_pokemon_message_pb2.py | Python | mit | 3,240 | 0.008025 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/evolve_pokemon_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/evolve_pokemon_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nDpogoprotos/networking/requests/messages/evolve_pokemon_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"q\n\x14\x45volvePokemonMessage\x12\x12\n\npokemon_id\x18\x01 \x01(\x06\x12\x45\n\x1a\x65volution_item_requirement\x18\x02 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemIdb\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EVOLVEPOKEMONMESSAGE = _descriptor.Descriptor(
name='EvolvePokemonMessage',
full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage.pokemon_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='evolution_item_requirement', full_name='pogoprotos.networking.requests.messages.EvolvePokemonMessage.evolution_item_requirement', index=1,
number=2, type=14, cpp_type=8, label=1,
    has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=267,
)
_EVOLVEPOKEMONMESSAGE.fields_by_name['evolution_item_requirement'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['EvolvePokemonMessage'] = _EVOLVEPOKEMONMESSAGE
EvolvePokemonMessage = _reflection.GeneratedProtocolMessageType('EvolvePokemonMessage', (_message.Message,), dict(
DESCRIPTOR = _EVOLVEPOKEMONMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.evolve_pokemon_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.EvolvePokemonMessage)
))
_sym_db.RegisterMessage(EvolvePokemonMessage)
# @@protoc_insertion_point(module_scope)
| pylessard/python-udsoncan | test/test_stubbed_isotpsock.py | Python | mit | 2,233 | 0.000896 |
from test.UdsTest import UdsTest
from test.stub import StubbedIsoTPSocket
from udsoncan.exceptions import *
import socket
class TestStubbedIsoTPSocket(UdsTest):
def test_open(self):
tpsock = StubbedIsoTPSocket()
self.assertFalse(tpsock.bound)
tpsock.bind(interface='vcan0', rxid=0x100, txid=0x101)
self.assertTrue(tpsock.bound)
tpsock.close()
self.assertFalse(tpsock.bound)
def test_transmit(self):
tpsock1 = StubbedIsoTPSocket()
tpsock2 = StubbedIsoTPSocket(timeout=0.5)
tpsock1.bind(interface='vcan0', rxid=0x200, txid=0x201)
tpsock2.bind(interface='vcan0', rxid=0x201, txid=0x200)
payload1 = b"\x01\x02\x03\x04"
tpsock1.send(payload1)
payload2 = tpsock2.recv()
self.assertEqual(payload1, payload2)
def test_multicast(self):
tpsock1 = StubbedIsoTPSocket()
tpsock2 = StubbedIsoTPSocket(timeout=0.5)
tpsock3 = StubbedIsoTPSocket(timeout=0.5)
tpsock1.bind(interface='vcan0', rxid=0x300, txid=0x301)
tpsock2.bind(interface='vcan0', rxid=0x301, txid=0x300)
tpsock3.bind(interface='vcan0', rxid=0x301, txid=0x300)
payload1 = b"\x01\x02\x03\x04"
tpsock1.send(payload1)
payload2 = tpsock2.recv()
payload3 = tpsock3.recv()
self.assertEqual(payload1, payload2)
self.assertEqual(payload1, payload3)
def test_empty_on_close(self):
tpsock1 = StubbedIsoTPSocket()
tpsock2 = StubbedIsoTPSocket(timeout=0.2)
tpsock1.bind(interface='vcan0', rxid=0x400, txid=0x401)
tpsock2.bind(interface='vcan0', rxid=0x401, txid=0x400)
payload = b"\x01\x02\x03\x04"
tpsock1.send(payload)
tpsock2.close()
with self.assertRaises(socket.timeout):
tpsock2.recv()
def test_no_listener(self):
tpsock1 = StubbedIsoTPSocket()
tpsock2 = StubbedIsoTPSocket(timeout=0.2)
tpsock1.bind(interface='vcan0', rxid=0x400, txid=0x401)
payload = b"\x01\x02\x03\x04"
tpsock1.send(payload)
        tpsock2.bind(interface='vcan0', rxid=0x401, txid=0x400)
        with self.assertRaises(socket.timeout):
tpsock2.recv()
| jinzekid/codehub | python/test_web_speed.py | Python | gpl-3.0 | 1,603 | 0.022163 |
import io, pycurl, sys, os, time
class idctest:
def __init__(self):
self.contents = ''
    def body_callback(self, buf):
        # pycurl hands back bytes under Python 3; decode before appending to the str buffer
        self.contents = self.contents + buf.decode('utf-8', 'ignore')
def test_gzip(input_url):
t = idctest()
#gzip_test = file("gzip_test.txt", 'w')
c = pycurl.Curl()
c.setopt(pycurl.WRITEFUNCTION,t.body_callback)
c.setopt(pycurl.ENCODING, 'gzip')
c.setopt(pycurl.URL,input_url)
c.setopt(pycurl.MAXREDIRS, 5)
c.perform()
http_code = c.getinfo(pycurl.HTTP_CODE)
dns_resolve = c.getinfo(pycurl.NAMELOOKUP_TIME)
http_conn_time = c.getinfo(pycurl.CONNECT_TIME)
http_pre_trans = c.getinfo(pycurl.PRETRANSFER_TIME)
http_start_trans = c.getinfo(pycurl.STARTTRANSFER_TIME)
http_total_time = c.getinfo(pycurl.TOTAL_TIME)
http_size_download = c.getinfo(pycurl.SIZE_DOWNLOAD)
http_header_size = c.getinfo(pycurl.HEADER_SIZE)
    http_speed_download = c.getinfo(pycurl.SPEED_DOWNLOAD)
    print('HTTP response status: %d' % http_code)
    print('DNS resolution time: %.2f ms' % (dns_resolve * 1000))
    print('Connection time: %.2f ms' % (http_conn_time * 1000))
    print('Pre-transfer time: %.2f ms' % (http_pre_trans * 1000))
    print('Transfer start time: %.2f ms' % (http_start_trans * 1000))
    print('Total transfer time: %.2f ms' % (http_total_time * 1000))
    print('Downloaded size: %d bytes' % http_size_download)
    print('HTTP header size: %d bytes' % http_header_size)
    print('Average download speed: %d KB/s' % (http_speed_download / 1024))
if __name__ == '__main__':
    input_url = sys.argv[1]
    test_gzip(input_url)
| hustodemon/spacewalk | backend/wsgi/__init__.py | Python | gpl-2.0 | 609 | 0 |
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
| diogenes1oliveira/mathbind | mathbind/types/basicvaluetype.py | Python | mit | 3,498 | 0.002287 |
#!/usr/bin/env python3
from mathbind.types import BasicType
class BasicValueType(BasicType):
"""
Represents a basic pure type that can be passed by value, thus excluding arrays and pointers.
Attributes:
    - typename (str): basic C typename (int, long long, unsigned, bool, etc.)
- c_math_name (str): corresponding Mathematica C type
- math_name (str): corresponding Mathematica type (Integer, Real)
- c_name (str): corresponding C type (int, long long, float).
"""
    def __init__(self, typename):
self.typename = typename
type_parts = set(typename.split())
self.c_name = typename
if not type_parts:
raise ValueError
elif {'float', 'double'} & type_parts:
self.c_math_name = 'mreal'
self.math_name = 'Real'
elif 'bool' in type_parts:
self.c_name = 'int'
self.c_math_name = 'mbool'
self.math_name = 'Boolean'
elif not type_parts - {'signed', 'unsigned', 'char', 'int', 'short', 'long'}:
self.c_math_name = 'mint'
self.math_name = 'Integer'
else:
raise ValueError('Unrecognized C type')
@classmethod
def from_str(cls, s):
"""
Tries to build a new BasicValueType from the string specification, failing if
the type is a pointer or array-like.
"""
if '*' in s or '[' in s or ']' in s:
raise ValueError('Not a valid basic C type')
        while '  ' in s:
            s = s.replace('  ', ' ')
return BasicValueType(s.strip())
@classmethod
def from_prototype_cstr(cls, s):
"""
Tries to extract (type, argname) from the string.
"""
        while '  ' in s:
            s = s.replace('  ', ' ')
s = s.strip()
if not s.replace(' ', '').replace('_', '').isalnum():
raise ValueError('Unrecognized characters')
*words, argname = s.split()
return BasicValueType.from_str(' '.join(words)), argname.strip()
def __repr__(self):
return 'BasicValueType(typename=%r)' % self.typename
def __eq__(self, other):
return self.typename == other.typename
def retrieve_cstr(self, argname, index, tab='', suffix=None):
if suffix is None:
suffix = self.default_suffix
form = '{tab}{self.c_name} {argname} = MArgument_get{self.math_name}(Args{suffix}[{index}]);\n'
return form.format(argname=argname, self=self, tab=tab, index=index, suffix=suffix)
def return_cstr(self, func_call, tab='', suffix=None):
if suffix is None:
suffix = self.default_suffix
form = (
'{tab}{self.c_name} return_value{suffix} = {func_call};\n'
'{tab}MArgument_set{self.math_name}(Res{suffix}, return_value{suffix});\n'
)
return form.format(func_call=func_call, tab=tab, self=self, suffix=suffix)
def prototype_cstr(self, argname):
return self.c_name + ' ' + argname
def prototype_return_cstr(self):
"""
Returns a C string representing the declaration in a prototype return.
"""
return self.c_name
@property
def math_convert_f(self):
"""
Returns the Mathematica function responsible for converting values
to this one.
"""
if 'float' in self.typename or 'double' in self.typename:
return 'N'
else:
return 'IntegerPart'
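# Usage sketch (illustrative, not part of the original module). The suffix is
# passed explicitly so the example does not depend on BasicType.default_suffix,
# which is defined outside this file.
if __name__ == '__main__':
    t, argname = BasicValueType.from_prototype_cstr('unsigned int count')
    print(t)            # BasicValueType(typename='unsigned int')
    print(t.math_name)  # Integer
    print(t.retrieve_cstr(argname, 0, suffix=''), end='')
    # -> unsigned int count = MArgument_getInteger(Args[0]);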
jijeshmohan/webdriver-rb | firefox/src/py/firefox_profile.py | Python | apache-2.0 | 11,601 | 0.003017
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firefox Profile management."""
import ConfigParser
import logging
import os
import platform
import re
import shutil
import subprocess
import tempfile
import zipfile
import utils
DEFAULT_PORT = 7055
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
def get_profile_ini():
app_data_dir = utils.get_firefox_app_data_dir()
profile_ini = ConfigParser.SafeConfigParser()
profile_ini.read(os.path.join(app_data_dir, "profiles.ini"))
return profile_ini
class FirefoxProfile(object):
"""Represents a firefox profile."""
profile_ini = get_profile_ini()
def __init__(self, name=ANONYMOUS_PROFILE_NAME, port=DEFAULT_PORT,
template_profile=None, extension_path=None):
"""Creates a FirefoxProfile.
Args:
name: the profile name. A new firefox profile is created if the one
specified doesn't exist.
port: the port webdriver extension listens on for command
template_profile: if not none, the content of the specified profile
will be copied from this directory.
extension_path: the source of the webdriver extension
Usage:
-- Get a profile with a given name:
profile = FirefoxProfile("profile_name")
-- Get a new created profile:
profile = FirefoxProfile()
-- Get a new created profile with content copied from "/some/path":
profile = FirefoxProfile(template_profile="/some/path")
"""
self.name = name
self.port = port
if (extension_path is None):
self.extension_path = os.path.join(os.path.dirname(__file__), '..',
'build_artifacts', 'webdriver-extension.zip')
else:
self.extension_path = extension_path
if name == ANONYMOUS_PROFILE_NAME:
self._create_anonymous_profile(template_profile)
self._refresh_ini()
else:
self.initialize()
def _create_anonymous_profile(self, template_profile):
self.anonymous_profile_dir = tempfile.mkdtemp()
if template_profile is not None and os.path.exists(template_profile):
self._copy_profile_source(template_profile)
self._update_user_preference()
self.add_extension(extension_zip_path=self.extension_path)
self._launch_in_silent()
def initialize(self):
self.remove_lock_file()
self.add_extension(True, extension_zip_path=self.extension_path)
def _copy_profile_source(self, source_path):
"""Copy the profile content from source_path source_path.
"""
logging.info("Copying profile from '%s' to '%s'"
% (source_path, self.path))
try:
shutil.rmtree(self.path)
shutil.copytree(source_path, self.path)
self._launch_in_silent()
except OSError, err:
raise Exception("Errors in copying profile: %s" % err)
def add_extension(self, force_create=True, extension_zip_path=None):
"""Adds the webdriver extension to this profile.
        If force_create is True, the fxdriver extension is updated if a
        new version is accessible. The old extension is untouched if the
        new version is unavailable, but it might be deleted if the new
        version is accessible but the upgrade fails.
If force_create is False, nothing will happen if the extension
directory exists and otherwise a new extension will be installed.
The sources of a new extension are (in the order of preference)
(1) zipped file webdriver-extension.zip in the current directory,
which can be created using 'rake firefox_xpi' in
%webdriver_directory%, and
(2) zipped files pointed by extension_zip_path, and
(3) unzipped files specified by environment variable WEBDRIVER;
these unzipped files must include the generated xpt files,
see %webdriver_directory%/firefox/prebuilt, or run
'rake firefox_xpi' and use the built files generated in
%webdriver_directory%/build
        Default value of force_create is True. This enables users to
        install a new extension by attaching it as specified; if
        no files are specified, no installation will be performed even when
        force_create is True.
"""
extension_dir = os.path.join(self.path,
"extensions", "fxdriver@googlecode.com")
logging.debug("extension_dir : %s" % extension_dir)
if force_create or not os.path.exists(extension_dir):
extension_source_path = utils.unzip_to_temp_dir(
"webdriver-extension.zip")
if (extension_source_path is None or
not os.path.exists(extension_source_path)):
extension_source_path = utils.unzip_to_temp_dir(
extension_zip_path)
if (extension_source_path is None or
not os.path.exists(extension_source_path)):
webdriver_dir = os.getenv("WEBDRIVER")
if webdriver_dir is not None:
extension_source_path = os.path.join(
webdriver_dir, "firefox", "src", "extension")
if (extension_source_path is None or
not os.path.exists(extension_source_path)):
raise Exception(
"No extension found at %s" % extension_source_path)
logging.debug("extension_source_path : %s" % extension_source_path)
logging.info("Copying extenstion from '%s' to '%s'"
% (extension_source_path, extension_dir))
try:
if os.path.exists(extension_dir):
shutil.rmtree(extension_dir)
else:
                    #copytree()'s behavior on linux makes me write these
#two lines to ensure that the parent directory exists,
#although it is not required according to the documentation.
os.makedirs(extension_dir)
shutil.rmtree(extension_dir)
shutil.copytree(extension_source_path, extension_dir)
logging.info("Extenstion has been copied from '%s' to '%s'"
% (extension_source_path, extension_dir))
except OSError, err:
logging.info("Fail to install firefox extension. %s" % err)
else:
logging.info("No extension installation required.")
def remove_lock_file(self):
for lock_file in [".parentlock", "lock", "parent.lock"]:
try:
os.remove(os.path.join(self.path, lock_file))
except OSError:
pass
@property
def path(self):
if "anonymous_profile_dir" in self.__dict__:
return self.anonymous_profile_dir
section = self._get_ini_section()
        assert section is not None, "Profile doesn't exist in profiles.ini"
return os.path.join(utils.get_firefox_app_data_dir(),
self.profile_ini.get(section, "Path"))
@staticmethod
def _refresh_ini():
FirefoxProfile.profile_ini = get_profile_ini()
def _launch_in_silent(self):
os.environ["XRE_PROFILE_PATH"] = self.anonymous_p
|
rofile_dir
subprocess.Popen([utils.get_firefox_start_c
mahak/cinder | cinder/tests/unit/image/test_accelerator.py | Python | apache-2.0 | 3,676 | 0.002176
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder.image import accelerator
from cinder.tests.unit import test
class fakeEngine(object):
def __init__(self):
pass
def compress_img(self, src, dest, run_as_root):
pass
def decompress_img(self, src, dest, run_as_root):
pass
class TestAccelerator(test.TestCase):
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
return_value = True)
def test_compress_img_engine_ready(self, mock_accel_engine_ready,
mock_get_engine):
source = mock.sentinel.source
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
accel = accelerator.ImageAccel(source, dest)
accel.compress_img(run_as_root=run_as_root)
mock_engine.compress_img.assert_called()
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
return_value = False)
def test_compress_img_engine_not_ready(self, mock_accel_engine_ready,
mock_get_engine):
source = mock.sentinel.source
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
accel = accelerator.ImageAccel(source, dest)
accel.compress_img(run_as_root=run_as_root)
mock_engine.compress_img.assert_not_called()
    @mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
    @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
                return_value = True)
def test_decompress_img_engine_ready(self, mock_accel_engine_ready,
mock_get_engine):
source = mock.sentinel.source
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
accel = accelerator.ImageAccel(source, dest)
accel.decompress_img(run_as_root=run_as_root)
mock_engine.decompress_img.assert_called()
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
return_value = False)
def test_decompress_img_engine_not_ready(self, mock_accel_engine_ready,
mock_get_engine):
source = mock.sentinel.source
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
accel = accelerator.ImageAccel(source, dest)
accel.decompress_img(run_as_root=run_as_root)
mock_engine.decompress_img.assert_not_called()
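# A minimal sketch of the guard logic exercised above (hypothetical; the real
# cinder.image.accelerator.ImageAccel is more involved): work is delegated to
# the engine only when is_engine_ready() is True, which is exactly what the
# assert_called()/assert_not_called() pairs verify.
class _GuardedAccelSketch(object):
    def __init__(self, source, dest, engine):
        self.source = source
        self.dest = dest
        self.engine = engine
    def is_engine_ready(self):
        return self.engine is not None
    def compress_img(self, run_as_root):
        if not self.is_engine_ready():
            return  # no usable engine: skip compression entirely
        self.engine.compress_img(self.source, self.dest, run_as_root)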
glennlive/gnuradio-wg-grc | grc/gui/MainWindow.py | Python | gpl-3.0 | 13,903 | 0.006258
"""
Copyright 2008, 2009, 2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import gtk
from . import Bars, Actions, Preferences, Utils
from .BlockTreeWindow import BlockTreeWindow
from .Constants import \
NEW_FLOGRAPH_TITLE, DEFAULT_REPORTS_WINDOW_WIDTH
from .Dialogs import TextDisplay, MessageDialogHelper
from .NotebookPage import NotebookPage
from ..core import Messages
MAIN_WINDOW_TITLE_TMPL = """\
#if not $saved
*#slurp
#end if
#if $basename
$basename#slurp
#else
$new_flowgraph_title#slurp
#end if
#if $read_only
(read only)#slurp
#end if
#if $dirname
- $dirname#slurp
#end if
- $platform_name#slurp
"""
PAGE_TITLE_MARKUP_TMPL = """\
#set $foreground = $saved and 'black' or 'red'
<span foreground="$foreground">$encode($title or $new_flowgraph_title)</span>#slurp
#if $read_only
(ro)#slurp
#end if
"""
############################################################
# Main window
############################################################
class MainWindow(gtk.Window):
"""The topmost window with menus, the tool bar, and other major windows."""
def __init__(self, platform, action_handler_callback):
"""
        MainWindow constructor
Setup the menu, toolbar, flowgraph editor notebook, block selection window...
"""
self._platform = platform
gen_opts = platform.blocks['options'].get_param('generate_options')
generate_mode_default = gen_opts.get_value()
generate_modes = [
(o.get_key(), o.get_name(), o.get_key() == generate_mode_default)
for o in gen_opts.get_options()]
# load preferences
Preferences.load(platform)
#setup window
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
vbox = gtk.VBox()
self.hpaned = gtk.HPaned()
self.add(vbox)
#create the menu bar and toolbar
self.add_accel_group(Actions.get_accel_group())
self.menu_bar = Bars.MenuBar(generate_modes, action_handler_callback)
vbox.pack_start(self.menu_bar, False)
self.tool_bar = Bars.Toolbar(generate_modes, action_handler_callback )
vbox.pack_start(self.tool_bar, False)
vbox.pack_start(self.hpaned)
#create the notebook
self.notebook = gtk.Notebook()
self.page_to_be_closed = None
self.current_page = None
self.notebook.set_show_border(False)
self.notebook.set_scrollable(True) #scroll arrows for page tabs
self.notebook.connect('switch-page', self._handle_page_change)
#setup containers
self.flow_graph_vpaned = gtk.VPaned()
#flow_graph_box.pack_start(self.scrolled_window)
self.flow_graph_vpaned.pack1(self.notebook)
self.hpaned.pack1(self.flow_graph_vpaned)
self.btwin = BlockTreeWindow(platform, self.get_flow_graph);
self.hpaned.pack2(self.btwin, False) #dont allow resize
#create the reports window
self.text_display = TextDisplay()
#house the reports in a scrolled window
self.reports_scrolled_window = gtk.ScrolledWindow()
self.reports_scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.reports_scrolled_window.add(self.text_display)
self.reports_scrolled_window.set_size_request(-1, DEFAULT_REPORTS_WINDOW_WIDTH)
self.flow_graph_vpaned.pack2(self.reports_scrolled_window, False) #dont allow resize
#load preferences and show the main window
self.resize(*Preferences.main_window_size())
self.flow_graph_vpaned.set_position(Preferences.reports_window_position())
self.hpaned.set_position(Preferences.blocks_window_position())
self.show_all()
self.reports_scrolled_window.hide()
self.btwin.hide()
############################################################
# Event Handlers
############################################################
def _quit(self, window, event):
"""
Handle the delete event from the main window.
Generated by pressing X to close, alt+f4, or right click+close.
This method in turns calls the state handler to quit.
Returns:
true
"""
Actions.APPLICATION_QUIT()
return True
def _handle_page_change(self, notebook, page, page_num):
"""
Handle a page change. When the user clicks on a new tab,
reload the flow graph to update the vars window and
call handle states (select nothing) to update the buttons.
Args:
notebook: the notebook
page: new page
page_num: new page number
"""
self.current_page = self.notebook.get_nth_page(page_num)
Messages.send_page_switch(self.current_page.get_file_path())
Actions.PAGE_CHANGE()
############################################################
    # Report Window
    ############################################################
    def add_report_line(self, line):
"""
Place line at the end of the text buffer, then scroll its window all the way down.
Args:
line: the new text
"""
self.text_display.insert(line)
############################################################
# Pages: create and close
############################################################
def new_page(self, file_path='', show=False):
"""
Create a new notebook page.
Set the tab to be selected.
Args:
file_path: optional file to load into the flow graph
show: true if the page should be shown after loading
"""
#if the file is already open, show the open page and return
if file_path and file_path in self._get_files(): #already open
page = self.notebook.get_nth_page(self._get_files().index(file_path))
self._set_page(page)
return
try: #try to load from file
if file_path: Messages.send_start_load(file_path)
flow_graph = self._platform.get_new_flow_graph()
flow_graph.grc_file_path = file_path
#print flow_graph
page = NotebookPage(
self,
flow_graph=flow_graph,
file_path=file_path,
)
if file_path: Messages.send_end_load()
except Exception, e: #return on failure
Messages.send_fail_load(e)
if isinstance(e, KeyError) and str(e) == "'options'":
# This error is unrecoverable, so crash gracefully
exit(-1)
return
#add this page to the notebook
self.notebook.append_page(page, page.get_tab())
try: self.notebook.set_tab_reorderable(page, True)
except: pass #gtk too old
self.notebook.set_tab_label_packing(page, False, False, gtk.PACK_START)
#only show if blank or manual
if not file_path or show: self._set_page(page)
def close_pages(self):
"""
Close all the pages in this notebook.
Returns:
true if all closed
"""
open_files = filter(lambda file: file, self._get_files()) #filter blank files
open_file = self.get_page().get_file_path()
#close each page
for page in sorted(self.get_pages(), key=lambda p: p.get_saved()):
self.page_to_be_closed = page
closed = self.close_page(False)
Azure/azure-sdk-for-python | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_11_01_preview/aio/operations/_monitoring_settings_operations.py | Python | mit | 16,766 | 0.005189
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._monitoring_settings_operations import build_get_request, build_update_patch_request_initial, build_update_put_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MonitoringSettingsOperations:
"""MonitoringSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> "_models.MonitoringSettingResource":
"""Get the Monitoring Setting and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MonitoringSettingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
async def _update_put_initial(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> "_models.MonitoringSettingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
request = build_update_put_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._update_put_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
@distributed_trace_async
async def begin_update_put(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> AsyncLROPoller["_models.MonitoringSettingResource"]:
"""Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource:
~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time betwe
mrcatacroquer/Bridge | migrations/versions/2356a38169ea_followers.py | Python | mit | 941 | 0.012752
"""followers
Revision ID: 2356a38169ea
Revises: 288cd3dc5a8
Create Date: 2013-12-31 16:10:34.500006
"""
# revision identifiers, used by Alembic.
revision = '2356a38169ea'
down_revision = '288cd3dc5a8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
    op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('follows')
### end Alembic commands ###
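# Typical Alembic CLI usage for this revision (standard commands, shown here
# for reference only):
#   alembic upgrade 2356a38169ea     # apply the 'follows' table
#   alembic downgrade 288cd3dc5a8    # revert to the previous revision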
superdesk/Live-Blog | plugins/media-archive-image/superdesk/media_archive/impl/image_data.py | Python | agpl-3.0 | 1,525 | 0.007213
'''
Created on Apr 19, 2012
@package: superdesk media archive
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
SQL Alchemy based implementation for the image data API.
'''
from ally.cdm.spec import ICDM
from ally.container import wire
from ally.container.ioc import injected
from ally.container.support import setup
from superdesk.media_archive.api.image_data import IImageDataService, QImageData
from superdesk.media_archive.core.impl.meta_service_base import \
MetaDataServiceBaseAlchemy
from superdesk.media_archive.core.spec import IMetaDataReferencer, \
IThumbnailManager
from superdesk.media_archive.meta.image_data import ImageDataMapped
# --------------------------------------------------------------------
@injected
@setup(IImageDataService, name='imageDataService')
class ImageDataServiceAlchemy(MetaDataServiceBaseAlchemy, IMetaDataReferencer, IImageDataService):
'''
@see: IImageDataService
'''
cdmArchiveImage = ICDM; wire.entity('cdmArchiveImage')
    thumbnailManager = IThumbnailManager; wire.entity('thumbnailManager')
def __init__(self):
assert isinstance(self.cdmArchiveImage, ICDM), 'Invalid archive CDM %s' % self.cdmArchiveImage
assert isinstance(self.thumbnailManager, IThumbnailManager), 'Invalid thumbnail manager %s' % self.thumbnailManager
MetaDataServiceBaseAlchemy.__init__(self, ImageDataMapped, QImageData, self, self.cdmArchiveImage, self.thumbnailManager)
kd0kfo/pi_lcd_controller | python/picontroller/button_listener.py | Python | gpl-3.0 | 1,373 | 0.00437
#!/usr/bin/env python
from time import sleep
class ButtonListener():
"""
Service that polls the button status device and calls a
    callback function for each button pressed.
Callback function should return a boolean to show whether
or not the listening should continue.
"""
def __init__(self, button_callback, device_filename="/dev/buttons", num_buttons=8, *args, **kw):
self.button_callback = button_callback
self.button_device = open(device_filename, "r")
self.num_buttons = num_buttons
        self.last_state = {"0": 0}
    def listen(self):
        while True:
            raw_state = [ord(ch) for ch in self.button_device.read(self.num_buttons)]
state = dict(zip(range(0, len(raw_state)), raw_state))
for (button, isup) in state.iteritems():
if isup:
state[button] = 1
else:
state[button] = 0
if not isup and button in self.last_state and self.last_state[button]:
if not self.button_callback(button):
return
self.last_state = state
sleep(0.2)
if __name__ == "__main__":
def print_button(button):
print("Button %s pressed" % button)
return True
service = ButtonListener(print_button)
service.listen()
ActiveState/code | recipes/Python/577680_Multithreaded_Mandelbrot_Fractal/recipe-577680.py | Python | mit | 1,749 | 0.009148
# Multi-threaded Mandelbrot Fractal (Do not run using IDLE!)
# FB - 201104306
import threading
from PIL import Image
w = 512 # image width
h = 512 # image height
image = Image.new("RGB", (w, h))
wh = w * h
maxIt = 256 # max number of iterations allowed
# drawing region (xa < xb & ya < yb)
xa = -2.0
xb = 1.0
ya = -1.5
yb = 1.5
xd = xb - xa
yd = yb - ya
numThr = 5 # number of threads to run
# lock = threading.Lock()
class ManFrThread(threading.Thread):
    def __init__(self, k):
self.k = k
threading.Thread.__init__(self)
def run(self):
# each thread only calculates its own share of pixels
        for i in range(self.k, wh, numThr):
kx = i % w
ky = int(i / w)
a = xa + xd * kx / (w - 1.0)
b = ya + yd * ky / (h - 1.0)
x = a
y = b
for kc in range(maxIt):
x0 = x * x - y * y + a
y = 2.0 * x * y + b
x = x0
if x * x + y * y > 4:
# various color palettes can be created here
red = (kc % 8) * 32
green = (16 - kc % 16) * 16
blue = (kc % 16) * 16
# lock.acquire()
global image
image.putpixel((kx, ky), (red, green, blue))
# lock.release()
break
if __name__ == "__main__":
tArr = []
for k in range(numThr): # create all threads
tArr.append(ManFrThread(k))
for k in range(numThr): # start all threads
tArr[k].start()
for k in range(numThr): # wait until all threads finished
tArr[k].join()
image.save("MandelbrotFractal.png", "PNG")
tyndare/osmose-backend | analysers/analyser_osmosis_highway_turn_lanes.py | Python | gpl-3.0 | 4,965 | 0.006448
#!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2016 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Osmosis import Analyser_Osmosis
sql10 = """
CREATE TEMP TABLE turn_lanes_ends AS
SELECT
DISTINCT ON (id)
ends(nodes) AS id
FROM
ways
WHERE
tags != ''::hstore AND
tags?'highway' AND
(
tags->'highway' = 'motorway' OR
(tags->'highway' = 'trunk' AND tags->'oneway' = 'yes')
) AND
tags?'turn:lanes'
"""
sql11 = """
CREATE INDEX idx_turn_lanes_ends_id ON turn_lanes_ends(id);
"""
sql12 = """
CREATE TEMP TABLE turn_lanes_steps AS
SELECT
turn_lanes_ends.id AS nid,
CASE ways.tags->'oneway'
WHEN '-1' THEN turn_lanes_ends.id != ways.nodes[1]
ELSE turn_lanes_ends.id = ways.nodes[1]
END AS start_end,
ways.id,
ways.tags
FROM
ways
JOIN turn_lanes_ends ON
turn_lanes_ends.id = ways.nodes[1] OR
turn_lanes_ends.id = ways.nodes[array_length(ways.nodes, 1)]
WHERE
ways.tags != ''::hstore AND
ways.tags?'highway' AND
(NOT ways.tags?'access' OR ways.tags->'access' != 'no')
"""
sql13 = """
CREATE TEMP TABLE sum_turn_lanes_steps AS
SELECT
nid,
start_end,
SUM(CASE
WHEN tags->'lanes' ~ E'^[0-9]+$' THEN (tags->'lanes')::integer
        WHEN tags?'turn:lanes' THEN array_length(string_to_array(tags->'turn:lanes', '|'), 1)
        WHEN tags->'highway' IN ('motorway', 'trunk') THEN 2
ELSE 1
END) AS lanes,
SUM(array_length(string_to_array(tags->'turn:lanes', 'slight_'), 1) - 1) AS lanes_slight,
SUM(array_length(string_to_array(tags->'turn:lanes', 'merge_to_'), 1) - 1) AS lanes_merge_to
FROM
turn_lanes_steps
GROUP BY
nid,
start_end
HAVING
BOOL_AND(tags->'highway' IN ('motorway', 'motorway_link') OR (tags->'highway' IN ('trunk', 'trunk_link') AND tags->'oneway' = 'yes'))
"""
sql14 = """
SELECT
nid,
ST_AsText(nodes.geom),
lin_lanes,
lin_lanes_merge_to,
lin_lanes_slight,
lout_lanes,
lout_lanes_merge_to,
lout_lanes_slight
FROM
(
SELECT
lin.nid,
lin.lanes AS lin_lanes,
lin.lanes_merge_to AS lin_lanes_merge_to,
lin.lanes_slight AS lin_lanes_slight,
lout.lanes AS lout_lanes,
lout.lanes_merge_to AS lout_lanes_merge_to,
lout.lanes_slight AS lout_lanes_slight
FROM
sum_turn_lanes_steps AS lin
JOIN sum_turn_lanes_steps AS lout ON
lin.nid = lout.nid AND
(
(
(lin.lanes_merge_to = 0 OR lin.lanes_merge_to IS NULL) AND
lout.lanes < lin.lanes
) OR (
lin.lanes_merge_to > 0 AND
NOT (
lout.lanes - lin.lanes_slight <= lin.lanes AND
lout.lanes - lin.lanes_slight - lout.lanes_merge_to <= lin.lanes - lin.lanes_merge_to + lout.lanes_slight
)
)
)
WHERE
NOT lin.start_end AND
lout.start_end
ORDER BY
        1 -- Just to force the query planner not to merge the sub and main requests
) AS t
JOIN nodes ON
nodes.id = nid AND
(NOT nodes.tags?'highway' OR nodes.tags->'highway' != 'traffic_signals')
"""
class Analyser_Osmosis_Highway_Turn_Lanes(Analyser_Osmosis):
def __init__(self, config, logger = None):
Analyser_Osmosis.__init__(self, config, logger)
self.classs[1] = {"item":"3160", "level": 2, "tag": ["highway", "fix:chair"], "desc": T_(u"Bad lanes number or lanes:turn before and after this node") }
def analyser_osmosis(self):
self.run(sql10)
self.run(sql11)
self.run(sql12)
self.run(sql13)
self.run(sql14, lambda res: {"class":1, "data":[self.node, self.positionAsText], "text": {"en": "lanes in %s(-%s+%s), lanes out %s(-%s+%s)" % (res[2], res[3] or 0, res[4] or 0, res[5], res[6] or 0, res[7] or 0) }})
oesteban/preprocessing-workflow | fmriprep/cli/sample_openfmri_tasks_list.py | Python | bsd-3-clause | 3,643 | 0.000549
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {output_dir}/{dataset_dir} participant \
-w {dataset_dir}/work --participant_label {participant_label} \
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
help='the root folder of a the openfmri dataset')
parser.add_argument('output_dir', action='store',
help='the directory where outputs should be stored')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('--nthreads', action='store', type=int,
help='number of total threads')
parser.add_argument('--omp_nthreads', action='store', type=int,
help='number of threads for OMP-based interfaces')
parser.add_argument('--mem-gb', action='store', type=int,
help='available memory in GB')
parser.add_argument('--tasks-list-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('-t', '--tasks-filter', action='store', nargs='*',
help='run only specific tasks')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
    if opts.nthreads:
        cmdline += ' --nthreads %d' % opts.nthreads
    if opts.omp_nthreads:
        cmdline += ' --omp-nthreads %d' % opts.omp_nthreads
    if opts.mem_gb:
        cmdline += ' --mem_mb %d' % (opts.mem_gb * 1000)
    if opts.tasks_filter:
        cmdline += ' -t %s' % ' '.join(opts.tasks_filter)
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
# Try to make this Python 2 compatible
try:
os.makedirs(opts.output_dir)
except OSError as e:
if e.errno != os.errno.EEXIST:
raise
for dset, sublist in sampledict.items():
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
output_dir=opts.output_dir,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.tasks_list_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
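# Example invocation (all paths and values hypothetical), matching the
# argparse definition above:
#   python sample_openfmri_tasks_list.py /data/openfmri /data/derivatives \
#       sample_schedule.yml --nthreads 8 --mem-gb 32 \
#       --tasks-list-file tasks_list.sh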
mr-ninja-snow/Self-Updating-Python-Program | setup.py | Python | gpl-3.0 | 334 | 0.005988
from distutils.core import setup
from program_version import RELEASE
setup(name='program',
version=RELEASE,
description='A self updating program example',
author='Mr Snow',
author_email='ninja@snow.com',
      url='https://github.com/mr-ninja-snow/Self-Updating-Python-Program.git',
packages=[],
)
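# Standard distutils usage for this script (for reference):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install into the active environment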
dario-chiappetta/Due | due/agent.py | Python | gpl-3.0 | 7,123 | 0.026534
"""
Due is a learning, modular, action-oriented dialogue agent. `Agents` are the
entities that can take part in Episodes (:mod:`due.episode`), receiving and
issuing Events (:mod:`due.event`).
"""
import uuid
from abc import ABCMeta, abstractmethod
from datetime import datetime
from due.event import Event
from due import episode
from due.util.python import dynamic_import
class Agent(metaclass=ABCMeta):
"""
Participants in an Episodes are called Agents. An Agent models an unique
identity through its ID, and can be served on a number of channels using
packages in :mod:`due.serve`.
Most importantly, Agent classes implement Natural Language Understanding
(NLU) and Generation (NLG) models, which are the core of the whole
conversational experience; they are meant to learn from Episodes coming from
a corpus, as well as from live conversations with humans or other agents.
:param agent_id: an unique ID for the Agent
:type agent_id: `str`
:param name: a human-friendly name for the Agent
:type name: `str`
"""
def __init__(self, agent_id=None):
self.id = agent_id if agent_id is not None else str(uuid.uuid1())
@abstractmethod
def save(self):
"""
Returns the Agent as an object. This object can be loaded with
:func:`Agent.load` and can be (de)serialized using the
:mod:`due.persistence` module.
A saved Agent must be a dictionary containing exactly the following items:
* `version`: version of the class who saved the agent (often `due.__version__`)
* `class`: absolute import name of the Agent class (eg. `due.models.dummy.DummyAgent`)
* `data`: saved agent data. Will be passed to the Agent constructor's `_data` parameter
:return: an object representing the Agent
:rtype: object
"""
pass
@staticmethod
def load(saved_agent):
"""
Loads an Agent from an object that was produced with the :meth:`Agent.save`
method.
:param saved_agent: an Agent, as it was saved by :meth:`Agent.save`
:type saved_agent: object
:return: an Agent
:rtype: `due.agent.Agent`
"""
class_ = dynamic_import(saved_agent['class'])
return class_(_data=saved_agent['data'])
@abstractmethod
def learn_episodes(self, episodes):
"""
Submit a list of Episodes for the :class:`Agent` to learn.
:param episodes: a list of episodes
:type episodes: `list` of :class:`due.episode.Episode`
"""
pass
def learn_episode(self, episode):
"""
Submit an Episode for the Agent to learn. By default, this just wraps a
call to :meth:`Agent.learn_episode`
:param episode: an Episode
:type episode: :class:`due.episode.Episode`
"""
self.learn_episodes([episode])
@abstractmethod
def new_episode_callback(self, new_episode):
"""
This is a callback method that is invoked whenever the Agent is invited
to join a new conversation (Episode) with another one.
Note that this is an **abstract method**: subclasses of :class:`Agent`
must implement their own.
:param new_episode: the new Episode that the other Agent has created
:type new_episode: :class:`due.episode.Episode`
"""
pass
def start_episode(self, other):
"""
Create a new :class:`due.episode.Episode` to engage another Agent in a
new conversation.
:param other_agent: The Agent you are inviting to the conversation.
:type other_agent: :class:`due.agent.Agent`
:return: a new Episode object
:rtype: :class:`due.episode.LiveEpisode`
"""
result = episode.LiveEpisode(self, other)
other.new_episode_callback(result)
return result
def event_callback(self, event, episode):
"""
This is a callback method that is invoked whenever a new Event is acted
in an Episode. This method acts as a proxy to specific Event type
handlers:
* :meth:`Agent.utterance_callback` (:class:`due.event.Event.Type.Utterance`)
* :meth:`Agent.action_callback` (:class:`due.event.Event.Type.Action`)
* :meth:`Agent.leave_callback` (:class:`due.event.Event.Type.Leave`)
:param event: The new Event
:type event: :class:`due.event.Event`
:param episode: The Episode where the Event was acted
:type episode: :class:`due.episode.Episode`
:return: A list of response Events
:rtype: `list` of :class:`due.event.Event`
"""
if event.type == Event.Type.Utterance:
result = self.utterance_callback(episode)
elif event.type == Event.Type.Action:
result = self.action_callback(episode)
elif event.type == Event.Type.Leave:
            result = self.leave_callback(episode)
if not result:
result = []
return result
@abstractmethod
def utterance_callback(self, episode):
"""
This is a callback method that is invoked whenever a new Utterance
Event is acted in an Episode.
:param episode: the Episode where the Utterance was acted
:type episode: `due.episode.Episode`
:return: A list of response Events
:rtype: `list` of :class:`due.event.Event`
"""
pass
@abstractmethod
def action_callback(self, episode):
"""
This is a callback method that is invoked whenever a new Action Event
is acted in an Episode.
:param episode: the Episode where the Action was acted
:type episode: `due.episode.Episode`
:return: A list of response Events
:rtype: `list` of :class:`due.event.Event`
"""
pass
@abstractmethod
def leave_callback(self, episode):
"""
This is a callback method that is invoked whenever a new Leave Event is
acted in an Episode.
:param episode: the Episode where the Leave Event was acted
:type episode: `due.episode.Episode`
:return: A list of response Events
:rtype: `list` of :class:`due.event.Event`
"""
pass
def act_events(self, events, episode):
"""
Act a sequence of Events in the given Episode.
:param events: a list of Events
:type events: `list` of :class:`due.event.Event`
:param episode: an Episode
:type episode: :class:`due.episode.Episode`
"""
for e in events:
if e.type == Event.Type.Action:
e.payload.run()
episode.add_event(e)
def say(self, sentence, episode):
"""
Create an Event out of the given sentence and act the new Event in
the given Episode. :class:`Agent` subclassed may need to extend this
implementation with some output operation (eg. print on screen,
broadcast to a jabber chat...).
:param sentence: A sentence
:type sentence: :class:`str`
:param episode: An Episode
:type episode: :class:`due.episode.Episode`
"""
utterance_event = Event(Event.Type.Utterance, datetime.now(), self.id, sentence)
episode.add_event(utterance_event)
def do(self, action, episode):
"""
Create an Event out of the given Action and acts the new Event in the
given Episode.
:param action: An Action
:type action: :class:`due.action.Action`
"""
action.run()
action_event = Event(Event.Type.Action, datetime.now(), self.id, action)
episode.add_event(action_event)
def leave(self, episode):
"""
Acts a new Leave Event in the given Episode.
:param episode: One of the Agent's active episodes
:type episode: :class:`due.episode.Episode`
"""
leave_event = Event(Event.Type.Leave, datetime.now(), self.id, None)
episode.add_event(leave_event)
def __str__(self):
return f"<Agent: {self.id}>"
DemocracyFoundation/Epitome | Agora/forms.py | Python | agpl-3.0 | 40 | 0.025
from django import forms
# future use
mindbaffle/ATF | Test/FunctionalTests/DomTreeEditorTestScripts/AddAllItems.py | Python | apache-2.0 | 20,874 | 0.007857
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
doc = atfDocService.OpenNewDocument(editor)
#===================== 0: root ==================================
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count")
package = editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count")
print "Trying to add objects that cannot be a child of the root"
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a font")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding a text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), treeLister.TreeView.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(treeLister.TreeView.DomNode)), "Verify root child count does not increase when adding an animation")
#===================== 1: Package ==================================
print "Adding children to a package"
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count")
form = editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), package.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding form")
shader = editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), package.DomNode)
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding shader")
texture = editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), package.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding texture")
font = editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), package.DomNode)
Test.Equal(4, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count after adding font")
packageChildCount = 4
print "Trying to add objects that cannot be a child of a package"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding package")
editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding sprite")
editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding text")
editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), package.DomNode)
Test.Equal(packageChildCount, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(package.DomNode)), "Verify package child count does not increase after adding animation")
#===================== 2: Form ==================================
print "Adding children to a form"
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count")
sprite = editingContext.Insert[UISprite](DomNode(UISchema.UISpriteType.Type), form.DomNode)
Test.Equal(1, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding sprite")
text = editingContext.Insert[UITextItem](DomNode(UISchema.UITextItemType.Type), form.DomNode)
Test.Equal(2, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding text")
animation = editingContext.Insert[UIAnimation](DomNode(UISchema.UIAnimationType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count after adding animation")
print "Trying to add objects that cannot be a child of a form"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), form.DomNode)
Test.Equal(3, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(form.DomNode)), "Verify form child count does not increase after adding a font")
#===================== 3: Shader ==================================
print "Verify cannot add children to a shader"
editingContext.Insert[UIPackage](DomNode(UISchema.UIPackageType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a package")
editingContext.Insert[UIForm](DomNode(UISchema.UIFormType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a form")
editingContext.Insert[UIShader](DomNode(UISchema.UIShaderType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a shader")
editingContext.Insert[UITexture](DomNode(UISchema.UITextureType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeLister.TreeView.GetChildren(shader.DomNode)), "Verify shader child count does not increase when adding a texture")
editingContext.Insert[UIFont](DomNode(UISchema.UIFontType.Type), shader.DomNode)
Test.Equal(0, Test.GetEnumerableCount(treeL
thunderhoser/GewitterGefahr | gewittergefahr/prediction_paper_2019/make_predictor_figure.py | Python | mit | 17,810 | 0.000842
"""Makes figure with GridRad and MYRORSS predictors."""
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import testing_io
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import radar_plotting
from gewitt
|
ergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
DUMMY_TARGET_NAME = 'tornado_lead-time=0000-3600sec_distance=00000-10000m'
SOUNDING_FIELD_NAMES = [
soundings.U_WIND_NAME, soundings.V_WIND_NAME,
soundings.TEMPERATURE_NAME, soundings.SPECIFIC_HUMIDITY_NAME,
soundings.PRESSURE_NAME
]
SOUNDING_HEIGHTS_M_AGL = soundings.DEFAULT_HEIGHT_LEVELS_M_AGL
NUM_GRIDRAD_ROWS = 32
NUM_GRIDRAD_COLUMNS = 32
RADAR_HEIGHTS_M_AGL = numpy.array([3000], dtype=int)
GRIDRAD_FIELD_NAMES = [
radar_utils.REFL_NAME, radar_utils.SPECTRUM_WIDTH_NAME,
radar_utils.VORTICITY_NAME, radar_utils.DIVERGENCE_NAME
]
NUM_MYRORSS_ROWS = 64
NUM_MYRORSS_COLUMNS = 64
MYRORSS_SHEAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
COLOUR_BAR_LENGTH = 0.8
DEFAULT_FONT_SIZE = 45
TITLE_FONT_SIZE = 45
COLOUR_BAR_FONT_SIZE = 45
SOUNDING_FONT_SIZE = 45
PANEL_LETTER_FONT_SIZE = 75
pyplot.rc('font', size=DEFAULT_FONT_SIZE)
pyplot.rc('axes', titlesize=DEFAULT_FONT_SIZE)
pyplot.rc('axes', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('xtick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('ytick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('legend', fontsize=DEFAULT_FONT_SIZE)
pyplot.rc('figure', titlesize=DEFAULT_FONT_SIZE)
X_LABEL_COORD_NORMALIZED = -0.02
Y_LABEL_COORD_NORMALIZED = 0.85
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
GRIDRAD_DIR_ARG_NAME = 'gridrad_example_dir_name'
GRIDRAD_ID_ARG_NAME = 'gridrad_full_id_string'
GRIDRAD_TIME_ARG_NAME = 'gridrad_time_string'
MYRORSS_DIR_ARG_NAME = 'myrorss_example_dir_name'
MYRORSS_ID_ARG_NAME = 'myrorss_full_id_string'
MYRORSS_TIME_ARG_NAME = 'myrorss_time_string'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
GRIDRAD_DIR_HELP_STRING = (
'Name of top-level directory with GridRad examples. Files therein will be '
'found by `input_examples.find_example_file` and read by '
'`input_examples.read_example_file`.')
GRIDRAD_ID_HELP_STRING = 'Full ID of GridRad storm object.'
GRIDRAD_TIME_HELP_STRING = (
'Valid time (format "yyyy-mm-dd-HHMMSS") of GridRad storm object.')
MYRORSS_DIR_HELP_STRING = 'Same as `{0:s}` but for MYRORSS.'.format(
GRIDRAD_DIR_ARG_NAME)
MYRORSS_ID_HELP_STRING = 'Same as `{0:s}` but for MYRORSS.'.format(
GRIDRAD_ID_ARG_NAME)
MYRORSS_TIME_HELP_STRING = 'Same as `{0:s}` but for MYRORSS.'.format(
GRIDRAD_TIME_ARG_NAME)
OUTPUT_DIR_HELP_STRING = (
'Name of output directory. Figures will be saved here.')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + GRIDRAD_DIR_ARG_NAME, type=str, required=True,
help=GRIDRAD_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + GRIDRAD_ID_ARG_NAME, type=str, required=True,
help=GRIDRAD_ID_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + GRIDRAD_TIME_ARG_NAME, type=str, required=True,
help=GRIDRAD_TIME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MYRORSS_DIR_ARG_NAME, type=str, required=True,
help=MYRORSS_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MYRORSS_ID_ARG_NAME, type=str, required=True,
help=MYRORSS_ID_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MYRORSS_TIME_ARG_NAME, type=str, required=True,
help=MYRORSS_TIME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING)
def _read_one_example(
top_example_dir_name, full_storm_id_string, storm_time_unix_sec,
source_name, radar_field_name, include_sounding):
"""Reads one example (storm object).
T = number of input tensors to model
H_s = number of heights in sounding
:param top_example_dir_name: See documentation at top of file.
:param full_storm_id_string: Full storm ID.
:param storm_time_unix_sec: Valid time of storm.
:param source_name: Radar source (must be accepted by
`radar_utils.check_data_source`).
:param radar_field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:param include_sounding: Boolean flag.
:return: predictor_matrices: length-T list of numpy arrays, where
the [i]th array is the [i]th input tensor to the model. The first axis
of each array has length = 1.
:return: model_metadata_dict: See doc for `cnn.write_model_metadata`.
    :return: sounding_pressures_pa: length-H_s numpy array of sounding
        pressures. If soundings were not read, this is None.
"""
if source_name == radar_utils.GRIDRAD_SOURCE_ID:
num_radar_rows = NUM_GRIDRAD_ROWS
num_radar_columns = NUM_GRIDRAD_COLUMNS
else:
num_radar_rows = NUM_MYRORSS_ROWS
num_radar_columns = NUM_MYRORSS_COLUMNS
training_option_dict = dict()
training_option_dict[trainval_io.RADAR_FIELDS_KEY] = [radar_field_name]
training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = (
SOUNDING_FIELD_NAMES if include_sounding else None
)
training_option_dict[trainval_io.SOUNDING_HEIGHTS_KEY] = (
SOUNDING_HEIGHTS_M_AGL
)
training_option_dict[trainval_io.NUM_ROWS_KEY] = num_radar_rows
training_option_dict[trainval_io.NUM_COLUMNS_KEY] = num_radar_columns
training_option_dict[trainval_io.NORMALIZATION_TYPE_KEY] = None
training_option_dict[trainval_io.TARGET_NAME_KEY] = DUMMY_TARGET_NAME
training_option_dict[trainval_io.BINARIZE_TARGET_KEY] = False
training_option_dict[trainval_io.SAMPLING_FRACTIONS_KEY] = None
training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
training_option_dict[trainval_io.UPSAMPLE_REFLECTIVITY_KEY] = False
model_metadata_dict = {
cnn.TRAINING_OPTION_DICT_KEY: training_option_dict,
cnn.LAYER_OPERATIONS_KEY: None,
}
print(MINOR_SEPARATOR_STRING)
example_dict = testing_io.read_predictors_specific_examples(
top_example_dir_name=top_example_dir_name,
desired_full_id_strings=[full_storm_id_string],
desired_times_unix_sec=numpy.array([storm_time_unix_sec], dtype=int),
option_dict=model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY],
layer_operation_dicts=None
)
predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
sounding_pressure_matrix_pa = example_dict[
testing_io.SOUNDING_PRESSURES_KEY]
if sounding_pressure_matrix_pa is None:
sounding_pressures_pa = None
else:
sounding_pressures_pa = sounding_pressure_matrix_pa[0, ...]
return predictor_matrices, model_metadata_dict, sounding_pressures_pa
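# Illustrative call (comment added for clarity; the directory name, storm ID
# and time below are placeholders, not values taken from this script):
#
# predictor_matrices, model_metadata_dict, sounding_pressures_pa = (
#     _read_one_example(
#         top_example_dir_name='/path/to/gridrad_examples',
#         full_storm_id_string='dummy_storm_id',
#         storm_time_unix_sec=0,
#         source_name=radar_utils.GRIDRAD_SOURCE_ID,
#         radar_field_name=radar_utils.REFL_NAME,
#         include_sounding=True)
# )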
def _run(gridrad_example_dir_name, gridrad_full_id_string, gridrad_time_string,
myrorss_example_dir_name, myrorss_full_id_string, myrorss_time_string,
output_dir_name):
"""Makes figure with GridRad and MYRORSS predictors.
This is effectively the main method.
:param gridrad_example_dir_name: See documentation at top of file.
:param gridrad_full_id_string: Same.
:param gridrad_time_string: Same.
:param myrorss_example_dir_name: Same.
:param myrorss_full_id_string: Same.
:param myrorss_time_string: Same.
:param output_dir_name: Same.
"""
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name)
gridrad_time_unix_sec = time_conve
|
staute/shinken_deb
|
shinken/util.py
|
Python
|
agpl-3.0
| 24,533
| 0.001141
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
import re
import copy
import sys
import os
import json
try:
from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
NodeSet = None
from shinken.macroresolver import MacroResolver
from shinken.log import logger
try:
stdout_encoding = sys.stdout.encoding
safe_stdout = (stdout_encoding == 'UTF-8')
except Exception, exp:
logger.error('Encoding detection error= %s', exp)
safe_stdout = False
# ########## Strings #############
# Try to print strings, but fall back to plain ASCII if there is a UTF-8 error
# (for example, if the terminal does not have en_US.UTF8 as LANG)
def safe_print(*args):
l = []
for e in args:
        # If we got a str, convert it to unicode; if we cannot print
        # UTF-8, fall back to ASCII mode
if isinstance(e, str):
if safe_stdout:
s = unicode(e, 'utf8', errors='ignore')
else:
s = e.decode('ascii', 'replace').encode('ascii', 'replace').\
decode('ascii', 'replace')
l.append(s)
# Same for unicode, but skip the unicode pass
elif isinstance(e, unicode):
if safe_stdout:
s = e
else:
s = e.encode('ascii', 'replace')
l.append(s)
# Other types can be directly convert in unicode
else:
l.append(unicode(e))
# Ok, now print it :)
print u' '.join(l)
def split_semicolon(line, maxsplit=None):
"""Split a line on semicolons characters but not on the escaped semicolons
"""
# Split on ';' character
splitted_line = line.split(';')
splitted_line_size = len(splitted_line)
    # if maxsplit is not specified, we set it to the number of parts
if maxsplit is None or 0 > maxsplit:
maxsplit = splitted_line_size
    # Join a part to the next one if it ends with a '\',
    # because we mustn't split on an escaped semicolon
i = 0
while i < splitted_line_size - 1:
        # for each part, check if it ends with a '\'
ends = splitted_line[i].endswith('\\')
if ends:
# remove the last character '\'
splitted_line[i] = splitted_line[i][:-1]
        # append the next part to the current one if this is not the last part
        # and either the current part ends with '\' or we are past maxsplit
if (ends or i >= maxsplit) and i < splitted_line_size - 1:
splitted_line[i] = ";".join([splitted_line[i], splitted_line[i + 1]])
# delete the next part
del splitted_line[i + 1]
splitted_line_size -= 1
# increase i only if we don't have append because after append the new
# string can end with '\'
else:
i += 1
return splitted_line
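# Usage sketch (comment added; not part of the original module):
#   split_semicolon('a;b\\;c;d')          -> ['a', 'b;c', 'd']
#   split_semicolon('a;b;c', maxsplit=1)  -> ['a', 'b;c']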
# Json-ify the objects
def jsonify_r(obj):
res = {}
cls = obj.__class__
if not hasattr(cls, 'properties'):
try:
json.dumps(obj)
return obj
except Exception, exp:
return None
properties = cls.properties.keys()
if hasattr(cls, 'running_properties'):
properties += cls.running_properties.keys()
for prop in properties:
if not hasattr(obj, prop):
continue
v = getattr(obj, prop)
# Maybe the property is not jsonable
try:
if isinstance(v, set):
v = list(v)
if isinstance(v, list):
v = sorted(v)
json.dumps(v)
res[prop] = v
except Exception, exp:
if isinstance(v, list):
lst = []
for _t in v:
t = getattr(_t.__class__, 'my_type', '')
if t == 'CommandCall':
try:
lst.append(_t.call)
except Exception:
pass
continue
if t and hasattr(_t, t + '_name'):
lst.append(getattr(_t, t + '_name'))
else:
pass
# print "CANNOT MANAGE OBJECT", _t, type(_t), t
res[prop] = lst
else:
t = getattr(v.__class__, 'my_type', '')
if t == 'CommandCall':
try:
res[prop] = v.call
except Exception:
pass
continue
if t and hasattr(v, t + '_name'):
res[prop] = getattr(v, t + '_name')
# else:
# print "CANNOT MANAGE OBJECT", v, type(v
|
), t
return res
# ################################## TIME ##################################
# @memoized
def get_end_of_day(year, month_id, day):
end_time = (year, month_id, day, 23, 59, 59, 0, 0, -1)
end_time_epoch = time.mktime(end_time)
return end_time_epoch
# @memoized
def print_date(t):
return time.asctime(time.localtime(t))
# @memoized
def get_day(t):
return int(t - get_sec_from_morning(t))
# Same but for week day
def get_wday(t):
t_lt = time.localtime(t)
return t_lt.tm_wday
# @memoized
def get_sec_from_morning(t):
t_lt = time.localtime(t)
h = t_lt.tm_hour
m = t_lt.tm_min
s = t_lt.tm_sec
return h * 3600 + m * 60 + s
# @memoized
def get_start_of_day(year, month_id, day):
start_time = (year, month_id, day, 00, 00, 00, 0, 0, -1)
try:
start_time_epoch = time.mktime(start_time)
except OverflowError:
# Windows mktime sometimes crashes on (1970, 1, 1, ...)
start_time_epoch = 0.0
return start_time_epoch
# change a time in seconds like 3600 into a format: 0d 1h 0m 0s
def format_t_into_dhms_format(t):
s = t
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return '%sd %sh %sm %ss' % (d, h, m, s)
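# Worked example (comment added): format_t_into_dhms_format(90061)
# returns '1d 1h 1m 1s' (90061 = 1*86400 + 1*3600 + 1*60 + 1).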
# ################################ Pythonization ###########################
# convert to float first so that, for example, '25.0' can become 25
def to_int(val):
return int(float(val))
def to_float(val):
return float(val)
def to_char(val):
return val[0]
def to_split(val, split_on_coma=True):
if isinstance(val, list):
return val
if not split_on_coma:
return [val]
val = val.split(',')
if val == ['']:
val = []
return val
def list_split(val, split_on_coma=True):
if not split_on_coma:
return val
new_val = []
for x in val:
new_val.extend(x.split(','))
return new_val
def to_best_int_float(val):
i = int(float(val))
f = float(val)
# If the f is a .0 value,
# best match is int
if i == f:
return i
return f
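# Examples (comment added): to_best_int_float('25.0') returns the int 25,
# while to_best_int_float('25.5') returns the float 25.5.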
# bool('0') = true, so...
def to_bool(val):
if val == '1' or val == 'on' or val == 'true' or val == 'True':
return True
else:
return False
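# Note (comment added): only '1', 'on', 'true' and 'True' map to True here;
# variants such as 'yes' or 'TRUE' fall through to False.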
def from_bool_to_string(b):
if b:
return '1'
else:
return '0'
def from_bool_to_int(b):
if b:
return 1
else:
return 0
def from_list_to_split(val):
val = ','.join(['%s' % v for v in val])
return val
def from_float_to_int(val):
val = int(val)
return val
# Functions for bro
|
mgraupe/acq4
|
acq4/modules/DataManager/FileDataView.py
|
Python
|
mit
| 3,567
| 0.00841
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from acq4.util.DataManager import *
#import acq4.Manager as Manager
import acq4.pyqtgraph as pg
#from acq4.pyqtgraph.MultiPlotWidget import MultiPlotWidget
#from acq4.pyqtgraph.ImageView import ImageView
from acq4.util.DictView import *
import acq4.util.metaarray as metaarray
import weakref
class FileDataView(QtGui.QSplitter):
def __init__(self, parent):
QtGui.QSplitter.__init__(self, parent)
#self.manager = Manager.getManager()
self.setOrientation(QtCore.Qt.Vertical)
self.current = None
self.currentType = None
self.widgets = []
self.dictWidget = None
#self.plots = []
def setCurrentFile(self, file):
#print "=============== set current file ============"
if file is self.current:
return
## What if we just want to update the data display?
#self.clear()
if file is None:
self.current = None
return
if file.isDir():
## Sequence or not?
return
else:
typ = file.fileType()
if typ is None:
return
else:
image = False
with pg.BusyCursor():
data = file.read()
if typ == 'ImageFile':
image = True
elif typ == 'MetaArray':
if data.ndim == 2 and not data.axisHasColumns(0) and not data.axisHasColumns(1):
image = True
elif data.ndim > 2:
image = True
else:
return
with pg.BusyCursor():
if image:
if self.currentType == 'image' and len(self.widgets) > 0:
                    try:
                        self.widgets[0].setImage(data, autoRange=False)
except:
print "widget types:", map(type, self.widgets)
raise
else:
self.clear()
w = pg.ImageView(self)
#print "add image:", w.ui.roiPlot.plotItem
#self.plots = [weakref.ref(w.ui.roiPlot.plotItem)]
self.addWidget(w)
                    w.setImage(data)
self.widgets.append(w)
self.currentType = 'image'
else:
self.clear()
w = pg.MultiPlotWidget(self)
self.addWidget(w)
w.plot(data)
self.currentType = 'plot'
self.widgets.append(w)
#print "add mplot:", w.mPlotItem.plots
#self.plots = [weakref.ref(p[0]) for p in w.mPlotItem.plots]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
if self.dictWidget is None:
w = DictView(data._info)
self.dictWidget = w
#w.setText(str(data._info[-1]))
self.addWidget(w)
self.widgets.append(w)
h = self.size().height()
self.setSizes([h*0.8, h*0.2])
else:
self.dictWidget.setData(data._info)
def clear(self):
for w in self.widgets:
w.close()
w.setParent(None)
self.widgets = []
self.dictWidget = None
|
Jcing95/iop-hd
|
test/functional/mining.py
|
Python
|
mit
| 5,574
| 0.002153
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import IoPTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
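# Illustrative use (comment added): a structurally valid block proposal makes
# getblocktemplate return None, so a passing check is
# assert_template(node, block, None), as exercised in run_test() below.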
class MiningTest(IoPTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
|
makhidkarun/py_tools
|
lib/ship_crew.py
|
Python
|
gpl-3.0
| 852
| 0.026995
|
"""
ship_crew.py
Generates a minimal ship crew based on tonnage.
python crew -s 400
"""
from __future__ import print_function
import random
import sys
sys.path.append(".")
from character import Character
import character_tools
def get_career():
    return random.choice(['Scouts', 'Navy', 'Merchants'])
def create_crew(size):
for c in range(int(size/400)):
create_crewman("Pilot")
create_crewman("Navg")
for c in range(int(size/300)):
create_crewman("Eng")
def create_crewman(role):
if role == "Eng":
skill = "Engineering"
elif role == "Navg":
skill = "Navgigation"
elif role == "Helm":
skill = "Pilot"
else:
skill = "Computer"
crew = Character()
crew.generate_basic()
crew.run_career(get_career())
character_tools.add_skill(crew, skill)
print(role, end="
|
")
crew.display()
print("")
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Noders/Grouper/Drafts/__init__ copy.py
|
Python
|
mit
| 4,755
| 0.04837
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Grouper establishes a group of parenting nodes for which
each level is set in an equivalent hdf5 structure.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Hdformaters.Hdformater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import functools
from ShareYourSystem.Standards.Classors import Doer
from ShareYourSystem.Functers import Switcher
#</ImportSpecificModules>
#<DefineFunctions>
def getGroupedPathStrWithPathStrsList(_PathStrsList):
#Reduce
PathStr=functools.reduce(
lambda _TotalPathStr,_PathStr:
_TotalPathStr+_PathStr
if (len(_TotalPathStr)>0 and _TotalPathStr[-1]=='/') and (len(_PathStr)>0 and _PathStr[0]!='/'
) or (len(_TotalPathStr)>0 and _TotalPathStr[-1]!='/') and (len(_PathStr)>0 and _PathStr[0]=='/')
else
_TotalPathStr[:-1]+_PathStr
if (len(_TotalPathStr)>0 and _TotalPathStr[-1]=='/') and (len(_PathStr)>0 and _PathStr[0]=='/'
)
else _TotalPathStr+'/'+_PathStr
if '/' not in [_PathStr,_TotalPathStr]
else "",
_PathStrsList
)
#Maybe add / at the beginning
if (len(PathStr)>0 and PathStr[0]!='/') or PathStr=="":
PathStr='/'+PathStr
#Return
return PathStr
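#Illustrative result (comment added, not in the original module):
#getGroupedPathStrWithPathStrsList(['/Group','Child']) returns '/Group/Child'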
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class GrouperClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'GroupedParentVariable',
'GroupedInt',
'GroupedKeyStr',
'GroupedDeriveParentersList',
'GroupedPathStrsList',
'GroupedPathStr'
	]
	#@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.__init__}]})
def default_init(
self,
_GroupedParentVariable=None,
_GroupedInt=-1,
_GroupedKeyStr="",
_GroupedDeriveParentersList=None,
_GroupedPathStrsList=None,
_GroupedPathStr="/",
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
#set
self.HdformatingFileKeyStr=SYS.InflectEngine.plural(
Doer.getDoStrWithDoerStr(
self.__class__.NameStr
)
)+'.hdf5'
def do_group(self):
#debug
'''
self.debug(('self.',self,['ParentingNodeStr']))
'''
#Parent
self.parent()
#Check
if len(self.ParentedDeriveParentersList)>0:
UppestParentPointer=self.ParentedDeriveParentersList[-1]
else:
UppestParentPointer=self
#Then get also from the UppestParentPointer its UppestGroupedParentVariable
if hasattr(UppestParentPointer,'GroupedDeriveParentersList'):
if len(UppestParentPointer.GroupedDeriveParentersList)>0:
				UppestGroupedParentVariable=UppestParentPointer.GroupedDeriveParentersList[-1]
else:
UppestGroupedParentVariable=UppestParentPointer
#Definition of the Link
		HdformatedFileVariableKeyStr="HdformatedFileVariable"
#debug
#self.debug('UppestParentPointer.GroupingPathStr is '+UppestParentPointer.GroupingPathStr)
#Point on the FilePointer of the uppest grouped Parent
self.__setattr__(
HdformatedFileVariableKeyStr,
getattr(
UppestGroupedParentVariable,
"HdformatedFileVariable"
)
)
#Get it definitely !
FilePointer=getattr(self,HdformatedFileVariableKeyStr)
#debug
#print('FilePointer is ',FilePointer)
#Create a group in the hdf5 file
if FilePointer!=None:
#debug
'''
self.debug(('self.',self,['NodedPathStr']))
'''
#set the GroupedPathStr
self.GroupedPathStr=getGroupedPathStrWithPathStrsList(
[
UppestGroupedParentVariable.GroupedPathStr,
self.ParentedNodePathStr
]
)
#debug
'''
self.debug(('self.',self,['GroupedPathStr']))
'''
#Check if the Path exists
if self.GroupedPathStr not in FilePointer:
#set all the intermediate Paths before
PathStrsList=self.GroupedPathStr.split('/')[1:]
ParsingChildPathStr="/"
#set the PathStr from the top to the down (integrativ loop)
for ChildPathStr in PathStrsList:
#Go deeper
NewParsingChildPathStr=ParsingChildPathStr+ChildPathStr
#Create the group if not already
if NewParsingChildPathStr not in FilePointer:
if self.HdformatingModuleStr=="tables":
FilePointer.create_group(ParsingChildPathStr,ChildPathStr)
elif self.HdformatingModuleStr=="h5py":
Group=FilePointer[ParsingChildPathStr]
Group.create_group(ChildPathStr)
#Prepare the next group
ParsingChildPathStr=NewParsingChildPathStr+'/'
#Return self
return self
#</DefineClass>
|
sysadminmatmoz/odoo-clearcorp
|
account_exchange_rates_adjustment/__init__.py
|
Python
|
agpl-3.0
| 1,086
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_exchange_rates_adjustment
import wizard
|
legoktm/pywikipedia-rewrite
|
scripts/i18n/isbn.py
|
Python
|
mit
| 10,492
| 0.047954
|
# -*- coding: utf-8 -*-
msg = {
'en': {
'isbn-formatting': u'Robot: Formatting ISBN',
},
# Author: Csisc
# Author: Lloffiwr
# Author: Xqt
'qqq': {
'isbn-formatting': u'Edit summary when the bot fixes [http://en.wikipedia.org/wiki/International_Standard_Book_Number ISBN] number formatting.',
},
# Author: Csisc
'aeb': {
'isbn-formatting': u'روبوت: تنسيق ISBN',
},
# Author: Naudefj
'af': {
'isbn-formatting': u'Robot: Formatteer ISBN',
},
# Author: Als-Holder
'als': {
'isbn-formatting': u'Bot: ISBN formatiert',
},
# Author: Meno25
# Author: OsamaK
'ar': {
'isbn-formatting': u'روبوت: تنسيق ISBN',
},
# Author: Esbardu
# Author: Xuacu
'ast': {
'isbn-formatting': u'Robó: Formatiando l\'ISBN',
},
# Author: Khutuck
'az': {
'isbn-formatting': u'Bot: ISBN formatlandırılır',
},
# Author: E THP
'azb': {
'isbn-formatting': u'بوت:شابکلری ایستاندارد ائتمک',
},
# Author: Sagan
'ba': {
'isbn-formatting': u'Робот: ISBN үҙгәртеү',
},
# Author: Mucalexx
'bar': {
'isbn-formatting': u'Bot: Formaatir ISBN',
},
# Author: Yury Tarasievich
'be': {
'isbn-formatting': u'робат аформіў ISBN',
},
# Author: Jim-by
'be-x-old': {
'isbn-formatting': u'Робат: фарматаваньне ISBN',
},
# Author: DCLXVI
'bg': {
'isbn-formatting': u'Робот: Форматиране на ISBN',
},
# Author: Riemogerz
'bjn': {
'isbn-formatting': u'Bot: Mampurmat ISBN',
},
# Author: Bellayet
# Author: Wikitanvir
'bn': {
'isbn-formatting': u'বট: আইএসবিএন নম্বরের ফরম্যাট ঠিক করছে',
},
# Author: Fulup
'br': {
'isbn-formatting': u'Robot : O furmadiñ an ISBN',
},
# Author: CERminator
# Author: Edinwiki
'bs': {
'isbn-formatting': u'Bot: Oblikovanje ISBN',
},
# Author: SMP
'ca': {
'isbn-formatting': u'Robot: Format de l\'ISBN',
},
# Author: Asoxor
'ckb': {
'isbn-formatting': u'ڕۆبۆت: ڕاستکردنەوەی شێوازی ISBN',
},
# Author: Dontlietome7
'cs': {
'isbn-formatting': u'Robot: Formátování ISBN',
},
# Author: Salam
'cv': {
'isbn-formatting': u'робот: ISBN улӑштарни',
},
# Author: Lloffiwr
# Author: Xxglennxx
'cy': {
'isbn-formatting': u'Robot: Yn fformatio\'r rhif ISBN',
},
# Author: Peter Alberti
'da': {
'isbn-formatting': u'Robot: Formaterer ISBN',
},
'de': {
'isbn-formatting': u'Bot: Formatiere ISBN',
},
# Author: Eruedin
'de-ch': {
'isbn-formatting': u'Bot: Formatiere ISBN',
},
# Author: Erdemaslancan
'diq': {
'isbn-formatting': u'Boti ISBN\'i timar kerd',
},
# Author: Evropi
'el': {
'isbn-formatting': u'Ρομπότ: Μορφοποίηση ISBN',
},
# Author: Mihxil
# Author: Objectivesea
'eo': {
'isbn-formatting': u'Roboto: Aranĝas la ISBN',
},
# Author: Dferg
# Author: Invadinado
# Author: Xqt
'es': {
'isbn-formatting': u'Bot: Estandarizando ISBN',
},
# Author: Pikne
'et': {
'isbn-formatting': u'Robot: ISBN vormindatud',
},
# Author: An13sa
# Author: Xabier Armendaritz
'eu': {
'isbn-formatting': u'Robota: ISBNari formatua ematen',
},
# Author: ZxxZxxZ
'fa': {
'isbn-formatting': u'ربات: استانداردسازی شابک',
},
# Author: Crt
'fi': {
'isbn-formatting': u'Botti muotoili ISBN-tunnuksen',
},
# Author: EileenSanda
'fo': {
'isbn-formatting': u'Bottur: Formaterar ISBN',
},
# Author: Sherbrooke
'fr': {
'isbn-formatting': u'Robot : Mise en forme du ISBN',
},
# Author: ChrisPtDe
'frp': {
'isbn-formatting': u'Robot : misa en fôrma du ISBN',
},
# Author: Murma174
'frr': {
'isbn-formatting': u'Bot: Formatiare ISBN',
},
# Author: Klenje
'fur': {
'isbn-formatting': u'Robot: o formati il codiç ISBN',
},
# Author: Toliño
'gl': {
'isbn-formatting': u'Bot: Dou formato ISBN',
},
# Author: Jetlag
'hak': {
'isbn-formatting': u'機械人:格式化ISBN',
},
# Author: YaronSh
'he': {
'isbn-formatting': u'בוט: מעצב מסת״ב',
},
# Author: Ex13
'hr': {
'isbn-formatting': u'Bot: Oblikovanje ISBN',
},
# Author: Michawiki
'hsb': {
'isbn-formatting': u'Boćik: ISBN so formatuje',
},
# Author: Dani
'hu': {
'isbn-formatting': u'Bot: ISBN formázása',
},
# Author: Xelgen
'hy': {
'isbn-formatting': u'Ռոբոտը ուղղում է ԳՄՍՀի (ISBN) ձևաչափը',
},
# Author: McDutchie
'ia': {
'isbn-formatting': u'Robot: Formatation ISBN',
},
# Author: IvanLanin
'id': {
'isbn-formatting': u'Bot: Memformat ISBN',
},
# Author: Renan
'ie': {
'isbn-formatting': u'Machine: Formatant ISBN',
},
# Author: Lam-ang
'ilo': {
'isbn-formatting': u'Robot: Agiporpormat ti ISBN',
},
# Author: Snævar
'is': {
'isbn-formatting': u'Vélmenni: Forsnið ISBN',
},
    # Author: Beta16
    'it': {
'isbn-formatting': u'Bot: Formatto ISBN',
},
# Author: Fryed-peach
# Author: Shirayuki
'ja': {
'isbn-formatting': u'ロボットによる: ISBN の整形',
},
    # Author: NoiX180
    'jv': {
'isbn-formatting': u'Bot: Mormat ISBN',
},
# Author: 아라
'ko': {
'isbn-formatting': u'로봇: ISBN 형식 지정',
},
# Author: Purodha
'ksh': {
'isbn-formatting': u'Bot: ISBN zerääsch jemaat.',
},
# Author: George Animal
'ku': {
'isbn-formatting': u'Robot:ISBN\'ê format bike',
},
# Author: Robby
'lb': {
'isbn-formatting': u'Bot: ISBN formatéiert',
},
# Author: Pahles
'li': {
'isbn-formatting': u'Robot: ISBN opgemaak',
},
# Author: Hugo.arg
'lt': {
'isbn-formatting': u'Robotas: Formatuojamas ISBN',
},
# Author: Karlis
'lv': {
'isbn-formatting': u'Robots: ISBN formatējums',
},
# Author: StefanusRA
'map-bms': {
'isbn-formatting': u'Bot: Mbeneri format ISBN',
},
# Author: Jagwar
'mg': {
'isbn-formatting': u'Rôbô : manao formatage ny ISBN',
},
# Author: Luthfi94
'min': {
'isbn-formatting': u'Bot: Mamformat ISBN',
},
# Author: Bjankuloski06
'mk': {
'isbn-formatting': u'Робот: Форматирам ISBN',
},
# Author: Praveenp
'ml': {
'isbn-formatting': u'യന്ത്രം: ഐ.എസ്.ബി.എൻ. ശൈലി ശരിയാക്കുന്നു',
},
# Author: Htt
'mr': {
'isbn-formatting': u'सांगकाम्या: आयएसबीएन स्वरूपण',
},
# Author: Kurniasan
'ms': {
'isbn-formatting': u'Bot: Memformatkan ISBN',
},
# Author: Chrisportelli
'mt': {
'isbn-formatting': u'Bot: Format ISBN',
},
# Author: Lionslayer
'my': {
'isbn-formatting': u'ရိုဘော့ - ISBN နံပါတ်ကို ပုံစံချနေသည်',
},
# Author: Slomox
'nds': {
'isbn-formatting': u'Bot: ISBN-Format',
},
# Author: Servien
'nds-nl': {
'isbn-formatting': u'Bot: ISBN op-emaakt',
},
# Author: RajeshPandey
'ne': {
'isbn-formatting': u'रोबोट: ISBN मिलाउँदै',
},
'nl': {
'isbn-formatting': u'Robot: ISBN opgemaakt',
},
# Author: Harald Khan
# Author: Njardarlogar
'nn': {
'isbn-formatting': u'robot: formaterer ISBN',
},
# Author: Jon Harald Søby
'no': {
'isbn-formatting': u'robot: Formaterer ISBN',
},
# Author: Bouron
'os': {
'isbn-formatting': u'Робот: фæивта ISBN',
},
# Author: Sp5uhe
'pl': {
'isbn-formatting': u'Robot sformatował numer ISBN',
},
# Author: Borichèt
'pms': {
'isbn-formatting': u'Trigomiro: Formassion ëd l\'ISBN',
},
# Author: Hamilton Abreu
'pt': {
'isbn-formatting': u'Robô: A formatar o ISBN',
},
# Author: Hamilton Abreu
# Author: Helder.wiki
# Author: 555
'pt-br': {
'isbn-formatting': u'Bot: Formatando ISBN',
},
# Author: Minisarm
'ro': {
'isbn-formatting': u'Robot: Formatat codul ISBN',
},
# Author: Volkov
# Author: Александр Сигачёв
'ru': {
'isbn-formatting': u'бот: преобразование ISBN',
},
# Author: Gazeb
'rue': {
'isbn-formatting': u'Робот: Форматованя ISBN',
},
# Author: Avicennasis
'sco': {
'isbn-formatting': u'Robot: Formatting ISBN',
},
# Author: බිඟුවා
'si': {
'isbn-formatting': u'රොබෝ: ISBN ආකෘතිකරණය',
},
# Author: Wizzard
'sk': {
'isbn-formatting': u'Robot: Formátovanie ISBN',
},
# Author: Dbc334
'sl': {
'isbn-formatting': u'Robot: Oblikovanje ISBN',
},
# Author: Abshirdheere
'so': {
'isbn-formatting': u'Bot: Habayn ISBN',
},
# Author: Vinie007
'sq': {
'isbn-formatting': u'Robot: ISBN Formatimi',
},
# Author: Rancher
'sr': {
'isbn-formatting': u'Робот: обликовање ISBN-а',
},
# Author: Rancher
'sr-el': {
'isbn-formatting': u'Robot: oblikovanje ISBN-a',
},
# Author: Ainali
'sv': {
'isbn-formatting': u'Robot: Formaterar ISBN',
},
# Author: Przemub
'szl': {
'isbn-formatting': u'Robot: ISBN',
},
# Author: செல்வா
'ta': {
'isbn-formatting
|
alexanderfefelov/nav
|
python/nav/web/portadmin/views.py
|
Python
|
gpl-2.0
| 20,814
| 0
|
#
# Copyright 2010 (C) Norwegian University of Science and Technology
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""View controller for PortAdmin"""
import simplejson
import logging
from operator import or_ as OR
from django.http import HttpResponse
from django.template import RequestContext, Context
from django.shortcuts import render_to_response
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from nav.django.utils import get_account
from nav.web.utils import create_title
from nav.models.manage import Netbox, Interface
from nav.web.portadmin.utils import (get_and_populate_livedata,
find_and_populate_allowed_vlans,
get_aliastemplate, get_ifaliasformat,
save_to_database,
check_format_on_ifalias,
find_allowed_vlans_for_user_on_netbox,
find_allowed_vlans_for_user,
filter_vlans, fetch_voice_vlans,
should_check_access_rights,
mark_detained_interfaces)
from nav.Snmp.errors import SnmpError, TimeOutException
from nav.portadmin.snmputils import SNMPFactory
from .forms import SearchForm
_logger = logging.getLogger("nav.web.portadmin")
def get_base_context(additional_paths=None, form=None):
"""Returns a base context for portadmin
:type additional_paths: list of tuple
"""
    navpath = [('Home', '/'), ('PortAdmin', reverse('portadmin-index'))]
if additional_paths:
navpath += additional_paths
form = form if form else SearchForm()
return {
'header': {'name': 'PortAdmin',
'description': 'Configure interfaces on ip devices'},
'navpath': navpath,
        'title': create_title(navpath),
'form': form
}
def index(request):
"""View for showing main page"""
netboxes = []
interfaces = []
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
netboxes, interfaces = search(form.cleaned_data['query'])
if len(netboxes) == 1 and not interfaces:
return search_by_sysname(request, netboxes[0].sysname)
elif len(interfaces) == 1 and not netboxes:
return search_by_interfaceid(request, interfaces[0].id)
else:
form = SearchForm()
context = get_base_context(form=form)
context['netboxes'] = netboxes
context['interfaces'] = interfaces
return render_to_response('portadmin/base.html',
context,
RequestContext(request))
def search(query):
"""Search for something in portadmin"""
netbox_filters = [
Q(sysname__icontains=query),
Q(ip=query)
]
netboxes = Netbox.objects.filter(
reduce(OR, netbox_filters)).order_by('sysname')
interfaces = Interface.objects.filter(
ifalias__icontains=query).order_by('netbox__sysname', 'ifname')
return netboxes, interfaces
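# Illustrative behaviour (comment added): search('sw1') returns every netbox
# whose sysname contains 'sw1' (or whose IP equals the query), plus every
# interface whose ifalias contains 'sw1'.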
def search_by_ip(request, ip):
"""View for showing a search done by ip-address"""
info_dict = get_base_context()
account = get_account(request)
try:
netbox = Netbox.objects.get(ip=ip)
except Netbox.DoesNotExist, do_not_exist_ex:
_logger.error("Netbox with ip %s not found; DoesNotExist = %s",
ip, do_not_exist_ex)
messages.error(request,
'Could not find netbox with ip-address %s' % str(ip))
return render_to_response('portadmin/base.html',
info_dict,
RequestContext(request))
else:
interfaces = netbox.get_swports_sorted()
info_dict = populate_infodict(request, account, netbox, interfaces)
return render_to_response(
'portadmin/netbox.html',
info_dict,
RequestContext(request))
def search_by_sysname(request, sysname):
"""View for showing a search done by sysname"""
info_dict = get_base_context()
account = get_account(request)
try:
netbox = Netbox.objects.get(sysname=sysname)
except Netbox.DoesNotExist, do_not_exist_ex:
_logger.error("Netbox %s not found; DoesNotExist = %s",
sysname, do_not_exist_ex)
messages.error(request,
'Could not find netbox with sysname %s' % sysname)
return render_to_response('portadmin/base.html',
info_dict,
RequestContext(request))
else:
interfaces = netbox.get_swports_sorted()
info_dict = populate_infodict(request, account, netbox, interfaces)
return render_to_response('portadmin/netbox.html',
info_dict,
RequestContext(request))
def search_by_interfaceid(request, interfaceid):
"""View for showing a search done by interface id"""
info_dict = get_base_context()
account = get_account(request)
try:
interface = Interface.objects.get(id=interfaceid)
except Interface.DoesNotExist, do_not_exist_ex:
_logger.error("Interface %s not found; DoesNotExist = %s",
interfaceid, do_not_exist_ex)
messages.error(request,
'Could not find interface with id %s' %
str(interfaceid))
return render_to_response('portadmin/base.html',
info_dict,
RequestContext(request))
else:
netbox = interface.netbox
interfaces = [interface]
info_dict = populate_infodict(request, account, netbox, interfaces)
return render_to_response('portadmin/netbox.html',
info_dict,
RequestContext(request))
def populate_infodict(request, account, netbox, interfaces):
"""Populate a dictionary used in every http response"""
allowed_vlans = []
voice_vlan = None
readonly = False
try:
fac = get_and_populate_livedata(netbox, interfaces)
allowed_vlans = find_and_populate_allowed_vlans(account, netbox,
interfaces, fac)
voice_vlan = fetch_voice_vlan_for_netbox(request, fac)
mark_detained_interfaces(interfaces)
except TimeOutException:
readonly = True
messages.error(request, "Timeout when contacting %s. Values displayed "
"are from database" % netbox.sysname)
if not netbox.read_only:
messages.error(request, "Read only community not set")
except SnmpError:
readonly = True
messages.error(request, "SNMP error when contacting %s. Values "
"displayed are from database" % netbox.sysname)
if check_read_write(netbox, request):
readonly = True
ifaliasformat = get_ifaliasformat()
aliastemplate = ''
if ifaliasformat:
tmpl = get_aliastemplate()
aliastemplate = tmpl.render(Context({'ifaliasformat': ifaliasformat}))
save_to_database(interfaces)
if voice_vlan:
set_voice_vlan_attribute(voice_vlan, interfaces)
info_dict = get_base_context([(netbox.sysname, )])
info_dict.update({'interfaces': interfaces,
|
Tatsh-ansible/ansible
|
lib/ansible/plugins/action/include_vars.py
|
Python
|
gpl-3.0
| 10,206
| 0.001372
|
# (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name']
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, str):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
# convert/validate extensions list
if isinstance(self.valid_extensions, string_types):
self.valid_extensions = list(self.valid_extensions)
if not isinstance(self.valid_extensions, list):
raise AnsibleError('Invalid type for "extensions" option, it must be a list')
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
if task_vars is None:
task_vars = dict()
self.show_content = True
self.included_files = []
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
                raise AnsibleError('{0} is not a valid option in include_vars'.format(arg))
if dirs and files:
raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")
# set internal vars from args
self._set_args()
results = dict()
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if path.exists(self.source_dir):
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
if failed:
break
results.update(updated_results)
else:
failed = True
err_msg = ('{0} directory does not exist'.format(self.source_dir))
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(tmp, task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
            alphabetical order. Do not iterate past the set depth.
The default depth is unlimited.
"""
current_depth = 0
sorted_walk = list(walk(self.source_dir))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
current_depth += 1
if current_depth <= self.depth or self.depth == 0:
current_files.sort()
yield (current_root, current_files)
else:
break
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
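    # Illustrative behaviour (comment added): with ignore_files = ['\.bak'],
    # _ignore_file('vars.yml.bak') is True and _ignore_file('vars.yml') is False.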
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
e
|
tylereaves/26md
|
setlist/migrations/0013_show2.py
|
Python
|
bsd-3-clause
| 970
| 0.002062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('setlist', '0012_remove_show_leg'),
]
operations = [
migrations.CreateModel(
name='Show2',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('venue', models.ForeignKey(to='setlist.Venue', to_field='id')),
('tour', models.ForeignKey(to='setlist.Tour', to_field='id')),
                ('date', models.DateField(db_index=True)),
('setlist', models.TextField(default=b'', blank=True)),
('notes', models.TextField(default=b'', blank=True)),
('source', models.TextField(default=b'', blank=True)),
],
options={
},
bases=(models.Model,),
),
]
|
sio2project/oioioi
|
oioioi/disqualification/urls.py
|
Python
|
gpl-3.0
| 139
| 0
|
# Force loading views
from oioioi.disqualification.views import disqualification_fragment
app_name = 'disqualification'
urlpatterns = ()
|
etalab/cowbots
|
email_changes.py
|
Python
|
agpl-3.0
| 16,025
| 0.016047
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CowBots -- Error detection bots for CKAN-of-Worms
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/cowbots
#
# This file is part of CowBots.
#
# CowBots is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CowBots is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Detect changes in CKAN-of-Worms objects and notify by email when some patterns are detected."""
import argparse
import ConfigParser
import email.header
import logging
import os
import re
import smtplib
import sys
from biryani1 import baseconv, custom_conv, jsonconv, netconv, states
import mako.lookup
app_dir = os.path.dirname(os.path.abspath(__file__))
app_name = os.path.splitext(os.path.basename(__file__))[0]
conf = None
conv = custom_conv(baseconv, jsonconv, netconv, states)
headers = None
line_re = re.compile(u"""(?P<indent>\s*)(?P<header>([-*]|=>|\[\d+\]|PS\s*\d*\s* ?:)\s*|)(?P<content>[^\s].*)$""")
log = logging.getLogger(app_name)
templates_lookup = None
# Converters
cow_response_to_value = conv.pipe(
conv.make_input_to_json(),
conv.not_none,
conv.test_isinstance(dict),
conv.struct(
dict(
apiVersion = conv.pipe(
conv.test_equals('1.0'),
conv.not_none,
),
context = conv.noop,
method = conv.pipe(
conv.test_isinstance(basestring),
conv.not_none,
),
params = conv.test_isinstance(dict),
url = conv.pipe(
conv.make_input_to_url(full = True),
conv.not_none,
),
value = conv.noop,
),
),
conv.function(lambda response: response['value']),
)
# Functions
def account_created(account):
log.debug(u'Notifying account creation: "{}".'.format(u' - '.join(
fragment
for fragment in [
account.get('fullname'),
account.get('name'),
account.get('email'),
]
if fragment is not None
)))
template = templates_lookup.get_template('new-account.mako')
message = template.render_unicode(
ckan_of_worms_url = conf['ckan_of_worms.site_url'],
account = account,
encoding = 'utf-8',
from_email = conf['from_email'],
qp = lambda s: to_quoted_printable(s, 'utf-8'),
to_emails = conf['admin_email'],
weckan_url = conf['weckan.site_url'],
wiki_url = conf['wiki.site_url'],
youckan_url = conf['youckan.site_url'],
).strip()
send_email(message)
def article_edited(article):
log.debug(u'Notifying article update: "{}".'.format(article['title']))
template = templates_lookup.get_template('edit-article.mako')
message = template.render_unicode(
article = article,
ckan_of_worms_url = conf['ckan_of_worms.site_url'],
encoding = 'utf-8',
from_email = conf['from_email'],
qp = lambda s: to_quoted_printable(s, 'utf-8'),
to_emails = conf['admin_email'],
weckan_url = conf['weckan.site_url'],
wiki_url = conf['wiki.site_url'],
youckan_url = conf['youckan.site_url'],
).strip()
send_email(message)
def dataset_created(dataset):
log.debug(u'Notifying dataset creation: "{}".'.format(dataset['name']))
template = templates_lookup.get_template('new-dataset.mako')
message = template.render_unicode(
ckan_of_worms_url = conf['ckan_of_worms.site_url'],
dataset = dataset,
encoding = 'utf-8',
from_email = conf['from_email'],
qp = lambda s: to_quoted_printable(s, 'utf-8'),
to_emails = conf['admin_email'],
weckan_url = conf['weckan.site_url'],
wiki_url = conf['wiki.site_url'],
youckan_url = conf['youckan.site_url'],
).strip()
send_email(message)
def group_created(group):
log.debug(u'Notifying group creation: "{}".'.format(group['name']))
template = templates_lookup.get_template('new-group.mako')
message = template.render_unicode(
ckan_of_worms_url = conf['ckan_of_worms.site_url'],
encoding = 'utf-8',
from_email = conf['from_email'],
group = group,
qp = lambda s: to_quoted_printable(s, 'utf-8'),
to_emails = conf['admin_email'],
weckan_url = conf['weckan.site_url'],
wiki_url = conf['wiki.site_url'],
youckan_url = conf['youckan.site_url'],
).strip()
send_email(message)
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config', help = 'path of configuration file')
parser.add_argument('-f', '--fedmsg', action = 'store_true', help = 'poll fedmsg events')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
global args
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
config_parser = ConfigParser.SafeConfigParser(dict(
here = os.path.dirname(os.path.abspath(os.path.normpath(args.config))),
))
config_parser.read(args.config)
global conf
conf = conv.check(conv.pipe(
conv.test_isinstance(dict),
conv.struct(
{
'admin_email': conv.pipe(
conv.function(lambda emails: set(emails.split())),
conv.uniform_sequence(
conv.pipe(
conv.input_to_email,
conv.test_email(),
),
constructor = lambda emails: sorted(set(emails)),
drop_none_items = True,
),
conv.empty_to_none,
conv.not_none,
),
'ckan_of_worms.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
full = True),
conv.not_none,
),
'from_email': conv.pipe(
conv.input_to_email,
conv.test_email(),
conv.not_none,
),
'user_agent': conv.pipe(
conv.cleanup_line,
conv.not_none,
),
'weckan.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
full = True),
conv.not_none,
),
'wiki.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
full = True),
conv.not_none,
),
'youckan.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
                    full = True),
                conv.not_none,
),
},
default = 'drop',
),
conv.not_none,
    ))(dict(config_parser.items('CowBots-Email-Changes')), conv.default_state)
cache_dir = os.path.join(app_dir, 'cache')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
global headers
headers = {
'User-Agent': conf['user_agent'],
|
Foris/darwined-core-python-clients
|
darwined_core_python_clients/physical/buildings.py
|
Python
|
mit
| 2,117
| 0
|
import json
import requests
class Building():
"""Building Client."""
# Service Setup
config = {
'schema': 'http',
'host': 'localhost',
'port': '9202',
'endpoint': 'api/v1/buildings'
}
@classmethod
def base_url(cls):
"""Form the base url for the service."""
return "{schema}://{host}:{port}/{endpoint}".format(**cls.config)
@classmethod
def configure(cls, options={}):
cls.config.update(options)
@classmethod
def get_all(cls):
"""Return all buildings."""
r = requests.get(cls.base_url())
if r.status_code == 200:
return r.json()
else:
return None
@classmethod
def get(cls, code):
"""Return an building."""
r = requests.get(cls.base_url() + '/' + code)
if r.status_code == 200:
return r.json()
else:
            return None
@classmethod
def create(cls, attrs):
"""Create an building with the attributes passed in attrs dict."""
r = requests.post(cls.base_url(), data=json.dumps(attrs))
if r.status_code == 200:
return r.json()
else:
return None
@classmethod
def update(cls, code, attrs):
"""Update the building identified by code with attrs dict."""
        r = requests.put(cls.base_url() + '/' + code, data=json.dumps(attrs))
if r.status_code == 200:
return r.json()
else:
return None
@classmethod
def delete(cls, code):
"""Delete the building identified by code."""
r = requests.delete(cls.base_url() + '/' + code)
return r.status_code == 204
@classmethod
def delete_all(cls):
"""Delete all buildings."""
r = requests.delete(cls.base_url())
return r.status_code == 204
@classmethod
def bulk_load(cls, json_string):
"""Bulk loads an array of buildings."""
h = {
'Content-Type': 'application/json'
}
return requests.post(cls.base_url(), data=json_string, headers=h)
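# Minimal usage sketch (added for illustration; the host value below is an
# assumption, not part of the original client):
# Building.configure({'host': 'darwined.example.com'})
# print(Building.get_all())
# Building.create({'code': 'B1', 'name': 'Main Building'})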
|
CrazyDaiDai/learningPython
|
hellow.py
|
Python
|
gpl-3.0
| 1,589
| 0.019119
|
#!/usr/bin/env python3
# define a variable a
a = 100
# check whether a is at least 100
if a >= 100:
    # executed when the condition holds
    print('a is positive, a =', a)
else:
    # otherwise run this branch
    print('a is negative, a =', a)
#
# escape characters
#
print("I'm OK")
print('I\'m OK')
print('I\'m\tlearning\nPython')
# use r'' so the string inside needs no escaping // but this fails --> print(r'I'm OK')
print(r'\\n\\')
# with many line breaks you can use '''...''' instead; tried it here, it did not work
print("line1\nline2\nline3")
#print(r'line1...line2...line3')
# True, mind the capitalisation
print(3 > 2)
# False, mind the capitalisation
print(2 > 3)
# and or not
# and: true only if both operands are true
print("3 > 2 and 2 > 1 -->",3 > 2 and 2 > 1)
print("3 > 2 and 1 > 2 -->",3 > 2 and 1 > 2)
print("2 > 3 and 1 > 2 -->",2 > 3 and 1 > 2)
# or: true if at least one operand is true
print("3 > 2 or 2 > 1 -->",3 > 2 or 2 > 1)
print("3 > 2 or 1 > 2 -->",3 > 2 or 1 > 2)
print("2 > 3 or 1 > 2 -->",2 > 3 or 1 > 2)
# not: negation
print("not 3 > 2 -->",not 3 > 2)
print("not 2 > 3 -->",not 2 > 3)
# None is a special value in Python; it is not 0, since 0 is meaningful
# while None denotes the absence of a value
#
# variables
#
a = 0
a_007 = "A_007"
answer = True
a = "ABC"
x = 2
x = x + 10
print(x)
b = a
a = "XYZ"
print(b)
#
# in Python, all-uppercase variable names conventionally denote constants
#
PI = 3.14159265359
# Python has two kinds of division
#1
print("10 / 3 --> ",10 / 3)
#2 --> floor division keeps only the integer part of the result
print("10 // 3 --> ",10 // 3)
# modulo (remainder)
print("10 % 3 -->",10 % 3)
|
netkicorp/wns-api-server
|
netki/util/__init__.py
|
Python
|
bsd-3-clause
| 23
| 0.043478
|
__author__ = 'mdavid'
|
Omrigan/diseases
|
diseases/migrations/0001_initial.py
|
Python
|
mit
| 1,149
| 0.003481
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Case',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('dateStart', models.DateTimeField(verbose_name='Start case')),
('dateFinish', models.DateTimeField(verbose_name='Finish case')),
                ('description', models.CharField(max_length=4000)),
            ],
),
migrations.CreateModel(
name='Disease',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('title', models.CharField(max_length=1000)),
                ('dateCreation', models.DateTimeField(verbose_name='date published')),
                ('description', models.CharField(max_length=4000)),
],
),
]
|
helfertool/helfertool
|
src/badges/migrations/0004_auto_20160306_1424.py
|
Python
|
agpl-3.0
| 638
| 0.001567
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-06 13:24
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('badges', '0003_badgedesign_bg_color'),
]
operations = [
migrations.AlterField(
model_name='badgedesign',
name='bg_color',
            field=models.CharField(default='#FFFFFF', help_text='E.g. #00ff00', max_length=7, validators=[django.core.validators.RegexValidator('^#[a-fA-F0-9]{6}$')], verbose_name='Background color'),
),
]
sadnoodles/chromeremote | examples/xssbot.py | Python | gpl-3.0 | 2,168 | 0
# coding=utf-8
'''
An xssbot must provide the following features:
1. visit a specified url
2. intercept alert and similar dialogs
3. intercept in-page navigation
4. pin the configured cookies (not implemented)
   The EditThisCookie Chrome extension does this through the extension-only
   API chrome.cookies.onChanged.addListener, but the documentation at
   https://chromedevtools.github.io/devtools-protocol/ offers no comparable
   feature.
'''
from chromeremote import ChromeTabThread as ChromeTab
class XssbotTab(ChromeTab):
    # a page is allowed to run for 10 seconds
    TAB_TIMEOUT = 10
    def __init__(self, url, host, port):
        super(XssbotTab, self).__init__(host, port)
        self.opened = False
        self.url = url
        self.initjs = '''
        window.alert =function(){};
        window.confirm =function(){};
        window.prompt = function(){};
        window.open= function(){};
        '''
    def run(self):
        def processNavigation(para):
            # only allow the first navigation (ours); cancel all others
            if self.opened:
                response = 'CancelAndIgnore'
            else:
                self.opened = True
                response = 'Proceed'
            self.Page.processNavigation(
                response=response, navigationId=para['navigationId'])
        def javascriptDialogOpening(para):
            # after the rewrite there should be no dialogs at all,
            # but close any that do appear
            self.Page.handleJavaScriptDialog(accept=False, promptText='')
        self.open_tab()
        self.Page.enable()
        self.register_event("Page.navigationRequested", processNavigation)
        self.register_event("Page.javascriptDialogOpening",
                            javascriptDialogOpening)
        # put every navigation under our control
        self.Page.setControlNavigations(enabled=True)
        self.Page.addScriptToEvaluateOnLoad(
            scriptSource=self.initjs, identifier='rewrite')
        # open the configured url
        self.Page.navigate(url=self.url)
        super(XssbotTab, self).run()
if __name__ == '__main__':
tab = XssbotTab(
'https://github.com/BugScanTeam/chromeremote', '127.0.0.1', 9222)
tab.start()
NejcZupec/ggrc-core | src/ggrc/models/category.py | Python | apache-2.0 | 1,823 | 0.013165
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import validates
from .categorization import Categorization
from .mixins import deferred, Base, Hierarchical
class CategorizedPublishable(object):
def __init__(self, attr_name, type_name):
self.attr_name = attr_name
self.type_name = type_name
@property
def rel_class(self):
import ggrc.models
return getattr(ggrc.models, self.type_name)
def __call__(self, updater, obj, json_obj):
return updater.query_for(self.rel_class, json_obj, self.attr_name, True)
class CategoryBase(Hierarchical, Base, db.Model):
_table_plural = 'category_bases'
__tablename__ = 'categories'
type = db.Column(db.String)
name = deferred(db.Column(db.String), 'CategoryBase')
lft = deferred(db.Column(db.Integer), 'CategoryBase')
rgt = deferred(db.Column(db.Integer), 'CategoryBase')
scope_id = deferred(db.Column(db.Integer), 'CategoryBase')
depth = deferred(db.Column(db.Integer), 'CategoryBase')
required = deferred(db.Column(db.Boolean), 'CategoryBase')
__mapper_args__ = {
'polymorphic_on': type
}
categorizations = db.relationship(
'ggrc.models.categorization.Categorization',
backref='category',
      cascade='all, delete-orphan',
)
@validates('type')
def validates_type(self, key, value):
return self.__class__.__name__
# REST properties
_publish_attrs = [
'name',
'type',
'required',
#'scope_id',
]
_sanitize_html = [
'name',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(CategoryBase, cls).eager_query()
return query.options()
azaghal/ansible | test/support/integration/plugins/modules/openssl_csr.py | Python | gpl-3.0 | 53,940 | 0.004301
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: openssl_csr
version_added: '2.4'
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- This module allows one to (re)generate OpenSSL certificate signing requests.
- It uses the pyOpenSSL python library to interact with openssl. This module supports
the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
extensions.
- "Please note that the module regenerates existing CSR if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing CSR, consider using the I(backup) option."
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
    PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13.
requirements:
- Either cryptography >= 1.3
- Or pyOpenSSL >= 0.15
author:
- Yanis Guenane (@Spredzy)
options:
state:
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
digest:
description:
- The digest used when signing the certificate signing request with the private key.
type: str
default: sha256
privatekey_path:
description:
- The path to the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
version_added: "2.10"
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
version:
description:
- The version of the certificate signing request.
- "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
is 1."
- This option will no longer accept unsupported values from Ansible 2.14 on.
type: int
default: 1
force:
description:
- Should the certificate signing request be forced regenerated by this ansible module.
type: bool
default: no
path:
description:
- The name of the file into which the generated OpenSSL certificate signing request will be written.
type: path
required: true
subject:
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
type: dict
version_added: '2.5'
country_name:
description:
- The countryName field of the certificate signing request subject.
type: str
aliases: [ C, countryName ]
state_or_province_name:
description:
- The stateOrProvinceName field of the certificate signing request subject.
type: str
aliases: [ ST, stateOrProvinceName ]
locality_name:
description:
- The localityName field of the certificate signing request subject.
type: str
aliases: [ L, localityName ]
organization_name:
description:
- The organizationName field of the certificate signing request subject.
type: str
aliases: [ O, organizationName ]
organizational_unit_name:
description:
- The organizationalUnitName field of the certificate signing request subject.
type: str
aliases: [ OU, organizationalUnitName ]
common_name:
description:
- The commonName field of the certificate signing request subject.
type: str
aliases: [ CN, commonName ]
email_address:
description:
- The emailAddress field of the certificate signing request subject.
type: str
aliases: [ E, emailAddress ]
subject_alt_name:
description:
- SAN extension to attach to the certificate signing request.
      - This can either be a 'comma separated string' or a YAML list.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA)
- Note that if no SAN is specified, but a common name, the common
name will be added as a SAN except if C(useCommonNameForSAN) is
set to I(false).
      - More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_critical:
description:
- Should the subjectAltName extension be considered as critical.
type: bool
aliases: [ subjectAltName_critical ]
use_common_name_for_san:
description:
- If set to C(yes), the module will fill the common name in for
C(subject_alt_name) with C(DNS:) prefix if no SAN is specified.
type: bool
default: yes
version_added: '2.8'
aliases: [ useCommonNameForSAN ]
key_usage:
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
type: list
elements: str
aliases: [ keyUsage ]
key_usage_critical:
description:
- Should the keyUsage extension be considered as critical.
type: bool
aliases: [ keyUsage_critical ]
extended_key_usage:
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
type: list
elements: str
aliases: [ extKeyUsage, extendedKeyUsage ]
extended_key_usage_critical:
description:
- Should the extkeyUsage extension be considered as critical.
type: bool
aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
basic_constraints:
description:
- Indicates basic constraints, such as if the certificate is a CA.
type: list
elements: str
version_added: '2.5'
aliases: [ basicConstraints ]
basic_constraints_critical:
description:
- Should the basicConstraints extension be considered as critical.
type: bool
version_added: '2.5'
aliases: [ basicConstraints_critical ]
ocsp_must_staple:
description:
- Indicates that the certificate should contain the OCSP Must Staple
extension (U(https://tools.ietf.org/html/rfc7633)).
type: bool
version_added: '2.5'
aliases: [ ocspMustStaple ]
ocsp_must_staple_critical:
description:
- Should the OCSP Must Staple extension be considered as critical
- Note that according to the RFC, this extension should not be marked
greysondn/gamesolutions | twilioquest/python/codepath/functions.py | Python | mit | 644 | 0.006211
# TwilioQuest version 3.1.26
# Works in:
# 3.1.26
# bog standard main function
def main():
print("functions")
hail_friend()
print("function arguments")
hail_friend("Operator")
print("function return values")
print(f"{add_numbers(45, -1)}")
# functions the tasks demand
def add_numbers(num1, num2):
return num1 + num2
def hail_friend(name=None):
    # use default value to pass Function challenge
if (None == name):
print("Hail, friend!")
else:
# use given value to pass argument challenge
print(f"Hail, {name}!")
# standard main guard
if ("__main__" == __name__):
main()
adapteva/epiphany-gdb | gdb/copyright.py | Python | gpl-2.0 | 11,525 | 0.000954
#! /usr/bin/env python
# Copyright (C) 2011-2012 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""copyright.py
This script updates the list of years in the copyright notices in
most files maintained by the GDB project.
Usage: cd src/gdb && python copyright.py
Always review the output of this script before committing it!
A useful command to review the output is:
% filterdiff -x \*.c -x \*.cc -x \*.h -x \*.exp updates.diff
This removes the bulk of the changes which are most likely to be correct.
"""
import datetime
import os
import os.path
import subprocess
# A list of prefixes that start a multi-line comment. These prefixes
# should not be repeated when wrapping long lines.
MULTILINE_COMMENT_PREFIXES = (
'/*', # C/C++
'<!--', # XML
'{', # Pascal
)
def get_update_list():
"""Return the list of files to update.
Assumes that the current working directory when called is the root
of the GDB source tree (NOT the gdb/ subdirectory!). The names of
the files are relative to that root directory.
"""
result = []
for gdb_dir in ('gdb', 'sim', 'include/gdb'):
for root, dirs, files in os.walk(gdb_dir, topdown=True):
for dirname in dirs:
reldirname = "%s/%s" % (root, dirname)
if (dirname in EXCLUDE_ALL_LIST
or reldirname in EXCLUDE_LIST
or reldirname in NOT_FSF_LIST
or reldirname in BY_HAND):
# Prune this directory from our search list.
dirs.remove(dirname)
for filename in files:
relpath = "%s/%s" % (root, filename)
if (filename in EXCLUDE_ALL_LIST
or relpath in EXCLUDE_LIST
or relpath in NOT_FSF_LIST
or relpath in BY_HAND):
# Ignore this file.
pass
else:
result.append(relpath)
return result
def update_files(update_list):
"""Update the copyright header of the files in the given list.
We use gnulib's update-copyright script for that.
"""
# Tell the update-copyright script that we do not want it to
# repeat the prefixes in MULTILINE_COMMENT_PREFIXES.
os.environ['MULTILINE_COMMENT_PREFIXES'] = \
'\n'.join(MULTILINE_COMMENT_PREFIXES)
# We want to use year intervals in the copyright notices.
os.environ['UPDATE_COPYRIGHT_USE_INTERVALS'] = '1'
# Perform the update, and save the output in a string.
update_cmd = ['bash', 'gdb/gnulib/extra/update-copyright'] + update_list
p = subprocess.Popen(update_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
update_out = p.communicate()[0]
# Process the output. Typically, a lot of files do not have
# a copyright notice :-(. The update-copyright script prints
# a well defined warning when it did not find the copyright notice.
# For each of those, do a sanity check and see if they may in fact
# have one. For the files that are found not to have one, we filter
# the line out from the output, since there is nothing more to do,
# short of looking at each file and seeing which notice is appropriate.
# Too much work! (~4,000 files listed as of 2012-01-03).
update_out = update_out.splitlines()
warning_string = ': warning: copyright statement not found'
warning_len = len(warning_string)
for line in update_out:
if line.endswith('\n'):
line = line[:-1]
if line.endswith(warning_string):
filename = line[:-warning_len]
if may_have_copyright_notice(filename):
print line
else:
# Unrecognized file format. !?!
print "*** " + line
def may_have_copyright_notice(filename):
"""Check that the given file does not seem to have a copyright notice.
The filename is relative to the root directory.
This function assumes that the current working directory is that root
directory.
    The algorithm is fairly crude, meaning that it might return
some false positives. I do not think it will return any false
negatives... We might improve this function to handle more
complex cases later...
"""
# For now, it may have a copyright notice if we find the word
# "Copyright" at the (reasonable) start of the given file, say
# 50 lines...
MAX_LINES = 50
fd = open(filename)
lineno = 1
for line in fd:
if 'Copyright' in line:
return True
lineno += 1
        if lineno > MAX_LINES:
return False
return False
def main ():
"""The main subprogram."""
if not os.path.isfile("gnulib/extra/update-copyright"):
print "Error: This script must be called from the gdb directory."
root_dir = os.path.dirname(os.getcwd())
os.chdir(root_dir)
update_list = get_update_list()
update_files (update_list)
# Remind the user that some files need to be updated by HAND...
if BY_HAND:
print
print "\033[31mREMINDER: The following files must be updated by hand." \
"\033[0m"
for filename in BY_HAND:
print " ", filename
############################################################################
#
# Some constants, placed at the end because they take up a lot of room.
# The actual value of these constants is not significant to the understanding
# of the script.
#
############################################################################
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Filenames are relative to the root directory.
EXCLUDE_LIST = (
'gdb/gdbarch.c', 'gdb/gdbarch.h',
'gdb/gnulib'
)
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Matches any file or directory name anywhere. Use with caution.
# This is mostly for files that can be found in multiple directories.
# Eg: We want all files named COPYING to be left untouched.
EXCLUDE_ALL_LIST = (
"COPYING", "COPYING.LIB", "CVS", "configure", "copying.c",
"fdl.texi", "gpl.texi", "aclocal.m4",
)
# The list of files to update by hand.
BY_HAND = (
# These files are sensitive to line numbering.
"gdb/testsuite/gdb.base/step-line.inp",
"gdb/testsuite/gdb.base/step-line.c",
)
# The list of files which have a copyright notice that is not held by the FSF.
# Filenames are relative to the root directory.
NOT_FSF_LIST = (
"gdb/exc_request.defs",
"gdb/osf-share",
"gdb/gdbtk",
"gdb/testsuite/gdb.gdbtk/",
"sim/arm/armemu.h", "sim/arm/armos.c", "sim/arm/gdbhost.c",
"sim/arm/dbg_hif.h", "sim/arm/dbg_conf.h", "sim/arm/communicate.h",
"sim/arm/armos.h", "sim/arm/armcopro.c", "sim/arm/armemu.c",
"sim/arm/kid.c", "sim/arm/thumbemu.c", "sim/arm/armdefs.h",
"sim/arm/armopts.h", "sim/arm/dbg_cp.h", "sim/arm/dbg_rdi.h",
"sim/arm/parent.c", "sim/arm/armsupp.c", "sim/arm/armrdi.
|
c",
"sim/arm/bag.c", "sim/arm/armvirt.c", "sim/arm/main.c", "sim/arm/bag.h",
"sim/arm/communicate.c", "sim/arm/gdbhost.h", "sim/arm/armfpe.h",
"sim/arm/arminit.c",
"sim/common/cgen-fpu.c",
"sim/common/cgen-fpu.h", "sim/common/cgen-fpu.h",
"sim/common/cgen-accfp.c", "s
tedlaz/pyted | tests/pysqlqt/src/classes/json_data.py | Python | gpl-3.0 | 4,956 | 0
"""
@file
@brief This file loads and saves settings (as JSON)
@author Ted Lazaros <tedlaz@gmail.com>
@section LICENSE
"""
try:
import json
except ImportError:
import simplejson as json
import copy
from classes.logger import log
class JsonDataStore:
"""
This class which allows getting/setting of key/value settings,
and loading and saving to json files.
Internal storage of a dictionary.
Uses json or simplejson packages to serialize and deserialize
from json to dictionary.
Keys are assumed to be strings, but subclasses which override get/set
methods may use different key types.
The write_to_file and read_from_file methods are key type agnostic.
"""
# Create default data storage and default data type for logging messages
def __init__(self):
# Private data store, accessible through the get and set methods
self._data = {}
self.data_type = "json data"
def get(self, key):
""" Get copied value of a given key in data store """
key = key.lower()
# Determine if the root element is a dictionary or list
# (i.e. project data or settings data)
if type(self._data) == list:
# Settings data, search for matching "setting" attribute (i.e.list)
# Load user setting's values (for easy merging)
user_values = {}
for item in self._data:
if "setting" in item and "value" in item:
user_values[item["setting"].lower()] = item["value"]
# Settings data
return copy.deepcopy(user_values.get(key, None))
else:
# Project data (i.e dictionary)
return copy.deepcopy(self._data.get(key, None))
def set(self, key, value):
""" Store value in key """
key = key.lower()
# Determine if the root element is a dictionary or list
# (i.e. project data or settings data)
if type(self._data) == list:
# Settings data, search for matching "setting" attribute (i.e.list)
# Load user setting's values (for easy merging)
user_values = {}
for item in self._data:
if "setting" in item and "value" in item:
user_values[item["setting"].lower()] = item
# Settings data
user_values[key]["value"] = value
else:
# Project data (i.e dictionary)
self._data[key] = value
def merge_settings(self, default, user):
"""
Merge settings files, removing invalid settings based on default
settings. This is only called by some sub-classes that use
string keys
"""
# Determine if the root element is a dictionary or list
if type(default) == list:
# pass
# Load user setting's values (for easy merging)
user_values = {}
for item in user:
if "setting" in item and "value" in item:
user_values[item["setting"]] = item["value"]
# Update default values to match user values
for item in default:
user_value = user_values.get(item["setting"], None)
if user_value:
item["value"] = user_value
# Return merged list
return default
else:
            # Root object is a dictionary (i.e. project data)
for key in default:
if key not in user:
# Add missing key to user dictionary
user[key] = default[key]
# Return merged dictionary
return user
    def read_from_file(self, file_path):
""" Load JSON settings from a file """
# log.debug("loading {}".format(file_path))
try:
with open(file_path.encode('UTF-8'), 'r') as f:
contents = f.read()
if contents:
# log.debug("loaded", contents)
return json.loads(contents)
except Exception as ex:
msg = ("Couldn't load {} file: {}".format(self.data_type, ex))
log.error(msg)
raise Exception(msg)
msg = ("Couldn't load {} file, no data.".format(self.data_type))
log.warning(msg)
raise Exception(msg)
def write_to_file(self, file_path, data):
""" Save JSON settings to a file """
# log.debug("saving", file_path, data)
try:
with open(file_path.encode('UTF-8'), 'w') as f:
f.write(json.dumps(data))
except Exception as ex:
msg = ("Couldn't save {} file:\n{}\n{}".format(
self.data_type, file_path, ex))
log.error(msg)
raise Exception(msg)
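# A minimal usage sketch (hypothetical path and keys; not part of the
# original file):
if __name__ == "__main__":
    store = JsonDataStore()
    store.set("width", 640)            # keys are lower-cased internally
    assert store.get("Width") == 640   # get() returns a deep copy
    store.write_to_file("/tmp/settings.json", {"width": store.get("width")})
    print(store.read_from_file("/tmp/settings.json"))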
|
a143753/AOJ
|
1576.py
|
Python
|
apache-2.0
| 497
| 0.026157
|
[m,n] = map(int,input().split())
def find(v,cs):
    for c in cs:
if v in c:
return (True,c)
return (False,set([v]))
cities = []
for _ in range(n):
[a,b] = map(int,input().split())
(ra,fa) = find(a,cities)
(rb,fb) = find(b,cities)
mg = fa | fb
if ra:
cities.remove(fa)
else:
m = m - 1
if rb:
if fa != fb:
cities.remove(fb)
else:
m = m - 1
cities.append(mg)
print(abs(m-len(cities)))
zneak/capstone | bindings/python/setup.py | Python | bsd-3-clause | 6,134 | 0.001304
#!/usr/bin/env python
import glob
import os
import platform
import shutil
import stat
import sys
from distutils import log
from distutils import dir_util
from distutils.command.build_clib import build_clib
from distutils.command.sdist import sdist
from distutils.core import setup
from distutils.sysconfig import get_python_lib
# prebuilt libraries for Windows - for sdist
PATH_LIB64 = "prebuilt/win64/capstone.dll"
PATH_LIB32 = "prebuilt/win32/capstone.dll"
# package name can be 'capstone' or 'capstone-windows'
PKG_NAME = 'capstone'
if os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32):
PKG_NAME = 'capstone-windows'
VERSION = '3.0.2'
SYSTEM = sys.platform
SITE_PACKAGES = os.path.join(get_python_lib(), "capstone")
SETUP_DATA_FILES = []
# adapted from commit e504b81 of Nguyen Tan Cong
# Reference: https://docs.python.org/2/library/platform.html#cross-platform
is_64bits = sys.maxsize > 2**32
def copy_sources():
"""Copy the C sources into the source directory.
This rearranges the source files under the python distribution
directory.
"""
src = []
try:
dir_util.remove_tree("src/")
except (IOError, OSError):
pass
dir_util.copy_tree("../../arch", "src/arch/")
dir_util.copy_tree("../../include", "src/include/")
dir_util.copy_tree("../../msvc/headers", "src/msvc/headers")
src.extend(glob.glob("../../*.[ch]"))
src.extend(glob.glob("../../*.mk"))
src.extend(glob.glob("../../Makefile"))
src.extend(glob.glob("../../LICENSE*"))
src.extend(glob.glob("../../README"))
src.extend(glob.glob("../../*.TXT"))
src.extend(glob.glob("../../RELEASE_NOTES"))
src.extend(glob.glob("../../make.sh"))
src.extend(glob.glob("../../CMakeLists.txt"))
for filename in src:
outpath = os.path.join("./src/", os.path.basename(filename))
log.info("%s -> %s" % (filename, outpath))
shutil.copy(filename, outpath)
class custom_sdist(sdist):
"""Reshuffle files for distribution."""
def run(self):
# if prebuilt libraries are existent, then do not copy source
if os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32):
return sdist.run(self)
copy_sources()
return sdist.run(self)
class custom_build_clib(build_clib):
"""Customized build_clib command."""
def run(self):
log.info('running custom_build_clib')
build_clib.run(self)
def finalize_options(self):
# We want build-clib to default to build-lib as defined by the "build"
# command. This is so the compiled library will be put in the right
# place along side the python code.
self.set_undefined_options('build',
('build_lib', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
build_clib.finalize_options(self)
def build_libraries(self, libraries):
if SYSTEM in ("win32", "cygwin"):
# if Windows prebuilt library is available, then include it
if is_64bits and os.path.exists(PATH_LIB64):
SETUP_DATA_FILES.append(PATH_LIB64)
return
elif os.path.exists(PATH_LIB32):
SETUP_DATA_FILES.append(PATH_LIB32)
return
# build library from source if src/ is existent
if not os.path.exists('src'):
return
try:
for (lib_name, build_info) in libraries:
log.info("building '%s' library", lib_name)
os.chdir("src")
                # platform descriptions are listed at https://docs.python.org/2/library/sys.html#sys.platform
if SYSTEM == "win32":
# Windows build: this process requires few things:
# - CMake + MSVC installed
# - Run this command in an environment setup for MSVC
os.mkdir("build")
os.chdir("build")
# Do not build tests & static library
os.system('cmake -DCMAKE_BUILD_TYPE=RELEASE -DCAPSTONE_BUILD_TESTS=0 -DCAPSTONE_BUILD_STATIC=0 -G "NMake Makefiles" ..')
os.system("nmake")
os.chdir("..")
SETUP_DATA_FILES.append("src/build/capstone.dll")
elif SYSTEM == "cygwin":
os.chmod("make.sh", stat.S_IREAD|stat.S_IEXEC)
if is_64bits:
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh cygwin-mingw64")
else:
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh cygwin-mingw32")
SETUP_DATA_FILES.append("src/capstone.dll")
else: # Unix
os.chmod("make.sh", stat.S_IREAD|stat.S_IEXEC)
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh")
if SYSTEM == "darwin":
SETUP_DATA_FILES.append("src/libcapstone.dylib")
else: # Non-OSX
SETUP_DATA_FILES.append("src/libcapstone.so")
os.chdir("..")
except:
pass
def dummy_src():
return []
setup(
provides=['capstone'],
packages=['capstone'],
name=PKG_NAME,
version=VERSION,
author='Nguyen Anh Quynh',
author_email='aquynh@gmail.com',
description='Capstone disassembly engine',
url='http://www.capstone-engine.org',
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
requires=['ctypes'],
cmdclass=dict(
build_clib=custom_build_clib,
sdist=custom_sdist,
),
libraries=[(
'capstone', dict(
package='capstone',
sources=dummy_src()
),
)],
data_files=[(SITE_PACKAGES, SETUP_DATA_FILES)],
)
jimkmc/micropython | tools/make-frozen.py | Python | mit | 1,041 | 0.000961
#!/usr/bin/env python3
#
# Create frozen modules structure for MicroPython.
#
# Usage:
#
# Have a directory with modules to be frozen (only modules, not packages
# supported so far):
#
# frozen/foo.py
# frozen/bar.py
#
# Run script, passing path to the directory above:
#
# ./make-frozen.py frozen > frozen.c
#
# Include frozen.c in your build, having defined MICROPY_MODULE_FROZEN in
# config.
#
import sys
import os
def module_name(f):
return f[:-len(".py")]
modules = []
for dirpath, dirnames, filenames in os.walk(sys.argv[1]):
for f in filenames:
st = os.stat(dirpath + "/" + f)
modules.append((f, st))
print("#include <stdint.h>")
print("const uint16_t mp_frozen_sizes[] = {")
for f, st in modules:
print("%d," % st.st_siz
|
e)
print("0};")
print("const char mp_frozen_content[] = {")
for f, st in modules:
m = module_name(f)
print('"%s\\0"' % m)
data = open(sys.argv[1] + "/" + f).read()
data = repr(data)[1:-1]
data = data.replace('"', '\\"')
print('"%s"' % data)
print("};")
Ingenico-ePayments/connect-sdk-python2 | ingenico/connect/sdk/meta_data_provider.py | Python | mit | 5,912 | 0.001522
import platform
from base64 import b64encode
import re
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.defaultimpl.default_marshaller import \
DefaultMarshaller
from ingenico.connect.sdk.domain.metadata.shopping_cart_extension import ShoppingCartExtension
from request_header import RequestHeader
class IterProperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return self.func(owner)
class MetaDataProvider:
"""
Provides meta info about the server.
"""
__SDK_VERSION = "3.30.0"
__SERVER_META_INFO_HEADER = "X-GCS-ServerMetaInfo"
__prohibited_headers = [__SERVER_META_INFO_HEADER, "X-GCS-Idempotence-Key",
"Date", "Content-Type", "Authorization"]
__PROHIBITED_HEADERS = tuple(sorted(__prohibited_headers, key=str.lower))
__meta_data_headers = None
class ServerMetaInfo(DataObject):
platform_identifier = None
sdk_identifier = None
sdk_creator = None
integrator = None
shopping_cart_extension = None
def to_dictionary(self):
dictionary = super(MetaDataProvider.ServerMetaInfo, self).to_dictionary()
if self.platform_identifier is not None:
dictionary['platformIdentifier'] = self.platform_identifier
if self.sdk_identifier is not None:
dictionary['sdkIdentifier'] = self.sdk_identifier
if self.sdk_creator is not None:
dictionary['sdkCreator'] = self.sdk_creator
if self.integrator is not None:
dictionary['integrator'] = self.integrator
            if self.shopping_cart_extension is not None:
dictionary['shoppingCartExtension'] = self.shopping_cart_extension.to_dictionary()
return dictionary
def from_dictionary(self, dictionary):
super(MetaDataProvider.ServerMetaInfo, self).from_dictionary(dictionary)
if 'platformIdentifier' in dictionary:
self.platform_identifier = dictionary['platformIdentifier']
if 'sdkIdentifier' in dictionary:
self.sdk_identifier = dictionary['sdkIdentifier']
if 'sdkCreator' in dictionary:
self.sdk_creator = dictionary['sdkCreator']
if 'integrator' in dictionary:
self.integrator = dictionary['integrator']
if 'shoppingCartExtension' in dictionary:
if not isinstance(dictionary['shoppingCartExtension'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['shoppingCartExtension']))
self.shopping_cart_extension = ShoppingCartExtension.create_from_dictionary(dictionary['shoppingCartExtension'])
return self
def __init__(self, integrator, shopping_cart_extension=None,
additional_request_headers=()):
MetaDataProvider.__validate_additional_request_headers(
additional_request_headers)
for i in additional_request_headers:
i.name = re.sub(r'\r?\n(?:(?![\r\n])\s)*', " ", i.name)
i.name = i.name.strip()
i.value = re.sub(r'\r?\n(?:(?![\r\n])\s)*', " ", i.value)
i.value = i.value.strip()
server_meta_info = self.ServerMetaInfo()
server_meta_info.platform_identifier = self._platform_identifier
server_meta_info.sdk_identifier = self._sdk_identifier
server_meta_info.sdk_creator = "Ingenico"
server_meta_info.integrator = integrator
server_meta_info.shopping_cart_extension = shopping_cart_extension
server_meta_info_string = DefaultMarshaller.INSTANCE().marshal(
server_meta_info)
server_meta_info_header = RequestHeader(
self.__SERVER_META_INFO_HEADER, b64encode(
server_meta_info_string.encode('utf-8')))
if not additional_request_headers:
self.__meta_data_headers = tuple([server_meta_info_header])
else:
request_headers = [server_meta_info_header]
request_headers.extend(additional_request_headers)
self.__meta_data_headers = tuple(request_headers)
@staticmethod
def __validate_additional_request_headers(additional_request_headers):
if additional_request_headers is not None:
for additional_request_header in additional_request_headers:
MetaDataProvider.__validate_additional_request_header(
additional_request_header)
@staticmethod
def __validate_additional_request_header(additional_request_header):
try:
if additional_request_header.name in MetaDataProvider.__PROHIBITED_HEADERS:
raise ValueError("request header not allowed: ",
str(additional_request_header))
except AttributeError:
raise AttributeError("Each request header should have an attribute 'name' and an attribute 'value'")
@IterProperty
def prohibited_headers(self):
return self.__PROHIBITED_HEADERS
@property
def meta_data_headers(self):
"""
:return: The server related headers containing the META data to be
associated with the request (if any). This will always contain at least
an automatically generated header X-GCS-ServerMetaInfo.
"""
return self.__meta_data_headers
@property
def _platform_identifier(self):
return platform.system() + " " + platform.release() + "/" + \
platform.version() + " Python/" + platform.python_version() + \
" (" + platform.python_implementation() + "; " + \
str(platform.python_compiler()) + ")"
@property
def _sdk_identifier(self):
return "Python2ServerSDK/v" + self.__SDK_VERSION
zeldin/libsigrokdecode | decoders/timing/pd.py | Python | gpl-3.0 | 4,356 | 0.005282
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Torsten Duwe <duwe@suse.de>
## Copyright (C) 2014 Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from collections import deque
class SamplerateError(Exception):
pass
def normalize_time(t):
if t >= 1.0:
return '%.3f s (%.3f Hz)' % (t, (1/t))
elif t >= 0.001:
if 1/t/1000 < 1:
return '%.3f ms (%.3f Hz)' % (t * 1000.0, (1/t))
else:
return '%.3f ms (%.3f kHz)' % (t * 1000.0, (1/t)/1000)
elif t >= 0.000001:
if 1/t/1000/1000 < 1:
return '%.3f μs (%.3f kHz)' % (t * 1000.0 * 1000.0, (1/t)/1000)
else:
return '%.3f μs (%.3f MHz)' % (t * 1000.0 * 1000.0, (1/t)/1000/1000)
elif t >= 0.000000001:
        if 1/t/1000/1000/1000 < 1:
return '%.3f ns (%.3f MHz)' % (t * 1000.0 * 1000.0 * 1000.0, (1/t)/1000/1000)
else:
return '%.3f ns (%.3f GHz)' % (t * 1000.0 * 1000.0 * 1000.0, (1/t)/1000/1000/1000)
else:
return '%f' % t
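# A few sample values of normalize_time (a sketch, not part of the original
# file):
#   normalize_time(2.0)    -> '2.000 s (0.500 Hz)'
#   normalize_time(0.002)  -> '2.000 ms (500.000 Hz)'
#   normalize_time(2e-06)  -> '2.000 μs (500.000 kHz)'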
class Decoder(srd.Decoder):
api_version = 3
id = 'timing'
name = 'Timing'
longname = 'Timing calculation with frequency and averaging'
desc = 'Calculate time between edges.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['timing']
channels = (
        {'id': 'data', 'name': 'Data', 'desc': 'Data line'},
)
annotations = (
('time', 'Time'),
('average', 'Average'),
)
annotation_rows = (
('time', 'Time', (0,)),
('average', 'Average', (1,)),
)
options = (
{ 'id': 'avg_period', 'desc': 'Averaging period', 'default': 100 },
)
def __init__(self):
self.samplerate = None
self.oldpin = None
self.last_samplenum = None
        self.last_n = deque()
self.chunks = 0
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.initial_pins = [0]
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
pin = self.wait({0: 'e'})
if self.oldpin is None:
self.oldpin = pin
self.last_samplenum = self.samplenum
continue
if self.oldpin != pin:
samples = self.samplenum - self.last_samplenum
t = samples / self.samplerate
self.chunks += 1
                # Don't insert the first chunk into the averaging as it
                # is probably not complete.
if self.last_samplenum is None or self.chunks < 2:
# Report the timing normalized.
self.put(self.last_samplenum, self.samplenum, self.out_ann,
[0, [normalize_time(t)]])
else:
if t > 0:
self.last_n.append(t)
if len(self.last_n) > self.options['avg_period']:
self.last_n.popleft()
# Report the timing normalized.
self.put(self.last_samplenum, self.samplenum, self.out_ann,
[0, [normalize_time(t)]])
self.put(self.last_samplenum, self.samplenum, self.out_ann,
[1, [normalize_time(sum(self.last_n) / len(self.last_n))]])
# Store data for next round.
self.last_samplenum = self.samplenum
self.oldpin = pin
creative-workflow/pi-setup | lib/piservices/policies.py | Python | mit | 867 | 0.023068
import sys, os, fabric
class PiServicePolicies:
@staticmethod
def is_local():
return (not fabric.api.env.hosts or fabric.api.env.hosts[0] in ['localhost', '127.0.0.1', '::1'])
@staticmethod
def is_pi():
return os.path.isdir('/home/pi')
@staticmethod
def check_local_or_exit():
if not PiServicePolicies.is_local():
print "...only call
|
able on localhost!!!"
sys.exit(-1)
@staticmethod
def check_remote_or_exit():
if PiServicePolicies.is_local():
print "...only callable on remote host!!!"
sys.exit(-1)
def check_installed_or_exit(self):
if not PiServicePolicies.installed(self):
print "...first you have to install this service! fab pi %s:install"
sys.exit(-1)
def installed(self):
ret = self.file_exists('__init__.py')
    if not ret: print self.name+' not installed'
return ret
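# A minimal usage sketch (hypothetical; not part of the original file):
#   PiServicePolicies.check_local_or_exit()  # aborts unless run on localhost
#   if PiServicePolicies.is_pi():
#       print "running on a Raspberry Pi"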
superdyzio/PWR-Stuff | AIR-ARR/Projekt Zespołowy/catkin_ws/devel/lib/python2.7/dist-packages/geographic_msgs/srv/_GetGeographicMap.py | Python | mit | 33,019 | 0.015385
"""autogenerated by genpy from geographic_msgs/GetGeographicMapRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
class GetGeographicMapRequest(genpy.Message):
_md5sum = "505cc89008cb1745810d2ee4ea646d6e"
_type = "geographic_msgs/GetGeographicMapRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
string url
BoundingBox bounds
================================================================================
MSG: geographic_msgs/BoundingBox
# Geographic map bounding box.
#
# The two GeoPoints denote diagonally opposite corners of the box.
#
# If min_pt.latitude is NaN, the bounding box is "global", matching
# any valid latitude, longitude and altitude.
#
# If min_pt.altitude is NaN, the bounding box is two-dimensional and
# matches any altitude within the specified latitude and longitude
# range.
GeoPoint min_pt # lowest and most Southwestern corner
GeoPoint max_pt # highest and most Northeastern corner
================================================================================
MSG: geographic_msgs/GeoPoint
# Geographic point, using the WGS 84 reference ellipsoid.
# Latitude [degrees]. Positive is north of equator; negative is south
# (-90 <= latitude <= +90).
float64 latitude
# Longitude [degrees]. Positive is east of prime meridian; negative is
# west (-180 <= longitude <= +180). At the poles, latitude is -90 or
# +90, and longitude is irrelevant, but must be in range.
float64 longitude
# Altitude [m]. Positive is above the WGS 84 ellipsoid (NaN if unspecified).
float64 altitude
"""
__slots__ = ['url','bounds']
_slot_types = ['string','geographic_msgs/BoundingBox']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
url,bounds
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetGeographicMapRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.url is None:
self.url = ''
if self.bounds is None:
self.bounds = geographic_msgs.msg.BoundingBox()
else:
self.url = ''
self.bounds = geographic_msgs.msg.BoundingBox()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.url
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_6d.pack(_x.bounds.min_pt.latitude, _x.bounds.min_pt.longitude, _x.bounds.min_pt.altitude, _x.bounds.max_pt.latitude, _x.bounds.max_pt.longitude, _x.bounds.max_pt.altitude))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.bounds is None:
self.bounds = geographic_msgs.msg.BoundingBox()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.url = str[start:end].decode('utf-8')
else:
self.url = str[start:end]
_x = self
start = end
end += 48
(_x.bounds.min_pt.latitude, _x.bounds.min_pt.longitude, _x.bounds.min_pt.altitude, _x.bounds.max_pt.latitude, _x.bounds.max_pt.longitude, _x.bounds.max_pt.altitude,) = _struct_6d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.url
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_6d.pack(_x.bounds.min_pt.latitude, _x.bounds.min_pt.longitude, _x.bounds.min_pt.altitude, _x.bounds.max_pt.latitude, _x.bounds.max_pt.longitude, _x.bounds.max_pt.altitude))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.bounds is None:
self.bounds = geographic_msgs.msg.BoundingBox()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.url = str[start:end].decode('utf-8')
else:
self.url = str[start:end]
_x = self
start = end
end += 48
(_x.bounds.min_pt.latitude, _x.bounds.min_pt.longitude, _x.bounds.min_pt.altitude, _x.bounds.max_pt.latitude, _x.bounds.max_pt.longitude, _x.bounds.max_pt.altitude,) = _struct_6d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_6d = struct.Struct("<6d")
"""autogenerated by genpy from geographic_msgs/GetGeographicMapResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import std_msgs.msg
class GetGeographicMapResponse(genpy.Message):
_md5sum = "0910332806c65953a4f4252eb780811a"
_type = "geographic_msgs/GetGeographicMapResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
bool success
string status
GeographicMap map
================================================================================
MSG: geographic_msgs/GeographicMap
# Geographic map for a specified region.
Header header # stamp specifies time
# frame_id (normally /map)
UniqueID id # identifier for this map
BoundingBox bounds # 2D bounding box containing map
WayPoint[] points # way-points
MapFeature[] features # map features
KeyValue[] props # map properties
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is assoc
CodeLionX/CommentSearchEngine | cse/reader/ArticleMappingReader.py | Python | mit | 1,729 | 0.009254
import csv
import os
class ArticleMappingReader(object):
    def __init__(self, articlesFilepath, delimiter=','):
        self.__delimiter = delimiter
        self.__articlesFilepath = articlesFilepath
self.__articlesFile = None
self.__articlesReader = None
self.__currentArticleData = None
def open(self):
        if not os.path.exists(self.__articlesFilepath):
            raise Exception("article mapping file not found!")
self.__articlesFile = open(self.__articlesFilepath, 'r', newline='', encoding="UTF-8")
        self.__articlesReader = csv.reader(self.__articlesFile, delimiter=self.__delimiter)
return self
def close(self):
self.__articlesFile.close()
def currentArticleId(self):
return self.__currentArticleData[0]
def currentArticleUrl(self):
return self.__currentArticleData[1]
def __parseIterRow(self, row):
articleId = int(row[0])
articleUrl = row[1]
return (articleId, articleUrl)
def __iter__(self):
del self.__currentArticleData
self.__articlesFile.seek(0)
iter(self.__articlesReader)
# skip csv header in iteration mode:
next(self.__articlesReader)
return self
def __next__(self):
self.__currentArticleData = self.__parseIterRow(next(self.__articlesReader))
return self.__currentArticleData
def __enter__(self):
return self.open()
def __exit__(self, type, value, traceback):
self.close()
if __name__ == "__main__":
with ArticleMappingReader(os.path.join("data", "articleMapping.data")) as reader:
for aid, url in reader:
print(aid, url)
mneary1/YGOPricesAPI | prices.py | Python | mit | 2,551 | 0.000784
import requests
class YGOPricesAPI():
def __init__(self):
self.url = "http://yugiohprices.com/api"
def __make_request(self, url):
"""Request a resource from api"""
request = requests.get(url)
if request.status_code != 200:
status_code = request.status_code
reason = request.reason
            raise Exception(f'Status code: {status_code} Reason: {reason}')
return request.json()
def get_price_by_name(self, name):
"""Retrieves price data for every version of a card using its name"""
url = f"{self
|
.url}/get_card_prices/{name}"
return self.__make_request(url)
def get_price_by_tag(self, tag, rarity=None):
"""Retrieve price data for a specific version of a card using its print tag"""
if rarity:
url = f"{self.url}/price_history/{tag}?rarity={rarity}"
else:
url = f"{self.url}/price_for_print_tag/{tag}"
return self.__make_request(url)
def get_set_data(self, set_name):
"""Returns rarities and low/high/average prices for each card in the set."""
url = f"{self.url}/set_data/{set_name}"
return self.__make_request(url)
def get_sets(self):
"""Retrieve list of all set names in Yugioh Prices database"""
url = f"{self.url}/card_sets"
return self.__make_request(url)
def get_rising_and_falling(self):
"""Retrieve rising and falling cards list"""
url = f"{self.url}/rising_and_falling"
return self.__make_request(url)
def get_top_100(self, rarity=None):
"""Retrieve Top 100 most expensive cards"""
url = f"{self.url}/top_100_cards"
if rarity:
url = f"{url}?rarity={rarity}"
return self.__make_request(url)
def get_card_names(self):
"""Retrieve all cards name"""
url = f"{self.url}/card_names"
return self.__make_request(url)
def get_card_data(self, name):
"""Retrieve all information for a card using its name"""
url = f"{self.url}/card_data/{name}"
return self.__make_request(url)
def get_card_versions(self, name):
"""Retrieve a list of all known versions of a card using its name"""
url = f"{self.url}/card_versions/{name}"
return self.__make_request(url)
def get_card_support(self, name):
"""Retrieve a list of support cards for a given card using its name"""
url = f"{self.url}/card_support/{name}"
return self.__make_request(url)
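# A minimal usage sketch (hypothetical card name and rarity; requires network
# access to yugiohprices.com; not part of the original file):
if __name__ == "__main__":
    api = YGOPricesAPI()
    print(api.get_sets())                          # GET /card_sets
    print(api.get_price_by_name("Dark Magician"))  # GET /get_card_prices/<name>
    print(api.get_top_100(rarity="Ultra Rare"))    # GET /top_100_cards?rarity=...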
openstack/compute-hyperv | compute_hyperv/nova/imagecache.py | Python | apache-2.0 | 11,925 | 0
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
import re
from nova.compute import utils as compute_utils
from nova import exception
from nova import utils
from nova.virt import imagecache
from nova.virt import images
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import uuidutils
from compute_hyperv.i18n import _
import compute_hyperv.nova.conf
from compute_hyperv.nova import pathutils
LOG = logging.getLogger(__name__)
CONF = compute_hyperv.nova.conf.CONF
class ImageCache(imagecache.ImageCacheManager):
def __init__(self):
super(ImageCache, self).__init__()
self._pathutils = pathutils.PathUtils()
self._vhdutils = utilsfactory.get_vhdutils()
self.used_images = []
self.unexplained_images = []
self.originals = []
def _get_root_vhd_size_gb(self, instance):
if instance.old_flavor:
return instance.old_flavor.root_gb
else:
return instance.flavor.root_gb
def _resize_and_cache_vhd(self, instance, vhd_path):
vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * units.Gi
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
if root_vhd_internal_size < vhd_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=root_vhd_size, image_size=vhd_size)
if root_vhd_internal_size > vhd_size:
path_parts = os.path.splitext(vhd_path)
resized_vhd_path = '%s_%s%s' % (path_parts[0],
root_vhd_size_gb,
path_parts[1])
lock_path = os.path.dirname(resized_vhd_path)
lock_name = "%s-cache.lock" % os.path.basename(resized_vhd_path)
@utils.synchronized(name=lock_name, external=True,
lock_path=lock_path)
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
self._pathutils.remove(resized_vhd_path)
copy_and_resize_vhd()
return resized_vhd_path
def get_cached_image(self, context, instance, rescue_image_id=None):
image_id = rescue_image_id or instance.image_ref
image_type = self.get_image_format(context, image_id, instance)
trusted_certs = instance.trusted_certs
image_path, already_exists = self.cache_image(
context, image_id, image_type, trusted_certs)
# Note: rescue images are not resized.
is_vhd = image_path.split('.')[-1].lower() == 'vhd'
if (CONF.use_cow_images and is_vhd and not rescue_image_id):
# Resize the base VHD image as it's not possible to resize a
# differencing VHD. This does not apply to VHDX images.
resized_image_path = self._resize_and_cache_vhd(instance,
image_path)
if resized_image_path:
return resized_image_path
if rescue_image_id:
self._verify_rescue_image(instance, rescue_image_id, image_path)
return image_path
def fetch(self, context, image_id, path, trusted_certs=None):
with compute_utils.disk_ops_semaphore:
images.fetch(context, image_id, path, trusted_certs)
def append_image_format(self, path, image_type, do_rename=True):
if image_type == 'iso':
format_ext = 'iso'
else:
# Historically, the Hyper-V driver allowed VHDX images registered
# as VHD. We'll continue to do so for now.
format_ext = self._vhdutils.get_vhd_format(path)
new_path = path + '.' + format_ext.lower()
if do_rename:
self._pathutils.rename(path, new_path)
return new_path
def get_image_format(self, context, image_id, instance=None):
image_format = None
if instance:
image_format = instance.system_metadata['image_disk_format']
if not image_format:
image_info = images.get_info(context, image_id)
image_format = image_info['disk_format']
return image_format
def cache_image(self, context, image_id,
image_type=None, trusted_certs=None):
if not image_type:
image_type = self.get_image_format(context, image_id)
base_image_dir = self._pathutils.get_base_vhd_dir()
base_image_path = os.path.join(base_image_dir, image_id)
lock_name = "%s-cache.lock" % image_id
        @utils.synchronized(name=lock_name, external=True,
lock_path=base_image_dir)
def fetch_image_if_not_existing():
fetched = False
            image_path = None
for format_ext in ['vhd', 'vhdx', 'iso']:
test_path = base_image_path + '.' + format_ext
if self._pathutils.exists(test_path):
image_path = test_path
self._update_image_timestamp(image_id)
break
if not image_path:
try:
self.fetch(context, image_id, base_image_path,
trusted_certs)
fetched = True
image_path = self.append_image_format(
base_image_path, image_type)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_image_path):
self._pathutils.remove(base_image_path)
return image_path, fetched
return fetch_image_if_not_existing()
def _verify_rescue_image(self, instance, rescue_image_id,
rescue_image_path):
rescue_image_info = self._vhdutils.get_vhd_info(rescue_image_path)
rescue_image_size = rescue_image_info['VirtualSize']
flavor_disk_size = instance.flavor.root_gb * units.Gi
if rescue_image_size > flavor_disk_size:
err_msg = _('Using a rescue image bigger than the instance '
'flavor disk size is not allowed. '
'Rescue image
lmazuel/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/models/integration_account_map_paged.py | Python | mit | 926 | 0.00108
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class IntegrationAccountMapPaged(Paged):
"""
    A paging container for iterating over a list of IntegrationAccountMap objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[IntegrationAccountMap]'}
}
def __init__(self, *args, **kwargs):
super(IntegrationAccountMapPaged, self).__init__(*args, **kwargs)
buchwj/xvector | client/xVClient/ui/LoginWidgetUI.py | Python | gpl-3.0 | 11,829 | 0.004734
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'LoginWidget.ui'
#
# Created: Wed Jul 13 22:46:23 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_LoginWidget(object):
def setupUi(self, LoginWidget):
LoginWidget.setObjectName(_fromUtf8("LoginWidget"))
LoginWidget.resize(299, 342)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(LoginWidget.sizePolicy().hasHeightForWidth())
LoginWidget.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(LoginWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.formTabs = QtGui.QTabWidget(LoginWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.formTabs.sizePolicy().hasHeightForWidth())
self.formTabs.setSizePolicy(sizePolicy)
self.formTabs.setObjectName(_fromUtf8("formTabs"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.lblUsername = QtGui.QLabel(self.tab)
self.lblUsername.setObjectName(_fromUtf8("lblUsername"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblUsername)
self.txtUsername = QtGui.QLineEdit(self.tab)
self.txtUsername.setObjectName(_fromUtf8("txtUsername"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.txtUsername)
self.lblPassword = QtGui.QLabel(self.tab)
self.lblPassword.setObjectName(_fromUtf8("lblPassword"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblPassword)
self.txtPassword = QtGui.QLineEdit(self.tab)
self.txtPassword.setEchoMode(QtGui.QLineEdit.Password)
self.txtPassword.setObjectName(_fromUtf8("txtPassword"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.txtPassword)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnLogin = QtGui.QPushButton(self.tab)
self.btnLogin.setObjectName(_fromUtf8("btnLogin"))
self.horizontalLayout.addWidget(self.btnLogin)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.formLayout.setLayout(2, QtGui.QFormLayout.SpanningRole, self.horizontalLayout)
self.verticalLayout_2.addLayout(self.formLayout)
self.grpServerNews = QtGui.QGroupBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.grpServerNews.sizePolicy().hasHeightForWidth())
self.grpServerNews.setSizePolicy(sizePolicy)
self.grpServerNews.setObjectName(_fromUtf8("grpServerNews"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.grpServerNews)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.scrServerNews = QtGui.QScrollArea(self.grpServerNews)
self.scrServerNews.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrServerNews.setWidgetResizable(True)
self.scrServerNews.setObjectName(_fromUtf8("scrServerNews"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 255, 157))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.scrServerNews.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_4.addWidget(self.scrServerNews)
self.verticalLayout_2.addWidget(self.grpServerNews)
self.formTabs.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab_2)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblRegUsername = QtGui.QLabel(self.tab_2)
self.lblRegUsername.setObjectName(_fromUtf8("lblRegUsername"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblRegUsername)
self.txtRegUsername = QtGui.QLineEdit(self.tab_2)
self.txtRegUsername.setObjectName(_fromUtf8("txtRegUsername"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.txtRegUsername)
self.lblRegPassword = QtGui.QLabel(self.tab_2)
self.lblRegPassword.setObjectName(_fromUtf8("lblRegPassword"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblRegPassword)
self.txtRegPassword = QtGui.QLineEdit(self.tab_2)
self.txtRegPassword.setEchoMode(QtGui.QLineEdit.Password)
self.txtRegPassword.setObjectName(_fromUtf8("txtRegPassword"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.txtRegPassword)
self.lblRegConfPwd = QtGui.QLabel(self.tab_2)
self.lblRegConfPwd.setObjectName(_fromUtf8("lblRegConfPwd"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblRegConfPwd)
self.txtRegConfPwd = QtGui.QLineEdit(self.tab_2)
self.txtRegConfPwd.setEchoMode(QtGui.QLineEdit.Password)
self.txtRegConfPwd.setObjectName(_fromUtf8("txtRegConfPwd"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.txtRegConfPwd)
self.lblRegEmail = QtGui.QLabel(self.tab_2)
self.lblRegEmail.setObjectName(_fromUtf8("lblRegEmail"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblRegEmail)
self.txtRegEmail = QtGui.QLineEdit(self.tab_2)
self.txtRegEmail.setObjectName(_fromUtf8("txtRegEmail"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.txtRegEmail)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.btnRegister = QtGui.QPushButton(self.tab_2)
self.btnRegister.setObjectName(_fromUtf8("btnRegister"))
self.horizontalLayout_2.addWidget(self.btnRegister)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem3)
self.formLayout_2.setLayout(4, QtGui.QFormLayout.SpanningRole, self.horizontalLayout_2)
self.verticalLayout_3.addLayout(self.formLayout_2)
self.lblRegHint = QtGui.QLabel(self.tab_2)
self.lblRegHint.setFrameShape(QtGui.QFrame.Box)
self.lblRegHint.setFrameShadow(QtGui.QFrame.Raised)
self.lblRegHint.setMidLineWidth(0)
self.lblRegHint.setText(_fromUtf8(""))
self.lblRegHint.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lblRegHint.setObjectName(_fromUtf8("lblRegHint"))
self.v
|
kvar/ansible
|
lib/ansible/modules/network/fortios/fortios_system_replacemsg_traffic_quota.py
|
Python
|
gpl-3.0
| 10,207
| 0.001568
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_traffic_quota
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system_replacemsg feature and traffic_quota category.
      Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_traffic_quota:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_traffic_quota:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_traffic_quota:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_replacemsg_traffic_quota_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
    return dictionary
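# Example (hypothetical input): keys outside option_list, or set to None, are
# dropped, e.g. {'buffer': 'msg', 'header': None, 'extra': 1} -> {'buffer': 'msg'}.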
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
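# Example: underscore_to_hyphen({'msg_type': 'x'}) -> {'msg-type': 'x'}; lists
# are walked element by element and non-dict leaves pass through unchanged.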
def system_replacemsg_traffic_quota(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_traffic_quota_data = data['system_replacemsg_traffic_quota']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_traffic_quota_data(system_replacemsg_traffic_quota_data))
if state == "present":
return fos.set('system.replacemsg',
'traffic-quota',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'traffic-quota',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_traffic_quota']:
resp = system_replacemsg_traffic_quota(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_
|
thierry1985/project-1022
|
domains/driverlogshift/readsetting.py
|
Python
|
mit
| 965
| 0.048705
|
#!/usr/bin/python
import re,os
def compare_file(x,y):
dr = re.compile(r"[\S]+?([0-9]+)[\S]+")
orderx = -1
ordery = -1
m = dr.match(x)
if m:
orderx = int(m.group(1))
m = dr.match(y)
if m:
ordery = int(m.group(1))
if orderx == -1 or ordery== -1:
return 0
if orderx>ordery:
return 1
elif orderx==ordery:
return 0
else:
return -1
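# Example: compare_file("p03.pddl", "p12.pddl") returns -1 because 3 < 12;
# names where no number is found compare as equal (0).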
file_list = []
dd = re.compile(r"p[\S]+?pddl$")
for f in os.listdir("./"):
if f == "readsoln.py":
        continue
m = dd.match(f)
if m:
file_list.append( f )
file_list.sort(compare_file)
index = 1
for f in file_list:
file = open(f,"r")
for line in file.readlines():
if "-" not in line:
continue
        t = line.split("-")[1]
        t = t.strip()  # assign back: str.strip() returns a new string
        l = line.split("-")[0]
        l = l.strip()
if "truck" in t:
print "truck ", l.count("truck"),
elif "driver" in t:
print "driver", l.count("driver"),
elif "obj" in t:
print "Object ", l.count("package"),
print "--- ",f
|
guilhermefloriani/signature-recognition
|
network.py
|
Python
|
mit
| 4,512
| 0.005319
|
import numpy as np
import random
class NeuralNetwork():
def __init__(self, sizes):
# sizes is an array with the number of units in each layer
        # [2,3,1] means 2 neurons of input, 3 in the hidden layer and 1 as output
self.num_layers = len(sizes)
self.sizes = sizes
# the syntax [1:] gets all elements of sizes array beginning at index 1 (second position)
        # np.random.randn(rows, cols) returns a matrix of random elements
# np.random.randn(2,1) =>
# array([[ 0.68265325],
# [-0.52939261]])
# biases will have one vector per layer
self.biases = [np.random.randn(y,1) for y in sizes[1:]]
#zip returns a tuple in which x is the element of the first array and y the element of the second
#sizes[:-1] returns all the elements till the second to last
        #sizes[1:] returns all the elements from the second one on
# [2,3,1] means:
# * matrix of 3 rows and 2 columns -- will be multiplied by the inputs
# * matrix of 1 row and 3 columns -- will multiply the hidden layer and produce the output
self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])]
def feedforward(self, a):
for b,w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a) + b)
return a
def separate_batches(self, training_data, batch_size):
random.shuffle(training_data)
n = len(training_data)
# extracts chunks of data from the training set
        # the range function will return indices starting with 0 until n, with a step size of batch_size
        # batches, then, will have several chunks of the main set, each defined by the batch_size variable
        return [training_data[i:i + batch_size] for i in range(0, n, batch_size)]
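    # Example: with n = 10 and batch_size = 3 the start indices are 0, 3, 6, 9,
    # yielding shuffled chunks of sizes 3, 3, 3 and 1.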
def update_batches(self, batches, alpha):
for batch in batches:
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
m = len(batch)
# x is a array of length 901
# y is a single value indicating the digit represented by the 901 elements
for x, y in batch:
delta_b, delta_w = self.backpropagation(x, y)
nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)]
nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)]
self.weights = [w - (alpha / m) * nw for w, nw in zip(self.weights, nabla_w)]
self.biases = [b - (alpha / m) * nb for b, nb in zip(self.biases, nabla_b)]
def backpropagation(self, x, y):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
# layer-bound b and w
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def sgd(self, training_data, epochs, batch_size, alpha, test_data):
n_test = len(test_data)
for epoch in range(epochs):
batches = self.separate_batches(training_data, batch_size)
self.update_batches(batches, alpha)
print("Epoch {0}: {1} / {2}".format(epoch, self.evaluate(test_data), n_test))
def evaluate(self, test_data):
#r = [self.feedforward(x) for (x, y) in test_data]
#for a in r:
# print("{0}, {1}".format(format(a[0][0], 'f'), format(a[1][0], 'f')))
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
return output_activations - y
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def sigmoid_prime(z):
return sigmoid(z) * (1-sigmoid(z))
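# Minimal usage sketch (hypothetical data: each x is a 901-element column
# vector, each y the matching target, as expected by backpropagation above):
# net = NeuralNetwork([901, 30, 2])
# net.sgd(training_data, epochs=30, batch_size=10, alpha=3.0, test_data=test_data)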
|
ASCIT/donut-python
|
donut/modules/calendar/permissions.py
|
Python
|
mit
| 251
| 0
|
import enum
class calendar_permissions(enum.IntEnum):
ASCIT = 21
AVERY = 22
BECHTEL = 23
BLACKER = 24
DABNEY = 25
    FLEMING = 26
LLOYD = 27
PAGE = 28
RICKETTS = 29
RUDDOCK = 30
OTHER = 31
ATHLETICS = 32
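# Example: members behave as ints, e.g. calendar_permissions.AVERY == 22 and
# int(calendar_permissions.ASCIT) == 21, so they can be stored as permission IDs.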
|
selimnairb/2014-02-25-swctest
|
lessons/thw-python-debugging/basic_exceptions/syntax_error.py
|
Python
|
bsd-2-clause
| 223
| 0.008969
|
#!/usr/bin/env python
"""
SyntaxError -
There's something wrong with how you wrote the surrounding code.
Check your parentheses, and make sure there are colons where needed.
"""
while True
print "Where's the colon at?"
|
rahulunair/nova
|
nova/tests/functional/api_sample_tests/test_instance_actions.py
|
Python
|
apache-2.0
| 5,930
| 0
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.functional import api_samples_test_base
class ServerActionsSampleJsonTest(test_servers.ServersSampleBase):
microversion = None
ADMIN_API = True
sample_dir = 'os-instance-actions'
def setUp(self):
super(ServerActionsSampleJsonTest, self).setUp()
# Create and stop a server
self.uuid = self._post_server()
self._get_response('servers/%s/action' % self.uuid, 'POST',
'{"os-stop": null}')
response = self._do_get('servers/%s/os-instance-actions' % self.uuid)
response_data = api_samples_test_base.pretty_data(response.content)
actions = api_samples_test_base.objectify(response_data)
self.action_stop = actions['instanceActions'][0]
self._wait_for_state_change({'id': self.uuid}, 'SHUTOFF')
def _get_subs(self):
return {
'uuid': self.uuid,
'project_id': self.action_stop['project_id']
}
def test_instance_action_get(self):
req_id = self.action_stop['request_id']
response = self._do_get('servers/%s/os-instance-actions/%s' %
(self.uuid, req_id))
# Non-admins can see event details except for the "traceback" field
# starting in the 2.51 microversion.
if self.ADMIN_API:
name = 'instance-action-get-resp'
else:
name = 'instance-action-get-non-admin-resp'
self._verify_response(name, self._get_subs(), response, 200)
def test_instance_actions_list(self):
response = self._do_get('servers/%s/os-instance-actions' % self.uuid)
self._verify_response('instance-actions-list-resp', self._get_subs(),
response, 200)
class ServerActionsV221SampleJsonTest(ServerActionsSampleJsonTest):
microversion = '2.21'
scenarios = [('v2_21', {'api_major_version': 'v2.1'})]
class ServerActionsV251AdminSampleJsonTest(ServerActionsSampleJsonTest):
"""Tests the 2.51 microversion for the os-instance-actions API.
The 2.51 microversion allows non-admins to see instance action event
details *except* for the traceback field.
The tests in this class are run as an admin user so all fields will be
displayed.
"""
microversion = '2.51'
    scenarios = [('v2_51', {'api_major_version': 'v2.1'})]
class ServerActionsV251NonAdminSampleJsonTest(ServerActionsSampleJsonTest):
"""Tests the 2.51 microversion for the os-instance-actions API.
The 2.51 microversion allows non-admins to see instance action event
details *except* for the traceback field.
The tests in this class are run as a non-admin user so all fields except
for the ``traceback`` field will be displayed.
"""
ADMIN_API = False
microversion = '2.51'
scenarios = [('v2_51', {'api_major_version': 'v2.1'})]
class ServerActionsV258SampleJsonTest(ServerActionsV251AdminSampleJsonTest):
microversion = '2.58'
scenarios = [('v2_58', {'api_major_version': 'v2.1'})]
def test_instance_actions_list_with_limit(self):
response = self._do_get('servers/%s/os-instance-actions'
'?limit=1' % self.uuid)
self._verify_response('instance-actions-list-with-limit-resp',
self._get_subs(), response, 200)
def test_instance_actions_list_with_marker(self):
marker = self.action_stop['request_id']
response = self._do_get('servers/%s/os-instance-actions'
'?marker=%s' % (self.uuid, marker))
self._verify_response('instance-actions-list-with-marker-resp',
self._get_subs(), response, 200)
def test_instance_actions_with_changes_since(self):
stop_action_time = self.action_stop['start_time']
response = self._do_get(
'servers/%s/os-instance-actions'
'?changes-since=%s' % (self.uuid, stop_action_time))
self._verify_response(
'instance-actions-list-with-changes-since',
self._get_subs(), response, 200)
class ServerActionsV258NonAdminSampleJsonTest(ServerActionsV258SampleJsonTest):
ADMIN_API = False
class ServerActionsV262SampleJsonTest(ServerActionsV258SampleJsonTest):
microversion = '2.62'
scenarios = [('v2_62', {'api_major_version': 'v2.1'})]
def _get_subs(self):
return {
'uuid': self.uuid,
'project_id': self.action_stop['project_id'],
'event_host': r'\w+',
'event_hostId': '[a-f0-9]+'
}
class ServerActionsV262NonAdminSampleJsonTest(ServerActionsV262SampleJsonTest):
ADMIN_API = False
class ServerActionsV266SampleJsonTest(ServerActionsV262SampleJsonTest):
microversion = '2.66'
scenarios = [('v2_66', {'api_major_version': 'v2.1'})]
def test_instance_actions_with_changes_before(self):
stop_action_time = self.action_stop['updated_at']
response = self._do_get(
'servers/%s/os-instance-actions'
'?changes-before=%s' % (self.uuid, stop_action_time))
self._verify_response(
'instance-actions-list-with-changes-before',
self._get_subs(), response, 200)
|
nickraptis/fidibot
|
src/modules/help.py
|
Python
|
bsd-2-clause
| 2,615
| 0.000765
|
# Author: Nick Raptis <airscorp@gmail.com>
"""
Module for listing commands and help.
"""
from basemodule import BaseModule, BaseCommandContext
from alternatives import _
class HelpContext(BaseCommandContext):
def cmd_list(self, argument):
"""List commands"""
arg = argument.lower()
index = self.bot.help_index
public = "public commands -- %s" % " ".join(index['public'])
private = "private commands -- %s" % " ".join(index['private'])
if 'all' in arg or 'both' in arg:
output = "\n".join((public, private))
elif 'pub' in arg or self.target.startswith('#'):
output = public
elif 'priv' in arg or not self.target.startswith('#'):
output = private
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
self.send(self.target, output)
def cmd_modules(self, argument):
"""List active modules"""
index = self.bot.help_index
output = "active modules -- %s" % " ".join(index['modules'].keys())
self.send(self.target, output)
def cmd_help(self, argument):
"""Get help on a command or module"""
arg = argument.lower()
index = self.bot.help_index
target = self.target
args = arg.split()
if not args:
s = "usage: help <command> [public|private] / help module <module>"
self.send(target, s)
elif args[0] == 'module':
args.pop(0)
if not args:
self.send(target, "usage: help module <module>")
else:
help_item = index['modules'].get(args[0])
if help_item:
self.send(target, help_item['summary'])
else:
                self.send(target, _("No help for %s"), args[0])
else:
args.append("")
cmd = args.pop(0)
cmd_type = args.pop(0)
if 'pu' in cmd_type or self.target.startswith('#'):
cmd_type = 'public'
elif 'pr' in cmd_type or not self.target.startswith('#'):
cmd_type = 'private'
else:
# we shouldn't be here
                self.logger.error("cmd_help")
return
help_item = index[cmd_type].get(cmd)
if help_item:
self.send(target, index[cmd_type][cmd]['summary'])
else:
self.send(target, _("No help for %s"), cmd)
class HelpModule(BaseModule):
context_class = HelpContext
module = HelpModule
|
stefwalter/cockpit
|
test/common/cdp.py
|
Python
|
lgpl-2.1
| 9,003
| 0.002777
|
# -*- coding: utf-8 -*-
import fcntl
import glob
import json
import os
import random
import shutil
import socket
import subprocess
import sys
import tempfile
import time
TEST_DIR = os.path.normpath(os.path.dirname(os.path.realpath(os.path.join(__file__, ".."))))
def browser_path(headless=True):
"""Return path to CDP browser.
Support the following locations:
- /usr/lib*/chromium-browser/headless_shell (chromium-headless RPM), if
headless is true
- "chromium-browser", "chromium", or "google-chrome" in $PATH (distro package)
- node_modules/chromium/lib/chromium/chrome-linux/chrome (npm install chromium)
Exit with an error if none is found.
"""
if headless:
g = glob.glob("/usr/lib*/chromium-browser/headless_shell")
if g:
return g[0]
    p = subprocess.check_output("which chromium-browser || which chromium || which google-chrome || true", shell=True).strip()
if p:
return p
p = os.path.join(os.path.dirname(TEST_DIR), "node_modules/chromium/lib/chromium/chrome-linux/chrome")
if os.access(p, os.X_OK):
return p
return None
def jsquote(str):
return json.dumps(str)
class CDP:
    def __init__(self, lang=None, headless=True, verbose=False, trace=False, inject_helpers=[]):
self.lang = lang
self.timeout = 60
self.valid = False
self.headless = headless
self.verbose = verbose
self.trace = trace
self.inject_helpers = inject_helpers
self._driver = None
self._browser = None
self._browser_home = None
self._browser_path = None
self._cdp_port_lockfile = None
def invoke(self, fn, **kwargs):
"""Call a particular CDP method such as Runtime.evaluate
Use command() for arbitrary JS code.
"""
trace = self.trace and not kwargs.get("no_trace", False)
try:
del kwargs["no_trace"]
except KeyError:
pass
cmd = fn + "(" + json.dumps(kwargs) + ")"
# frame support for Runtime.evaluate(): map frame name to
# executionContextId and insert into argument object; this must not be quoted
# see "Frame tracking" in cdp-driver.js for how this works
if fn == 'Runtime.evaluate' and self.cur_frame:
cmd = "%s, contextId: getFrameExecId(%s)%s" % (cmd[:-2], jsquote(self.cur_frame), cmd[-2:])
if trace:
print("-> " + kwargs.get('trace', cmd))
# avoid having to write the "client." prefix everywhere
cmd = "client." + cmd
res = self.command(cmd)
if trace:
if "result" in res:
print("<- " + repr(res["result"]))
else:
print("<- " + repr(res))
return res
def command(self, cmd):
if not self._driver:
self.start()
self._driver.stdin.write(cmd + "\n")
line = self._driver.stdout.readline()
if not line:
self.kill()
raise RuntimeError("CDP broken")
try:
res = json.loads(line)
except ValueError:
print(line.strip())
raise
if "error" in res:
if self.trace:
print("<- raise %s" % str(res["error"]))
raise RuntimeError(res["error"])
return res["result"]
def claim_port(self, port):
f = None
try:
f = open(os.path.join(tempfile.gettempdir(), ".cdp-%i.lock" % port), "w")
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
self._cdp_port_lockfile = f
return True
except (IOError, OSError):
if f:
f.close()
return False
def find_cdp_port(self):
"""Find an unused port and claim it through lock file"""
for retry in range(100):
# don't use the default CDP port 9222 to avoid interfering with running browsers
port = random.randint (9223, 10222)
if self.claim_port(port):
return port
else:
raise RuntimeError("unable to find free port")
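    # Sketch of the claiming scheme: each instance flocks a per-port lockfile
    # in the temp dir, so parallel test runs never hand out the same CDP port.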
def get_browser_path(self):
if self._browser_path is None:
self._browser_path = browser_path(self.headless)
return self._browser_path
def start(self):
environ = os.environ.copy()
if self.lang:
environ["LC_ALL"] = self.lang
self.cur_frame = None
# allow attaching to external browser
cdp_port = None
if "TEST_CDP_PORT" in os.environ:
p = int(os.environ["TEST_CDP_PORT"])
if self.claim_port(p):
# can fail when a test starts multiple browsers; only show the first one
cdp_port = p
if not cdp_port:
# start browser on a new port
cdp_port = self.find_cdp_port()
self._browser_home = tempfile.mkdtemp()
environ = os.environ.copy()
environ["HOME"] = self._browser_home
environ["LC_ALL"] = "C.utf8"
# this might be set for the tests themselves, but we must isolate caching between tests
try:
del environ["XDG_CACHE_HOME"]
except KeyError:
pass
exe = self.get_browser_path()
if not exe:
raise SystemError("chromium not installed")
if self.headless:
argv = [exe, "--headless"]
else:
argv = [os.path.join(TEST_DIR, "common/xvfb-wrapper"), exe]
# sandboxing does not work in Docker container
self._browser = subprocess.Popen(
argv + ["--disable-gpu", "--no-sandbox", "--disable-setuid-sandbox",
"--disable-namespace-sandbox", "--disable-seccomp-filter-sandbox",
"--disable-sandbox-denial-logging", "--window-size=1280x1200",
"--remote-debugging-port=%i" % cdp_port, "about:blank"],
env=environ, close_fds=True)
if self.verbose:
sys.stderr.write("Started %s (pid %i) on port %i\n" % (exe, self._browser.pid, cdp_port))
# wait for CDP to be up
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for retry in range(300):
try:
s.connect(('127.0.0.1', cdp_port))
break
except socket.error:
time.sleep(0.1)
else:
raise RuntimeError('timed out waiting for browser to start')
# now start the driver
if self.trace:
# enable frame/execution context debugging if tracing is on
environ["TEST_CDP_DEBUG"] = "1"
self._driver = subprocess.Popen(["%s/cdp-driver.js" % os.path.dirname(__file__), str(cdp_port)],
env=environ,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
close_fds=True)
self.valid = True
for inject in self.inject_helpers:
with open(inject) as f:
src = f.read()
# HACK: injecting sizzle fails on missing `document` in assert()
src = src.replace('function assert( fn ) {', 'function assert( fn ) { return true;')
self.invoke("Page.addScriptToEvaluateOnLoad", scriptSource=src, no_trace=True)
def kill(self):
self.valid = False
self.cur_frame = None
if self._driver:
self._driver.stdin.close()
self._driver.wait()
self._driver = None
if self._browser:
if self.verbose:
sys.stderr.write("Killing browser (pid %i)\n" % self._browser.pid)
try:
self._browser.terminate()
except OSError:
pass # ignore if it crashed for some reason
self._browser.wait()
self._browser = None
shutil.rmtree(self._browser_home, ignore_errors=True)
os.remove(self._cdp_port_lockfile.name)
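# Minimal usage sketch (assumes a server listening on the given URL; invoke()
# maps onto CDP methods such as Page.navigate):
# cdp = CDP(headless=True)
# cdp.invoke("Page.navigate", url="http://localhost:9090")
# cdp.kill()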
|
vahtras/loprop
|
tests/test_bond.py
|
Python
|
gpl-3.0
| 7,956
| 0.005279
|
import numpy as np
STR_NOBOND = """AU
3 1 2 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.34509720 3.78326969 -0.00000000 -0.00000000 3.96610412 0.00000000 3.52668267 0.00000000 -0.00000000 -2.98430053 0.00000000 -0.00000000 0.00000000 -0.00000000 1.26744725 -0.00000000 2.16730601
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 1.15495299 0.60859677 -0.00000000 1.21104235 -4.46820475 0.00000000 -4.55909022 -0.05601735 0.00000000 -3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.16057903 -0.00000000 -0.11299312 1.55235099 -0.00000000 -1.15495299 0.60859677 0.00000000 1.21104235 4.46820475 -0.00000000 -4.55909022 0.05601735 0.00000000 3.72029878 -0.00000000 0.46039909 -0.00000000 -2.40410436
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
STR_BOND = """AU
5 1 22 1
1 0.00000000 0.00000000 0.00000000 -0.66387672 0.00000000 -0.00000000 0.41788500 1.19165567 0.00000000 0.00000000 2.74891057 0.00000000 1.33653383 0.00000000 0.00000000 4.18425484 0.00000000 -0.00000000 -0.00000000 -0.00000000 0.19037387 0.00000000 5.96033807
1 0.71521500 0.00000000 0.55358000 0.00000000 -0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 1.98015668 -0.00000000 2.19014883 -7.24839104 0.00000000 -7.16855538 0.59534043 0.00000000 -5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 1.43043000 0.00000000 1.10716000 0.33193836 -0.12774005 0.00000000 -0.07659922 0.25654398 0.00000000 0.16487465 -0.00000000 -0.00000000 0.11596794 -0.84400923 0.00000000 -0.97481253 -0.35368757 -0.00000000 -0.84709793 0.00000000 -0.07813759 0.00000000 -0.50758833
1 -0.71521500 0.00000000 0.55358000 0.00000000 0.06567795 -0.00000000 -0.07278780 2.59161403 -0.00000000 1.21719355 -1.98015668 0.00000000 2.19014883 7.24839104 -0.00000000 -7.16855538 -0.59534043 0.00000000 5.74640170 -0.00000000 1.07707338 -0.00000000 -3.79303206
1 -1.43043000 0.00000000 1.10716000 0.33193836 0.12774005 0.00000000 -0.07659922 0.25654398 -0.00000000 -0.16487465 0.00000000 0.00000000 0.11596794 0.84400923 -0.00000000 -0.97481253 0.35368757 0.00000000 0.84709793 -0.00000000 -0.07813759 -0.00000000 -0.50758833
Time used in Loprop : 0.45 (cpu) 0.11 (wall)
"""
class TestBondH2O:
"""H2O tests bonded versus non-bonden results"""
def setup(self):
# Read in string that is for no bonds output
lines = [line for line in STR_BOND.split("\n") if len(line.split()) > 10]
a0 = 1.0
self.n_bond = np.array([8.0, 0.0, 1.0, 0.0, 1.0], dtype=float)
self.r_bond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_bond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_bond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_bond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_bond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_bond = np.einsum("ij,i", self.r_bond, self.n_bond) / self.n_bond.sum()
# Read in string that is for bonds output -b
lines = [line for line in STR_NOBOND.split("\n") if len(line.split()) > 10]
self.n_nobond = np.array([8.0, 1.0, 1.0], dtype=float)
self.r_nobond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_nobond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_nobond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_nobond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_nobond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_nobond = (
np.einsum("ij,i", self.r_nobond, self.n_nobond) / self.n_nobond.sum()
)
def test_bond_nobond_properties(self):
"""Center-of-charge equality"""
np.testing.assert_allclose(self.coc_bond, self.coc_nobond)
def test_a(self):
"""Polarizability equality"""
a_tot_bond = np.sum(self.a_bond)
a_tot_nobond = np.sum(self.a_nobond)
np.testing.assert_allclose(a_tot_bond, a_tot_nobond)
def test_b(self):
"""Hyperpolarizability equality"""
b_tot_bond = np.sum(self.b_bond)
b_tot_nobond = np.sum(self.b_nobond)
np.testing.assert_allclose(b_tot_bond, b_tot_nobond)
def test_dip(self):
"""Dipole equality"""
dip_bond = np.einsum(
"ij,i", (self.r_bond - self.coc_bond), self.q_bond
) + self.d_bond.sum(axis=0)
dip_nobond = np.einsum(
"ij,i", (self.r_nobond - self.coc_nobond), self.q_nobond
) + self.d_nobond.sum(axis=0)
np.testing.assert_allclose(dip_bond, dip_nobond)
class TestBondH2S:
"""H2O tests bonded versus non-bonden results"""
def setup(self):
# Read in string that is for no bonds output
lines = [line for line in STR_BOND.split("\n") if len(line.split()) > 10]
a0 = 1.0
self.n_bond = np.array([16.0, 0.0, 1.0, 0.0, 1.0], dtype=float)
self.r_bond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_bond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_bond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_bond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_bond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_bond = np.einsum("ij,i", self.r_bond, self.n_bond) / self.n_bond.sum()
# Read in string that is for bonds output -b
lines = [line for line in STR_NOBOND.split("\n") if len(line.split()) > 10]
self.n_nobond = np.array([16.0, 1.0, 1.0], dtype=float)
self.r_nobond = a0 * np.array([l.split()[1:4] for l in lines], dtype=float)
self.q_nobond = np.array([l.split()[4] for l in lines], dtype=float)
self.d_nobond = np.array([l.split()[5:8] for l in lines], dtype=float)
self.a_nobond = np.array([l.split()[8:15] for l in lines], dtype=float)
self.b_nobond = np.array([l.split()[15:26] for l in lines], dtype=float)
self.coc_nobond = (
np.einsum("ij,i", self.r_nobond, self.n_nobond) / self.n_nobond.sum()
)
def test_bond_nobond_properties(self):
"""Center-of-charge equality"""
np.testing.assert_allclose(self.coc_bond, self.coc_nobond)
def test_a(self):
"""Polarizability equality"""
a_tot_bond = np.sum(self.a_bond)
a_tot_nobond = np.sum(self.a_nobond)
np.testing.assert_allclose(a_tot_bond, a_tot_nobond)
def test_b(self):
"""Hyperpolarizability equality"""
b_tot_bond = np.sum(self.b_bond)
b_tot_nobond = np.sum(self.b_nobond)
np.testing.assert_allclose(b_tot_bond, b_tot_nobond)
def test_dip(self):
"""Dipole equality"""
dip_bond = np.einsum(
"ij,i", (self.r_bond - self.coc_bond), self.q_bond
) + self.d_bond.sum(axis=0)
dip_nobond = np.einsum(
"ij,i", (self.r_nobond - self.coc_nobond), self.q_nobond
) + self.d_nobond.sum(axis=0)
np.testing.assert_allclose(dip_bond, dip_nobond)
|
fanglinfang/myuw
|
myuw/test/cache.py
|
Python
|
apache-2.0
| 5,375
| 0
|
from django.test import TestCase
from restclients.mock_http import MockHTTP
from myuw.util.cache_implementation import MyUWCache
from restclients.models import CacheEntryTimed
from datetime import timedelta
CACHE = 'myuw.util.cache_implementation.MyUWCache'
class TestCustomCachePolicy(TestCase):
def test_sws_default_policies(self):
with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
cache = MyUWCache()
ok_response = MockHTTP()
ok_response.status = 200
ok_response.data = "xx"
response = cache.getCache('sws', '/student/myuwcachetest1', {})
self.assertEquals(response, None)
cache.processResponse("sws",
"/student/myuwcachetest1",
ok_response)
response = cache.getCache('sws', '/student/myuwcachetest1', {})
self.assertEquals(response["response"].data, 'xx')
cache_entry = CacheEntryTimed.objects.get(
service="sws",
url="/student/myuwcachetest1")
# Cached response is returned after 3 hours and 58 minutes
orig_time_saved = cache_entry.time_saved
cache_entry.time_saved = (orig_time_saved -
timedelta(minutes=(60 * 4)-2))
cache_entry.save()
response = cache.getCache('sws', '/student/myuwcachetest1', {})
self.assertNotEquals(response, None)
# Cached response is not returned after 4 hours and 1 minute
            cache_entry.time_saved = (orig_time_saved -
timedelta(minutes=(60 * 4)+1))
cache_entry.save()
response = cache.getCache('sws', '/student/myuwcachetest1', {})
self.assertEquals(response, None)
def test_sws_term_policy(self):
with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
cache = MyUWCache()
ok_response = MockHTTP()
ok_response.status = 200
ok_response.data = "xx"
response = cache.getCache(
'sws', '/student/v5/term/1014,summer.json', {})
self.assertEquals(response, None)
cache.processResponse(
"sws", "/student/v5/term/1014,summer.json", ok_response)
response = cache.getCache(
'sws', '/student/v5/term/1014,summer.json', {})
self.assertEquals(response["response"].data, 'xx')
cache_entry = CacheEntryTimed.objects.get(
service="sws", url="/student/v5/term/1014,summer.json")
# Cached response is returned after 29 days
orig_time_saved = cache_entry.time_saved
cache_entry.time_saved = orig_time_saved - timedelta(days=29)
cache_entry.save()
response = cache.getCache(
'sws', '/student/v5/term/1014,summer.json', {})
self.assertNotEquals(response, None)
# Cached response is not returned after 31 days
cache_entry.time_saved = orig_time_saved - timedelta(days=31)
cache_entry.save()
response = cache.getCache(
'sws', '/student/v5/term/1014,summer.json', {})
self.assertEquals(response, None)
def test_myplan_default(self):
with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
cache = MyUWCache()
ok_response = MockHTTP()
ok_response.status = 200
ok_response.data = "xx"
response = cache.getCache('myplan', '/api/plan/xx', {})
self.assertEquals(response, None)
cache.processResponse("myplan", "/api/plan/xx", ok_response)
response = cache.getCache('myplan', '/api/plan/xx', {})
self.assertEquals(response, None)
def test_default_policies(self):
with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
cache = MyUWCache()
ok_response = MockHTTP()
ok_response.status = 200
ok_response.data = "xx"
response = cache.getCache('no_such', '/student/myuwcachetest1', {})
self.assertEquals(response, None)
cache.processResponse(
"no_such", "/student/myuwcachetest1", ok_response)
response = cache.getCache('no_such', '/student/myuwcachetest1', {})
self.assertEquals(response["response"].data, 'xx')
cache_entry = CacheEntryTimed.objects.get(
service="no_such", url="/student/myuwcachetest1")
# Cached response is returned after 3 hours and 58 minutes
orig_time_saved = cache_entry.time_saved
cache_entry.time_saved = (orig_time_saved -
timedelta(minutes=(60 * 4)-2))
cache_entry.save()
response = cache.getCache('no_such', '/student/myuwcachetest1', {})
self.assertNotEquals(response, None)
# Cached response is not returned after 4 hours and 1 minute
cache_entry.time_saved = (orig_time_saved -
timedelta(minutes=(60 * 4)+1))
cache_entry.save()
response = cache.getCache('no_such', '/student/myuwcachetest1', {})
self.assertEquals(response, None)
|
MieRobot/Blogs
|
Blog_LinearRegression.py
|
Python
|
gpl-3.0
| 1,518
| 0.009223
|
# coding: utf-8
# In[2]:
# Import and read the datset
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("C://Users//Koyel//Desktop/MieRobotAdvert.csv")
dataset.head()
# In[3]:
dataset.describe()
# In[4]:
dataset.columns
# In[5]:
import seaborn as sns
get_ipython().magic('matplotlib inline')
sns.pairplot(dataset)
# In[6]:
sns.heatmap(dataset.corr())
# In[7]:
dataset.columns
# In[8]:
X = dataset[['Facebook', 'Twitter', 'Google']]
y = dataset['Hits']
# In[9]:
from sklearn.model_selection import train_test_split
# In[10]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
# In[11]:
from sklearn.linear_model import LinearRegression
# In[12]:
lm = LinearRegression()
# In[13]:
lm.fit(X_train,y_train)
# In[14]:
print(lm.intercept_)
# In[15]:
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Calculated Coefficient'])
coeff_df
# In[17]:
predictions = lm.predict(X_test)
# In[26]:
plt.ylabel("likes predicted")
plt.title("Likes predicated for MieRobot.com blogs",color='r')
plt.scatter(y_test,predictions)
# In[23]:
print(lm.score(X_test, y_test))  # call score; printing the bound method shows no value
# In[19]:
sns.distplot((y_test-predictions),bins=50);
# In[20]:
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# In[ ]:
|
dprog-philippe-docourt/django-qr-code
|
qr_code/qrcode/utils.py
|
Python
|
bsd-3-clause
| 30,602
| 0.003171
|
"""Utility classes and functions for configuring and setting up the content and the look of a QR code."""
import datetime
import decimal
from collections import namedtuple
from dataclasses import dataclass, asdict
from datetime import date
from typing import Optional, Any, Union, Sequence
from django.utils.html import escape
from qr_code.qrcode.constants import DEFAULT_MODULE_SIZE, SIZE_DICT, DEFAULT_ERROR_CORRECTION, DEFAULT_IMAGE_FORMAT
from segno import helpers
class QRCodeOptions:
"""
Represents the options used to create and draw a QR code.
"""
def __init__(
self,
size: Union[int, str] = DEFAULT_MODULE_SIZE,
border: int = 4,
version: Union[int, str, None] = None,
image_format: str = "svg",
error_correction: str = DEFAULT_ERROR_CORRECTION,
encoding: Optional[str] = "utf-8",
boost_error: bool = True,
micro: bool = False,
eci: bool = False,
dark_color: Union[tuple, str] = "#000",
light_color: Union[tuple, str] = "#fff",
finder_dark_color: bool = False,
finder_light_color: bool = False,
data_dark_color: bool = False,
data_light_color: bool = False,
version_dark_color: bool = False,
version_light_color: bool = False,
format_dark_color: bool = False,
format_light_color: bool = False,
alignment_dark_color: bool = False,
alignment_light_color: bool = False,
timing_dark_color: bool = False,
timing_light_color: bool = False,
separator_color: bool = False,
dark_module_color: bool = False,
quiet_zone_color: bool = False,
) -> None:
"""
:param size: The size of the QR code as an integer or a string. Default is *'m'*.
:type: str or int
:param int border: The size of the border (blank space around the code).
:param version: The version of the QR code gives the size of the matrix.
Default is *None* which mean automatic in order to avoid data overflow.
:param version: QR Code version. If the value is ``None`` (default), the
minimal version which fits for the input data will be used.
Valid values: "M1", "M2", "M3", "M4" (for Micro QR codes) or an
integer between 1 and 40 (for QR codes).
The `version` parameter is case insensitive.
:type version: int, str or None
:param str image_format: The graphics format used to render the QR code.
It can be either *'svg'* or *'png'*. Default is *'svg'*.
:param str error_correction: How much error correction that might be required
            to read the code. It can be either *'L'*, *'M'*, *'Q'*, or *'H'*. Default is *'M'*.
:param bool boost_error: Tells whether the QR code encoding engine tries to increase the error correction level
if it does not affect the version. Error correction level is not increased when it impacts the version of
the code.
:param bool micro: Indicates if a Micro QR Code should be created. Default: False
:param encoding: Indicates the encoding in mode "byte". By default
`encoding` is ``UTF-8``. When set to ``None``, the implementation tries to use the standard conform
ISO/IEC 8859-1 encoding and if it does not fit, it will use UTF-8. Note that no ECI mode indicator is
inserted by default (see :paramref:`eci`).
The `encoding` parameter is case-insensitive.
:type encoding: str or None
:param bool eci: Indicates if binary data which does not use the default
encoding (ISO/IEC 8859-1) should enforce the ECI mode. Since a lot
of QR code readers do not support the ECI mode, this feature is
disabled by default and the data is encoded in the provided
`encoding` using the usual "byte" mode. Set `eci` to ``True`` if
an ECI header should be inserted into the QR Code. Note that
the implementation may not know the ECI designator for the provided
`encoding` and may raise an exception if the ECI designator cannot
be found.
The ECI mode is not supported by Micro QR Codes.
:param dark_color: Color of the dark modules (default: black). The
color can be provided as ``(R, G, B)`` tuple, as hexadecimal
format (``#RGB``, ``#RRGGBB`` ``RRGGBBAA``), or web color
name (i.e. ``red``).
:param light_color: Color of the light modules (default: white).
See `color` for valid values. If light is set to ``None`` the
light modules will be transparent.
:param finder_dark_color: Color of the dark finder modules (default: same as ``dark_color``)
:param finder_light_color: Color of the light finder modules (default: same as ``light_color``)
:param data_dark_color: Color of the dark data modules (default: same as ``dark_color``)
:param data_light_color: Color of the light data modules (default: same as ``light_color``)
:param version_dark_color: Color of the dark version modules (default: same as ``dark_color``)
:param version_light_color: Color of the light version modules (default: same as ``light_color``)
:param format_dark_color: Color of the dark format modules (default: same as ``dark_color``)
:param format_light_color: Color of the light format modules (default: same as ``light_color``)
:param alignment_dark_color: Color of the dark alignment modules (default: same as ``dark_color``)
:param alignment_light_color: Color of the light alignment modules (default: same as ``light_color``)
:param timing_dark_color: Color of the dark timing pattern modules (default: same as ``dark_color``)
:param timing_light_color: Color of the light timing pattern modules (default: same as ``light_color``)
:param separator_color: Color of the separator (default: same as ``light_color``)
:param dark_module_color: Color of the dark module (default: same as ``dark_color``)
:param quiet_zone_color: Color of the quiet zone modules (default: same as ``light_color``)
The *size* parameter gives the size of each module of the QR code matrix. It can be either a positive integer or one of the following letters:
* t or T: tiny (value: 6)
* s or S: small (value: 12)
* m or M: medium (value: 18)
* l or L: large (value: 30)
* h or H: huge (value: 48)
For PNG image format the size unit is in pixels, while the unit is 0.1 mm for SVG format.
The *border* parameter controls how many modules thick the border should be (blank space around the code).
The default is 4, which is the minimum according to the specs.
The *version* parameter is an integer from 1 to 40 that controls the size of the QR code matrix. Set to None to
determine this automatically. The smallest, version 1, is a 21 x 21 matrix. The biggest, version 40, is
177 x 177 matrix.
The size grows by 4 modules/side.
For Micro QR codes, valid values are "M1", "M2", "M3", "M4".
There are 4 error correction levels used for QR codes, with each one adding different amounts of "backup" data
depending on how much damage the QR code is expected to suffer in its intended environment, and hence how much
error correction may be required. The correction level can be configured with the *error_correction* parameter as follow:
* l or L: error correction level L – up to 7% damage
* m or M: error correction level M – up to 15% damage
* q or Q: error correction level Q – up to 25% damage
* h or H: error correction level H – up to 30% damage
You may enforce the creation of a Micro QR Code with `micro=True`. The `micro` option defaults to `False`.
The `encoding` option controls the text encoding used in mode "byte" (used for any general text content). By default `encoding` is ``UTF-8``. When set to ``None
|
jumpstarter-io/horizon
|
openstack_dashboard/dashboards/admin/images/properties/forms.py
|
Python
|
apache-2.0
| 3,269
| 0.000612
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from glanceclient import exc
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
def str2bool(value):
"""Convert a string value to boolean
"""
return value.lower() in ("yes", "true", "1")
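# Example: str2bool('Yes') -> True, str2bool('0') -> False (the comparison is
# case-insensitive and only "yes"/"true"/"1" count as truthy).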
# Mapping of property names to type, used for converting input string value
# before submitting.
PROPERTY_TYPES = {'min_disk': long, 'min_ram': long, 'protected': str2bool}
def convert_value(key, value):
"""Convert the property value to the proper type if necessary.
"""
_type = PROPERTY_TYPES.get(key)
if _type:
return _type(value)
return value
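# Example: convert_value('min_disk', '20') -> 20L (a long) and
# convert_value('protected', 'Yes') -> True; keys without a mapped type pass
# through unchanged.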
class CreateProperty(forms.SelfHandlingForm):
key = forms.CharField(max_length="255", label=_("Key"))
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Created custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to create image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except exc.HTTPConflict:
msg = _('Unable to create image custom property. Property "%s" '
'already exists.') % data['key']
exceptions.handle(request, msg)
        except Exception:
msg = _('Unable to create image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
class EditProperty(forms.SelfHandlingForm):
    key = forms.CharField(widget=forms.widgets.HiddenInput)
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Saved custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to edit image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except Exception:
msg = _('Unable to edit image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
|
souravbadami/oppia
|
schema_utils_test.py
|
Python
|
apache-2.0
| 25,162
| 0.000318
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for object schema definitions."""
# pylint: disable=relative-import
import inspect
from core.domain import email_manager
from core.tests import test_utils
import schema_utils
SCHEMA_KEY_ITEMS = schema_utils.SCHEMA_KEY_ITEMS
SCHEMA_KEY_LEN = schema_utils.SCHEMA_KEY_LEN
SCHEMA_KEY_PROPERTIES = schema_utils.SCHEMA_KEY_PROPERTIES
SCHEMA_KEY_TYPE = schema_utils.SCHEMA_KEY_TYPE
SCHEMA_KEY_POST_NORMALIZERS = schema_utils.SCHEMA_KEY_POST_NORMALIZERS
SCHEMA_KEY_CHOICES = schema_utils.SCHEMA_KEY_CHOICES
SCHEMA_KEY_NAME = schema_utils.SCHEMA_KEY_NAME
SCHEMA_KEY_SCHEMA = schema_utils.SCHEMA_KEY_SCHEMA
SCHEMA_KEY_OBJ_TYPE = schema_utils.SCHEMA_KEY_OBJ_TYPE
SCHEMA_KEY_VALIDATORS = schema_utils.SCHEMA_KEY_VALIDATORS
SCHEMA_KEY_DESCRIPTION = 'description'
SCHEMA_KEY_UI_CONFIG = 'ui_config'
# The following keys are always accepted as optional keys in any schema.
OPTIONAL_SCHEMA_KEYS = [
SCHEMA_KEY_CHOICES, SCHEMA_KEY_POST_NORMALIZERS, SCHEMA_KEY_UI_CONFIG,
SCHEMA_KEY_VALIDATORS]
SCHEMA_TYPE_BOOL = schema_utils.SCHEMA_TYPE_BOOL
# 'Custom' objects undergo an entirely separate normalization process, defined
# in the relevant extensions/objects/models/objects.py class.
SCHEMA_TYPE_CUSTOM = schema_utils.SCHEMA_TYPE_CUSTOM
SCHEMA_TYPE_DICT = schema_utils.SCHEMA_TYPE_DICT
SCHEMA_TYPE_FLOAT = schema_utils.SCHEMA_TYPE_FLOAT
SCHEMA_TYPE_HTML = schema_utils.SCHEMA_TYPE_HTML
SCHEMA_TYPE_INT = schema_utils.SCHEMA_TYPE_INT
SCHEMA_TYPE_LIST = schema_utils.SCHEMA_TYPE_LIST
SCHEMA_TYPE_UNICODE = schema_utils.SCHEMA_TYPE_UNICODE
ALLOWED_SCHEMA_TYPES = [
SCHEMA_TYPE_BOOL, SCHEMA_TYPE_CUSTOM, SCHEMA_TYPE_DICT, SCHEMA_TYPE_FLOAT,
SCHEMA_TYPE_HTML, SCHEMA_TYPE_INT, SCHEMA_TYPE_LIST, SCHEMA_TYPE_UNICODE]
ALLOWED_CUSTOM_OBJ_TYPES = [
'Filepath', 'LogicQuestion', 'MathLatexString', 'MusicPhrase',
'ParameterName', 'SanitizedUrl', 'Graph', 'ImageWithRegions',
'ListOfTabs']
# Schemas for the UI config for the various types. All of these configuration
# options are optional additions to the schema, and, if omitted, should not
# result in any errors.
# Note to developers: please keep this in sync with
# https://github.com/oppia/oppia/wiki/Schema-Based-Forms
UI_CONFIG_SPECS = {
SCHEMA_TYPE_BOOL: {},
SCHEMA_TYPE_DICT: {},
SCHEMA_TYPE_FLOAT: {},
SCHEMA_TYPE_HTML: {
'hide_complex_extensions': {
'type': SCHEMA_TYPE_BOOL,
},
'placeholder': {
'type': SCHEMA_TYPE_UNICODE,
}
},
SCHEMA_TYPE_INT: {},
SCHEMA_TYPE_LIST: {
'add_element_text': {
'type': SCHEMA_TYPE_UNICODE
}
},
SCHEMA_TYPE_UNICODE: {
'rows': {
'type': SCHEMA_TYPE_INT,
'validators': [{
'id': 'is_at_least',
'min_value': 1,
}]
},
'coding_mode': {
'type': SCHEMA_TYPE_UNICODE,
            'choices': ['none', 'python', 'coffeescript'],
},
'placeholder': {
'type': SCHEMA_TYPE_UNICODE,
},
},
}
# Schemas for validators for the various types.
VALIDATOR_SPECS = {
SCHEMA_TYPE_BOOL: {},
SCHEMA_TYPE_DICT: {},
SCHEMA_TYPE_FLOAT: {
'is_at_least': {
'min_value': {
'type': SCHEMA_TYPE_FLOAT
}
},
'is_at_most': {
'max_value': {
'type': SCHEMA_TYPE_FLOAT
}
},
},
SCHEMA_TYPE_HTML: {},
SCHEMA_TYPE_INT: {
'is_at_least': {
'min_value': {
'type': SCHEMA_TYPE_INT
}
},
'is_at_most': {
'max_value': {
'type': SCHEMA_TYPE_INT
}
},
},
SCHEMA_TYPE_LIST: {
'has_length_at_least': {
'min_value': {
'type': SCHEMA_TYPE_INT,
'validators': [{
'id': 'is_at_least',
'min_value': 1,
}],
}
},
'has_length_at_most': {
'max_value': {
'type': SCHEMA_TYPE_INT,
'validators': [{
'id': 'is_at_least',
'min_value': 1,
}],
}
},
'is_uniquified': {},
},
SCHEMA_TYPE_UNICODE: {
'matches_regex': {
'regex': {
'type': SCHEMA_TYPE_UNICODE,
'validators': [{
'id': 'is_regex',
}]
}
},
'is_nonempty': {},
'is_regex': {},
'is_valid_email': {},
},
}
def _validate_ui_config(obj_type, ui_config):
"""Validates the value of a UI configuration."""
reference_dict = UI_CONFIG_SPECS[obj_type]
assert set(ui_config.keys()) <= set(reference_dict.keys())
for key, value in ui_config.iteritems():
schema_utils.normalize_against_schema(
value, reference_dict[key])
def _validate_validator(obj_type, validator):
"""Validates the value of a 'validator' field."""
reference_dict = VALIDATOR_SPECS[obj_type]
assert 'id' in validator and validator['id'] in reference_dict
customization_keys = validator.keys()
customization_keys.remove('id')
assert (set(customization_keys) ==
set(reference_dict[validator['id']].keys()))
for key in customization_keys:
value = validator[key]
schema = reference_dict[validator['id']][key]
try:
schema_utils.normalize_against_schema(value, schema)
except Exception as e:
raise AssertionError(e)
# Check that the id corresponds to a valid normalizer function.
validator_fn = schema_utils.get_validator(validator['id'])
assert set(inspect.getargspec(validator_fn).args) == set(
customization_keys + ['obj'])
def _validate_dict_keys(dict_to_check, required_keys, optional_keys):
"""Checks that all of the required keys, and possibly some of the optional
keys, are in the given dict.
Raises:
AssertionError: if the validation fails.
"""
assert set(required_keys) <= set(dict_to_check.keys()), (
'Missing keys: %s' % dict_to_check)
assert set(dict_to_check.keys()) <= set(required_keys + optional_keys), (
'Extra keys: %s' % dict_to_check)
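# Illustrative sketch (not part of the original file): how _validate_dict_keys
# behaves. A dict with the required key and a permitted optional key passes
# silently; an unexpected key raises AssertionError ('Extra keys: ...').
def _demo_validate_dict_keys():
    _validate_dict_keys({'type': 'int', 'choices': [1, 2]}, ['type'], ['choices'])
    try:
        _validate_dict_keys({'type': 'int', 'extra': True}, ['type'], ['choices'])
    except AssertionError as err:
        assert 'Extra keys' in str(err)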
def validate_schema(schema):
"""Validates a schema.
This is meant to be a utility function that should be used by tests to
ensure that all schema definitions in the codebase are valid.
Each schema is a dict with at least a key called 'type'. The 'type' can
take one of the SCHEMA_TYPE_* values declared above. In addition, there
may be additional keys for specific types:
- 'list' requires an additional 'items' property, which specifies the type
of the elements in the list. It also allows for an optional 'len'
property which specifies the len of the list.
- 'dict' requires an additional 'properties' property, which specifies the
names of the keys in the dict, and schema definitions for their values.
There may also be an optional 'post_normalizers' key whose value is a list
of normalizers.
Raises:
AssertionError: if the schema is not valid.
"""
assert isinstance(schema, dict)
assert SCHEMA_KEY_TYPE in schema
assert schema[SCHEMA_KEY_TYPE] in ALLOWED_SCHEMA_TYPES
if schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_CUSTOM:
_validate_dict_keys(
schema,
[SCHEMA_KEY_TYPE, SCHEMA_KEY_OBJ_TYPE],
|
hainm/elyxer
|
src/elyxer/gen/layout.py
|
Python
|
gpl-3.0
| 10,103
| 0.010792
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# eLyXer -- convert LyX source files to HTML output.
#
# Copyright (C) 2009 Alex Fernández
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --end--
# Alex 20090411
# LyX layout and derived classes
from elyxer.util.trace import Trace
from elyxer.parse.parser import *
from elyxer.out.output import *
from elyxer.gen.container import *
from elyxer.gen.styles import *
from elyxer.gen.header import *
from elyxer.proc.postprocess import *
from elyxer.ref.label import *
from elyxer.ref.partkey import *
from elyxer.ref.link import *
class Layout(Container):
"A layout (block of text) inside a lyx file"
type = 'none'
def __init__(self):
"Initialize the layout."
self.contents = []
self.parser = BoundedParser()
self.output = TaggedOutput().setbreaklines(True)
def process(self):
"Get the type and numerate if necessary."
self.type = self.header[1]
if self.type in TagConfig.layouts:
self.output.tag = TagConfig.layouts[self.type] + ' class="' + self.type + '"'
elif self.type.replace('*', '') in TagConfig.layouts:
self.output.tag = TagConfig.layouts[self.type.replace('*', '')]
self.output.tag += ' class="' + self.type.replace('*', '-') + '"'
else:
self.output.tag = 'div class="' + self.type + '"'
self.numerate()
def numerate(self):
"Numerate if necessary."
partkey = PartKeyGenerator.forlayout(self)
if partkey:
self.partkey = partkey
self.output.tag = self.output.tag.replace('?', unicode(partkey.level))
def __unicode__(self):
"Return a printable representation."
if self.partkey:
return 'Layout ' + self.type + ' #' + unicode(self.partkey.partkey)
return 'Layout of type ' + self.type
class StandardLayout(Layout):
"A standard layout -- can be a true div or nothing at all"
indentation = False
def process(self):
self.type = 'standard'
self.output = ContentsOutput()
def complete(self, contents):
"Set the contents and return it."
self.process()
self.contents = contents
return self
class Title(Layout):
"The title of the whole document"
def process(self):
self.type = 'title'
self.output.tag = 'h1 class="title"'
title = self.extracttext()
DocumentTitle.title = title
Trace.message('Title: ' + title)
class Author(Layout):
"The document author"
def process(self):
self.type = 'author'
self.output.tag = 'h2 class="author"'
author = self.extracttext()
Trace.debug('Author: ' + author)
DocumentAuthor.appendauthor(author)
class Abstract(Layout):
"A paper abstract"
done = False
def process(self):
self.type = 'abstract'
self.output.tag = 'div class="abstract"'
if Abstract.done:
return
message = Translator.translate('abstract')
tagged = TaggedText().constant(message, 'p class="abstract-message"', True)
self.contents.insert(0, tagged)
Abstract.done = True
class FirstWorder(Layout):
"A layout where the first word is extracted"
def extractfirstword(self):
"Extract the first word as a list"
return self.extractfromcontents(self.contents)
def extractfromcontents(self, contents):
"Extract the first word in contents."
firstcontents = []
while len(contents) > 0:
if self.isfirstword(contents[0]):
firstcontents.append(contents[0])
del contents[0]
return firstcontents
if self.spaceincontainer(contents[0]):
extracted = self.extractfromcontainer(contents[0])
firstcontents.append(extracted)
return firstcontents
firstcontents.append(contents[0])
del contents[0]
return firstcontents
def extractfromcontainer(self, container):
"Extract the first word from a container cloning it including its output."
if isinstance(container, StringContainer):
return self.extractfromstring(container)
result = Cloner.clone(container)
result.output = container.output
result.contents = self.extractfromcontents(container.contents)
return result
def extractfromstring(self, container):
"Extract the first word from elyxer.a string container."
if not ' ' in container.string:
Trace.error('No space in string ' + container.string)
return container
split = container.string.split(' ', 1)
container.string = split[1]
return Constant(split[0])
def spaceincontainer(self, container):
"Find out if the container contains a space somewhere."
return ' ' in container.extracttext()
def isfirstword(self, container):
"Find out if the container is valid as a first word."
if not isinstance(container, FirstWord):
return False
return not container.isempty()
class FirstWord(Container):
"A container which is in itself a first word, unless it's empty."
"Should be inherited by other containers, e.g. ERT."
def isempty(self):
"Find out if the first word is empty."
Trace.error('Unimplemented isempty()')
return True
class Description(FirstWorder):
"A description layout"
def process(self):
"Set the first word to bold"
self.type = 'Description'
self.output.tag = 'div class="Description"'
firstword = self.extractfirstword()
if not firstword:
return
tag = 'span class="Description-entry"'
self.contents.insert(0, TaggedText().complete(firstword, tag))
self.contents.insert(1, Constant(u' '))
class List(FirstWorder):
"A list layout"
def process(self):
"Set the first word to bold"
self.type = 'List'
self.output.tag = 'div class="List"'
firstword = self.extractfirstword()
if not firstword:
return
first = TaggedText().complete(firstword, 'span class="List-entry"')
    second = TaggedText().complete(self.contents, 'span class="List-contents"')
self.contents = [first, second]
class PlainLayout(Layout):
"A plain layout"
def process(self):
"Output just as contents."
self.output = ContentsOutput()
self.type = 'Plain'
def makevisible(self):
"Make the layout visible, output as tagged text."
self.output = TaggedOutput().settag('div class="PlainVisible"', True)
class LyXCode(Layout):
"A bit of LyX-Code."
def process(self):
"Output as pre."
self.output.tag = 'pre class="LyX-Code"'
for newline in self.searchall(Newline):
index = newline.parent.contents.index(newline)
newline.parent.contents[index] = Constant('\n')
class PostLayout(object):
"Numerate an indexed layout"
processedclass = Layout
def postprocess(self, last, layout, next):
"Group layouts and/or number them."
if layout.type in TagConfig.group['layouts']:
return self.group(last, layout)
if layout.partkey:
self.number(layout)
return layout
def group(self, last, layout):
"Group two layouts if they are the same type."
if not self.isgroupable(layout) or not self.isgroupable(last) or last.type != layout.type:
return layout
layout.contents = last.contents + [Constant('<br/>\n')] + layout.contents
last.contents = []
last.output = EmptyOutput()
return layout
def isgroupable(self, container):
"Check that the container can be grouped."
if not isinstance(container, Layout):
return False
for element in container.contents:
if not element.__class__.__name__ in LayoutConfig.groupable['allowed']:
return False
return True
def number(self, layout):
"Generate a number and place it before the text"
layout.partkey.addtoclabel(layout)
class
|
tkuriyama/jsonutils
|
jsonutils/lws/test/test_lws_logger.py
|
Python
|
mit
| 3,990
| 0
|
"""Test cases for JSON lws_logger module, assumes Pytest."""
from jsonutils.lws import lws_logger
class TestDictToTreeHelpers:
"""Test the helper functions for dict_to_tree."""
def test_flatten_list(self):
"""Test flattening of nested lists."""
f = lws_logger.flatten_list
nested = [1, [2, 3, [[4], 5]]]
assert list(f(nested)) == [1, 2, 3, 4, 5]
nested = [[[1]]]
assert list(f(nested)) == [1]
flat = [1, 2]
assert list(f(flat)) == [1, 2]
def test_filter_errors(self):
"""Test error filtering (helper function to filter_keys)."""
f = lws_logger.filter_errors
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
seq = [100, 99, 99, 99]
assert f(seq, errors) == [100]
seq = [99]
assert f(seq, errors) == ['key error']
seq = [-99, -99, 100]
assert f(seq, errors) == [100]
seq = [-99, -99]
assert f(seq, errors) == ['val error']
def test_filter_errors_single(self):
"""Test list error term filtering, single error."""
        f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 'hi'), ('a', 99), ('b', 'hi')]
filtered = [('a', 'hi'), ('b', 'hi')]
assert f(pairs, errors) == filtered
def test_filter_errors_multiple(self):
"""Test list error term filtering, multiple errors."""
f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 'hi'), ('a', 99), ('a', 99),
('b', 'hi'), ('b', -99)]
filtered = [('a', 'hi'), ('b', 'hi')]
assert f(pairs, errors) == filtered
def test_filter_errors_only(self):
"""Test list error term filtering, only errors."""
f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 99), ('b', -99)]
filtered = [('a', 'key error'), ('b', 'val error')]
assert f(pairs, errors) == filtered
class TestLoggerHelpers:
"""Test the helper functions for logger."""
def test_dict_to_tree_simple(self):
"""Test dict_to_tree simple dicts."""
f = lws_logger.dict_to_tree
simple_d = {'root': ['a', 'b']}
flat_list = [('root', 0), [('a', 1)], [('b', 1)]]
assert f(simple_d, 'root', [('root', 0)]) == flat_list
nested_d = {'root': ['a', 'b'], 'a': ['one', 'two']}
nested_list = [('root', 0), [('a', 1), [('one', 2)], [('two', 2)]],
[('b', 1)]]
assert f(nested_d, 'root', [('root', 0)]) == nested_list
def test_parse_errors_one(self):
"""Test scenario with one type of error."""
f = lws_logger.parse_errors
errors = {'key_str': 'key error',
'val_str': 'val error'}
nodes = [('one', 'key error'), ('two', 3), ('three', 3)]
output = 'Key Errors:\t1\nValue Errors:\t0'
assert f(nodes, errors) == (1, 0, output)
def test_parse_errors_both(self):
"""Test scenario with two types of errors."""
f = lws_logger.parse_errors
errors = {'key_str': 'key error',
'val_str': 'val error'}
nodes = [('one', 'key error'), ('two', 3), ('three', 3),
('four', 'val error')]
output = 'Key Errors:\t1\nValue Errors:\t1'
assert f(nodes, errors) == (1, 1, output)
def test_format_node(self):
"""Test node to string function."""
f = lws_logger.format_node
assert f('a', '----', 1) == '|----a'
assert f('a', '----', 2) == ' |----a'
|
Tusky/DjangoSample
|
blog/views.py
|
Python
|
mit
| 2,273
| 0.00088
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView
from blog.models import Post, Comment
class Posts(ListView):
"""
    Display every post, paginated; if given a type, filter or search accordingly.
"""
model = Post
template_name = 'posts.html'
paginate_by = 5
def get_queryset(self):
qs = self.model.objects.for_display()
type_of_page = self.kwargs.get('type', False)
search_query = self.request.GET.get('q', False)
        if type_of_page == 'user':
            qs = qs.filter(posted_by__username=self.kwargs.get('slug', ''))
elif type_of_page == 'category':
qs = qs.filter(categories__slug=self.kwargs.get('slug', ''))
if search_query:
qs = qs.filter(Q(title__icontains=search_query) | Q(content__icontains=search_query))
return qs
class SinglePost(DetailView):
"""
Display a single selected post.
"""
model = Post
template_name = 'post.html'
def get_queryset(self):
return self.model.objects.for_display()
class PostComment(CreateView):
"""
Saves comments received to a post. Removed ability to GET the page.
"""
model = Comment
fields = ['text']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(PostComment, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('blog:detail', kwargs={'slug': self.kwargs['slug']}))
def form_valid(self, form):
comment = form.save(commit=False)
comment.posted_by = self.request.user
comment.post = get_object_or_404(Post, slug=self.kwargs['slug'])
comment.save()
messages.success(self.request, 'Your comment was posted.')
return super(PostComment, self).form_valid(form)
def get_success_url(self):
return reverse('blog:detail', kwargs={'slug': self.kwargs['slug']})
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/printing/gtk.py
|
Python
|
gpl-3.0
| 510
| 0.003922
|
from __future__ import with_statement
from sympy.printing.mathml import mathml
import tempfile
import os
def print_gtk(x, start_viewer=True):
"""Print to Gtkmathview, a gtk widget capable of rendering Ma
|
thML.
Needs libgtkmathview-bin"""
from sympy.utilities.mathml import c2p
tmp = tempfile.mktemp() # create a temp file to store the result
with open(tmp, 'wb') as file:
file.write( c2p(mathml(x), simple=True) )
if start_viewer:
os.system("mathmlviewer " + tmp)
|
thebachchaoproject/bachchao-server
|
pahera/PythonModules/CheckIfUserExists_mod.py
|
Python
|
mit
| 1,431
| 0.004892
|
from pahera.models import Person
from django.db import connection, transaction
from pahera.Utilities import DictFetchAll
# Check whether a user with the same email or phone number already exists before registering a new user.
def VerifyTheUser(data):
cursor = connection.cursor()
email = data['email']
phone = data['phone']
cursor.execute("SELECT * from pahera_person WHERE email = %s OR phone_no = %s", [email, phone])
person_Data = {}
person_Data = DictFetchAll.dictfetchall(cursor)
if person_Data:
if person_Data[0]['email'] == email or person_Data[0]['phone_no'] == phone:
return False
else:
return True
else:
return True
# Check whether a user with the same email or phone number already exists before updating the user.
def VerifyTheUserUpdate(data, person):
cursor = connection.cursor()
email = data['email']
phone = data['phone']
cursor.execute("SELECT * from pahera_person WHERE email = %s OR phone_no = %s", [email, phone])
person_Data = {}
person_Data = DictFetchAll.dictfetchall(cursor)
if person_Data:
for post in person_Data:
            if post['email'] == email or post['phone_no'] == phone:
if post['id'] == person.id:
return True
else:
return False
else:
return True
else:
return True
|
kogaki/pqkmeans
|
test/clustering/test_pqkmeans.py
|
Python
|
mit
| 2,918
| 0.004798
|
import unittest
import pqkmeans
import numpy
import collections
import pickle
class TestPQKMeans(unittest.TestCase):
def data_source(self, n: int):
for i in range(n):
yield [i * 100] * 6
def setUp(self):
# Train PQ encoder
self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=3, Ks=20)
self.encoder.fit(numpy.array(list(self.data_source(200))))
def test_just_construction(self):
pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
def test_fit_and_predict(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
predicted = engine.fit_predict(codes)
count = collections.defaultdict(int)
for cluster in predicted:
count[cluster] += 1
# roughly balanced clusters
self.assertGreaterEqual(min(count.values()), max(count.values()) * 0.7)
a = engine.predict(codes[0:1, :])
b = engine.predict(codes[0:1, :])
self.assertEqual(a, b)
def test_cluster_centers_are_really_nearest(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
        predicted = engine.predict(codes)
self.assertTrue((fit_predicted == predicted).all())
        # Reconstruct the original vectors
codes_decoded = self.encoder.inverse_transform(codes)
cluster_centers_decoded = self.encoder.inverse_transform(cluster_centers)
for cluster, code_decoded in zip(predicted, codes_decoded):
other_cluster = (cluster + 1) % max(predicted)
self.assertLessEqual(
numpy.linalg.norm(cluster_centers_decoded[cluster] - code_decoded),
numpy.linalg.norm(cluster_centers_decoded[other_cluster] - code_decoded)
)
def test_constructor_with_cluster_center(self):
# Run pqkmeans first.
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
predicted = engine.predict(codes)
# save current engine and recover from savedata
engine_savedata = pickle.dumps(engine)
engine_recovered = pickle.loads(engine_savedata)
fit_predicted_from_recovered_obj = engine_recovered.predict(codes)
numpy.testing.assert_array_equal(predicted, fit_predicted_from_recovered_obj)
|
praus/shapy
|
shapy/framework/commands/qdisc.py
|
Python
|
mit
| 537
| 0.001862
|
HTBRootQdisc = """\
tc qdisc add dev {interface!s} root handle 1: \
htb default {default_class!s}\
"""
HTBQdisc = """\
tc qdisc add dev {interface!s} parent {parent!s} handle {handle!s} \
htb default {default_class!s}\
"""
NetemDelayQdisc = """\
tc qdisc add dev {interface!s} parent {parent!s} handle {handle!s} \
netem delay {delay!s}ms\
"""
IngressQdisc = "tc qdisc add dev {interface!s} ingress"
PRIOQdisc = "tc qdisc add dev {interface!s} root handle 1: prio"
pfifoQdisc = "tc qdisc add dev {interface!s} root handle 1: pfifo"
|
twilio/twilio-python
|
tests/unit/jwt/test_jwt.py
|
Python
|
mit
| 9,331
| 0.002251
|
import time as real_time
import unittest
import jwt as jwt_lib
from mock import patch
from twilio.jwt import Jwt, JwtDecodeError
class DummyJwt(Jwt):
"""Jwt implementation that allows setting arbitrary payload and headers for testing."""
ALGORITHM = 'HS256'
def __init__(self, secret_key, issuer, subject=None, algorithm=None,
nbf=Jwt.GENERATE, ttl=3600, valid_until=None, headers=None,
payload=None):
super(DummyJwt, self).__init__(
secret_key=secret_key,
issuer=issuer,
subject=subject,
algorithm=algorithm or self.ALGORITHM,
nbf=nbf,
ttl=ttl,
valid_until=valid_until
)
self._payload = payload or {}
self._headers = headers or {}
def _generate_payload(self):
return self._payload
def _generate_headers(self):
return self._headers
class JwtTest(unittest.TestCase):
def assertIn(self, foo, bar, msg=None):
"""backport for 2.6"""
assert foo in bar, (msg or "%s not found in %s" % (foo, bar))
def now(self):
return int(real_time.time())
def assertJwtsEqual(self, jwt, key, expected_payload=None, expected_headers=None):
expected_headers = expected_headers or {}
expected_payload = expected_payload or {}
decoded_payload = jwt_lib.decode(jwt, key, algorithms=["HS256"], options={"verify_signature": False})
decoded_headers = jwt_lib.get_unverified_header(jwt)
self.assertEqual(expected_headers, decoded_headers)
self.assertEqual(expected_payload, decoded_payload)
@patch('time.time')
def test_basic_encode(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
)
@patch('time.time')
def test_encode_with_subject(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'sub': 'subject'},
)
@patch('time.time')
def test_encode_without_nbf(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={}, nbf=None)
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'sub': 'subject'},
)
@patch('time.time')
def test_encode_custom_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 0},
)
@patch('time.time')
def test_encode_ttl_added_to_current_time(self, time_mock):
time_mock.return_value = 50.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 60, 'nbf': 50},
)
@patch('time.time')
def test_encode_override_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(ttl=20),
'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 20, 'nbf': 0},
)
@patch('time.time')
def test_encode_valid_until_overrides_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, valid_until=70, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 70, 'nbf': 0},
)
@patch('time.time')
def test_encode_custom_nbf(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, nbf=5, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 5},
)
@patch('time.time')
def test_encode_with_headers(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={'sooper': 'secret'}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256', 'sooper': 'secret'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
)
@patch('time.time')
def test_encode_with_payload(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', payload={'root': 'true'})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'root': 'true'},
)
@patch('time.time')
def test_encode_with_payload_and_headers(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={'yes': 'oui'}, payload={'pay': 'me'})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256', 'yes': 'oui'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'pay': 'me'},
)
    def test_encode_no_key_fails(self):
jwt = DummyJwt(None, 'issuer')
self.assertRaises(ValueError, jwt.to_jwt)
def test_encode_decode(self):
test_start = self.now()
jwt = DummyJwt('secret_key', 'issuer', subject='hey', payload={'sick': 'sick'})
decoded_jwt = Jwt.from_jwt(jwt.to_jwt(), 'secret_key')
self.assertGreaterEqual(decoded_jwt.valid_until, self.now() + 3600)
self.assertGreaterEqual(decoded_jwt.nbf, test_start)
self.assertEqual(decoded_jwt.issuer, 'issuer')
self.assertEqual(decoded_jwt.secret_key, 'secret_key')
self.assertEqual(decoded_jwt.algorithm, 'HS256')
self.assertEqual(decoded_jwt.subject, 'hey')
self.assertEqual(decoded_jwt.headers, {'typ': 'JWT', 'alg': 'HS256'})
self.assertDictContainsSubset({
'iss': 'issuer',
'sub': 'hey',
'sick': 'sick',
}, decoded_jwt.payload)
def test_encode_decode_mismatched_algorithms(self):
jwt = DummyJwt('secret_key', 'issuer', algorithm='HS512', subject='hey', payload={'sick': 'sick'})
self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt())
def test_decode_bad_secret(self):
jwt = DummyJwt('secret_key', 'issuer')
self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt(), 'letmeinplz')
def test_decode_modified_jwt_fails(self):
jwt = DummyJwt('secret_key', 'issuer')
example_jwt = jwt.to_jwt()
example_jwt = 'ABC' + example_jwt[3:]
self.assertRaises(JwtDecodeError, Jwt.from_jwt, example_jwt, 'secret_key')
def test_decode_validates_expiration(self):
expired_jwt = DummyJwt('secret_key', 'issuer', valid_until=self.now())
real_time.sleep(1)
self.assertRaises(JwtDecodeError, Jwt.from_jwt, expired_jwt.to_jwt(), 'secret_key')
def test_decod
|
thaim/ansible
|
lib/ansible/plugins/doc_fragments/openstack.py
|
Python
|
mit
| 3,726
| 0.001074
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = r'''
options:
cloud:
description:
      - Named cloud or cloud config to operate against.
If I(cloud) is a string, it references a named cloud config as defined
in an OpenStack clouds.yaml file. Provides default values for I(auth)
and I(auth_type). This parameter is not needed if I(auth) is provided
or if OpenStack OS_* environment variables are present.
        If I(cloud) is a dict, it contains a complete cloud configuration like
would be in a section of clouds.yaml.
type: raw
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains (for example, I(os_user_domain_name) or I(os_project_domain_name)) if the cloud supports them.
For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
type: dict
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
type: str
region_name:
description:
- Name of the region.
type: str
wait:
description:
- Should ansible wait until the requested resource is complete.
type: bool
default: yes
timeout:
description:
- How long should ansible wait for the requested resource.
type: int
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
type: int
validate_certs:
description:
- Whether or not SSL API requests should be verified.
- Before Ansible 2.3 this defaulted to C(yes).
type: bool
default: no
aliases: [ verify ]
ca_cert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
type: str
aliases: [ cacert ]
client_cert:
description:
- A path to a client certificate to use as part of the SSL transaction.
type: str
aliases: [ cert ]
client_key:
description:
- A path to a client key to use as part of the SSL transaction.
type: str
aliases: [ key ]
interface:
description:
- Endpoint URL type to fetch from the service catalog.
type: str
choices: [ admin, internal, public ]
default: public
aliases: [ endpoint_type ]
version_added: "2.3"
requirements:
- python >= 2.7
- openstacksdk >= 0.12.0
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by openstacksdk, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(https://docs.openstack.org/openstacksdk/)
'''
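# Illustrative sketch (not from the original fragment): the shape of the
# I(auth) dict described above for the default password plugin; every value
# here is a hypothetical placeholder.
EXAMPLE_AUTH = {
    'auth_url': 'https://keystone.example.com:5000/v3',
    'username': 'demo',
    'password': 'secret',
    'project_name': 'demo-project',
}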
|
googleapis/python-oslogin
|
google/cloud/oslogin_v1/types/__init__.py
|
Python
|
apache-2.0
| 1,116
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .oslogin import (
DeletePosixAccountRequest,
DeleteSshPublicKeyRequest,
    GetLoginProfileRequest,
GetSshPublicKeyRequest,
ImportSshPublicKeyRequest,
ImportSshPublicKeyResponse,
LoginProfile,
UpdateSshPublicKeyRequest,
)
__all__ = (
"DeletePosixAccountRequest",
"DeleteSshPublicKeyRequest",
"GetLoginProfileRequest",
"GetSshPublicKeyRequest",
"ImportSshPublicKeyRequest",
"ImportSshPublicKeyResponse",
"LoginProfile",
"UpdateSshPublicKeyRequest",
)
|
chaos-soft/chocola
|
files/apps.py
|
Python
|
mit
| 164
| 0
|
from django.apps import AppConfig
class FilesConfig(AppConfig):
name = 'files'
verbose_name = 'Files'
    def ready(self):
from . import signals
|
trehn/django-installer
|
django_installer/installer/forms.py
|
Python
|
isc
| 2,440
| 0.00041
|
try:
from configparser import NoSectionError, NoOptionError
except ImportError:
from ConfigParser import NoSectionError, NoOptionError
from django import forms
from django.utils.translation import ugettext as _
def get_option(settings, section, option):
try:
return settings.get(section, option)
except NoSectionError:
return ""
except NoOptionError:
return ""
class BaseURLForm(forms.Form):
title = _("Base URL")
url = forms.URLField(
help_text=_("The absolute URL this application will be served at."),
initial="https://example.com",
label=_("URL"),
)
def populate_from_settings(self, settings):
self.data['url'] = get_option(settings, "base_url", "url")
def populate_settings(self, settings):
settings.add_section("base_url")
settings.set("base_url", "url", self.cleaned_data['url'])
class DatabaseForm(forms.Form):
title = _("Database")
engine = forms.ChoiceField(
choices=(
('django.db.backends.mysql', _("MySQL")),
('django.db.backends.oracle', _("Oracle")),
('django.db.backends.postgresql_psycopg2', _("Postgres")),
),
initial='django.db.backends.postgresql_psycopg2',
label=_("Engine"),
)
host = forms.CharField(
initial="localhost",
label=_("Hostname"),
max_length=128,
)
name = forms.CharField(
label=_("Database name"),
max_length=128,
)
password = forms.CharField(
label=_("Password"),
max_length=128,
required=False,
widget=forms.PasswordInput,
)
port = forms.IntegerField(
label=_("Port"),
min_value=1,
max_value=65535,
)
user = forms.CharField(
label=_("Username"),
min_length=1,
max_length=128,
)
def populate_from_settings(self, settings):
try:
for field in ('engine', 'host', 'name', 'password', 'port', 'user'):
self.data[field] = get_option(settings, "database", field)
except NoSectionError:
pass
def populate_settings(self, settings):
settings.add_section("database")
for field in ('engine', 'host', 'name', 'password', 'user'):
settings.set("database", field, self.cleaned_data[field])
settings.set("database", "port", str(self.cleaned_data['port']))
|
LunacyZeus/Tao-ni
|
web/Taoni/manage.py
|
Python
|
gpl-3.0
| 803
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Taoni.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
kaedroho/wagtail
|
wagtail/contrib/postgres_search/tests/test_backend.py
|
Python
|
bsd-3-clause
| 7,353
| 0.000272
|
from django.test import TestCase
from wagtail.search.tests.test_backends import BackendTests
from wagtail.tests.search import models
from ..utils import BOOSTS_WEIGHTS, WEIGHTS_VALUES, determine_boosts_weights, get_weight
class TestPostgresSearchBackend(BackendTests, TestCase):
backend_path = 'wagtail.contrib.postgres_search.backend'
def test_weights(self):
self.assertListEqual(BOOSTS_WEIGHTS,
[(10, 'A'), (2, 'B'), (0.5, 'C'), (0.25, 'D')])
self.assertListEqual(WEIGHTS_VALUES, [0.025, 0.05, 0.2, 1.0])
self.assertEqual(get_weight(15), 'A')
self.assertEqual(get_weight(10), 'A')
self.assertEqual(get_weight(9.9), 'B')
self.assertEqual(get_weight(2), 'B')
self.assertEqual(get_weight(1.9), 'C')
self.assertEqual(get_weight(0), 'D')
self.assertEqual(get_weight(-1), 'D')
self.assertListEqual(determine_boosts_weights([1]),
[(1, 'A'), (0, 'B'), (0, 'C'), (0, 'D')])
self.assertListEqual(determine_boosts_weights([-1]),
                             [(-1, 'A'), (-1, 'B'), (-1, 'C'), (-1, 'D')])
self.assertListEqual(determine_boosts_weights([-1, 1, 2]),
[(2, 'A'), (1, 'B'), (-1, 'C'), (-1, 'D')])
self.assertListEqual(determine_boosts_weights([0, 1, 2, 3]),
[(3, 'A'), (2, 'B'), (1, 'C'), (0, 'D')])
self.assertListEqual(determine_boosts_weights([0, 0.25, 0.75, 1, 1.5]),
[(1.5, 'A'), (1, 'B'), (0.5, 'C'), (0, 'D')])
self.assertListEqual(determine_boosts_weights([0, 1, 2, 3, 4, 5, 6]),
[(6, 'A'), (4, 'B'), (2, 'C'), (0, 'D')])
self.assertListEqual(determine_boosts_weights([-2, -1, 0, 1, 2, 3, 4]),
[(4, 'A'), (2, 'B'), (0, 'C'), (-2, 'D')])
def test_search_tsquery_chars(self):
"""
Checks that tsquery characters are correctly escaped
and do not generate a PostgreSQL syntax error.
"""
# Simple quote should be escaped inside each tsquery term.
results = self.backend.search("L'amour piqué par une abeille",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("'starting quote",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("ending quote'",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("double quo''te",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("triple quo'''te",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now suffixes.
results = self.backend.search("Something:B", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("Something:*", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.search("Something:A*BCD", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the AND operator.
results = self.backend.search("first & second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the OR operator.
results = self.backend.search("first | second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the NOT operator.
results = self.backend.search("first & !second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the phrase operator.
results = self.backend.search("first <-> second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
def test_autocomplete_tsquery_chars(self):
"""
Checks that tsquery characters are correctly escaped
and do not generate a PostgreSQL syntax error.
"""
# Simple quote should be escaped inside each tsquery term.
results = self.backend.autocomplete("L'amour piqué par une abeille",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("'starting quote",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("ending quote'",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("double quo''te",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("triple quo'''te",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Backslashes should be escaped inside each tsquery term.
results = self.backend.autocomplete("backslash\\",
models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now suffixes.
results = self.backend.autocomplete("Something:B", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("Something:*", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
results = self.backend.autocomplete("Something:A*BCD", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the AND operator.
results = self.backend.autocomplete("first & second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the OR operator.
results = self.backend.autocomplete("first | second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the NOT operator.
results = self.backend.autocomplete("first & !second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
# Now the phrase operator.
results = self.backend.autocomplete("first <-> second", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [])
def test_index_without_upsert(self):
# Test the add_items code path for Postgres 9.4, where upsert is not available
self.backend.reset_index()
index = self.backend.get_index_for_model(models.Book)
index._enable_upsert = False
index.add_items(models.Book, models.Book.objects.all())
results = self.backend.search("JavaScript", models.Book)
self.assertUnsortedListEqual([r.title for r in results], [
"JavaScript: The good parts",
"JavaScript: The Definitive Guide"
])
|
rafaeljusto/shelter
|
testing/scan_querier/scan_querier.input.py
|
Python
|
gpl-2.0
| 3,451
| 0.019994
|
#!/usr/bin/env python
# Copyright 2014 Rafael Dantas Justo. All rights reserved.
# Use of this source code is governed by a GPL
# license that can be found in the LICENSE file.
import getopt
import sys
import subprocess
import urllib.request
class NS:
def __init__(self):
self.name = ""
self.type = "NS"
self.namserver = ""
def __str__(self):
return "{} {} {}".format(self.name, self.type, self.namserver)
class DS:
def __init__(self):
self.name = ""
self.type = "DS"
self.keytag = 0
self.algorithm = 0
self.digestType = 0
self.digest = ""
def __str__(self):
return "{} {} {} {} {} {}".format(self.name, self.type, self.keytag,
self.algorithm, self.digestType, self.digest)
class A:
def __init__(self):
self.name = ""
self.type = "A"
self.address = ""
def __str__(self):
return "{} {} {}".format(self.name, self.type, self.address)
class AAAA:
def __init__(self):
self.name = ""
self.type = "AAAA"
self.address = ""
def __str__(self):
return "{} {} {}".format(self.name, self.type, self.address)
def retrieveData(url):
response = urllib.request.urlopen(url)
data = response.read()
response.close()
return data.decode()
def buildZone(data):
zone = []
for line in data.split("\n"):
lineParts = line.split()
if len(lineParts) < 4:
print(line)
continue
if lineParts[3] == "NS" and len(lineParts) == 5:
ns = NS()
ns.name = lineParts[0]
            ns.nameserver = lineParts[4]
zone.append(ns)
elif lineParts[3] == "A" and len(lineParts) == 5:
a = A()
a.name = lineParts[0]
a.address = lineParts[4]
zone.append(a)
elif lineParts[3] == "AAAA" and len(lineParts) == 5:
aaaa = AAAA()
aaaa.name = lineParts[0]
aaaa.address = lineParts[4]
zone.append(aaaa)
elif lineParts[3] == "DS" and len(lineParts) == 8:
ds = DS()
ds.name = lineParts[0]
ds.keytag = int(lineParts[4])
ds.algorithm = int(lineParts[5])
ds.digestType = int(lineParts[6])
ds.digest = lineParts[7]
zone.append(ds)
return zone
def writeZone(zone, outputPath):
output = open(outputPath, "w")
for rr in zone:
print(str(rr), file=output)
output.close()
###################################################################
defaultURL = "http://www.internic.net/domain/root.zone"
defaultOutput = "scan_querier.input"
def usage():
print("")
print("Usage: " + sys.argv[0] + " [-h|--help] [-u|--url] [-o|--output]")
print(" Where -h or --help is for showing this usage")
print(" -u or --url is the URL of the source file")
print(" -o or --output is the path where the Go code will written")
def main(argv):
try:
opts, args = getopt.getopt(argv, "u:o:", ["url", "output"])
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(1)
url = ""
outputPath = ""
for key, value in opts:
if key in ("-u", "--url"):
url = value
elif key in ("-o", "--output"):
outputPath = value
elif key in ("-h", "--help"):
usage()
sys.exit(0)
if len(url) == 0:
url = defaultURL
if len(outputPath) == 0:
outputPath = defaultOutput
try:
data = retrieveData(url)
rootZone = buildZone(data)
writeZone(rootZone, outputPath)
except KeyboardInterrupt:
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
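# Illustrative sketch (not from the original file): buildZone on a two-line
# zone fragment; the records below are hypothetical examples.
def _demo_build_zone():
    sample = ("example. 172800 IN NS a.iana-servers.net.\n"
              "example. 86400 IN DS 31589 8 1 3490A6806D47F17A34C29E2CE80E8A999FFBE4BE")
    zone = buildZone(sample)
    assert [rr.type for rr in zone] == ["NS", "DS"]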
|
cpieloth/CppMath
|
tools/PackBacker/packbacker/job.py
|
Python
|
apache-2.0
| 2,389
| 0.000837
|
__author__ = 'Christof Pieloth'
import logging
from packbacker.errors import ParameterError
from packbacker.installers import installer_prototypes
from packbacker.utils import UtilsUI
class Job(object):
log = logging.getLogger(__name__)
def __init__(self):
self._installers = []
def add_installer(self, installer):
        self._installers.append(installer)
def execute(self):
errors = 0
for i in self._installers:
if not UtilsUI.ask_for_execute('Install ' + i.label):
continue
try:
if i.install():
Job.log.info(i.name + ' executed.')
else:
errors += 1
Job.log.error('Error on executing ' + i.name + '!')
except Exception as ex:
errors += 1
Job.log.error('Unknown error:\n' + str(ex))
return errors
@staticmethod
def read_job(fname):
prototypes = []
prototypes.extend(installer_prototypes())
job = None
try:
job_file = open(fname, 'r')
except IOError as err:
Job.log.critical('Error on reading job file:\n' + str(err))
else:
with job_file:
job = Job()
for line in job_file:
if line[0] == '#':
continue
for p in prototypes:
if p.matches(line):
try:
params = Job.read_parameter(line)
cmd = p.instance(params)
job.add_installer(cmd)
except ParameterError as err:
Job.log.error("Installer '" + p.name + "' is skipped: " + str(err))
except Exception as ex:
Job.log.critical('Unknown error: \n' + str(ex))
continue
return job
@staticmethod
def read_parameter(line):
params = {}
i = line.find(': ') + 2
line = line[i:]
pairs = line.split(';')
for pair in pairs:
pair = pair.strip()
par = pair.split('=')
if len(par) == 2:
params[par[0]] = par[1]
return params
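# Illustrative sketch (not from the original file): read_parameter splits the
# text after ': ' into ';'-separated key=value pairs (the installer name and
# keys below are made up).
def _demo_read_parameter():
    params = Job.read_parameter('some_installer: dest_dir=/tmp/x;version=1.2')
    assert params == {'dest_dir': '/tmp/x', 'version': '1.2'}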
|
anjsimmo/simple-ml-pipeline
|
learners/traveltime_baserate.py
|
Python
|
mit
| 1,292
| 0.00387
|
import json
import pickle
import numpy as np
import pandas as pd
import datatables.traveltime
def write_model(baserate, model_file):
"""
Write model to file
baserate -- average travel time
    model_file -- file
"""
model_params = {
'baserate': baserate
}
    model_str = json.dumps(model_params)
with open(model_file, 'w') as out_f:
out_f.write(model_str)
def load_model(model_file):
"""
    Load baserate model from file
model_file -- file
returns -- baserate
"""
with open(model_file, 'r') as model_f:
model_str = model_f.read()
model_params = json.loads(model_str)
return model_params['baserate']
def train(train_data_file, model_file):
data = datatables.traveltime.read_xs(train_data_file)
y = data['y'].values # travel times
# use mean value as baserate prediction
baserate = np.mean(y)
write_model(baserate, model_file)
def predict(model_file, test_xs_file, output_file):
baserate = load_model(model_file)
data = datatables.traveltime.read_xs(test_xs_file)
num_rows = data.shape[0]
# predict constant baserate for every row
y_pred = np.full(num_rows, baserate)
data['pred'] = y_pred
datatables.traveltime.write_pred(data, output_file)
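# Illustrative sketch (not from the original file): the model file is plain
# JSON, so write_model/load_model round-trip a single float (the path below
# is a hypothetical example).
def _demo_model_roundtrip(path='/tmp/baserate_model.json'):
    write_model(42.5, path)
    assert load_model(path) == 42.5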
|
TissueMAPS/TmLibrary
|
tmlib/submission.py
|
Python
|
agpl-3.0
| 1,761
| 0.000568
|
import logging
import tmlib.models as tm
class SubmissionManager(object):
'''Mixin class for submission and monitoring of computational tasks.'''
def __init__(self, experiment_id, program_name):
'''
Parameters
----------
experiment_id: int
ID of the processed experiment
program_name: str
name of the submitting program
'''
self.experiment_id = experiment_id
self.program_name = program_name
def register_submission(self, user_id=None):
'''Creates a database entry in the "submissions" table.
Parameters
----------
user_id: int, optional
ID of submitting user (if not the user who owns the experiment)
Returns
-------
Tuple[int, str]
ID of the submission and the name of the submitting user
Warning
-------
Ensure that the "submissions" table get updated once the jobs
were su
|
bmitted, i.e. added to a running `GC3Pie` engine.
To this end, use the ::meth:`tmlib.workflow.api.update_submission`
method.
See also
--------
:class:`tmlib.models.submission.Submission`
'''
with tm.utils.MainSession() as session:
if user_id is None:
experiment = session.query(tm.ExperimentReference).\
get(self.experiment_id)
user_id = experiment.user_id
submission = tm.Submission(
experiment_id=self.experiment_id, program=self.program_name,
user_id=user_id
)
session.add(submission)
session.commit()
return (submission.id, submission.user.name)
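# Illustrative sketch (not from the original file): typical use of the mixin.
# The IDs and program name are hypothetical, and actually running this
# requires a configured TissueMAPS database session.
def _demo_register_submission():
    manager = SubmissionManager(experiment_id=1, program_name='workflow')
    return manager.register_submission()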
|
openweave/happy
|
happy/ReturnMsg.py
|
Python
|
apache-2.0
| 1,604
| 0
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements ReturnMsg class.
#
# ReturnMsg is used to return not only a numerical success-or-failure
# status, but also any accompanying data structure.
#
class ReturnMsg:
def __init__(self, value=None, data=None):
self.value = value
if data is None:
self.data = None
elif isinstance(data, dict):
self.data = data.copy()
elif isinstance(data, list):
self.data = data[:]
else:
self.data = data
def Value(self, value=None):
if value is None:
return self.value
else:
            self.value = value
def Data(self, data=None):
if data is None:
return self.data
elif isinstance(data, dict):
self.data = data.copy()
elif isinstance(data, list):
self.data = data[:]
else:
self.data = data
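# Illustrative sketch (not from the original file): returning a status value
# together with an arbitrary payload (the payload below is hypothetical).
def _demo_return_msg():
    ret = ReturnMsg(value=0, data={'pid': 1234})
    assert ret.Value() == 0
    assert ret.Data() == {'pid': 1234}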
|