| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
cga-harvard/cga-worldmap
|
geonode/contrib/dataverse_styles/style_layer_maker.py
|
Python
|
gpl-3.0
| 12,317
| 0.004871
|
"""
Given Style Rules, create an SLD in XML format and add it to a layer
"""
if __name__=='__main__':
import os, sys
DJANGO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(DJANGO_ROOT)
os.environ['DJANGO_SETTINGS_MODULE'] = 'geonode.settings'
import logging
import os
from random import choice
import re
from xml.etree.ElementTree import XML, ParseError
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # python 3.x
from django.utils.translation import ugettext as _
from django.conf import settings
from geonode.contrib.dataverse_connect.layer_metadata import LayerMetadata
from geonode.maps.models import Layer
from geonode.contrib.dataverse_styles.geoserver_rest_util import make_geoserver_json_put_request, make_geoserver_put_sld_request
from geonode.contrib.dataverse_styles.geonode_get_services import get_style_name_for_layer
LOGGER = logging.getLogger(__name__)
class StyleLayerMaker:
"""
Given Style Rules, create SLD XML and add it to a layer
Basic usage:
# Init object with an existing layer name
style_layer_maker = StyleLayerMaker('income_2so')
# Use some SLD info in XML format
sld_xml_content = open('test_rules.xml', 'r').read() # 'test_rules.xml' contains a SLD info in XML format
# Add sld_xml_content to the layer as the default style
success = style_layer_maker.add_sld_xml_to_layer(sld_xml_content)
# If operation failed, check error messages
if not success:
if style_layer_maker.err_found:
print('\n'.join(style_layer_maker.err_msgs))
"""
def __init__(self, layer_name):
self.gs_catalog_obj = Layer.objects.gs_catalog
self.layer_name = layer_name
self.err_found = False
self.err_msgs = []
self.layer_metadata = None # LayerMetadata object
def add_err_msg(self, err_msg):
self.err_found = True
self.err_msgs.append(err_msg)
LOGGER.warn(err_msg)
def create_layer_metadata(self, layer_name):
if layer_name is None:
self.layer_metadata = None
return
#self.layer_metadata = LayerMetadata(**dict(geonode_layer_name=layer_name))
self.layer_metadata = LayerMetadata.create_metadata_using_layer_name(layer_name)
def get_layer_metadata(self):
"""Return a LayerMetadata object, if it exists"""
if self.layer_metadata is None:
return None
return self.layer_metadata
def add_sld_to_layer(self, formatted_sld_object):
# update layer via 2 PUT calls to the geoserver
return self.add_sld_xml_to_layer_via_puts(formatted_sld_object,\
self.layer_name)
# use direct python, but doesn't properly clear tile cache
#return self.add_sld_xml_to_layer(formatted_sld_object)
def get_url_to_set_sld_rules(self, style_name):
"""
Create url to set the new SLD to the layer via a put
#http://localhost:8000/gs/rest/styles/social_disorder_nydj_k_i_v.xml
This will be sent with a XML content containing the SLD rules
"""
if not style_name:
return None
# (1) Given the layer, retrieve the SLD containing the style name
#
# (to do)
# (2) Format the url for adding/retrieving styles
#
url_fragment = 'rest/styles/%s.xml' % (style_name)
full_url = urljoin(settings.GEOSERVER_BASE_URL, url_fragment)
return full_url
def get_set_default_style_url(self, layer_name):
"""
Given a layer name, return the REST url to set a default style
"""
if not layer_name:
return None
url_fragment = 'rest/layers/%s:%s' % (settings.DEFAULT_WORKSPACE, layer_name)
full_url = urljoin(settings.GEOSERVER_BASE_URL, url_fragment)
return full_url
def add_sld_xml_to_layer_via_puts(self, formatted_sld_object, layer_name):
if not formatted_sld_object or not layer_name:
return False
print '-' * 40
print 'formatted_sld_object.formatted_sld_xml'
print formatted_sld_object.formatted_sld_xml
print '-' * 40
# (1) Verify the XML
if not self.is_xml_verified(formatted_sld_object.formatted_sld_xml):
self.add_err_msg('The style information contains invalid XML')
return False
# (2) Set the new SLD to the layer via a put
#http://localhost:8000/gs/rest/styles/social_disorder_nydj_k_i_v.xml
# --------------------------------------
# Retrieve the style name for this layer
# --------------------------------------
(success, style_name_or_err_msg) = get_style_name_for_layer(layer_name)
if not success:
self.add_err_msg(style_name_or_err_msg)
return False
geoserver_sld_url = self.get_url_to_set_sld_rules(style_name_or_err_msg)
print 'geoserver_sld_url', geoserver_sld_url
print '-' * 40
print 'formatted_sld_object.formatted_sld_xml', formatted_sld_object.formatted_sld_xml
print '-' * 40
(response, content) = make_geoserver_put_sld_request(geoserver_sld_url, formatted_sld_object.formatted_sld_xml)
print 'response', response
print '-' * 40
print 'content', content
print '-' * 40
if response is None or not response.status == 200:
self.add_err_msg('Failed to set new style as the default')
return False
# (3) Set the new style as the default for the layer
# Send a PUT to the catalog to set the default style
json_str = """{"layer":{"defaultStyle":{"name":"%s"},"styles":{},"enabled":true}}""" % formatted_sld_object.sld_name
geoserver_json_url = self.get_set_default_style_url(self.layer_name)
if geoserver_json_url is None:
self.add_err_msg('Failed to format the url to set new style for layer: %s' % self.layer_name)
return False
(response, content) = make_geoserver_json_put_request(geoserver_json_url, json_str)
if response is None or not response.status in (200, 201):
self.add_err_msg('Failed to set new style as the default')
return False
self.create_layer_metadata(self.layer_name)
print '-' * 40
print ('layer %s saved with style %s' % (self.layer_name, formatted_sld_object.sld_name))
return True
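# Illustrative summary of the two PUT calls above (a sketch; URL shapes assume
# GEOSERVER_BASE_URL ends with a trailing slash, and the names are hypothetical):
#   1) PUT the SLD XML body to  <GEOSERVER_BASE_URL>rest/styles/<style_name>.xml
#   2) PUT the JSON body {"layer": {"defaultStyle": {"name": "<sld_name>"},
#      "styles": {}, "enabled": true}} to
#      <GEOSERVER_BASE_URL>rest/layers/<DEFAULT_WORKSPACE>:<layer_name>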
def add_sld_xml_to_layer(self, formatted_sld_object):
"""
NOT USING, tiles were not getting refreshed properly
Keeping code around in case needed in the future
"""
if not formatted_sld_object:
return False
print 'type(formatted_sld_object)', type(formatted_sld_object)
# (1) Verify the XML
if not self.is_xml_verified(formatted_sld_object.formatted_sld_xml):
self.add_err_msg('The style information contains invalid XML')
return False
# (2) Retrieve the layer
layer_obj = self.gs_catalog_obj.get_layer(self.layer_name)
if layer_obj is None:
self.add_err_msg('The layer "%s" does not exist' % self.layer_name)
return False
self.show_layer_style_list(layer_obj)
#self.clear_alternate_style_list(layer_obj)
# (3) Create a style name
#stylename = self.layer_name + self.get_random_suffix()
#while self.is_style_name_in_catalog(stylename):
# stylename = self.layer_name + self.get_random_suffix()
style_name = formatted_sld_object.sld_name
# (4) Add the xml style to the catalog, with the new name
try:
# sync names
self.gs_catalog_obj.create_style(style_name, formatted_sld_object.formatted_sld_xml)
except:
self.add_err_msg('Failed to add style to the catalog: %s' % style_name)
return False
# (5) Pull the style object back from the catalog
new_style_obj = self.gs_catalog_obj.get_style(style_name)
if new_style_obj is None:
self.add_err_msg('Failed t
|
mattvonrocketstein/smash
|
smashlib/ipy3x/core/page.py
|
Python
|
mit
| 12,350
| 0.001053
|
# encoding: utf-8
"""
Paging capabilities for IPython.core
Authors:
* Brian Granger
* Fernando Perez
Notes
-----
For now this uses ipapi, so it can't be in IPython.utils. If we can get
rid of that dependency, we could move it there.
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import re
import sys
import tempfile
from io import UnsupportedOperation
from IPython import get_ipython
from IPython.core.error import TryNext
from IPython.utils.data import chop
from IPython.utils import io
from IPython.utils.process import system
from IPython.utils.terminal import get_terminal_size
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
esc_re = re.compile(r"(\x1b[^m]+m)")
def page_dumb(strng, start=0, screen_lines=25):
"""Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode."""
out_ln = strng.splitlines()[start:]
screens = chop(out_ln, screen_lines - 1)
if len(screens) == 1:
print(os.linesep.join(screens[0]), file=io.stdout)
else:
last_escape = ""
for scr in screens[0:-1]:
hunk = os.linesep.join(scr)
print(last_escape + hunk, file=io.stdout)
if not page_more():
return
esc_list = esc_re.findall(hunk)
if len(esc_list) > 0:
last_escape = esc_list[-1]
print(last_escape + os.linesep.join(screens[-1]), file=io.stdout)
def _detect_screen_size(screen_lines_def):
"""Attempt to work out the number of lines on the screen.
This is called by page(). It can raise an error (e.g. when run in the
test suite), so it's separated out so it can easily be called in a try block.
"""
TERM = os.environ.get('TERM', None)
if not((TERM == 'xterm' or TERM == 'xterm-color') and sys.platform != 'sunos5'):
# curses causes problems on many terminals other than xterm, and
# some termios calls lock up on Sun OS5.
return screen_lines_def
try:
import termios
import curses
except ImportError:
return screen_lines_def
# There is a bug in curses, where *sometimes* it fails to properly
# initialize, and then after the endwin() call is made, the
# terminal is left in an unusable state. Rather than trying to
# check everytime for this (by requesting and comparing termios
# flags each time), we just save the initial terminal state and
# unconditionally reset it every time. It's cheaper than making
# the checks.
term_flags = termios.tcgetattr(sys.stdout)
# Curses modifies the stdout buffer size by default, which messes
# up Python's normal stdout buffering. This would manifest itself
# to IPython users as delayed printing on stdout after having used
# the pager.
#
# We can prevent this by manually setting the NCURSES_NO_SETBUF
# environment variable. For more details, see:
# http://bugs.python.org/issue10144
NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
os.environ['NCURSES_NO_SETBUF'] = ''
# Proceed with curses initialization
try:
scr = curses.initscr()
except AttributeError:
# Curses on Solaris may not be complete, so we can't use it there
return screen_lines_def
screen_lines_real, screen_cols = scr.getmaxyx()
curses.endwin()
# Restore environment
if NCURSES_NO_SETBUF is None:
del os.environ['NCURSES_NO_SETBUF']
else:
os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
# Restore terminal state in case endwin() didn't.
termios.tcsetattr(sys.stdout, termios.TCSANOW, term_flags)
# Now we have what we needed: the screen size in rows/columns
return screen_lines_real
# print '***Screen size:',screen_lines_real,'lines x',\
# screen_cols,'columns.' # dbg
def page(strng, start=0, screen_lines=0, pager_cmd=None):
"""Display a string, piping through a pager after a certain length.
strng can be a mime-bundle dict, supplying multiple representations,
keyed by mime-type.
The screen_lines parameter specifies the number of *usable* lines of your
terminal screen (total lines minus lines you need to reserve to show other
information).
If you set screen_lines to a number <=0, page() will try to auto-determine
your screen size and will only use up to (screen_size+screen_lines) for
printing, paging after that. That is, if you want auto-detection but need
to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
auto-detection without any lines reserved simply use screen_lines = 0.
If a string won't fit in the allowed lines, it is sent through the
specified pager command. If none given, look for PAGER in the environment,
and ultimately default to less.
If no system pager works, the string is sent through a 'dumb pager'
written in python, very simplistic.
"""
# for compatibility with mime-bundle form:
if isinstance(strng, dict):
strng = strng['text/plain']
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
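# Illustrative calls (hypothetical, following the docstring above):
#   page(text)                    # auto-detect screen size, page as needed
#   page(text, screen_lines=-3)   # auto-detect, but keep the bottom 3 lines free
#   page(text, screen_lines=40)   # assume exactly 40 usable lines, no detection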
# first, try the hook
ip = get_ipython()
if ip:
try:
ip.hooks.show_in_pager(strng)
return
except TryNext:
pass
# Ugly kludge, but calling curses.initscr() flat out crashes in emacs
TERM = os.environ.get('TERM', 'dumb')
if TERM in ['dumb', 'emacs'] and os.name != 'nt':
print(strng)
return
# chop off the topmost part of the string we don't want to see
str_lines = strng.splitlines()[start:]
str_toprint = os.linesep.join(str_lines)
num_newlines = len(str_lines)
len_str = len(str_toprint)
# Dumb heuristics to guesstimate number of on-screen lines the string
# takes. Very basic, but good enough for docstrings in reasonable
# terminals. If someone later feels like refining it, it's not hard.
numlines = max(num_newlines, int(len_str / 80) + 1)
screen_lines_def = get_terminal_size()[1]
# auto-determine screen size
if screen_lines <= 0:
try:
screen_lines += _detect_screen_size(screen_lines_def)
except (TypeError, UnsupportedOperation):
print(str_toprint, file=io.stdout)
return
# print 'numlines',numlines,'screenlines',screen_lines # dbg
if numlines <= screen_lines:
# print '*** normal print' # dbg
print(str_toprint, file=io.stdout)
else:
# Try to open pager and default to internal one if that fails.
# All failure modes are tagged as 'retval=1', to match the return
# value of a failed system command. If any intermediate attempt
# sets retval to 1, at the end we resort to our own page_dumb() pager.
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd, start)
if os.name == 'nt':
if pager_cmd.startswith('type'):
# The default WinXP 'type' command is failing on complex
# strings.
retval = 1
else:
fd, tmpname = tempfile.mkstemp('.txt')
try:
os.close(fd)
with open(tmpname, 'wt') as tmpfile:
tmpfile.write(strng)
|
fizyk/pyramid_decoy
|
src/pyramid_decoy/__init__.py
|
Python
|
mit
| 1,031
| 0
|
# Copyright (c) 2014 by pyramid_decoy authors and contributors
# <see AUTHORS file>
#
# This module is part of pyramid_decoy and is released under
# the MIT License (MIT): http://opensource.org/licenses/MIT
"""Main decoy module."""
__version__ = "0.2.0"
SETTINGS_PREFIX = "decoy"
def includeme(configurator):
"""
Configure decoy plugin on pyramid application.
:param pyramid.configurator.Configurator configurator: pyramid's
configurator object
"""
configurator.registry["decoy"] = get_decoy_settings(
configurator.get_settings()
)
configurator.add_route("decoy", pattern="/*p")
configurator.add_view("pyramid_decoy.views.decoy", route_name="decoy")
def get_decoy_settings(settings):
"""
Extract decoy settings out of all.
:param dict settings: pyramid app settings
:returns: decoy settings
:rtype: dict
"""
return {
k.split(".", 1)[-1]: v
for k, v in settings.items()
if k[: len(SETTINGS_PREFIX)] == SETTINGS_PREFIX
}
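# Worked example (hypothetical settings dict, derived from the comprehension above):
#   get_decoy_settings({"decoy.url": "http://example.com",
#                       "decoy.code": "404",
#                       "pyramid.reload_templates": "false"})
#   returns {"url": "http://example.com", "code": "404"}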
|
googlefonts/gfregression
|
Lib/gfregression/gf_families_ignore_camelcase.py
|
Python
|
apache-2.0
| 936
| 0.002137
|
from utils import secret
import requests
import re
import json
import os
def gf_families_ignore_camelcase():
"""Find family names in the GF collection which cannot be derived by
splitting the filename using a camelcase function e.g
VT323, PTSans.
If these filenames are split, they will be V T 323 and P T Sans."""
families = {}
api_url = 'https://www.googleapis.com/webfonts/v1/webfonts?key={}'.format(
secret('GF_API_KEY')
)
r = requests.get(api_url)
for item in r.json()["items"]:
if re.search(r"[A-Z]{2}", item['family']):
families[item["family"].replace(" ", "")] = item["family"]
return families
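# Worked example of the filter above (hypothetical API entries):
#   "VT323"   -> re.search(r"[A-Z]{2}", ...) matches, so families["VT323"] = "VT323"
#   "PT Sans" -> matches, so families["PTSans"] = "PT Sans"
#   "Roboto"  -> no two consecutive capitals, so it is skipped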
def main():
current_dir = os.path.dirname(__file__)
families = gf_families_ignore_camelcase()
out = os.path.join(current_dir, "gf_families_ignore_camelcase.json")
json.dump(families, open(out, 'w'))
if __name__ == "__main__":
main()
|
hipnusleo/laserjet
|
resource/pypi/cffi-1.9.1/demo/gmp_build.py
|
Python
|
apache-2.0
| 733
| 0.001364
|
import cffi
#
# This is only a demo based on the GMP library.
# There is a rather more complete (but perhaps outdated) version available at:
# http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files
#
ffibuilder = cffi.FFI()
ffibuilder.cdef("""
typedef struct { ...; } MP_INT;
typedef MP_INT mpz_t[1];
int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base);
void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2);
char * mpz_get_str (char *string, int base, MP_INT *integer);
""")
ffibuilder.set_source('_gmp_cffi', "#include <gmp.h>",
libraries=['gmp', 'm'])
if __name__ == '__main__':
ffibuilder.compile(verbose=True)
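# Hypothetical usage sketch of the compiled module (assumes the build above
# succeeded and libgmp is installed; not part of the original demo):
#   from _gmp_cffi import ffi, lib
#   a, b, s = ffi.new("mpz_t"), ffi.new("mpz_t"), ffi.new("mpz_t")
#   lib.mpz_init_set_str(a, b"1234", 10)
#   lib.mpz_init_set_str(b, b"4321", 10)
#   lib.mpz_init_set_str(s, b"0", 10)
#   lib.mpz_add(s, a, b)
#   print(ffi.string(lib.mpz_get_str(ffi.NULL, 10, s)))  # b'5555'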
|
pandeyravi15/SGMBL
|
script/revcomp.py
|
Python
|
gpl-3.0
| 576
| 0.064236
|
import sys
inFile = open(sys.argv[1],'r')
nuc = {'A':'T','T':'A','G':'C','C':'G','K':'M','M':'K','R':'Y','Y':'R','S':'S','W':'W','B':'V','V':'B','H':'D','D':'H','X':'N','N':'N'}
def revComp(seq):
rev = ''
for i in range(len(seq) - 1,-1,-1):
rev += nuc[seq[i]]
return rev
header = ''
seq = ''
for line in inFile:
if line[0] == ">":
if header != '':
print header
print revComp(seq.upper())
header = line.strip()
seq = ''
else:
seq += line.strip()
print header
print revComp(seq.upper())
|
vmassuchetto/dnstorm
|
dnstorm/app/context_processors.py
|
Python
|
gpl-2.0
| 1,367
| 0.003658
|
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from actstream.models import user_stream
from dnstorm.app import DNSTORM_URL
from dnstorm.app.utils import get_option
from dnstorm.app.models import Problem, Idea
from dnstorm.app.utils import get_option
def base(request):
"""
Provides basic variables used for all templates.
"""
context = dict()
context['dnstorm_url'] = DNSTORM_URL
# Links
if not context.get('site_title', None):
context['site_title'] = '%s | %s' % (
get_option('site_title'), get_option('site_description'))
context['site_url'] = get_option('site_url')
context['login_form'] = AuthenticationForm()
context['login_url'] = reverse('login') + '?next=' + request.build_absolute_uri() if 'next' not in request.GET else ''
context['logout_url'] = reverse('logout') + '?next=' + request.build_absolute_uri() if 'next' not in request.GET else ''
# Checks
context['is_update'] = 'update' in request.resolver_match.url_name
# Activity
context['user_activity'] = user_stream(request.user, with_user_activity=True) if request.user.is_authenticated() else None
context['user_activity_counter'] = get_option('user_%d_activity_counter' % request.user.id) if request.user.is_authenticated() else None
return context
|
theoriginalgri/django-mssql
|
sqlserver_ado/compiler.py
|
Python
|
mit
| 10,087
| 0.002776
|
from __future__ import absolute_import, unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import django
from django.db.models.sql import compiler
import re
NEEDS_AGGREGATES_FIX = django.VERSION[:2] < (1, 7)
# query_class returns the base class to use for Django queries.
# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
# which is passed in as "QueryClass" by Django itself.
#
# SqlServerQuery overrides:
# ...insert queries to add "SET IDENTITY_INSERT" if needed.
# ...select queries to emulate LIMIT/OFFSET for sliced queries.
# Pattern to scan a column data type string and split the data type from any
# constraints or other included parts of a column definition. Based upon
# <column_definition> from http://msdn.microsoft.com/en-us/library/ms174979.aspx
_re_data_type_terminator = re.compile(
r'\s*\b(?:' +
r'filestream|collate|sparse|not|null|constraint|default|identity|rowguidcol' +
r'|primary|unique|clustered|nonclustered|with|on|foreign|references|check' +
')',
re.IGNORECASE,
)
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
values = []
index_extra_select = len(self.query.extra_select)
for value, field in zip_longest(row[index_extra_select:], fields):
# print '\tfield=%s\tvalue=%s' % (repr(field), repr(value))
if field:
try:
value = self.connection.ops.convert_values(value, field)
except ValueError:
pass
values.append(value)
return row[:index_extra_select] + tuple(values)
def compile(self, node):
"""
Added with Django 1.7 as a mechanism to evalute expressions
"""
sql_function = getattr(node, 'sql_function', None)
if sql_function and sql_function in self.connection.ops._sql_function_overrides:
sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
if sql_function:
node.sql_function = sql_function
if sql_template:
node.sql_template = sql_template
return super(SQLCompiler, self).compile(node)
def _fix_aggregates(self):
"""
MSSQL doesn't match the behavior of the other backends on a few of
the aggregate functions; different return type behavior, different
function names, etc.
MSSQL's implementation of AVG maintains datatype without proding. To
match behavior of other django backends, it needs to not drop remainders.
E.g. AVG([1, 2]) needs to yield 1.5, not 1
"""
for alias, aggregate in self.query.aggregate_select.items():
sql_function = getattr(aggregate, 'sql_function', None)
if not sql_function or sql_function not in self.connection.ops._sql_function_overrides:
continue
sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
if sql_function:
self.query.aggregate_select[alias].sql_function = sql_function
if sql_template:
self.query.aggregate_select[alias].sql_template = sql_template
def as_sql(self, with_limits=True, with_col_aliases=False):
# Django #12192 - Don't execute any DB query when QS slicing results in limit 0
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
if NEEDS_AGGREGATES_FIX:
# Django 1.7+ provides SQLCompiler.compile as a hook
self._fix_aggregates()
# Get out of the way if we're not a select query or there's no limiting involved.
has_limit_offset = with_limits and (self.query.low_mark or self.query.high_mark is not None)
try:
if not has_limit_offset:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
# let the base do its thing, but we'll handle limit/offset
sql, fields = super(SQLCompiler, self).as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
)
if has_limit_offset:
if ' order by ' not in sql.lower():
# Must have an ORDER BY to slice using OFFSET/FETCH. If
# there is none, use the first column, which is typically a
# PK
sql += ' ORDER BY 1'
sql += ' OFFSET %d ROWS' % (self.query.low_mark or 0)
if self.query.high_mark is not None:
sql += ' FETCH NEXT %d ROWS ONLY' % (self.query.high_mark - self.query.low_mark)
finally:
if not has_limit_offset:
# remove in case query is ever reused
delattr(self.query, '_mssql_ordering_not_allowed')
return sql, fields
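# Illustrative result of the slicing logic above (table and ordering hypothetical):
# for a queryset slice such as qs[10:30], the generated SELECT ends with
#   ... ORDER BY 1 OFFSET 10 ROWS FETCH NEXT 20 ROWS ONLY
# where ORDER BY 1 is appended only if the query had no ORDER BY of its own.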
def get_ordering(self):
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
if getattr(self.query, '_mssql_ordering_not_allowed', False):
if django.VERSION[1] == 1 and django.VERSION[2] < 6:
return (None, [])
return (None, [], [])
return super(SQLCompiler, self).get_ordering()
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
# search for after table/column list
_re_values_sub = re.compile(
r'(?P<prefix>\)|\])(?P<default>\s*|\s*default\s*)values(?P<suffix>\s*|\s+\()?',
re.IGNORECASE
)
# ... and insert the OUTPUT clause between it and the values list (or DEFAULT VALUES).
_values_repl = r'\g<prefix> OUTPUT INSERTED.{col} INTO @sqlserver_ado_return_id\g<default>VALUES\g<suffix>'
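# Illustrative effect of the substitution above (table/column names hypothetical):
#   INSERT INTO [app_item] ([name]) VALUES (%s)
# becomes
#   INSERT INTO [app_item] ([name]) OUTPUT INSERTED.[id] INTO @sqlserver_ado_return_id VALUES (%s)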
def as_sql(self, *args, **kwargs):
# Fix for Django ticket #14019
if not hasattr(self, 'return_id'):
self.return_id = False
result = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)
if isinstance(result, list):
# Django 1.4 wraps return in list
return [self._fix_insert(x[0], x[1]) for x in result]
sql, params = result
return self._fix_insert(sql, params)
def _fix_insert(self, sql, params):
"""
Wrap the passed SQL with IDENTITY_INSERT statements and apply
other necessary fixes.
"""
meta = self.query.get_meta()
if meta.has_auto_field:
if hasattr(self.query, 'fields'):
# django 1.4 replaced columns with fields
fields = self.query.fields
auto_field = meta.auto_field
else:
# < django 1.4
fields = self.query.columns
auto_field = meta.auto_field.db_column or meta.auto_field.column
auto_in_fields = auto_field in fields
quoted_table = self.connection.ops.quote_name(meta.db_table)
if not fields or (auto_in_fields and len(fields) == 1 and not params):
# convert format when inserting only the primary key without
# specifying a value
sql = 'INSERT INTO {0} DEFAULT VALUES'.format(
quoted_table
)
params = []
elif auto_in_fields:
# wrap with identity insert
sql = 'SET IDENTITY_INSERT {table} ON;{sql};SET IDENTITY_INSERT {table} OFF'.format(
table=quoted_table,
sql=sql,
)
# mangle SQL to return ID from insert
# http:
|
PetePriority/home-assistant
|
homeassistant/components/isy994/fan.py
|
Python
|
apache-2.0
| 3,213
| 0
|
"""
Support for ISY994 fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.fan import (FanEntity, DOMAIN, SPEED_OFF,
SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH, SUPPORT_SET_SPEED)
from homeassistant.components.isy994 import (ISY994_NODES, ISY994_PROGRAMS,
ISYDevice)
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
STATE_TO_VALUE = {}
for key in VALUE_TO_STATE:
STATE_TO_VALUE[VALUE_TO_STATE[key]] = key
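# Note: because later assignments overwrite earlier ones, STATE_TO_VALUE ends up
# as {SPEED_OFF: 0, SPEED_LOW: 64, SPEED_MEDIUM: 191, SPEED_HIGH: 255}
# (assuming insertion-ordered dicts, i.e. Python 3.7+).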
def setup_platform(hass, config: ConfigType,
add_entities: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 fan platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYFanDevice(node))
for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYFanProgram(name, status, actions))
add_entities(devices)
class ISYFanDevice(ISYDevice, FanEntity):
"""Representation of an ISY994 fan device."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self.value)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
return self.value != 0
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
self._node.on(val=STATE_TO_VALUE.get(speed, 255))
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
self._node.off()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
class ISYFanProgram(ISYFanDevice):
"""Representation of an ISY994 fan program."""
def __init__(self, name: str, node, actions) -> None:
"""Initialize the ISY994 fan program."""
super().__init__(node)
self._name = name
self._actions = actions
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to ISY994 fan program."""
if not self._actions.runThen():
_LOGGER.error("Unable to turn off the fan")
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to ISY994 fan program."""
if not self._actions.runElse():
_LOGGER.error("Unable to turn on the fan")
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
|
eneldoserrata/marcos_openerp
|
addons/purchase/purchase.py
|
Python
|
agpl-3.0
| 67,788
| 0.007612
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
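# Worked example of the rate above (hypothetical quantities): an order with moves
# of 3 units in state 'done' and 1 unit in any other non-cancelled state yields
# res[order_id] = 100.0 * 3 / 4 = 75.0; cancelled moves are skipped entirely.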
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
invoiced = False
if purchase.invoiced_rate == 100.00:
invoiced = True
res[purchase.id] = invoiced
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed'
|
mfem/PyMFEM
|
mfem/_par/sparsemat.py
|
Python
|
bsd-3-clause
| 42,261
| 0.004023
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sparsemat
else:
import _sparsemat
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sparsemat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sparsemat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.globals
import mfem._par.vector
import mfem._par.operators
import mfem._par.matrix
import mfem._par.densemat
def RAP_P(A, R, ORAP):
r"""RAP_P(SparseMatrix A, SparseMatrix R, SparseMatrix ORAP) -> SparseMatrix"""
return _sparsemat.RAP_P(A, R, ORAP)
RAP_P = _sparsemat.RAP_P
def RAP_R(Rt, A, P):
r"""RAP_R(SparseMatrix Rt, SparseMatrix A, SparseMatrix P) -> SparseMatrix"""
return _sparsemat.RAP_R(Rt, A, P)
RAP_R = _sparsemat.RAP_R
def OperatorPtr2SparseMatrix(op):
r"""OperatorPtr2SparseMatrix(mfem::OperatorPtr op) -> SparseMatrix"""
return _sparsemat.OperatorPtr2SparseMatrix(op)
OperatorPtr2SparseMatrix = _sparsemat.OperatorPtr2SparseMatrix
def OperatorHandle2SparseMatrix(op):
r"""OperatorHandle2SparseMatrix(mfem::OperatorHandle op) -> SparseMatrix"""
return _sparsemat.OperatorHandle2SparseMatrix(op)
OperatorHandle2SparseMatrix = _sparsemat.OperatorHandle2SparseMatrix
class RowNode(object):
r"""Proxy of C++ mfem::RowNode class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Value = property(_sparsemat.RowNode_Value_get, _sparsemat.RowNode_Value_set, doc=r"""Value : double""")
Prev = property(_sparsemat.RowNode_Prev_get, _sparsemat.RowNode_Prev_set, doc=r"""Prev : p.mfem::RowNode""")
Column = property(_sparsemat.RowNode_Column_get, _sparsemat.RowNode_Column_set, doc=r"""Column : int""")
def __init__(self):
r"""__init__(RowNode self) -> RowNode"""
_sparsemat.RowNode_swiginit(self, _sparsemat.new_RowNode())
__swig_destroy__ = _sparsemat.delete_RowNode
# Register RowNode in _sparsemat:
_sparsemat.RowNode_swigregister(RowNode)
class SparseMatrix(mfem._par.matrix.AbstractSparseMatrix):
r"""Proxy of C++ mfem::SparseMatrix class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(SparseMatrix self) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols=-1) -> SparseMatrix
__init__(SparseMatrix self, int * i) -> SparseMatrix
__init__(SparseMatrix self, int * i, bool ownij, bool owna, bool issorted) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols, int rowsize) -> SparseMatrix
__init__(SparseMatrix self, SparseMatrix mat, bool copy_graph=True, mfem::MemoryType mt=PRESERVE) -> SparseMatrix
__init__(SparseMatrix self, Vector v) -> SparseMatrix
"""
import numpy as np
from scipy.sparse import csr_matrix
if len(args) == 1 and isinstance(args[0], csr_matrix):
csr = args[0]
if np.real(csr).dtype != 'float64':
csr = csr.astype('float64')
i = np.ascontiguousarray(csr.indptr)
j = np.ascontiguousarray(csr.indices)
data = np.ascontiguousarray(csr.data)
m, n = csr.shape
this = _sparsemat.new_SparseMatrix([i, j, data, m, n])
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
_sparsemat.SparseMatrix_SetGraphOwner(self, False)
_sparsemat.SparseMatrix_SetDataOwner(self, False)
self._i_data = i
self._j_data = j
self._d_data = data
return
_sparsemat.SparseMatrix_swiginit(self, _sparsemat.new_SparseMatrix(*args))
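# Hypothetical usage of the scipy.sparse path above (assumes mfem._par imports
# and that the CSR arrays stay alive while the matrix is in use):
#   import numpy as np
#   from scipy.sparse import csr_matrix
#   A = csr_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
#   mat = SparseMatrix(A)   # wraps A's indptr/indices/data without taking ownership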
def UseGPUSparse(self, useGPUSparse_=True):
r"""UseGPUSparse(SparseMatrix self, bool useGPUSparse_=True)"""
return _sparsemat.SparseMatrix_UseGPUSparse(self, useGPUSparse_)
UseGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseGPUSparse)
def UseCuSparse(self, useCuSparse_=True):
r"""UseCuSparse(SparseMatrix self, bool useCuSparse_=True)"""
return _sparsemat.SparseMatrix_UseCuSparse(self, useCuSparse_)
UseCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseCuSparse)
def MakeRef(self, master):
r"""MakeRef(SparseMatrix self, SparseMatrix master)"""
return _sparsemat.SparseMatrix_MakeRef(self, master)
MakeRef = _swig_new_instance_method(_sparsemat.SparseMatrix_MakeRef)
def Size(self):
r"""Size(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_Size(self)
Size = _swig_new_instance_method(_sparsemat.SparseMatrix_Size)
def Clear(self):
r"""Clear(SparseMatrix self)"""
return _sparsemat.SparseMatrix_Clear(self)
Clear = _swig_new_instance_method(_sparsemat.SparseMatrix_Clear)
def ClearGPUSparse(self):
r"""ClearGPUSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearGPUSparse(self)
ClearGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearGPUSparse)
def ClearCuSparse(self):
r"""ClearCuSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearCuSparse(self)
ClearCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearCuSparse)
def Empty(self):
r"""Empty(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_Empty(self)
Empty = _swig_new_instance_method(_sparsemat.SparseMatrix_Empty)
def GetI(self, *args):
r"""
GetI(SparseMatrix self) -> int
GetI(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetI(self, *args)
GetI = _swig_new_instance_method(_sparsemat.SparseMatrix_GetI)
def GetJ(self, *args):
r"""
GetJ(SparseMatrix self) -> int
GetJ(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetJ(self, *args)
GetJ = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJ)
def GetData(self, *args):
r"""
GetData(SparseMatrix self) -> double
|
scottrice/Ice
|
tests/managed_rom_archive_tests.py
|
Python
|
mit
| 2,688
| 0.005952
|
import json
from mockito import *
import os
import shutil
import tempfile
import unittest
from ice.history import ManagedROMArchive
class ManagedROMArchiveTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.temppath = os.path.join(self.tempdir, "tempfile")
self.mock_user = mock()
self.mock_user.user_id = 1234
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_previous_managed_ids_returns_none_for_missing_file(self):
missing_path = os.path.join("some", "stupid", "path")
self.assertFalse(os.path.exists(missing_path))
archive = ManagedROMArchive(missing_path)
self.assertIsNone(archive.previous_managed_ids(self.mock_user))
def test_previous_managed_ids_raises_exception_for_malformed_json(self):
with open(self.temppath, "w+") as f:
f.write("notrealjson")
with self.assertRaises(ValueError):
archive = ManagedROMArchive(self.temppath)
def test_previous_managed_ids_returns_empty_list_for_missing_user(self):
data = {
"1337": []
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), [])
def test_previous_managed_ids_returns_list_from_json(self):
data = {
"1234": [
"1234567890",
"0987654321",
]
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), ["1234567890","0987654321"])
def test_set_managed_ids_creates_new_file_if_needed(self):
self.assertFalse(os.path.exists(self.temppath))
archive = ManagedROMArchive(self.temppath)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertTrue(os.path.exists(self.temppath))
def test_previous_managed_ids_returns_new_value_after_set_managed_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertEqual(archive.previous_managed_ids(self.mock_user), new_ids)
def test_creating_new_archive_after_set_managed_ids_uses_new_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
new_archive = ManagedROMArchive(self.temppath)
self.assertEqual(new_archive.previous_managed_ids(self.mock_user), new_ids)
|
dpgoetz/swift
|
swift/common/middleware/keystoneauth.py
|
Python
|
apache-2.0
| 25,905
| 0.000116
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from swift.common import utils as swift_utils
from swift.common.http import is_success
from swift.common.middleware import acl as swift_acl
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized
from swift.common.utils import config_read_reseller_options, list_from_csv
from swift.proxy.controllers.base import get_account_info
import functools
PROJECT_DOMAIN_ID_HEADER = 'x-account-project-domain-id'
PROJECT_DOMAIN_ID_SYSMETA_HEADER = \
get_sys_meta_prefix('account') + 'project-domain-id'
# a string that is unique w.r.t valid ids
UNKNOWN_ID = '_unknown'
class KeystoneAuth(object):
"""Swift middleware to Keystone authorization system.
In Swift's proxy-server.conf add this middleware to your pipeline::
[pipeline:main]
pipeline = catch_errors cache authtoken keystoneauth proxy-server
Make sure you have the authtoken middleware before the
keystoneauth middleware.
The authtoken middleware will take care of validating the user and
keystoneauth will authorize access.
The authtoken middleware is shipped with keystonemiddleware - it
does not have any other dependencies than itself so you can either
install it by copying the file directly in your python path or by
installing keystonemiddleware.
If support is required for unvalidated users (as with anonymous
access) or for formpost/staticweb/tempurl middleware, authtoken will
need to be configured with ``delay_auth_decision`` set to true. See
the Keystone documentation for more detail on how to configure the
authtoken middleware.
In proxy-server.conf you will need to have the setting account
auto creation to true::
[app:proxy-server]
account_autocreate = true
And add a swift authorization filter section, such as::
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
The user who is able to give ACL / create Containers permissions
will be the user with a role listed in the ``operator_roles``
setting which by default includes the admin and the swiftoperator
roles.
The keystoneauth middleware maps a Keystone project/tenant to an account
in Swift by adding a prefix (``AUTH_`` by default) to the tenant/project
id. For example, if the project id is ``1234``, the path is
``/v1/AUTH_1234``.
If the ``is_admin`` option is ``true``, a user whose username is the same
as the project name and who has any role on the project will have access
rights elevated to be the same as if the user had one of the
``operator_roles``. Note that the condition compares names rather than
UUIDs. This option is deprecated. It is ``false`` by default.
If you need to have a different reseller_prefix to be able to
mix different auth servers you can configure the option
``reseller_prefix`` in your keystoneauth entry like this::
reseller_prefix = NEWAUTH
Don't forget to also update the Keystone service endpoint configuration to
use NEWAUTH in the path.
It is possible to have several accounts associated with the same project.
This is done by listing several prefixes as shown in the following
example:
reseller_prefix = AUTH, SERVICE
This means that for project id '1234', the paths '/v1/AUTH_1234' and
'/v1/SERVICE_1234' are associated with the project and are authorized
using roles that a user has with that project. The core use of this feature
is that it is possible to provide different rules for each account
prefix. The following parameters may be prefixed with the appropriate
prefix:
operator_roles
service_roles
For backward compatibility, no prefix implies the parameter
applies to all reseller_prefixes. Here is an example, using two
prefixes::
reseller_prefix = AUTH, SERVICE
# The next three lines have identical effects (since the first applies
# to both prefixes).
operator_roles = admin, swiftoperator
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
# The next line only applies to accounts with the SERVICE prefix
SERVICE_operator_roles = admin, some_other_role
X-Service-Token tokens are supported by the inclusion of the service_roles
configuration option. When present, this option requires that the
X-Service-Token header supply a token from a user who has a role listed
in service_roles. Here is an example configuration::
reseller_prefix = AUTH, SERVICE
AUTH_operator_roles = admin, swiftoperator
SERVICE_operator_roles = admin, swiftoperator
SERVICE_service_roles = service
The keystoneauth middleware supports cross-tenant access control using
the syntax ``<tenant>:<user>`` to specify a grantee in container Access
Control Lists (ACLs). For a request to be granted by an ACL, the grantee
``<tenant>`` must match the UUID of the tenant to which the request
token is scoped and the grantee ``<user>`` must match the UUID of the
user authenticated by the request token.
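For illustration (placeholder UUIDs), such a grantee appears in a container
ACL as ``<tenant_uuid>:<user_uuid>``, for example
``a1b2c3d4e5f6:0f9e8d7c6b5a``.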
Note that names must no longer be used in cross-tenant ACLs because with
the introduction of domains in keystone names are no longer globally
unique.
For backwards compatibility, ACLs using names will be granted by
keystoneauth when it can be established that the grantee tenant,
the grantee user and the tenant being accessed are either not yet in a
domain (e.g. the request token has been obtained via the keystone v2
API) or are all in the default domain to which legacy accounts would
have been migrated. The default domain is identified by its UUID,
which by default has the value ``default``. This can be changed by
setting the ``default_domain_id`` option in the keystoneauth
configuration::
default_domain_id = default
The backwards compatible behavior can be disabled by setting the config
option ``allow_names_in_acls`` to false::
allow_names_in_acls = false
To enable this backwards compatibility, keystoneauth will attempt to
determine the domain id of a tenant when any new account is created,
and persist this as account metadata. If an account is created for a tenant
using a token with reselleradmin role that is not scoped on that tenant,
keystoneauth is unable to determine the domain id of the tenant;
keystoneauth will assume that the tenant may not be in the default domain
and therefore not match names in ACLs for that account.
By default, middleware higher in the WSGI pipeline may override auth
processing, useful for middleware such as tempurl and formpost. If you know
you're not going to use such middleware and you want a bit of extra
security you can disable this behaviour by setting the ``allow_overrides``
option to ``false``::
allow_overrides = false
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = swift_utils.get_logger(conf, log_route='keystoneauth')
self.reseller_prefixes, self.account_rules = \
config_read_reseller_options(conf,
dict(operator_roles=['admin',
'swiftoperator'
|
jarjun/EmergencyTextToVoiceCall
|
app/forms.py
|
Python
|
mit
| 204
| 0.009804
|
from flask.ext.wtf import Form
from wtforms import TextAreaField
from wtforms.validators import DataRequired
class RequestForm(Form):
inputText = TextAreaField('inputText', validators=[DataRequired()])
|
garinh/cs
|
docs/support/docutils/languages/de.py
|
Python
|
lgpl-2.1
| 1,814
| 0
|
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': 'Autor',
'authors': 'Autoren',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Datum',
'dedication': 'Widmung',
'copyright': 'Copyright',
'abstract': 'Zusammenfassung',
'attention': 'Achtung!',
'caution': 'Vorsicht!',
'danger': '!GEFAHR!',
'error': 'Fehler',
'hint': 'Hinweis',
'important': 'Wichtig',
'note': 'Bemerkung',
'tip': 'Tipp',
'warning': 'Warnung',
'contents': 'Inhalt'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autor': 'author',
'autoren': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'widmung': 'dedication',
'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
salessandri/programming-contests
|
project-euler/problem076.py
|
Python
|
gpl-3.0
| 1,376
| 0.00218
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################################
# Solves problem 76 from projectEuler.net.
# Finds the number of different ways that 100 can be written as a sum of
# at least two positive integers.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
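# How the loop below works: ways[i] counts the ways to write i as a sum of parts
# drawn from {1, ..., 99}. Processing one part size ("coin") at a time and doing
# ways[index] += ways[index - coin] counts each multiset of parts exactly once,
# and since 100 itself is not an allowed part, ways[100] only counts sums of at
# least two positive integers.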
if __name__ == '__main__':
ways = [0 for x in range(101)]
ways[0] = 1
for coin in reversed(range(1, 100)):
for index in range(coin, 101):
ways[index] += ways[index - coin]
print("The result is:", ways[100])
|
moradin/renderdoc
|
util/test/tests/D3D11/D3D11_Texture_Zoo.py
|
Python
|
mit
| 457
| 0.002188
|
import rdtest
class D3D11_Texture_Zoo(rdtest.TestCase):
slow_test = True
demos_test_name = 'D3D11_Texture_Zoo'
def __init__(self):
rdtest.TestCase.__init__(self)
self.zoo_helper = rdtest.Texture_Zoo()
def check_capture(self):
# This takes ownership of the controller and shuts it down when it's finished
self.zoo_helper.check_capture(self.capture_filename, self.controller)
self.controller = None
|
ptr-yudai/JokenPC
|
server/JPC.py
|
Python
|
mit
| 11,068
| 0.006781
|
# coding: utf-8
from geventwebsocket.handler import WebSocketHandler
from gevent import pywsgi, sleep
import json
import MySQLdb
class JPC:
#
# Initialization
#
def __init__(self, filepath_config):
import hashlib
# Load the configuration file
fp = open(filepath_config, 'r')
config = json.load(fp)
fp.close()
        # Store the settings in instance variables
self.host = config['host']
self.port = config['port']
self.langlist = json.load(open(config['langfile'], 'r'))
self.enckey = hashlib.md5(config['key']).digest()
self.db_host = config['db_host']
self.db_name = config['db_name']
self.db_username = config['db_username']
self.db_password = config['db_password']
return
#
    # Judge a submission
#
def execute(self):
import codecs
import commands
import os
import pwd
        # Get the submission info
code = self.packet['code']
lang = self.packet['lang']
script = self.langlist['compile'][lang]
extension = self.langlist['extension'][lang]
        # Generate the required data
filepath_in = self.randstr(8) + extension
filepath_out = self.randstr(8)
username = self.randstr(16)
        # Move to /tmp
os.chdir('/tmp/')
        # Create a user
try:
os.system("useradd -M {0}".format(username))
pwnam = pwd.getpwnam(username)
except Exception:
return
        # Write the submitted code to a file
fp = codecs.open(filepath_in, 'w', 'utf-8')
fp.write(code)
fp.close()
        # Compile
compile_result = commands.getoutput(
script.format(input=filepath_in, output=filepath_out)
)
        # Delete the source file
try:
os.remove(filepath_in)
except Exception:
pass
        # Send the compile result
try:
self.ws.send(json.dumps({'compile': compile_result}))
except Exception:
pass
        # Did the compilation succeed?
if not os.path.exists(filepath_out):
print("[INFO] コンパイルに失敗しました。")
return
        # Change permissions of the executable
try:
os.chmod(filepath_out, 0500)
os.chown(filepath_out, pwnam.pw_uid, pwnam.pw_gid)
            # Also chown the expected-output generator, just in case
os.chown(self.record['output_code'], pwnam.pw_uid, pwnam.pw_gid)
except Exception:
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
        # Run the checks
clear = True
for n in range(int(self.record['exec_time'])):
print("[INFO] {0}回目の試行が開始されました。".format(n + 1))
            # Announce the start of execution
try:
self.ws.send(json.dumps({'attempt': n + 1}))
except Exception:
pass
            # Generate the input
self.input_data = commands.getoutput(
self.record['input_code'] + " " + str(n)
)
            # Generate the expected output
self.output_data = self.run_command(username, self.record['output_code'])
            # Get the execution result
result = self.run_command(username, './'+filepath_out)
#print "Input : ", self.input_data
#print "Answer : ", self.output_data
#print "Result : ", result
            # Timeout
if result == False:
self.ws.send(json.dumps({'failure': n + 1}))
clear = False
print("[INFO] タイムアウトしました。")
continue
            # The result differs
if self.output_data.rstrip('\n') != result.rstrip('\n'):
self.ws.send(json.dumps({'failure': n + 1}))
clear = False
print("[INFO] 結果に誤りがあります。")
continue
            # Announce the result of this attempt
try:
self.ws.send(json.dumps({'success': n + 1}))
print("[INFO] チェックが成功しました。")
except Exception:
pass
        # Notify of success or failure
if clear:
self.ws.send('{"complete":"success"}')
self.update_db()
else:
self.ws.send('{"complete":"failure"}')
        # Delete the executable
try:
os.remove(filepath_out)
os.system("userdel -r {0}".format(username))
except Exception:
print("[ERROR] /tmp/{0}の削除に失敗しました。".format(filepath_out))
print("[ERROR] ユーザー{0}の削除に失敗しました。".format(username))
return
#
    # Run a command with resource limits
#
def run_command(self, username, filepath):
import subprocess
import time
import sys
        # Spawn the process
proc = subprocess.Popen(
[
'su',
username,
'-c',
'ulimit -v {0}; {1}'.format(
str(self.record['limit_memory']),
filepath
)
],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
stdin = subprocess.PIPE,
)
        # Send the input
proc.stdin.write(self.input_data.rstrip('\n') + '\n')
proc.stdin.close()
        # Set the time limit
deadline = time.time() + float(self.record['limit_time']) / 1000.0
while time.time() < deadline and proc.poll() == None:
time.sleep(0.20)
        # Timeout
if proc.poll() == None:
if float(sys.version[:3]) >= 2.6:
proc.terminate()
return False
        # Normal termination
stdout = proc.stdout.read()
return stdout
#
    # Add points to the user
#
def update_db(self):
import time
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
        # Add the score
cursor.execute("UPDATE account SET score=score+{score} WHERE user='{user}';".format(score=int(self.record['score']), user=self.user))
        # Add the problem to the user's solved list
cursor.execute("UPDATE account SET solved=concat('{id},', solved) WHERE user='{user}';".format(id=self.record['id'], user=self.user))
|
        # Increment the solve count
cursor.execute("UPDATE problem SET solved=solved+1 WHERE id={id};".format(id=self.record['id']))
        # Update the most recent solver
cursor.execute("UPDATE problem SET solved_user='{user}' WHERE id={id};".format(user=self.user, id=self.record['id']))
        # Update the solve timestamp
cursor.execute("UPDATE problem SET last_date='{date}' WHERE id={id};".format(date=time.strftime('%Y-%m-%d %H:%M:%S'), id=self.record['id']))
cursor.close()
self.db
|
.commit()
return
#
    # Handle a new request
#
def handle(self, env, response):
self.ws = env['wsgi.websocket']
print("[INFO] 新しい要求を受信しました。")
        # Get the request
self.packet = self.ws.receive()
if not self.analyse_packet(): return
        # Get the problem
self.get_problem()
        # Execute
self.execute()
return
#
    # Get the problem details
#
def get_problem(self):
cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * FROM problem WHERE id={id};".format(id=self.packet['id']))
self.record = cursor.fetchall()[0]
cursor.close()
return
#
    # Parse the data
#
def analyse_packet(self):
from Crypto.Cipher import AES
        # Parse the packet as JSON
try:
self.packet = json.loads(self.packet)
except Exception:
print("[ERROR] JSONの展開に失敗しました。")
return False
        # Verify the data integrity
if not self.check_payload():
print("[ERROR] 不正なデータであると判別されました。")
self.ws.send('{"error":"無効なデータが送信されました。"}')
return False
        # Decrypt the username
iv = self.packet['iv'].decode('base64')
enc_user = self.packet['user'].decode('base64')
aes = AES.new(self.enckey, AES.MODE_CBC, iv)
self.user = aes.decrypt(enc_user).replace('\x00', '')
print("[INFO] この試行のユーザーは{0}です。".format(self.user))
        # Escape the input values
self.user = MySQLdb.escape_string(self.user)
self.packet['id'] = int(self.packet['id'])
return True
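    # A hypothetical client-side sketch (not part of this server; the shared
    # key and field values are placeholders) of how a packet accepted by
    # analyse_packet() could be built with the same PyCrypto primitives:
    #   from Crypto.Cipher import AES
    #   import json, os
    #   iv = os.urandom(16)
    #   user = 'alice'.ljust(16, '\x00')  # zero-pad to the AES block size
    #   enc_user = AES.new(enckey, AES.MODE_CBC, iv).encrypt(user)
    #   packet = json.dumps({'id': 1, 'lang': 'c', 'code': '...',
    #                        'iv': iv.encode('base64'),
    #                        'user': enc_user.encode('base64')})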
#
    # Check whether the payload is valid
#
|
AlanCoding/tower-cli
|
tests/test_cli_action.py
|
Python
|
apache-2.0
| 2,582
| 0
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from click.testing import CliRunner
from tower_cli.cli.action import ActionSubcommand
from tests.compat import unittest
CATEGORIZED_OUTPUT = """Usage: foo [OPTIONS]
Field Options:
--bar TEXT foobar
Local Options:
--foo TEXT foobar
Global Options:
--tower-host TEXT foobar
Other Options:
--help Show this message and exit.
"""
class ActionCommandTests(unittest.TestCase):
"""A set of tests to ensure that the tower_cli Command class works
in the way we expect.
"""
def setUp(self):
self.runner = CliRunner()
|
def test_dash_dash_help(self):
"""Establish that no_args_is_help causes the help to be printed,
and an exit.
"""
# Create a command with which to test.
@click.command(no_args_is_help=True, cls=ActionSubcommand)
@click.argument('parrot')
def foo(parrot):
click.echo(parrot)
# Establish that this command echos if called with echo.
self.assertEqual(self.runner.invoke(foo, ['bar']).output, 'bar\n')
# Establis
|
h that this command sends help if called with nothing.
result = self.runner.invoke(foo)
self.assertIn('--help', result.output)
self.assertIn('Show this message and exit.\n', result.output)
def test_categorize_options(self):
"""Establish that options in help text are correctly categorized.
"""
@click.command(cls=ActionSubcommand)
@click.option('--foo', help='foobar')
@click.option('--bar', help='[FIELD]foobar')
@click.option('--tower-host', help='foobar')
def foo():
pass
result = self.runner.invoke(foo)
self.assertEqual(result.output, CATEGORIZED_OUTPUT)
@click.command(cls=ActionSubcommand, add_help_option=False)
def bar():
pass
result = self.runner.invoke(bar)
self.assertEqual(result.output, 'Usage: bar [OPTIONS]\n')
|
dbcls/dbcls-galaxy
|
lib/galaxy/datatypes/sniff.py
|
Python
|
mit
| 9,773
| 0.014734
|
"""
File format detector
"""
import logging, sys, os, csv, tempfile, shutil, re, zipfile
import registry
from galaxy import util
log = logging.getLogger(__name__)
def get_test_fname(fname):
"""Returns test data filename"""
path, name = os.path.split(__file__)
full_path = os.path.join(path, 'test', fname)
return full_path
def stream_to_file( stream, suffix='', prefix='', dir=None, text=False ):
"""Writes a stream to a temporary file, returns the temporary file's name"""
fd, temp_name = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir, text=text )
CHUNK_SIZE = 1048576
data_checked = False
is_compressed = False
is_binary = False
is_multi_byte = False
while 1:
chunk = stream.read( CHUNK_SIZE )
if not chunk:
break
if not data_checked:
# See if we're uploading a compressed file
if zipfile.is_zipfile( temp_name ):
is_compressed = True
else:
try:
if unicode( chunk[:2] ) == unicode( util.gzip_magic ):
is_compressed = True
except:
pass
if not is_compressed:
# See if we have a multi-byte character file
chars = chunk[:100]
is_multi_byte = util.is_multi_byte( chars )
if not is_multi_byte:
for char in chars:
if ord( char ) > 128:
is_binary = True
break
data_checked = True
if not is_compressed and not is_binary:
os.write( fd, chunk.encode( "utf-8" ) )
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
os.write( fd, chunk )
os.close( fd )
return temp_name, is_multi_byte
def check_newlines( fname, bytes_to_read=52428800 ):
"""
Determines if there are any non-POSIX newlines in the first
    bytes_to_read bytes (by default, 50MB) of the file.
"""
CHUNK_SIZE = 2 ** 20
f = open( fname, 'r' )
    while f.tell() <= bytes_to_read:
        chunk = f.read( CHUNK_SIZE )
        if not chunk:
            break
        if chunk.count( '\r' ):
            f.close()
            return True
f.close()
return False
def convert_newlines( fname ):
"""
Converts in place a file from universal line endings
to Posix line endings.
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines(fname)
2
>>> file(fname).read()
'1 2\\n3 4\\n'
"""
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
fp.write( "%s\n" % line.rstrip( "\r\n" ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def sep2tabs(fname, patt="\\s+"):
"""
Transforms in place a 'sep' separated file to a tab separated one
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\n3 4\\n")
>>> sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def convert_newlines_sep2tabs( fname, patt="\\s+" ):
"""
Combines above methods: convert_newlines() and sep2tabs()
so that files do not need to be read twice
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines_sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def get_headers( fname, sep, count=60, is_multi_byte=False ):
"""
Returns a list with the first 'count' lines split by 'sep'
>>> fname = get_test_fname('complete.bed')
>>> get_headers(fname,'\\t')
[['chr7', '127475281', '127491632', 'NM_000230', '0', '+', '127486022', '127488767', '0', '3', '29,172,3225,', '0,10713,13126,'], ['chr7', '127486011', '127488900', 'D49487', '0', '+', '127486022', '127488767', '0', '2', '155,490,', '0,2399']]
"""
headers = []
for idx, line in enumerate(file(fname)):
line = line.rstrip('\n\r')
if is_multi_byte:
# TODO: fix this - sep is never found in line
line = unicode( line, 'utf-8' )
sep = sep.encode( 'utf-8' )
headers.append( line.split(sep) )
if idx == count:
break
return headers
def is_column_based( fname, sep='\t', skip=0, is_multi_byte=False ):
"""
Checks whether the file is column based with respect to a separator
(defaults to tab separator).
>>> fname = get_test_fname('test.gff')
>>> is_column_based(fname)
True
>>> fname = get_test_fname('test_tab.bed')
>>> is_column_based(fname)
True
>>> is_column_based(fname, sep=' ')
False
>>> fname = get_test_fname('test_space.txt')
>>> is_column_based(fname)
False
>>> is_column_based(fname, sep=' ')
True
>>> fname = get_test_fname('test_ensembl.tab')
>>> is_column_based(fname)
True
>>> fname = get_te
|
st_fname('test_tab1.tabular')
>>> is_column_based(fname, sep=' ', skip=0)
False
>>> fname = get_test_fname('test_tab1.tabular')
>>> is_column_based(fname)
True
"""
headers = get_hea
|
ders( fname, sep, is_multi_byte=is_multi_byte )
count = 0
if not headers:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) > 1:
count = len(hdr)
break
if count < 2:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) != count:
return False
return True
def guess_ext( fname, sniff_order=None, is_multi_byte=False ):
"""
Returns an extension that can be used in the datatype factory to
generate a data for the 'fname' file
>>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
>>> guess_ext(fname)
'blastxml'
>>> fname = get_test_fname('interval.interval')
>>> guess_ext(fname)
'interval'
>>> fname = get_test_fname('interval1.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('test_tab.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('sequence.maf')
>>> guess_ext(fname)
'maf'
>>> fname = get_test_fname('sequence.fasta')
>>> guess_ext(fname)
'fasta'
>>> fname = get_test_fname('file.html')
>>> guess_ext(fname)
'html'
>>> fname = get_test_fname('test.gff')
>>> guess_ext(fname)
'gff'
>>> fname = get_test_fname('gff_version_3.gff')
>>> guess_ext(fname)
'gff3'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a\\t2\\nc\\t1\\nd\\t0")
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a 1 2 x\\nb 3 4 y\\nc 5 6 z")
>>> guess_ext(fname)
'txt'
>>> fname = get_test_fname('test_tab1.tabular')
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('alignment.lav')
>>> guess_ext(fname)
'lav'
"""
if sniff_order is None:
datatypes_registry = registry.Registry()
sniff_order = datatypes_registry.sniff_order
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_virtual_router_peerings_operations.py
|
Python
|
mit
| 22,524
| 0.004972
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
"""VirtualRouterPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deseri
|
alizer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthentication
|
Error, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from a Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
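    # A hypothetical usage sketch (client construction is outside this file;
    # 'client' and the resource names below are placeholders):
    #   poller = client.virtual_router_peerings.begin_delete(
    #       resource_group_name='my-rg',
    #       virtual_router_name='my-router',
    #       peering_name='my-peering')
    #   poller.result()  # block until the long-running delete completes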
def get(
self,
resource_group_nam
|
a5kin/hecate
|
xentica/core/color_effects.py
|
Python
|
mit
| 4,034
| 0
|
"""
The collection of decorators for the ``color()`` method, which each CA
model should have.
The method should be decorated by one of the classes below, otherwise
correct model behavior is not guaranteed.
All decorators take the ``(red, green, blue)`` tuple returned by the
``color()`` method, then process it to create some color effect.
A minimal example::
from xentica import core
from xentica.core import color_effects
class MyCA(core.CellularAutomaton):
state = core.IntegerProperty(max_val=1)
# ...
@color_effects.MovingAverage
def color(self):
red = self.main.state * 255
green = self.main.state * 255
blue = self.main.state * 255
return (red, green, blue)
"""
from xentica.core.variables import Constant
from xentica.core.mixins import BscaDetectorMixin
__all__ = ['ColorEffect', 'MovingAverage', ]
class ColorEffect(BscaDetectorMixin):
"""
The base class for other color effects.
    You may also use it as a standalone color effect decorator; it adds
    no effect of its own, storing the calculated RGB value directly.
    To create your own class inherited from :class:`ColorEffect`, you
    should override the ``__call__`` method, and place the code of the color
    processing into ``self.effect``. The code should process values
of ``new_r``, ``new_g``, ``new_b`` variables and store the result
back to them.
An example::
class MyEffect(ColorEffect):
def __call__(self, *args):
self.effect = "new_r += 20;"
self.effect += "new_g += 15;"
self.effect += "new_b += 10;"
return super().__call__(*args)
"""
def __init__(self, func):
"""Initialize base attributes."""
self.func = func
self.effect = ""
def __call__(self):
"""
Implement the color decorator.
Sibling classes should override this method, and return
``super`` result, like shown in the example above.
"""
red, green, blue = self.func(self.bsca)
code = """
int new_r = %s;
int new_g = %s;
int new_b = %s;
%s
col[i] = make_int3(new_r, new_g, new_b);
""" % (red, green, blue, self.effect)
self.bsca.append_code(code)
class MovingAverage(ColorEffect):
"""
Apply the moving average to each color channel separately.
With this effect, 3 additional settings are available for you in
``Experiment`` classes:
fade_in
The maximum delta by which a channel could
*increase* its value in a single timestep.
fade_out
The maximum delta by which a channel could
*decrease* its value in a single timestep.
smooth_factor
The divisor for two previous settings, to make
the effect even smoother.
"""
def __call__(self):
"""Implement the effect."""
if not hasattr(self.bsca, "fade_in"):
self.bsca.fade_in = 255
if not hasattr(self.bsca, "fade_out"):
self.bsca.fade_out = 255
if not hasattr(self.bsca, "smooth_factor"):
self.bsca.smooth_factor = 1
self.bsca.define_constant(Constant("FADE_IN", self.bsca.fade_in))
self.bsca.define_constant(Constant("FADE_OUT", self.bsca.fade_out))
self.bsca.define_constant(Constant("SMOOTH_FACTOR",
self.bsca.smooth_factor))
self.effect = """
new_r *= SMOOTH_FACTOR;
new_g *= SMOOTH_FACTOR;
new_b *= SMOOTH_FACTOR;
int3 old_col = col[i];
new_r = max(min(new_r, old_col.x + FADE_IN),
old_col.x - FADE_OUT);
new_g = max(min(new_g, old_col.y + FADE_IN),
old_col.y - F
|
ADE_OUT);
new_b = max(min(new_b, old_col.z + FADE_IN),
old_col.z - FADE_OUT);
"""
ret
|
urn super().__call__()
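# A minimal sketch (assuming the usual xentica Experiment pattern; the base
# class name and values below are illustrative only) of tuning the three
# settings read above:
#   class SmoothExperiment(Experiment):
#       fade_in = 16        # max per-step increase of a channel
#       fade_out = 4        # max per-step decrease of a channel
#       smooth_factor = 8   # divisor applied to the incoming channel values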
|
rescale/django-money
|
djmoney/settings.py
|
Python
|
bsd-3-clause
| 987
| 0.001013
|
# -*- coding: utf-8 -*-
import operator
from django.conf import settings
from moneyed import CURRENCIES, DEFAULT_CURRENCY, DEFAULT_CURRENCY_CODE
# The default currency, you can define this in your project's settings module
# This has to be a currency object imported from moneyed
DEFAULT_CURRENCY = getattr(settings, 'DEFAULT_CURRENCY', DEFAULT_CURRENCY)
# The default currency choices, you can define this in your project's
# settings module
PROJECT_CURRENCIES = getattr(settings, 'CURRENCIES', None)
CURRENCY_CHOICES = getattr(settings, 'CURRENCY_CHOICES', None)
if CURRENCY_CHOICES is None:
if PROJECT_CURRENCIES:
CUR
|
RENCY_CHOICES = [(code, CURRENCIES[code].name) for code in PROJECT_CURRENCIES]
else:
CURRENCY_CHOICES = [(c.code, c.name) for i, c in CURRENCIES.items() if
c.code
|
!= DEFAULT_CURRENCY_CODE]
CURRENCY_CHOICES.sort(key=operator.itemgetter(1, 0))
DECIMAL_PLACES = getattr(settings, 'CURRENCY_DECIMAL_PLACES', 2)
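# A minimal sketch (hypothetical project settings, not part of this module)
# of the values this file reads; the currency codes are illustrative only:
#   CURRENCIES = ('USD', 'EUR')
#   CURRENCY_CHOICES = [('USD', 'US Dollars'), ('EUR', 'Euro')]
#   CURRENCY_DECIMAL_PLACES = 2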
|
antonszilasi/honeybeex
|
honeybeex/honeybee/radiance/command/gensky.py
|
Python
|
gpl-3.0
| 5,946
| 0.001177
|
# coding=utf-8
from _commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceTuple
from ..parameters.gensky import GenskyParameters
import os
class Gensky(RadianceCommand):
u"""
    gensky - Generate a RADIANCE description of the sky for a given month, day and hour.
The attributes for this class and their data descriptors are given below.
Please note that the first two inputs for each descriptor are for internal
naming purposes only.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing input
|
s for month, day and hour.
genskyParameters: Radiance parameters for gensky. If None Default
parameters will be set. You can use self.genskyParameters to view,
add or remove the parameters before executing the command.
Usage:
from honeybee.radiance.parameters.gensky import GenSkyParameters
from honeybee.radiance.command.gensky import GenSky
# cre
|
ate and modify genskyParameters. In this case a sunny with no sun
# will be generated.
gnskyParam = GenSkyParameters()
gnskyParam.sunnySkyNoSun = True
# create the gensky Command.
gnsky = GenSky(monthDayHour=(1,1,11), genskyParameters=gnskyParam,
outputName = r'd:/sunnyWSun_010111.sky' )
# run gensky
gnsky.execute()
"""
monthDayHour = RadianceTuple('monthDayHour', 'month day hour', tupleSize=3,
testType=False)
outputFile = RadiancePath('outputFile', descriptiveName='output sky file',
relativePath=None, checkExists=False)
def __init__(self, outputName='untitled', monthDayHour=None,
genskyParameters=None):
"""Init command."""
RadianceCommand.__init__(self)
self.outputFile = outputName if outputName.lower().endswith(".sky") \
else outputName + ".sky"
"""results file for sky (Default: untitled)"""
self.monthDayHour = monthDayHour
self.genskyParameters = genskyParameters
@classmethod
def fromSkyType(cls, outputName='untitled', monthDayHour=(1, 21, 12),
skyType=0, latitude=None, longitude=None, meridian=None):
"""Create a sky by sky type.
Args:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing inputs for month, day and hour.
            skyType: An integer between 0-5 for CIE sky type.
0: [+s] Sunny with sun, 1: [-s] Sunny without sun,
2: [+i] Intermediate with sun, 3: [-i] Intermediate with no sun,
4: [-c] Cloudy overcast sky, 5: [-u] Uniform cloudy sky
            latitude: [-a] A float number to indicate site latitude. Negative
angle indicates south latitude.
            longitude: [-o] A float number to indicate site longitude. Negative
angle indicates east longitude.
meridian: [-m] A float number to indicate site meridian west of
Greenwich.
"""
_skyParameters = GenskyParameters(latitude=latitude, longitude=longitude,
meridian=meridian)
# modify parameters based on sky type
try:
skyType = int(skyType)
except TypeError:
"skyType should be an integer between 0-5."
assert 0 <= skyType <= 5, "Sky type should be an integer between 0-5."
if skyType == 0:
_skyParameters.sunnySky = True
elif skyType == 1:
_skyParameters.sunnySky = False
elif skyType == 2:
_skyParameters.intermSky = True
elif skyType == 3:
_skyParameters.intermSky = False
elif skyType == 4:
_skyParameters.cloudySky = True
elif skyType == 5:
_skyParameters.uniformCloudySky = True
return cls(outputName=outputName, monthDayHour=monthDayHour,
genskyParameters=_skyParameters)
@classmethod
def createUniformSkyfromIlluminanceValue(cls, outputName="untitled",
illuminanceValue=10000):
"""Uniform CIE sky based on illuminance value.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
illuminanceValue: Desired illuminance value in lux
"""
assert float(illuminanceValue) >= 0, "Illuminace value can't be negative."
_skyParameters = GenskyParameters(zenithBrightHorzDiff=illuminanceValue / 179.0)
return cls(outputName=outputName, genskyParameters=_skyParameters)
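    # Note on the conversion above: the requested illuminance in lux is divided
    # by Radiance's luminous efficacy constant of 179 lm/W, giving the
    # horizontal diffuse irradiance from which gensky computes zenith
    # brightness, e.g. 10000 lux / 179.0 ~= 55.9 W/m2.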
@classmethod
def fromRadiationValues(cls):
"""Create a sky based on sky radiation values."""
raise NotImplementedError()
@property
def genskyParameters(self):
"""Get and set genskyParameters."""
return self.__genskyParameters
@genskyParameters.setter
def genskyParameters(self, genskyParam):
self.__genskyParameters = genskyParam if genskyParam is not None \
else GenskyParameters()
assert hasattr(self.genskyParameters, "isRadianceParameters"), \
"input genskyParameters is not a valid parameters type."
def toRadString(self, relativePath=False):
"""Return full command as a string."""
# generate the name from self.weaFile
radString = "%s %s %s > %s" % (
self.normspace(os.path.join(self.radbinPath, 'gensky')),
self.monthDayHour.toRadString().replace("-monthDayHour ", ""),
self.genskyParameters.toRadString(),
self.normspace(self.outputFile.toRadString())
)
return radString
@property
def inputFiles(self):
"""Input files for this command."""
return None
|
openstack/keystone
|
keystone/common/policies/domain.py
|
Python
|
apache-2.0
| 3,937
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#
|
http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from keystone.common.polici
|
es import base
DEPRECATED_REASON = (
"The domain API is now aware of system scope and default roles."
)
deprecated_list_domains = policy.DeprecatedRule(
name=base.IDENTITY % 'list_domains',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_get_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain',
check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'update_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'create_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_domain',
check_str=base.RULE_ADMIN_REQUIRED,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
)
SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER = (
'(role:reader and system_scope:all) or '
'token.domain.id:%(target.domain.id)s or '
'token.project.domain.id:%(target.domain.id)s'
)
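# For example, a project-scoped token whose project belongs to domain X
# satisfies this rule for target domain X via the third clause, even though
# the token is neither system-scoped nor domain-scoped.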
domain_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_domain',
# NOTE(lbragstad): This policy allows system, domain, and
# project-scoped tokens.
check_str=SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER,
scope_types=['system', 'domain', 'project'],
description='Show domain details.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'GET'}],
deprecated_rule=deprecated_get_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_domains',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='List domains.',
operations=[{'path': '/v3/domains',
'method': 'GET'}],
deprecated_rule=deprecated_list_domains),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Create domain.',
operations=[{'path': '/v3/domains',
'method': 'POST'}],
deprecated_rule=deprecated_create_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Update domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'PATCH'}],
deprecated_rule=deprecated_update_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_domain',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_domain),
]
def list_rules():
return domain_policies
|
hdm-dt-fb/rvt_model_services
|
process_model.py
|
Python
|
mit
| 14,344
| 0.00251
|
""" process_model.py
Usage:
process_model.py <command> <project_code> <full_model_path> [options]
Arguments:
command action to be run on model, like: qc, audit or dwf
currently available: qc, audit, dwf
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 377_S
full_model_path revit model path including file name
use cfg shortcut if your full model path is already set in config.ini
Options:
-h, --help Show this help screen.
--viewer run revit in viewer mode (-> no transactions)
--html_path=<html> path to store html bokeh graphs, default in /commands/qc/*.html
--write_warn_ids write warning ids from warning command
--rvt_path=<rvt> full path to force specific rvt version other than detected
--rvt_ver=<rvtver> specify revit version and skip checking revit file version
(helpful if opening revit server files)
--audit activate open model with audit
--noworkshared open non-workshared model
--nodetach do not open workshared model detached
--notify choose to be notified with configured notify module(s)
--nofilecheck skips verifying model path actually exists
(helpful if opening revit server files)
--skip_hash_unchanged skips processing unchanged file
--timeout=<seconds> timeout in seconds before revit process gets terminated
"""
from docopt import docopt
import os
import pathlib
import hashlib
import subprocess
import psutil
import configparser
import time
import datetime
import logging
import colorful as col
import rvt_detector
from collections import defaultdict
from importlib import machinery
from tinydb import TinyDB, Query
from utils import rvt_journal_parser, rvt_journal_purger
from utils.win_utils import proc_open_files
from utils.rms_paths import get_paths
from notify.email import send_mail
from notify.slack import send_slack
from notify.req_post import send_post
def check_cfg_path(prj_number, cfg_str_or_path, cfg_path):
config = configparser.ConfigParser()
ini_file = cfg_path / "config.ini"
if cfg_str_or_path == "cfg":
if not cfg_str_or_path.exists():
if ini_file.exists():
config.read(ini_file)
if prj_number in config:
config_path = config[prj_number]["path"]
return config_path
return pathlib.Path(cfg_str_or_path)
def get_model_hash(rvt_model_path):
"""
Creates a hash of provided rvt model file
    :param rvt_model_path: path to the Revit model file
:return: hash string
"""
BLOCKSIZE = 65536
hasher = hashlib.sha256()
with open(rvt_model_path, "rb") as rvt:
buf = rvt.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = rvt.read(BLOCKSIZE)
return hasher.hexdigest()
def check_hash_unchanged(hash_db, rvt_model_path, model_hash, date):
model_info = {"<full_model_path>": str(rvt_model_path),
">last_hash": model_hash,
">last_hash_date": date,
}
unchanged = hash_db.search((Query()["<full_model_path>"] == str(rvt_model_path)) &
(Query()[">last_hash"] == model_hash)
)
if unchanged:
return True
else:
hash_db.upsert(model_info, Query()["<full_model_path>"] == str(rvt_model_path)
)
def exit_with_log(message, severity=logging.warning, exit_return_code=1):
"""
    Ends the whole script, logging a message at the given severity.
    :param message: text to log before exiting
    :param severity: logging function to use (default: logging.warning)
    :param exit_return_code: return code recorded in the log line
    :return:
"""
severity(f"{project_code};{current_proc_hash};{exit_return_code};;{message}")
exit()
def get_jrn_and_post_process(search_command, commands_dir):
"""
Searches command paths for register dict in __init__.py in command roots to
prepare appropriate command strings to be inserted into the journal file
:param search_command: command name to look up
:param commands_dir: commands directory
:return: command module, post process dict
"""
found_dir = False
module_rjm = None
post_proc_dict = defaultdict()
for directory in os.scandir(commands_dir):
command_name = directory.name
# print(command_name)
if search_command == command_name:
found_dir = True
print(f" found appropriate command directory {commands_dir / command_name}")
mod_init = commands_dir / command_name / "__init__.py"
if mod_init.exists():
mod = machinery.SourceFileLoader(command_name, str(mod_init)).load_module()
if "register" in dir(mod):
if mod.register["name"] == command_name:
if "rjm" in mod.register:
module_rjm = mod.register["rjm"]
if "post_process" in mod.register:
external_args = []
for arg in mod.register["post_process"]["args"]:
external_args.append(globals().get(arg))
post_proc_dict["func"] = mod.register["post_process"]["func"]
post_proc_dict["args"] = external_args
else:
exit_with_log('__init__.py in command directory not found')
if not found_dir:
print(col.bold_red(f" appropriate command directory for '{search_command}' not found - aborting."))
exit_with_log('command directory not found')
return module_rjm, post_proc_dict
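# A hypothetical sketch (illustrative names only) of the register dict that
# get_jrn_and_post_process() looks for in a command's __init__.py:
#   register = {"name": "qc",
#               "rjm": "rvt_journal_qc.py",
#               "post_process": {"func": some_callable,
#                                "args": ["project_code", "html_path"]}}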
def get_rvt_proc_journal(process, jrn_file_path):
open_files = process.open_files()
for proc_file in open_files:
file_name = pathlib.Path(proc_file.path).name
if file_name.startswith("journal"):
return proc_file.path
# if nothing found using the process.open_files
# dig deeper and get nasty
for proc_res in proc_open_files(process):
res_name = pathlib.Path(proc_res).name
if res_name.startswith("journal") and res_name.endswith("txt"):
return jrn_file_path / res_name
today_int = int(datetime.date.today().strftime("%Y%m%d"))
rms_paths = get_paths(__file__)
args = docopt(__doc__)
command = args["<command>"]
project_code = args["<project_code>"]
full_model_path = args["<full_model_path>"]
full_model_path = check_cfg_path(project_code, full_model_path, rms_paths.root)
model_path = full_model_path.parent
model_file_name = full_model_path.name
timeout = args["--timeout"]
html_path = args["--html_path"]
write_warn_ids = args["--write_warn_ids"]
rvt_override_path = args["--rvt_path"]
rvt_override_version = args["--rvt_ver"]
notify = args["--notify"]
disable_filecheck = args["--nofilecheck"]
disable_detach = args["--nodetach"]
disable_ws = args["--noworkshared"]
skip_hash_unchanged = args["--skip_hash_unchanged"]
audit = args["--audit"]
viewer = args["--viewer"
|
]
if viewer:
viewer = "/viewer"
comma_concat_arg
|
s = ",".join([f"{k}={v}" for k, v in args.items()])
print(col.bold_blue(f"+process model job control started with command: {command}"))
print(col.bold_orange(f"-detected following root path:"))
print(f" {rms_paths.root}")
format_json = {"sort_keys": True, "indent": 4, "separators": (',', ': ')}
hashes_db = TinyDB(rms_paths.db / "model_hashes.json", **format_json)
journal_file_path = rms_paths.journals / f"{project_code}.txt"
model_exists = full_model_path.exists()
timeout = int(timeout) if timeout else 60
if not html_path:
if command == "qc":
html_path = rms_paths.com_qc
elif command == "warnings":
html_path = rms_paths.com_warnings
elif not pathlib.Path(html_path).exists():
i
|
pepsipepsi/nodebox_opengl_python3
|
examples/04-text/02-style.py
|
Python
|
bsd-3-clause
| 687
| 0.039301
|
import os, sys
sys.path.insert(0, os.pat
|
h.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
txt = Text("So long!\nThanks for all the fish.",
font = "Droid S
|
erif",
fontsize = 20,
fontweight = BOLD,
lineheight = 1.2,
fill = color(0.25))
# Text.style() can be used to style individual characters in the text.
# It takes a start index, a stop index, and optional styling parameters:
txt.style(9, len(txt), fontsize=txt.fontsize/2, fontweight=NORMAL)
def draw(canvas):
canvas.clear()
x = (canvas.width - textwidth(txt)) / 2
y = 250
text(txt, x, y)
canvas.size = 500, 500
canvas.run(draw)
|
UManPychron/pychron
|
pychron/processing/ratios/ratio_editor.py
|
Python
|
apache-2.0
| 4,353
| 0.003446
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, Float
from traitsui.api import View, Item, UItem, VGroup
# ============= standard library imports ========================
from uncertainties import nominal_value
# ============= local library imports ==========================
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
from pychron.graph.stacked_regression_graph import StackedRegressionGraph
class RatioEditor(BaseTraitsEditor):
"""
"""
graph = Instance(StackedRegressionGraph)
intercept_ratio = Float
time_zero_offset = Float(0, auto_set=False, enter_set=True)
ratio_intercept = Float
basename = ''
def _time_zero_offset_changed(self):
self.refresh_plot()
def setup(self):
self.data = self.analysis.isotopes
self.setup_graph()
def setup_graph(self):
cd = dict(padding=20,
spacing=5,
stack_order='top_to_bottom')
g = StackedRegressionGraph(container_dict=cd)
self.graph = g
self.refresh_plot()
def refresh_plot(self):
g = self.graph
d = self.data
g.clear()
for ni, di in [('Ar40', 'Ar39')]:
niso, diso = d[ni], d[di]
self.plot_ratio(g, niso, diso)
self.
|
intercept_ratio = nominal_value(niso.uvalue / diso.uvalue)
def plot_ratio(self, g, niso, diso):
niso.time_zero_offset = self.time_zero_offset
diso.time_zero_offset = self.time_zero_offset
fd = {'filter_outliers': True, 'std_devs': 2, 'iterations': 1}
niso.filter_outliers_dict = f
|
d
diso.filter_outliers_dict = fd
niso.dirty = True
diso.dirty = True
g.new_plot()
g.set_x_limits(min_=0, max_=100)
g.set_y_title(niso.name)
_,_,nl = g.new_series(niso.offset_xs, niso.ys, filter_outliers_dict=fd)
g.new_plot()
g.set_y_title(diso.name)
_,_,dl = g.new_series(diso.offset_xs, diso.ys, filter_outliers_dict=fd)
# g.new_plot()
# nreg = nl.regressor
# dreg = dl.regressor
#
# xs = nreg.xs
# ys = nreg.predict(xs)/dreg.predict(xs)
# _,_,l =g.new_series(xs, ys, fit='parabolic')
# reg = l.regressor
# self.regressed_ratio_intercept = reg.predict(0)
# xs = linspace(0, 100)
# rys = niso.regressor.predict(xs) / diso.regressor.predict(xs)
xs = niso.offset_xs
rys = niso.ys / diso.ys
g.new_plot()
g.set_y_title('{}/{}'.format(niso.name, diso.name))
g.set_x_title('Time (s)')
# p,s,l = g.new_series(xs, rys, fit='parabolic', filter_outliers_dict=fd)
fd = {'filter_outliers': True, 'std_devs': 2, 'iterations': 1}
fitfunc = lambda p, x: (p[0]*x+p[1])/(p[2]*x+p[3])
fit = ((fitfunc, [1,1,1,1]), None)
p,s,l = g.new_series(xs, rys, fit=fit,
use_error_envelope=False,
filter_outliers_dict=fd)
reg = l.regressor
self.ratio_intercept = reg.predict(0)
def traits_view(self):
v = View(UItem('graph', style='custom'),
VGroup(Item('time_zero_offset'),
Item('intercept_ratio', style='readonly'),
Item('ratio_intercept', style='readonly')))
return v
if __name__ == '__main__':
re = RatioEditor()
re.setup()
re.configure_traits()
# ============= EOF =============================================
|
hqpr/findyour3d
|
config/settings/local.py
|
Python
|
mit
| 1,853
| 0.001079
|
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='d;z^Q:0HDfCSKXQE|zp&U8)n)P7Y[E<r0nY*m)F&1`*t$>gf9N')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# -----------------------------------------
|
-------------------------------------
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
|
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/pymodules/python2.7/gwibber/microblog/urlshorter/zima.py
|
Python
|
gpl-3.0
| 56
| 0.017857
|
/
|
usr/share/pyshared/gwibber/microblog/urlshorte
|
r/zima.py
|
fandemonium/code
|
parsers/img_oid_to_ncbi_from_html.py
|
Python
|
mit
| 234
| 0.004274
|
import sys
import re
for lines in open(sys.argv[1], "rU"):
line = lines.strip()
lexemes = re.split(".out:", line)
|
oid = lexemes[0].split(".")[1]
ncbi = re.split("val=|'", lexemes[1])[2]
print oid + " \t" + nc
|
bi
|
xuru/pyvisdk
|
pyvisdk/do/cluster_destroyed_event.py
|
Python
|
mit
| 1,147
| 0.00959
|
import logging
from pyvisdk.except
|
ions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ClusterDestroyedEvent(vim, *args, **kwargs):
'''This event records when a
|
cluster is destroyed.'''
obj = vim.client.factory.create('ns0:ClusterDestroyedEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments, got: %d' % len(args))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
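# A hypothetical usage sketch (the 'vim' service instance and argument values
# are placeholders, not defined in this module):
#   event = ClusterDestroyedEvent(vim, chain_id, created_time, key, user_name,
#                                 fullFormattedMessage='Cluster destroyed')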
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/__init__.py
|
Python
|
apache-2.0
| 9,237
| 0.001191
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
|
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(geta
|
ttr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv4",
"unnumbered",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helpe
|
casep/Molido
|
sdate.py
|
Python
|
gpl-2.0
| 350
| 0.002857
|
#!/usr/bin/env python
""" Calculate the Julian Date """
import time
import math
t = time.time()
""" Technically, we should be adding 2440587.5,
however, since we are trying to s
|
tick to the stardate
concept, we add only 40587.5"""
jd = (t / 86400.0 + 40587.5)
# Use the idea that 10 Julian days is eq
|
ual to 1 stardate
print "%05.2f" % jd
|
li282886931/apistore
|
robot/robot/news/eladies_sina_com_cn.py
|
Python
|
gpl-2.0
| 2,757
| 0.005261
|
# -*- coding: utf-8 -*-
import sys
import time
import json
import datetime
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
import robot
from robot.items import RobotItem
from robot.models.base import Base
from robot.settings import logger
class EladiedSinaComCnItem(RobotItem):
    title = scrapy.Field()    # title
    cl = scrapy.Field()       # category
    picmsg = scrapy.Field()   # picture info
    time = scrapy.Field()     # creation time
@property
def module(self):
return 'news'
class Process(Base):
def __init__(self):
pass
def process(self, item):
@robot.utils.checkHave
def havePicGirl():
"""检测是否存在条目"""
sql = """SELECT * FROM girlpic WHERE picmsg=%s"""
return sql, self.db, [item['picmsg']]
if not havePicGirl():
sql = """INSERT INTO girlpic (title, cl, picmsg, createtime) values (%s, %s, %s, %s)"""
self.db.insert(sql, item['title'], item['cl'], item['picmsg'], datetime.date.today())
class EladiedSinaComCn(scrapy.Spider):
"""
    Crawl the content of the WeChat search front page
"""
name = "eladies_sina_com_cn"
allowed_domains = ["sina.com.cn", ]
start_urls = ["http://eladies.sina.com.cn/photo/", ]
def parse(self, response):
        logger.info('[%s] %s' % (datetime.date.today(), response.url))
hxs = HtmlXPathSelector(response)
l = [
            # Photo spreads
            {'id': 'SI_Scroll_2_Cont', 'cl': 'photograph_gallery'},
            # Gossip
            {'id': 'SI_Scroll_3_Cont', 'cl': 'gossip'},
            # Fashion and styling
            {'id': 'SI_Scroll_4_Cont', 'cl': 'style'},
            # Body and fitness
            {'id': 'SI_Scroll_5_Cont', 'cl': 'body'},
            # Makeup and hair
            {'id': 'SI_Scroll_6_Cont', 'cl': 'beauty'},
]
for d in l:
sites = hxs.select('//div[@id="%s"]/div/div/a/@href' % d['id']).extract()
for site in sites:
cl = d['cl']
request = Request(site, callback=self.deepParse, meta={'cl': cl},)
yield request
def deepParse(self, response):
hxs = HtmlXPathSelector(response)
item = EladiedSinaComCnItem()
item['title'] = hxs.select('//div[@id="eData"]/dl[1]/dt/text()').extract()[0]
picl = hxs.select('//div[@id="eData"]/dl/dd[1]/text()').extract()
descl = hxs.select('//div[@id="eData"]/dl/dd[5]/text()').extract()
item['time'] = time.strftime("%Y-%m-%d", time.localtime(int(time.time())))
item['cl'] = response.meta['cl']
item['picmsg'] = json.dumps([{'pic': pic, 'desc': desc} for (pic, desc) in zip(picl, descl)])
yield item
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/availability_sets_operations.py
|
Python
|
mit
| 17,248
| 0.002609
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AvailabilitySetsOperations(object):
"""AvailabilitySetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def create_or_update(
self, resource_group_name, availability_set_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set
operation.
:type parameters:
~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
        :param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: AvailabilitySet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2017_03_30.models.AvailabilitySet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AvailabilitySet')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailabilitySet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
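    # Illustrative call sketch (an assumption for this note, not generated SDK code):
    # the operations class is normally reached through a ComputeManagementClient, e.g.
    #   compute_client.availability_sets.create_or_update(
    #       'my-resource-group', 'my-avset',
    #       models.AvailabilitySet(location='westus', platform_fault_domain_count=2))
    # where the client, resource group, set name and property values are placeholders.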
def delete(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatusResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Oper
|
laowantong/mocodo
|
mocodo/tests/__init__.py
|
Python
|
mit
| 33
| 0
|
from __future__ import division
|
|
iandennismiller/gthnk
|
src/gthnk/server.py
|
Python
|
mit
| 1,615
| 0.000619
|
# -*- coding: utf-8 -*-
# gthnk (c) Ian Dennis Miller
import os
import flask
import logging
from flaskext.markdown import Markdown
from mdx_linkify.mdx_linkify import LinkifyExtension
from mdx_journal import JournalExtension
from . import db, login_manager, bcrypt
from .models.day import Day
from .models.entry import Entry
from .models.page import Page
from .models.user import User
def create_app():
app = flask.Flask(__name__)
try:
app.config.from_envvar('SETTINGS')
except RuntimeError:
default_filename = os.path.expanduser('~/.gthnk/gthnk.conf')
if os.path.isfile(default_filename):
print("WARN: using default configuration file ~/.gthnk/gthnk.conf")
app.config.from_pyfile(default_filename)
logging.basicConfig(
format='%(asctime)s %(module)-16s %(levelname)-8s %(message)s',
filename=app.config["LOG"],
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
logging.info("Server: Start")
logging.info("Database: {}".format(app.config['SQLALCHEMY_DATABASE_URI']))
from .blueprints.root import root
app.register_blueprint(root)
from .blueprints.auth import auth
app.register_blueprint(auth)
from .blueprints.day import day
app.register_blueprint(day)
# from .blueprints.attachments import attachments
# app.register_blueprint(attachments)
db.init_app(app)
login_manager.init_app(app)
bcrypt.init_app(app)
app.markdown = Markdown(app, extensions=[
LinkifyExtension(),
JournalExtension()
])
return app
app = create_app()
|
Richard-Mathie/cassandra_benchmark
|
vendor/github.com/datastax/python-driver/tests/integration/cqlengine/columns/test_validation.py
|
Python
|
apache-2.0
| 21,274
| 0.001081
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import sys
from datetime import datetime, timedelta, date, tzinfo
from decimal import Decimal as D
from uuid import uuid4, uuid1
from cassandra import InvalidRequest
from cassandra.cqlengine.columns import TimeUUID
from cassandra.cqlengine.columns import Ascii
from cassandra.cqlengine.columns import Text
from cassandra.cqlengine.columns import Integer
from cassandra.cqlengine.columns import BigInt
from cassandra.cqlengine.columns import VarInt
from cassandra.cqlengine.columns import DateTime
from cassandra.cqlengine.columns import Date
from cassandra.cqlengine.columns import UUID
from cassandra.cqlengine.columns import Boolean
from cassandra.cqlengine.columns import Decimal
from cassandra.cqlengine.columns import Inet
from cassandra.cqlengine.connection import execute
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model, ValidationError
from cassandra import util
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestDatetime(BaseCassEngTestCase):
class DatetimeTest(Model):
test_id = Integer(primary_key=True)
created_at = DateTime()
@classmethod
def setUpClass(cls):
sync_table(cls.DatetimeTest)
@classmethod
def tearDownClass(cls):
drop_table(cls.DatetimeTest)
def test_datetime_io(self):
now = datetime.now()
self.DatetimeTest.objects.create(test_id=0, created_at=now)
dt2 = self.DatetimeTest.objects(test_id=0).first()
assert dt2.created_at.timetuple()[:6] == now.timetuple()[:6]
def test_datetime_tzinfo_io(self):
class TZ(tzinfo):
def utcoffset(self, date_time):
return timedelta(hours=-1)
def dst(self, date_time):
return None
now = datetime(1982, 1, 1, tzinfo=TZ())
dt = self.DatetimeTest.objects.create(test_id=1, created_at=now)
dt2 = self.DatetimeTest.objects(test_id=1).first()
assert dt2.created_at.timetuple()[:6] == (now + timedelta(hours=1)).timetuple()[:6]
def test_datetime_date_support(self):
today = date.today()
self.DatetimeTest.objects.create(test_id=2, created_at=today)
dt2 = self.DatetimeTest.objects(test_id=2).first()
assert dt2.created_at.isoformat() == datetime(today.year, today.month, today.day).isoformat()
def test_datetime_none(self):
dt = self.DatetimeTest.objects.create(test_id=3, created_at=None)
dt2 = self.DatetimeTest.objects(test_id=3).first()
assert dt2.created_at is None
dts = self.DatetimeTest.objects.filter(test_id=3).values_list('created_at')
assert dts[0][0] is None
def test_datetime_invalid(self):
dt_value= 'INVALID'
with self.assertRaises(TypeError):
self.DatetimeTest.objects.create(test_id=4, created_at=dt_value)
def test_datetime_timestamp(self):
dt_value = 1454520554
self.DatetimeTest.objects.create(test_id=5, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=5).first()
assert dt2.created_at == datetime.utcfromtimestamp(dt_value)
def test_datetime_large(self):
dt_value = datetime(2038, 12, 31, 10, 10, 10, 123000)
self.DatetimeTest.objects.create(test_id=6, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=6).first()
assert dt2.created_at == dt_value
def test_datetime_truncate_microseconds(self):
"""
Test to ensure that truncate microseconds works as expected.
This will be default behavior in the future and we will need to modify the tests to comply
with new behavior
@since 3.2
@jira_ticket PYTHON-273
@expected_result microseconds should be to the nearest thousand when truncate is set.
@test_category object_mapper
"""
DateTime.truncate_microseconds = True
try:
dt_value = datetime(2024, 12, 31, 10, 10, 10, 923567)
dt_truncated = datetime(2024, 12, 31, 10, 10, 10, 923000)
self.DatetimeTest.objects.create(test_id=6, created_at=dt_value)
dt2 = self.DatetimeTest.objects(test_id=6).first()
self.assertEqual(dt2.created_at,dt_truncated)
finally:
# We need to always return behavior to default
DateTime.truncate_microseconds = False
class TestBoolDefault(BaseCassEngTestCase):
class BoolDefaultValueTest(Model):
test_id = Integer(primary_key=True)
stuff = Boolean(default=True)
@classmethod
def setUpClass(cls):
sync_table(cls.BoolDefaultValueTest)
def test_default_is_set(self):
tmp = self.BoolDefaultValueTest.create(test_id=1)
self.assertEqual(True, tmp.stuff)
tmp2 = self.BoolDefaultValueTest.get(test_id=1)
self.assertEqual(True, tmp2.stuff)
class TestBoolValidation(BaseCassEngTestCase):
class BoolValidationTest(Model):
test_id = Integer(primary_key=True)
bool_column = Boolean()
@classmethod
def setUpClass(cls):
sync_table(cls.BoolValidationTest)
def test_validation_preserves_none(self):
test_obj = self.BoolValidationTest(test_id=1)
test_obj.validate()
self.assertIsNone(test_obj.bool_column)
class TestVarInt(BaseCassEngTestCase):
class VarIntTest(Model):
test_id = Integer(primary_key=True)
bignum = VarInt(primary_key=True)
@classmethod
def setUpClass(cls):
sync_table(cls.VarIntTest)
@classmethod
def tearDownClass(cls):
sync_table(cls.VarIntTest)
def test_varint_io(self):
# TODO: this is a weird test. i changed the number from sys.maxint (which doesn't exist in python 3)
        # to the giant number below and it broke between runs.
long_int = 92834902384092834092384028340283048239048203480234823048230482304820348239
int1 = self.VarIntTest.objects.create(test_id=0, bignum=long_int)
int2 = self.VarIntTest.objects(test_id=0).first()
self.assertEqual(int1.bignum, int2.bignum)
class TestDate(BaseCassEngTestCase):
class DateTest(Model):
test_id = Integer(primary_key=True)
created_at = Date()
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
return
sync_table(cls.DateTest)
@classmethod
def tearDownClass(cls):
if PROTOCOL_VERSION < 4:
return
drop_table(cls.DateTest)
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
def test_date_io(self):
today = date.today()
self.DateTest.objects.create(test_id=0, created_at=today)
result = self.DateTest.objects(test_id=0).first()
self.assertEqual(result.created_at, util.Date(today))
def test_date_io_using_datetime(self):
now = datetime.utcnow()
self.DateTest.objects.create(test_id=0, created_at=now)
result = self.DateTest.objects(test_id=0).first()
self.assertIsInstance(result.created_at, util.Date)
self.assertEqual(result.created_at, util.Date(now))
def test_date_none(self):
self.DateTest.objects.create(test_id=1, created_at=None)
dt2 = self.DateTest.objects(test_id=1).first()
assert dt2.created_at is None
dts = s
|
khertan/ownNotes
|
python/webdav/acp/Privilege.py
|
Python
|
gpl-3.0
| 4,423
| 0.007009
|
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handling for privileges for grant and deny clauses in ACEs
according to WebDAV ACP specification.
"""
from webdav import Constants
from webdav.Connection import WebdavError
__version__ = "$LastChangedRevision$"
class Privilege(object):
"""This class provides functionality for handling privileges for ACEs.
@ivar name: Name of the privilege.
@type name: C{string}
@cvar __privileges: List of allowed XML tags for privileges.
@type __privileges: C{tuple} of C{string}s
    """
__privileges = list()
def __init__(self, privilege=None, domroot=None):
"""
Constructor should be called with either no parameters (create blank Privilege),
one parameter (a DOM tree or privilege name to initialize it directly).
@param domroot: A DOM tree (default: None).
@type domroot: L{webdav.WebdavResponse.Element} object
@param privilege: The valid name of a privilege (default: None).
@type privilege: C{string}
@raise WebdavError: When non-valid parameters or sets of parameters are
passed a L{WebdavError} is raised.
"""
self.name = None
if domroot:
if len(domroot.children) != 1:
raise WebdavError('Wrong number of elements for Privilege constructor, we have: %i' \
% (len(domroot.children)))
else:
child = domroot.children[0]
if child.ns == Constants.NS_DAV and child.name in self.__privileges:
self.name = child.name
else:
raise WebdavError('Not a valid privilege tag, we have: %s%s' \
% (child.ns, child.name))
elif privilege:
if privilege in self.__privileges:
self.name = privilege
else:
raise WebdavError('Not a valid privilege tag, we have: %s.' % str(privilege))
@classmethod
def registerPrivileges(cls, privileges):
"""
Registers supported privilege tags.
@param privileges: List of privilege tags.
@type privileges: C{list} of C{unicode}
"""
for privilege in privileges:
cls.__privileges.append(privilege)
def __cmp__(self, other):
""" Compares two Privilege instances. """
if not isinstance(other, Privilege):
return 1
if self.name != other.name:
return 1
else:
return 0
def __repr__(self):
""" Returns the string representation of an instance. """
return '<class Privilege: name: "%s">' % (self.name)
def copy(self, other):
"""
Copy Privilege object.
@param other: Another privilege to copy.
@type other: L{Privilege} object
@raise WebdavError: When an object that is not a L{Privilege} is passed
a L{WebdavError} is raised.
"""
if not isinstance(other, Privilege):
raise WebdavError('Non-Privilege object passed to copy method: %s' % other.__class__)
self.name = other.name
def toXML(self):
"""
Returns privilege content as string in valid XML as described in WebDAV ACP.
@param defaultNameSpace: Name space (default: None).
@type defaultNameSpace: C(string)
"""
assert self.name != None, "privilege is not initialized or does not contain valid content!"
privilege = 'D:' + Constants.TAG_PRIVILEGE
return '<%s><D:%s/></%s>' % (privilege, self.name, privilege)
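# Usage sketch (illustrative, not part of the original module; assumes 'read' and 'write'
# have been registered as valid privilege tags, which normally happens at startup):
#
#   Privilege.registerPrivileges(['read', 'write'])
#   priv = Privilege(privilege='read')
#   priv.toXML()  # -> '<D:privilege><D:read/></D:privilege>' when TAG_PRIVILEGE == 'privilege'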
|
domin1101/malmo-challenge
|
malmopy/version.py
|
Python
|
mit
| 1,232
| 0.007305
|
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
VERSION = '0.1.0'
|
marcos-sb/quick-openstacked-hadoop
|
Alba/albaproject/mapred/models.py
|
Python
|
apache-2.0
| 2,416
| 0.009934
|
from django.db import models
from django.contrib.auth.models import User
from albaproject.settings import MEDIA_ROOT
import pdb
def _upload_to_generic(prefix_path=None, instance=None, field=None, filename=None):
#pdb.set_trace()
if not instance.pk: # generate DB PK if not present
instance.save()
if not prefix_path:
if not filename:
return '{0}/job_{1}/{2}'.format(instance.user.username, instance.pk,
field)
return '{0}/job_{1}/{2}/{3}'.format(instance.user.username, instance.pk,
field, filename)
return '{0}/{1}/job_{2}/{3}'.format(prefix_path, instance.user.username,
instance.pk, field)
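# Illustration (added commentary, not in the original module): for a user named "alice"
# and a Job with primary key 7, input_dest('data.csv') resolves to
# 'alice/job_7/input/data.csv', mapred_dest('job.jar') to 'alice/job_7/mapred/job.jar',
# and output_path() to '<MEDIA_ROOT>/alice/job_7/output'.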
class Job(models.Model):
def __unicode__(self):
return str(self.id)
def save(self, *args, **kwargs):
#pdb.set_trace()
_input = self.file_input
_job = self.mapred_job
_output = self.file_output
self.file_input = None
self.mapred_job = None
self.file_output = None
super(Job, self).save(*args,**kwargs)
self.save = super(Job, self).save
self.file_input = _input
self.mapred_job = _job
self.file_output = _output
self.save() #super.save
def input_dest(self, filename):
return _upload_to_generic(None, self, 'input', filename)
def mapred_dest(self, filename):
        return _upload_to_generic(None, self, 'mapred', filename)
def output_dest(self, filename):
return _upload_to_generic(None, self, 'output', filename)
def output_path(self):
return _upload_to_generic(MEDIA_ROOT, self, 'output', None)
user = models.ForeignKey(User)
    file_input = models.FileField(upload_to=input_dest, null=True)
mapred_job = models.FileField(upload_to=mapred_dest, null=True)
fully_qualified_job_impl_class = models.CharField(max_length=200, null=True)
file_output = models.FileField(upload_to=output_dest, null=True)
submission_date = models.DateTimeField(auto_now_add=True)
class Server(models.Model):
job = models.ForeignKey(Job)
openstack_id = models.CharField(max_length=200)
server_name = models.CharField(max_length=200)
vcpus = models.PositiveSmallIntegerField()
ram = models.PositiveIntegerField()
disk = models.PositiveIntegerField()
|
jbalogh/zamboni
|
apps/search/forms.py
|
Python
|
bsd-3-clause
| 11,040
| 0.000815
|
import collections
from django import forms
from django.forms.util import ErrorDict
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from amo import helpers
from applications.models import AppVersion
sort_by = (
('', _lazy(u'Keyword Match')),
('updated', _lazy(u'Updated', 'advanced_search_form_updated')),
('newest', _lazy(u'Created', 'advanced_search_form_newest')),
('weeklydownloads', _lazy(u'Downloads')),
('users', _lazy(u'Users')),
('averagerating', _lazy(u'Rating', 'advanced_search_form_rating')),
)
collection_sort_by = (
('weekly', _lazy(u'Most popular this week')),
('monthly', _lazy(u'Most popular this month')),
('all', _lazy(u'Most popular all time')),
('rating', _lazy(u'Highest Rated')),
('newest', _lazy(u'Newest')),
)
per_page = (20, 50, )
tuplize = lambda x: divmod(int(x * 10), 10)
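# For example (illustrative note, not in the original source): tuplize(3.6) == (3, 6),
# turning a numeric version such as 3.6 into a comparable (major, minor) pair.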
# These releases were so minor that we don't want to search for them.
skip_versions = collections.defaultdict(list)
skip_versions[amo.FIREFOX] = [tuplize(v) for v in amo.FIREFOX.exclude_versions]
min_version = collections.defaultdict(lambda: (0, 0))
min_version.update({
amo.FIREFOX: tuplize(amo.FIREFOX.min_display_version),
amo.THUNDERBIRD: tuplize(amo.THUNDERBIRD.min_display_version),
amo.SEAMONKEY: tuplize(amo.SEAMONKEY.min_display_version),
amo.SUNBIRD: tuplize(amo.SUNBIRD.min_display_version),
})
def get_app_versions(app):
appversions = AppVersion.objects.filter(application=app.id)
min_ver, skip = min_version[app], skip_versions[app]
versions = [(a.major, a.minor1) for a in appversions]
strings = ['%s.%s' % v for v in sorted(set(versions), reverse=True)
if v >= min_ver and v not in skip]
return [('any', _('Any'))] + zip(strings, strings)
# Fake categories to slip some add-on types into the search groups.
_Cat = collections.namedtuple('Cat', 'id name weight type')
def get_search_groups(app):
sub = []
types_ = [t for t in (amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_THEME)
if t in app.types]
for type_ in types_:
sub.append(_Cat(0, amo.ADDON_TYPES[type_], 0, type_))
sub.extend(helpers.sidebar(app)[0])
sub = [('%s,%s' % (a.type, a.id), a.name) for a in
sorted(sub, key=lambda x: (x.weight, x.name))]
top_level = [('all', _('all add-ons')),
('collections', _('all collections')), ]
if amo.ADDON_PERSONA in app.types:
top_level += (('personas', _('all personas')),)
return top_level[:1] + sub + top_level[1:], top_level
SEARCH_CHOICES = (
('all', _lazy('search for add-ons')),
('collections', _lazy('search for collections')),
('personas', _lazy('search for personas')),
('apps', _lazy('search for apps')))
class SimpleSearchForm(forms.Form):
"""Powers the search box on every page."""
q = forms.CharField(required=False)
cat = forms.CharField(required=False, widget=forms.HiddenInput)
appver = forms.CharField(required=False, widget=forms.HiddenInput)
platform = forms.CharField(required=False, widget=forms.HiddenInput)
choices = dict(SEARCH_CHOICES)
def clean_cat(self):
self.data = dict(self.data.items())
return self.data.setdefault('cat', 'all')
def placeholder(self):
val = self.clean_cat()
return self.choices.get(val, self.choices['all'])
def SearchForm(request):
current_app = request.APP or amo.FIREFOX
search_groups, top_level = get_search_groups(current_app)
class _SearchForm(SimpleSearchForm):
cat = forms.ChoiceField(choices=search_groups, required=False)
# This gets replaced by a <select> with js.
lver = forms.ChoiceField(
label=_(u'{0} Version').format(unicode(current_app.pretty)),
choices=get_app_versions(current_app), required=False)
appver = forms.CharField(required=False)
atype = forms.TypedChoiceField(label=_('Type'),
                                        choices=[(t, amo.ADDON_TYPE[t]) for t in amo.ADDON_SEARCH_TYPES],
required=False, coerce=int, empty_value=amo.ADDON_ANY)
pid = forms.TypedChoiceField(label=_('Platform'),
choices=[(p[0], p[1].name) for p in amo.PLATFORMS.iteritems()
if p[1] != amo.PLATFORM_ANY], required=False,
coerce=int, empty_value=amo.PLATFORM_ANY.id)
platform = forms.ChoiceField(required=False,
choices=[[p.shortname, p.id] for p in amo.PLATFORMS.values()])
sort = forms.ChoiceField(label=_('Sort By'), choices=sort_by,
required=False)
pp = forms.TypedChoiceField(label=_('Per Page'),
choices=zip(per_page, per_page), required=False, coerce=int,
empty_value=per_page[0])
advanced = forms.BooleanField(widget=forms.HiddenInput, required=False)
tag = forms.CharField(widget=forms.HiddenInput, required=False)
page = forms.IntegerField(widget=forms.HiddenInput, required=False)
# Attach these to the form for usage in the template.
top_level_cat = dict(top_level)
def clean_platform(self):
p = self.cleaned_data.get('platform')
choices = dict(self.fields['platform'].choices)
return choices.get(p)
# TODO(jbalogh): when we start using this form for zamboni search, it
# should check that the appid and lver match up using app_versions.
def clean(self):
d = self.cleaned_data
raw = self.data
# Set some defaults
if not d.get('appid'):
d['appid'] = request.APP.id
# Since not all categories are listed in this form, we use the raw
# data.
if 'cat' in raw:
if ',' in raw['cat']:
try:
d['atype'], d['cat'] = map(int, raw['cat'].split(','))
except ValueError:
d['cat'] = None
elif raw['cat'] == 'all':
d['cat'] = None
if 'page' not in d or not d['page'] or d['page'] < 1:
d['page'] = 1
return d
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
d = request.GET.copy()
return _SearchForm(d)
class SecondarySearchForm(forms.Form):
q = forms.CharField(widget=forms.HiddenInput, required=False)
cat = forms.CharField(widget=forms.HiddenInput)
pp = forms.CharField(widget=forms.HiddenInput, required=False)
sortby = forms.ChoiceField(label=_lazy(u'Sort By'),
choices=collection_sort_by,
initial='weekly', required=False)
page = forms.IntegerField(widget=forms.HiddenInput, required=False)
def clean_pp(self):
d = self.cleaned_data['pp']
try:
return int(d)
except:
return per_page[0]
def clean(self):
d = self.cleaned_data
if not d.get('pp'):
d['pp'] = per_page[0]
return d
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
Does not remove cleaned_data if there are errors.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data
# has changed from the initial data
|
rajul/tvb-framework
|
tvb/config/logger/cluster_handler.py
|
Python
|
gpl-2.0
| 3,331
| 0.006304
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Calin Pavel <calin.pavel@codemart.ro>
"""
import os
import logging
from logging.handlers import MemoryHandler
from tvb.basic.profile import TvbProfile
from tvb.basic.logger.simple_handler import SimpleTimedRotatingFileHandler
class ClusterTimedRotatingFileHandler(MemoryHandler):
"""
This is a custom rotating file handler which computes the name of the file depending on the
execution environment (web node or cluster node)
"""
    # Name of the log file where logs from the Web application code will be stored
WEB_LOG_FILE = "web_application.log"
# Name of the file where to write logs from the code executed on cluster nodes
CLUSTER_NODES_LOG_FILE = "operations_executions.log"
# Size of the buffer which store log entries in memory
# in number of lines
BUFFER_CAPACITY = 20
    def __init__(self, when='h', interval=1, backupCount=0):
"""
Constructor for logging formatter.
"""
# Formatting string
format_str = '%(asctime)s - %(levelname)s'
if TvbProfile.current.cluster.IN_OPERATION_EXECUTION_PROCESS:
log_file = self.CLUSTER_NODES_LOG_FILE
if TvbProfile.current.cluster.IS_RUNNING_ON_CLUSTER_NODE:
node_name = TvbProfile.current.cluster.CLUSTER_NODE_NAME
if node_name is not None:
format_str += ' [node:' + str(node_name) + '] '
else:
format_str += ' [proc:' + str(os.getpid()) + '] '
else:
log_file = self.WEB_LOG_FILE
format_str += ' - %(name)s - %(message)s'
rotating_file_handler = SimpleTimedRotatingFileHandler(log_file, when, interval, backupCount)
rotating_file_handler.setFormatter(logging.Formatter(format_str))
MemoryHandler.__init__(self, capacity=self.BUFFER_CAPACITY, target=rotating_file_handler)
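# Minimal usage sketch (an assumption, not part of the original module; the logger name
# and rotation settings below are placeholders):
#
#   handler = ClusterTimedRotatingFileHandler(when='midnight', interval=1, backupCount=7)
#   logging.getLogger('tvb').addHandler(handler)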
|
chrisboo/pyhistogram
|
examples/plot_simple_1D_hist_example.py
|
Python
|
gpl-3.0
| 478
| 0
|
"""
===============================================
Demonstration for filling a histogram in a loop
===============================================
A simple, one dimensional histogram is filled in a loop with random
values. The result is then plotted with the built-in plot command.
"""
from pyhistogram import Hist
import numpy as np
import matplotlib.pyplot as plt
h = Hist(20, -5, 5)
sample = np.random.normal(size=500)
for v in sample:
h.fill(v)
h.plot()
plt.show()
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_rancher/management/commands/sync_users.py
|
Python
|
mit
| 986
| 0.004057
|
import logging
from django.core.management.base import BaseCommand
from waldur_rancher.utils import SyncUser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = """Sync users from Waldur to Rancher."""
def handle(self, *args, **options):
def print_message(count, action, name='user'):
if count == 1:
self.stdout.write(
self.style.SUCCESS('%s %s has been %s.' % (count, name, action))
)
else:
self.stdout.write(
self.style.SUCCESS('%s %ss have been %s.' % (count, name, action))
)
result = SyncUser.run()
for action in ['blocked', 'created', 'activated', 'updated']:
print_message(result.get(action, 0), action)
print_message(result.get('project roles deleted', 0), 'deleted', 'project role')
        print_message(result.get('project roles created', 0), 'created', 'project role')
|
didrocks/cupstream2distro
|
cupstream2distro/utils.py
|
Python
|
gpl-3.0
| 904
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from contextlib import contextmanager
# this is stolen from python 3.4 :)
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
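# Usage sketch (illustrative, not part of the original module): silently skip a missing
# file instead of wrapping the call in try/except.
#
#   import os
#   with ignored(OSError):
#       os.remove('somefile.tmp')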
|
linky00/pythonthegathering
|
test.py
|
Python
|
mit
| 209
| 0.009569
|
from pythonthegathering import ManaPool, spell
pool = ManaPool()
@spell('WBB')
def boop(x):
    print(x)
pool.tap('plains').tap('swamp').tap('swamp')
boop('boop', mana_pool=pool, mana_pay={'W': 1, 'B': 2})
|
robertdown/atlas_docs
|
atlas_doc/migrations/0008_auto_20170828_0043.py
|
Python
|
gpl-3.0
| 461
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-28 04:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('atlas_doc', '0007_page_version'),
]
    operations = [
migrations.AlterField(
model_name='collection',
name='prev_rev',
field=models.UUIDField(blank=True, null=True),
),
]
|
droundy/deft
|
papers/hughes-saft/figs/density_calc.py
|
Python
|
gpl-2.0
| 986
| 0.004057
|
#!/usr/bin/env python
import math
fin = open('figs/single-rod-in-water.dat', 'r')
fout = open('figs/single-rods-calculated-density.dat', 'w')
kB = 3.16681539628059e-6 # This is Boltzmann's constant in Hartree/Kelvin
first = 1
nm = 18.8972613
for line in fin:
current = str(line)
pieces = current.split('\t')
if first:
r2 = float(pieces[0])/2*nm
E2 = float(pieces[1])
first = 0
else:
if ((float(pieces[0])/2*nm - r2) > 0.25):
r1 = r2
r2 = float(pieces[0])/2*nm
E1 = E2
            E2 = float(pieces[1]) # actually it's energy per unit length!
length = 1 # arbitrary
r = (r1 + r2)/2
dEdR = (E2-E1)/(r2-r1)*length
area = 2*math.pi*r*length
force = dEdR
pressure = force/area
kT = kB*298 # about this
ncontact = pressure/kT
fout.write(str(r)+'\t'+str(ncontact)+'\n')
fin.close()
fout.close()
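# Added commentary (not in the original script): the loop estimates the force on the rod
# by a finite difference of the energy-per-length data, force = dE/dR ~ (E2-E1)/(r2-r1),
# converts it to a pressure over the cylinder area 2*pi*r*length, and then uses
# n_contact = pressure / kT to write the contact density as a function of radius.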
|
jennywoites/MUSSA
|
MUSSA_Flask/app/API_Rest/Services/AlumnoServices/EncuestaAlumnoService.py
|
Python
|
gpl-3.0
| 6,276
| 0.002709
|
from flask_user import login_required
from app.API_Rest.Services.BaseService import BaseService
from app.models.generadorJSON.respuestas_encuestas_generadorJSON import generarJSON_encuesta_alumno
from app.models.respuestas_encuesta_models import EncuestaAlumno, RespuestaEncuestaTematica, RespuestaEncuestaTags
from app.models.palabras_clave_models import PalabrasClaveParaMateria, TematicaPorMateria
from app.models.alumno_models import MateriasAlumno
from app.models.horarios_models import Curso
from app.API_Rest.codes import *
from app.models.respuestas_encuesta_models import RespuestaEncuestaAlumno, RespuestaEncuestaEstrellas
from app.DAO.EncuestasDAO import *
class EncuestaAlumnoService(BaseService):
def getNombreClaseServicio(self):
return "Encuesta Alumno Service"
##########################################
    ##              Services                ##
##########################################
@login_required
def get(self, idEncuestaAlumno):
self.logg_parametros_recibidos()
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
("idEncuestaAlumno", {
self.PARAMETRO: idEncuestaAlumno,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [EncuestaAlumno]),
(self.encuesta_pertenece_al_alumno, [])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
encuesta = EncuestaAlumno.query.get(idEncuestaAlumno)
result = (generarJSON_encuesta_alumno(encuesta), SUCCESS_OK)
self.logg_resultado(result)
return result
@login_required
def post(self, idEncuestaAlumno):
self.logg_parametros_recibidos()
alumno = self.obtener_alumno_usuario_actual()
if not alumno:
msj = "El usuario no tiene ningun alumno asociado"
self.logg_error(msj)
return {'Error': msj}, CLIENT_ERROR_NOT_FOUND
finalizada = self.obtener_booleano("finalizada")
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
("idEncuestaAlumno", {
self.PARAMETRO: idEncuestaAlumno,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [EncuestaAlumno]),
(self.encuesta_pertenece_al_alumno, []),
(self.encuesta_no_esta_finalizada, [])
]
}),
("finalizada", {
self.PARAMETRO: finalizada,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.booleano_es_valido, [])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
encuesta = EncuestaAlumno.query.get(idEncuestaAlumno)
        encuesta.finalizada = finalizada
db.session.commit()
materiaAlumno = MateriasAlumno.query.get(encuesta.materia_alumno_id)
self.agregarPalabrasClavesALasMaterias(encuesta, materiaAlumno.materia_id)
self.agregarTematicasALasMaterias(encuesta, materiaAlumno.materia_id)
self.actualizar_puntaje_y_cantidad_encuestas_curso(encuesta, materiaAlumno.curso_id)
result = SUCCESS_NO_CONTENT
self.logg_resultado(result)
return result
def actualizar_puntaje_y_cantidad_encuestas_curso(self, encuesta, id_curso):
curso = Curso.query.get(id_curso)
curso.puntaje_total_encuestas += encuesta.obtener_cantidad_estrellas_elegidas()
curso.cantidad_encuestas_completas += 1
db.session.commit()
def agregarPalabrasClavesALasMaterias(self, encuesta, id_materia):
respuestas = RespuestaEncuestaTags.query\
.filter(RespuestaEncuestaTags.rta_encuesta_alumno_id.in_(
RespuestaEncuestaAlumno.query.with_entities(RespuestaEncuestaAlumno.id)
.filter_by(encuesta_alumno_id=encuesta.id)
)).all()
for respuesta in respuestas:
entrada = PalabrasClaveParaMateria.query.filter_by(materia_id=id_materia)\
.filter_by(palabra_clave_id=respuesta.palabra_clave_id).first()
if not entrada:
entrada = PalabrasClaveParaMateria(
materia_id=id_materia,
palabra_clave_id=respuesta.palabra_clave_id,
cantidad_encuestas_asociadas=0
)
db.session.add(entrada)
entrada.cantidad_encuestas_asociadas += 1
db.session.commit()
def agregarTematicasALasMaterias(self, encuesta, id_materia):
respuestas = RespuestaEncuestaTematica.query \
.filter(RespuestaEncuestaTematica.rta_encuesta_alumno_id.in_(
RespuestaEncuestaAlumno.query.with_entities(RespuestaEncuestaAlumno.id)
.filter_by(encuesta_alumno_id=encuesta.id)
)).all()
for respuesta in respuestas:
entrada = TematicaPorMateria.query.filter_by(materia_id=id_materia).\
filter_by(tematica_id=respuesta.tematica_id).first()
if not entrada:
entrada = TematicaPorMateria(
materia_id=id_materia,
tematica_id=respuesta.tematica_id,
cantidad_encuestas_asociadas=0
)
db.session.add(entrada)
entrada.cantidad_encuestas_asociadas += 1
db.session.commit()
def encuesta_no_esta_finalizada(self, nombre_parametro, valor, esObligatorio):
encuesta = EncuestaAlumno.query.get(valor)
return self.mensaje_OK(nombre_parametro) if not encuesta.finalizada \
else (False, 'La encuesta ya se encuentra finalizada', CLIENT_ERROR_METHOD_NOT_ALLOWED)
#########################################
CLASE = EncuestaAlumnoService
URLS_SERVICIOS = (
'/api/alumno/encuesta/<int:idEncuestaAlumno>',
)
#########################################
|
margulies/surfdist
|
nipype/surfdist_nipype.py
|
Python
|
mit
| 8,586
| 0.02737
|
#!/usr/bin/env python
from nipype.interfaces.io import FreeSurferSource, DataSink
from nipype.interfaces.utility import IdentityInterface
from nipype import Workflow, Node, MapNode, JoinNode, Function
import nibabel as nib
import numpy as np
import os
import surfdist as sd
import csv
def trimming(itemz, phrase):
item = [x for x in itemz if phrase in x][0]
return item
def genfname(hemi, source, target):
fname = hemi + '_' + source + '_' + target
return fname
def calc_surfdist(surface, labels, annot, reg, origin, target):
import nibabel as nib
import numpy as np
import os
from surfdist import load, utils, surfdist
import csv
""" inputs:
surface - surface file (e.g. lh.pial, with full path)
labels - label file (e.g. lh.cortex.label, with full path)
annot - annot file (e.g. lh.aparc.a2009s.annot, with full path)
reg - registration file (lh.sphere.reg)
origin - the label from which we calculate distances
target - target surface (e.g. fsaverage4)
"""
# Load stuff
surf = nib.freesurfer.read_geometry(surface)
cort = np.sort(nib.freesurfer.read_label(labels))
src = load.load_freesurfer_label(annot, origin, cort)
# Calculate distances
dist = surfdist.dist_calc(surf, cort, src)
# Project distances to target
trg = nib.freesurfer.read_geometry(target)[0]
native = nib.freesurfer.read_geometry(reg)[0]
idx_trg_to_native = utils.find_node_match(trg, native)[0]
# Get indices in trg space
distt = dist[idx_trg_to_native]
# Write to file and return file handle
filename = os.path.join(os.getcwd(),'distances.csv')
distt.tofile(filename,sep=",")
return filename
def stack_files(files, hemi, source, target):
"""
This function takes a list of files as input and vstacks them
"""
import csv
import os
import numpy as np
fname = "sdist_%s_%s_%s.csv" % (hemi, source, target)
filename = os.path.join(os.getcwd(),fname)
alldist = []
for dfile in files:
alldist.append(np.genfromtxt(dfile, delimiter=','))
alldist = np.array(alldist)
alldist.tofile(filename,",")
    return filename
def create_surfdist_workflow(subjects_dir,
subject_list,
sources,
target,
hemi,
atlas,
labs,
name):
sd = Workflow(name=name)
# Run a separate tree for each template, hemisphere and source structure
infosource = Node(IdentityInterface(fields=['template','hemi','source']), name="infosource")
infosource.iterables = [('template', target),('hemi', hemi),('source',sources)]
# Get template files
fsst = Node(FreeSurferSource(),name='FS_Source_template')
fsst.inputs.subjects_dir = subjects_dir
sd.connect(infosource,'template',fsst,'subject_id')
sd.connect(infosource,'hemi',fsst,'hemi')
# Generate folder name for output
genfoldname = Node(Function(input_names=['hemi','source','target'],
output_names=['cname'], function=genfname),
name='genfoldname')
sd.connect(infosource,'hemi',genfoldname,'hemi')
sd.connect(infosource,'source',genfoldname,'source')
sd.connect(infosource,'template',genfoldname,'target')
# Get subjects
fss = Node(FreeSurferSource(),name='FS_Source')
fss.iterables = ('subject_id', subject_list)
fss.inputs.subjects_dir = subjects_dir
fss.inputs.subject_id = subject_list
sd.connect(infosource,'hemi',fss,'hemi')
# Trim labels
tlab = Node(Function(input_names=['itemz','phrase'],
output_names=['item'], function=trimming),
name='tlab')
tlab.inputs.phrase = labs
sd.connect(fss,'label',tlab,'itemz')
# Trim annotations
tannot = Node(Function(input_names=['itemz','phrase'],
output_names=['item'], function=trimming),
name='tannot')
tannot.inputs.phrase = atlas
sd.connect(fss,'annot',tannot,'itemz')
# Calculate distances for each hemi
sdist = Node(Function(input_names=['surface','labels','annot','reg','origin','target'],
output_names=['distances'], function=calc_surfdist),
name='sdist')
sd.connect(infosource,'source',sdist,'origin')
sd.connect(fss,'pial',sdist,'surface')
sd.connect(tlab,'item',sdist,'labels')
sd.connect(tannot,'item',sdist,'annot')
sd.connect(fss,'sphere_reg',sdist,'reg')
sd.connect(fsst,'sphere_reg',sdist,'target')
# Gather data for each hemi from all subjects
bucket = JoinNode(Function(input_names=['files','hemi','source','target'],output_names=['group_dist'],
function=stack_files), joinsource = fss, joinfield = 'files', name='bucket')
sd.connect(infosource,'source',bucket,'source')
sd.connect(infosource,'template',bucket,'target')
sd.connect(infosource,'hemi',bucket,'hemi')
sd.connect(sdist,'distances',bucket,'files')
# Sink the data
datasink = Node(DataSink(), name='sinker')
datasink.inputs.parameterization = False
datasink.inputs.base_directory = os.path.abspath(args.sink)
sd.connect(genfoldname,'cname',datasink,'container')
sd.connect(bucket,'group_dist',datasink,'group_distances')
return sd
def create_workflow(args, name=None):
with open(args.subject_file) as f:
subject_list = f.read().splitlines()
if name is None:
name = 'surfdist'
kwargs = dict(subjects_dir = args.subjects_dir,
subject_list = subject_list,
sources = args.sources,
target = args.target_surfs,
hemi = args.hemi,
atlas = args.annot,
labs = args.labels,
name=name)
wf = create_surfdist_workflow(**kwargs)
return wf
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
import os
defstr = ' (default %(default)s)'
parser = ArgumentParser(description='''This script generates and runs a nipype pipeline for calculating distances from source label(s)
on a Freesurfer surface. After calculating the distances in native space it transforms
the distances into selected target space and creates a CSV file containing data for all
subjects. This table can be used for permutation testing in PALM.''',
formatter_class=RawTextHelpFormatter)
parser.add_argument("-s", "--subject_ids", dest="subject_file",
help="Subject list file", required=True)
parser.add_argument("-sd", "--subjects_dir", dest="subjects_dir",
help="FreeSurfer subject directory", required=True)
parser.add_argument("-t", "--target_surfaces", dest="target_surfs", nargs="+",
default=['fsaverage5'],
help="FreeSurfer target surfaces" + defstr)
parser.add_argument("-a", "--annot", dest="annot",
default='aparc.a2009s',
help="Annotation for source label(s)" + defstr)
parser.add_argument("-l", "--label", dest="labels",
default='cortex',
help="Label(s)" + defstr)
parser.add_argument("-src", "--source", dest="sources", nargs = "+",
default=['S_central'],
help="Label(s) to calculate distances from" + defstr)
parser.add_argument("-hemi", "--hemi", dest="hemi", nargs = "+",
default=['lh','rh'],
help="Hemisphere(s) for distance calculation" + defstr)
parser.add_argument("-o", "--output_dir", dest="sink",
default=os.path.join(os.getcwd(),'geodesic_distances'),
help="Output directory base")
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
hel
|
eunchong/build
|
third_party/twisted_10_2/twisted/protocols/ftp.py
|
Python
|
bsd-3-clause
| 93,283
| 0.002969
|
# -*- test-case-name: twisted.test.test_ftp -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An FTP protocol implementation
@author: Itamar Shtull-Trauring
@author: Jp Calderone
@author: Andrew Bennetts
"""
# System Imports
import os
import time
import re
import operator
import stat
import errno
import fnmatch
import warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import Interface, implements
# Twisted Imports
from twisted import copyright
from twisted.internet import reactor, interfaces, protocol, error, defer
from twisted.protocols import basic, policies
from twisted.python import log, failure, filepath
from twisted.python.compat import reduce
from twisted.cred import error as cred_error, portal, credentials, checkers
# constants
# response codes
RESTART_MARKER_REPLY = "100"
SERVICE_READY_IN_N_MINUTES = "120"
DATA_CNX_ALREADY_OPEN_START_XFR = "125"
FILE_STATUS_OK_OPEN_DATA_CNX = "150"
CMD_OK = "200.1"
TYPE_SET_OK = "200.2"
ENTERING_PORT_MODE = "200.3"
CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
SYS_STATUS_OR_HELP_REPLY = "211"
DIR_STATUS = "212"
FILE_STATUS = "213"
HELP_MSG = "214"
NAME_SYS_TYPE = "215"
SVC_READY_FOR_NEW_USER = "220.1"
WELCOME_MSG = "220.2"
SVC_CLOSING_CTRL_CNX = "221"
GOODBYE_MSG = "221"
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
CLOSING_DATA_CNX = "226"
TXFR_COMPLETE_OK = "226"
ENTERING_PASV_MODE = "227"
ENTERING_EPSV_MODE = "229"
USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
REQ_FILE_ACTN_COMPLETED_OK = "250"
PWD_REPLY = "257.1"
MKD_REPLY = "257.2"
USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
NEED_ACCT_FOR_LOGIN = "332"
REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
TOO_MANY_CONNECTIONS = "421.2"
CANT_OPEN_DATA_CNX = "425"
CNX_CLOSED_TXFR_ABORTED = "426"
REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
REQ_ACTN_ABRTD_LOCAL_ERR = "451"
REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
SYNTAX_ERR = "500"
SYNTAX_ERR_IN_ARGS = "501"
CMD_NOT_IMPLMNTD = "502"
BAD_CMD_SEQ = "503"
CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
NOT_LOGGED_IN = "530.1" # v1 of code 530 - please log in
AUTH_FAILURE = "530.2" # v2 of code 530 - authorization failure
NEED_ACCT_FOR_STOR = "532"
FILE_NOT_FOUND = "550.1" # no such file or directory
PERMISSION_DENIED = "550.2" # permission denied
ANON_USER_DENIED = "550.3" # anonymous users can't alter filesystem
IS_NOT_A_DIR = "550.4" # rmd called on a path that is not a directory
REQ_ACTN_NOT_TAKEN = "550.5"
FILE_EXISTS = "550.6"
IS_A_DIR = "550.7"
PAGE_TYPE_UNK = "551"
EXCEEDED_STORAGE_ALLOC = "552"
FILENAME_NOT_ALLOWED = "553"
RESPONSE = {
# -- 100's --
RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', # TODO: this must be fixed
SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
# -- 200's --
CMD_OK: '200 Command OK',
TYPE_SET_OK: '200 Type set to %s.',
ENTERING_PORT_MODE: '200 PORT OK',
CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
DIR_STATUS: '212 %s',
FILE_STATUS: '213 %s',
HELP_MSG: '214 help: %s',
NAME_SYS_TYPE: '215 UNIX Type: L8',
WELCOME_MSG: "220 %s",
SVC_READY_FOR_NEW_USER: '220 Service ready',
GOODBYE_MSG: '221 Goodbye.',
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
CLOSING_DATA_CNX: '226 Abort successful',
TXFR_COMPLETE_OK: '226 Transfer Complete.',
ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', # where is epsv defined in the rfc's?
USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', #i.e. CWD completed ok
PWD_REPLY: '257 "%s"',
MKD_REPLY: '257 "%s" created',
# -- 300's --
'userotp': '331 Response to %s.', # ???
USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
# -- 400's --
SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
# -- 500's --
SYNTAX_ERR: "500 Syntax error: %s",
SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
NOT_LOGGED_IN: '530 Please login with USER and PASS.',
AUTH_FAILURE: '530 Sorry, Authentication failed.',
NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
FILE_NOT_FOUND: '550 %s: No such file or directory.',
PERMISSION_DENIED: '550 %s: Permission denied.',
ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
FILE_EXISTS: '550 %s: File exists',
IS_A_DIR: '550 %s: is a directory',
REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed'
}
class InvalidPath(Exception):
"""
Internal exception used to signify an error during parsin
|
coolharsh55/hdd-indexer
|
setup.py
|
Python
|
mit
| 8,480
| 0
|
"""Setup for HDD-indexer
This module provides the setup for ``hdd-indexer`` by downloading its
`dependencies`, creating the `database`, and creating a sample `user`.
Usage:
$ python setup.py
Dependencies:
The dependencies are installed with pip.
$ pip install -r requirements.txt
Database:
The database is created via `migrations` from `django`
$ python manage.py migrate
Superuser:
The superuser is created for accessing the admin interface.
    It has the credentials `u:user` and `p:pass`
$ python manage.py createsuperuser
username: user
email: user@example.com
password: pass
Webserver:
The django webserver is started at localhost port 8000
$ python manage.py runserver
Browser:
A browser page is opened at localhost:8000 to continue setup.
"""
import os
import platform
import pickle
import subprocess
import urllib2
PICKLED_SETUPFILE = './.setup.pickle'
SETUP_STATUS = {}
def depickle_setup():
"""Load setup status from pickle
Args:
None
Returns:
dict:
console_completed(bool): console setup completed
            last_completed(bool): whether the last setup completed
successfully
installed_dependencies(bool): installed dependencies
database_migrate(bool): database migrated
user_admin(bool): create an admin user
Raises:
None
"""
try:
if os.path.isfile(PICKLED_SETUPFILE):
# setup pickle exists, has been written previously
with open(PICKLED_SETUPFILE, 'r') as file:
setup_status = pickle.load(file)
# TODO: assert setup status is a dict
# TODO: assert setup status fields are present
# TODO: assert setup status values are valid
return setup_status
else:
# setup pickle does not exist, setup run first time
setup_status = {
'console_completed': False,
'last_completed': False,
'installed_dependencies': False,
'database_migrate': False,
'user_admin': False,
}
pickle_setup(setup_status)
return setup_status
except Exception:
pass
# TODO: logging
def pickle_setup(setup_dict):
"""Save setup status to pickle
Args:
setup_dict(dict):
console_completed(bool): console_setup_completed
            last_completed(bool): whether the last setup completed
successfully
installed_dependencies(bool): installed dependencies
database_migrate(bool): database migrated
user_admin(bool): create an admin user
Returns:
None
Raises:
None
"""
assert type(setup_dict) == dict
# TODO: check setup dict has valid keys
# TODO: check setup dict has valid values
try:
with open(PICKLED_SETUPFILE, 'w') as file:
pickle.dump(setup_dict, file)
except Exception:
pass
# TODO: logging
def welcome_message():
"""
"""
# Welcome message
cls()
print 'Welcome to HDD-indexer'
print '----------------------'
print '----------------------'
if SETUP_STATUS['last_completed']:
print "Let's start with the setup."
else:
print "Let's continue with the setup."
def install_dependencies():
# Internet checkup
print "We'll make sure you are connected to the internet first."
raw_input("Press Enter to continue...")
if not internet_on():
print 'What! No Internet...? :('
return # cannot install dependencies without the internet
print 'Oooh... Connectivity! :)'
raw_input("Press Enter to continue...")
# Dependencies
cls()
print "The first thing we'll do is install the dependencies"
raw_input("Press Enter to continue...")
# pip install -r requirements.txt
# TODO: assert requirements.txt exists
cmd = ['pip', 'install', '-r', 'requirements.txt']
# open a subprocess and pipe its output
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in p.stdout:
print line,
p.wait()
if p.returncode:
print 'ERROR! ERROR! ERROR!'
return
SETUP_STATUS['installed_dependencies'] = True
pickle_setup(SETUP_STATUS)
print "Excellent! We're set!"
raw_input("Press Enter to continue...")
def database_migrate():
"""
"""
# Database
cls()
print "Now let's setup the database for you..."
raw_input("Press Enter to continue...")
print '----------------------'
print 'MIGRATING DATABASE'
# python manage.py migrate
# This will run django's migrations, which creates the database
# and its associated tables / schema
    cmd = ['python', 'manage.py', 'migrate']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in p.stdout:
print line,
p.wait()
if p.returncode:
print 'ERROR! ERROR! ERROR!'
return
SETUP_STATUS['database_migrate'] = True
pickle_setup(SETUP_STATUS)
raw_input("Press Enter to continue...")
def create_user_admin():
"""
"""
    # User
cls()
print "Now that it's done, let's create a user for you!"
print '----------------------'
print "username: user"
print "password: pass"
print '----------------------'
print "You ready?"
raw_input("Press Enter to continue...")
# load django's settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hdd_indexer.settings")
import django # import django inline only when required
django.setup() # call django to load its settings
from django.contrib.auth.models import User
try:
# get the user with u=user
p = User.objects.get(username='user')
# if exists, delete it
p.delete()
except User.DoesNotExist:
        # does not exist, therefore let's create it
# TODO: check if pass can be changed programmatically instead of
# deleting the user and creating it again
pass
User.objects.create_superuser('user', 'user@example.com', 'pass')
SETUP_STATUS['user_admin'] = True
pickle_setup(SETUP_STATUS)
print 'Alright, done!'
raw_input("Press Enter to continue...")
def start(setup_status):
"""Start - starts the setup
start carries the main function calls for setup.py.
    It notifies the user about each step, and waits for confirmation.
No notice or cancellation is allowed explicitly.
If the user wishes to quit, they can do so by breaking the setup.
Args:
None
Returns:
None
Raises:
None
"""
global SETUP_STATUS
SETUP_STATUS = setup_status
welcome_message()
err = None
if not SETUP_STATUS['installed_dependencies'] and not err:
err = install_dependencies()
if not SETUP_STATUS['database_migrate'] and not err:
err = database_migrate()
if not SETUP_STATUS['user_admin'] and not err:
err = create_user_admin()
if not err:
SETUP_STATUS['console_completed'] = True
else:
SETUP_STATUS['console_completed'] = False
pickle_setup(SETUP_STATUS)
return SETUP_STATUS
def internet_on():
"""Check if internet connectivity is present
The function checks if internet is on by connecting to a
website (www.google.co.in) and analysing its response.
Args:
None
Returns:
bool: True if ON, False otherwise.
Raises:
None
"""
try:
urllib2.urlopen('http://216.58.196.99', timeout=10)
return True
except urllib2.URLError:
pass
return False
def cls():
"""Clear Screen
    The function clears the screen on any platform (POSIX / Windows).
    It checks which system is running and uses the appropriate command
    for the default terminal.
    For Windows:
        platform.system returns 'Windows'
        the screen is cleared with the 'cls' command
    For Others:
        the screen is cleared with 'clear' across all POSIX systems
Args:
|
dayatz/taiga-back
|
taiga/celery.py
|
Python
|
agpl-3.0
| 1,321
| 0.000758
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.conf import settings
try:
from settings import celery_local as celery_settings
except ImportError:
from settings import celery as celery_settings
app = Celery('taiga')
app.config_from_object(celery_settings)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
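# Hedged example (not in the original file): with the app configured above, any
# tasks.py module inside an app listed in INSTALLED_APPS is auto-discovered. A
# minimal task might look like this (the import path below is an assumption):
#
#     from taiga.celery import app
#
#     @app.task
#     def ping():
#         return "pong"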
|
mrgloom/menpofit
|
menpofit/modelinstance.py
|
Python
|
bsd-3-clause
| 11,665
| 0.000171
|
import numpy as np
from menpo.base import Targetable, Vectorizable
from menpo.model import MeanInstanceLinearModel
from menpofit.differentiable import DP
def similarity_2d_instance_model(shape):
r"""
A MeanInstanceLinearModel that encodes all possible 2D similarity
transforms of a 2D shape (of n_points).
Parameters
----------
shape : 2D :class:`menpo.shape.Shape`
Returns
-------
model : `menpo.model.linear.MeanInstanceLinearModel`
Model with four components, linear combinations of which
represent the original shape under a similarity transform. The
model is exhaustive (that is, all possible similarity transforms
can be expressed in the model).
"""
shape_vector = shape.as_vector()
components = np.zeros((4, shape_vector.shape[0]))
components[0, :] = shape_vector # Comp. 1 - just the shape
rotated_ccw = shape.points[:, ::-1].copy() # flip x,y -> y,x
rotated_ccw[:, 0] = -rotated_ccw[:, 0] # negate (old) y
components[1, :] = rotated_ccw.flatten() # C2 - the shape rotated 90 degs
components[2, ::2] = 1 # Tx
components[3, 1::2] = 1 # Ty
return MeanInstanceLinearModel(components, shape_vector, shape)
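# Hedged usage sketch (not from the original source; assumes menpo's PointCloud and
# the linear model's instance() API used elsewhere in this module):
#
#     import numpy as np
#     from menpo.shape import PointCloud
#
#     shape = PointCloud(np.random.rand(10, 2))
#     model = similarity_2d_instance_model(shape)
#     # weights map to: [scaled shape, 90-degree rotation, x translation, y translation]
#     new_instance = model.instance(np.array([1.0, 0.0, 0.5, -0.5]))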
class ModelInstance(Targetable, Vectorizable, DP):
r"""A instance of a :map:`InstanceBackedModel`.
This class describes an instance produced from one of Menpo's
:map:`InstanceBackedModel`. The actual instance provided by the model can
be found at self.target. This class is targetable, and so
:meth:`set_target` can be used to update the target - this will produce the
closest possible instance the Model can produce to the target and set the
weights accordingly.
Parameters
----------
model : :map:`InstanceBackedModel`
The generative model that instances will be taken from
"""
def __init__(self, model):
self.model = model
self._target = None
# set all weights to 0 (yielding the mean, first call to
# from_vector_inplace() or set_target() will update this)
self._weights = np.zeros(self.model.n_active_components)
self._sync_target_from_state()
@property
def n_weights(self):
r"""
The number of parameters in the linear model.
:type: int
"""
return self.model.n_active_components
@property
def weights(self):
r"""
In this simple :map:`ModelInstance` the weights are just the weights
of the model.
"""
return self._weights
@property
def target(self):
return self._target
def _target_setter(self, new_target):
r"""
Called by the Targetable framework when set_target() is called.
This method **ONLY SETS THE NEW TARGET** it does no synchronisation
logic (for that, see _sync_state_from_target())
"""
self._target = new_target
def _new_target_from_state(self):
r"""
Return the appropriate target for the parameters provided.
Subclasses can override this.
Returns
-------
new_target: model instance
"""
return self.model.instance(self.weights)
def _sync_state_from_target(self):
# 1. Find the optimum parameters and set them
self._weights = self._weights_for_target(self.target)
# 2. Find the closest target the model can reproduce and trigger an
# update of our transform
self._target_setter(self._new_target_from_state())
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided.
Subclasses can override this.
Parameters
----------
target: model instance
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
instance to the requested target
"""
return self.model.project(target)
def _as_vector(self):
r"""
Return the current parameters of this transform - this is the
just the linear model's weights
Returns
-------
params : (`n_parameters`,) ndarray
The vector of parameters
"""
return self.weights
def from_vector_inplace(self, vector):
r"""
        Updates this :map:`ModelInstance` from its
vectorized form (in this case, simply the weights on the linear model)
"""
self._weights = vector
self._sync_target_from_state()
class PDM(ModelInstance, DP):
r"""Specialization of :map:`ModelInstance` for use with spatial data.
"""
@property
def n_dims(self):
r"""
The number of dimensions of the spatial instance of the model
:type: int
"""
return self.model.template_instance.n_dims
def d_dp(self, points):
"""
Returns the Jacobian of the PCA model reshaped to have the standard
Jacobian shape:
n_points x n_params x n_dims
which maps to
n_features x n_components x n_dims
on the linear model
Returns
-------
jacobian : (n_features, n_components, n_dims) ndarray
The Jacobian of the model in the standard Jacobian shape.
"""
d_dp = self.model.components.reshape(self.model.n_active_components,
-1, self.n_dims)
return d_dp.swapaxes(0, 1)
# TODO: document me
class GlobalPDM(PDM):
r"""
"""
def __init__(self, model, global_transform_cls):
# Start the global_transform as an identity (first call to
# from_vector_inplace() or set_target() will update this)
mean = model.mean()
self.global_transform = global_transform_cls(mean, mean)
super(GlobalPDM, self).__init__(model)
@property
def n_global_parameters(self):
r"""
        The number of parameters in the `global_transform`
:type: int
"""
return self.global_transform.n_parameters
@property
def global_parameters(self):
r"""
The parameters for the global transform.
:type: (`n_global_parameters`,) ndarray
"""
return self.global_transform.as_vector()
def _new_target_from_state(self):
r"""
Return the appropriate target for the model weights provided,
accounting for the effect of the global transform
Returns
-------
new_target: :class:`menpo.shape.PointCloud`
A new target for the weights provided
"""
return self.global_transform.apply(self.model.instance(self.weights))
def _weights_for_target(self, target):
r"""
Return the appropriate model weights for target provided, accounting
for the effect of the global transform. Note that this method
updates the global transform to be in the correct state.
Parameters
----------
target: :class:`menpo.shape.PointCloud`
The target that the statistical model will try to reproduce
Returns
-------
weights: (P,) ndarray
Weights of the statistical model that generate the closest
PointCloud to the requested target
"""
self._update_global_transform(target)
projected_target = self.global_transform.pseudoinverse().apply(target)
# now we have the target in model space, project it to recover the
# weights
new_weights = self.model.project(projected_target)
# TODO investigate the impact of this, could be problematic
# the model can't perfectly reproduce the target we asked for -
# reset the global_transform.target to what it CAN produce
#refined_target = self._target_for_weights(new_weights)
#self.global_transform.target = refined_target
return new_weights
def _update_global_transform(s
|
mattsch/Sickbeard
|
sickbeard/tvcache.py
|
Python
|
gpl-3.0
| 1,097
| 0.012762
|
import time
import datetime
import sqlite3
import urllib
import gzip
import urllib2
import StringIO
import sickbeard
from sickbeard import db
from sickbeard import logger
from sickbeard.common import *
class TVCache():
def __init__(self, providerName):
self.providerName = providerName
def _getDB(self):
return db.DBConnection("cache.db")
def _clearCache(self):
myDB = self._getDB()
myDB.action("DELETE FROM "+self.providerName+" WHERE 1")
def updateCache(self):
print "This should be overridden by implementing classes"
pass
    def searchCache(self, show, season, episode, quality=ANY):
myDB = self._getDB()
sql = "SELECT * FROM "+self.providerName+" WHERE tvdbid = "+str(show.tvdbid)+ \
" AND season = "+str(season)+" AND episode = "+str(episode)
if quality != ANY:
sql += " AND quality = "+str(quality)
return myDB.select(sql)
|
ekohl/ganeti
|
lib/opcodes.py
|
Python
|
gpl-2.0
| 47,432
| 0.005081
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""OpCodes module
This module implements the data structures which define the cluster
operations - the so-called opcodes.
Every operation which modifies the cluster state is expressed via
opcodes.
"""
# these are practically structures, so disable the message about too
# few public methods:
# pylint: disable-msg=R0903
import logging
import re
import operator
from ganeti import constants
from ganeti import errors
from ganeti import ht
# Common opcode attributes
#: output fields for a query operation
_POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Selected output fields")
#: the shutdown timeout
_PShutdownTimeout = \
("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TPositiveInt,
"How long to wait for instance to shut down")
#: the force parameter
_PForce = ("force", False, ht.TBool, "Whether to force the operation")
#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString,
"Instance name")
#: Whether to ignore offline nodes
_PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
"Whether to ignore offline nodes")
#: a required node name (for single-node LUs)
_PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
#: a required node group name (for single-group LUs)
_PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
#: Migration type (live/non-live)
_PMigrationMode = ("mode", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.HT_MIGRATION_MODES)),
"Migration mode")
#: Obsolete 'live' migration mode (boolean)
_PMigrationLive = ("live", None, ht.TMaybeBool,
"Legacy setting for live migration, do not use")
#: Tag type
_PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES), None)
#: List of tag strings
_PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), None)
_PForceVariant = ("force_variant", False, ht.TBool,
"Whether to force an unknown OS variant")
_PWaitForSync = ("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize")
_PIgnoreConsistency = ("ignore_consistency", False, ht.TBool,
"Whether to ignore disk consistency")
_PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name")
_PUseLocking = ("use_locking", False, ht.TBool,
"Whether to use synchronization")
_PNameCheck = ("name_check", True, ht.TBool, "Whether to check name")
_PNodeGroupAllocPolicy = \
("alloc_policy", None,
ht.TOr(ht.TNone, ht.TElemOf(constants.VALID_ALLOC_POLICIES)),
"Instance allocation policy")
_PGroupNodeParams = ("ndparams", None, ht.TMaybeDict,
"Default node parameters for group")
_PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP),
"Resource(s) to query for")
_PIpCheckDoc = "Whether to ensure instance's IP address is inactive"
#: Do not remember instance state changes
_PNoRemember = ("no_remember", False, ht.TBool,
"Do not remember the state change")
#: Target node for instance migration/failover
_PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
"Target node for shared-storage instances")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
#: Utility function for L{OpClusterSetParams}
_TestClusterOsList = ht.TOr(ht.TNone,
ht.TListOf(ht.TAnd(ht.TList, ht.TIsLength(2),
ht.TMap(ht.WithDesc("GetFirstItem")(operator.itemgetter(0)),
ht.TElemOf(constants.DDMS_VALUES)))))
# TODO: Generate check from constants.INIC_PARAMS_TYPES
#: Utility function for testing NIC definitions
_TestNicDef = ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS),
ht.TOr(ht.TNone, ht.TNonEmptyString))
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
"NODE_": "N_",
"INSTANCE_": "I_",
}
def _NameToId(name):
"""Convert an opcode class name to an OP_ID.
@type name: string
@param name: the class name, as OpXxxYyy
@rtype: string
@return: the name in the OP_XXXX_YYYY format
"""
if not name.startswith("Op"):
return None
# Note: (?<=[a-z])(?=[A-Z]) would be ideal, since it wouldn't
# consume any input, and hence we would just have all the elements
# in the list, one by one; but it seems that split doesn't work on
# non-consuming input, hence we have to process the input string a
# bit
name = _OPID_RE.sub(r"\1,\2", name)
elems = name.split(",")
return "_".join(n.upper() for n in elems)
def RequireFileStorage():
"""Checks that file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when file storage is disabled
"""
if not constants.ENABLE_FILE_STORAGE:
raise errors.OpPrereqError("File storage disabled at configure time",
errors.ECODE_INVAL)
def RequireSharedFileStorage():
"""Checks that shared file storage is enabled.
While it doesn't really fit into this module, L{utils} was deemed too large
of a dependency to be imported for just one or two functions.
@raise errors.OpPrereqError: when shared file storage is disabled
"""
if not constants.ENABLE_SHARED_FILE_STORAGE:
raise errors.OpPrereqError("Shared file storage disabled at"
" configure time", errors.ECODE_INVAL)
@ht.WithDesc("CheckFileStorage")
def _CheckFileStorage(value):
"""Ensures file storage is enabled if used.
"""
if value == constants.DT_FILE:
RequireFileStorage()
elif value == constants.DT_SHARED_FILE:
RequireSharedFileStorage()
return True
_CheckDiskTemplate = ht.TAnd(ht.TElemOf(constants.DISK_TEMPLATES),
_CheckFileStorage)
def _CheckStorageType(storage_type):
"""Ensure a given storage type is valid.
"""
if storage_type not in constants.VALID_STORAGE_TYPES:
raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
errors.ECODE_INVAL)
if storage_type == constants.ST_FILE:
RequireFileStorage()
return True
#: Storage type parameter
_PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType,
"Storage type")
class _AutoOpParamSlots(type):
"""Meta class for opcode definitions.
"""
def __new__(mcs, name, bases, attrs):
"""Called when a class should be created.
@param mcs: The meta class
@param name: Name of created class
@param bases: Base classes
@type attrs: dict
@param attrs: Class attributes
"""
assert "__slots__" not in attrs, \
"Class '%s' defines __slots__ when it should use OP_PARAMS" % name
assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name
attrs["OP_ID"] = _NameToId(name)
# Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams
params = attrs.setdefault("OP_PARAMS", [])
# Use parameter names as slots
slots = [pname for (pname, _, _, _) in params]
assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \
"Class '%s' uses unknown field in OP_DSC_FIELD" % name
attrs["__slots__"] = slots
    return type.__new__(mcs, name, bases, attrs)
|
galaxy-iuc/parsec
|
parsec/commands/workflows/run_workflow.py
|
Python
|
apache-2.0
| 4,029
| 0.001986
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('run_workflow')
@click.argument("workflow_id", type=str)
@click.option(
"--dataset_map",
help="A mapping of workflow inputs to datasets. The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``), LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``). The map must be in the following format: ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}`` (e.g. ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)",
type=str
)
@click.option(
"--params",
help="A mapping of non-datasets tool parameters (see below)",
type=str
)
@click.option(
"--history_id",
help="The encoded history ID where to store the workflow output. Alternatively, ``history_name`` may be specified to create a new histor
|
y.",
type=str
)
@click.option(
"--history_name",
help="Create a new history with the given name to store the workflow output. If both ``history_id`` and ``hist
|
ory_name`` are provided, ``history_name`` is ignored. If neither is specified, a new 'Unnamed history' is created.",
type=str
)
@click.option(
"--import_inputs_to_history",
help="If ``True``, used workflow inputs will be imported into the history. If ``False``, only workflow outputs will be visible in the given history.",
is_flag=True
)
@click.option(
"--replacement_params",
help="pattern-based replacements for post-job actions (see below)",
type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, workflow_id, dataset_map="", params="", history_id="", history_name="", import_inputs_to_history=False, replacement_params=""):
"""Run the workflow identified by ``workflow_id``.
Output:
A dict containing the history ID where the outputs are placed
as well as output dataset IDs. For example::
{'history': '64177123325c9cfd',
'outputs': ['aa4d3084af404259']}
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
The ``replacement_params`` dict should map parameter names in
post-job actions (PJAs) to their runtime values. For
instance, if the final step has a PJA like the following::
{'RenameDatasetActionout_file1': {'action_arguments': {'newname': '${output}'},
'action_type': 'RenameDatasetAction',
'output_name': 'out_file1'}}
then the following renames the output dataset to 'foo'::
replacement_params = {'output': 'foo'}
see also `this email thread
<http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.
.. warning::
This method waits for the whole workflow to be scheduled before
returning and does not scale to large workflows as a result. This
method has therefore been deprecated in favor of
:meth:`invoke_workflow`, which also features improved default
behavior for dataset input handling.
"""
return ctx.gi.workflows.run_workflow(workflow_id, dataset_map=dataset_map, params=params, history_id=history_id, history_name=history_name, import_inputs_to_history=import_inputs_to_history, replacement_params=replacement_params)
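# Hypothetical invocation sketch (workflow, dataset and history IDs are made up):
#
#     parsec workflows run_workflow f2db41e1fa331b3e \
#         --dataset_map '{"0": {"id": "29beef4fadeed09f", "src": "hda"}}' \
#         --params '{"1": {"linecount": 10}}' \
#         --history_name 'run_workflow demo'
#
# The JSON strings follow the formats described in the docstring above.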
|
AXAz0r/apex-sigma-core
|
sigma/modules/minigames/racing/nodes/race_storage.py
|
Python
|
gpl-3.0
| 1,238
| 0
|
import copy
import secrets
races = {}
colors = {
'🐶': 0xccd6dd,
'🐱': 0xffcb4e,
'🐭': 0x99aab5,
'🐰': 0x99aab5,
'🐙': 0x9266cc,
'🐠': 0xffcc4d,
'🦊': 0xf4900c,
'🦀': 0xbe1931,
'🐸': 0x77b255,
'🐧': 0xf5f8fa
}
names = {
'🐶': 'dog',
    '🐱': 'cat',
'🐭': 'mouse',
'🐰': 'rabbit',
'🐙': 'octopus',
'🐠': 'fish',
'🦊': 'fox',
'🦀': 'crab',
'🐸': 'frog',
'🐧': 'penguin'
}
participant_icons = ['🐶', '🐱', '🐭', '🐰', '🐙', '🐠', '🦊', '🦀', '🐸', '🐧']
def make_race(channel_id, buyin):
    icon_copy = copy.deepcopy(participant_icons)
race_data = {
'icons': icon_copy,
'users': [],
'buyin': buyin
}
races.update({channel_id: race_data})
def add_participant(channel_id, user):
race = races[channel_id]
icons = race['icons']
users = race['users']
usr_icon = secrets.choice(icons)
icons.remove(usr_icon)
race.update({'icons': icons})
participant_data = {
'user': user,
'icon': usr_icon
}
users.append(participant_data)
race.update({'users': users})
races.update({channel_id: race})
return usr_icon
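# Illustrative flow (channel ID and user object are hypothetical): a race is keyed
# by channel, and each joiner draws a random unused animal icon.
#
#     make_race('1234567890', buyin=50)
#     icon = add_participant('1234567890', user)    # e.g. '🦊'
#     races['1234567890']['users']                  # -> [{'user': user, 'icon': icon}]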
|
andrewisakov/taximaster_x
|
router/main.py
|
Python
|
unlicense
| 572
| 0
|
#!/usr/bin/python3
import tornado.ioloop
import tornado.options
import tornado.httpserver
from routes import routes_setup
import settings
clients = []
if __name__ == '__main__':
tornado.options.parse_command_line()
tornado.options.define('log_file_max_size', default=str(10*1024*1024))
tornado.options.define('log_file_prefix', default='router.log')
app = tornado.web.Application(routes_setup(), **settings.settings)
http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(settings.PORT)
tornado.ioloop.IOLoop.current().start()
|
ujdhesa/unisubs
|
apps/statistic/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 10,707
| 0.008219
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EmailShareStatistic'
db.create_table('statistic_emailsharestatistic', (
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
        db.send_create_signal('statistic', ['EmailShareStatistic'])
# Adding model 'TweeterShareStatistic'
db.create_table('statistic_tweetersharestatistic', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
db.send_create_signal('statistic', ['TweeterShareStatistic'])
# Adding model 'FBShareStatistic'
db.create_table('statistic_fbsharestatistic', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True)),
))
db.send_create_signal('statistic', ['FBShareStatistic'])
# Adding model 'SubtitleFetchStatistic'
db.create_table('statistic_subtitlefetchstatistic', (
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['videos.Video'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('statistic', ['SubtitleFetchStatistic'])
def backwards(self, orm):
# Deleting model 'EmailShareStatistic'
db.delete_table('statistic_emailsharestatistic')
# Deleting model 'TweeterShareStatistic'
db.delete_table('statistic_tweetersharestatistic')
# Deleting model 'FBShareStatistic'
db.delete_table('statistic_fbsharestatistic')
# Deleting model 'SubtitleFetchStatistic'
db.delete_table('statistic_subtitlefetchstatistic')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'statistic.emailsharestatistic': {
'Meta': {'object_name': 'EmailShareStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'statistic.fbsharestatistic': {
'Meta': {'object_name': 'FBShareStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'statistic.subtitlefetchstatistic': {
'Meta': {'object_name': 'SubtitleFetchStatistic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fi
|
facebookresearch/Detectron
|
detectron/datasets/cityscapes_json_dataset_evaluator.py
|
Python
|
apache-2.0
| 3,355
| 0
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for evaluating results on Cityscapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import logging
import os
import uuid
import pycocotools.mask as mask_util
from detectron.core.config import cfg
from detectron.datasets.dataset_catalog import get_raw_dir
logger = logging.getLogger(__name__)
def evaluate_masks(
json_dataset,
all_boxes,
all_segms,
output_dir,
use_salt=True,
cleanup=False
):
if cfg.CLUSTER.ON_CLUSTER:
# On the cluster avoid saving these files in the job directory
output_dir = '/tmp'
res_file = os.path.join(
output_dir, 'segmentations_' + json_dataset.name + '_results')
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
results_dir = os.path.join(output_dir, 'results')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
os.environ['CITYSCAPES_DATASET'] = get_raw_dir(json_dataset.name)
os.environ['CITYSCAPES_RESULTS'] = output_dir
# Load the Cityscapes eval script *after* setting the required env vars,
# since the script reads their values into global variables (at load time).
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
as cityscapes_eval
roidb = json_dataset.get_roidb()
for i, entry in enumerate(roidb):
im_name = entry['image']
basename = os.path.splitext(os.path.basename(im_name))[0]
txtname = os.path.join(output_dir, basename + 'pred.txt')
with open(txtname, 'w') as fid_txt:
if i % 10 == 0:
logger.info('i: {}: {}'.format(i, basename))
for j in range(1, len(all_segms)):
clss = json_dataset.classes[j]
clss_id = cityscapes_eval.name2label[clss].id
segms = all_segms[j][i]
boxes = all_boxes[j][i]
if segms == []:
continue
masks = mask_util.decode(segms)
for k in range(boxes.shape[0]):
score = boxes[k, -1]
mask = masks[:, :, k]
pngname = os.path.join(
'results',
basename + '_' + clss + '_{}.png'.format(k))
# write txt
fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
# save mask
cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)
logger.info('Evaluating...')
cityscapes_eval.main([])
return None
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/hybrid/test_add_iz.py
|
Python
|
apache-2.0
| 623
| 0.004815
|
'''
New Integration Test for hybrid.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
hybrid = test_stub.HybridObject()
def test():
hybrid.add_datacenter_iz()
hybrid.del_iz()
test_util.test_pass('Add Delete Identity Zone Test Success')
# Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
nickpascucci/Robot-Arm
|
software/desktop/brazo/brazo_lib/preferences.py
|
Python
|
mit
| 3,116
| 0.007702
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
"""Provides a shared preferences dictionary"""
from desktopcouch.records.server import CouchDatabase
from desktopcouch.records.record import Record
import gtk
import gobject
class User_dict(dict):
''' a dictionary with extra methods:
persistence: load, save and db_connect
gobject signals: connect and emit.
Don't use this directly. Please use the preferences instance.'''
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
# Set up couchdb.
self._db_name = "brazo"
self._key = None
self._database = None
self._record_type = (
"http://wiki.ubuntu.com/Quickly/RecordTypes/Brazo/"
"Preferences")
class Publisher(gtk.Invisible): # pylint: disable=R0904
'''set up signals in a separate class
gtk.Invisible has 230 public methods'''
__gsignals__ = {'changed' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
'loaded' : (gobject.SIGNAL_RUN_LAST,
                 gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,))}
publisher = Publisher()
self.emit = publisher.emit
self.connect = publisher.connect
def db_connect(self):
'''connect to couchdb
create if necessary'''
# logging.basicConfig will be called now
self._database = CouchDatabase(self._db_name, create=True)
def save(self):
'save to couchdb'
self._database.update_fields(self._key, self)
def load(self):
'load from couchdb'
self.update({"record_type": self._record_type})
results = self._database.get_records(
record_type=self._record_type, create_view=True)
if len(results.rows) == 0:
# No preferences have ever been saved
# save them before returning.
self._key = self._database.put_record(Record(self))
else:
self.update(results.rows[0].value)
del self['_rev']
self._key = results.rows[0].value["_id"]
self.emit('loaded', None)
def update(self, *args, **kwds):
''' interface for dictionary
send changed signal when appropriate '''
# parse args
new_data = {}
new_data.update(*args, **kwds)
changed_keys = []
for key in new_data.keys():
if new_data.get(key) != dict.get(self, key):
changed_keys.append(key)
dict.update(self, new_data)
if changed_keys:
self.emit('changed', tuple(changed_keys))
def __setitem__(self, key, value):
''' interface for dictionary
send changed signal when appropriate '''
if value != dict.get(self, key):
dict.__setitem__(self, key, value)
self.emit('changed', (key,))
preferences = User_dict()
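# Typical usage sketch (assumes a running desktopcouch instance; the 'speed' key is
# only an example):
#
#     preferences.db_connect()    # create/open the "brazo" CouchDB database
#     preferences.load()          # emits the 'loaded' signal
#     preferences['speed'] = 5    # emits 'changed' with ('speed',)
#     preferences.save()          # persists the record back to CouchDB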
|
Pasotaku/Anime-Feud-Survey-Backend
|
Old Python Code/settings_db_init.py
|
Python
|
mit
| 617
| 0.001621
|
import sqlite3
import os
def init():
"""
Creates and initializes settings database.
    Doesn't do anything if the file already exists. Remove the local copy to recreate the database.
"""
if not os.path.isfile("settings.sqlite"):
app_db_connection = sqlite3.connect('settings.sqlite')
app_db = app_db_connection.cursor()
app_db.execute("CREATE TABLE oauth (site, rate_remaining, rate_reset)")
app_db.execute("INSERT INTO oauth VALUES ('reddit', 30, 60)")
app_db_connection.commit()
        app_db_connection.close()
if __name__ == "__main__":
init()
|
dan-git/outdoor_bot
|
src/outdoor_bot/msg/_mainTargetsCommand_msg.py
|
Python
|
bsd-2-clause
| 4,588
| 0.017873
|
"""autogenerated by genpy from outdoor_bot/mainTargetsCommand_msg.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class mainTargetsCommand_msg(genpy.Message):
_md5sum = "b1faa92c8dffb7694c609d94e4e2d116"
_type = "outdoor_bot/mainTargetsCommand_msg"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 cameraName
int32 regularDigcamZoom
int32 zoomDigcamZoom
int32 homeDigcamZoom
float32 approxRange
bool firstTarget
"""
__slots__ = ['cameraName','regularDigcamZoom','zoomDigcamZoom','homeDigcamZoom','approxRange','firstTarget']
_slot_types = ['int32','int32','int32','int32','float32','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
cameraName,regularDigcamZoom,zoomDigcamZoom,homeDigcamZoom,approxRange,firstTarget
    :param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(mainTargetsCommand_msg, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.cameraName is None:
self.cameraName = 0
if self.regularDigcamZoom is None:
self.regularDigcamZoom = 0
if self.zoomDigcamZoom is None:
self.zoomDigcamZoom = 0
if self.homeDigcamZoom is None:
self.homeDigcamZoom = 0
if self.approxRange is None:
self.approxRange = 0.
if self.firstTarget is None:
self.firstTarget = False
else:
self.cameraName = 0
self.regularDigcamZoom = 0
self.zoomDigcamZoom = 0
self.homeDigcamZoom = 0
self.approxRange = 0.
self.firstTarget = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_4ifB.pack(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 21
(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget,) = _struct_4ifB.unpack(str[start:end])
self.firstTarget = bool(self.firstTarget)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_4ifB.pack(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 21
(_x.cameraName, _x.regularDigcamZoom, _x.zoomDigcamZoom, _x.homeDigcamZoom, _x.approxRange, _x.firstTarget,) = _struct_4ifB.unpack(str[start:end])
self.firstTarget = bool(self.firstTarget)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_4ifB = struct.Struct("<4ifB")
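# Round-trip sketch (values are hypothetical; in practice rospy handles this). The
# <4ifB struct above packs four int32s, one float32 and one bool into 21 bytes:
#
#     from StringIO import StringIO
#     msg = mainTargetsCommand_msg(cameraName=1, approxRange=2.5, firstTarget=True)
#     buf = StringIO()
#     msg.serialize(buf)
#     copy = mainTargetsCommand_msg().deserialize(buf.getvalue())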
|
jamesthechamp/zamboni
|
mkt/operators/helpers.py
|
Python
|
bsd-3-clause
| 360
| 0
|
import jinja2
from jingo import register
from tower import ugettext_lazy as _lazy
from mkt.site.helpers import page_title
@register.function
@jinja2.contextfunction
def operators_page_title(context, title=None):
section = _lazy('Operator Dashboard')
title = u'%s | %s' % (title, section) if title else section
    return page_title(context, title)
|
malaonline/Server
|
server/app/migrations/0186_change_user_group.py
|
Python
|
mit
| 7,213
| 0.000161
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
group_and_permission = [
("超级管理员", [], ['all']),
("运营部", ['Can change teacher'],
[
         # Teachers awaiting publication, published teachers, teacher editing
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Assessment filing; can view schedules; cannot reschedule or suspend lessons; can request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'orders_action',
         # Course list, complaints, attendance
'school_timeslot',
         # Center settings, editing
'schools',
'staff_school',
         # Order viewing, refund requests
'orders_review',
'orders_action',
         # Scholarship settings, redemption list
'coupon_config',
'coupons_list',
]),
("财务主管", [],
[
         # Teacher bank card lookup
         'teachers_bankcard_list',
         # Order viewing, refund requests, review
'orders_review',
'orders_refund',
'orders_action',
         # Teacher income list, income details, withdrawal review
'teachers_income_list',
'teachers_income_detail',
'teachers_withdrawal_list',
         # Scholarships, redemption list
'coupons_list',
         # Campus income records (finance)
'school_income_audit',
'school_income_audit_v2',
]),
("会计出纳", [],
[
         # Order viewing, refund requests, review
'orders_review',
'orders_refund',
'orders_action',
         # Teacher income list, income details, withdrawal review
'teachers_income_list',
'teachers_income_detail',
'teachers_withdrawal_list',
         # Scholarships, redemption list
'coupons_list',
]),
("中心主任", ['Can change teacher'],
[
         # Newly registered, awaiting publication, published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Assessment filing; view schedules; reschedule/suspend lessons (actions and logs); can request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'student_schedule_action',
'student_schedule_changelog',
'orders_action',
         # Course list, complaints, attendance
'school_timeslot',
         # Center settings, editing
'schools',
'staff_school',
         # Order viewing, refund requests
'orders_review',
'orders_action',
         # Teacher income list, income details
'teachers_income_list',
'teachers_income_detail',
         # Scholarship settings, redemption list
'coupon_config',
'coupons_list',
         # Change password
'staff_auth',
         # Campus income records
'school_income_records',
'school_income_records_v2',
         # Tiered price settings (one-on-one)
'school_price_cfg',
         # Campus account information
'school_account_info',
'school_account_info_v2',
]),
("教师委员会主任", ['Can change teacher'],
[
         # Newly registered, awaiting publication, published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Assessment filing; can only view schedules; cannot reschedule or suspend lessons, nor request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
         # Course list
         # todo: complaints and attendance are also on this page, so they are currently allowed
'school_timeslot',
]),
("教师委员会主任助理", ['Can change teacher'],
[
         # Newly registered, awaiting publication, published teachers, teacher editing
'teachers',
'teachers_unpublished',
'teachers_published',
'teachers_unpublished_edit',
'teachers_published_edit',
'teachers_action',
         # Assessment filing; can only view schedules; cannot reschedule or suspend lessons, nor request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
]),
("学习顾问", [],
[
         # Assessment filing; view schedules; reschedule/suspend lessons (actions and logs); cannot request refunds
'evaluations',
'evaluations_action',
'student_schedule_manage',
'student_schedule_action',
'student_schedule_changelog',
         # Course list
         # todo: complaints and attendance are also on this page, so they are currently allowed
'school_timeslot',
]),
("社区店长", [],
[
         # Course list
         # todo: currently shows the courses of all centers
         # todo: complaints and attendance are also on this page, so they are currently allowed
'school_timeslot',
         # Change password
'staff_auth',
]),
("老师", [], []),
("家长", ['Can change parent', 'Can change profile'], []),
("学生", [], []),
    # todo: the faculty-management admin role is currently only used to log into the backend
("师资管理员", ['Can change teacher'], [])
]
def _add_test_user_into_group(apps, test_user_format, count, group_name,
newUserData=None):
Group = apps.get_model('auth', 'Group')
User = apps.get_model('auth', 'User')
user_group = Group.objects.get(name=group_name)
for i in range(count):
username = test_user_format.format(id=i)
try:
if newUserData:
user, created = User.objects.get_or_create(
username=username, defaults=newUserData)
else:
user = User.objects.get(username=username)
except User.DoesNotExist:
#print("{user} not exist".format(user=test_user_format))
continue
user.groups.add(user_group)
def change_user_group(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
Group = apps.get_model('auth', 'Group')
StaffPermission = apps.get_model('app', 'StaffPermission')
global group_and_permission
for group_name, permission_list, allowed_url_names in group_and_permission:
new_group, group_create = Group.objects.get_or_create(name=group_name)
new_group.save()
new_group.staffpermission_set.clear()
for url_name in allowed_url_names:
new_url_name, created = StaffPermission.objects.get_or_create(
allowed_url_name=url_name)
new_group.staffpermission_set.add(new_url_name)
new_group.permissions.clear()
for permission_name in permission_list:
permission = Permission.objects.get(name=permission_name)
new_group.permissions.add(permission)
_add_test_user_into_group(apps, 'test', 1, '超级管理员')
class Migration(migrations.Migration):
dependencies = [
('app', '0185_livecoursetimeslot_mistakes_pushed'),
]
operations = [
migrations.RunPython(change_user_group),
]
|
rgeorgi/intent
|
intent/tests/instance_construction_tests.py
|
Python
|
mit
| 718
| 0.009749
|
from unittest import TestCase
from intent.igt.rgxigt import RGIgt
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
|
inst = RGIgt(id='i1')
inst.add_raw_tier(self.lines)
self.assertEqual(len(inst.raw_tier()), 2)
def test_add_clean_lines(self):
inst = RGIgt(id='i1')
inst.add_clean_tier(self.lines)
self.assertEqual(len(inst.clean_tier()), 2)
def test_add_norm_lines(self):
inst = RGIgt(id='i1')
|
inst.add_clean_tier(self.lines)
self.assertEqual(len(inst.clean_tier()), 2)
|
xesscorp/KiCost
|
kicost/distributors/__init__.py
|
Python
|
mit
| 3,347
| 0.000299
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corporation / Hildo Guillardi Júnior
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'XESS Corporation'
__email__ = 'info@xess.com'
from .distributor import distributor_class
# Export the ORDER_COL_USERFIELDS content
from .distributors_info import ORDER_COL_USERFIELDS # noqa: F401
# Import and register here the API / local / scrape modules.
from .dist_local_template import dist_local_template # noqa: F401
from .api_octopart import api_octopart # noqa: F401
from .api_partinfo_kitspace import api_partinfo_kitspace # noqa: F401
#
# Some wrappers
#
def init_distributor_dict():
distributor_class.init_dist_dict()
def get_dist_parts_info(parts, dist_list, currency):
distributor_class.get_dist_parts_info(parts, dist_list, currency)
def get_registered_apis():
return distributor_class.registered
def get_distributors_list():
''' List of distributors registered by the API modules '''
return list(distributor_class.get_distributors_iter())
def get_distributors_iter():
''' Iterator for the distributors registered by the API modules '''
return distributor_class.get_distributors_iter()
def get_distributor_info(name):
''' Gets all the information about a supported distributor.
This information comes from the list collected from the APIs, not from the fixed template. '''
return distributor_class.get_distributor_info(name)
def get_dist_name_from_label(label):
''' Returns the internal distributor name for a provided label. '''
return distributor_class.label2name.get(label.lower())
def set_distributors_logger(logger):
''' Sets the logger used by the class '''
distributor_class.logger = logger
def set_distributors_progress(cls):
    ''' Configures the class used to indicate progress '''
distributor_class.progress = cls
def set_api_options(api, **kwargs):
''' Configure an API (by name) '''
distributor_class.set_api_options(api, **kwargs)
def set_api_status(api, enabled):
''' Enable/Disable a particular API '''
distributor_class.set_api_status(api, enabled)
def get_api_status(api):
''' Find if an API is enabled '''
return distributor_class.get_api_status(api)
# Init distributor dict during import.
init_distributor_dict()
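# Minimal usage sketch (the 'parts' list would come from KiCost's EDA readers and is
# assumed here; distributor names shown are examples only):
#
#     import logging
#     set_distributors_logger(logging.getLogger('kicost'))
#     dists = get_distributors_list()        # e.g. ['digikey', 'mouser', ...]
#     get_dist_parts_info(parts, dists, 'USD')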
|
Linaro/squad
|
test/test_code_quality.py
|
Python
|
agpl-3.0
| 285
| 0
|
import subprocess
import shutil
from unittest import TestCase
if shutil.which('flake8'):
    class TestCodeQuality(TestCase):
def test_flake8(self):
            self.assertEqual(0, subprocess.call('flake8'))
else:
print("I: skipping flake8 test (flake8 not available)")
|
matthewkenzie/gammacombo
|
scripts/plotModuleExample.py
|
Python
|
gpl-3.0
| 1,823
| 0.044981
|
#!/usr/bin/env python
import ROOT as r
def setTextBoxAttributes(text, color, font):
text.SetTextColor(color)
text.SetTextAlign(13)
text.SetTextSize(0.04)
text.SetTextFont(font)
def dress(canv, colors):
# need this to keep the objects alive
objs = []
vub_inc = r.TLatex(0.16,0.655,"V_{ub} inclusive")
vub_inc.SetNDC()
vub_inc.Draw("same")
objs.append(vub_inc)
vcb_inc = r.TLatex(0.68,0.72,"V_{cb} inclusive")
vcb_inc.SetNDC()
vcb_inc.SetTextAngle(90)
vcb_inc.Draw("same")
objs.append(vcb_inc)
vub_excl = r.TLatex(0.16,0.455,"V_{ub} exclusive")
vub_excl.SetNDC()
vub_excl.Draw("same")
objs.append(vub_excl)
    vcb_excl = r.TLatex(0.45,0.72,"V_{cb} exclusive")
vcb_excl.SetNDC()
vcb_excl.SetTextAngle(90)
vcb_excl.Draw("same")
objs.append(vcb_excl)
vub_vcb_lhcb = r.TLatex(0.17,0.29,"V_{ub}/V_{cb} LHCb")
vub_vcb_lhcb.SetNDC()
vub_vcb_lhcb.SetTextAngle(8)
vub_vcb_lhcb.Draw("same")
objs.append(vub_vcb_lhcb)
indirect = r.TLatex(0.17,0.38,"Indirect (CKM fitter)")
indirect.SetNDC()
indirect.SetTextAngle(9)
indirect.Draw("same")
objs.append(indirect)
comb_inc = r.TLatex(0.66,0.61,"Comb. incl.")
comb_inc.SetNDC()
comb_inc.Draw("same")
objs.append(comb_inc)
comb_excl = r.TLatex(0.43,0.40,"Comb. excl.")
comb_excl.SetNDC()
comb_excl.SetTextAngle(0)
comb_excl.Draw("same")
objs.append(comb_excl)
group1 = r.TLatex(0.18,0.85,"#splitline{PDG 2014 +}{CKM fitter +}")
group1.SetNDC()
group1.SetTextFont(132)
group1.SetTextSize(0.05)
group1.Draw("same")
objs.append(group1)
group2 = r.TLatex(0.18,0.75,"#splitline{#Lambda_{b}#rightarrowp#mu#nu (LHCb)}{}")
group2.SetNDC()
group2.SetTextFont(132)
group2.SetTextSize(0.05)
group2.Draw("same")
objs.append(group2)
canv.Update()
canv.Modified()
return objs
|
smurfix/HomEvenT
|
test/mod_path.py
|
Python
|
gpl-3.0
| 1,386
| 0.025271
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## Copyright © 2007-2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from homevent.reactor import ShutdownHandler
from homevent.module import load_module
from homevent.statement import main_words
from test import run
input = """\
block:
if exists path "..":
log DEBUG Yes
else:
log DEBUG No1
if exists path "...":
log DEBUG No2
else:
log DEBUG Yes
if exists directory "..":
log DEBUG Yes
else:
log DEBUG No3
if exists directory "README":
log DEBUG No4
else:
log DEBUG Yes
if exists file "README":
log DEBUG Yes
else:
log DEBUG No5
if exists file "..":
log DEBUG No6
else:
log DEBUG Yes
shutdown
"""
main_words.register_statement(ShutdownHandler)
load_module("logging")
load_module("ifelse")
load_module("path")
load_module("block")
run("path",input)
|
emanuelcovaci/TLT
|
blog/contact/forms.py
|
Python
|
agpl-3.0
| 1,487
| 0.000672
|
from django.core.validators import validate_email
from django import forms
from captcha.fields import ReCaptchaField
from .models import ContactUs
class CreateContact(forms.ModelForm):
captcha = ReCaptchaField()
class Meta:
model = ContactUs
fields = '__all__'
widgets = {
'email': forms.EmailInput({'required': 'required',
'placeholder': 'Email'}),
'message': forms.Textarea(attrs={'required': 'required',
'placeholder': 'Message'})
}
def clean_first_name(self):
first_name = self.cleaned_data['first_name']
if not first_name.isalpha():
raise forms.ValidationError("Introdu un prenume valid")
return first_name
    def clean_email(self):
        email = self.cleaned_data['email']
        # validate_email raises ValidationError rather than returning a value,
        # so wrap it in try/except instead of testing its (always falsy) result.
        try:
            validate_email(email)
        except forms.ValidationError:
            raise forms.ValidationError("Adresa de email nu e valida")
        return email
def clean_last_name(self):
last_name = self.cleaned_data['last_name']
|
if not last_name.isalpha():
raise forms.ValidationError("Introdu un nume corect")
return last_name
def clean_message(self):
message = self.cleaned_data['message']
if len(message) < 50:
raise forms.ValidationError(
"Mesajul tau e prea scurt!"
"Trebuie sa contina minim 50 de caractere")
return message
|
DCOD-OpenSource/django-simple-help
|
simple_help/admin.py
|
Python
|
mit
| 1,108
| 0.000903
|
# -*- coding: utf-8 -*-
# django-simple-help
# simple_help/admin.py
from __future__ import unicode_literals
from django.contrib import admin
try: # add modeltranslation
from modeltranslation.translator import translator
from modeltranslation.admin import TabbedDjangoJqueryTranslationAdmin
except ImportError:
pass
from simple_help.models import PageHelp
from simple_help.forms import PageHelpAdminForm
from simple_help.utils import modeltranslation
try:
from simple_help.translation import PageHelpTranslationOptions
except ImportError:
pass
__all__ = [
"PageHelpAdmin",
]
class PageHelpAdmin(TabbedDjangoJqueryTranslationAdmin if modeltranslation() else admin.ModelAdmin):
"""
Customize PageHelp model for admin area.
"""
list_display = ["page", "title", ]
search_fields = ["title", ]
list_filter = ["page", ]
form = PageHelpAdminForm
if modeltranslation():
# registering translation options
|
translator.register(PageHelp, PageHelpTranslationOptions)
# registering admin custom classes
admin.site.register(PageHelp, PageHelpAdmin)
|
iem-projects/WILMAmix
|
WILMA/net/resolv.py
|
Python
|
gpl-2.0
| 2,201
| 0.011369
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013, IOhannes m zmölnig, IEM
# This file is part of WILMix
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WILMix. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import QHostInfo, QHostAddress
def getAddress(hostname, preferIPv6=None):
    # IPv6=true: prefer IPv6 addresses (if there are none, the function might still return IPv4)
# IPv6=false: prefer IPv4 addresses (if there are none, the function might still return IPv6)
# IPv6=None: first available address returned
info=QHostInfo()
adr=info.fromName(hostname).addresses()
if not adr: return None
if preferIPv6 is None:
return adr[0].toString()
for a_ in adr:
a=QHostAddress(a_)
if preferIPv6:
if a.toIPv6Address():
return a.toString()
else:
if a.toIPv4Address():
return a.toString()
return adr[0].toString()
if __name__ == '__main__':
def testfun(name, ipv6):
addr=getAddress(name, ipv6)
print("%s -> %s" % (name, addr))
import sys
progname=sys.argv[0]
ipv6=None
args=[]
if len(sys.argv)>1:
s=sys.argv[1]
if s.startswith('-'):
args=sys.argv[2:]
if "-ipv4" == s:
ipv6=False
elif "-ipv6" == s:
ipv6=True
else:
print("Usage: resolv.py [-ipv4|-ipv6] <host1> [<host2> ...]")
sys.exit(1)
else:
args=sys.argv[1:]
if not args:
args=['localhost', 'umlautq', 'example.com']
for h in args:
testfun(h,ipv6)
|
morenopc/edx-platform
|
lms/djangoapps/courseware/tests/test_views.py
|
Python
|
agpl-3.0
| 21,213
| 0.002641
|
# coding=UTF-8
"""
Tests courseware views.py
"""
import unittest
from datetime import datetime
from mock import MagicMock, patch
from pytz import UTC
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from django.contrib.auth.models import User, AnonymousUser
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory
from edxmako.middleware import MakoMiddleware
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory
import courseware.views as views
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from course_modes.models import CourseMode
import shoppingcart
from util.tests.test_date_utils import fake_ugettext, fake_pgettext
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestJumpTo(TestCase):
"""
Check the jumpto link for a course.
"""
def setUp(self):
# Use toy course from XML
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_jumpto_invalid_location(self):
location = self.course_key.make_usage_key(None, 'NoSuchPlace')
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
def test_jumpto_from_chapter(self):
location = self.course_key.make_usage_key('chapter', 'Overview')
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id(self):
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), 'Overview')
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id_invalid_location(self):
location = Location('edX', 'toy', 'NoSuchPlace', None, None, None)
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewsTestCase(TestCase):
"""
Tests for views.py methods.
"""
def setUp(self):
course = CourseFactory()
chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member
section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
vertical = ItemFactory(category='vertical', parent_location=section.location)
self.component = ItemFactory(category='problem', parent_location=vertical.location)
self.course_key = course.id
self.user = User.objects.create(username='dummy', password='123456',
email='test@mit.edu')
self.date = datetime(2013, 1, 22, tzinfo=UTC)
self.enrollment = CourseEnrollment.enroll(self.user, self.course_key)
self.enrollment.created = self.date
self.enrollment.save()
self.request_factory = RequestFactory()
chapter = 'Overview'
self.chapter_url = '%s/%s/%s' % ('/courses', self.course_key, chapter)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings")
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_course_about_in_cart(self):
in_cart_span = '<span class="add-to-cart">'
# don't mock this course due to shopping cart existence checking
course = CourseFactory.create(org="new", number="unenrolled", display_name="course")
request = self.request_factory.get(reverse('about_course', args=[course.id.to_deprecated_string()]))
request.user = AnonymousUser()
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# authenticated user with nothing in cart
request.user = self.user
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# now add the course to the cart
cart = shoppingcart.models.Order.get_cart_for_user(self.user)
shoppingcart.models.PaidCourseRegistration.add_to_order(cart, course.id)
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertIn(in_cart_span, response.content)
def test_user_groups(self):
        # deprecated function
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertEqual(views.user_groups(mock_user), [])
def test_get_current_child(self):
self.assertIsNone(views.get_current_child(MagicMock()))
mock_xmodule = MagicMock()
mock_xmodule.position = -1
mock_xmodule.get_display_items.return_value = ['one', 'two']
self.assertEqual(views.get_current_child(mock_xmodule), 'one')
mock_xmodule_2 = MagicMock()
mock_xmodule_2.position = 3
mock_xmodule_2.get_display_items.return_value = []
self.assertIsNone(views.get_current_child(mock_xmodule_2))
def test_redirect_to_course_position(self):
mock_module = MagicMock()
mock_module.descriptor.id = 'Underwater Basketweaving'
mock_module.position = 3
mock_module.get_display_items.return_value = []
        self.assertRaises(Http404, views.redirect_to_course_position,
mock_module)
def test_registered_for_course(self):
        self.assertFalse(views.registered_for_course('Basketweaving', None))
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertFalse(views.registered_for_course('dummy', mock_user))
mock_course = MagicMock()
mock_course.id = self.course_key
self.assertTrue(views.registered_for_course(mock_course, self.user))
def test_jump_to_invalid(self):
# TODO add a test for invalid location
# TODO add a test for no data *
request = self.request_factory.get(self.chapter_url)
self.assertRaisesRegexp(Http404, 'Invalid course_key or usage_key', views.jump_to,
request, 'bar', ())
def test_no_end_on_about_page(self):
# Toy course has no course end date or about/end_date blob
self.verify_end_date('edX/toy/TT_2012_Fall')
def test_no_end_about_blob(self):
# test_end has a course end date, no end_date HTML blob
self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
def test_about_blob_end_date(self):
# test_about_blob_end_date has both a course end date and an end_date HTML blob.
# HTML blob wins
self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
def verify_end_date(self, course_id, expected_end_text
|
dmulholland/ivy
|
ivy/ext/ivy_jinja.py
|
Python
|
unlicense
| 1,779
| 0.000562
|
# ------------------------------------------------------------------------------
# This extension adds support for Jinja templates.
# ------------------------------------------------------------------------------
import sys
from ivy import hooks, site, templates
try:
import jinja2
except ImportError:
jinja2 = None
# Stores an initialized Jinja environment instance.
env = None
# The jinja2 package is an optional dependency.
if jinja2:
# Initialize our Jinja environment on the 'init' event hook.
@hooks.register('init')
def init():
# Initialize a template loader.
|
settings = {
'loader': jinja2.FileSystemLoader(site.theme('templates'))
}
# Check the site's config file for any custom settings.
settings.update(site.config.get('jinja', {}))
# Initialize an Environment instance.
global env
env = jinja2.Environment(**settings)
# Register our template engine callback for files with a .jinja extension.
@templates.register('jinja')
def callback(page, filename):
try:
template = env.get_template(filename)
return template.render(page)
except jinja2.TemplateError as err:
msg = "------------------------\n"
msg += " Jinja Template Error \n"
msg += "------------------------\n\n"
msg += " Template: %s\n" % filename
msg += " Page: %s\n\n" % page['filepath']
msg += " %s: %s" % (err.__class__.__name__, err)
if err.__context__:
cause = err.__context__
msg += "\n\n The following cause was reported:\n\n"
msg += " %s: %s" % (cause.__class__.__name__, cause)
sys.exit(msg)
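# Hedged configuration sketch (added; not part of the original extension).
# Because the init hook above merges `site.config.get('jinja', {})` into the
# Environment settings, standard jinja2.Environment keyword arguments could
# plausibly be supplied there, e.g. a dict such as:
#
#     {'trim_blocks': True, 'lstrip_blocks': True}
#
# How that dict is declared in the site's config file depends on Ivy's config
# format and is left as an assumption here.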
|
algochecker/algochecker-web
|
webapp/forms.py
|
Python
|
mit
| 6,858
| 0.001896
|
from django import forms
from django.forms import Form, ModelForm
from django.utils import timezone
from webapp.models import Task, TaskGroup, TaskGroupSet
from webapp.validators import validate_package
from webapp.widgets import CustomSplitDateTimeWidget
class TaskGroupForm(ModelForm):
class Meta:
model = TaskGroup
fields = '__all__'
exclude = ['raw_csv', 'is_public']
labels = {
'name': 'Group name',
'description': 'Description',
'is_public': 'Public'
}
help_texts = {
'is_public': 'determines whether group is public or not'
}
def __init__(self, *args, **kwargs):
kwargs.pop('edit', None)
super(TaskGroupForm, self).__init__(*args, **kwargs)
class TaskGroupCSVForm(Form):
file = forms.FileField()
upload_csv = forms.IntegerField(initial=1, widget=forms.HiddenInput)
class TaskGroupAccessForm(Form):
grant_single = forms.IntegerField(initial=1, widget=forms.HiddenInput)
username = forms.CharField(
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'username', 'class': 'form-control'})
)
class TaskGroupInviteForm(Form):
send_invitation = forms.CharField(initial=1, widget=forms.HiddenInput)
email = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder': 'E-mail', 'class': 'form-control'}))
class TaskForm(ModelForm):
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
help_text='Set blank if no deadline',
required=False
)
package = forms.FileField(
label='Package',
help_text='.zip package created according to guidelines',
widget=forms.FileInput,
validators=[validate_package]
)
class Meta:
model = Task
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Task name',
'description_brief': 'Short description',
'tg_set': 'Task set',
'submission_limit': 'Submissions limit',
'result_type': 'Result priority',
'files_count_limit': 'Max. files amount',
'file_size_limit': 'Max. file size'
}
help_texts = {
'description': 'Markdown can be used here',
'description_brief': 'Short description will be shown on the tasks list page',
'tg_set': 'Task set to which this task belongs',
'result_type': 'Pattern, according to which results list will appear.',
'submission_limit': 'Limit of submissions per user. Put 0 if unlimited',
'files_count_limit': 'Maximal amount of files in one submission',
'file_size_limit': 'Maximal size of single file (in bytes)'
}
widgets = {
'package': forms.FileInput
}
def __init__(self, *args, **kwargs):
edit = kwargs.pop('edit', None)
super(TaskForm, self).__init__(*args, **kwargs)
if edit:
self.fields['package'].label = 'New package'
self.fields['package'].required = False
self.fields['tg_set'].queryset = TaskGroupSet.objects.filter(task_group_id=self.instance.task_group_id)
else:
self.fields['deadline'].initial = timezone.now() + timezone.timedelta(days=14)
del self.fields['tg_set']
class InvalidateSubmissionForm(Form):
comment = forms.CharField(
label='Your comment',
widget=forms.Textarea(attrs={'placeholder': 'Type in the reason here'}),
required=True
|
)
class CopyTaskGroup(Form):
name = forms.CharField(
label='New name',
widget=forms.TextInput(attrs={'placeholder': 'New name'}),
required=True
)
    description = forms.CharField(
label='Description',
widget=forms.Textarea(attrs={'placeholder': 'Type in new description (optional)'}),
required=False
)
class TaskGroupSetForm(ModelForm):
class Meta:
model = TaskGroupSet
fields = '__all__'
exclude = ['task_group']
labels = {
'name': 'Name',
'description': 'Description'
}
class TaskGroupBulkDeadlines(Form):
set_id = forms.IntegerField(
required=True,
widget=forms.HiddenInput()
)
deadline = forms.SplitDateTimeField(
input_date_formats=['%Y-%m-%d'],
input_time_formats=['%H:%M:%S'],
widget=CustomSplitDateTimeWidget(
date_attrs={'placeholder': 'Date: yyyy-mm-dd', 'data-dpk': '1'},
time_attrs={'placeholder': 'Time: hh:mm:ss'},
date_format='%Y-%m-%d',
time_format='%H:%M:%S'
),
required=False,
label='name of the set'
)
def __init__(self, *args, **kwargs):
super(TaskGroupBulkDeadlines, self).__init__(*args, **kwargs)
self.fields['deadline'].label = self.initial.get('set_name')
class FeedbackFrom(Form):
TOPIC = (
('', '- Please select -'),
('proposal', 'I have a proposal'),
('report', 'I want to report a problem'),
('question', 'I have a question'),
('other', 'Other')
)
theme = forms.ChoiceField(label='What happened?', choices=TOPIC)
email = forms.EmailField(
label='',
widget=forms.EmailInput(attrs={'placeholder': 'Contact e-mail'})
)
content = forms.CharField(
label='Write your message here:',
widget=forms.Textarea
)
class InternalLoginForm(Form):
username = forms.CharField(label='Username')
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class InternalRegisterForm(Form):
username = forms.CharField(min_length=3, label='Username')
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
first_name = forms.CharField(min_length=1, label='First name')
last_name = forms.CharField(min_length=1, label='Last name')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetInitForm(Form):
username = forms.CharField(min_length=3, label='Username')
email = forms.CharField(label='E-mail address', widget=forms.EmailInput)
class PasswordForgetResetForm(Form):
password = forms.CharField(min_length=8, label='Password', widget=forms.PasswordInput)
repeat_password = forms.CharField(label='Repeat password', widget=forms.PasswordInput)
|
Brocade-OpenSource/OpenStack-DNRM-Neutron
|
neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
|
Python
|
apache-2.0
| 2,023
| 0.001483
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 3cb5d900c5de
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '3cb5d900c5de'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type',
sa.Enum('flat', 'vlan', 'stt', 'gre',
name='nvp_network_bindings_binding_type'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
tsmrachel/remo
|
remo/dashboard/admin.py
|
Python
|
bsd-3-clause
| 835
| 0
|
from django.contrib import admin
from django.utils.encoding import smart_text
from import_export.admin import ExportMixin
from remo.dashboard.models import ActionItem
def encode_action_item_names(modeladmin, request, queryset):
for obj in queryset:
ActionItem.objects.filter(pk=obj.id).update(name=smart_text(obj.name))
encode_action_item_names.short_description = 'Encode action item names'
class ActionItemAdmin(ExportMixin, admin.ModelAdmin):
    model = ActionItem
list_display = ('__unicode__', 'user', 'due_date', 'created_on',
'priority', 'updated_on', 'object_id',)
    search_fields = ['user__first_name', 'user__last_name',
'user__userprofile__display_name', 'name']
actions = [encode_action_item_names]
admin.site.register(ActionItem, ActionItemAdmin)
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/play_services/preprocess.py
|
Python
|
gpl-3.0
| 9,974
| 0.008823
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Prepares the Google Play services split client libraries before usage by
Chrome's build system.
We need to preprocess Google Play services before using it in Chrome
builds for 2 main reasons:
- Getting rid of unused resources: unsupported languages, unused
drawables, etc.
    - Merging the different jars so that it can be proguarded more
easily. This is necessary since debug and test apks get very close
to the dex limit.
The script is supposed to be used with the maven repository that can be
obtained by downloading the "extra-google-m2repository" from the Android SDK
Manager. It also supports importing from already extracted AAR files using the
--is-extracted-repo flag. The expected directory structure in that case would
look like:
REPOSITORY_DIR
+-- CLIENT_1
| +-- <content of the first AAR file>
+-- CLIENT_2
+-- etc.
The output is a directory with the following structure:
OUT_DIR
+-- google-play-services.jar
+-- res
| +-- CLIENT_1
| | +-- color
| | +-- values
| | +-- etc.
| +-- CLIENT_2
| +-- ...
+-- stub
+-- res/[.git-keep-directory]
+-- src/android/UnusedStub.java
Requires the `jar` utility in the path.
'''
import argparse
import glob
import itertools
import os
import shutil
import stat
import sys
import tempfile
import zipfile
from datetime import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import devil_chromium
from devil.utils import cmd_helper
from play_services import utils
from pylib.utils import argparse_utils
def main():
    parser = argparse.ArgumentParser(description=(
        "Prepares the Google Play services split client libraries before usage "
        "by Chrome's build system. See the script's documentation for a more "
        "detailed help."))
argparse_utils.CustomHelpAction.EnableFor(parser)
required_args = parser.add_argument_group('required named arguments')
required_args.add_argument('-r',
'--repository',
help=('the Google Play services repository '
'location'),
required=True,
metavar='FILE')
required_args.add_argument('-o',
'--out-dir',
help='the output directory',
required=True,
metavar='FILE')
required_args.add_argument('-c',
'--config-file',
help='the config file path',
required=True,
metavar='FILE')
parser.add_argument('-x',
'--is-extracted-repo',
action='store_true',
help='the provided repository is not made of AAR files')
parser.add_argument('--config-help',
action='custom_help',
custom_help_text=utils.ConfigParser.__doc__,
help='show the configuration file format help')
args = parser.parse_args()
devil_chromium.Initialize()
return ProcessGooglePlayServices(args.repository,
|
args.out_dir,
args.config_file,
args.is_extracted_repo)
def ProcessGooglePlayServices(repo, out_dir, config_path, is_extracted_repo):
config = utils.ConfigParser(config_path)
tmp_root = tempfile.mkdtemp()
try:
    tmp_paths = _SetupTempDir(tmp_root)
if is_extracted_repo:
_ImportFromExtractedRepo(config, tmp_paths, repo)
else:
_ImportFromAars(config, tmp_paths, repo)
_GenerateCombinedJar(tmp_paths)
_ProcessResources(config, tmp_paths, repo)
_BuildOutput(config, tmp_paths, out_dir)
finally:
shutil.rmtree(tmp_root)
return 0
def _SetupTempDir(tmp_root):
tmp_paths = {
'root': tmp_root,
'imported_clients': os.path.join(tmp_root, 'imported_clients'),
'extracted_jars': os.path.join(tmp_root, 'jar'),
'combined_jar': os.path.join(tmp_root, 'google-play-services.jar'),
}
os.mkdir(tmp_paths['imported_clients'])
os.mkdir(tmp_paths['extracted_jars'])
return tmp_paths
def _SetupOutputDir(out_dir):
out_paths = {
'root': out_dir,
'res': os.path.join(out_dir, 'res'),
'jar': os.path.join(out_dir, 'google-play-services.jar'),
'stub': os.path.join(out_dir, 'stub'),
}
shutil.rmtree(out_paths['jar'], ignore_errors=True)
shutil.rmtree(out_paths['res'], ignore_errors=True)
shutil.rmtree(out_paths['stub'], ignore_errors=True)
return out_paths
def _MakeWritable(dir_path):
for root, dirs, files in os.walk(dir_path):
for path in itertools.chain(dirs, files):
st = os.stat(os.path.join(root, path))
os.chmod(os.path.join(root, path), st.st_mode | stat.S_IWUSR)
# E.g. turn "base_1p" into "base"
def _RemovePartySuffix(client):
return client[:-3] if client[-3:] == '_1p' else client
def _ImportFromAars(config, tmp_paths, repo):
for client in config.clients:
client_name = _RemovePartySuffix(client)
aar_name = 'client_' + client + '.aar'
aar_path = os.path.join(repo, client_name, aar_name)
aar_out_path = os.path.join(tmp_paths['imported_clients'], client)
_ExtractAll(aar_path, aar_out_path)
client_jar_path = os.path.join(aar_out_path, 'classes.jar')
_ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
def _ImportFromExtractedRepo(config, tmp_paths, repo):
# Import the clients
try:
for client in config.clients:
client_out_dir = os.path.join(tmp_paths['imported_clients'], client)
shutil.copytree(os.path.join(repo, client), client_out_dir)
client_jar_path = os.path.join(client_out_dir, 'classes.jar')
_ExtractAll(client_jar_path, tmp_paths['extracted_jars'])
finally:
_MakeWritable(tmp_paths['imported_clients'])
def _GenerateCombinedJar(tmp_paths):
out_file_name = tmp_paths['combined_jar']
working_dir = tmp_paths['extracted_jars']
cmd_helper.Call(['jar', '-cf', out_file_name, '-C', working_dir, '.'])
def _ProcessResources(config, tmp_paths, repo):
LOCALIZED_VALUES_BASE_NAME = 'values-'
locale_whitelist = set(config.locale_whitelist)
# The directory structure here is:
# <imported_clients temp dir>/<client name>_1p/res/<res type>/<res file>.xml
for client_dir in os.listdir(tmp_paths['imported_clients']):
client_prefix = _RemovePartySuffix(client_dir) + '_'
res_path = os.path.join(tmp_paths['imported_clients'], client_dir, 'res')
if not os.path.isdir(res_path):
continue
for res_type in os.listdir(res_path):
res_type_path = os.path.join(res_path, res_type)
if res_type.startswith('drawable'):
shutil.rmtree(res_type_path)
continue
if res_type.startswith(LOCALIZED_VALUES_BASE_NAME):
dir_locale = res_type[len(LOCALIZED_VALUES_BASE_NAME):]
if dir_locale not in locale_whitelist:
shutil.rmtree(res_type_path)
continue
if res_type.startswith('values'):
# Beginning with v3, resource file names are not necessarily unique, and
# would overwrite each other when merged at build time. Prefix each
# "values" resource file with its client name.
for res_file in os.listdir(res_type_path):
os.rename(os.path.join(res_type_path, res_file),
os.path.join(res_type_path, client_prefix + res_file))
# Reimport files from the whitelist.
for res_path in config.resource_whitelist:
for whitelisted_file in glob.glob(os.path.join(repo, res_path)):
resolved_file = os.path.relpath(whitelisted_file, repo)
rebased_res = os.path.join(tmp_paths['imported_clients'], resolved_file)
if not os.path.exists(os.path.dirname(rebased_res)):
os.makedirs(os.path.dirname(rebased_res))
shutil.copy(os.path.join(repo, whi
|
coreboot/chrome-ec
|
extra/cr50_rma_open/cr50_rma_open.py
|
Python
|
bsd-3-clause
| 26,490
| 0.000227
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Used to access the cr50 console and handle RMA Open
"""Open cr50 using RMA authentication.
Run RMA Open to enable CCD on Cr50. The utility can be used to get a
url that will generate an authcode to open cr50. It can also be used to
try opening cr50 with the generated authcode.
The last challenge is the only valid one, so don't generate a challenge
10 times and then use the first URL. You can only use the last one.
For RMA Open:
Connect suzyq to the dut and your workstation.
Check the basic setup with
sudo python cr50_rma_open.py -c
If the setup is broken. Follow the debug print statements to try to fix
the error. Rerun until the script says Cr50 setup ok.
After the setup is verified, run the following command to generate the
challenge url
sudo python cr50_rma_open.py -g -i $HWID
Go to the URL from by that command to generate an authcode. Once you have
the authcode, you can use it to open cr50.
sudo python cr50_rma_open.py -a $AUTHCODE
If for some reason hardware write protect doesn't get disabled during rma
open or gets enabled at some point the script can be used to disable
write protect.
sudo python cr50_rma_open.py -w
When prepping devices for the testlab, you need to enable testlab mode.
Prod cr50 images can't enable testlab mode. If the device is running a
prod image, you can skip this step.
sudo python cr50_rma_open.py -t
"""
import argparse
import glob
import logging
import re
import subprocess
import sys
import time
import serial
SCRIPT_VERSION = 5
CCD_IS_UNRESTRICTED = 1 << 0
WP_IS_DISABLED = 1 << 1
TESTLAB_IS_ENABLED = 1 << 2
RMA_OPENED = CCD_IS_UNRESTRICTED | WP_IS_DISABLED
URL = ('https://www.google.com/chromeos/partner/console/cr50reset?'
'challenge=%s&hwid=%s')
RMA_SUPPORT_PROD = '0.3.3'
RMA_SUPPORT_PREPVT = '0.4.5'
DEV_MODE_OPEN_PROD = '0.3.9'
DEV_MODE_OPEN_PREPVT = '0.4.7'
TESTLAB_PROD = '0.3.10'
CR50_USB = '18d1:5014'
CR50_LSUSB_CMD = ['lsusb', '-vd', CR50_USB]
ERASED_BID = 'ffffffff'
DEBUG_MISSING_USB = """
Unable to find Cr50 Device 18d1:5014
DEBUG MISSING USB:
- Make sure suzyq is plugged into the correct DUT port
- Try flipping the cable
- unplug the cable for 5s then plug it back in
"""
DEBUG_DEVICE = """
DEBUG DEVICE COMMUNICATION:
Issues communicating with %s
A 18d1:5014 device exists, so make sure you have selected the correct
/dev/ttyUSB
"""
DEBUG_SERIALNAME = """
DEBUG SERIALNAME:
Found the USB device, but can't match the usb serialname. Check the
serialname you passed into cr50_rma_open or try running without a
serialname.
"""
DEBUG_CONNECTION = """
DEBUG CONNECTION:
    Found the USB device but can't communicate with any of the consoles.
Try Running cr50_rma_open again. If it still fails unplug the ccd cable
for 5 seconds and plug it back in.
"""
DEBUG_TOO_MANY_USB_DEVICES = """
DEBUG SELECT USB:
More than one cr50 usb device was found. Disconnect all but one device
or use the -s option with the correct usb serialname.
"""
DEBUG_ERASED_BOARD_ID = """
DEBUG ERASED BOARD ID:
If you are using a prePVT device run
/usr/share/cros/cr50-set-board-id.sh proto
If you are running a MP device, please talk to someone.
"""
DEBUG_AUTHCODE_MISMATCH = """
DEBUG AUTHCODE MISMATCH:
- Check the URL matches the one generated by the last cr50_rma_open
run.
- Check you used the correct authcode.
- Make sure the cr50 version is greater than 3.3.
- try generating another URL by rerunning the generate command and
rerunning the process.
"""
DEBUG_DUT_CONTROL_OSERROR = """
Run from chroot if you are trying to use a /dev/pts ccd servo console
"""
class RMAOpen(object):
"""Used to find the cr50 console and run RMA open"""
ENABLE_TESTLAB_CMD = 'ccd testlab enabled\n'
def __init__(self, device=None, usb_serial=None, servo_port=None, ip=None):
self.servo_port = servo_port if servo_port else '9999'
self.ip = ip
if device:
self.set_cr50_device(device)
elif servo_port:
self.find_cr50_servo_uart()
else:
self.find_cr50_device(usb_serial)
logging.info('DEVICE: %s', self.device)
self.check_version()
self.print_platform_info()
logging.info('Cr50 setup ok')
self.update_ccd_state()
self.using_ccd = self.device_is_running_with_servo_ccd()
def _dut_control(self, control):
"""Run dut-control and return the response"""
try:
cmd = ['dut-control', '-p', self.servo_port, control]
return subprocess.check_output(cmd, encoding='utf-8').strip()
except OSError:
logging.warning(DEBUG_DUT_CONTROL_OSERROR)
raise
def find_cr50_servo_uart(self):
"""Save the device used for the console.
Find the console and configure it, so it can be used with this script.
"""
self._dut_control('cr50_uart_timestamp:off')
self.device = self._dut_control('cr50_uart_pty').split(':')[-1]
def set_cr50_device(self, device):
"""Save the device used for the console"""
self.device = device
def send_cmd_get_output(self, cmd, nbytes=0):
"""Send a cr50 command and get the output
Args:
cmd: The cr50 command string
nbytes: The number of bytes to read from the console. If 0 read all
of the console output.
Returns:
The command output
"""
try:
ser = serial.Serial(self.device, timeout=1)
except OSError:
logging.warning('Permission denied %s', self.device)
logging.warning('Try running cr50_rma_open with sudo')
raise
write_cmd = cmd + '\n\n'
ser.write(write_cmd.encode('utf-8'))
if nbytes:
output = ser.read(nbytes)
else:
output = ser.readall()
ser.close()
output = output.decode('utf-8').strip() if output else ''
# Return only the command output
split_cmd = cmd + '\r'
if cmd and split_cmd in output:
return ''.join(output.rpartition(split_cmd)[1::]).split('>')[0]
return output
def device_is_running_with_servo_ccd(self):
"""Return True if the device is a servod ccd console"""
# servod uses /dev/pts consoles. Non-servod uses /dev/ttyUSBX
if '/dev/pts' not in self.device:
return False
        # If cr50 doesn't show rdd is connected, the device must not be
# a ccd device
if 'Rdd: connected' not in self.send_cmd_get_output('ccdstate'):
return False
# Check if the servod is running with ccd. This requires the script
# is run in the chroot, so run it last.
if 'ccd_cr50' not in self._dut_control('servo_type'):
return False
logging.info('running through servod ccd')
return True
def get_rma_challenge(self):
"""Get the rma_auth challenge
There are two challenge formats
"
ABEQ8 UGA4F AVEQP SHCKV
DGGPR N8JHG V8PNC LCHR2
T27VF PRGBS N3ZXF RCCT2
UBMKP ACM7E WUZUA A4GTN
"
and
"
generated challenge:
CBYRYBEMH2Y75TC...rest of challenge
"
support extracting the challenge from both.
Returns:
The RMA challenge with all whitespace removed.
"""
output = self.send_cmd_get_output('rma_auth').strip()
logging.info('rma_auth output:\n%s', output)
# Extract the challenge from the console output
if 'generated challenge:' in output:
return output.split('generated challenge:')[-1].strip()
challenge = ''.join(re.findall(r' \S{5}' * 4, output))
# Remove all whitespace
return re.sub(r'\s', '', challenge)
def generate_challenge_url(self, hwid):
"""Get the rma_auth challenge
Returns:
The RMA challenge with all whit
|
vfilimonov/pydatastream
|
pydatastream/pydatastream.py
|
Python
|
mit
| 32,857
| 0.002404
|
""" pydatastream main module
(c) Vladimir Filimonov, 2013 - 2021
"""
import warnings
import json
import math
from functools import wraps
import requests
import pandas as pd
###############################################################################
_URL = 'https://product.datastream.com/dswsclient/V1/DSService.svc/rest/'
_FLDS_XREF = ('DSCD,EXMNEM,GEOGC,GEOGN,IBTKR,INDC,INDG,INDM,INDX,INDXEG,'
'INDXFS,INDXL,INDXS,ISIN,ISINID,LOC,MNEM,NAME,SECD,TYPE'.split(','))
_FLDS_XREF_FUT = ('MNEM,NAME,FLOT,FEX,GEOGC,GEOGN,EXCODE,LTDT,FUTBDATE,PCUR,ISOCUR,'
'TICKS,TICKV,TCYCLE,TPLAT'.split(','))
_ASSET_TYPE_CODES = {'BD': 'Bonds & Convertibles',
'BDIND': 'Bond Indices & Credit Default Swaps',
'CMD': 'Commodities',
'EC': 'Economics',
'EQ': 'Equities',
'EQIND': 'Equity Indices',
'EX': 'Exchange Rates',
'FT': 'Futures',
'INT': 'Interest Rates',
'INVT': 'Investment Trusts',
'OP': 'Options',
'UT': 'Unit Trusts',
'EWT': 'Warrants',
'NA': 'Not available'}
###############################################################################
_INFO = """PyDatastream documentation (GitHub):
https://github.com/vfilimonov/pydatastream
Datastream Navigator:
http://product.datastream.com/navigator/
Official support
https://customers.reuters.com/sc/Contactus/simple?product=Datastream&env=PU&TP=Y
Webpage for testing REST API requests
http://product.datastream.com/dswsclient/Docs/TestRestV1.aspx
Documentation for DSWS API
http://product.datastream.com/dswsclient/Docs/Default.aspx
Datastream Web Service Developer community
https://developers.refinitiv.com/eikon-apis/datastream-web-service
"""
###############################################################################
###############################################################################
def _convert_date(date):
""" Convert date to YYYY-MM-DD """
if date is None:
return ''
if isinstance(date, str) and (date.upper() == 'BDATE'):
return 'BDATE'
return pd.Timestamp(date).strftime('%Y-%m-%d')
def _parse_dates(dates):
""" Parse dates
Example:
/Date(1565817068486) -> 2019-08-14T21:11:08.486000000
/Date(1565568000000+0000) -> 2019-08-12T00:00:00.000000000
"""
if dates is None:
return None
if isinstance(dates, str):
return pd.Timestamp(_parse_dates([dates])[0])
res = [int(_[6:(-7 if '+' in _ else -2)]) for _ in dates]
return pd.to_datetime(res, unit='ms').values
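# Hedged self-check sketch (added for illustration; not part of the original
# module). The expected values follow the docstrings of the two helpers above.
if __name__ == '__main__':
    assert _convert_date(None) == ''
    assert _convert_date('2019-08-14') == '2019-08-14'
    assert str(_parse_dates('/Date(1565568000000+0000)/')) == '2019-08-12 00:00:00'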
class DatastreamException(Exception):
""" Exception class for Datastream """
###############################################################################
def lazy_property(fn):
""" Lazy-evaluated property of an object """
attr_name = '__lazy__' + fn.__name__
@property
@wraps(fn)
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
###############################################################################
# Main Datastream class
###############################################################################
class Datastream():
""" Python interface to the Refinitiv Datastream API via Datastream Web
Services (DSWS).
"""
def __init__(self, username, password, raise_on_error=True, proxy=None, **kwargs):
"""Establish a connection to the Python interface to the Refinitiv Datastream
(former Thomson Reuters Datastream) API via Datastream Web Services (DSWS).
username / password - credentials for the DSWS account.
raise_on_error - If True then error request will raise a "DatastreamException",
otherwise either empty dataframe or partially
retrieved data will be returned
proxy - URL for the proxy server. Valid values:
(a) None: no proxy is used
(b) string of format "host:port" or "username:password@host:port"
Note: credentials will be saved in memory. In case if this is not
desirable for security reasons, call the constructor having None
instead of values and manually call renew_token(username, password)
when needed.
A custom REST API url (if necessary for some reasons) could be provided
via "url" parameter.
"""
self.raise_on_error = raise_on_error
self.last_request = None
self.last_metadata = None
self._last_response_raw = None
# Setting up proxy parameters if necessary
if isinstance(proxy, str):
self._proxy = {'http': proxy, 'https': proxy}
elif proxy is None:
self._proxy = None
else:
raise ValueError('Proxy parameter should be either None or string')
self._url = kwargs.pop('url', _URL)
self._username = username
self._password = password
# request new token
self.renew_token(username, password)
###########################################################################
@staticmethod
def info():
""" Some useful links """
print(_INFO)
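    # Hedged usage sketch (added comment; not from the original source):
    #
    #     ds = Datastream(username='<DSWS user>', password='<DSWS password>')
    #     ds.info()
    #
    # The credential placeholders are assumptions; any valid DSWS account
    # works, and renew_token() is called automatically by the constructor.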
###########################################################################
def _api_post(self, method, request):
""" Call to the POST method of DSWS API """
url = self._url + method
self.last_request = {'url': url, 'request': request, 'error': None}
self.last_metadata = None
try:
res = requests.post(url, json=request, proxies=self._proxy)
self.last_request['response'] = res.text
except Exception as e:
self.last_request['error'] = str(e)
raise
try:
response = self.last_request['response'] = json.loads(self.last_request['response'])
except json.JSONDecodeError as e:
raise DatastreamException('Server response could not be parsed') from e
if 'Code' in response:
code = response['Code']
if response['SubCode'] is not None:
code += '/' + response['SubCode']
errormsg = f'{code}: {response["Message"]}'
self.last_request['error'] = errormsg
raise DatastreamException(errormsg)
return self.last_request['response']
###########################################################################
def renew_token(self, username=None, password=None):
""" Request new token from the server """
if username is None or password is None:
warnings.warn('Username or password is not provided - could not renew token')
return
data = {"UserName": username, "Password": password}
self._token = dict(self._api_post('GetToken', data))
self._token['TokenExpiry'] = _parse_dates(self._token['TokenExpiry']).tz_localize('UTC')
        # Token is invalidated 15 minutes before expiration time
# Note: According to https://github.com/vfilimonov/pydatastream/issues/27
# tokens do not always respect the (as of now 24 hours) expiry time
# So for this reason I limit the token life at 6 hours.
self._token['RenewTokenAt'] = min(self._token['TokenExpiry'] - pd.Timedelta('15m'),
pd.Timestamp.utcnow() + pd.Timedelta('6H'))
@property
def _token_is_expired(self):
if self._token is None:
return True
if pd.Timestamp.utcnow() > self._token['RenewTokenAt']:
return True
return False
@property
def token(self):
""" Return actual token and renew it if necessary. """
if self._token_is_expired:
self.renew_token(self._username, self._password)
return self._token['TokenValue']
############
|
renqianluo/DLT2T
|
DLT2T/utils/registry_test.py
|
Python
|
apache-2.0
| 7,045
| 0.008375
|
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DLT2T.registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from DLT2T.utils import modality
from DLT2T.utils import registry
from DLT2T.utils import t2t_model
import tensorflow as tf
# pylint: disable=unused-variable
class ModelRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testT2TModelRegistration(self):
@registry.register_model
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def testNamedRegistration(self):
@registry.register_model("model2")
class MyModel1(t2t_model.T2TModel):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def testNonT2TModelRegistration(self):
@registry.register_model
def model_fn():
pass
model = registry.model("model_fn")
self.assertTrue(model is model_fn)
def testUnknownModel(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.model("not_registered")
def testDuplicateRegistration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def testListModels(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual(set(["m1", "m2"]), set(registry.list_models()))
def testSnakeCase(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class HParamRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testHParamSet(self):
@registry.register_hparams
def my_hparams_set():
pass
@registry.register_ranged_hparams
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("my_hparams_set") is my_hparams_set)
self.assertTrue(
registry.ranged_hparams("my_hparams_range") is my_hparams_range)
def testNamedRegistration(self):
@registry.register_hparams("a")
def my_hparams_set():
pass
@registry.register_ranged_hparams("a")
def my_hparams_range(_):
pass
self.assertTrue(registry.hparams("a") is my_hparams_set)
self.assertTrue(registry.ranged_hparams("a") is my_hparams_range)
def testUnknownHparams(self):
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.hparams("not_registered")
with self.assertRaisesRegexp(LookupError, "never registered"):
registry.ranged_hparams("not_registered")
def testDuplicateRegistration(self):
@registry.register_hparams
def hp1():
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_hparams("hp1")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
with self.assertRaisesRegexp(LookupError, "already registered"):
@registry.register_ranged_hparams("rhp1")
def rhp2(_):
pass
def testListHparams(self):
@registry.register_hparams
def hp1():
pass
@registry.register_hparams("hp2_named")
def hp2():
pass
@registry.register_ranged_hparams
def rhp1(_):
pass
@registry.register_ranged_hparams("rhp2_named")
def rhp2(_):
pass
self.assertSetEqual(set(["hp1", "hp2_named"]), set(registry.list_hparams()))
self.assertSetEqual(
set(["rhp1", "rhp2_named"]), set(registry.list_ranged_hparams()))
def testRangeSignatureCheck(self):
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad():
pass
with self.assertRaisesRegexp(ValueError, "must take a single argument"):
@registry.register_ranged_hparams
def rhp_bad2(a, b): # pylint: disable=unused-argument
pass
class ModalityRegistryTest(tf.test.TestCase):
def setUp(self):
registry._reset()
def testModalityRegistration(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
self.assertTrue(
registry.symbol_modality("my_symbol_modality") is MySymbolModality)
self.assertTrue(
registry.audio_modality("my_audio_modality") is MyAudioModality)
self.assertTrue(
registry.image_modality("my_image_modality") is MyImageModality)
self.assertTrue(
registry.class_label_modality("my_class_label_modality") is
MyClassLabelModality)
def testDefaultNameLookup(self):
@registry.register_symbol_modality("default")
    class MyDefaultModality(modality.Modality):
pass
|
self.assertTrue(registry.symbol_modality() is MyDefaultModality)
def testList(self):
@registry.register_symbol_modality
class MySymbolModality(modality.Modality):
pass
@registry.register_audio_modality
class MyAudioModality(modality.Modality):
pass
@registry.register_image_modality
class MyImageModality(modality.Modality):
pass
@registry.register_class_label_modality
class MyClassLabelModality(modality.Modality):
pass
expected = [
"symbol:my_symbol_modality", "audio:my_audio_modality",
"image:my_image_modality", "class_label:my_class_label_modality"
]
self.assertSetEqual(set(registry.list_modalities()), set(expected))
if __name__ == "__main__":
tf.test.main()
|
matheusmonte/PythonScripts
|
Salario078.py
|
Python
|
mit
| 62
| 0.016129
|
salario = float(raw_input())
print(salario + (salario * 0.78))
|
TechJournalist/stackalytics
|
tests/unit/test_web_utils.py
|
Python
|
apache-2.0
| 3,954
| 0.000506
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from dashboard import web
class TestWebUtils(testtools.TestCase):
def setUp(self):
super(TestWebUtils, self).setUp()
def test_make_commit_message(self):
message = '''
During finish_migration the manager calls initialize_connection but doesn't
update the block_device_mapping with the potentially new connection_info
returned.
Fixes bug 1076801
Change-Id: Ie49ccd2138905e178843b375a9b16c3fe572d1db'''
module = 'test'
record = {
'message': message,
'module': module,
}
expected = '''\
During finish_migration the manager calls initialize_connection but doesn't \
update the block_device_mapping with the potentially new connection_info \
returned.
Fixes bug <a href="https://bugs.launchpad.net/bugs/1076801" class="ext_link">\
1076801</a>
''' + ('Change-Id: <a href="https://review.openstack.org/#q,'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db,n,z" class="ext_link">'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db</a>')
observed = web.make_commit_message(record)
self.assertEqual(expected, observed,
'Commit message should be processed correctly')
def test_make_commit_message_blueprint_link(self):
message = '''
Implemented new driver for Cinder <:
Implements Blueprint super-driver
Change-Id: Ie49ccd2138905e178843b375a9b16c3fe572d1db'''
module = 'cinder'
record = {
'message': message,
'module': module,
}
expected = '''\
Implemented new driver for Cinder <:
Implements Blueprint ''' + (
            '<a href="https://blueprints.launchpad.net/cinder/+spec/'
'super-driver" class="ext_link">super-driver</a>' + '\n' +
'Change-Id: <a href="https://review.openstack.org/#q,'
            'Ie49ccd2138905e178843b375a9b16c3fe572d1db,n,z" class="ext_link">'
'Ie49ccd2138905e178843b375a9b16c3fe572d1db</a>')
observed = web.make_commit_message(record)
self.assertEqual(expected, observed,
'Commit message should be processed correctly')
@mock.patch('dashboard.web.get_vault')
@mock.patch('dashboard.web.get_user_from_runtime_storage')
def test_make_page_title(self, user_patch, vault_patch):
memory_storage_mock = mock.Mock()
memory_storage_mock.get_original_company_name = mock.Mock(
return_value='Mirantis'
)
vault_patch.return_value = {'memory_storage': memory_storage_mock}
user_patch.return_value = {'user_name': 'John Doe'}
self.assertEqual('OpenStack community contribution in all releases',
web.make_page_title('', '', '', 'all'))
self.assertEqual('OpenStack community contribution in Havana release',
web.make_page_title('', '', '', 'Havana'))
self.assertEqual('Mirantis contribution in Havana release',
web.make_page_title('Mirantis', '', '', 'Havana'))
self.assertEqual('John Doe contribution in Havana release',
web.make_page_title('', 'john_doe', '', 'Havana'))
self.assertEqual(
'John Doe (Mirantis) contribution to neutron in Havana release',
web.make_page_title('Mirantis', 'John Doe', 'neutron', 'Havana'))
|
decvalts/landlab
|
landlab/components/stream_power/stream_power.py
|
Python
|
mit
| 17,514
| 0.010049
|
from __future__ import print_function
import numpy as np
from landlab import ModelParameterDictionary
from landlab.core.model_parameter_dictionary import MissingKeyError, ParameterValueError
from landlab.field.scalar_data_fields import FieldError
from landlab.grid.base import BAD_INDEX_VALUE
class StreamPowerEroder(object):
"""
This component is now verified stable for simple m,n specified, followup-to-
Fastscape-flow-routing cases. Threshold appears stable.
The more exciting cases (e.g., specifying a,b,c; forcing with W or Q) are
untested, but should run.
There is as yet no explicit stabilization check on the timestep. If your
run destabilizes, try reducing dt.
See, e.g., ./examples/simple_sp_driver.py
DEJH Sept 2013, major modifications Sept 14.
This component *should* run on any grid, but untested.
"""
def __init__(self, grid, params):
self.initialize(grid, params)
#This draws attention to a potential problem. It will be easy to have modules update z, but because noone "owns" the data, to forget to also update dz/dx...
#How about a built in grid utility that updates "derived" data (i.e., using only grid fns, e.g., slope, curvature) at the end of any given tstep loop?
#Or an explicit flagging system for all variables in the modelfield indicating if they have been updated this timestep. (Currently implemented)
#Or wipe the existence of any derived grid data at the end of a timestep entirely, so modules find they don't have it next timestep.
def initialize(self, grid, params_file):
'''
params_file is the name of the text file containing the parameters
needed for this stream power component.
Module erodes where channels are, implemented as
E = K * A**m * S**n - sp_crit,
and if E<0, E=0.
If 'use_W' is declared and True, the module instead implements:
E = K * A**m * S**n / W - sp_crit
***Parameters for input file***
OBLIGATORY:
K_sp -> positive float, the prefactor. This is defined per unit
time, not per tstep. Type the string 'array' to cause the
component's erode method to look for an array of values of K
(see documentation for 'erode').
ALTERNATIVES:
*either*
m_sp -> positive float, the power on A
and
n_sp -> positive float, the power on S
*or*
sp_type -> String. Must be one of 'Total', 'Unit', or 'Shear_stress'.
and (following Whipple & Tucker 1999)
a_sp -> +ve float. The power on the SP/shear term to get the erosion
rate.
b_sp -> +ve float. The power on discharge to get width, "hydraulic
geometry". Unnecessary if sp_type='Total'.
c_sp -> +ve float. The power on area to get discharge, "basin
hydrology".
... If 'Total', m=a*c, n=a.
... If 'Unit', m=a*c*(1-b), n=a.
... If 'Shear_stress', m=2*a*c*(1-b)/3, n = 2*a/3.
OPTIONS:
threshold_sp -> +ve float; the threshold sp_crit. Defaults to 0.
This threshold is assumed to be in "stream power" units, i.e.,
if 'Shear_stress', the value should be tau**a.
dt -> +ve float. If set, this is the fixed timestep for this
component. Can be overridden easily as a parameter in erode().
If not set (default), this parameter MUST be set in erode().
use_W -> Bool; if True, component will look for node-centered data
describing channel width in grid.at_node['channel_width'], and
use it to implement incision ~ stream power per unit width.
Defaults to False. If you set sp_m and sp_n, follows the
equation given above. If you set sp_type, it will be ignored if
'Total', but used directly if you want 'Unit' or 'Shear_stress'.
use_Q -> Bool. If true, the equation becomes E=K*Q**m*S**n.
Effectively sets c=1 in Wh&T's 1999 derivation, if you are
setting m and n through a, b, and c.
'''
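# Illustrative worked example (not part of the original component): applying the
# sp_type rules documented above with a_sp=1.5, b_sp=0.5, c_sp=1.0 gives, for
# 'Shear_stress', m = 2*1.5*1.0*(1.-0.5)/3. = 0.5 and n = 2*1.5/3. = 1.0, i.e. the
# familiar E = K * A**0.5 * S law; the same a, b, c under 'Total' would instead give
# m = a*c = 1.5 and n = a = 1.5.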
self.grid = grid
self.fraction_gradient_change = 1.
self.link_S_with_trailing_blank = np.zeros(grid.number_of_links+1) #needs to be filled with values in execution
self.count_active_links = np.zeros_like(self.link_S_with_trailing_blank, dtype=int)
self.count_active_links[:-1] = 1
inputs = ModelParameterDictionary(params_file)
try:
self._K_unit_time = inputs.read_float('K_sp')
except ParameterValueError: #it was a string
self.use_K = True
else:
self.use_K = False
try:
self.sp_crit = inputs.read_float('threshold_sp')
self.set_threshold = True #flag for sed_flux_dep_incision to see if the threshold was manually set.
print("Found a threshold to use: ", self.sp_crit)
except MissingKeyError:
self.sp_crit = 0.
self.set_threshold = False
try:
self.tstep = inputs.read_float('dt')
except MissingKeyError:
pass
try:
self.use_W = inputs.read_bool('use_W')
except MissingKeyError:
self.use_W = False
try:
self.use_Q = inputs.read_bool('use_Q')
except MissingKeyError:
self.use_Q = False
try:
self._m = inputs.read_float('m_sp')
except MissingKeyError:
self._type = inputs.read_string('sp_type')
self._a = inputs.read_float('a_sp')
try:
self._b = inputs.read_float('b_sp')
except MissingKeyError:
if self.use_W:
self._b = 0.
else:
raise NameError('b was not set')
try:
self._c = inputs.read_float('c_sp')
except MissingKeyError:
if self.use_Q:
self._c = 1.
else:
raise NameError('c was not set')
if self._type == 'Total':
self._n = self._a
self._m = self._a*self._c #==_a if use_Q
elif self._type == 'Unit':
self._n = self._a
self._m = self._a*self._c*(1.-self._b) #==_a iff use_Q&use_W etc
elif self._type == 'Shear_stress':
self._m = 2.*self._a*self._c*(1.-self._b)/3.
self._n = 2.*self._a/3.
else:
raise MissingKeyError('Not enough information was provided on the exponents to use!')
else:
self._n = inputs.read_float('n_sp')
#m and n will always be set, but care needs to be taken to include Q and W directly if appropriate
self.stream_power_erosion = grid.zeros(centering='node')
##Flags for self-building of derived data:
#self.made_link_gradients = False
##This will us the MPD once finalized
##Now perform checks for existence of needed data items:
#try:
# _ = self.grid.at_link['planet_surface__derivative_of_elevation']
#except FieldError:
# self.made_link_gradients = True
def erode(self, grid, dt, node_elevs='topographic__elevation',
node_drainage_areas='drainage_area',
flow_receiver='flow_receiver',
node_order_upstream='upstream_ID_order',
slopes_at_nodes='topographic__steepest_slope',
link_node_mapping='links_to_flow_receiver',
link_slopes=None, slopes_from_elev
|
s=None,
W_if_used=None, Q_if_used=None, K_if_used=None):
"""
A simple, explicit implementation of a stream power algorithm.
*grid* & *dt* are the grid object and timestep (floa
|
t) respectively.
*node_elevs* is the elevations on the grid, either a field string or
n
|
DTOcean/dtocean-core
|
example_data/fixed_tidal_fixed_layout_10_scenario.py
|
Python
|
gpl-3.0
| 31,371
| 0.008288
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 09 10:39:38 2015
@author: 108630
"""
import os
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal, norm
from dtocean_core.utils.moorings import get_moorings_tables
# Note that the electrical folder in the test_data directory should be
# placed in the same folder as this file
this_dir = os.path.dirname(os.path.realpath(__file__))
elec_dir = os.path.join(this_dir, "electrical")
moor_dir = os.path.join(this_dir, "moorings")
## CONSTANTS
gravity = 9.80665 #gravity
seaden = 1025.0 #sea water density
airden = 1.226 #air density
#cylinder drag coefficients
dragcoefcyl = [[0.0, 0.0, 1e-5, 1e-2],
[1e4, 1.2, 1.2, 1.15],
[2e4, 1.2, 1.2, 1.05],
[3e4, 1.2, 1.2, 0.87],
[4e4, 1.2, 1.15, 0.82],
[5e4, 1.2, 1.0, 0.8],
[6e4, 1.2, 0.9, 0.8],
[7e4, 1.2, 0.85, 0.83],
[8e4, 1.2, 0.7, 0.9],
[9e4, 1.2, 0.65, 0.94],
[1e5, 1.2, 0.6, 0.95],
[2e5, 1.2, 0.35, 1.02],
[3e5, 1.15, 0.3, 1.03],
[4e5, 0.95, 0.33, 1.05],
[5e5, 0.6, 0.35, 1.06],
[6e5, 0.35, 0.38, 1.07],
[7e5, 0.29, 0.4, 1.07],
[8e5, 0.31, 0.43, 1.08],
[9e5, 0.33, 0.45, 1.08],
[1e6, 0.35, 0.47, 1.08],
[2e6, 0.54, 0.53, 1.08],
[3e6, 0.62, 0.62, 1.08],
[4e6, 0.67, 0.67, 1.08]]
#cylinder wake amplification factors
wakeampfactorcyl = [[0.0, 2.0, 2.0],
[5.0, 0.4, 0.8],
[10.0, 0.78, 1.3],
[15.0, 1.07, 1.4],
[20.0, 1.25, 1.25],
[25.0, 1.2, 1.2],
[30.0, 1.18, 1.18],
[35.0, 1.12, 1.12],
[40.0, 1.1, 1.1],
[45.0, 1.06, 1.06],
[50.0, 1.03, 1.03],
[55.0, 1.01, 1.01],
[60.0, 1.0, 1.0]]
#rectangular section wind drag coefficients
winddragcoefrect = [[4.0, 1.2, 1.3, 1.4, 1.5, 1.6, 1.6, 1.6],
[3.0, 1.1, 1.2, 1.25, 1.35, 1.4, 1.4, 1.4],
[2.0, 1.0, 1.05, 1.1, 1.15, 1.2, 1.2, 1.2],
[1.5, 0.95, 1.0, 1.05, 1.1, 1.15, 1.15, 1.15],
[1.0, 0.9, 0.95, 1.0, 1.05, 1.1, 1.2, 1.4],
[0.6667, 0.8, 0.85, 0.9, 0.95, 1.0, 1.0, 1.0],
[0.5, 0.75, 0.75, 0.8, 0.85, 0.9, 0.9, 0.9],
[0.3333, 0.7, 0.75, 0.75, 0.75, 0.8, 0.8, 0.8],
[0.25, 0.7, 0.7, 0.75, 0.75, 0.75, 0.75, 0.75]]
#rectangular section current drag coefficients
currentdragcoefrect = [[10.0000, 1.88],
[5.0000, 1.95],
[3.3333, 2.06],
[2.5000, 2.24],
[2.0000, 2.39],
[1.6667, 2.6],
[1.4286, 2.73],
[1.2500, 2.5],
[1.1111, 2.31],
[1.0000, 2.19],
[0.9091, 2.06],
[0.8333, 1.95],
[0.7692, 1.87],
[0.7143, 1.8],
[0.6667, 1.73],
[0.6250, 1.67],
[0.5882, 1.63],
[0.5556, 1.58],
[0.5263, 1.52],
[0.5000, 1.49],
[0.4762, 1.46],
[0.4545, 1.44],
[0.4348, 1.41],
[0.4167, 1.37],
[0.4000, 1.35],
[0.3846, 1.32],
[0.3704, 1.29],
[0.3571, 1.26],
[0.3448, 1.25],
[0.3333, 1.23],
[0.3226, 1.21],
[0.3125, 1.2],
[0.3030, 1.19],
[0.2941, 1.18],
[0.2857, 1.16],
[0.2778, 1.15],
[0.2703, 1.15],
[0.2632, 1.15],
[0.2564, 1.15],
[0.2500, 1.15]]
#rectangular section wave drift coefficients
driftcoeffloatrect = [[0.0, 0.0],
[0.1, 0.02],
[0.2, 0.06],
[0.3, 0.15],
[0.4, 0.28],
[0.5, 0.44],
[0.6, 0.60],
[0.7, 0.74],
[0.8, 0.84],
[0.9, 0.91],
[1.0, 0.94],
[1.1, 0.97],
[1.2, 0.98],
[1.3, 0.99],
[1.4, 1.0],
[1.5, 1.0]]
#rectangular section wave inertia coefficients
waveinertiacoefrect = [[10.0, 2.23],
[5.0, 1.98],
[2.0, 1.7],
[1.0, 1.51],
[0.5, 1.36],
[0.2, 1.21],
[0.1, 1.14]]
## LEASE AREA
startx = 1000.
endx = 2000.
dx = 10.
numx = int(float(endx - startx) / dx) + 1
starty = 0.
endy = 2500.
dy = 10.
numy = int(float(endy - starty) / dy) + 1
x = np.linspace(startx, endx, numx)
y = np.linspace(starty, endy, numy)
nx = len(x)
|
ny = len(y)
# Bathymetry
X, Y = np.meshgrid(x,y)
Z = np.zeros(X.shape) - 50.
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "loose sand"
strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y,
|
["layer 1"]]}
# Soil characteristics
max_temp = 10.
max_soil_res = 10.
target_burial_depth = 10
# Polygons
lease_area = [(startx, starty),
(endx, starty),
(endx, endy),
(startx, endy)]
#nogo_areas = [np.array([[50., 50.],[60., 50.],[60., 60.],[50., 60.]])]
nogo_areas = None
# Tidal time series
n_bins = 6
time_points = 48
t = np.linspace(0, 1, time_points)
rv = norm()
time_sin = np.sin(np.linspace(0, 4*np.pi, time_points))
time_scaled = time_sin * (1. / np.amax(time_sin))
xgrid, ygrid = np.meshgrid(x,y)
pos = np.dstack((xgrid, ygrid))
rv = multivariate_normal([500., 150.], [[max(x)*5., max(y)*2.],
[max(y)*2., max(x)*5.]])
u_max = 0.
v_max = 6.
ssh_max = 1.
TI = 0.1
grid_pdf = rv.pdf(pos).T
#u_scaled = grid_pdf * (u_max / np.amax(grid_pdf))
u_scaled = np.ones((nx, ny)) * u_max
v_scaled = np.ones((nx, ny)) * v_max
ssh_scaled = grid_pdf * (ssh_max / np.amax(grid_pdf))
u_arrays = []
v_arrays = []
ssh_arrays = []
for multiplier in time_scaled:
u_arrays.append(np.abs(u_scaled * multiplier))
v_arrays.append(np.abs(v_scaled * multiplier))
ssh_arrays.append(ssh_scaled * multiplier)
U = np.dstack(u_arrays)
V = np.dstack(v_arrays)
SSH = np.dstack(ssh_arrays)
TI = np.ones(SSH.shape) * TI
tidal_series_raw = {"values": {"U": U,
"V": V,
"SSH": SSH,
"TI": TI},
"coords": [x, y, t]}
xc = x[int(nx/2)]
yc = y[int(ny/2)]
tidal_point = (xc, yc)
# Tidal flow characteristics (hydro)
power_law_exponent = np.array([7.])
blockage_ratio = 1.
# Tidal flow characteristics (moorings)
max_10year_current = 6.
max_10year_current_dir = 0.
current_profile = "1/7 Power Law" #current profile alternatives: "Uniform"
# "1/7 Power Law"
# Wave characteristics
predominant_100year_wave_dir = 0.
max_100year_hs = 0.5
max_100year_tp = 10.
max_100year_gamma = 1.
# Wind characteristics
mean_100_year_wind_speed = 2.0
mean_100_year_wind_dir = 0.0
max_100_year_gust_speed = 6.8
max_100_year_gust_dir = 0.0
# Water level characteristics
|
openstack/neutron-lib
|
neutron_lib/api/definitions/segment.py
|
Python
|
apache-2.0
| 4,049
| 0
|
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import subnet
from neutron_lib import constants
from neutron_lib.db import constants as db_constants
SEGMENT_ID = 'segment_id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
NAME_LEN = db_constants.NAME_FIELD_SIZE
DESC_LEN = db_constants.DESCRIPTION_FIELD_SIZE
ALIAS = 'segment'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Segment'
API_PREFIX = ''
DESCRIPTION = 'Segments extension.'
UPDATED_TIMESTAMP = '2016-02-24T17:00:00-00:00'
RESOURCE_NAME = 'segment'
COLLECTION_NAME = RESOURCE_NAME + 's'
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'id': {
'allow_post': False,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:string': db_constants.PROJECT_ID_FIELD_SIZE
},
'is_visible': False},
'network_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
PHYSICAL_NETWORK: {
'allow_post': True,
'allow_put': False,
'default': constants.ATTR_NOT_SPECIFIED,
'validate': {
'type:string': provider_net.PHYSICAL_NETWORK_MAX_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
NETWORK_TYPE: {
'allow_post': True,
'allow_put': False,
'validate': {
'type:string': provider_net.NETWORK_TYPE_MAX_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
},
SEGMENTATION_ID: {
'allow_post': True,
'allow_put': False,
'default': co
|
nstants.ATTR_NOT_SPECIFIED,
'convert_to': converters.convert_to_
|
int,
'is_sort_key': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'default': constants.ATTR_NOT_SPECIFIED,
'validate': {
'type:string_or_none': NAME_LEN
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
}
},
subnet.COLLECTION_NAME: {
SEGMENT_ID: {
'allow_post': True,
'allow_put': False,
'default': None,
'validate': {
'type:uuid_or_none': None
},
'is_filter': True,
'is_sort_key': True,
'is_visible': True
}
}
}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = [
'standard-attr-description'
]
OPTIONAL_EXTENSIONS = [
# Use string instead of constant to avoid circulated import
'standard-attr-segment'
]
ACTION_STATUS = {}
|
wtsnjp/nlp100
|
chap07/k67.py
|
Python
|
unlicense
| 290
| 0.006897
|
#
# usage: python k67.py {alias}
#
import sys
import pymongo
def find_aliases(alias):
cn = pymongo.MongoClient().MusicBrainz.artist
return [a for a in cn.find({'aliases.name': alias})]
if __name__ == '__main__':
n = sys
|
.argv
|
[1]
for a in find_aliases(n):
print(a)
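# Illustrative note (document shape is an assumption, not taken from the script):
# the query {'aliases.name': alias} matches artist documents shaped roughly like
# {'name': 'Queen', 'aliases': [{'name': 'Queen', 'sort-name': 'Queen', ...}, ...]},
# i.e. it searches the nested alias names rather than the top-level artist name.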
|
barseghyanartur/django-currencies
|
currencies/context_processors.py
|
Python
|
bsd-3-clause
| 450
| 0
|
from currencies.models import Currency
def currencies(request):
currencies = Currency.objects.active()
if not request.session.get('currency'):
try:
currency = Currency.objects.get(is_default__exact=True)
|
except Currency.DoesNotExist:
currency = None
request.session['currency'] = currency
return {
'CURRENCIES': currencies,
'CURRENCY': request.session['cur
|
rency']
}
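# Illustrative sketch (assumption, not part of the original file): a context
# processor like this is typically enabled via the project settings, e.g.
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {
#         'context_processors': [
#             # ... the default processors ...
#             'currencies.context_processors.currencies',
#         ],
#     },
# }]
# after which templates can reference {{ CURRENCY }} and {{ CURRENCIES }} directly.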
|
annoviko/pyclustering
|
pyclustering/cluster/ema.py
|
Python
|
gpl-3.0
| 28,795
| 0.010662
|
"""!
@brief Cluster analysis algorithm: Expectation-Maximization Algorithm for Gaussian Mixture Model.
@details Implementation based on paper @cite article::ema::1.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
import random
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.kmeans import kmeans
from pyclustering.utils import pi, calculate_ellipse_description, euclidean_distance_square
from enum import IntEnum
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import patches
def gaussian(data, mean, covariance):
"""!
@brief Calculates the Gaussian for a dataset using the specified mean (mathematical expectation) and variance, or covariance in the case of
multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
"""
dimension = float(len(data[0]))
if dimension != 1.0:
inv_variance = numpy.linalg.pinv(covariance)
else:
inv_variance = 1.0 / covariance
divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
if divider != 0.0:
right_const = 1.0 / divider
else:
right_const = float('inf')
result = []
for point in data:
mean_delta = point - mean
point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
result.append(point_gaussian)
return result
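# Illustrative usage sketch (not part of the original module): for multi-dimensional
# data the function expects numpy arrays, e.g.
#   pts = numpy.array([[0.0, 0.0], [1.0, 1.0]])
#   densities = gaussian(pts, numpy.array([0.0, 0.0]), numpy.eye(2))
# which returns one density value per point in pts.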
class ema_init_type(IntEnum):
"""!
@brief Enumeration of initialization types for Expectation-Maximization algorithm.
"""
## Means are randomly taken from input dataset and variance or covariance is calculated based on
## spherical data that belongs to the chosen means.
RANDOM_INITIALIZATION = 0
## Two step initialization. The first is calculation of initial centers using K-Means++ method.
## The second is K-Means clustering using obtained centers in the first step. Obtained clusters
## and its centers are used for calculation of variance (covariance in case of multi-dimensional)
## data.
KMEANS_INITIALIZATION = 1
class ema_initializer():
"""!
@brief Provides services for preparing initial means and covariances for Expectation-Maximization algorithm.
@details Initialization strategy is defined by enumerator 'ema_init_type': random initialization and
kmeans with kmeans++ initialization. Here is an example of initialization using the kmeans strategy:
@code
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FAMOUS_SAMPLES
from pyclustering.cluster.ema import ema_initializer
sample = read_sample(FAMOUS_SAMPLES.SAMPLE_OLD_FAITHFUL)
amount_clusters = 2
initial_means, initial_covariance = ema_initializer(sample, amount_clusters).initialize()
print(initial_means)
print(initial_covariance)
@endcode
"""
__MAX_GENERATION_ATTEMPTS = 10
def __init__(self, sample, amount):
"""!
@brief Constructs EM initializer.
@param[in] sample (list): Data that will be used by the EM algorithm.
@param[in] amount (uint): Amount of clusters that should be allocated by the EM algorithm.
"""
self.__sample = sample
self.__amount = amount
def initialize(self, init_type = ema_init_type.KMEANS_INITIALIZATION):
"""!
@brief Calculates initial parameters for EM algorithm: means and covariances using
specified strategy.
@param[in] init_type (ema_init_type): Strategy for initialization.
@return (float|list, float|numpy.array) Initial means and variance (covariance matrix in case multi-dimensional data).
"""
if init_type == ema_init_type.KMEANS_INITIALIZATION:
return self.__initialize_kmeans()
elif init_type == ema_init_type.RANDOM_INITIALIZATION:
return self.__initi
|
alize_random()
raise NameError("Unknown type of EM algorithm initialization is specified.")
def __calculate_initial_clusters(self, centers):
"""!
@brief Calculates the Euclidean distance from each point to each cluster center.
@details The nearest points are captured by the corresponding clusters and, as a result, the clusters are updated.
@return (list) update
|
d clusters as list of clusters. Each cluster contains indexes of objects from data.
"""
clusters = [[] for _ in range(len(centers))]
for index_point in range(len(self.__sample)):
index_optim, dist_optim = -1, 0.0
for index in range(len(centers)):
dist = euclidean_distance_square(self.__sample[index_point], centers[index])
if (dist < dist_optim) or (index == 0):
index_optim, dist_optim = index, dist
clusters[index_optim].append(index_point)
return clusters
def __calculate_initial_covariances(self, initial_clusters):
covariances = []
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [self.__sample[index_point] for index_point in initial_cluster]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return covariances
def __initialize_random(self):
initial_means = []
for _ in range(self.__amount):
mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
attempts = 0
while (mean in initial_means) and (attempts < ema_initializer.__MAX_GENERATION_ATTEMPTS):
mean = self.__sample[ random.randint(0, len(self.__sample)) - 1 ]
attempts += 1
if attempts == ema_initializer.__MAX_GENERATION_ATTEMPTS:
mean = [ value + (random.random() - 0.5) * value * 0.2 for value in mean ]
initial_means.append(mean)
initial_clusters = self.__calculate_initial_clusters(initial_means)
initial_covariance = self.__calculate_initial_covariances(initial_clusters)
return initial_means, initial_covariance
def __initialize_kmeans(self):
initial_centers = kmeans_plusplus_initializer(self.__sample, self.__amount).initialize()
kmeans_instance = kmeans(self.__sample, initial_centers, ccore = True)
kmeans_instance.process()
means = kmeans_instance.get_centers()
covariances = []
initial_clusters = kmeans_instance.get_clusters()
for initial_cluster in initial_clusters:
if len(initial_cluster) > 1:
cluster_sample = [ self.__sample[index_point] for index_point in initial_cluster ]
covariances.append(numpy.cov(cluster_sample, rowvar=False))
else:
dimension = len(self.__sample[0])
covariances.append(numpy.zeros((dimension, dimension)) + random.random() / 10.0)
return means, covariances
class ema_observer:
"""!
@brief Observer of EM algorithm for collecting algorithm state on each step.
|
mikekap/batchy
|
tests/batch_tests.py
|
Python
|
apache-2.0
| 4,519
| 0.002213
|
import itertools
from batchy.runloop import coro_return, runloop_coroutine
from batchy.batch_coroutine import batch_coroutine, class_batch_coroutine
from . import BaseTestCase
CALL_COUNT = 0
@batch_coroutine()
def increment(arg_lists):
def increment_single(n):
return n + 1
global CALL_COUNT
CALL_COUNT += 1
coro_return([increment_single(*ar, **kw) for ar, kw in arg_lists])
yield
@batch_coroutine(accepts_kwargs=False)
def increment_nokwargs(arg_lists):
global CALL_COUNT
CALL_COUNT += 1
coro_return(list(itertools.starmap(lambda _n: _n + 1, arg_lists)))
yield
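# Illustrative note (inferred from the tests below, not part of the original module):
# batch_coroutine collapses concurrent calls, so
#   a, b, c = yield increment(1), increment(2), increment(3)
# invokes the decorated function once with all three argument lists, which is why the
# tests expect CALL_COUNT == 1 after three logical calls.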
class BatchClient(object):
def __init__(self):
self.get_call_count = 0
self.set_call_count = 0
self.run_call_count = 0
self.throw_count = 0
@class_batch_coroutine(1)
def get(self, arg_lists):
self.get_call_count += 1
yield self.run()
coro_return([0] * len(arg_lists))
@class_batch_coroutine(1)
def set(self, _):
self.set_call_count += 1
yield self.run()
@cla
|
ss_batch_coroutine(0)
def run(self, _):
self.run_call_count += 1
yield
@class_batch_coroutine(0)
def throw(self, _):
self.throw_count += 1
raise ValueError()
yield # pylint: disable-msg=W0101
@class_batch_coroutine(2)
def throw_sooner(self, _):
self.throw_count += 1
raise
|
ValueError()
yield # pylint: disable-msg=W0101
def reset(self):
self.get_call_count = self.set_call_count = self.run_call_count = self.throw_count = 0
class BatchTests(BaseTestCase):
def setup(self):
global CALL_COUNT
CALL_COUNT = 0
def test_simple_batch(self):
@runloop_coroutine()
def test():
a, b, c = yield increment(1), increment(2), increment(3)
coro_return((a, b, c))
self.assert_equals((2,3,4), test())
self.assert_equals(1, CALL_COUNT)
def test_batch_no_kwargs(self):
@runloop_coroutine()
def test():
a, b, c = yield increment_nokwargs(1), increment_nokwargs(2), increment_nokwargs(3)
coro_return((a, b, c))
self.assert_equals((2,3,4), test())
self.assert_equals(1, CALL_COUNT)
def test_multi_clients(self):
client1, client2 = BatchClient(), BatchClient()
@runloop_coroutine()
def sub_1(client):
rv = yield client.get()
yield client.set()
coro_return(rv)
@runloop_coroutine()
def sub_2(client):
rv = yield client.get()
yield client.set()
coro_return(rv)
@runloop_coroutine()
def test1():
rv = yield sub_1(client1), sub_2(client2)
coro_return(rv)
test1()
self.assert_equal(1, client1.get_call_count)
self.assert_equal(1, client1.set_call_count)
self.assert_equal(2, client1.run_call_count)
self.assert_equal(1, client2.get_call_count)
self.assert_equal(1, client2.set_call_count)
self.assert_equal(2, client2.run_call_count)
client1.reset()
client2.reset()
@runloop_coroutine()
def test2():
rv = yield sub_1(client1), sub_2(client1)
coro_return(rv)
test2()
self.assert_equal(1, client1.get_call_count)
self.assert_equal(1, client1.set_call_count)
self.assert_equal(2, client1.run_call_count)
self.assert_equal(0, client2.get_call_count)
self.assert_equal(0, client2.set_call_count)
self.assert_equal(0, client2.run_call_count)
def test_exception(self):
client = BatchClient()
@runloop_coroutine()
def action_1():
yield client.throw()
@runloop_coroutine()
def action_2():
yield client.get('a')
yield client.throw()
@runloop_coroutine()
def test():
yield action_1(), action_1(), action_2()
self.assert_raises(ValueError, test)
def test_exception_sooner(self):
client = BatchClient()
@runloop_coroutine()
def action_1():
yield client.throw_sooner()
@runloop_coroutine()
def action_2():
yield client.get('a')
yield client.throw_sooner()
@runloop_coroutine()
def test():
yield action_1(), action_1(), action_2()
self.assert_raises(ValueError, test)
|
FabienPean/sofa
|
applications/plugins/BulletCollisionDetection/examples/PrimitiveCreation.py
|
Python
|
lgpl-2.1
| 9,475
| 0.054142
|
import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
falling_speed = 0
capsule_height = 5
capsule_chain_height = 5
def createRigidCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
if len(args) <= 1:
height = random.uniform(1,3)
else:
height = args[1]
meca =
|
node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TCapsuleModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(he
|
ight))
return 0
def createBulletCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
if len(args) <= 1:
height = random.uniform(1,3)
else:
height = args[1]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('RigidBulletCapsuleModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(height),margin="0.5")
return 0
def createBulletCylinder(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
if len(args) <= 1:
height = random.uniform(1,3)
else:
height = args[1]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('BulletCylinderModel',template='Rigid',name='capsule_model',radii=str(radius),heights=str(height))
return 0
def createFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
def createBulletFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('BulletCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
def createCapsuleChain(parentNode,name,length,x,y,z):
node = parentNode.createChild(name)
#radius=random.uniform(1,3)
radius=0.5
height=5
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
ray = 3.0
t = 0.0
delta_t = 0.7
topo_edges=''
particles=''
velocities = ''
springs=''
for i in range(0,length):
particles += str(x + (ray * cos(t)).real)+' '+str(y + (ray * sin(t)).real)+' '+str(z + i*capsule_chain_height)+' '
t += delta_t
if i < length -1:
topo_edges += str(i)+' '+str(i + 1)+' '
springs += str(i)+' '+str(i + 1)+' 10 1 '+str(capsule_chain_height)+' '
velocities+='0 0 '+str(falling_speed)+' '
topo_edges += str(length - 2)+' '+str(length -1)
springs += str(length - 2)+' '+str(length -1)+' 10 1 '+str(capsule_chain_height)
node.createObject('MechanicalObject',template='Vec3d',name='falling_particles',position=particles,velocity=velocities)
node.createObject('StiffSpringForceField',template='Vec3d',name='springforcefield',stiffness='100',damping='1',spring=springs)
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges=topo_edges,drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return 0
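# Illustrative usage sketch (the parent node name is an assumption, not from this file):
# createCapsuleChain(rootNode, 'chain0', 10, 0, 0, 20) would build a 10-capsule chain
# of radius 0.5 spiralling upward from (0, 0, 20), with consecutive particles linked
# by the StiffSpringForceField springs assembled above.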
def createOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return 0
def createBulletOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1,template='Rigid')
node.createObject('BulletOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return 0
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
createRigidCapsule(parentNode,name,x,y,z)
else:
createFlexCapsule(parentNode,name,x,y,z)
return 0
def createSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('TSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return 0
def createBulletSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode
|
skarphed/skarphed
|
core/lib/database.py
|
Python
|
agpl-3.0
| 15,700
| 0.008153
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import re
import fdb
import time
import os
from skarphedcore.configuration import Configuration
from common.errors import DatabaseException
class Database(object):
"""
The Database-Class handles the connection to a Firebird 2.5+ Database
"""
_borgmind = {}
def __init__(self):
"""
The Database loads connectiondata to the database from the config of Core
"""
self.__dict__ = Database._borgmind
if self.__dict__ == {}:
self._connection = None
self._ip = None
self._dbname = None
self._user = None
self._password = None
self._queryCache = QueryCache()
c = Configuration()
self.set_ip(c.get_entry('db.ip'))
self.set_db_name(c.get_entry('db.name'))
self.set_user(c.get_entry('db.user'))
self.set_password(c.get_entry('db.password'))
self.connect()
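# Illustrative note (not part of the original class): sharing __dict__ through the
# class-level _borgmind dict is the Borg/monostate pattern, so every Database()
# constructed in the process reuses the same connection state, e.g.
#   db1 = Database(); db2 = Database()
#   assert db1.get_connection() is db2.get_connection()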
def conne
|
ct(self):
"""
The class actua
|
lly connects to the database and stores the
connection in _connection
"""
if None in (self._user, self._ip, self._dbname, self._password):
raise DatabaseException(DatabaseException.get_msg(1))
#TODO: Globally Definable DB-Path
try:
self._connection = fdb.connect(
host=self._ip,
database='/var/lib/firebird/2.5/data/'+self._dbname,
user=self._user,
password=self._password,
charset="UTF8")
except fdb.fbcore.DatabaseError, e:
raise DatabaseException(e.args[0])
return
def set_ip(self, ip):
"""
trivial
"""
self._ip = str(ip)
def set_db_name(self, dbname):
"""
trivial
"""
self._dbname = str(dbname)
def set_user(self, user):
"""
trivial
"""
self._user = str(user)
def set_password(self, password):
"""
trivial
"""
self._password = str(password)
def get_connection(self):
"""
trivial
"""
return self._connection
def commit(self):
"""
commits a pending transaction to the database
"""
self._connection.commit()
def query(self, statement, args=(), module=None, forceNoCache=False, commit=False):
"""
execute a query on the database. be sure to deliver the module.
it is necessary to determine tablenames
"""
try:
mutex = Configuration().get_entry("core.webpath")+"/db.mutex"
if commit: #only if writing stuff
while os.path.exists(mutex):
time.sleep(0.000001)
os.mkdir(mutex)
if self._connection is None:
raise DatabaseException(DatabaseException.get_msg(2))
if module is not None:
statement = self._replace_module_tables(module,statement)
cur = self._connection.cursor()
try:
prepared, cur = self._queryCache(cur, statement)
cur.execute(prepared,args)
except fdb.fbcore.DatabaseError,e:
raise DatabaseException(str(e))
if commit:
self.commit()
return cur
finally:
if commit:
os.rmdir(mutex)
def _replace_module_tables(self, module, query):
"""
replaces module-based tablenames like
'de.grinhold.skarphed.news.news'
with an actual SQL-table like
'TAB_000004'
"""
tagpattern = re.compile('\$\{[A-Za-z0-9.]+\}')
matches = tagpattern.findall(query)
matches = list(set(matches)) # making matches unique
matches = map(lambda s: s[2:-1], matches)
matchesRaw = list(matches)
modules = [module.get_name()]
for match in matches:
splitted = match.split(".")
if len(splitted) > 1:
matches.append(splitted[-1])
matches.remove(match)
splitted.remove(splitted[-1])
modules.append(".".join(splitted))
tableQuery = """SELECT MDT_ID, MDT_NAME, MOD_NAME
FROM MODULETABLES
INNER JOIN MODULES ON (MDT_MOD_ID = MOD_ID )
WHERE MOD_NAME IN (%s)
AND MDT_NAME IN (%s) ;"""%("'"+"','".join(modules)+"'","'"+"','".join(matches)+"'")
cur = self._connection.cursor()
cur.execute(tableQuery)
replacementsDone = []
for res in cur.fetchallmap():
pattern = "${"+res["MOD_NAME"]+"."+res["MDT_NAME"]+"}"
tableId = str(res["MDT_ID"])
tableId = "TAB_"+"0"*(6-len(tableId))+tableId
query = query.replace(pattern, tableId)
replacementsDone.append(res["MOD_NAME"]+"."+res["MDT_NAME"])
if res["MOD_NAME"] == module.get_name():
query = query.replace("${"+res["MDT_NAME"]+"}", tableId)
if len(matchesRaw) != len(replacementsDone):
for replacement in replacementsDone:
matchesRaw.remove(replacement)
raise DatabaseException(DatabaseException.get_msg(3,str(matchesRaw)))
return query
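# Illustrative example (table id assumed, derived from the docstring above):
#   query in:  "SELECT * FROM ${de.grinhold.skarphed.news.news} ;"
#   query out: "SELECT * FROM TAB_000004 ;"
# A bare placeholder such as ${news} is resolved against the calling module itself.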
def get_seq_next(self,sequenceId):
"""
Yields the next value of a given sequence (e.g. 'MOD_GEN')
and increments it
if the sequence contains a "$"-character, tries to resolve name of table
"""
cur = self._connection.cursor()
if sequenceId.startswith("${"):
statement = "SELECT MDT_ID FROM MODULETABLES INNER JOIN MODULES ON MOD_ID = MDT_MOD_ID WHERE MOD_NAME = ? AND MDT_NAME = ? ;"
args = tuple(sequenceId[2:-1].split("."))
cur.execute(statement, args)
res = cur.fetchone()
seqnum = str(res[0])
sequenceId = "SEQ_"+"0"*(6-len(seqnum))+seqnum
statement = "SELECT GEN_ID ( %s , 1) FROM RDB$DATABASE ;"%str(sequenceId)
cur.execute(statement)
res = cur.fetchone()
return res[0]
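# Illustrative usage sketch (not part of the original class): plain generator names
# are used directly, e.g. db.get_seq_next('MOD_GEN'), while a '${module.table}'
# style id is first mapped to its numeric SEQ_xxxxxx generator through the
# MODULETABLES lookup above before GEN_ID is queried.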
def get_seq_current(self,sequenceId):
"""
Yields the current value of a given sequence (e.g. 'MOD_GEN')
without incrementing it
"""
cur = self._connection.cursor()
if sequenceId.startswith("${"):
statement = "SELECT MDT_ID FROM MODULETABLES INNER JOIN MODULES ON MOD_ID = MDT_MOD_ID WHERE MOD_NAME = ? AND MDT_NAME = ? ;"
args = tuple(sequenceId[2:-1].split("."))
cur.execute(statement, args)
res = cur.fetchone()
seqnum = str(res[0])
sequenceId = "SEQ_"+"0"*(6-len(seqnum))+seqnum
statement = "SELECT GEN_ID ( %s , 0) FROM RDB$DATABASE ;"%str(sequenceId)
cur.execute(statement)
res = cur.fetchone()
return res[0]
def set_seq_to(self,sequenceId, value):
"""
Yields the current value of a given sequence (e.g. 'MOD_GEN')
without incrementing it
"""
cur = self._connection.cursor()
statement = "SET GENERATO
|
empowerhack/HealthMate
|
healthmate/services/admin.py
|
Python
|
mit
| 530
| 0
|
"""Admin functionality for services."""
from django.contrib import admin
from django.contrib.admin import site
from leaflet.admin import LeafletGeoAdmin
from .models import Service, ServiceImage
class
|
ServiceImageInline(admin.TabularInline):
"""The inline for service images."""
model = ServiceImage
extra = 3
ordering = ("order",)
class ServiceAdm
|
in(LeafletGeoAdmin, admin.ModelAdmin):
"""The class for the service admin."""
inlines = [ServiceImageInline]
site.register(Service, ServiceAdmin)
|
oceanobservatories/mi-instrument
|
mi/dataset/parser/test/test_nutnr_m_glider.py
|
Python
|
bsd-2-clause
| 7,448
| 0.003894
|
"""
@package mi.dataset.parser.test
@file mi/dataset/parser/test/test_nutnr_m_glider.py
@author Emily Hahn
@brief A test parser for the nutnr series m instrument through a glider
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException, ConfigurationException, DatasetParserException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.nutnr_m.glider.resource import RESOURCE_PATH
from mi.dataset.parser.glider import GliderParser
from mi.dataset.test.test_parser import ParserUnitTestCase
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
log = get_logger()
@attr('UNIT', group='mi')
class NutnrMGliderParserUnitTestCase(ParserUnitTestCase):
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDri
|
verConfigKeys.PARTICLE_CLASS: 'NutnrMDataParticle'
}
def test_simple(self):
"""
Test a simple case that we can par
|
se a single message
"""
with open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
self.assert_particles(particles, "single.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_many(self):
"""
Test a simple case with more messages
"""
with open(os.path.join(RESOURCE_PATH, 'many.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(12)
# requested more than are available in file, should only be 10
self.assertEquals(len(particles), 10)
self.assert_particles(particles, "many.yml", RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_full(self):
"""
Test a full file and confirm the right number of particles is returned
"""
with open(os.path.join(RESOURCE_PATH, 'unit_514-2014-351-2-0.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(40)
# requested more than are available in file, should only be 31
self.assertEquals(len(particles), 31)
self.assertEqual(self.exception_callback_value, [])
def test_empty(self):
"""
An empty file will return a sample exception since it cannot read the header
"""
file_handle = open(os.path.join(RESOURCE_PATH, 'empty.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
# an empty file should yield no particles
self.assertEquals(len(particles), 0)
def test_bad_config(self):
"""
Test that a set of bad configurations produces the expected exceptions
"""
file_handle = open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU')
# confirm a configuration exception occurs if no config is passed in
with self.assertRaises(ConfigurationException):
GliderParser({}, file_handle, self.exception_callback)
# confirm a config missing the particle class causes an exception
bad_config = {DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider'}
with self.assertRaises(ConfigurationException):
GliderParser(bad_config, file_handle, self.exception_callback)
# confirm a config with a non existing class causes an exception
bad_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'BadDataParticle'
}
with self.assertRaises(AttributeError):
GliderParser(bad_config, file_handle, self.exception_callback)
def test_bad_headers(self):
"""
Test that a file with a short header raises a sample exception
"""
# this file does not have enough header lines
file_handle = open(os.path.join(RESOURCE_PATH, 'short_header.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
# this file specifies a number of header lines other than 14
file_handle = open(os.path.join(RESOURCE_PATH, 'bad_num_header_lines.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
# this file specifies a number of label lines other than 3
file_handle = open(os.path.join(RESOURCE_PATH, 'bad_num_label_lines.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_missing_time(self):
"""
Test that a file which is missing the required m_present_time field for timestamps raises a sample exception
"""
# this file is missing the m_present_time label
file_handle = open(os.path.join(RESOURCE_PATH, 'no_time_label.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_short_data(self):
"""
Test that if the number of columns in the header do not match the number of columns in the data an
exception occurs
"""
# this file has two columns removed from the data line
file_handle = open(os.path.join(RESOURCE_PATH, 'short_data.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
def test_bad_sensors_per_cycle(self):
"""
Test that if the number of sensors per cycle from the header does not match that in the data, an
exception in the callback occurs, but processing continues
"""
with open(os.path.join(RESOURCE_PATH, 'bad_sensors_per_cycle.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(1)
self.assert_particles(particles, "single.yml", RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], SampleException)
def test_short_units(self):
"""
Test that if the number of label columns does not match the number of units columns an exception occurs
"""
# this file has two columns removed from the units line
file_handle = open(os.path.join(RESOURCE_PATH, 'short_units.mrg'), 'rU')
with self.assertRaises(DatasetParserException):
parser = GliderParser(self.config, file_handle, self.exception_callback)
parser.get_records(1)
|
muscatmat/vmi-event-naive-detector
|
scripts/check_creds.py
|
Python
|
mit
| 972
| 0.012346
|
#!/usr/bin/python
# Import System Required Paths
import sys
sys.path.append('/usr/local/src/volatility-master')
# Import Volalatility
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
registry.register_global_options(config, commands.Command)
regis
|
try.register_global_options(config, addrspace.BaseAddressS
|
pace)
config.parse_options()
config.PROFILE="LinuxDebian31604x64"
config.LOCATION = "vmi://debian-hvm"
# Other imports
import time
# Retrieve check creds plugin
import volatility.plugins.linux.check_creds as fopPlugin
fopData = fopPlugin.linux_check_creds(config)
invalid_fop_start_time = time.time()
for msg in fopData.calculate():
print "***Processes are sharing credential structures***"
print msg
dir(msg)
print("--- Check creds Time Taken: %s seconds ---" % (time.time() - invalid_fop_start_time))
|