| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64: 6–947k) | score (float64: 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
vinoth3v/In | In/vakai/page/load_more.py | Python | apache-2.0 | 2,553 | 0.061888

def action_vakai_load_more(context, action, parent_entity_bundle, last_id, parent_entity_id, **args):
try:
parent_entity_type = 'Vakai'
parent_entity_id = int(parent_entity_id)
last_id = int(last_id)
output = Object()
db = IN.db
connection = db.connection
# TODO: paging
# get total
total = 0
limit = 10
# TODO: make it dynamic
cursor = db.execute('''SELECT
count(field_vakai_parent.value)
FROM
field.field_vakai_parent
JOIN
config.vakai ON field_vakai_parent.entity_id = vakai.id
WHERE
vakai.type = %(parent_entity_bundle)s AND
field_vakai_parent.value = %(parent_id)s AND
vakai.id < %(last_id)s AND
vakai.status != 0
''', {
'parent_entity_bundle' : parent_entity_bundle,
'parent_id' : parent_entity_id,
'last_id' : last_id,
})
if cursor.rowcount >= 0:
total = int(cursor.fetchone()[0])
more_id = '_'.join(('more-vakais', parent_entity_type, str(parent_entity_id)))
if total > 0:
cursor = db.execute('''SELECT
field_vakai_parent.entity_type,
field_vakai_parent.entity_id,
field_vakai_parent.value,
vakai.weight
FROM
field.field_vakai_parent
JOIN
config.vakai ON field_vakai_parent.entity_id = vakai.id
WHERE
vakai.type = %(parent_entity_bundle)s AND
field_vakai_parent.value = %(parent_id)s AND
vakai.id < %(last_id)s AND
vakai.status != 0
ORDER BY
vakai.weight
LIMIT %(limit)s
''', {
'parent_entity_bundle' : parent_entity_bundle,
'parent_id' : parent_entity_id,
'last_id' : last_id,
'limit' : limit,
})
ids = []
last_id = 0
if cursor.rowcount >= 0:
for row in cursor:
# reverse reference
ids.append(row['entity_id'])
last_id = ids[-1] # last id
vakais = IN.entitier.load_multiple('Vakai', ids)
for id, vakai in vakais.items():
obj = ThemeArgs(vakai, {'view_mode' : 'adminlist'})
output.add(obj)
remaining = total - limit
if remaining > 0 and last_id > 0:
output.add('TextDiv', {
'id' : more_id,
'value' : str(remaining) + ' more...',
'css' : ['ajax i-text-center pointer i-panel-box i-panel-box-primary'],
'attributes' : {
'data-href' : ''.join(('/vakai/more/!', str(parent_entity_bundle), '/', str(last_id), '/', str(parent_entity_id)))
},
'weight' : -1
})
output = {more_id : output}
context.response = In.core.response.PartialResponse(output = output)
except:
IN.logger.debug()
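
The handler above pages through child entities keyset-style: count the rows remaining below `last_id`, fetch one `limit`-sized batch ordered by weight, and advertise the remainder behind a "more" link. A minimal sketch of that query pattern against a generic pyformat DB-API cursor; the table and column names here are illustrative, not the project's real schema:

```python
# Keyset "load more" pagination, sketched against a DB-API 2.0 cursor.
def fetch_more(cursor, parent_id, last_id, limit=10):
    cursor.execute(
        '''SELECT id, weight FROM vakai
           WHERE parent = %(parent_id)s AND id < %(last_id)s AND status != 0
           ORDER BY weight LIMIT %(limit)s''',
        {'parent_id': parent_id, 'last_id': last_id, 'limit': limit})
    rows = cursor.fetchall()
    new_last_id = rows[-1][0] if rows else 0   # feed into the next request
    return rows, new_last_id
```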

KevinOConnor/klipper | klippy/webhooks.py | Python | gpl-3.0 | 20,782 | 0.001203

# Klippy WebHooks registration and server connection
#
# Copyright (C) 2020 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license
import logging, socket, os, sys, errno, json, collections
import gcode
REQUEST_LOG_SIZE = 20
# Json decodes strings as unicode types in Python 2.x. This doesn't
# play well with some parts of Klipper (particularly displays), so we
# need to create an object hook. This solution borrowed from:
#
# https://stackoverflow.com/questions/956867/
#
def byteify(data, ignore_dicts=False):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, list):
return [byteify(i, True) for i in data]
if isinstance(data, dict) and not ignore_dicts:
return {byteify(k, True): byteify(v, True)
for k, v in data.items()}
return data
class WebRequestError(gcode.CommandError):
def __init__(self, message,):
Exception.__init__(self, message)
def to_dict(self):
return {
'error': 'WebRequestError',
'message': str(self)}
class Sentinel:
pass
class WebRequest:
error = WebRequestError
def __init__(self, client_conn, request):
self.client_conn = client_conn
base_request = json.loads(request, object_hook=byteify)
if type(base_request) != dict:
raise ValueError("Not a top-level dictionary")
self.id = base_request.get('id', None)
self.method = base_request.get('method')
self.params = base_request.get('params', {})
if type(self.method) != str or type(self.params) != dict:
raise ValueError("Invalid request type")
self.response = None
self.is_error = False
def get_client_connection(self):
return self.client_conn
def get(self, item, default=Sentinel, types=None):
value = self.params.get(item, default)
if value is Sentinel:
raise WebRequestError("Missing Argument [%s]" % (item,))
if (types is not None and type(value) not in types
and item in self.params):
raise WebRequestError("Invalid Argument Type [%s]" % (item,))
return value
def get_str(self, item, default=Sentinel):
return self.get(item, default, types=(str,))
def get_int(self, item, default=Sentinel):
return self.get(item, default, types=(int,))
def get_float(self, item, default=Sentinel):
return float(self.get(item, default, types=(int, float)))
def get_dict(self, item, default=Sentinel):
return self.get(item, default, types=(dict,))
def get_method(self):
return self.method
def set_error(self, error):
self.is_error = True
self.response = error.to_dict()
def send(self, data):
if self.response is not None:
raise WebRequestError("Multiple calls to send not allowed")
self.response = data
def finish(self):
if self.id is None:
return None
rtype = "result"
if self.is_error:
rtype = "error"
if self.response is None:
# No error was set and the user never executed
# send, default response is {}
self.response = {}
return {"id": self.id, rtype: self.response}
class ServerSocket:
def __init__(self, webhooks, printer):
self.printer = printer
self.webhooks = webhooks
self.reactor = printer.get_reactor()
self.sock = self.fd_handle = None
self.clients = {}
start_args = printer.get_start_args()
server_address = start_args.get('apiserver')
is_fileinput = (start_args.get('debuginput') is not None)
if not server_address or is_fileinput:
# Do not enable server
return
self._remove_socket_file(server_address)
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.setblocking(0)
self.sock.bind(server_address)
self.sock.listen(1)
self.fd_handle = self.reactor.register_fd(
self.sock.fileno(), self._handle_accept)
printer.register_event_handler(
'klippy:disconnect', self._handle_disconnect)
printer.register_event_handler(
"klippy:shutdown", self._handle_shutdown)
def _handle_accept(self, eventtime):
try:
sock, addr = self.sock.accept()
except socket.error:
return
sock.setblocking(0)
client = ClientConnection(self, sock)
self.clients[client.uid] = client
def _handle_disconnect(self):
for client in list(self.clients.values()):
client.close()
if self.sock is not None:
self.reactor.unregister_fd(self.fd_handle)
try:
self.sock.close()
except socket.error:
pass
def _handle_shutdown(self):
for client in self.clients.values():
client.dump_request_log()
def _remove_socket_file(self, file_path):
try:
os.remove(file_path)
except OSError:
if os.path.exists(file_path):
logging.exception(
"webhooks: Unable to delete socket file '%s'"
% (file_path))
raise
def pop_client(self, client_id):
self.clients.pop(client_id, None)
class ClientConnection:
def __init__(self, server, sock):
self.printer = server.printer
self.webhooks = server.webhooks
self.reactor = server.reactor
self.server = server
self.uid = id(self)
self.sock = sock
self.fd_handle = self.reactor.register_fd(
self.sock.fileno(), self.process_received)
self.partial_data = self.send_buffer = ""
self.is_sending_data = False
self.set_client_info("?", "New connection")
self.request_log = collections.deque([], REQUEST_LOG_SIZE)
def dump_request_log(self):
out = []
out.append("Dumping %d requests for client %d"
% (len(self.request_log), self.uid,))
for eventtime, request in self.request_log:
out.append("Received %f: %s" % (eventtime, request))
logging.info("\n".join(out))
def set_client_info(self, client_info, state_msg=None):
if state_msg is None:
state_msg = "Client info %s" % (repr(client_info),)
logging.info("webhooks client %s: %s", self.uid, state_msg)
log_id = "webhooks %s" % (self.uid,)
if client_info is None:
self.printer.set_rollover_info(log_id, None, log=False)
return
rollover_msg = "webhooks client %s: %s" % (self.uid, repr(client_info))
self.printer.set_rollover_info(log_id, rollover_msg, log=False)
def close(self):
if self.fd_handle is None:
return
self.set_client_info(None, "Disconnected")
self.reactor.unregister_fd(self.fd_handle)
self.fd_handle = None
try:
self.sock.close()
except socket.error:
pass
self.server.pop_client(self.uid)
def is_closed(self):
return self.fd_handle is None
def process_received(self, eventtime):
try:
data = self.sock.recv(4096)
except socket.error as e:
# If bad file descriptor allow connection to be
# closed by the data check
if e.errno == errno.EBADF:
data = ''
else:
return
if data == '':
# Socket Closed
self.close()
return
requests = data.split('\x03')
requests[0] = self.partial_data + requests[0]
self.partial_data = requests.pop()
for req in requests:
self.request_log.append((eventtime, req))
try:
web_request = WebRequest(self, req)
except Exception:
logging.exception("webhooks: Error decoding Server Request %s"
% (req))
continue
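
The server above speaks JSON frames over a Unix domain socket, each message terminated by a 0x03 byte (see `process_received`, which splits on `'\x03'` and keeps the trailing partial chunk). A minimal client sketch for that framing; the socket path and method name passed in are assumptions for illustration:

```python
import json
import socket

def query(sock_path, method, params=None, req_id=1):
    """Send one 0x03-terminated JSON request and read one framed reply."""
    payload = json.dumps({'id': req_id, 'method': method,
                          'params': params or {}}).encode()
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(sock_path)
    s.sendall(payload + b'\x03')        # 0x03 terminates each message
    buf = b''
    while b'\x03' not in buf:           # accumulate until one full frame
        buf += s.recv(4096)
    s.close()
    return json.loads(buf.split(b'\x03')[0])
```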

vipul-sharma20/fossevents.in | tests/frontend/test_page_contents.py | Python | mit | 869 | 0.002301

# -*- coding: utf-8 -*-
import pytest
from django.core.urlresolvers import reverse
pytestmark = pytest.mark.django_db
global_footer_links = [
'About',
'Developers',
'Privacy',
'Report an issue',
]
def assert_title_and_links_on_page(browser, url, title, links_text):
browser.visit(url)
assert title in browser.title
for link_text in links_text:
assert browser.find_link_by_text(link_text)
def test_homepage(browser):
url = reverse('pages:home')
assert_title_and_links_on_page(browser, url, "FossEvents", global_footer_links)
def test_about_page(browser):
url = reverse('pages:about')
assert_title_and_links_on_page(browser, url, "About", global_footer_links)
def test_privacy_page(browser):
url = reverse('pages:privacy')
assert_title_and_links_on_page(browser, url, "Privacy", global_footer_links)
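
The three tests differ only in route name and expected title, so they could be collapsed with `pytest.mark.parametrize`; a sketch reusing the same helper and fixtures (not part of the original suite):

```python
import pytest

@pytest.mark.parametrize('route, title', [
    ('pages:home', 'FossEvents'),
    ('pages:about', 'About'),
    ('pages:privacy', 'Privacy'),
])
def test_page(browser, route, title):
    url = reverse(route)
    assert_title_and_links_on_page(browser, url, title, global_footer_links)
```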

3dfxsoftware/cbss-addons | mass_mailing/controllers/main.py | Python | gpl-2.0 | 2,342 | 0.005978

import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
@http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
def track_mail_open(self, mail_id, **post):
""" Email tracking. """
mail_mail_stats = request.registry.get('mail.mail.statistics')
mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
response = werkzeug.wrappers.Response()
response.mimetype = 'image/gif'
response.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
return response
@http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', auth='none')
def mailing(self, mailing_id, email=None, res_id=None, **post):
cr, uid, context = request.cr, request.uid, request.context
MassMailing = request.registry['mail.mass_mailing']
mailing_ids = MassMailing.exists(cr, SUPERUSER_ID, [mailing_id], context=context)
if not mailing_ids:
return 'KO'
mailing = MassMailing.browse(cr, SUPERUSER_ID, mailing_ids[0], context=context)
if mailing.mailing_model == 'mail.mass_mailing.contact':
list_ids = [l.id for l in mailing.contact_list_ids]
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('list_id', 'in', list_ids), ('id', '=', res_id), ('email', 'ilike', email)], context=context)
request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
else:
email_fname = None
if 'email_from' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email_from'
elif 'email' in request.registry[mailing.mailing_model]._all_columns:
email_fname = 'email'
if email_fname:
record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('id', '=', res_id), (email_fname, 'ilike', email)], context=context)
if 'opt_out' in request.registry[mailing.mailing_model]._all_columns:
request.registry[mailing.mailing_model].write(
cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
return 'OK'
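
`track_mail_open` serves a 1x1 transparent GIF whose request marks the mail as opened; the `'...'.decode('base64')` call is Python 2 only. A Python 3 equivalent for producing the same pixel bytes (a sketch, not part of the addon):

```python
import base64

# The same 1x1 transparent GIF payload used by the tracking route above.
GIF_1PX = base64.b64decode(
    'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==')
```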

neuroo/equip | examples/sample-test-program/test_module/mistune.py | Python | apache-2.0 | 31,391 | 0

# coding: utf-8
"""
mistune
~~~~~~~
The fastest markdown parser in pure Python with renderer feature.
:copyright: (c) 2014 by Hsiaoming Yang.
"""
import re
import inspect
__version__ = '0.4.1'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
'BlockGrammar', 'BlockLexer',
'InlineGrammar', 'InlineLexer',
'Renderer', 'Markdown',
'markdown', 'escape',
]
def _pure_pattern(regex):
pattern = regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
return pattern
_key_pattern = re.compile(r'\s+')
def _keyify(key):
return _key_pattern.sub(' ', key.lower())
_escape_pattern = re.compile(r'&(?!#?\w+;)')
def escape(text, quote=False, smart_amp=True):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences.
The original cgi.escape will always escape "&", but you can control
this one for a smart escape amp.
:param quote: if set to True, " and ' will be escaped.
:param smart_amp: if set to False, & will always be escaped.
"""
if smart_amp:
text = _escape_pattern.sub('&amp;', text)
else:
text = text.replace('&', '&amp;')
text = text.replace('<', '&lt;')
text = text.replace('>', '&gt;')
if quote:
text = text.replace('"', '&quot;')
text = text.replace("'", '&#39;')
return text
def preprocessing(text, tab=4):
text = re.sub(r'\r\n|\r', '\n', text)
text = text.replace('\t', ' ' * tab)
text = text.replace('\u00a0', ' ')
text = text.replace('\u2424', '\n')
pattern = re.compile(r'^ +$', re.M)
return pattern.sub('', text)
class BlockGrammar(object):
"""Grammars for block level tokens."""
_tag = (
r'(?!(?:'
r'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|'
r'var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|'
r'span|br|wbr|ins|del|img)\b)\w+(?!:/|[^\w\s@]*@)\b'
)
def_links = re.compile(
r'^ *\[([^^\]]+)\]: *' # [key]:
r'<?([^\s>]+)>?' # <link> or link
r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
)
def_footnotes = re.compile(
r'^\[\^([^\]]+)\]: *('
r'[^\n]*(?:\n+|$)' # [^key]:
r'(?: {1,}[^\n]*(?:\n+|$))*'
r')'
)
newline = re.compile(r'^\n+')
block_code = re.compile(r'^( {4}[^\n]+\n*)+')
fences = re.compile(
r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang
r'([\s\S]+?)\s*'
r'\1 *(?:\n+|$)' # ```
)
hrule = re.compile(r'^(?: *[-*_]){3,} *(?:\n+|$)')
heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
list_block = re.compile(
r'^( *)([*+-]|\d+\.) [\s\S]+?'
r'(?:'
r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
r'|\n+(?=%s)' # def links
r'|\n+(?=%s)' # def footnotes
r'|\n{2,}'
r'(?! )'
r'(?!\1(?:[*+-]|\d+\.) )\n*'
r'|'
r'\s*$)' % (
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
)
)
list_item = re.compile(
r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
flags=re.M
)
list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
paragraph = re.compile(
r'^((?:[^\n]+\n?(?!'
r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
r'))+)\n*' % (
_pure_pattern(fences).replace(r'\1', r'\2'),
_pure_pattern(list_block).replace(r'\1', r'\3'),
_pure_pattern(hrule),
_pure_pattern(heading),
_pure_pattern(lheading),
_pure_pattern(block_quote),
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
'<' + _tag,
)
)
block_html = re.compile(
r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
r'<!--[\s\S]*?-->',
r'<(%s)[\s\S]+?<\/\1>' % _tag,
r'''<%s(?:"[^"]*"|'[^']*'|[^'">])*?>''' % _tag,
)
)
table = re.compile(
r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
)
nptable = re.compile(
r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
)
text = re.compile(r'^[^\n]+')
class BlockLexer(object):
"""Block level lexer for block grammars."""
default_features = [
'newline', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'hrule', 'block_quote',
'list_block', 'block_html', 'def_links',
'def_footnotes', 'table', 'paragraph', 'text'
]
list_features = (
'newline', 'block_code', 'fences', 'lheading', 'hrule',
'block_quote', 'list_block', 'block_html', 'text',
)
footnote_features = (
'newline', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'hrule', 'block_quote',
'list_block', 'block_html', 'table', 'paragraph', 'text'
)
def __init__(self, rules=None, **kwargs):
self.tokens = []
self.def_links = {}
self.def_footnotes = {}
if not rules:
rules = BlockGrammar()
self.rules = rules
def __call__(self, text, features=None):
return self.parse(text, features)
def parse(self, text, features=None):
text = text.rstrip('\n')
if not features:
features = self.default_features
def manipulate(text):
for key in features:
rule = getattr(self.rules, key)
m = rule.match(text)
if not m:
continue
getattr(self, 'parse_%s' % key)(m)
return m
return False
while text:
m = manipulate(text)
if m is not False:
text = text[len(m.group(0)):]
continue
if text:
raise RuntimeError('Infinite loop at: %s' % text)
return self.tokens
def parse_newline(self, m):
length = len(m.group(0))
if length > 1:
self.tokens.append({'type': 'space'})
def parse_block_code(self, m):
code = m.group(0)
pattern = re.compile(r'^ {4}', re.M)
code = pattern.sub('', code)
self.tokens.append({
'type': 'code',
'lang': None,
'text': code,
})
def parse_fences(self, m):
self.tokens.append({
'type': 'code',
'lang': m.group(2),
'text': m.group(3),
})
def parse_heading(self, m):
self.tokens.append({
'type': 'heading',
'level': len(m.group(1)),
'text': m.group(2),
})
def parse_lheading(self, m):
"""Parse setext heading."""
self.tokens.append({
'type': 'heading',
'level': 1 if m.group(2) == '=' else 2,
'text': m.group(1),
})
def parse_hrule(self, m):
self.tokens.append({'type': 'hrule'})
def parse_list_block(self, m):
bull = m.group(2)
self.tokens.append({
'type': 'list_start',
'ordered': '.' in bull,
})
cap = m.group(0)
self._process_list_item(cap, bull)
self.tokens.append({'type': 'list_end'})
def _process_list_item(self, cap, bull):
cap = self.rules.list_item.findall(cap)
_next = False
length = len(cap)
for i in range(length):
item = cap[i][0]
# remove the bullet
space = len(item)
item = self.rules.list_bullet.sub('', item)
# outdent
if '\n ' in item:
space = space - len(item)
pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
item = pattern.sub('', item)
# determine whether item is loose or not
loose = _next
if not loose and re.search(r'\n\n(?!\s*$)', item):
loose = True
if i != length - 1:
_next = item[len(item)-1] == '\n'
if not loose:
loose = _next
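
For reference, `escape()` above behaves like this once the HTML entities are written out (illustrative calls, not from the original file):

```python
escape('AT&T <b>')                     # 'AT&amp;T &lt;b&gt;'
escape('&amp; stays', smart_amp=True)  # existing entities are left alone
escape('"quoted"', quote=True)         # '&quot;quoted&quot;'
```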

unicef-zambia/zambia-ureport | docs/conf.py | Python | bsd-3-clause | 7,753 | 0.007739

# -*- coding: utf-8 -*-
#
# zambiaureport documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'zambiaureport'
copyright = u'2014, Andre Lesa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'zambiaureportdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'zambiaureport.tex', u'zambiaureport Documentation',
u'Andre Lesa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zambiaureport', u'zambiaureport Documentation',
[u'Andre Lesa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'zambiaureport', u'zambiaureport Documentation',
u'Andre Lesa', 'zambiaureport',
'Zambia U-Report reference implementation.','Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
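
The file's own comments describe the two edits most autogenerated configs eventually need: putting the package on `sys.path` for autodoc and listing extensions. A sketch of both (the path and extension list are assumptions, not this project's settings):

```python
import os
import sys

sys.path.insert(0, os.path.abspath('..'))   # make the package importable
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
```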

dskoda1/hpds | hpds/trees/__init__.py | Python | bsd-2-clause | 49 | 0

from .binary_search_tree import BinarySearchTree

evolvIQ/railgun | setup.py | Python | apache-2.0 | 990 | 0.017189

# -*- coding: utf-8 -*
from distutils.core import setup
import os
PACKAGE_NAME = "railgun"
def recurse(d):
ret = []
for f in os.listdir(d):
if f.startswith("."): continue
df = os.path.join(d, f)
if os.path.isfile(df):
ret.append(df)
elif f != "build":
ret += recurse(df)
return ret
def structure(fs):
s = {}
for f in fs:
d = os.path.dirname(f)
if not d.startswith("meta/"): continue
d = PACKAGE_NAME + d[4:]
v = s.get(d, [])
s[d] = v
v.append(f)
return s.items()
setup(name='docker-railgun',
version='0.1',
description='Self-organizing Docker-based container building and provisioning',
author='Rickard Petzäll',
author_email='rickard@evolviq.com',
url='https://github.com/evolvIQ/railgun',
packages=[PACKAGE_NAME, "%s.host_providers" % PACKAGE_NAME],
scripts=['bin/railgun'],
data_files=structure(recurse("meta"))
)
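
`recurse()` walks `meta/` while skipping dotfiles and any `build` directory, and `structure()` rewrites the `meta/` prefix to the package name for `data_files`. Roughly the same traversal with `os.walk`, as a sketch:

```python
import os

def recurse_walk(root):
    out = []
    for dirpath, dirnames, filenames in os.walk(root):
        # prune dotdirs and "build", mirroring recurse() above
        dirnames[:] = [d for d in dirnames
                       if not d.startswith('.') and d != 'build']
        out.extend(os.path.join(dirpath, f)
                   for f in filenames if not f.startswith('.'))
    return out
```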

freedesktop-unofficial-mirror/gstreamer__cerbero | cerbero/utils/__init__.py | Python | lgpl-2.1 | 10,438 | 0.000958

# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import shutil
import sys
try:
import sysconfig
except:
from distutils import sysconfig
try:
import xml.etree.cElementTree as etree
except ImportError:
from lxml import etree
import gettext
import platform as pplatform
import re
from cerbero.enums import Platform, Architecture, Distro, DistroVersion
from cerbero.errors import FatalError
from cerbero.utils import messages as m
_ = gettext.gettext
N_ = lambda x: x
class ArgparseArgument(object):
def __init__(self, *name, **kwargs):
self.name = name
self.args = kwargs
def add_to_parser(self, parser):
parser.add_argument(*self.name, **self.args)
def user_is_root():
''' Check if the user running the process is root '''
return hasattr(os, 'getuid') and os.getuid() == 0
def determine_num_of_cpus():
''' Number of virtual or physical CPUs on this system '''
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
return 1
def to_winpath(path):
if path.startswith('/'):
path = '%s:%s' % (path[1], path[2:])
return path.replace('/', '\\')
def to_unixpath(path):
if path[1] == ':':
path = '/%s%s' % (path[0], path[2:])
return path
def to_winepath(path):
path = path.replace('/', '\\\\')
# wine maps the filesystem root '/' to 'z:\'
path = 'z:\\%s' % path
return path
def fix_winpath(path):
return path.replace('\\', '/')
def system_info():
'''
Get the system information.
Return a tuple with the platform type, the architecture and the
distribution
'''
# Get the platform info
platform = sys.platform
if platform.startswith('win'):
platform = Platform.WINDOWS
elif platform.startswith('darwin'):
platform = Platform.DARWIN
elif platform.startswith('linux'):
platform = Platform.LINUX
else:
raise FatalError(_("Platform %s not supported") % platform)
# Get the architecture info
if platform == Platform.WINDOWS:
platform_str = sysconfig.get_platform()
if platform_str in ['win-amd64', 'win-ia64']:
arch = Architecture.X86_64
else:
arch = Architecture.X86
else:
uname = os.uname()
arch = uname[4]
if arch == 'x86_64':
arch = Architecture.X86_64
elif arch.endswith('86'):
arch = Architecture.X86
else:
raise FatalError(_("Architecture %s not supported") % arch)
# Get the distro info
if platform == Platform.LINUX:
d = pplatform.linux_distribution()
if d[0] in ['Ubuntu', 'debian', 'LinuxMint']:
distro = Distro.DEBIAN
if d[2] in ['maverick', 'isadora']:
distro_version = DistroVersion.UBUNTU_MAVERICK
elif d[2] in ['lucid', 'julia']:
distro_version = DistroVersion.UBUNTU_LUCID
elif d[2] in ['natty', 'katya']:
distro_version = DistroVersion.UBUNTU_NATTY
elif d[2] in ['oneiric', 'lisa']:
distro_version = DistroVersion.UBUNTU_ONEIRIC
elif d[2] in ['precise', 'maya']:
distro_version = DistroVersion.UBUNTU_PRECISE
elif d[2] in ['quantal', 'nadia']:
distro_version = DistroVersion.UBUNTU_QUANTAL
elif d[2] in ['raring', 'olivia']:
distro_version = DistroVersion.UBUNTU_RARING
elif d[2] in ['saucy', 'petra']:
distro_version = DistroVersion.UBUNTU_SAUCY
elif d[2] in ['trusty', 'qiana']:
distro_version = DistroVersion.UBUNTU_TRUSTY
elif d[2] in ['utopic']:
distro_version = DistroVersion.UBUNTU_UTOPIC
elif d[1].startswith('6.'):
distro_version = DistroVersion.DEBIAN_SQUEEZE
elif d[1].startswith('7.') or d[1].startswith('wheezy'):
distro_version = DistroVersion.DEBIAN_WHEEZY
elif d[1].startswith('8.') or d[1].startswith('jessie'):
distro_version = DistroVersion.DEBIAN_JESSIE
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0] in ['RedHat', 'Fedora', 'CentOS', 'Red Hat Enterprise Linux Server', 'CentOS Linux']:
distro = Distro.REDHAT
if d[1] == '16':
distro_version = DistroVersion.FEDORA_16
elif d[1] == '17':
distro_version = DistroVersion.FEDORA_17
elif d[1] == '18':
distro_version = DistroVersion.FEDORA_18
elif d[1] == '19':
distro_version = DistroVersion.FEDORA_19
elif d[1] == '20':
distro_version = DistroVersion.FEDORA_20
elif d[1] == '21':
distro_version = DistroVersion.FEDORA_21
elif d[1] == '22':
distro_version = DistroVersion.FEDORA_22
elif d[1].startswith('6.'):
distro_version = DistroVersion.REDHAT_6
elif d[1].startswith('7.'):
distro_version = DistroVersion.REDHAT_7
else:
# FIXME Fill this
raise FatalError("Distribution '%s' not supported" % str(d))
elif d[0].strip() in ['openSUSE']:
distro = Distro.SUSE
if d[1] == '12.1':
distro_version = DistroVersion.OPENSUSE_12_1
elif d[1] == '12.2':
distro_version = DistroVersion.OPENSUSE_12_2
elif d[1] == '12.3':
distro_version = DistroVersion.OPENSUSE_12_3
else:
# FIXME Fill this
raise FatalError("Distribution OpenSuse '%s' "
"not supported" % str(d))
else:
raise FatalError("Distribution '%s' not supported" % str(d))
elif platform == Platform.WINDOWS:
distro = Distro.WINDOWS
win32_ver = pplatform.win32_ver()[0]
dmap = {'xp': DistroVersion.WINDOWS_XP,
'vista': DistroVersion.WINDOWS_VISTA,
'7': DistroVersion.WINDOWS_7,
'post2008Server': DistroVersion.WINDOWS_8,
'8': DistroVersion.WINDOWS_8}
if win32_ver in dmap:
distro_version = dmap[win32_ver]
else:
raise FatalError("Windows version '%s' not supported" % win32_ver)
elif platform == Platform.DARWIN:
distro = Distro.OS_X
ver = pplatform.mac_ver()[0]
if ver.startswith('10.10'):
distro_version = DistroVersion.OS_X_YOSEMITE
elif ver.startswith('10.9'):
distro_version = DistroVersion.OS_X_MAVERICKS
elif ver.startswith('10.8'):
distro_version = DistroVersion.OS_X_MOUNTAIN_LION
else:
raise FatalError("Mac version %s not supported" % ver)
num_of_cpus = determine_num_of_cpus()
return platform, arch, distro, distro_version, num_of_cpus
def validate_packager(packager):
# match packager in the form 'Name <email>'
expr = r'(.*\s)*[<]([a-zA-Z0-9+_\-\.]+@'\
'[0-9a-zA-Z][.-0-9a-zA-Z]*.[a-zA-Z]+)[>]$'
return bool(re.match(expr, packager))
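
Condensed, `system_info()` maps `sys.platform` onto the platform enum before refining architecture and distro, and `validate_packager()` accepts the conventional `Name <email>` form. A compact sketch of the platform step, with the enum values replaced by plain strings:

```python
import sys

def detect_platform():
    if sys.platform.startswith('win'):
        return 'windows'
    elif sys.platform.startswith('darwin'):
        return 'darwin'
    elif sys.platform.startswith('linux'):
        return 'linux'
    raise RuntimeError('Platform %s not supported' % sys.platform)

# validate_packager('Jane Doe <jane@example.com>')  -> True
```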

masschallenge/django-accelerator | accelerator/tests/test_program_partner_type.py | Python | mit | 423 | 0

from __future__ import unicode_literals
from django.test import TestCase
from accelerator.tests.factories import ProgramPartnerTypeFactory
class TestProgramPartnerType(TestCase):
def test_str(self):
program_partner_type = ProgramPartnerTypeFactory()
assert program_partner_type.partner_type in str(program_partner_type)
assert program_partner_type.program.name in str(program_partner_type)

omnitrogen/enigma | gui/enigma-gui-resizable-window.py | Python | mit | 11,660 | 0.038346

#!/usr/bin/python3
from tkinter import *
from tkinter.messagebox import *
fenetre = Tk()
fenetre.title("The Enigma Machine")
fenetre.configure(background='white')
fenetre.geometry("550x800")
class AutoScrollbar(Scrollbar):
#create a 'responsive' scrollbar
def set(self, lo, hi):
if float(lo) <= 0.0 and float(hi) >= 1.0:
self.tk.call("grid", "remove", self)
else:
self.grid()
Scrollbar.set(self, lo, hi)
def pack(self, **kw):
raise TclError("can't pack this widget")
def place(self, **kw):
raise TclError("can't place this widget")
vscrollbar = AutoScrollbar(fenetre)
vscrollbar.grid(row=0, column=1, sticky=N+S)
hscrollbar = AutoScrollbar(fenetre, orient=HORIZONTAL)
hscrollbar.grid(row=1, column=0, sticky=E+W)
canvas = Canvas(fenetre,
yscrollcommand=vscrollbar.set,
xscrollcommand=hscrollbar.set)
canvas.grid(row=0, column=0, sticky=N+S+E+W)
vscrollbar.config(command=canvas.yview)
hscrollbar.config(command=canvas.xview)
# make the canvas expandable
fenetre.grid_rowconfigure(0, weight=1)
fenetre.grid_columnconfigure(0, weight=1)
#
# create canvas contents
frame = Frame(canvas, background="white", borderwidth=0)
#enigma_image
image = PhotoImage(file="enigma.gif")
Label(frame, image=image, background="white", borderwidth=0).pack(padx=10, pady=10, side=TOP)
#help_button
def help():
showinfo("The Enigma Machine Quick Start", "Hello World!\nThis is a quick tutorial on how to use this app!\nFirst, you need to choose the order of the rotors.\nThen you need to set the rotors' position.\nYou can finally write your message and encrypt it by pressing the Return key!\nThat's it, you've just encrypted your first enigma message!\n Have fun!")
helpButton = Button(frame, text ="Help! Quick Start", command = help, background="white")
helpButton.pack(padx=5, pady=5)
#spinboxes_choose_rotors
frameRotor = Frame(frame, background='white')
var4 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var4, width=44)
var4.set("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]")
spinbox.grid(row=0, column=1)
var5 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var5, width=44)
var5.set("rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]")
spinbox.grid(row=1, column=1)
var6 = StringVar()
spinbox = Spinbox(frameRotor, values = ("rotor1=[J,G,D,Q,O,X,U,S,C,A,M,I,F,R,V,T,P,N,E,W,K,B,L,Z,Y,H]",
"rotor2=[N,T,Z,P,S,F,B,O,K,M,W,R,C,J,D,I,V,L,A,E,Y,U,X,H,G,Q]",
"rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]"), textvariable=var6, width=44)
var6.set("rotor3=[J,V,I,U,B,H,T,C,D,Y,A,K,E,Q,Z,P,O,S,G,X,N,R,M,W,F,L]")
spinbox.grid(row=2, column=1)
var7 = StringVar()
spinbox = Spinbox(frameRotor, values = ("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]"), textvariable=var7, width=44)
var7.set("reflec=[Y,R,U,H,Q,S,L,D,P,X,N,G,O,K,M,I,E,B,F,Z,C,W,V,J,A,T]")
spinbox.grid(row=3, column=1)
rotorn1 = Label(frameRotor, text='Slot n°=1:', padx=10, pady=5, background="white")
rotorn1.grid(row=0, column=0)
rotorn2 = Label(frameRotor, text='Slot n°=2:', padx=10, pady=5, background="white")
rotorn2.grid(row=1, column=0)
rotorn3 = Label(frameRotor, text='Slot n°=3:', padx=10, pady=5, background="white")
rotorn3.grid(row=2, column=0)
reflectorn = Label(frameRotor, text='Reflector:', padx=10, pady=5, background="white")
reflectorn.grid(row=3, column=0)
frameRotor.pack()
#frame_to_set_rotor_position
frame1 = Frame(frame, borderwidth=0, relief=FLAT, background='white')
frame1.pack(side=TOP, padx=10, pady=10)
def update1(x):
x = int(x)
alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
lab1.configure(text='position : {}'.format(alphabetList[x-1]))
def update2(x):
x = int(x)
alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
lab2.configure(text='position : {}'.format(alphabetList[x-1]))
def update3(x):
x = int(x)
alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
lab3.configure(text='position : {}'.format(alphabetList[x-1]))
rotor1lab = Label(frame1, text='Rotor 1', padx=10, pady=5, background="white")
rotor1lab.grid(row=0, column=0)
rotor2lab = Label(frame1, text='Rotor 2', padx=10, pady=5, background="white")
rotor2lab.grid(row=0, column=1)
rotor3lab = Label(frame1, text='Rotor 3', padx=10, pady=5, background="white")
rotor3lab.grid(row=0, column=2)
#scales_choose_position
var1 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var1, cursor='dot', showvalue=0, command=update1, length= 100, background="white")
scale.grid(row=1, column=0, padx=60, pady=10)
var2 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var2, cursor='dot', showvalue=0, command=update2, length= 100, background="white")
scale.grid(row=1, column=1, padx=60, pady=10)
var3 = DoubleVar()
scale = Scale(frame1, from_=1, to=26, variable = var3, cursor='dot', showvalue=0, command=update3, length= 100, background="white")
scale.grid(row=1, column=2, padx=60, pady=10)
lab1 = Label(frame1, background="white")
lab1.grid(row=2, column=0)
lab2 = Label(frame1, background="white")
lab2.grid(row=2, column=1)
lab3 = Label(frame1, background="white")
lab3.grid(row=2, column=2)
#function_code
def code(event=None):
a = int(var1.get())
b = int(var2.get())
c = int(var3.get())
def rotationRotor(liste1):
liste1.append(liste1[0])
del liste1[0]
return liste1
def estValide(liste1):
if liste1 == []:
return False
for elt in liste1:
if alphabetList.count(elt.upper()) < 1:
return False
return True
sortie = entryvar.get()
var4str = var4.get()
var4list = list(var4str)
var5str = var5.get()
var5list = list(var5str)
var6str = var6.get()
var6list = list(var6str)
if var4list[5] == '1':
rotor1 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var4list[5] == '2':
rotor1 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var4list[5] == '3':
rotor1 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
if var5list[5] == '1':
rotor2 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var5list[5] == '2':
rotor2 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var5list[5] == '3':
rotor2 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
if var6list[5] == '1':
rotor3 = ['J','G','D','Q','O','X','U','S','C','A','M','I','F','R','V','T','P','N','E','W','K','B','L','Z','Y','H']
elif var6list[5] == '2':
rotor3 = ['N','T','Z','P','S','F','B','O','K','M','W','R','C','J','D','I','V','L','A','E','Y','U','X','H','G','Q']
elif var6list[5] == '3':
rotor3 = ['J','V','I','U','B','H','T','C','D','Y','A','K','E','Q','Z','P','O','S','G','X','N','R','M','W','F','L']
alphabetList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',' ']
alphabetDict = {'G': 7, 'U': 21, 'T': 20, 'L': 12, 'Y': 25, 'Q': 17, 'V': 22, 'J': 10, 'O': 15, 'W': 23, 'N': 14, 'R': 18, 'Z': 26, 'S': 19, 'X': 24, 'A': 1, 'M': 13, 'E': 5, 'D': 4, 'I': 9, 'F': 6, 'P': 16, 'B': 2, 'C': 3, 'H': 8, 'K': 11}
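
The `rotationRotor` helper inside `code()` implements rotor stepping as a left rotation of the wiring list; the same idea without mutation, as a sketch:

```python
def rotate(rotor):
    """Return the rotor advanced by one position (left rotation)."""
    return rotor[1:] + rotor[:1]

rotate(['A', 'B', 'C'])   # ['B', 'C', 'A']
```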

yast/yast-python-bindings | examples/Frame1.py | Python | gpl-2.0 | 340 | 0.017647

# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class Frame1Client:
def main(self):
UI.OpenDialog(
VBox(
Frame("Hey! I&mportant!", Label("Hello, World!")),
PushButton("&OK")
)
)
UI.UserInput()
UI.CloseDialog()
Frame1Client().main()

davrv93/creed-en-sus-profetas-backend | django_rv_apps/apps/believe_his_prophets_api/views/book/filters.py | Python | apache-2.0 | 550 | 0.001818

import django_filters
from django_filters import rest_framework as filters
from django_rv_apps.apps.believe_his_prophets.models.book import Book
from django_rv_apps.apps.believe_his_prophets.models.bible_read import BibleRead
from django_rv_apps.apps.believe_his_prophets.models.testament import Testament
class BookFilter(django_filters.FilterSet):
testament = filters.ModelChoiceFilter(queryset=Testament.objects.all())
class Meta:
model = Book
fields = ('id', 'testament',
'book_order')
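
Usage sketch for the filter set above (the query value is illustrative): `BookFilter` narrows a queryset from request parameters and exposes the result as `.qs`:

```python
filtered = BookFilter({'testament': '1'}, queryset=Book.objects.all())
books = filtered.qs   # Book queryset limited to testament id 1
```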

ewindisch/nova | nova/tests/api/openstack/compute/contrib/test_aggregates.py | Python | apache-2.0 | 20,787 | 0.000625

# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
from webob import exc
from nova.api.openstack.compute.contrib import aggregates
from nova import context
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
AGGREGATE_LIST = [
{"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
{"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
{"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
{"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
AGGREGATE = {"name": "aggregate1",
"id": "1",
"availability_zone": "nova1",
"metadata": {"foo": "bar"},
"hosts": ["host1, host2"]}
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
def setUp(self):
super(AggregateTestCase, self).setUp()
self.controller = aggregates.AggregateController()
self.req = FakeRequest()
self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
self.context = self.req.environ['nova.context']
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_index_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index,
self.user_req)
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.user_req,
{"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exception.InvalidAggregateAction,
self.controller.create,
self.req, {"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertIsNone(availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req,
{"aggregate": {"name": "test"}})
self.assertEqual(AGGREGATE, result["aggregate"])
def test_create_with_null_name(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "",
"availability_zone": "nova1"}})
def test_create_with_name_too_long(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, {"aggregate":
{"name": "x" * 256,
"availability_zone": "nova1"}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.create,
self.req, dict(name="test",
availability_zone="nova1",
foo='bar'))
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show,
self.user_req, "1")
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
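
Each test above follows the same shape: stub exactly one aggregate API method with `self.stubs.Set`, then assert on the controller's HTTP-level behaviour. A hypothetical delete test in that style (not from the original file; the stub signature is assumed):

```python
def test_delete(self):
    def stub_delete_aggregate(context, aggregate_id):
        self.assertEqual("1", aggregate_id, "aggregate_id")
    self.stubs.Set(self.controller.api, 'delete_aggregate',
                   stub_delete_aggregate)
    self.controller.delete(self.req, "1")
```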

omelkonian/cds | cds/modules/deposit/receivers.py | Python | gpl-2.0 | 2,693 | 0

# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Deposit API."""
from __future__ import absolute_import, print_function
from flask import current_app
from invenio_pidstore.models import PersistentIdentifier
from invenio_indexer.api import RecordIndexer
from invenio_deposit.receivers import \
index_deposit_after_publish as original_index_deposit_after_publish
from invenio_jsonschemas import current_jsonschemas
from .api import Project
from .tasks import datacite_register
def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):
"""Index the record after publishing."""
project_schema = current_jsonschemas.path_to_url(Project._schema)
if deposit['$schema'] == project_schema:
if action == 'publish':
# index videos (records)
pid_values = Project(data=deposit).video_ids
ids = [str(p.object_uuid)
for p in PersistentIdentifier.query.filter(
PersistentIdentifier.pid_value.in_(pid_values)).all()]
# index project (record)
_, record = deposit.fetch_published()
ids.append(str(record.id))
RecordIndexer().bulk_index(iter(ids))
else:
original_index_deposit_after_publish(sender=sender, action=action,
pid=pid, deposit=deposit)
def datacite_register_after_publish(sender, action=None, pid=None,
deposit=None):
"""Mint DOI with DataCite after the deposit has been published."""
if action == "publish" and \
current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
recid_pid, record = deposit.fetch_published()
datacite_register.delay(recid_pid.pid_value, str(record.id))
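
These receivers are signal handlers; in Invenio deployments they are typically connected to the deposit `post_action` signal. A wiring sketch (assumed, not shown in this file):

```python
from invenio_deposit.signals import post_action

post_action.connect(index_deposit_after_publish)
post_action.connect(datacite_register_after_publish)
```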

bmi-forum/bmi-pyre | pythia-0.8/packages/pyre/pyre/inventory/Configurable.py | Python | gpl-2.0 | 9,858 | 0.006898

#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.parsing.locators.Traceable import Traceable
class Configurable(Traceable):
# lifecycle management
def init(self):
"""load user input, initialize my subcomponents and call the custom initialization hook"""
# initialize my subcomponents
self.inventory.init()
# perform any last initializations
self._init()
return
def fini(self):
"""call the custom finalization hook and then shut down my subcomponents"""
self._fini()
self.inventory.fini()
return
# configuration management
def retrieveConfiguration(self, registry=None):
"""place my current configuration in the given registry"""
if registry is None:
registry = self.createRegistry()
return self.inventory.retrieveConfiguration(registry)
def initializeConfiguration(self):
"""initialize my private registry using my private settings"""
return self.inventory.initializeConfiguration()
def loadConfiguration(self, filename):
"""open the given filename and retrieve registry settings for me"""
return self.inventory.loadConfiguration(filename)
def updateConfiguration(self, registry):
"""load the user setting in <registry> into my inventory"""
return self.inventory.updateConfiguration(registry)
def applyConfiguration(self):
"""transfer user settings to my inventory"""
# apply user settings to my properties
up, uc = self.configureProperties()
unknownProperties = up
unknownComponents = uc
# apply user settings to my components
up, uc = self.configureComponents()
unknownProperties += up
unknownComponents += uc
# give descendants a chance to adjust to configuration changes
self._configure()
return (unknownProperties, unknownComponents)
def configureProperties(self):
"""set the values of all the properties and facilities in my inventory"""
up, uc = self.inventory.configureProperties()
return self._claim(up, uc)
def configureComponents(self):
"""guide my subcomponents through the configuration process"""
up, uc = self.inventory.configureComponents()
return self._claim(up, uc)
def getDepositories(self):
return self.inventory.getDepositories()
# single component management
def retrieveComponent(self, name, factory, args=(), encoding='odb', vault=[], extras=[]):
"""retrieve component <name> from the persistent store"""
return self.inventory.retrieveComponent(name, factory, args, encoding, vault, extras)
def configureComponent(self, component, registry=None):
"""guide <component> through the configuration process"""
up, uc = self.inventory.configureComponent(component, registry)
return up, uc
# curator accessors
def getCurator(self):
"""return my persistent store manager"""
return self.inventory.getCurator()
def setCurator(self, curator):
"""set my persistent store manager"""
return self.inventory.setCurator(curator)
# accessors for the inventory items by category
def properties(self):
"""return a list of all the property objects in my inventory"""
return self.inventory.properties()
def facilities(self):
"""return a list of all the facility objects in my inventory"""
return self.inventory.facilities()
def components(self):
"""return a list of all the components in my inventory"""
return self.inventory.components()
# access to trait values and descriptors by name
# used by clients that obtain a listing of these names
# and want to access the underlying objects
def getTraitValue(self, name):
try:
return self.inventory.getTraitValue(name)
except KeyError:
pass
raise AttributeError("object '%s' of type '%s' has no attribute '%s'" % (
self.name, self.__class__.__name__, name))
def getTraitDescriptor(self, name):
try:
return self.inventory.getTraitDescriptor(name)
except KeyError:
pass
raise AttributeError("object '%s' of type '%s' has no attribute '%s'" % (
self.name, self.__class__.__name__, name))
# support for the help facility
def showProperties(self):
"""print a report describing my properties"""
facilityNames = self.inventory.facilityNames()
propertyNames = self.inventory.propertyNames()
propertyNames.sort()
print "properties of %r:" % self.name
for name in propertyNames:
if name in facilityNames:
continue
# get the trait object
trait = self.inventory.getTrait(name)
# get the common trait attributes
traitType = trait.type
default = trait.default
meta = trait.meta
validator = trait.validator
try:
tip = meta['tip']
except KeyError:
tip = '(no documentation available)'
# get the trait descriptor from the instance
descriptor = self.inventory.getTraitDescriptor(name)
# extract the instance specific values
value = descriptor.value
locator = descriptor.locator
print " %s=<%s>: %s" % (name, traitType, tip)
print " default value: %r" % default
print " current value: %r, from %s" % (value, locator)
if validator:
print " validator: %s" % validator
return
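    # Added illustration (hypothetical component/property names): for a component
    # named 'solver' with one float property, the report above prints roughly:
    #
    #   properties of 'solver':
    #       tolerance=<float>: convergence tolerance
    #           default value: 1e-06
    #           current value: 1e-05, from {command line}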
def showComponents(self):
facilityNames = self.inventory.facilityNames()
facilityNames.sort()
print "facilities of %r:" % self.name
for name in facilityNames:
# get the facility object
facility = self.inventory.getTrait(name)
meta = facility.meta
try:
tip = meta['tip']
except KeyError:
tip = '(no documentation available)'
# get the trait descriptor from the instance
descriptor = self.inventory.getTraitDescriptor(name)
# extract the instance specific values
value = descriptor.value
locator = descriptor.locator
print " %s=<component name>: %s" % (name, tip)
print " current value: %r, from %s" % (value.name, locator)
print " configurable as: %s" % ", ".join(value.aliases)
return
def showUsage(self):
"""print a high level usage screen"""
propertyNames = self.inventory.propertyNames()
propertyNames.sort()
facilityNames = self.inventory.facilityNames()
facilityNames.sort()
print "component %r" % self.name
if propertyNames:
print " properties:", ", ".join(propertyNames)
if facilityNames:
print " facilities:", ",".join(facilityNames)
print "For more information:"
print " --help-properties: prints details about user settable properties"
print " --help-components: prints details about user settable facilities and components"
return
def showCurator(self):
"""print a description of the manager of my persistence store"""
self.inventory.dumpCurator()
return
# default implementations of the various factories
def createRegistry(self, name=None):
"""create a registry instance to store my configuration"""
        if name is None:
            name = self.name
        # NOTE: the source is truncated at this point; the two lines below are a
        # hedged reconstruction of the usual pyre framework implementation.
        import pyre.inventory
        return pyre.inventory.registry(name)
|
vertexproject/synapse
|
synapse/tests/test_model_gov_us.py
|
Python
|
apache-2.0
| 1,328
| 0.000753
|
import synapse.tests.utils as s_t_utils
class UsGovTest(s_t_utils.SynTest):
async def test_models_usgov_cage(self):
async with self.getTestCore() as core:
input_props = {
'street': '123 Main St',
'city': 'Smallville',
'state': 'Kansas',
'zip': 12345,
'cc': 'US',
'country': 'United States of America',
'phone0': '17035551212',
'phone1': 17035551213,
'name0': 'Kent Labs',
}
expected_props = {
'street': '123 main st',
'city': 'smallville',
'state': 'kansas',
'zip': 12345,
'cc': 'us',
'country': 'united states of america',
'phone0': '17035551212',
                'phone1': '17035551213',
'name0': 'kent labs',
}
formname = 'gov:us:cage'
valu = '7qe71'
expected_ndef = (formname, valu)
async with await core.snap() as snap:
n0 = await snap.addNode(formname, valu.upper(), input_props)
                self.eq(n0.ndef, expected_ndef)
for prop, valu in expected_props.items():
self.eq(n0.get(prop), valu)
|
AAAI-DISIM-UnivAQ/ASP_DALI
|
LindaProxy/proxyLinda.py
|
Python
|
apache-2.0
| 10,523
| 0.002946
|
'''
PyDALI proxyLinda module to encapsulate DALI agent communication
in the ASP solver case study
Licensed with Apache Public License
by AAAI Research Group
Department of Information Engineering and Computer Science and Mathematics
University of L'Aquila, ITALY
http://www.disim.univaq.it
'''
__author__ = 'AAAI-DISIM@UnivAQ'
from aspsolver import AspSolver
import threading
from lin import *
import socket
import json
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import os
import time
import select
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import protocol, reactor
AG_COORDINATOR = 'agCoordinator'
AG_METAPLANNER = 'agMetaPlanner'
# localmachine = socket.gethostname().lower()
localmachine = 'localhost'
sock = socket.socket()
sock.connect((localmachine, 3010))
# root = '.' + os.sep + os.path.dirname(__file__) + os.sep + 'web'
root = './web'
print 'myroot:', root
system_connection = {}
TMAX = 100  # seconds
def createmessage(sender, destination, typefunc, message):
m = "message(%s:3010,%s,%s:3010,%s,italian,[],%s(%s,%s))" % (localmachine, destination,
localmachine, sender,
typefunc, message, sender)
return m
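# Added illustration (derived from the format string above): with localmachine
# set to 'localhost',
#   createmessage('user', AG_COORDINATOR, 'send_message', 'new_connection(42)')
# returns:
#   "message(localhost:3010,agCoordinator,localhost:3010,user,italian,[],send_message(new_connection(42),user))"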
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def sendConsoleMessage(self, message):
console = {}
console['type'] = 'console'
console['identifier'] = self.identifier
console['message'] = message
self.write_message(json.dumps(console))
def sendPath(self, message):
console = {}
console['type'] = 'path'
console['identifier'] = self.identifier
console['message'] = message
self.write_message(json.dumps(console))
def open(self):
print 'new connection'
self.identifier = str(int(time.time()))
system_connection[self.identifier] = self
m = createmessage('user', AG_COORDINATOR, 'send_message', "new_connection(%s)" % self.identifier)
wrm = write_message(m)
sock.send(wrm)
self.sendConsoleMessage('System Ready')
def on_message(self, message):
print message
jsonmessage = json.loads(message)
# print jsonmessage
#
# m = createmessage(jsonmessage['sender'], jsonmessage['destination'], jsonmessage['typefunc'], jsonmessage['message'])
# print m
# wrm = write_message(m)
# print 'message received %s' % message
# print wrm
# sock.send(wrm)
# self.write_message(wrm)
def on_close(self):
print 'connection closed'
system_connection.pop(self.identifier)
class MainHandler(tornado.web.RequestHandler):
def get(self):
try:
with open(os.path.join(root, 'knight' + os.sep + 'index.html')) as f:
self.write(f.read())
except IOError as e:
self.write("404: Not Found")
class PlanHandler(tornado.web.RequestHandler):
def prepare(self):
if self.request.headers["Content-Type"].startswith("application/json"):
self.json_args = json.loads(self.request.body)
else:
self.json_args = None
def post(self):
identifier = self.json_args.get('identifier')
forbidden = self.json_args.get('forbidden')
mandatory = self.json_args.get('mandatory')
size = self.json_args.get('size')
f = open('dlvprogram/instance.dl', 'w')
f.write('size(%s). ' % size)
for forb in forbidden:
f.write("forbidden(%s,%s). " % (forb.get('x'), forb.get('y')))
for mark in mandatory:
f.write("must_reach(%s,%s). " % (mark.get('x'), mark.get('y')))
f.close()
m = "instanceReady(%s, %s)" % (size, len(forbidden))
m = createmessage('user', AG_COORDINATOR, 'send_message', m)
wrm = write_message(m)
sock.send(wrm)
time.sleep(0.2)
for forb in forbidden:
mess = "forbidden_of_problem([%s,%s])" % (forb.get('x'), forb.get('y'))
m = createmessage('user', AG_COORDINATOR, 'send_message', mess)
wrm = write_message(m)
sock.send(wrm)
time.sleep(0.2)
system_connection[identifier].sendConsoleMessage('Request sent to system')
class ResetHandler(tornado.web.RequestHandler):
def prepare(self):
if self.request.headers["Content-Type"].startswith("application/json"):
self.json_args = json.loads(self.request.body)
else:
self.json_args = None
def post(self):
identifier = self.json_args.get('identifier')
m = createmessage('user', AG_COORDINATOR, 'send_message', "new_connection(%s)" % identifier)
wrm = write_message(m)
sock.send(wrm)
application = tornado.web.Application([
(r'/ws', WSHandler),
(r"/", MainHandler),
(r"/api/plan", PlanHandler),
(r"/api/reset", ResetHandler),
(r"/(.*)", tornado.web.StaticFileHandler, dict(path=root)),
])
temporaryresult = None
class DALI(protocol.Protocol):
def notifyFailure(self):
message = 'problem_failed(%s)' % self.currentproblem
m = createmessage('user', AG_METAPLANNER, 'send_message', message)
wrm = write_message(m)
sock.send(wrm)
def checkPlan(self):
if not self.planner.is_alive():
print 'DLV ended.'
try:
self.planner.readresult()
global temporaryresult
temporaryresult = self.planner.getresult()
if self.currentproblem == 1:
system_connection[self.identifier].sendConsoleMessage(
'Hamiltonian Tour Problem has found a solution')
elif self.currentproblem == 2:
                    system_connection[self.identifier].sendConsoleMessage('Weak Constraint Problem has found a solution')
elif self.currentproblem == 3:
system_connection[self.identifier].sendConsoleMessage('With Blank Problem has found a solution')
message = 'new_moves_for_evaluate(%s)' % len(temporaryresult)
m = createmessage('user', AG_METAPLANNER, 'send_message', message)
wrm = write_message(m)
sock.send(wrm)
system_connection[self.identifier].sendConsoleMessage('Plan sent to MAS')
except:
self.notifyFailure()
else:
print 'DLV is alive'
dt = time.time() - self.t0
print dt, 'secs elapsed'
if dt > TMAX:
self.planner.terminate()
print 'DLV terminated'
self.notifyFailure()
threading.Timer(1, self.checkPlan).start()
def makePlan(self, problem):
path = "dlvprogram" + os.sep + "problem%s.dl" % problem
self.currentproblem = problem
self.planner = AspSolver("dlvprogram" + os.sep + "instance.dl", path)
self.planner.run()
self.t0 = time.time()
time.sleep(5)
threading.Timer(1, self.checkPlan).start()
def dataReceived(self, data):
# print 'data', data
fs = data.split('_.._')
identifier = fs[1]
self.identifier = identifier
if len(fs) > 3:
cmd = fs[2]
if cmd == 'path':
strJSONPath = fs[3]
print strJSONPath
system_connection[identifier].sendPath(strJSONPath)
elif cmd == 'as':
state = fs[3]
system_connection[identifier].sendConsoleMessage('State of agent: ' + str(state))
elif len(fs) > 2:
cmd = fs[2]
if cmd ==
|
folkrav/rts-b51
|
src/projectX/bgui/text_input.py
|
Python
|
gpl-3.0
| 15,841
| 0.031816
|
"""
This module defines the following constants:
*InputText options*
* BGUI_INPUT_NONE = 0
* BGUI_INPUT_SELECT_ALL = 1
* BGUI_INPUT_DEFAULT = BGUI_INPUT_NONE
"""
from .widget import Widget, WeakMethod, BGUI_DEFAULT, BGUI_CENTERY, \
BGUI_NO_FOCUS, BGUI_MOUSE_ACTIVE, BGUI_MOUSE_CLICK, BGUI_MOUSE_RELEASE, \
BGUI_NO_NORMALIZE
from .key_defs import *
from .label import Label
from .frame import Frame
import time
# InputText options
BGUI_INPUT_NONE = 0
BGUI_INPUT_SELECT_ALL = 1
BGUI_INPUT_DEFAULT = BGUI_INPUT_NONE
class TextInput(Widget):
"""Widget for getting text input"""
theme_section = 'TextInput'
theme_options = {
'TextColor': (1, 1, 1, 1),
'FrameColor': (0, 0, 0, 0),
'BorderSize': 0,
'BorderColor': (0, 0, 0, 0),
'HighlightColor': (0.6, 0.6, 0.6, 0.5),
'InactiveTextColor': (1, 1, 1, 1),
'InactiveFrameColor': (0, 0, 0, 0),
'InactiveBorderSize': 0,
'InactiveBorderColor': (0, 0, 0, 0),
'InactiveHighlightColor': (0.6, 0.6, 0.6, 0.5),
'LabelSubTheme': '',
}
def __init__(self, parent, name=None, text="", prefix="", font=None, pt_size=None, color=None,
aspect=None, size=[1, 1], pos=[0, 0], sub_theme='', input_options=BGUI_INPUT_DEFAULT, options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param text: the text to display (this can be changed later via the text property)
        :param prefix: prefix text displayed before user input, cannot be edited by user (this can be changed later via the prefix property)
:param font: the font to use
:param pt_size: the point size of the text to draw
:param color: color of the font for this widget
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
self.text_prefix = prefix
self.pos = len(text)
self.input_options = input_options
self.colors = {}
#create widgets
self.frame = Frame(self, size=[1, 1], options=BGUI_NO_FOCUS | BGUI_DEFAULT | BGUI_CENTERY)
self.highlight = Frame(self, size=self.frame.size, border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.cursor = Frame(self, size=[1, 1], border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.label = Label(self, text=text, font=font, pt_size=pt_size, sub_theme=self.theme['LabelSubTheme'], options=BGUI_NO_FOCUS | BGUI_DEFAULT)
#Color and setting initialization
self.colormode = 0
theme = self.theme
self.colors["text"] = [None, None]
self.colors["text"][0] = theme['InactiveTextColor']
self.colors["text"][1] = theme['TextColor']
self.colors["frame"] = [None, None]
self.colors["frame"][0] = theme['InactiveFrameColor']
self.colors["frame"][1] = theme['FrameColor']
self.colors["border"] = [None, None]
self.colors["border"][0] = theme['InactiveBorderColor']
self.colors["border"][1] = theme['BorderColor']
self.colors["highlight"] = [None, None]
self.colors["highlight"][0] = theme['HighlightColor']
self.colors["highlight"][1] = theme['HighlightColor']
self.border_size = [None, None]
self.border_size[0] = theme['InactiveBorderSize']
self.border_size[1] = theme['BorderSize']
self.swapcolors(0)
#gauge height of the drawn font
fd = self.system.textlib.dimensions(self.label.fontid, "Egj/}|^,")
py = .5 - (fd[1] / self.size[1] / 2)
px = fd[1] / self.size[0] - fd[1] / 1.5 / self.size[0]
self.label.position = [px, py]
self.fd = self.system.textlib.dimensions(self.label.fontid, self.text_prefix)[0] + fd[1] / 3.2
self.frame.size = [1, 1]
self.frame.position = [0, 0]
self.slice = [len(text), len(text)]
self.slice_direction = 0
self.mouse_slice_start = 0
self.mouse_slice_end = 0
#create the char width list
self._update_char_widths()
#initial call to update_selection
self.selection_refresh = 1
self.just_activated = 0
self._active = 0 # internal active state to avoid confusion from parent active chain
#blinking cursor
self.time = time.time()
#double/triple click functionality
self.click_counter = 0
self.single_click_time = 0.0
self.double_click_time = 0.0
# On Enter callback
self._on_enter_key = None
@property
def text(self):
return self.label.text
@text.setter
def text(self, value):
#setter intended for external access, internal changes can just change self.label.text
self.label.text = value
self._update_char_widths()
self.slice = [0, 0]
self.update_selection()
@property
def prefix(self):
return self.text_prefix
@prefix.setter
def prefix(self, value):
        # re-measure the font height locally, the same way __init__ does
        fd = self.system.textlib.dimensions(self.label.fontid, "Egj/}|^,")
        self.fd = self.system.textlib.dimensions(self.label.fontid, value)[0] + fd[1] / 3.2
self.text_prefix = value
@property
def on_enter_key(self):
"""A callback for when the enter key is pressed while the TextInput has focus"""
return self._on_enter_key
@on_enter_key.setter
def on_enter_key(self, value):
self._on_enter_key = WeakMethod(value)
#utility functions
def _update_char_widths(self):
self.char_widths = []
for char in self.text:
self.char_widths.append(self.system.textlib.dimensions(self.label.fontid, char * 20)[0] / 20)
def select_all(self):
"""Change the selection to include all of the text"""
self.slice = [0, len(self.text)]
self.update_selection()
def select_none(self):
"""Change the selection to include none of the text"""
self.slice = [0, 0]
self.update_selection()
#Activation Code
def activate(self):
if self.frozen:
return
self.system.focused_widget = self
self.swapcolors(1)
self.colormode = 1
if self.input_options & BGUI_INPUT_SELECT_ALL:
self.slice = [0, len(self.text)]
self.slice_direction = -1
self.just_activated = 1
self._active = 1
def deactivate(self):
self.system.focused_widget = self.system
self.swapcolors(0)
self.colormode = 0
self.just_activated = 0
self._active = 0
def swapcolors(self, state=0): # 0 inactive 1 active
self.frame.colors = [self.colors["frame"][state]] * 4
self.frame.border = self.border_size[state]
self.frame.border_color = self.colors["border"][state]
self.highlight.colors = [self.colors["highlight"][state]] * 4
self.label.color = self.colors["text"][state]
if state == 0:
self.cursor.colors = [[0.0, 0.0, 0.0, 0.0]] * 4
else:
self.cursor.colors = [self.colors["text"][state]] * 4
#Selection Code
def update_selection(self):
left = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[0]])[0]
right = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[1]])[0]
self.highlight.position = [left, 1]
self.highlight.size = [right - left, self.frame.size[1] * .8]
if self.slice_direction in [0, -1]:
self.cursor.position = [left, 1]
else:
self.cursor.position = [right, 1]
self.cursor.size = [2, self.frame.size[1] * .8]
def find_mouse_slice(self, pos):
cmc = self.calc_mouse_cursor(pos)
mss = self.mouse_slice_start
self.mouse_slice_end = cmc
if cmc < mss:
self.slice_direction = -1
self.slice = [self.mouse_slice_end, self.mouse_slice_start]
elif cmc > mss:
self.slice_direction = 1
self.slice = [self.mouse_slice_start, self.mouse_slice_end]
else:
self.slice_direction = 0
self.slice = [self.mouse_slice_start, self.mouse_slice_start]
self.selection_refresh = 1
def calc_mouse_cursor(self, pos):
adj_pos = pos[0] - (self.position[0] + self.fd)
find_slice = 0
i = 0
for entry in self.char_widths:
if find_slice + entry > adj_pos:
if abs((find_slice + entry) - adj_pos) >= abs(adj_pos - find_slice):
return i
else:
return i + 1
else:
find_slice += entry
i += 1
self.time = time.time() - 0.501
return i
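    # Added sketch of the lookup above (hypothetical widths): with char_widths
    # [5, 5, 5] and a click 7px past the text origin, the loop stops at the
    # second character (5 + 5 > 7) and returns index 1, the boundary nearer
    # to the click.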
def _handle_mouse(self, pos, event):
"""Extend function's behaviour by providing focus to unfrozen inactive TextInput,
swapping out colors.
"""
if self.frozen:
return
if event == BGUI_MOUSE_CLICK
|
airbnb/streamalert
|
streamalert_cli/athena/helpers.py
|
Python
|
apache-2.0
| 9,745
| 0.00236
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from streamalert.shared.artifact_extractor import Artifact
from streamalert.shared.firehose import FirehoseClient
from streamalert.shared.logger import get_logger
from streamalert.shared.alert import Alert
from streamalert_cli.helpers import record_to_schema
LOGGER = get_logger(__name__)
PARTITION_PARTS = re.compile(
r'dt=(?P<year>\d{4})\-(?P<month>\d{2})\-(?P<day>\d{2})\-(?P<hour>\d{2})')
# The returned partition from the SHOW PARTITIONS command is dt=YYYY-MM-DD-HH,
# but when re-creating new partitions this value must be quoted
PARTITION_STMT = ("PARTITION (dt = '{year}-{month}-{day}-{hour}') "
"LOCATION 's3://{bucket}/{table_name}/{year}/{month}/{day}/{hour}'")
# How to map log schema types to Athena/Hive types
SCHEMA_TYPE_MAPPING = {
'string': 'string',
'integer': 'bigint',
'boolean': 'boolean',
'float': 'decimal(10,3)',
dict: 'map<string,string>',
list: 'array<string>'
}
# Athena query statement length limit
MAX_QUERY_LENGTH = 262144
def add_partition_statements(partitions, bucket, table_name):
"""Generate ALTER TABLE commands from existing partitions. It wil yield Athena
statement string(s), the length of each string should be less than Athena query
statement length limit, 262144 bytes.
https://docs.aws.amazon.com/athena/latest/ug/service-limits.html
Args:
partitions (set): The unique set of partitions gathered from Athena
bucket (str): The bucket name
table_name (str): The name of the Athena table
Yields:
string: The ALTER TABLE statements to add the new partitions
"""
# Each add partition statement starting with "ALTER TABLE"
initial_statement = 'ALTER TABLE {} ADD IF NOT EXISTS'.format(table_name)
initial_statement_len = len(initial_statement)
# The statement will be stored in a list of string format before join into a string
statement = [initial_statement]
statement_len = initial_statement_len
fmt_values = {
'bucket': bucket,
'table_name': table_name
}
for partition in sorted(partitions):
parts = PARTITION_PARTS.match(partition)
if not parts:
continue
fmt_values.update(parts.groupdict())
partition_stmt = PARTITION_STMT.format(**fmt_values)
partition_stmt_len = len(partition_stmt)
        # A space is added between substrings when joining the whole statement
        space_count = len(statement)
        # Monitor the length of the whole statement and make sure it won't exceed the limit
        if statement_len + partition_stmt_len + space_count >= MAX_QUERY_LENGTH:
            # If the whole statement is about to exceed the limit, yield it
            # and reset for the remaining partitions
yield ' '.join(statement)
statement = [initial_statement]
statement_len = initial_statement_len
statement_len += partition_stmt_len
statement.append(partition_stmt)
yield ' '.join(statement)
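# Added usage sketch (hypothetical bucket/table/partition values):
#
#   partitions = {'dt=2020-01-01-00', 'dt=2020-01-01-01'}
#   for stmt in add_partition_statements(partitions, 'my-data-bucket', 'cloudwatch_logs'):
#       print(stmt)
#
# Each yielded statement looks like:
#   ALTER TABLE cloudwatch_logs ADD IF NOT EXISTS PARTITION (dt = '2020-01-01-00')
#   LOCATION 's3://my-data-bucket/cloudwatch_logs/2020/01/01/00' ...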
def logs_schema_to_athena_schema(log_schema, ddl_statement=True):
"""Convert streamalert log schema to athena schema
Args:
log_schema (dict): StreamAlert log schema object.
ddl_statement (bool): Indicate if the Athena table created by Athena
DDL query or terraform aws_glue_catalog_table resource
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
athena_schema = {}
for key_name, key_type in log_schema.items():
if ddl_statement:
# Backticks are needed for backward compatibility when creating Athena
# table via Athena DDL query.
key_name = '`{}`'.format(key_name)
if key_type == {}:
# For empty dicts
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[dict]
elif key_type == []:
# For empty array
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[list]
elif isinstance(key_type, dict):
# For recursion
athena_schema[key_name] = logs_schema_to_athena_schema(key_type, ddl_statement)
else:
athena_schema[key_name] = SCHEMA_TYPE_MAPPING[key_type]
return athena_schema
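# Added illustration (hypothetical log schema): with ddl_statement=True,
#   logs_schema_to_athena_schema({'host': 'string', 'count': 'integer', 'tags': []})
# returns
#   {'`host`': 'string', '`count`': 'bigint', '`tags`': 'array<string>'}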
def unique_values_from_query(query_result):
"""Simplify Athena query results into a set of values.
Useful for listing tables, partitions, databases, enable_metrics
Args:
query_result (dict): The result of run_athena_query
Returns:
set: Unique values from the query result
"""
return {
value
for row in query_result['ResultSet']['Rows'] for result in row['Data']
for value in list(result.values())
}
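# Added illustration (hypothetical Athena response shape):
#   unique_values_from_query({'ResultSet': {'Rows': [
#       {'Data': [{'VarCharValue': 'alerts'}]},
#       {'Data': [{'VarCharValue': 'cloudwatch_logs'}]}]}})
# returns {'alerts', 'cloudwatch_logs'}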
def format_schema_tf(schema):
"""Format schema for an Athena table for terraform.
Args:
schema (dict): Equivalent Athena schema used for generating create table statement
Returns:
formatted_schema (list(tuple))
"""
# Construct the main Athena Schema
formatted_schema = []
for key_name in sorted(schema.keys()):
key_type = schema[key_name]
if isinstance(key_type, str):
formatted_schema.append((key_name.lower(), key_type))
# Account for nested structs
elif isinstance(key_type, dict):
struct_schema = ','.join(
'{0}:{1}'.format(sub_key.lower(), key_type[sub_key])
for sub_key in sorted(key_type.keys())
)
formatted_schema.append((key_name.lower(), 'struct<{}>'.format(struct_schema)))
return formatted_schema
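# Added illustration (hypothetical schema):
#   format_schema_tf({'b': 'string', 'a': {'y': 'bigint', 'x': 'string'}})
# yields [('a', 'struct<x:string,y:bigint>'), ('b', 'string')] -- keys are
# sorted and nested dicts are flattened into Hive struct syntax.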
def generate_alerts_table_schema():
"""Generate the schema for alerts table in terraform by using a fake alert
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
alert = Alert('temp_rule_name', {}, {})
output = alert.output_dict()
schema = record_to_schema(output)
athena_schema = logs_schema_to_athena_schema(schema, False)
return format_schema_tf(athena_schema)
def generate_data_table_schema(config, table, schema_override=None):
"""Generate the schema for data table in terraform
Args:
config (CLIConfig): Loaded StreamAlert config
table (string): The name of data table
Returns:
athena_schema (dict): Equivalent Athena schema used for generating create table statement
"""
enabled_logs = FirehoseClient.load_enabled_log_sources(
config['global']['infrastructure']['firehose'],
config['logs']
)
# Convert special characters in schema name to underscores
sanitized_table_name = FirehoseClient.sanitized_value(table)
# Check that the log type is enabled via Firehose
if sanitized_table_name not in enabled_logs:
LOGGER.error('Table name %s missing from configuration or '
'is not enabled.', sanitized_table_name)
return None
log_info = config['logs'][enabled_logs.get(sanitized_table_name)]
schema = dict(log_info['schema'])
sanitized_schema = FirehoseClient.sanitize_keys(schema)
athena_schema = logs_schema_to_athena_schema(sanitized_schema, False)
# Add envelope keys to Athena Schema
configuration_options = log_info.get('configuration')
if configuration_options:
envelope_keys = configuration_options.get('envelope_keys')
if envelope_keys:
sanitized_envelope_key_schema = FirehoseClient.sanitize_keys(envelope_keys)
# Note: this key is wrapped in backticks to be Hive compliant
|
helldorado/ansible
|
lib/ansible/modules/network/f5/bigip_dns_cache_resolver.py
|
Python
|
gpl-3.0
| 16,802
| 0.000595
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_dns_cache_resolver
short_description: Manage DNS resolver cache configurations on BIG-IP
description:
- Manage DNS resolver cache configurations on BIG-IP.
version_added: 2.8
options:
name:
description:
- Specifies the name of the cache.
type: str
required: True
answer_default_zones:
description:
- Specifies whether the system answers DNS queries for the default
zones localhost, reverse 127.0.0.1 and ::1, and AS112.
- When creating a new cache resolver, if this parameter is not specified, the
default is C(no).
type: bool
forward_zones:
description:
- Forward zones associated with the cache.
- To remove all forward zones, specify a value of C(none).
suboptions:
name:
description:
- Specifies a FQDN for the forward zone.
type: str
required: True
nameservers:
description:
- Specifies the IP address and service port of a recursive
nameserver that answers DNS queries for the zone when the
response cannot be found in the DNS cache.
suboptions:
address:
description:
- Address of recursive nameserver.
type: str
port:
description:
- Port of recursive nameserver.
- When specifying new nameservers, if this value is not provided, the
default is C(53).
type: int
type: list
type: raw
route_domain:
description:
- Specifies the route domain the resolver uses for outbound traffic.
type: str
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a DNS resolver cache
  bigip_dns_cache_resolver:
name: foo
answer_default_zones: yes
forward_zones:
- name: foo.bar.com
nameservers:
- address: 1.2.3.4
port: 53
- address: 5.6.7.8
route_domain: 0
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: str
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'routeDomain': 'route_domain',
'answerDefaultZones': 'answer_default_zones',
'forwardZones': 'forward_zones',
}
api_attributes = [
'routeDomain',
'answerDefaultZones',
'forwardZones',
]
returnables = [
'route_domain',
'answer_default_zones',
'forward_zones',
]
updatables = [
'route_domain',
'answer_default_zones',
'forward_zones',
]
@property
    def route_domain(self):
if self._values['route_domain'] is None:
return None
return fq_name(self.partition, self._values['route_domain'])
@property
def answer_default_zones(self):
return flatten_boolean(self._values['answer_default_zones'])
class ApiParameters(Parameters):
@property
def forward_zones(self):
if self._values['forward_zones'] is None:
return None
result = []
for x in self._values['forward_zones']:
tmp = dict(
name=x['name'],
nameservers=[]
)
if 'nameservers' in x:
tmp['nameservers'] = [y['name'] for y in x['nameservers']]
tmp['nameservers'].sort()
result.append(tmp)
return result
class ModuleParameters(Parameters):
@property
def forward_zones(self):
if self._values['forward_zones'] is None:
return None
elif self._values['forward_zones'] in ['', 'none']:
return ''
result = []
for x in self._values['forward_zones']:
if 'name' not in x:
raise F5ModuleError(
"A 'name' key must be provided when specifying a list of forward zones."
)
tmp = dict(
name=x['name'],
nameservers=[]
)
if 'nameservers' in x:
for ns in x['nameservers']:
if 'address' not in ns:
raise F5ModuleError(
"An 'address' key must be provided when specifying a list of forward zone nameservers."
)
item = '{0}:{1}'.format(ns['address'], ns.get('port', 53))
tmp['nameservers'].append(item)
tmp['nameservers'].sort()
result.append(tmp)
return result
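    # Added illustration (hypothetical module input): the property above turns
    #   [{'name': 'foo.bar.com', 'nameservers': [{'address': '1.2.3.4'}]}]
    # into
    #   [{'name': 'foo.bar.com', 'nameservers': ['1.2.3.4:53']}]
    # because an omitted port defaults to 53.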
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def forward_zones(self):
if self._values['forward_zones'] is None:
return None
result = []
for x in self._values['forward_zones']:
tmp = {'name': x['name']}
if 'nameservers' in x:
tmp['nameservers'] = []
for y in x['nameservers']:
tmp['nameservers'].append(dict(name=y))
result.append(tmp)
return result
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def forward_zones(self):
        if self.want.forward_zones is None:
            return None
|
Hiyorimi/scikit-image
|
skimage/data/_binary_blobs.py
|
Python
|
bsd-3-clause
| 1,995
| 0
|
import numpy as np
from ..filters import gaussian
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,
volume_fraction=0.5, seed=None):
"""
Generate synthetic binary image with several rounded blob-like objects.
Parameters
----------
length : int, optional
Linear size of output image.
blob_size_fraction : float, optional
        Typical linear size of blob, as a fraction of ``length``, should be
smaller than 1.
n_dim : int, optional
Number of dimensions of output image.
volume_fraction : float, default 0.5
Fraction of image pixels covered by the blobs (where the output is 1).
Should be in [0, 1].
seed : int, optional
        Seed to initialize the random number generator.
Returns
-------
blobs : ndarray of bools
Output binary image
Examples
--------
>>> from skimage import data
>>> data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)
array([[ True, False, True, True, True],
[ True, True, True, False, True],
[False, True, False, True, True],
[ True, False, False, True, True],
[ True, False, False, False, True]], dtype=bool)
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
>>> # Finer structures
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
>>> # Blobs cover a smaller volume fraction of the image
>>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
"""
rs = np.random.RandomState(seed)
shape = tuple([length] * n_dim)
mask = np.zeros(shape)
n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)
points = (length * rs.rand(n_dim, n_pts)).astype(np.int)
mask[[indices for indices in points]] = 1
mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
threshold = np.percentile(mask, 100 * (1 - volume_fraction))
return np.logical_not(mask < threshold)
|
npotts/dotfiles
|
ipython/profile_default/ipython_config.py
|
Python
|
unlicense
| 38,662
| 0.005276
|
c = get_config()
# NEVER NAG ME
c.TerminalInteractiveShell.confirm_exit = False
# Configuration file for ipython.
# ------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
# ------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
# Default: ''
# c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# Default: True
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
# Default: []
# c.InteractiveShellApp.exec_lines = []
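## e.g., to run imports on every startup one might uncomment and adapt
## (hypothetical values):
# c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import os']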
## A list of dotted module names of IPython extensions to load.
# Default: []
# c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
# Default: ''
# c.InteractiveShellApp.extra_extension = ''
## A file to be run
# Default: ''
# c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
#  Choices: any of ['asyncio', 'glut', 'gtk', 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# Default: True
# c.InteractiveShellApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path. When
# False, the current working directory is added to sys.path, allowing imports of
# modules defined in the current directory.
# Default: False
# c.InteractiveShellApp.ignore_cwd = False
## Configure matplotlib for interactive use with the default matplotlib backend.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
# Default: ''
# c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# Choices: any of ['auto', 'agg', 'gtk', 'gtk3', 'inline', 'ipympl', 'nbagg', 'notebook', 'osx', 'pdf', 'ps', 'qt', 'qt4', 'qt5', 'svg', 'tk', 'widget', 'wx'] (case-insensitive) or None
# Default: None
# c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# Default: True
# c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
# Default: False
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# ------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
# Default: '%Y-%m-%d %H:%M:%S'
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# Default: '[%(name)s]%(highlevel)s %(message)s'
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
# Default: 30
# c.Application.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# Default: False
# c.Application.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# Default: False
# c.Application.show_config_json = False
# ------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
# ------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
# Default: False
# c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# Default: False
# c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# Default: ''
# c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# Default: ''
# c.BaseIPythonApplication.ipython_dir = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.BaseIPythonApplication.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.BaseIPythonApplication.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.BaseIPythonApplication.log_level = 30
## Whether to overwrite existing config files when copying
# Default: False
# c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# Default: 'default'
# c.BaseIPythonApplication.profile = 'default'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.BaseIPythonApplication.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.BaseIPythonApplication.show_config_json = False
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# Default: False
# c.BaseIPythonApplication.verbose_crash = False
# ------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp) configuration
# ------------------------------------------------------------------------------
## Execute the given command string.
# See also: InteractiveShellApp.code_to_run
# c.TerminalIPythonApp.code_to_run = ''
## Whether to install the default config files into the profile dir.
# See also: BaseIPythonApplication.copy_config_files
# c.TerminalIPythonApp.copy_config_files = False
## Whether to display a banner upon starting IPython.
# Default: True
# c.TerminalIPythonApp.display_banner = True
## Run the file referenced by the PYTHONSTARTUP environment
# See also: InteractiveShellApp.exec_PYTHONSTARTUP
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# See also: InteractiveShellApp.exec_files
# c.TerminalIPythonApp.exec_files = []
## lines of code to run at IPython startup.
# See also: InteractiveShellApp.exec_lines
# c.TerminalIPythonApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
# See also: InteractiveShellApp.extensions
|
Distrotech/scons
|
build/scons/engine/SCons/Tool/zip.py
|
Python
|
mit
| 3,328
| 0.005709
|
"""SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 2014/01/04 01:12:18 root"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
try:
import zipfile
internal_zip = 1
except ImportError:
internal_zip = 0
if internal_zip:
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
zf.write(path)
else:
zf.write(str(s))
zf.close()
else:
zipcompression = 0
zip = "$ZIP $ZIPFLAGS ${TARGET.abspath} $SOURCES"
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for zip to an Environment."""
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
def exists(env):
return internal_zip or env.Detect('zip')
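# Added usage sketch (hypothetical SConstruct, not part of this tool):
#
#   env = Environment(tools=['zip'])
#   env.Zip('archive.zip', ['src', 'README'])
#
# When the Python zipfile module is importable, ZIPCOMPRESSION defaults to
# zipfile.ZIP_DEFLATED and the internal zip() action runs; otherwise the
# external "$ZIP $ZIPFLAGS ..." command line is used instead.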
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
XXN/pwb-custom
|
wd-videogame.descriptions.py
|
Python
|
mit
| 7,056
| 0.004789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) XXN, 2017
#
from __future__ import absolute_import, unicode_literals
import os, re, sys, time
import pywikibot
def main():
site = pywikibot.Site('wikidata', 'wikidata')
repo = site.data_repository()
mylist = \
[
u"Q3001778",
u"Q37115",
u"Q55246",
u"Q55563",
u"Q55611",
]
targetlangs = ['bg', 'bs', 'ca', 'cs', 'da', 'de', 'en', 'en-ca', 'en-gb', 'el', 'es', 'fi', 'fr', 'hr', 'hu', 'it', 'lv', 'mk', 'nb', 'nl', 'nn', 'pl', 'pt', 'pt-br', 'ro', 'ru', 'sco', 'sh', 'sk', 'sl', 'sr', 'sv', 'tr', 'uk', ]
for q in mylist[0:]:
item = pywikibot.ItemPage(repo, q)
item.get()
if 'P577' in item.claims and item.claims['P31'][0].getTarget().title() == 'Q7889':
try:
myclaim = item.get()['claims']['P577'][0].getTarget()
dic = myclaim.toWikibase()
Text = myclaim.toTimestr()
if dic['precision'] >= 9:
year = str(int(Text[8:12]))
for targetlang in targetlangs:
translations = {
'bg': 'видеоигра от ~YEAR~ година',
'bs': 'videoigra',
'ca': 'videojoc de ~YEAR~',
'cs': 'videohra z roku ~YEAR~',
'da': 'computerspil fra ~YEAR~',
'de': 'Videospiel',
'en': '~YEAR~ video game',
'en-ca': '~YEAR~ video game',
'en-gb': '~YEAR~ video game',
'el': 'βιντεοπαιχνίδι του ~YEAR~',
'es': 'videojuego de ~YEAR~',
'fi': '~YEAR~ videopeli',
'fr': 'jeu vidéo de ~YEAR~',
'hr': 'videoigra',
'hu': 'videojáték',
'it': 'videogioco del ~YEAR~',
'lv': 'videospēle',
'mk': 'видеоигра од ~YEAR~ година',
'nb': 'videospill fra ~YEAR~',
'nl': 'computerspel uit ~YEAR~',
'nn': 'dataspel frå ~YEAR~',
                            'pl': 'gra wideo z ~YEAR~ roku',
'pt': 'vídeojogo de ~YEAR~',
'pt-br': 'jogo eletrônico de ~YEAR~',
'ro': 'joc video din ~YEAR~',
'ru': 'видеоигра ~YEAR~ года',
'sco': 'video gemme',
'sh': 'videoigra',
'sk': 'počítačová hra z ~YEAR~',
'sl': 'videoigra iz leta ~YEAR~',
'sr': 'видео-игра',
'sv': 'datorspel från ~YEAR~',
'tr': '~YEAR~ video oyunu',
'uk': 'відеогра ~YEAR~ року',
}
descriptions = item.descriptions
addedlangs = []
for lang in translations.keys():
if not lang in descriptions.keys():
translation = translations[lang]
translation = translation.replace('~YEAR~', str(year))
descriptions[lang] = translation
addedlangs.append(lang)
data = { 'descriptions': descriptions }
addedlangs.sort()
if addedlangs:
summary = 'Bot: Adding descriptions (%s languages): %s' % (len(addedlangs), ', '.join(addedlangs))
try:
item.editEntity(data, summary=summary)
pywikibot.output(u'{} - \03{{lightgreen}}{}\03{{default}}'.format(q,translations['en'].replace('~YEAR~', str(year))))
except:
pywikibot.output('Error while saving {}'.format(q))
continue
except:
continue
else:#no P577
for targetlang in targetlangs:
translations = {
'bg': 'видеоигра',
'bs': 'videoigra',
'ca': 'videojoc',
'cs': 'videohra',
'da': 'computerspil',
'de': 'Videospiel',
'en': 'video game',
'en-ca': 'video game',
'en-gb': 'video game',
'el': 'βιντεοπαιχνίδι',
'es': 'videojuego',
'fi': 'videopeli',
'fr': 'jeu vidéo',
'hr': 'videoigra',
'hu': 'videojáték',
'it': 'videogioco',
'lv': 'videospēle',
'mk': 'видеоигра',
'nb': 'videospill',
'nn': 'dataspel',
'nl': 'computerspel',
'pl': 'gra wideo',
'pt': 'vídeojogo',
'pt-br': 'jogo eletrônico',
'ro': 'joc video',
'ru': 'видеоигра',
'sco': 'video gemme',
'sh': 'videoigra',
'sk': 'počítačová hra',
'sl': 'videoigra',
'sr': 'видео-игра',
'sv': 'datorspel',
'tr': 'video oyunu',
'uk': 'відеогра',
}
descriptions = item.descriptions
addedlangs = []
for lang in translations.keys():
if not lang in descriptions.keys():
translation = translations[lang]
descriptions[lang] = translation
addedlangs.append(lang)
data = { 'descriptions': descriptions }
addedlangs.sort()
if addedlangs:
summary = 'Bot: Adding descriptions (%s languages): %s' % (len(addedlangs), ', '.join(addedlangs))
print(summary)
try:
item.editEntity(data, summary=summary)
pywikibot.output(u'{} - \03{{lightgreen}}{}\03{{default}}'.format(q,translations['en']))
except:
pywikibot.output('Error while saving {}'.format(q))
continue
if __name__ == "__main__":
main()
|
GirlsCodePy/girlscode-coursebuilder
|
modules/math/math.py
|
Python
|
gpl-3.0
| 4,047
| 0.000494
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = [
'Neema Kotonya (neemak@google.com)',
'Gun Pinyo (gunpinyo@google.com)'
]
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
from models import services
from modules.math import messages
MATH_MODULE_URI = '/modules/math'
RESOURCES_URI = MATH_MODULE_URI + '/resources'
MATHJAX_URI = MATH_MODULE_URI + '/MathJax'
class MathTag(tags.ContextAwareTag):
"""Custom tag for mathematical notation using MathJax."""
binding_name = 'gcb-math'
@classmethod
def name(cls):
return 'Mathematical Formula'
@classmethod
    def vendor(cls):
return 'gcb'
def render(self, node, context):
math_script = cElementTree.XML('<script/>')
# The formula is "text" type in the schema and so is presented in the
# tag's body.
math_script.text = node.text
input_type = node.attrib.get('input_type')
if input_type == 'MML':
math_script.set('type', 'math/mml')
else:
math_script.set('type', 'math/tex')
return math_script
def rollup_header_footer(self, context):
"""Include MathJax library only when a math tag is present."""
header = tags.html_string_to_element_tree("""
<script src="%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>""" % MATHJAX_URI)
footer = tags.html_string_to_element_tree('')
return (header, footer)
def get_icon_url(self):
return RESOURCES_URI + '/math.png'
def get_schema(self, unused_handler):
reg = schema_fields.FieldRegistry(MathTag.name())
reg.add_property(
schema_fields.SchemaField(
'input_type', 'Type', 'string', i18n=False,
optional=True,
select_data=[('TeX', 'TeX'), ('MML', 'MathML')],
extra_schema_dict_values={'value': 'TeX'},
description=services.help_urls.make_learn_more_message(
messages.RTE_MATH_TYPE, 'math:math:input_type')))
reg.add_property(
schema_fields.SchemaField(
'formula', 'Mathematical Formula', 'text',
optional=True,
description=messages.RTE_MATH_MATHEMATICAL_FORMULA))
return reg
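# Added illustration (hedged; the RTE may serialize attributes differently):
# a lesson body containing
#   <gcb-math input_type="TeX">E = mc^2</gcb-math>
# is rendered by MathTag.render() as a <script type="math/tex"> element,
# which the MathJax header script then typesets.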
custom_module = None
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(MathTag.binding_name)
def on_module_enable():
tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)
global_routes = [
(RESOURCES_URI + '/.*', tags.ResourcesHandler),
(MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-fonts-2.3.0.zip'))),
(MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-2.3.0.zip')))]
namespaced_routes = []
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Mathematical Formula Display',
'Provides a custom tag to embed mathematical formulas using TeX or MML.'
, global_routes, namespaced_routes,
notify_module_disabled=on_module_disable,
notify_module_enabled=on_module_enable)
return custom_module
|
henrysher/opslib
|
opslib/__init__.py
|
Python
|
apache-2.0
| 1,905
| 0.000525
|
"""
ICS Ops Common Library
"""
import os
from os.path import dirname
from os.path import realpath
from os.path import join as pathjoin
import boto
__version__ = "0.0.3.3"
__release__ = "alpha"
CONFIG = "opslib.ini"
LOG_NAME = "opslib"
AWS_ACCESS_KEY_NAME = "aws_access_key_id"
AWS_SECRET_KEY_NAME = "aws_secret_access_key"
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
# Default credential file will be located at current folder
if filepath is None or not os.path.exists(filepath):
pwdpath = dirname(realpath(__file__))
        filepath = pathjoin(pwdpath, CONFIG)
if enable_boto:
# Initialize credentials for boto
from boto.pyami.config import Config
boto.config = Config(filepath)
        access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
# FIXME: a trick when the value is empty
if not access_key or not secret_key:
boto.config.remove_section('Credentials')
if enable_botocore:
# Initialize credentials for botocore
import botocore.credentials
if access_key and secret_key:
def get_credentials(session, metadata=None):
return botocore.credentials.Credentials(access_key, secret_key)
botocore.credentials.get_credentials = get_credentials
if access_key and secret_key:
return access_key, secret_key
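# Added usage sketch (hypothetical path; assumes the file has a [Credentials]
# section with both keys set):
#
#   creds = init_config('/etc/opslib/opslib.ini', enable_boto=True)
#   if creds:
#       access_key, secret_key = creds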
def init_logging(name=LOG_NAME, logfile=None,
console=False, loglevel="INFO",
enable_boto_log=False):
global logger
from opslib.icslog import IcsLog
logger = IcsLog(name, level=loglevel, console=console, logfile=logfile)
if enable_boto_log:
boto.log = logger
return logger
init_config()
init_logging()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
mpharrigan/msmbuilder
|
MSMBuilder/clustering.py
|
Python
|
gpl-2.0
| 49,464
| 0.002628
|
from __future__ import print_function, division, absolute_import
from mdtraj.utils.six import PY2
from mdtraj.utils.six.moves import xrange
import sys
import types
import random
import numpy as np
try:
import fastcluster
except ImportError:
pass
import scipy.cluster.hierarchy
import mdtraj as md
from msmbuilder import metrics
from mdtraj import io
from msmbuilder.utils import uneven_zip
from multiprocessing import Pool
try:
from deap import dtm # has parallel map() implementation via mpi
except:
pass
import logging
logger = logging.getLogger(__name__)
#####################################################################
# #
# Begin Helper Functions #
# #
#####################################################################
def concatenate_trajectories(trajectories):
"""Concatenate a list of trajectories into a single long trajectory
Parameters
----------
trajectories : list
list of mdtraj.Trajectory object
Returns
-------
concat_traj : mdtraj.Trajectory
"""
assert len(trajectories) > 0, 'Please supply a list of trajectories'
concat_traj = trajectories[0]
for i in xrange(1, len(trajectories)):
# Use mdtraj operator overloading
concat_traj += trajectories[i]
return concat_traj
def concatenate_prep_trajectories(prep_trajectories, metric):
"""Concatenate a list of prepared trajectories and
create a single prepared_trajectory.
This is non-trivial because the RMSD and LPRMSD prepared
trajectories are not np.ndarrays ...
Parameters
----------
prep_trajectories : list
list of prepared trajectories
metric : msmbuilder.metrics.AbstractDistance metric subclass instance
metric used to prepare the trajectories. Needed for RMSD and LPRMSD
since concatenation requires recreating the prepared trajectory
Returns
-------
ptraj : prepared_trajectory
prepared trajectory instance, like that returned from
metric.prepare_trajectory
"""
if isinstance(prep_trajectories[0], np.ndarray):
ptraj = np.concatenate(prep_trajectories)
    elif isinstance(prep_trajectories[0], metrics.RMSD.TheoData):
xyz = np.concatenate([p.XYZData[:, :, :p.NumAtoms] for p in prep_trajectories])
xyz = xyz.transpose((0, 2, 1))
ptraj = metric.TheoData(xyz)
else:
raise Exception("unrecognized prepared trajectory."
"NOTE: LPRMSD currently unsupported. Email schwancr@stanford.edu")
return ptraj
def unconcatenate_trajectory(trajectory, lengths):
"""Take a single trajectory that was created by concatenating seperate
trajectories and unconcenatenate it, returning the original trajectories.
You have to supply the lengths of the original trajectories.
Parameters
----------
trajectory : mdtraj.Trajectory
Long trajectory to be split
lengths : array_like
list of lengths to split the long trajectory into
Returns
-------
A list of trajectories
"""
return split(trajectory, lengths)
def split(longlist, lengths):
"""Split a long list into segments
Parameters
----------
longlist : array_like
Long trajectory to be split
lengths : array_like
list of lengths to split the long list into
Returns
-------
A list of lists
"""
if not sum(lengths) == len(longlist):
        raise Exception('sum(lengths)=%s, len(longlist)=%s' % (sum(lengths), len(longlist)))
def func(x):
length, cumlength = x
return longlist[cumlength - length: cumlength]
output = [func(elem) for elem in zip(lengths, np.cumsum(lengths))]
return output
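# Added illustration (plain lists work too):
#   split([0, 1, 2, 3, 4], [2, 3]) -> [[0, 1], [2, 3, 4]]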
def stochastic_subsample(trajectories, shrink_multiple):
"""Randomly subsample from a trajectory
Given a list of trajectories, return a single trajectory
shrink_multiple times smaller than the total number of frames in
trajectories taken by random sampling of frames from trajectories
Parameters
----------
trajectories : list of mdtraj.Trajectory
list of trajectories to sample from
    shrink_multiple : int
        integer factor to shrink the total frame count by
Note that this method will modify the trajectory objects that you pass in
@CHECK is the note above actually true?
"""
shrink_multiple = int(shrink_multiple)
if shrink_multiple < 1:
        raise ValueError('Shrink multiple should be an integer greater '
                         'than or equal to 1. You supplied %s' % shrink_multiple)
elif shrink_multiple == 1:
# if isinstance(trajectories, Trajectory):
# return trajectories
# return concatenate_trajectories(trajectories)
return trajectories
if isinstance(trajectories, md.Trajectory):
traj = trajectories
length = traj.n_frames
new_length = int(length / shrink_multiple)
if new_length <= 0:
return None
indices = np.array(random.sample(np.arange(length), new_length))
new_traj = traj[indices, :, :]
return new_traj
else:
# assume we have a list of trajectories
# check that all trajectories have the same number of atoms
num_atoms = np.array([traj.n_atoms for traj in trajectories])
if not np.all(num_atoms == num_atoms[0]):
raise Exception('Not all same # atoms')
# shrink each trajectory
subsampled = [stochastic_subsample(traj, shrink_multiple) for traj in trajectories]
# filter out failures
subsampled = [a for a in subsampled if a is not None]
return concatenate_trajectories(subsampled)
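# Usage sketch (assumes `trajs` is a list of mdtraj.Trajectory objects loaded
# elsewhere): keep roughly one frame in ten, chosen at random, returned as a
# single concatenated trajectory.
#   small_traj = stochastic_subsample(trajs, shrink_multiple=10)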
def deterministic_subsample(trajectories, stride, start=0):
"""Given a list of trajectories, return a single trajectory
shrink_multiple times smaller than the total number of frames in
trajectories by taking every "stride"th frame, starting from "start"
Note that this method will modify the trajectory objects that you pass in
Parameters
----------
trajectories : list of mdtraj.Trajectory
trajectories to subsample from
stride : int
freq to subsample at
start : int
first frame to pick
Returns
-------
trajectory : mdtraj.trajectory
shortened trajectory
"""
stride = int(stride)
if stride < 1:
        raise ValueError('stride should be an integer greater than or equal to 1. You supplied %s' % stride)
elif stride == 1:
# if isinstance(trajectories, Trajectory):
# return trajectories
# return concatenate_trajectories(trajectories)
return trajectories
    if isinstance(trajectories, md.Trajectory):
traj = trajectories
traj = traj[start::stride]
return traj
else:
# assume we have a list of trajectories
# check that all trajectories have the same number of atoms
num_atoms = np.array([traj.n_atoms for traj in trajectories])
if not np.all(num_atoms == num_atoms[0]):
raise Exception('Not all same # atoms')
# shrink each trajectory
strided = [deterministic_subsample(traj, stride, start) for traj in trajectories]
return concatenate_trajectories(strided)
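# Usage sketch (same hypothetical `trajs` as above): keep every 10th frame
# starting from frame 0, concatenated into one trajectory.
#   strided_traj = deterministic_subsample(trajs, stride=10, start=0)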
def p_norm(data, p=2):
"""p_norm of an ndarray with XYZ coordinates
Parameters
----------
data : ndarray
XYZ coordinates. TODO: Shape?
p : {int, "max"}, optional
power of p_norm
Returns
-------
value : float
        the computed p-norm
"""
if p == "max":
return data.max()
else:
p = float(p)
n = float(data.shape[0])
return ((data ** p).sum() / n) ** (1.0 / p)
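# Worked example (hypothetical values): for data = np.array([1.0, 2.0, 2.0])
# and p=2 this returns ((1 + 4 + 4) / 3) ** 0.5 = sqrt(3) ~= 1.732, while
# p="max" simply returns data.max() = 2.0.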
#####################################################################
# #
# End Helper Functions #
# Begin Clustering Function #
#
|
theanalyst/cinder
|
cinder/volume/drivers/windows/constants.py
|
Python
|
apache-2.0
| 742
| 0
|
# Copyright 2014 Cloudbase Solutions Srl
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#    under the License.
WMI_JOB_STATUS_STARTED = 4096
WMI_JOB_STATE_RUNNING = 4
WMI_JOB_STATE_COMPLETED = 7
VHD_TYPE_FIXED = 2
VHD_TYPE_DYNAMIC = 3
|
TeXitoi/navitia
|
source/sql/alembic/versions/224621d9edde_timezone_at_metavj_level.py
|
Python
|
agpl-3.0
| 1,446
| 0.01314
|
"""timezone at metavj level
Revision ID: 224621d9edde
Revises: 14346346596e
Create Date: 2015-12-21 16:52:30.275508
"""
# revision identifiers, used by Alembic.
revision = '224621d9edde'
down_revision = '5a590ae95255'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
def upgrade():
op.create_table('timezone',
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint('id'),
schema='navitia'
)
op.create_table('tz_dst',
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('tz_id', sa.BIGINT(), nullable=False),
sa.Column('beginning_date', sa.DATE(), nullable=False),
sa.Column('end_date', sa.DATE(), nullable=False),
sa.Column('utc_offset', sa.INTEGER(), nullable=False),
    sa.ForeignKeyConstraint(['tz_id'], [u'navitia.timezone.id'], name=u'associated_tz_dst_fkey'),
sa.PrimaryKeyConstraint('id'),
schema='navitia'
)
op.add_column(u'meta_vj', sa.Column('timezone', sa.BIGINT(), nullable=True), schema=u'navitia')
op.drop_column(u'vehicle_journey', 'utc_to_local_offset', schema=u'navitia')
def downgrade():
op.drop_column(u'meta_vj', 'timezone', schema=u'navitia')
op.drop_table('tz_dst', schema='navitia')
    op.drop_table('timezone', schema='navitia')
op.add_column(u'vehicle_journey', sa.Column('utc_to_local_offset', sa.BIGINT(), nullable=True), schema=u'navitia')
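# Usage sketch (standard Alembic CLI, run from the directory containing
# alembic.ini):
#   alembic upgrade 224621d9edde   # apply this migration
#   alembic downgrade -1           # roll it back one step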
|
bombehub/SEconsistent
|
diagrams/overhead_savePDF.py
|
Python
|
gpl-3.0
| 699
| 0.020029
|
__author__ = 'mk'
import matplotlib.pyplot as plt
import sys
import math
import numpy as np
dataDir = sys.argv[1]
resDir = sys.argv[2]
plt.figure(figsize=(8,4))
algLabel=['naive','cou','zigzag','pingpong','MK','LL']
for i in range(6):
    filePath = dataDir + str(i) + '_overhead.dat'
    x = []
    y = []
    # read "size overhead" pairs, plotting overhead on a log scale
    with open(filePath) as f:
        for eachLine in f:
            xStr, yStr = eachLine.split()
            x.append(int(xStr))
            y.append(math.log(float(yStr)))
    plt.plot(x, y, label=algLabel[i], linewidth=1)
plt.xlabel("Data Size")
plt.ylabel("Overhead")
plt.title("Overhead Per Checkpoint")
plt.legend()
plt.savefig(resDir + "OverheadPerCheckpoint.pdf")
|
mcldev/DjangoCMS_Charts
|
djangocms_charts/base/consts.py
|
Python
|
mit
| 824
| 0.002427
|
from django.utils.translation import ugettext_lazy as _
# Legend Position
def get_legend_class(position):
return 'legend-' + str(position)
class LEGEND_POSITIONS:
BOTTOM = _('bottom')
TOP = _('top')
LEFT = _('left')
RIGHT = _('right')
get_choices = ((get_legend_class(BOTTOM), BOTTOM),
(get_legend_class(TOP), TOP),
(get_legend_class(LEFT), LEFT),
                   (get_legend_class(RIGHT), RIGHT),)
def get_chart_position_class(position):
return 'chart-' + str(position)
class CHART_POSITIONS:
CENTER = _('center')
LEFT = _('left')
RIGHT = _('right')
get_choices = ((get_chart_position_class(CENTER), CENTER),
(get_chart_position_class(LEFT), LEFT),
(get_chart_position_class(RIGHT), RIGHT),)
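# Example (plain-string inputs): get_legend_class('top') -> 'legend-top' and
# get_chart_position_class('left') -> 'chart-left'; these are the CSS class
# names used as the choice keys above.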
|
kgilmo/penning_artiq
|
artiq/gui/datasets.py
|
Python
|
gpl-3.0
| 5,603
| 0.000714
|
import asyncio
from collections import OrderedDict
from functools import partial
import logging
from quamash import QtGui, QtCore
from pyqtgraph import dockarea
from pyqtgraph import LayoutWidget
from artiq.protocols.sync_struct import Subscriber
from artiq.tools import short_format
from artiq.gui.tools import DictSyncModel
from artiq.gui.displays import *
try:
QSortFilterProxyModel = QtCore.QSortFilterProxyModel
except AttributeError:
QSortFilterProxyModel = QtGui.QSortFilterProxyModel
logger = logging.getLogger(__name__)
class DatasetsModel(DictSyncModel):
def __init__(self, parent, init):
DictSyncModel.__init__(self, ["Dataset", "Persistent", "Value"],
                               parent, init)
def sort_key(self, k, v):
return k
def convert(self, k, v, column):
if column == 0:
return k
elif column == 1:
return "Y" if v[0] else "N"
        elif column == 2:
return short_format(v[1])
else:
raise ValueError
def _get_display_type_name(display_cls):
for name, (_, cls) in display_types.items():
if cls is display_cls:
return name
class DatasetsDock(dockarea.Dock):
def __init__(self, dialog_parent, dock_area):
dockarea.Dock.__init__(self, "Datasets", size=(1500, 500))
self.dialog_parent = dialog_parent
self.dock_area = dock_area
grid = LayoutWidget()
self.addWidget(grid)
self.search = QtGui.QLineEdit()
self.search.setPlaceholderText("search...")
self.search.editingFinished.connect(self._search_datasets)
        grid.addWidget(self.search, 0, 0)
self.table = QtGui.QTableView()
self.table.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.table.horizontalHeader().setResizeMode(
QtGui.QHeaderView.ResizeToContents)
grid.addWidget(self.table, 1, 0)
add_display_box = QtGui.QGroupBox("Add display")
grid.addWidget(add_display_box, 1, 1)
display_grid = QtGui.QGridLayout()
add_display_box.setLayout(display_grid)
for n, name in enumerate(display_types.keys()):
btn = QtGui.QPushButton(name)
display_grid.addWidget(btn, n, 0)
btn.clicked.connect(partial(self.create_dialog, name))
self.displays = dict()
def _search_datasets(self):
self.table_model_filter.setFilterFixedString(self.search.displayText())
def get_dataset(self, key):
return self.table_model.backing_store[key][1]
async def sub_connect(self, host, port):
self.subscriber = Subscriber("datasets", self.init_datasets_model,
self.on_mod)
await self.subscriber.connect(host, port)
async def sub_close(self):
await self.subscriber.close()
def init_datasets_model(self, init):
self.table_model = DatasetsModel(self.table, init)
self.table_model_filter = QSortFilterProxyModel()
self.table_model_filter.setSourceModel(self.table_model)
self.table.setModel(self.table_model_filter)
return self.table_model
def update_display_data(self, dsp):
filtered_data = {k: self.table_model.backing_store[k][1]
for k in dsp.data_sources()
if k in self.table_model.backing_store}
dsp.update_data(filtered_data)
def on_mod(self, mod):
if mod["action"] == "init":
for display in self.displays.values():
display.update_data(self.table_model.backing_store)
return
if mod["path"]:
source = mod["path"][0]
elif mod["action"] == "setitem":
source = mod["key"]
else:
return
for display in self.displays.values():
if source in display.data_sources():
self.update_display_data(display)
def create_dialog(self, ty):
dlg_class = display_types[ty][0]
dlg = dlg_class(self.dialog_parent, None, dict(),
sorted(self.table_model.backing_store.keys()),
partial(self.create_display, ty, None))
dlg.open()
def create_display(self, ty, prev_name, name, settings):
if prev_name is not None and prev_name in self.displays:
raise NotImplementedError
dsp_class = display_types[ty][1]
dsp = dsp_class(name, settings)
self.displays[name] = dsp
self.update_display_data(dsp)
def on_close():
del self.displays[name]
dsp.sigClosed.connect(on_close)
self.dock_area.addDock(dsp)
self.dock_area.floatDock(dsp)
return dsp
def save_state(self):
r = dict()
for name, display in self.displays.items():
r[name] = {
"ty": _get_display_type_name(type(display)),
"settings": display.settings,
"state": display.save_state()
}
return r
def restore_state(self, state):
for name, desc in state.items():
try:
dsp = self.create_display(desc["ty"], None, name,
desc["settings"])
except:
logger.warning("Failed to create display '%s'", name,
exc_info=True)
try:
dsp.restore_state(desc["state"])
except:
logger.warning("Failed to restore display state of '%s'",
name, exc_info=True)
|
jay-johnson/sci-pype
|
bins/ml/extractors/extract_and_upload_iris_classifier.py
|
Python
|
apache-2.0
| 2,955
| 0.01286
|
#!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "Extract and Upload IRIS Models to S3"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-u', '--url', help='URL to Download', dest='url')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
data_dir = str(os.getenv("
|
ENV_DATA_DST_DIR", "/opt/work/data/dst"))
if not os.path.exists(data_dir):
os.mkdir(data_dir, 0777)
ds_name = "iris_classifier"
cur_date_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
s3_bucket = "unique-bucket-name-for-datasets"
s3_key = "dataset_" + core.to_upper(ds_name) + ".cache.pickle.zlib"
s3_loc = ""
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
#
# End Arg Processing
#
#####################################################################
s3_loc = str(s3_bucket) + ":" + str(s3_key)
lg("-------------------------------------------------", 6)
lg("Extracting and Uploading Models from CACHE to S3Loc(" + str(s3_loc) + ")", 6)
lg("", 6)
cache_req = {
"RAName" : "CACHE", # Redis instance name holding the models
"DSName" : str(ds_name), # Dataset name for pulling out of the cache
"S3Loc" : str(s3_loc), # S3 location to store the model file
"DeleteAfter" : False, # Optional delete after upload
"SaveDir" : data_dir, # Optional dir to save the model file - default is ENV_DATA_DST_DIR
"TrackingID" : "" # Future support for using the tracking id
}
upload_results = core.ml_upload_cached_dataset_to_s3(cache_req, core.get_rds(), core.get_dbs(), debug)
if upload_results["Status"] == "SUCCESS":
lg("Done Uploading Model and Analysis DSName(" + str(ds_name) + ") S3Loc(" + str(cache_req["S3Loc"]) + ")", 6)
else:
lg("", 6)
lg("ERROR: Failed Upload Model and Analysis Caches as file for DSName(" + str(ds_name) + ")", 6)
lg(upload_results["Error"], 6)
lg("", 6)
sys.exit(1)
# end of if extract + upload worked
lg("", 6)
lg("Extract and Upload Completed", 5)
lg("", 6)
sys.exit(0)
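# Usage sketch (flags defined by the argparse block above; bucket/key values
# are hypothetical):
#   ./extract_and_upload_iris_classifier.py -b my-bucket -k my-key -d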
|
dreibh/planetlab-lxc-plcapi
|
PLC/Methods/AddNetworkMethod.py
|
Python
|
bsd-3-clause
| 651
| 0.006144
|
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.NetworkMethods import NetworkMethod, NetworkMethods
from PLC.Auth import Auth
class AddNetworkMethod(Method):
"""
Adds a new network method.
Returns 1 if successful, faults otherwise.
"""
roles = ['admin']
accepts = [
Auth(),
        NetworkMethod.fields['method']
]
returns = Parameter(int, '1 if successful')
def call(self, auth, name):
network_method = NetworkMethod(self.api)
network_method['method'] = name
network_method.sync(insert = True)
return 1
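# Usage sketch (hypothetical XML-RPC call; PLCAPI methods take an auth struct
# followed by the declared arguments):
#   AddNetworkMethod(auth, 'ipv6')   # -> 1 on success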
|
lovexiaov/python-in-practice
|
texteditor2/Display.py
|
Python
|
gpl-3.0
| 4,435
| 0.005187
|
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.colorchooser as colorchooser
Spinbox = ttk.Spinbox if hasattr(ttk, "Spinbox") else tk.Spinbox
if __name__ == "__main__": # For stand-alone testing with parallel TkUtil
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..")))
import TkUtil
import TkUtil.Dock
from Globals import *
class Dock(TkUtil.Dock.Window):
def create_variables(self):
self.title = "Display"
self.__wordWrap = tk.StringVar()
self.__wordWrap.set("Word")
self.__wordWrap.trace("w", self.__set_word_wrap)
self.__blockCursor = tk.IntVar()
self.__blockCursor.set(False)
self.__blockCursor.trace("w", self.__set_block_cursor)
self.__lineSpacing = tk.StringVar()
self.__lineSpacing.set(0)
self.__lineSpacing.trace("w", self.__set_line_spacing)
def create_widgets(self):
self.wordWrapLabel = ttk.Label(self, text="Wrap:")
self.wordWrapCombobox = ttk.Combobox(self, state="readonly",
values=["None", "Character", "Word"],
textvariable=self.__wordWrap, width=10)
self.blockCursorCheckbutton = ttk.Checkbutton(self,
text="Block Cursor", variable=self.__blockCursor)
self.lineSpacingLabel = ttk.Label(self, text="Line Spacing:")
        self.lineSpacingSpinbox = Spinbox(self, from_=0, to=32,
width=3, validate="all", justify=tk.RIGHT,
textvariable=self.__lineSpacing)
self.lineSpacingSpinbox.config(validatecommand=(
self.lineSpacingSpinbox.register(self.__validate_int),
"lineSpacingSpinbox", "%P"))
def create_layout(self):
pad = dict(padx=PAD, pady=PAD)
padW = dict(sticky=tk.W, **pad)
padWE = dict(sticky=(tk.W, tk.E), **pad)
self.wordWrapLabel.grid(row=1, column=0, **padW)
self.wordWrapCombobox.grid(row=1, column=1, columnspan=2, **padWE)
self.blockCursorCheckbutton.grid(row=2, column=0, columnspan=3,
**padWE)
self.lineSpacingLabel.grid(row=3, column=0, columnspan=2, **padW)
        self.lineSpacingSpinbox.grid(row=3, column=2, sticky=tk.E, **pad)
def __set_word_wrap(self, *args):
self.event_generate("<<WordWrapChanged>>")
def __set_block_cursor(self, *args):
self.event_generate("<<BlockCursorChanged>>")
def __set_line_spacing(self, *args):
self.event_generate("<<LineSpacingChanged>>")
def __validate_int(self, spinbox, number):
spinbox = getattr(self, spinbox)
return TkUtil.validate_spinbox_int(spinbox, number)
@property
def word_wrap(self):
wrap = self.__wordWrap.get().lower()
if wrap == "character":
wrap = "char"
return wrap
@word_wrap.setter
def word_wrap(self, value):
if value.lower() == "char":
value = "character"
self.__wordWrap.set(value.title())
@property
def block_cursor(self):
return bool(self.__blockCursor.get())
@block_cursor.setter
def block_cursor(self, value):
self.__blockCursor.set(value)
    @property
    def line_spacing(self):
return int(self.__lineSpacing.get())
@line_spacing.setter
def line_spacing(self, value):
self.__lineSpacing.set(value)
if __name__ == "__main__":
if sys.stdout.isatty():
application = tk.Tk()
application.title("Display")
dock = Dock(application, None)
dock.pack(fill=tk.BOTH, expand=True)
dock.bind("<Escape>", lambda *args: application.quit())
application.bind("<Escape>", lambda *args: application.quit())
application.mainloop()
else:
print("Loaded OK")
|
openstack/tempest
|
tempest/api/compute/admin/test_migrations.py
|
Python
|
apache-2.0
| 7,577
| 0
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
"""Test migration operations supported by admin user"""
@classmethod
def setup_clients(cls):
super(MigrationsAdminTest, cls).setup_clients()
cls.client = cls.os_admin.migrations_client
@decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
def test_list_migrations(self):
"""Test admin user can get the migrations list"""
self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
"""Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.resize_server(server_id, self.flavor_ref_alt)
body = self.client.list_migrations()['migrations']
instance_uuids = [x['instance_uuid'] for x in body]
self.assertIn(server_id, instance_uuids)
def _flavor_clean_up(self, flavor_id):
try:
self.admin_flavors_client.delete_flavor(flavor_id)
self.admin_flavors_client.wait_for_resource_deletion(flavor_id)
except exceptions.NotFound:
pass
@decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
"""Test reverting resized server with original flavor deleted
Tests that we can revert the resize on an instance whose original
flavor has been deleted.
"""
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
flavor = self.admin_flavors_client.show_flavor(
self.flavor_ref)['flavor']
flavor = self.admin_flavors_client.create_flavor(
name=data_utils.rand_name('test_resize_flavor_'),
ram=flavor['ram'],
disk=flavor['disk'],
vcpus=flavor['vcpus']
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
# Set extra specs same as self.flavor_ref for the created flavor,
# because the environment may need some special extra specs to
# create server which should have been contained in
# self.flavor_ref.
extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor_ref)['extra_specs']
if extra_spec_keys:
self.admin_flavors_client.set_flavor_extra_spec(
flavor['id'], **extra_spec_keys)
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
server = self.servers_client.show_server(server['id'])['server']
# If 'id' not in server['flavor'], we can only compare the flavor
# details, so here we should save the to-be-deleted flavor's details,
# for the flavor comparison after the server resizing.
if not server['flavor'].get('id'):
pre_flavor = {}
body = self.flavors_client.show_flavor(flavor['id'])['flavor']
for key in ['name', 'ram', 'vcpus', 'disk']:
pre_flavor[key] = body[key]
# Delete the flavor we used to boot the instance.
self._flavor_clean_up(flavor['id'])
# Now resize the server and wait for it to go into verify state.
self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
# Now revert the resize, it should be OK even though the original
# flavor used to boot the server was deleted.
self.servers_client.revert_resize_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
server = self.servers_client.show_server(server['id'])['server']
if server['flavor'].get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor['id'], server['flavor']['id'], msg)
else:
self.assertEqual(pre_flavor['name'],
server['flavor']['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
def _test_cold_migrate_server(self, revert=False):
if CONF.compute.min_compute_nodes < 2:
msg = "Less than 2 compute nodes, skipping multinode tests."
raise self.skipException(msg)
server = self.create_test_server(wait_until="ACTIVE")
src_host = self.get_host_for_server(server['id'])
self.admin_servers_client.migrate_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'], 'VERIFY_RESIZE')
if revert:
self.servers_client.revert_resize_server(server['id'])
assert_func = self.assertEqual
else:
self.servers_client.confirm_resize_server(server['id'])
assert_func = self.assertNotEqual
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
dst_host = self.get_host_for_server(server['id'])
        assert_func(src_host, dst_host)
@decorators.idempotent_id('4bf0be52-3b6f-4746-9a27-3143636fe30d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
"""Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
"""Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
|
trezor/micropython
|
extmod/webrepl/webrepl.py
|
Python
|
mit
| 2,184
| 0.001374
|
# This module should be imported from REPL, not run from command line.
import socket
import uos
import network
import uwebsocket
import websocket_helper
import _webrepl
listen_s = None
client_s = None
def setup_conn(port, accept_handler):
global listen_s
    listen_s = socket.socket()
listen_s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ai = socket.getaddrinfo("0.0.0.0", port)
addr = ai[0][4]
listen_s.bind(addr)
listen_s.listen(1)
if accept_handler:
listen_s.setsockopt(socket.SOL_SOCKET, 20, accept_handler)
for i in (network.AP_IF, network.STA_IF):
iface = network.WLAN(i)
if iface.active():
print("WebREPL daemon started on ws://%s:%d" % (iface.ifconfig()[0], port))
return listen_s
def accept_conn(listen_sock):
global client_s
cl, remote_addr = listen_sock.accept()
prev = uos.dupterm(None)
uos.dupterm(prev)
if prev:
print("\nConcurrent WebREPL connection from", remote_addr, "rejected")
cl.close()
return
print("\nWebREPL connection from:", remote_addr)
client_s = cl
websocket_helper.server_handshake(cl)
ws = uwebsocket.websocket(cl, True)
ws = _webrepl._webrepl(ws)
cl.setblocking(False)
# notify REPL on socket incoming data (ESP32/ESP8266-only)
if hasattr(uos, 'dupterm_notify'):
cl.setsockopt(socket.SOL_SOCKET, 20, uos.dupterm_notify)
uos.dupterm(ws)
def stop():
global listen_s, client_s
uos.dupterm(None)
if client_s:
client_s.close()
if listen_s:
listen_s.close()
def start(port=8266, password=None):
stop()
if password is None:
try:
import webrepl_cfg
_webrepl.password(webrepl_cfg.PASS)
setup_conn(port, accept_conn)
print("Started webrepl in normal mode")
except:
print("WebREPL is not configured, run 'import webrepl_setup'")
else:
_webrepl.password(password)
setup_conn(port, accept_conn)
print("Started webrepl in manual override mode")
def start_foreground(port=8266):
stop()
s = setup_conn(port, None)
accept_conn(s)
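# Usage sketch (from the MicroPython REPL, per the header comment):
#   import webrepl
#   webrepl.start()                    # daemon mode; needs webrepl_cfg.py
#   webrepl.start(password='secret')   # manual override mode
#   webrepl.start_foreground()         # block until one client connects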
|
jorgebg/tictactoe
|
time.py
|
Python
|
mit
| 74
| 0
|
from .game import Board
for i in range(10):
Board.all()
    print(i)
|
AlManja/logs.py
|
logsgui3.py
|
Python
|
mit
| 9,203
| 0.004781
|
#!/usr/bin/env python
import os
import sys # provides interaction with the Python interpreter
from functools import partial
from PyQt4 import QtGui # provides the graphic elements
from PyQt4.QtCore import Qt # provides Qt identifiers
from PyQt4.QtGui import QPushButton
try:
from sh import inxi
except:
print(" 'inxi' not found, install it to get this info")
try:
from sh import mhwd
except:
print(" 'mhwd' not found, this is not Manjaro?")
try:
from sh import hwinfo
except:
print(" 'hwinfo' not found")
try:
from sh import free
except:
print(" 'free' not found")
try:
from sh import lsblk
except:
print(" 'lsblk' not found")
try:
from sh import df
except:
print(" 'df' not found")
try:
from sh import blockdev
except:
print(" 'blockdev' not found")
try:
from sh import test
except:
print(" 'test' not found")
try:
from sh import parted
except:
print(" 'parted' not found")
TMP_FILE = "/tmp/mlogsout.txt"
HEADER = '''
===================
|{:^17}| {}
===================
'''
checkbuttons = [
'Inxi',
'Installed g. drivers',
'List all g. drivers',
'Graphic Card Info',
'Memory Info',
'Partitions',
'Free Disk Space',
'Xorg.0',
'Xorg.1',
'pacman.log',
'journalctl - Emergency',
'journalctl - Alert',
'journalctl - Critical',
'journalctl - Failed',
'Open&Rc - rc.log',
]
def look_in_file(file_name, kws):
"""reads a file and returns only the lines that contain one of the keywords"""
with open(file_name) as f:
return "".join(filter(lambda line: any(kw in line for kw in kws), f))
class Window(QtGui.QWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.checks = [False]*len(checkbuttons) # initialize all buttons to False
# creates a vertical box layout for the window
vlayout = QtGui.QVBoxLayout()
# creates the checkboxes
for idx, text in enumerate(checkbuttons):
checkbox = QtGui.QCheckBox(text)
# connects the 'stateChanged()' signal with the 'checkbox_state_changed()' slot
checkbox.stateChanged.connect(partial(self.checkbox_state_changed, idx))
vlayout.addWidget(checkbox) # adds the checkbox to the layout
btn = QPushButton("&Show Info ({})".format(TMP_FILE), self)
btn.clicked.connect(self.to_computer)
btn.clicked.connect(self.to_editor)
vlayout.addWidget(btn)
vlayout.addStretch()
self.setLayout(vlayout) # sets the window layout
def checkbox_state_changed(self, idx, state):
self.checks[idx] = state == Qt.Checked
def to_computer(self, text):
f = open(TMP_FILE, 'w') # write mode clears any previous content from the file if it exists
if self.checks[0]:
print("Saving: inxi to file")
f.write(HEADER.format("Inxi -Fxzc0", "Listing computer information"))
try:
f.write(str(inxi('-Fxxxzc0')))
except:
" 'inxi' not found, install it to get this info"
f.write('\n')
if self.checks[1]:
print("Getting info about installed graphical driver")
f.write(HEADER.format("Installed drivers", "Shows which graphic driver is installed"))
try:
f.write(str(mhwd('-li')))
except:
print(" 'mhwd' not found, this is not Manjaro?")
f.write('\n')
if self.checks[2]:
print("Getting list of all drivers supported on detected gpu's")
f.write(HEADER.format("Available drivers", "list of all drivers supported on detected gpu's"))
try:
f.write(str(mhwd('-l')))
except:
print(" 'mhwd' not found, this is not Manjaro?")
# f.write('\n')
if self.checks[3]:
print('hwinfo -graphic card')
# os.system('hwinfo --gfxcard')
f.write(HEADER.format("hwinfo --gfxcard", "Show Graphic Card info"))
try:
f.write(str(hwinfo('--gfxcard')))
except:
print('hwinfo graphic card info error')
f.write('hwinfo graphic card info error')
f.write('\n')
if self.checks[4]:
print('memory info')
# os.system('free -h')
f.write(HEADER.format("Memory Info", "Info about Memory and Swap"))
try:
f.write(str(free(' -h')))
except:
print('memory info error')
f.write('memory info error')
f.write('\n')
if self.checks[5]:
print('disk info')
# os.system('lsblk')
f.write(HEADER.format("Disk Info", "Disks and Partitions"))
try:
f.write(str(lsblk()))
except:
print('lsblk error')
f.write('lsblk error')
f.write('\n')
        if self.checks[6]:
print('free disk space')
# os.system('df')
f.write(HEADER.format("Free Disk Space", "Free space per pertition"))
try:
f.write(str(df()))
except:
print('free disk space error')
f.write('free disk space error')
f.write('\n')
if self.checks[9]:
print("Saving: Xorg.0.log to file")
f.write(HEADER.format("Xorg.0.log", "searching
|
for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.0.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.0.log not found!")
f.write("Xorg.0.log not found!")
f.write('\n')
if self.checks[10]:
print("Saving: Xorg.1.log to file")
f.write(HEADER.format("Xorg.1.log", "searching for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.1.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.1.log not found!")
f.write("Xorg.1.log not found!")
f.write('\n')
if self.checks[11]:
print("Saving: pacman.log to file")
f.write(HEADER.format("pacman.log", "searching for: pacsave, pacnew, pacorig keywords"))
try:
f.write(look_in_file('/var/log/pacman.log', ['pacsave', 'pacnew', 'pacorig']))
except FileNotFoundError:
print("/var/log/pacman.log not found, this is not Manjaro or Arch based Linux?")
f.write("pacman.log not found! Not Arch based OS?")
f.write('\n')
if self.checks[12]:
print("Saving: journalctl (emergency) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Emergency keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['emergency', 'Emergency', 'EMERGENCY']))
f.write('\n')
if self.checks[13]:
print("Saving: journalctl (alert) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Alert keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['alert', 'Alert', 'ALERT']))
f.write('\n')
if self.checks[14]:
print("Saving: journalctl (critical) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Critical keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['critical', 'Critical', 'CRITICAL']))
f.write('\n')
if self.checks[15]:
print("Saving: journalctl (failed) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Failed keywords"))
            f.write(look_in_file('/tmp/journalctl.txt', ['failed', 'Failed', 'FAILED']))
            f.write('\n')
|
threeaims/browserstep
|
tests/test_browserstep.py
|
Python
|
mit
| 432
| 0.00463
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_browserstep
----------------------------------
Tests for `browserstep` module.
"""
import sys
import unittest
from browserstep import browserstep
class TestBrowserstep(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
if __name__ == '__main__':
    sys.exit(unittest.main())
|
sahilshekhawat/sympy
|
sympy/core/tests/test_assumptions.py
|
Python
|
bsd-3-clause
| 27,164
| 0.00011
|
from sympy import I, sqrt, log, exp, sin, asin, factorial
from sympy.core import Symbol, S, Rational, Integer, Dummy, Wild, Pow
from sympy.core.facts import InconsistentAssumptions
from sympy import simplify
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises, XFAIL
def test_symbol_unset():
x = Symbol('x', real=True, integer=True)
assert x.is_real is True
assert x.is_integer is True
assert x.is_imaginary is False
assert x.is_noninteger is False
assert x.is_number is False
def test_zero():
z = Integer(0)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is True
assert z.is_nonnegative is True
assert z.is_even is True
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_one():
z = Integer(1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_number is True
assert z.is_composite is False # issue 8807
def test_negativeone():
z = Integer(-1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is True
assert z.is_nonpositive is True
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_infinity():
oo = S.Infinity
assert oo.is_commutative is True
assert oo.is_integer is None
assert oo.is_rational is None
assert oo.is_algebraic is None
assert oo.is_transcendental is None
assert oo.is_real is True
assert oo.is_complex is True
assert oo.is_noninteger is None
assert oo.is_irrational is None
assert oo.is_imaginary is False
assert oo.is_positive is True
assert oo.is_negative is False
assert oo.is_nonpositive is False
assert oo.is_nonnegative is True
assert oo.is_even is None
assert oo.is_odd is None
assert oo.is_finite is False
assert oo.is_infinite is True
assert oo.is_comparable is True
assert oo.is_prime is False
assert oo.is_composite is None
assert oo.is_number is True
def test_neg_infinity():
mm = S.NegativeInfinity
assert mm.is_commutative is True
assert mm.is_integer is None
assert mm.is_rational is None
assert mm.is_algebraic is None
assert mm.is_transcendental is None
assert mm.is_real is True
assert mm.is_complex is True
assert mm.is_noninteger is None
assert mm.is_irrational is None
assert mm.is_imaginary is False
assert mm.is_positive is False
assert mm.is_negative is True
assert mm.is_nonpositive is True
assert mm.is_nonnegative is False
assert mm.is_even is None
assert mm.is_odd is None
assert mm.is_finite is False
assert mm.is_infinite is True
assert mm.is_comparable is True
assert mm.is_prime is False
assert mm.is_composite is False
assert mm.is_number is True
def test_nan():
nan = S.NaN
assert nan.is_commutative is True
assert nan.is_integer is None
assert nan.is_rational is None
assert nan.is_algebraic is None
    assert nan.is_transcendental is None
assert nan.is_real is None
assert nan.is_complex is None
assert nan.is_noninteger is None
assert nan.is_irrational is None
assert nan.is_imaginary is None
assert nan.is_positive is None
assert nan.is_negative is None
assert nan.is_nonpositive is None
assert nan.is_nonnegative is None
assert nan.is_even is None
assert nan.is_odd is None
assert nan.is_finite is None
assert nan.is_infinite is None
assert nan.is_comparable is False
assert nan.is_prime is None
assert nan.is_composite is None
assert nan.is_number is True
def test_pos_rational():
r = Rational(3, 4)
assert r.is_commutative is True
assert r.is_integer is False
assert r.is_rational is True
assert r.is_algebraic is True
assert r.is_transcendental is False
assert r.is_real is True
assert r.is_complex is True
assert r.is_noninteger is True
assert r.is_irrational is False
assert r.is_imaginary is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
assert r.is_nonnegative is True
assert r.is_even is False
assert r.is_odd is False
assert r.is_finite is True
assert r.is_infinite is False
assert r.is_comparable is True
assert r.is_prime is False
assert r.is_composite is False
r = Rational(1, 4)
assert r.is_nonpositive is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonnegative is True
r = Rational(5, 4)
assert r.is_negative is False
assert r.is_positive is True
assert r.is_nonpositive is False
assert r.is_nonnegative is True
r = Rational(5, 3)
assert r.is_nonnegative is True
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
def test_neg_rational():
r = Rational(-3, 4)
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-1, 4)
assert r.is_nonpositive is True
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-5, 4)
assert r.is_negative is True
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_nonnegative is False
r = Rational(-5, 3)
assert r.is_nonnegative is False
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonpositive is True
def test_pi():
z = S.Pi
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_E():
z = S.Exp1
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
    assert z.is_transcendental is True
|
Korred/advent_of_code_2016
|
day_6_part_2.py
|
Python
|
mit
| 5,783
| 0.001383
|
recording = '''jtfxgqec
zxoeuddn
anlfufma
dxuuyxkg
ttnewhlw
sjoyeiry
rgfwwdhw
qymxsllk
forftdvy
rzmnmewh
hogawihi
mtsyexba
mrjzqqfk
ypmkexpg
pjuyopgv
rtqquvaj
evubmlrq
bqlrtuce
ndidnbps
vqukosam
mzdyfkcd
rrbwdimb
uhnvxgly
aaimxpcv
acxvinqj
muaeikzy
lhzbosjd
fflqqiit
unfhzfrs
gmwoyvob
cculubmy
zqbugcwa
ijouicwt
bildjjww
ugksmnps
ivawibvu
igzteede
foehssxo
pkeevvlt
xumuixyw
okhhtycj
xhblffye
iqapgjqe
lkhpntum
wuzxgwow
bkkpfguu
bnqctsdi
cwncjrwn
eivhabsi
bwdicgfm
kowiourk
dhbzuztx
gibitfxo
wmrlhenb
wfzmjvwh
zddjirfg
fafhmiwf
ddhvufhg
qdwnlzqp
nhsnngut
uacmfgop
morcixux
sfdxrgqy
tezzvctv
dnnmtkfp
dygdzcib
efurreri
npvpklix
svpbdgyw
mcntltzd
inwkhxlx
sajfgeoi
nwkqrspt
qtzqsksv
mtncajjk
etarsvxr
eyaeeauy
gqnctylg
uerywmma
hjrxhtjb
zdsdyfzp
zhgrrhvd
yvxqyalf
rlgwftff
xczvgpzq
yydydclu
rzltbrro
jforpzau
zskadlfz
dqbqdsgv
bcwjltvc
byfoamgd
cpefdmso
ocuetyke
vlqrfnpp
ggikwydh
eakpyuov
osaguhlz
ylmrfvee
nvdvqpzm
pudbbuhh
bwmqdpyv
proscvgy
cetkcpjw
sbhcqeya
fgnyltmf
qcspgopp
bdhnemmy
tczkhihl
yduxunvr
dtxerncl
xnxeaayt
rvlcbgts
vpavzjqs
oueloufw
mubbhyna
nptmeppg
ojjfbuzz
lusboycs
gurmmorr
kefddaka
cpvpszit
bfvthzpm
owgcvdjo
simxphmv
rxedvjyw
hmeieuxr
vgqhcapz
vwtvbain
aobnhdsx
hkpshsjs
jxgegczu
xbsfxesk
pqhifeaj
triurorr
rnkufaxl
hmrqfoaw
veghzoxa
zbvgbpcm
rqrnbylj
txaawlta
uuksnfel
jqvycrvw
cdttmdpc
wojvbrzp
qvnuinon
gnpguyvh
cgbkpzbu
pdaqhlan
muiykslt
prvzlunm
whhcrchz
cahjhrkl
zifdgfpq
wanlienf
sfrnozvi
mwmykvyh
fbdfzgut
wfrviilb
ucaopfgo
fjhuikma
hdmizjdj
xngpfwvn
rueojtjg
xvtssxtx
vvcgzidf
xtehcxki
xksbfbso
osnzpqmy
isrnjkxh
utleakmz
dthmtbdt
plregxuh
amoeprsy
tmyhzhqd
csxqavbe
jmojlysw
slebxnbl
ldzryqmj
ajejyudk
ynhgnjhw
mdibxxxw
rvtcmesd
jmnwqddq
hppfoplc
nrcbjynz
kcqnjzue
mthvgjxm
ykztdbcv
etqqnhuz
tezkopgq
fwhwkqmz
fozpkzfy
hbbtlcog
hdvjqwyh
xuljsrvz
abskreoo
aedeydgc
dcyigvqf
ntpcvvgk
iiwgzkhl
zofhlqlx
veumtlae
qibdapwq
xpgpwirt
wvnnautq
wfhlgmdg
yqcrvdgx
srdufrbu
vycrvkpx
flwxzkim
enxayqxm
dgpntiaj
qedfutmp
vfdovine
dgrvjfjt
dqxxjahk
hnxpblyp
nnadwbsc
krmqqgwf
efykkzeb
lkrmrwqw
vfzayrwt
chopbnyf
vbydrtln
azmlestl
sqcyddvi
zdcubjok
afshwptc
sjgpuoch
bnfylydl
rsyxsbzi
psyuvyzx
npngqypd
xejayhdk
aqfmvjfi
tpffksph
uekwkjnj
ljsjimwm
hbgzjlig
ngssshxx
icitlosb
unxryqyt
nzpujfti
lupxnzhe
kxglfnic
ecewosbs
htlqxpiq
clqgnyfd
yyiozvar
mbvjgmyc
srhwhlin
casmlryr
ebuzskkp
iewhdqtr
oyidcobe
avptvltf
mfheqaxl
shqnezrq
xrpkzuvb
soxdjwba
aitmzlds
rpmpozpd
ccgxauky
gsstsjyx
bzeolqal
vfhddmuc
wfbbmqfv
pumxmnhj
qumdxkns
xymraott
uthlccig
ezpalags
giftxymr
ujjacleo
cgwgmktp
istetgdl
azedmaao
bnlfwyoq
orcwhbek
amswhkum
yxupesxu
mlzvqsrg
solkxzby
tbaxnjdu
xwbsiquk
hsftntsn
ajraaorz
mwmycrff
ymnbrbpj
uyfscatq
kzkgmbeh
libgpgnr
kxlgthxc
vzjbobyx
isqessab
ehursvof
guwrjnbi
xivkphwn
rurrmdmi
nqijeuzq
jambocej
qrtidktb
sbzvehmq
aikgzrsq
lgydnujf
twafyzry
nxhtklba
xhyaqyqe
xgvdfcrf
wdieppsd
iabrfmdm
doijaavc
oxydttkg
qsqiofwv
titrvjym
mwojqcku
tewiyhjx
jlqbksqd
knycvoks
tmcbnvhv
ekksoxmz
mgvommal
hrosnzeu
fzeymbek
evqxcukn
ilkpvdvl
rclpjbkb
tdpitlei
zvvzuucc
pzdgwnfz
mralxxlz
wywkawzh
hmazaakd
llltvbex
ihsmefpz
rzzgkjyz
srjqpeoq
jrczcdna
uuyskwop
yeuiaepa
vzppcwnn
oqhxixdo
xkwpfsij
cmsoiogl
ngbmaeue
lmqttyrj
yhgjxfmx
lwfgjnyp
ibbkjgra
gaxsotzr
paugisvs
pcqqauqi
pweuwnqs
jcbrscrj
ovtsgcnh
oscsgtqn
hkpwmhwk
pmdgwclk
owmskdhh
qutyussr
atdkvmzl
oqslriwe
wafjwfxp
ipcqlsxv
kzurbnoh
lfhfzwqo
ucybqwrj
tgnblzgm
lhwlniea
tlxymfbu
bcyvlkvt
glpacpjk
rjagzpnu
fyjpvhaq
cjtzwtdu
dkaqawts
pjoovtlv
xsnwqixw
swcftfed
cadigksp
fnsmxccx
cbxmdxvb
hpyqnpjq
jzpvphmo
kdkpubul
kiajwwta
uyeuctbe
yetyzqxw
fgeemnbl
brprbvgj
xszwwlea
ygunyguo
jwplrcbq
fejndxnx
oxsmkcqm
ldwkbpsk
cmzuxrst
jaoadiiu
oxcpkgbc
nyulhuci
bdwfqtkv
ehxvnzyd
cizuemsb
lbqyqduk
kqweswcd
tqnicuzh
utyaiaeu
osjdgvtj
qmrxcaoa
qiltxgvv
qklfgyss
lpjebmuo
bvebkous
yifrmeoa
jzgntlep
wadcknde
kaikclag
tucuhehr
bvwhuwzn
uvlecxgy
rzyxjhmo
dyyfwjgv
vocjkohi
ylyflktq
raltxpqg
eitypruw
pfbmopgm
qerushjt
xykophcv
amjhrlhi
uqkjhdhn
kkohprfw
hvsmtnfd
uxgiqmqc
npxwplcj
ltchgces
exiyyief
ysmvbqso
zpyvuhqz
lkvwronk
vxilskkl
cxfypwcd
jhrczkmf
rdedtejq
gmxcrlzi
jumwfmnn
gkynzdtd
dfdkxggc
yldclxhz
fsxvbwyj
ioiupzio
lxyqvncv
rsgsviny
osgcimej
tecqrgkq
tozohtwt
kmlowfrf
hhpiukqe
xlxlkjwf
ntvtoexx
zzvsvdow
yluidajg
vumkynvp
vaxipwwg
pqymmoif
sgjzogut
jppwszzn
gvvaibqu
lwjotuil
srflotab
ibnblmjm
kvcsdivb
wqrpzmvr
gcmqdezs
vrizdyfo
vtqnsjbf
jwocjmvb
fjkiiowl
ctjhmmrq
pcckqfki
wqolxgfg
gbsdyrbc
giqmfqwb
fodfpvyl
nxdzwvzz
hpnatltw
adjjyhjd
aoguhvmv
yyeanoir
baojaygs
ovkebbjb
pmykvfex
zeooykoa
uuozuxjb
kxxvbhbr
jxbchjlr
qhiwdonk
dnvfwwfh
kjfrlslh
wionbrdf
qgkjarob
kwplsxso
txgelygh
vlmziqwf
wbetqqkp
qfkocear
wrvonhyr
sbiqrcri
lnwzitce
bctyrwph
kallfwzc
zfqwanet
bevnljjr
kwqsktan
gjviqwlu
zflsnpig
wzaufqvr
uvxhutav
diejbica
ojciaexn
zyjoxrwi
djkodeiz
gsinkcqk
jkonssuq
eychyabp
fkcogwnr
kkioyrnn
inqxlztu
cqnbxxks
ipwmpdmm
moozfajm
irjaimrw
ojihmanb
hzoszxzc
ajjvxqqi
ohkfkijd
nlsahrpv
zizxtmxa
gjtnrurd
pyqghfuj
fltnnyfe
goxagvfp
nplhpkiy
dlwgyvby
fzrfhcgh
zaiuostp
jdjojfkw
thksqbjh
qopcwnht
ewkljwho
qguaeaac
wxzzxgcc
nlnuuhdu
ihtzrqay
nmtdbkhp
yasxhulm
drzjobfy
qpgcjdxn
aegbxmjb
bbuxsffr
zevjcgzn
pgbqezxk
qdlepjko
zbtzvicm
ssjdcggg
ugrtxalo
tsbvnppt
rboleppu
gywfqiwz
skgzeqhu
hzuggbcf
dkegaxap
zijcjrkm
jtfkeoog
fyvtrvig
gophbeoj
ieatnihe
vlaauxgz
mxnheqkz
mftwybny
ebawojuj
dyrvecbs
lrrcwang
qswijdeu
wkuszdax
ecaokzfc
pmbznspx
tjqrztdv
mwdxruge
whutfdqy
zpfwqvox
fkqapoid
bodleqbn
kpxiuodk
johmsncc
enhamlol
yhtydoss'''.split("\n")
from collections import Counter
# get length of message
m_len = len(recording[0])
occurrence_list = [[] for i in range(m_len)]
code = ""
for line in recording:
for e, i in enumerate(line):
        occurrence_list[e].append(i)
for entry in occurrence_list:
    mc = Counter(entry).most_common()[-1][0]  # <--- only change from part 1: least common character
code += mc
print("Code: ", code)
|
google/jax
|
jax/interpreters/sharded_jit.py
|
Python
|
apache-2.0
| 22,527
| 0.006659
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Iterable, Optional, Tuple, Union
from absl import logging
import numpy as np
from jax import core
from jax.interpreters import ad
from jax.interpreters import partial_eval as pe
# TODO(skye): separate pmap into its own module?
from jax.interpreters import mlir
from jax.interpreters import pxla
from jax.interpreters import xla
from jax import linear_util as lu
from jax._src import dispatch
from jax._src.lib import xla_bridge as xb
from jax._src.lib import xla_client as xc
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import func as func_dialect
from jax._src.api_util import (argnums_partial, flatten_axes, flatten_fun,
_ensure_index_tuple)
import jax._src.util as util
from jax.tree_util import tree_flatten, tree_unflatten
from jax._src.util import (new_name_stack, wrap_name, wraps, safe_map,
safe_zip, HashableFunction)
from jax._src.config import config
xops = xc._xla.ops
def _map(f, *xs):
return tuple(map(f, *xs))
class ResultToPopulate: pass
result_to_populate = ResultToPopulate()
def _avals_to_results_handler(nrep, npart, partitions, out_avals):
handlers = [_aval_to_result_handler(npart, parts, out_aval)
for parts, out_aval in safe_zip(partitions, out_avals)]
def handler(out_bufs):
return [h(bufs) for h, bufs in zip(handlers, out_bufs)]
return handler
def _aval_to_result_handler(npart, parts, aval):
if aval is not core.abstract_unit:
spec = pxla.partitioned_sharding_spec(npart, parts, aval)
indices = pxla.spec_to_indices(aval.shape, spec)
else:
spec = indices = None
return pxla.local_aval_to_result_handler(aval, spec, indices)
@lu.cache
def _sharded_callable(
fun: lu.WrappedFun, nparts: Optional[int],
in_parts: Tuple[pxla.PartitionsOrReplicated, ...],
out_parts_thunk: Callable[[], Tuple[pxla.PartitionsOrReplicated, ...]],
local_in_parts: Optional[Tuple[pxla.PartitionsOrReplicated, ...]],
local_out_parts_thunk: Callable[[], Optional[Tuple[pxla.PartitionsOrReplicated, ...]]],
local_nparts: Optional[int], name: str, *abstract_args):
nrep = 1
if local_in_parts is None:
local_in_parts = in_parts
global_abstract_args = [pxla.get_global_aval(arg, parts, lparts)
for arg, parts, lparts
in safe_zip(abstract_args, in_parts, local_in_parts)]
if logging.vlog_is_on(2):
logging.vlog(2, "abstract_args: %s", abstract_args)
logging.vlog(2, "global_abstract_args: %s", global_abstract_args)
logging.vlog(2, "in_parts: %s", in_parts)
logging.vlog(2, "local_in_parts: %s", local_in_parts)
jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_final(fun, global_abstract_args)
platform = xb.get_backend().platform
if platform not in ["tpu", "gpu"]:
# TODO(skye): fall back to regular jit?
raise ValueError(f"sharded_jit not supported for {platform}")
nparts = pxla.reconcile_num_partitions(jaxpr, nparts)
assert nparts is not None
if nparts > xb.device_count():
raise ValueError(
f"sharded_jit computation requires {nparts} devices, "
f"but only {xb.device_count()} devices are available.")
if xb.local_device_count() < nparts < xb.device_count():
raise NotImplementedError(
f"sharded_jit across multiple hosts must use all available devices. "
f"Got {nparts} out of {xb.device_count()} requested devices "
f"(local device count: {xb.local_device_count()})")
if local_nparts is None:
if nparts > xb.local_device_count():
raise ValueError(
"Specify 'local_nparts' when using cross-process sharded_jit "
"and all inputs and outputs are replicated.")
else:
local_nparts = nparts
if local_nparts > xb.local_device_count():
raise ValueError(
f"sharded_jit computation requires {local_nparts} local devices, "
f"but only {xb.local_device_count()} local devices are available.")
if logging.vlog_is_on(2):
logging.vlog(2, "nparts: %d local_nparts: %d", nparts, local_nparts)
out_parts = out_parts_thunk()
local_out_parts = local_out_parts_thunk()
if local_out_parts is None:
local_out_parts = out_parts
if logging.vlog_is_on(2):
logging.vlog(2, "out_parts: %s", out_parts)
logging.vlog(2, "local_out_parts: %s", local_out_parts)
local_out_avals = [pxla.get_local_aval(out, parts, lparts)
for out, parts, lparts
in safe_zip(global_out_avals, out_parts, local_out_parts)]
log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG
logging.log(log_priority,
"Compiling %s for %d devices with args %s.",
fun.__name__, nparts, global_abstract_args)
c = xc.XlaBuilder("spjit_{}".format(fun.__name__))
xla_consts = _map(partial(xla.pyval_to_ir_constant, c), consts)
xla_args = _xla_sharded_args(c, global_abstract_args, in_parts)
axis_env = xla.AxisEnv(nrep, (), ())
ctx = xla.TranslationContext(
c, platform, axis_env, new_name_stack(wrap_name(name, "sharded_jit")))
out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)
out_tuple = xla.with_sharding(c, out_parts, xops.Tuple, c, out_nodes)
built = c.Build(out_tuple)
if nparts <= xb.local_device_count():
devices = xb.local_devices()[:nparts]
else:
assert nparts == xb.device_count()
devices = xb.devices()
device_assignment = np.array([[d for d in devices]])
device_assignment = np.reshape(device_assignment, (-1, nparts))
# device_assignment = None # TODO(skye): replace with default device assignment?
compiled = dispatch.backend_compile(
xb.get_backend(), built,
xb.get_compile_options(nrep, nparts, device_assignment))
input_specs = [
pxla.partitioned_sharding_spec(local_nparts, parts, aval)
for parts, aval in zip(local_in_parts, abstract_args)]
input_indices = [pxla.spec_to_indices(aval.shape, spec)
if spec is not None else None
for aval, spec in zip(abstract_args, input_specs)]
handle_args = partial(pxla.shard_args, compiled.local_devices(),
input_indices)
handle_outs = _avals_to_results_handler(nrep, local_nparts, # type: ignore
local_out_parts, local_out_avals)
return partial(_execute_spatially_partitioned, compiled, handle_args,
handle_outs)
def _sharded_jit_translation_rule(ctx, avals_in, avals_out, *in_nodes,
in_parts, out_parts_thunk, nparts,
name, call_jaxpr, local_in_parts,
local_out_parts_thunk, local_nparts):
subc = xc.XlaBuilder(f"sharded_jit_{name}")
# We assume any extra leading in_nodes are constants and replicate them.
num_extra_nodes = len(in_nodes) - len(in_parts)
assert num_extra_nodes >= 0
in_parts = (None,) * num_extra_nodes + in_parts
args = []
for i, (n, sharding) in enumerate(safe_zip(in_nodes, in_parts)):
# We use xla.set_sharding instead of xla.with_sharding because inlined calls
# shouldn't have shardings set directly on the inputs or outputs.
arg = xla.parameter(subc, i, ctx.builder.GetShape(n))
args.append(xla.set_sharding(subc, arg, sharding))
sub_ctx = ctx.replace(
builder=subc,
name_stack=new_name_stack(wrap_name(name, "sharded_jit")))
out_nodes = xla.jaxpr_subcomp(sub_ctx, call_jaxpr, (), *args)
out_parts = out_parts_thunk()
assert len(out_par
|
arkanister/django-contact-form-site
|
django_contact/apps.py
|
Python
|
bsd-3-clause
| 255
| 0.003922
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ContactFormConfig(AppConfig):
"""The default AppConfig for admin which does autodiscover
|
y."""
name = 'dja
|
ngo_contact'
verbose_name = _("Contact")
|
soltanmm-google/grpc
|
src/python/grpcio/grpc/beta/_client_adaptations.py
|
Python
|
bsd-3-clause
| 26,840
| 0.000261
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
grpc.StatusCode.CANCELLED: (face.Abortion.Kind.CANCELLED,
face.CancellationError),
grpc.StatusCode.UNKNOWN: (face.Abortion.Kind.REMOTE_FAILURE,
face.RemoteError),
grpc.StatusCode.DEADLINE_EXCEEDED: (face.Abortion.Kind.EXPIRED,
face.ExpirationError),
grpc.StatusCode.UNIMPLEMENTED: (face.Abortion.Kind.LOCAL_FAILURE,
face.LocalError),
}
def _effective_metadata(metadata, metadata_transformer):
non_none_metadata = () if metadata is None else metadata
if metadata_transformer is None:
return non_none_metadata
else:
return metadata_transformer(non_none_metadata)
def _credentials(grpc_call_options):
return None if grpc_call_options is None else grpc_call_options.credentials
def _abortion(rpc_error_call):
code = rpc_error_call.code()
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
return face.Abortion(error_kind,
rpc_error_call.initial_metadata(),
rpc_error_call.trailing_metadata(), code,
rpc_error_call.details())
def _abortion_error(rpc_error_call):
code = rpc_error_call.code()
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
exception_class = face.AbortionError if pair is None else pair[1]
return exception_class(rpc_error_call.initial_metadata(),
rpc_error_call.trailing_metadata(), code,
rpc_error_call.details())
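# Illustrative sketch (not part of the original module): how the mapping
# above turns a grpc.RpcError from a failed call into a Beta-API face
# exception. `some_unary_callable` is a hypothetical stand-in for any
# grpc multi-callable.
#
#   try:
#       some_unary_callable(request, timeout=1.0)
#   except grpc.RpcError as rpc_error_call:
#       # e.g. DEADLINE_EXCEEDED -> face.ExpirationError
#       raise _abortion_error(rpc_error_call)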
class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
def disable_next_request_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _Rendezvous(future.Future, face.Call):
def __init__(self, response_future, response_iterator, call):
self._future = response_future
self._iterator = response_iterator
self._call = call
def cancel(self):
return self._call.cancel()
def cancelled(self):
return self._future.cancelled()
def running(self):
return self._future.running()
def done(self):
return self._future.done()
def result(self, timeout=None):
try:
return self._future.result(timeout=timeout)
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def exception(self, timeout=None):
try:
rpc_error_call = self._future.exception(timeout=timeout)
if rpc_error_call is None:
return None
else:
return _abortion_error(rpc_error_call)
except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def traceback(self, timeout=None):
try:
return self._future.traceback(timeout=timeout)
        except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def add_done_callback(self, fn):
self._future.add_done_callback(lambda ignored_callback: fn(self))
def __iter__(self):
return self
def _next(self):
try:
return next(self._iterator)
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
return self._call.is_active()
def time_remaining(self):
return self._call.time_remaining()
def add_abortion_callback(self, abortion_callback):
def done_callback():
if self.code() is not grpc.StatusCode.OK:
abortion_callback(_abortion(self._call))
registered = self._call.add_callback(done_callback)
return None if registered else done_callback()
def protocol_context(self):
return _InvocationProtocolContext()
def initial_metadata(self):
return self._call.initial_metadata()
def terminal_metadata(self):
return self._call.terminal_metadata()
def code(self):
return self._call.code()
def details(self):
return self._call.details()
def _blocking_unary_unary(channel, group, method, timeout, with_call,
protocol_options, metadata, metadata_transformer,
request, request_serializer, response_deserializer):
try:
multi_callable = channel.unary_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
if with_call:
response, call = multi_callable.with_call(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return response, _Rendezvous(None, None, call)
else:
return multi_callable(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def _future_unary_unary(channel, group, method, timeout, protocol_options,
metadata, metadata_transformer, request,
request_serializer, response_deserializer):
multi_callable = channel.unary_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
    effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
dariosena/LearningPython
|
PY-14/conta.py
|
Python
|
gpl-3.0
| 1,698
| 0.00708
|
import datetime
class Historico:
def __init__(self):
self.data_abertura = datetime.datetime.today()
self.transacoes = []
def imprime(self):
print('data abertura: {}'.format(self.data_abertura))
print('transações: ')
for t in self.transacoes:
print('-', t)
class Cliente:
def __init__(self, nome, sobrenome, cpf):
self.nome = nome
self.sobrenome = sobrenome
self.cpf = cpf
class Conta:
def __init__(self, numero, cliente, saldo, limite=1000.0):
print('inicializando uma conta')
self._numero = numero
self._titular = cliente
self._saldo = saldo
self._limite = limite
self._historico = Historico()
def deposita(self, valor):
self._saldo += valor
self._historico.transacoes.append('depósito de {}'.format(valor))
def saca(self, valor):
if (self._saldo < valor):
return False
else:
self._saldo -= valor
self._historico.transacoes.append('saque de {}'.format(valor))
return True
def transfere_para(self, destino, valor):
retirou = self.saca(valor)
if (retirou):
destino.deposita(valor)
            self._historico.transacoes.append('transferencia de {} para conta {}'.format(valor, destino._numero))
return True
else:
return False
def extrato(self):
print('numero: {} \nsaldo: {}'.format(self._numero, self._saldo))
        self._historico.transacoes.append('tirou extrato - saldo de {}'.format(self._saldo))
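# Illustrative usage (not part of the original file), exercising the API
# defined above; the names and values are made up for the example.
if __name__ == '__main__':
    cliente = Cliente('Joao', 'Silva', '111.111.111-11')
    conta = Conta('123-4', cliente, 120.0)
    conta.deposita(30.0)
    conta.saca(50.0)
    destino = Conta('567-8', Cliente('Maria', 'Souza', '222.222.222-22'), 0.0)
    conta.transfere_para(destino, 40.0)
    conta.extrato()  # prints numero: 123-4, saldo: 60.0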
|
vvladych/forecastmgmt
|
src/forecastmgmt/ui/masterdata/organisation_add_mask.py
|
Python
|
unlicense
| 1,939
| 0.017535
|
'''
Created on 03.05.2015
@author: vvladych
'''
from gi.repository import Gtk
from forecastmgmt.model.organisation import Organisation
from masterdata_abstract_window import AbstractAddMask
class OrganisationAddMask(AbstractAddMask):
def __init__(self, main_window, reset_callback):
super(OrganisationAddMask, self).__init__(main_window, reset_callback)
def create_layout(self):
self.set_column_spacing(5)
self.set_row_spacing(3)
placeholder_label = Gtk.Label("")
placeholder_label.set_size_request(1,40)
self.attach(placeholder_label,0,-1,1,1)
row = 0
# Row 0: organisation uuid
self.add_uuid_row("Organisation UUID", row)
row+=1
# Row 1: common name
self.add_common_name_row("Common Name", row)
row+=1
# last row
save_button = Gtk.Button("Save", Gtk.STOCK_SAVE)
save_button.connect("clicked", self.save_current_object)
self.attach(save_button,1,row,1,1)
back_button = Gtk.Button("Back", Gtk.STOCK_GO_BACK)
back_button.connect("clicked", self.parent_callback_func, self.reset_callback)
self.attach(back_button,2,row,1,1)
def fill_mask_from_current_object(self):
        if self.current_object is not None:
self.uuid_text_entry.set_text(self.current_object.uuid)
self.common_name_text_entry.set_text(self.current_object.common_name)
else:
self.uuid_text_entry.set_text("")
self.common_name_text_entry.set_text("")
def create_object_from_mask(self):
        common_name = self.common_name_text_entry.get_text()
        if not common_name:
            self.show_error_dialog("Common name cannot be empty")
            return
organisation=Organisation(None,common_name)
return organisation
|
nycholas/ask-undrgz
|
src/ask-undrgz/django/contrib/sessions/models.py
|
Python
|
bsd-3-clause
| 2,675
| 0.001495
|
import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.hashcompat import md5_constructor
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
"""
pickled = pickle.dumps(session_dict)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
    The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django website).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'))
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
encoded_data = base64.decodestring(self.session_data)
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
from django.core.exceptions import SuspiciousOperation
raise SuspiciousOperation("User tampered with session cookie.")
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
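# Illustrative sketch (not part of the original model): the encode/decode
# round trip implemented above, assuming Django settings with a SECRET_KEY
# are configured.
#
#   encoded = Session.objects.encode({'user_id': 42})
#   s = Session(session_key='abc', session_data=encoded)
#   assert s.get_decoded() == {'user_id': 42}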
|
abbot/android-restore-tools
|
extract.py
|
Python
|
mit
| 9,201
| 0.003261
|
#!/usr/bin/env python
import optparse
import os
import sys
import tempfile
import datetime
from xml.etree import ElementTree as etree
from convert import read_messages, read_calls
import yaffs
def read_chunk(fd, size):
s = fd.read(size)
if len(s) > 0 and len(s) != size:
raise IOError("Broken image file")
return s
def read_segment(fd):
chunk_data = read_chunk(fd, yaffs.CHUNK_SIZE)
spare_data = read_chunk(fd, yaffs.SPARE_SIZE)
if len(chunk_data) == 0 and len(spare_data) == 0:
return None, None
elif len(chunk_data) == 0 or len(spare_data) == 0:
raise IOError("Broken image file")
return chunk_data, yaffs.PackedTags2.from_buffer_copy(spare_data)
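# Layout note (inferred from the reads above, not in the original source):
# the image is a flat sequence of segments, each CHUNK_SIZE bytes of chunk
# data followed by SPARE_SIZE bytes of packed tags, so segment i begins at
# byte offset i * (CHUNK_SIZE + SPARE_SIZE).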
def extract(filename):
fd = open(filename, "rb")
yaffs_objects = {yaffs.OBJECTID_ROOT: "."}
while True:
chunk_data, tags = read_segment(fd)
if chunk_data is None:
break
if tags.t.byteCount == 0xffff:
header = yaffs.ObjectHeader.from_buffer_copy(chunk_data)
full_path_name = os.path.join(yaffs_objects[header.parentObjectId], header.name)
yaffs_objects[tags.t.objectId] = full_path_name
if header.type == yaffs.FILE:
remaining = header.fileSize
out = open(full_path_name, "wb")
try:
os.fchmod(out.fileno(), header.yst_mode)
except:
pass
while remaining > 0:
chunk_data, tags = read_segment(fd)
if remaining < tags.t.byteCount:
s = chunk_data[:remaining]
else:
s = chunk_data[:tags.t.byteCount]
out.write(s)
remaining -= len(s)
print "wrote", full_path_name
elif header.type == yaffs.SYMLINK:
os.symlink(header.alias, full_path_name)
print "symlink %s -> %s" % (header.alias, full_path_name)
elif header.type == yaffs.DIRECTORY:
try:
os.mkdir(full_path_name, 0777)
print "created directory %s" % full_path_name
except OSError, exc:
if "exists" in str(exc):
pass
else:
print str(exc)
raise
elif header.type == yaffs.HARDLINK:
os.link(yaffs_objects[header.equivalentObjectId], full_path_name)
print "hardlink %s -> %s" % (yaffs_objects[header.equivalentObjectId], full_path_name)
else:
print "skipping unknown object"
def get_files(filename, filenames, callback=None):
fd = open(filename, "rb")
yaffs_objects = {yaffs.OBJECTID_ROOT: "."}
rc = {}
while True:
chunk_data, tags = read_segment(fd)
if chunk_data is None:
break
if tags.t.byteCount == 0xffff:
header = yaffs.ObjectHeader.from_buffer_copy(chunk_data)
full_path_name = os.path.join(yaffs_objects[header.parentObjectId], header.name)
yaffs_objects[tags.t.objectId] = full_path_name
if callback is not None:
callback(header)
if header.type == yaffs.FILE:
remaining = header.fileSize
contents = ""
if header.name in filenames:
while remaining > 0:
chunk_data, tags = read_segment(fd)
if remaining < tags.t.byteCount:
s = chunk_data[:remaining]
else:
s = chunk_data[:tags.t.byteCount]
contents += s
remaining -= len(s)
rc[full_path_name] = contents
else:
blocks = (remaining + yaffs.CHUNK_SIZE - 1) / yaffs.CHUNK_SIZE
fd.seek(blocks*(yaffs.CHUNK_SIZE+yaffs.SPARE_SIZE), 1)
return rc
def dotty(header):
if header.name.endswith(".db"):
sys.stdout.write("+")
else:
sys.stdout.write(".")
sys.stdout.flush()
def get_save_filename(filename=""):
while True:
if filename == "":
            new_filename = raw_input("Save as: ")
else:
new_filename = raw_input("Save as (empty=%s): " % filename)
if new_filename == "" and filename == "":
continue
if new_filename != "":
filename = new_filename
try:
os.stat(filename)
ans = raw_input("Warning: %s already exists, overwrite (y/n)? " % filename)
if ans.lower().startswith("y"):
break
except OSError:
break
return filename
def save(filename, content):
open(get_save_filename(filename), "wb").write(content)
def extract_sms(content):
fd_n, name = tempfile.mkstemp()
fd = os.fdopen(fd_n, "wb")
try:
fd.write(content)
fd.close()
messages = read_messages(name)
print "Read %s messages" % str(messages.attrib["count"])
newest = datetime.datetime.fromtimestamp(int(messages.getchildren()[0].attrib["date"])/1000)
output = newest.strftime("sms-%Y%m%d%H%M%S.xml")
etree.ElementTree(messages).write(get_save_filename(output),
encoding="utf-8",
xml_declaration=True)
except Exception, exc:
print "Failed to extract messages: %s" % exc
print repr(exc)
finally:
try:
os.unlink(name)
except:
print "Warning: failed to remove temporary file %s" % name
def extract_calls(content):
fd, name = tempfile.mkstemp()
fd = os.fdopen(fd, "wb")
try:
fd.write(content)
fd.close()
calls = read_calls(name)
print "Read %s calls" % str(calls.attrib["count"])
newest = datetime.datetime.fromtimestamp(int(calls.getchildren()[0].attrib["date"])/1000)
output = newest.strftime("calls-%Y%m%d%H%M%S.xml")
etree.ElementTree(calls).write(get_save_filename(output),
encoding="utf-8",
xml_declaration=True)
except Exception, exc:
print "Failed to extract calls: %s" % exc
finally:
try:
os.unlink(name)
except:
print "Warning: failed to remove temporary file %s" % name
def interactive(filename):
print "Scanning and reading image (this may take some time)"
r = get_files(filename, ["mmssms.db", "contacts2.db"], dotty)
print ""
while True:
print
print "Found files:"
names = r.keys()
for i, n in enumerate(names):
print "[%d] %s" % (i+1, n)
n = int(raw_input("Enter file number to extract (0 to quit): ")) - 1
if n < 0 or n >= len(names):
break
name = names[n]
print "File %s selected." % name
print "Possible actions:"
print "[f] save file"
print "[s] extract SMS messages from file"
print "[c] extract Call logs from file"
t = raw_input("Please choose action: ")
t = t.lower()
if t.startswith("f"):
save(os.path.basename(names[n]), r[names[n]])
elif t.startswith("s"):
extract_sms(r[names[n]])
elif t.startswith("c"):
extract_calls(r[names[n]])
def main():
parser = optparse.OptionParser(usage="%prog [options...] data.img")
parser.add_option("-x", "--extract", action="store_true",
help="Don't search for required databases, just extract the filesystem")
parser.add_option("-s", "--sms", action="store_true",
help="Input file is mmssms.db, just convert SMS messages to XML format.")
parser.add_option("-c", "--calls", action="store_true",
help="Input file is contacts2.db, just convert Call Logs
|
daodaoliang/bokeh
|
bokeh/server/websocket/manager.py
|
Python
|
bsd-3-clause
| 3,294
| 0.00425
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import atexit
import uuid
from ..utils.multi_dict import MultiDict
class WebSocketManager(object):
def __init__(self):
self.sockets = {}
self.topic_clientid_map = MultiDict()
self.clientid_topic_map = MultiDict()
self.auth_functions = {}
atexit.register(self._atexit)
def _atexit(self):
if len(self.sockets) != 0:
log.warning("Not all websocket connections were closed properly")
def remove_clientid(self, clientid):
topics = self.clientid_topic_map.get(clientid, [])
for topic in topics:
self.topic_clientid_map.remove_val(topic, clientid)
def remove_topic(self, topic):
clientids = self.topic_clientid_map.get(topic)
for clientid in clientids:
self.clientid_topic_map.remove_val(clientid, topic)
def subscribe_socket(self, socket, topic, clientid=None):
if clientid is None :
clientid = str(uuid.uuid4())
self.subscribe(clientid, topic)
self.add_socket(socket, clientid)
def can_subscribe(self, clientid, topic):
#auth goes here
return True
def register_auth(self, authtype, func):
self.auth_functions[authtype] = func
def auth(self, authtoken, topic):
#authtoken - some string, whatever you want it to be
#topic - string topic, of syntax type:value.
#topic type maps to auth function
authtype, topic = topic.split(":", 1)
if self.auth_functions.get(authtype):
return self.auth_functions[authtype](authtoken, topic)
else:
return True
def subscribe(self, clientid, topic):
if self.can_subscribe(clientid, topic):
log.debug("subscribe %s, %s", topic, clientid)
self.topic_clientid_map.add(topic, clientid)
self.clientid_topic_map.add(clientid, topic)
def add_socket(self, socket, clientid):
log.debug("add socket %s", clientid)
self.sockets[clientid] = socket
def remove_socket(self, clientid):
log.debug("remove socket %s", clientid)
self.sockets.pop(clientid, None)
def send(self, topic, msg, exclude=None):
if exclude is None:
exclude = set()
log.debug("sending to %s", self.topic_clientid_map.get(topic, []))
for clientid in tuple(self.topic_clientid_map.get(topic, [])):
socket = self.sockets.get(clientid, None)
if not socket:
continue
if clientid in exclude:
continue
try:
socket.write_message(topic + ":" + msg)
            except Exception as e:  # what exception is this? raised when a client disconnects
log.exception(e)
self.remove_socket(clientid)
self.remove_clientid(clientid)
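# Illustrative usage (not part of the original module): `socket` is any
# object exposing write_message(str), e.g. a Tornado WebSocket handler.
#
#   manager = WebSocketManager()
#   manager.subscribe_socket(socket, topic="bokeh-doc", clientid="c1")
#   manager.send("bokeh-doc", "patch ...")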
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/sockeye/sockeye/train.py
|
Python
|
apache-2.0
| 46,961
| 0.004152
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Simple Training CLI.
"""
import argparse
import os
import shutil
import sys
import tempfile
from contextlib import ExitStack
from typing import Any, cast, Optional, Dict, List, Tuple
import mxnet as mx
from . import arguments
from . import checkpoint_decoder
from . import constants as C
from . import convolution
from . import coverage
from . import data_io
from . import decoder
from . import encoder
from . import initializer
from . import loss
from . import lr_scheduler
from . import model
from . import rnn
from . import rnn_attention
from . import training
from . import transformer
from . import utils
from . import vocab
from .config import Config
from .log import setup_main_logger
from .optimizers import OptimizerConfig
from .utils import check_condition
# Temporary logger; the real one (probably logging to a file) will be created in the main function.
logger = setup_main_logger(__name__, file_logging=False, console=True)
def none_if_negative(val):
return None if val < 0 else val
def _list_to_tuple(v):
"""Convert v to a tuple if it is a list."""
if isinstance(v, list):
return tuple(v)
return v
def _dict_difference(dict1: Dict, dict2: Dict):
diffs = set()
for k, v in dict1.items():
# Note: A list and a tuple with the same values is considered equal
# (this is due to json deserializing former tuples as list).
if k not in dict2 or _list_to_tuple(dict2[k]) != _list_to_tuple(v):
diffs.add(k)
return diffs
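# Worked example (illustrative, not in the original source): a list and a
# tuple holding the same values compare equal here, so only 'b' is reported.
#
#   _dict_difference({'a': [1, 2], 'b': 3}, {'a': (1, 2), 'b': 4}) == {'b'}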
def check_arg_compatibility(args: argparse.Namespace):
"""
Check if some arguments are incompatible with each other.
:param args: Arguments as returned by argparse.
"""
check_condition(args.optimized_metric == C.BLEU or args.optimized_metric in args.metrics,
"Must optimize either BLEU or one of tracked metrics (--metrics)")
if args.encoder == C.TRANSFORMER_TYPE:
check_condition(args.transformer_model_size[0] == args.num_embed[0],
"Source embedding size must match transformer model size: %s vs. %s"
% (args.transformer_model_size, args.num_embed[0]))
total_source_factor_size = sum(args.source_factors_num_embed)
if total_source_factor_size > 0:
adjusted_transformer_encoder_model_size = args.num_embed[0] + total_source_factor_size
check_condition(adjusted_transformer_encoder_model_size % 2 == 0 and
adjusted_transformer_encoder_model_size % args.transformer_attention_heads[0] == 0,
"Sum of source factor sizes, i.e. num-embed plus source-factors-num-embed, (%d) "
"has to be even and a multiple of encoder attention heads (%d)" % (
adjusted_transformer_encoder_model_size, args.transformer_attention_heads[0]))
if args.decoder == C.TRANSFORMER_TYPE:
check_condition(args.transformer_model_size[1] == args.num_embed[1],
"Target embedding size must match transformer model size: %s vs. %s"
% (args.transformer_model_size, args.num_embed[1]))
if args.lhuc is not None:
# Actually this check is a bit too strict
check_condition(args.encoder != C.CONVOLUTION_TYPE or args.decoder != C.CONVOLUTION_TYPE,
"LHUC is not supported for convolutional models yet.")
check_condition(args.decoder != C.TRANSFORMER_TYPE or C.LHUC_STATE_INIT not in args.lhuc,
"The %s options only applies to RNN models" % C.LHUC_STATE_INIT)
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
"""
Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status.
"""
resume_training = False
training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(output_folder):
if args.overwrite_output:
logger.info("Removing existing output folder %s.", output_folder)
shutil.rmtree(output_folder)
os.makedirs(output_folder)
elif os.path.exists(training_state_dir):
old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
# Remove args that may differ without affecting the training.
arg_diffs -= set(C.ARGS_MAY_DIFFER)
# allow different device-ids provided their total count is the same
if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
arg_diffs.discard('device_ids')
if not arg_diffs:
resume_training = True
else:
# We do not have the logger yet
logger.error("Mismatch in arguments for training continuation.")
logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
sys.exit(1)
elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
sys.exit(1)
else:
logger.info("The output folder %s already exists, but no training state or parameter file was found. "
"Will start training from scratch.", output_folder)
else:
os.makedirs(output_folder)
return resume_training
def determine_context(args: argparse.Namespace, exit_stack: ExitStack) -> List[mx.Context]:
"""
Determine the context we should run on (CPU or GPU).
:param args: Arguments as returned by argparse.
    :param exit_stack: An ExitStack from contextlib.
:return: A list with the context(s) to run on.
"""
if args.use_cpu:
logger.info("Training Device: CPU")
context = [mx.cpu()]
else:
num_gpus = utils.get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu "
"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
"binary isn't on the path).")
if args.disable_device_locking:
context = utils.expand_requested_device_ids(args.device_ids)
else:
context = exit_stack.enter_context(utils.acquire_gpus(args.device_ids, lock_dir=args.lock_dir))
if args.batch_type == C.BATCH_TYPE_SENTENCE:
check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
"divisible by the number of devices. Choose a batch "
"size that is a multiple of %d." % len(context))
logger.info("Training Device(s): GPU %s", context)
context = [mx.gpu(gpu_id) for gpu_id in context]
return context
def create_checkpoint_decoder(args: argparse.Namespace,
exit_stack: ExitStack,
train_context: List[mx.Context]) -> Optional[checkpoint_decoder.CheckpointDecoder]:
"""
Returns a checkpoint decoder or None.
:param args: Arguments as returned by argparse.
|
YannickJadoul/Parselmouth
|
pybind11/tests/test_pickling.py
|
Python
|
gpl-3.0
| 1,191
| 0.00084
|
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import pickling as m
try:
    import cPickle as pickle  # Use cPickle on Python 2.7
except ImportError:
import pickle
@pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"])
def test_roundtrip(cls_name):
cls = getattr(m, cls_name)
p = cls("test_value")
p.setExtra1(15)
p.setExtra2(48)
data = pickle.dumps(p, 2) # Must use pickle protocol >= 2
p2 = pickle.loads(data)
assert p2.value() == p.value()
assert p2.extra1() == p.extra1()
assert p2.extra2() == p.extra2()
@pytest.mark.xfail("env.PYPY")
@pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"])
def test_roundtrip_with_dict(cls_name):
cls = getattr(m, cls_name)
p = cls("test_value")
p.extra = 15
p.dynamic = "Attribute"
data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL)
p2 = pickle.loads(data)
assert p2.value == p.value
assert p2.extra == p.extra
assert p2.dynamic == p.dynamic
def test_enum_pickle():
from pybind11_tests import enums as e
data = pickle.dumps(e.EOne, 2)
assert e.EOne == pickle.loads(data)
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/softwareproperties/gtk/dialogs.py
|
Python
|
gpl-3.0
| 1,305
| 0.009962
|
# dialogs - provide common dialogs
#
# Copyright (c) 2006 FSF Europe
#
# Authors:
# Sebastian Heinlein <glatzor@ubuntu.com>
#  Michael Vogt <mvo@canonical.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from gi.repository import Gtk
def show_error_dialog(parent, primary, secondary):
p = "<span weight=\"bold\" size=\"larger\">%s</span>" % primary
dialog = Gtk.MessageDialog(parent,Gtk.DialogFlags.MODAL,
Gtk.MessageType.ERROR,Gtk.ButtonsType.CLOSE,"")
    dialog.set_markup(p)
    dialog.format_secondary_text(secondary)
dialog.run()
dialog.hide()
|
openstack/cinder
|
cinder/cmd/volume_usage_audit.py
|
Python
|
apache-2.0
| 10,093
| 0.000694
|
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. If run on July 4th, it generates usages for
  July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
import datetime
import sys
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n # noqa
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.volume_utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
if CONF.start_time:
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
begin = begin.replace(tzinfo=iso8601.UTC)
end = end.replace(tzinfo=iso8601.UTC)
if end <= begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
'end': end}
LOG.error(msg)
sys.exit(-1)
return begin, end
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
"""volume_ref notify usage"""
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
cinder.volume.volume_utils.notify_about_volume_usage(
admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.error("Exists volume notification failed: %s",
exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
"""snapshot_ref notify usage"""
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
cinder.volume.volume_utils.notify_about_snapshot_usage(
admin_context, snapshot_ref, 'exists', extra_info)
except Exception as exc_msg:
LOG.error("Exists snapshot notification failed: %s",
exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
"""backup_ref notify usage"""
try:
cinder.volume.volume_utils.notify_about_backup_usage(
admin_context, backup_ref, 'exists', extra_info)
LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'backup_id': backup_ref.id,
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
begin, end, notify_about_usage, type_id_str, type_name):
_notify_usage(LOG, obj_ref, extra_info, admin_context)
if CONF.send_actions:
if begin < obj_ref.created_at < end:
_create_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
_delete_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
def main():
objects.register_all()
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
|
maferelo/saleor
|
saleor/graphql/discount/bulk_mutations.py
|
Python
|
bsd-3-clause
| 839
| 0.002384
|
import graphene
from ...core.permissions import DiscountPermissions
from ...discount import models
from ..core.mutations import ModelBulkDeleteMutation
class SaleBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of sale IDs to delete."
)
class Meta:
description = "Deletes sales."
model = models.Sale
permissions = (DiscountPermissions.MANAGE_DISCOUNTS,)
class VoucherBulkDelete(ModelBulkDeleteMutation):
class Arguments:
ids = graphene.List(
graphene.ID, required=True, description="List of voucher IDs to delete."
)
class Meta:
description = "Deletes vouchers."
model = models.Voucher
permissions = (DiscountPermissions.MANAGE_DISCOUNTS,)
|
oppia/oppia
|
core/domain/event_services.py
|
Python
|
apache-2.0
| 12,316
| 0.000081
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for handling events."""
from __future__ import annotations
import logging
from core import feconf
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import feedback_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import taskqueue_services
from core.platform import models
(feedback_models, stats_models, user_models) = models.Registry.import_models([
models.NAMES.feedback, models.NAMES.statistics, models.NAMES.user])
transaction_services = models.Registry.import_transaction_services()
class BaseEventHandler:
"""Base class for event dispatchers."""
    # A string denoting the type of the event. Should be specified by
# subclasses and considered immutable.
EVENT_TYPE = None
@classmethod
def _handle_event(cls, *args, **kwargs):
"""Perform in-request processing of an incoming event."""
raise NotImplementedError(
'Subclasses of BaseEventHandler should implement the '
'_handle_event() method, using explicit arguments '
'(no *args or **kwargs).')
@classmethod
def record(cls, *args, **kwargs):
"""Process incoming events.
Callers of event handlers should call this method, not _handle_event().
"""
cls._handle_event(*args, **kwargs)
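# Illustrative usage (not part of the original module): callers always go
# through record(), which delegates to the subclass's _handle_event(), e.g.
#
#   StartExplorationEventHandler.record(
#       exp_id, exp_version, state_name, session_id, params, play_type)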
class StatsEventsHandler(BaseEventHandler):
"""Event handler for incremental update of analytics model using aggregated
stats data.
"""
EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS
@classmethod
def _is_latest_version(cls, exp_id, exp_version):
"""Verifies whether the exploration version for the stats to be stored
corresponds to the latest version of the exploration.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
return exploration.version == exp_version
@classmethod
def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
if 'undefined' in aggregated_stats['state_stats_mapping']:
logging.error(
'Aggregated stats contains an undefined state name: %s'
% list(aggregated_stats['state_stats_mapping'].keys()))
return
if cls._is_latest_version(exploration_id, exp_version):
taskqueue_services.defer(
taskqueue_services.FUNCTION_ID_UPDATE_STATS,
taskqueue_services.QUEUE_NAME_STATS,
exploration_id,
exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
"""Event handler for recording answer submissions."""
EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
@classmethod
def _handle_event(
cls, exploration_id, exploration_version, state_name,
interaction_id, answer_group_index, rule_spec_index,
classification_categorization, session_id, time_spent_in_secs,
params, normalized_answer):
"""Records an event when an answer triggers a rule. The answer recorded
here is a Python-representation of the actual answer submitted by the
user.
"""
# TODO(sll): Escape these args?
stats_services.record_answer(
exploration_id, exploration_version, state_name, interaction_id,
stats_domain.SubmittedAnswer(
normalized_answer, interaction_id, answer_group_index,
rule_spec_index, classification_categorization, params,
session_id, time_spent_in_secs))
feedback_is_useful = (
classification_categorization != (
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
stats_models.AnswerSubmittedEventLogEntryModel.create(
exploration_id, exploration_version, state_name, session_id,
time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
"""Event handler for recording exploration actual start events."""
EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id):
stats_models.ExplorationActualStartEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
"""Event handler for recording solution hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.SolutionHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration start events."""
EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, params,
play_type):
stats_models.StartExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, params,
play_type)
handle_exploration_start(exp_id)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration leave events."""
EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration completion events."""
EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.CompleteExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration rating events."""
EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
@classmethod
def _handle_event(cls, exp_id, user_id, rating, old_rating):
stats_models.RateExplorationEventLogEntryModel.create(
exp_id, user_id, rating, old_rating)
handle_exploration_rating(exp_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
"""Event handler for recording state hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
# TODO(sll): Remove params before sending this event to the jobs taskqueue.
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
params, play_type):
stats_models.StateHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
"""Event handler for recording state complete events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
        stats_models.StateCompleteEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)
|
htygithub/bokeh
|
bokeh/server/protocol/messages/__init__.py
|
Python
|
bsd-3-clause
| 643
| 0.021773
|
'''
'''
from __future__ import absolute_import
from ...exceptions import ProtocolError
index = {}
def register(cls):
''' Decorator to add a Message (and its revision) to the Protocol index.
'''
key = (cls.msgtype, cls.revision)
    if key in index:
raise ProtocolError("Duplicate message specification encountered: %r" % key)
index[key] = cls
return cls
from .ack import *
from .ok import *
from .patch_doc import *
from .pull_doc_req import *
from .pull_doc_reply import *
from .push_doc import *
from .error import *
from .server_info_reply import *
from .server_info_req import *
from .working import *
|
endlessm/chromium-browser
|
third_party/catapult/dashboard/dashboard/create_health_report.py
|
Python
|
bsd-3-clause
| 4,168
| 0.007917
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for adding and editing sheriff rotations."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import json
from google.appengine.api import users
from google.appengine.ext import ndb
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import xsrf
from dashboard.models import table_config
class CreateHealthReportHandler(request_handler.RequestHandler):
def get(self):
"""Renders the UI with the form fields."""
self.RenderStaticHtml('create_health_report.html')
def post(self):
"""POSTS the data to the datastore."""
user = users.get_current_user()
if not user:
self.response.out.write(json.dumps({'error': 'User not logged in.'}))
return
if not utils.IsInternalUser():
self.response.out.write(json.dumps(
{'error':
'Unauthorized access, please use chromium account to login.'}))
return
get_token = self.request.get('getToken')
get_table_config_list = self.request.get('getTableConfigList')
get_table_config_details = self.request.get('getTableConfigDetails')
if get_token == 'true':
values = {}
self.GetDynamicVariables(values)
self.response.out.write(json.dumps({
'xsrf_token': values['xsrf_token'],
}))
elif get_table_config_list:
self._GetTableConfigList()
elif get_table_config_details:
self._GetTableConfigDetails(get_table_config_details)
else:
self._CreateTableConfig()
def _GetTableConfigList(self):
query = table_config.TableConfig.query()
table_config_list = query.fetch(keys_only=True)
return_list = []
for config in table_config_list:
return_list.append(config.id())
self.response.out.write(json.dumps({
'table_config_list': return_list,
}))
def _GetTableConfigDetails(self, config_name):
config_entity = ndb.Key('TableConfig', config_name).get()
if config_entity:
master_bot_list = []
for bot in config_entity.bots:
master_bot_list.append(bot.parent().string_id() + '/' + bot.string_id())
self.response.out.write(json.dumps({
'table_name': config_name,
'table_bots': master_bot_list,
'table_tests': config_entity.tests,
'table_layout': config_entity.table_layout
}))
else:
self.response.out.write(
json.dumps({
'error': 'Invalid config name.'
}))
def _CreateTableConfig(self):
"""Creates a table config. Wr
|
ites a valid name or an error message."""
self._ValidateToken()
name = self.request.get('tableName')
master_bot = self.request.get('tableBots').splitlines()
tests = self.request.get('tableTests').splitlines()
table_layout = self.request.get('tableLayout')
override = int(self.request.get('override'))
user = users.get_current_user()
if not name or not master_bot or not tests or not table_layout or not user:
self.response.out.write(json.dumps({
'error': 'Please fill out the form entirely.'
}))
return
try:
created_table = table_config.CreateTableConfig(
name=name, bots=master_bot, tests=tests, layout=table_layout,
username=user.email(), override=override)
except table_config.BadRequestError as error:
self.response.out.write(json.dumps({
'error': error.message,
}))
logging.error('BadRequestError: %r', error.message)
return
if created_table:
self.response.out.write(json.dumps({
'name': name,
}))
else:
self.response.out.write(json.dumps({
'error': 'Could not create table.',
}))
logging.error('Could not create table.')
def _ValidateToken(self):
user = users.get_current_user()
token = str(self.request.get('xsrf_token'))
if not user or not xsrf._ValidateToken(token, user):
self.abort(403)
|
lsst-sqre/sqre-codekit
|
codekit/progressbar.py
|
Python
|
mit
| 2,070
| 0
|
""" progressbar2 related utils"""
from codekit.codetools import warn
from public import public
from time import sleep
import progressbar
import functools
@public
def setup_logging(verbosity=0):
"""Configure progressbar sys.stderr wrapper which is required to play nice
with logging and not have strange formatting artifacts.
"""
progressbar.streams.wrap_stderr()
@public
def countdown_timer(seconds=10):
"""Show a simple countdown progress bar
Parameters
----------
seconds
Period of time the progress bar takes to reach zero.
"""
tick = 0.1 # seconds
n_ticks = int(seconds / tick)
widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()]
pbar = progressbar.ProgressBar(
widgets=widgets, max_value=n_ticks
).start()
for i in range(n_ticks):
pbar.update(i)
sleep(tick)
pbar.finish()
@public
def wait_for_user_panic(**kwargs):
"""Display a scary message and count down progresss bar so an interative
user a chance to panic and kill the program.
Parameters
----------
kwargs
Passed verbatim to countdown_timer()
"""
warn('Now is the time to panic and Ctrl-C')
countdown_timer(**kwargs)
@public
@functools.lru_cache()
def wait_for_user_panic_once(**kwargs):
"""Same functionality as wait_for_user_panic() but will only display a
    countdown once, regardless of how many times it is called.
Parameters
----------
kwargs
Passed verbatim to wait_for_user_panic()
"""
wait_for_user_panic(**kwargs)
@public
def eta_bar(msg, max_value):
"""Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
    max_value: int
The max number of progress bar steps/updates
"""
widgets = [
"{msg}:".format(msg=msg),
progressbar.Bar(), ' ', progressbar.AdaptiveETA(),
]
return progressbar.ProgressBar(widgets=widgets, max_value=max_value)
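# Illustrative usage (not part of the original module); progressbar2's
# ProgressBar can be driven with explicit update()/finish() calls:
#
#   pbar = eta_bar('cloning', max_value=10)
#   for i in range(10):
#       do_work()  # hypothetical work item
#       pbar.update(i)
#   pbar.finish()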
|
pantsbuild/pants
|
src/python/pants/backend/scala/goals/tailor_test.py
|
Python
|
apache-2.0
| 802
| 0
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.scala.goals.tailor import classify_source_files
from pants.backend.scala.target_types import (
ScalaJunitTestsGeneratorTarget,
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
)
def test_classify_source_files() -> None:
scalatest_files = {
"foo/bar/BazSpec.scala",
}
junit_files = {
"foo/b
|
ar/BazTest.scala",
}
lib_files = {"foo/bar/Baz.scala"}
assert {
ScalatestTestsGeneratorTarget: scalatest_files,
ScalaJunitTestsGeneratorTarget: junit_files,
ScalaSourcesGeneratorTarget: lib_files,
} == classify_source_files(junit_files | lib_files | scalatest_files)
|
lento/cortex
|
test/IECoreHoudini/procedurals/subdRender/subdRender-1.py
|
Python
|
bsd-3-clause
| 2,590
| 0.011583
|
#=====
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=====
# Subd Render
#
# This cookbook example demonstrates how to load & render a mesh primitive and
# render it as a subdivision surface.
#
#=====
from IECore import *
class subdRender(ParameterisedProcedural) :
def __init__(self) :
ParameterisedProcedural.__init__( self, "Renders a mesh as a subd." )
path = PathParameter( "path", "Path", "" )
self.parameters().addParameter( path )
def doBound(self, args) :
geo = Reader.create( args['path'].value ).read()
return geo.bound()
def doRenderState(self, renderer, args) :
pass
def doRender(self, renderer, args) :
geo = Reader.create( args['path'].value ).read()
geo.interpolation = "catmullClark"
geo.render( renderer )
# register
registerRunTimeTyped( subdRender )
|
analyst-collective/dbt
|
test/unit/test_yaml_renderer.py
|
Python
|
apache-2.0
| 2,904
| 0.000344
|
import unittest
import dbt.exceptions
import dbt.utils
from dbt.parser.schema_renderer import SchemaYamlRenderer
class TestYamlRendering(unittest.TestCase):
def test__models(self):
context = {
"test_var": "1234",
"alt_var": "replaced",
}
renderer = SchemaYamlRenderer(context, 'models')
# Verify description is not rendered and misc attribute is rendered
dct = {
"name": "my_model",
"description": "{{ test_var }}",
"attribute": "{{ test_var }}",
}
expected = {
"name": "my_model",
"description": "{{ test_var }}",
"attribute": "1234",
}
dct = renderer.render_data(dct)
self.assertEqual(expected, dct)
# Verify description in columns is not rendered
dct = {
'name': 'my_test',
'attribute': "{{ test_var }}",
'columns': [
{'description': "{{ test_var }}", 'name': 'id'},
]
}
expected = {
'name': 'my_test',
'attribute': "1234",
'columns': [
{'description': "{{ test_var }}", 'name': 'id'},
]
}
dct = renderer.render_data(dct)
self.assertEqual(expected, dct)
def test__sources(self):
context = {
"test_var": "1234",
"alt_var": "replaced",
}
renderer = SchemaYamlRenderer(context, 'sources')
# Only descriptions have jinja, none should be rendered
dct = {
"name": "my_source",
"description": "{{ alt_var }}",
"tables": [
{
"name": "my_table",
"description": "{{ alt_var }}",
"columns": [
{
"name": "id",
"description": "{{ alt_var }}",
}
]
}
]
}
rendered = renderer.render_data(dct)
self.assertEqual(dct, rendered)
def test__macros(self):
context = {
"test_var": "1234",
"alt_var": "replaced",
}
renderer = SchemaYamlRenderer(context, 'macros')
# Look for description in arguments
dct = {
"name": "my_macro",
"arguments": [
{"name": "my_arg", "attr": "{{ alt_var }}"},
{"name"
|
: "an_arg", "description": "{{ alt_var}}"}
]
}
expected = {
"name": "my_macro",
"arguments": [
{"name": "my_arg", "attr": "replaced"},
{"name": "an_arg", "description": "{{ alt_var}}"}
]
}
dct = renderer.render_data(dct)
self.assertEqual(dct, expected)
|
Honzin/ccs
|
dev/bitnz/public/__init__.py
|
Python
|
agpl-3.0
| 1,090
| 0.006422
|
import urllib.parse
import sys
from ccs import core
from ccs import constants
from . import response
def ticker():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
# complete request
cr = core.request(s, r)
return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
def trades():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
# complete request
cr = core.request(s, r)
    return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
# some problem with the encoding
# def trades_chart():
# s = __name__.split(".")[1]
# r = sys._getframe().f_code.co_name
#
# # complete request
# cr = core.request(s, r)
#
# return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
def orderbook():
s = __name__.split(".")[1]
r = sys._getframe().f_code.co_name
    # complete request
cr = core.request(s, r)
return core.get(core.hostname(s), cr, core.header(s), core.compression(s), core.timeout(s))
|
glennrub/micropython
|
tools/pydfu.py
|
Python
|
mit
| 20,108
| 0.001392
|
#!/usr/bin/env python
# This file is part of the OpenMV project.
# Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
# This work is licensed under the MIT license, see the file LICENSE for
# details.
"""This module implements enough functionality to program the STM32F4xx over
DFU, without requiring dfu-util.
See app note AN3156 for a description of the DFU protocol.
See document UM0391 for a description of the DFuse file.
"""
from __future__ import print_function
import argparse
import collections
import inspect
import re
import struct
import sys
import usb.core
import usb.util
import zlib
# USB request timeout (in milliseconds)
__TIMEOUT = 4000
# DFU commands
__DFU_DETACH = 0
__DFU_DNLOAD = 1
__DFU_UPLOAD = 2
__DFU_GETSTATUS = 3
__DFU_CLRSTATUS = 4
__DFU_GETSTATE = 5
__DFU_ABORT = 6
# DFU status
__DFU_STATE_APP_IDLE = 0x00
__DFU_STATE_APP_DETACH = 0x01
__DFU_STATE_DFU_IDLE = 0x02
__DFU_STATE_DFU_DOWNLOAD_SYNC = 0x03
__DFU_STATE_DFU_DOWNLOAD_BUSY = 0x04
__DFU_STATE_DFU_DOWNLOAD_IDLE = 0x05
__DFU_STATE_DFU_MANIFEST_SYNC = 0x06
__DFU_STATE_DFU_MANIFEST = 0x07
__DFU_STATE_DFU_MANIFEST_WAIT_RESET = 0x08
__DFU_STATE_DFU_UPLOAD_IDLE = 0x09
__DFU_STATE_DFU_ERROR = 0x0A
_DFU_DESCRIPTOR_TYPE = 0x21
__DFU_STATUS_STR = {
__DFU_STATE_APP_IDLE: "STATE_APP_IDLE",
__DFU_STATE_APP_DETACH: "STATE_APP_DETACH",
__DFU_STATE_DFU_IDLE: "STATE_DFU_IDLE",
__DFU_STATE_DFU_DOWNLOAD_SYNC: "STATE_DFU_DOWNLOAD_SYNC",
__DFU_STATE_DFU_DOWNLOAD_BUSY: "STATE_DFU_DOWNLOAD_BUSY",
__DFU_STATE_DFU_DOWNLOAD_IDLE: "STATE_DFU_DOWNLOAD_IDLE",
__DFU_STATE_DFU_MANIFEST_SYNC: "STATE_DFU_MANIFEST_SYNC",
__DFU_STATE_DFU_MANIFEST: "STATE_DFU_MANIFEST",
__DFU_STATE_DFU_MANIFEST_WAIT_RESET: "STATE_DFU_MANIFEST_WAIT_RESET",
__DFU_STATE_DFU_UPLOAD_IDLE: "STATE_DFU_UPLOAD_IDLE",
__DFU_STATE_DFU_ERROR: "STATE_DFU_ERROR",
}
# USB device handle
__dev = None
# Configuration descriptor of the device
__cfg_descr = None
__verbose = None
# USB DFU interface
__DFU_INTERFACE = 0
# Python 3 deprecated getargspec in favour of getfullargspec, but
# Python 2 doesn't have the latter, so detect which one to use
getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
if "length" in getargspec(usb.util.get_string).args:
# PyUSB 1.0.0.b1 has the length argument
def get_string(dev, index):
return usb.util.get_string(dev, 255, index)
else:
# PyUSB 1.0.0.b2 dropped the length argument
def get_string(dev, index):
return usb.util.get_string(dev, index)
def find_dfu_cfg_descr(descr):
if len(descr) == 9 and descr[0] == 9 and descr[1] == _DFU_DESCRIPTOR_TYPE:
nt = collections.namedtuple(
"CfgDescr",
[
"bLength",
"bDescriptorType",
"bmAttributes",
"wDetachTimeOut",
"wTransferSize",
"bcdDFUVersion",
],
)
return nt(*struct.unpack("<BBBHHH", bytearray(descr)))
return None
def init(**kwargs):
"""Initializes the found DFU device so that we can program it."""
global __dev, __cfg_descr
devices = get_dfu_devices(**kwargs)
if not devices:
raise ValueError("No DFU device found")
if len(devices) > 1:
raise ValueError("Multiple DFU devices found")
__dev = devices[0]
__dev.set_configuration()
# Claim DFU interface
usb.util.claim_interface(__dev, __DFU_INTERFACE)
# Find the DFU configuration descriptor, either in the device or interfaces
__cfg_descr = None
for cfg in __dev.configurations():
__cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)
if __cfg_descr:
break
for itf in cfg.interfaces():
__cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)
if __cfg_descr:
break
# Get device into idle state
for attempt in range(4):
status = get_status()
if status == __DFU_STATE_DFU_IDLE:
break
elif status == __DFU_STATE_DFU_DOWNLOAD_IDLE or status == __DFU_STATE_DFU_UPLOAD_IDLE:
abort_request()
else:
clr_status()
def abort_request():
"""Sends an abort request."""
__dev.ctrl_transfer(0x21, __DFU_ABORT, 0, __DFU_INTERFACE, None, __TIMEOUT)
def clr_status():
"""Clears any error status (perhaps left over from a previous session)."""
__dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE, None, __TIMEOUT)
def get_status():
"""Get the status of the last operation."""
stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE, 6, 20000)
# firmware can provide an optional string for any error
if stat[5]:
message = get_string(__dev, stat[5])
if message:
print(message)
return stat[4]
def check_status(stage, expected):
status = get_status()
if status != expected:
raise SystemExit("DFU: %s failed (%s)" % (stage, __DFU_STATUS_STR.get(status, status)))
def mass_erase():
"""Performs a MASS erase (i.e. erases the entire device)."""
# Send DNLOAD with first byte=0x41
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT)
# Execute last command
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def page_erase(addr):
"""Erases a single page."""
if __verbose:
print("Erasing page: 0x%x..." % (addr))
# Send DNLOAD with first byte=0x41 and page address
buf = struct.pack("<BI", 0x41, addr)
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def set_address(addr):
"""Sets the address for the next operation."""
# Send DNLOAD with first byte=0x21 and page address
buf = struct.pack("<BI", 0x21, addr)
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("set address", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("set address", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
"""Writes a buffer into memory. This routine assumes that memory has
already been erased.
"""
xfer_count = 0
xfer_bytes = 0
xfer_total = len(buf)
xfer_base = addr
while xfer_bytes < xfer_total:
if __verbose and xfer_count % 512 == 0:
print(
"Addr 0x%x %dKBs/%dKBs..."
% (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)
)
if progress and xfer_count % 2 == 0:
progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size)
# Set mem write address
set_address(xfer_base + xfer_bytes)
# Send DNLOAD with fw data
chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
__dev.ctrl_transfer(
0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes : xfer_bytes + chunk], __TIMEOUT
)
# Execute last command
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
xfer_count += 1
xfer_bytes += chunk
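# Hedged usage sketch (not part of the original script): a typical call
# sequence for flashing an image with the helpers in this module. The exact
# keyword arguments for init() depend on get_dfu_devices(), so they are
# omitted here.
#
#   init()                               # find and claim the single DFU device
#   mass_erase()                         # or page_erase(addr) page by page
#   write_memory(0x08000000, firmware)   # firmware: bytes/bytearray image
#   exit_dfu()                           # leave DFU mode and start the app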
def write_page(buf, xfer_offset):
"""Writes a single page. This routine assumes that memory has already
been erased.
"""
xfer_base = 0x08000000
# Set mem write address
set_address(xfer_base + xfer_offset)
# Send DNLOAD with fw data
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
if __verbose:
print("Write: 0x%x " % (xfer_base + xfer_offset))
def exit_dfu():
"""Exit DFU mode, and star
|
redshodan/codepunks
|
tests/testconfig.py
|
Python
|
apache-2.0
| 1,388
| 0
|
import argparse
import configparser
from codepunks.config import (Config, INISource, XMLSource, JSONSource,
YAMLSource, ArgParserSource)
ARGS1 = argparse.Namespace()
ARGS1.apkey1 = "apval1"
ARGS1.apkey2 = "apval2"
ARGS1.apkey3 = "apval3"
def testEmptyCfg():
Config()
def testINISource():
    c = Config(INISource("tests/config/config.ini"))
c.load()
def testINISourcePreBuilt():
fname = "tests/config/config.ini"
parser = configparser.ConfigParser()
parser.read(fname)
c = Config(INISource(fname, cfgparser=parser))
c.load()
def testINISource2():
c = Config([INISource("tests/config/config.ini"),
INISource("tests/config/config2.ini")])
c.load()
def testXMLSource():
c = Config(XMLSource("tests/config/config.xml"))
c.load()
def testJSONSource():
c = Config(JSONSource("tests/config/config.json"))
c.load()
def testYAMLSource():
c = Config(YAMLSource("tests/config/config.yml"))
c.load()
def testArgParserSource():
c = Config(ArgParserSource(ARGS1))
c.load()
def testAllSources():
c = Config([INISource("tests/config/config.ini"),
XMLSource("tests/config/config.xml"),
JSONSource("tests/config/config.json"),
YAMLSource("tests/config/config.yml"),
ArgParserSource(ARGS1)])
c.load()
|
ccc-ffm/christian
|
modules/dudle.py
|
Python
|
gpl-3.0
| 250
| 0
|
class Dudle(object):
def __init__(self):
self.baseurl = 'https://dudle.inf.tu-dresden.de/?create_poll='
    def getDudle(self, name, type='time', url=''):
return(self.baseurl + name + '&poll_type=' + type + '&poll_url=' + url)
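# Illustrative usage (not in the original module); note that name/type/url are
# concatenated verbatim, so callers should pass URL-safe values:
#
#   >>> Dudle().getDudle('team-meeting', type='time', url='team-meeting')
#   'https://dudle.inf.tu-dresden.de/?create_poll=team-meeting&poll_type=time&poll_url=team-meeting'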
|
dhavalmanjaria/dma-student-information-system
|
university_credits/apps.py
|
Python
|
gpl-2.0
| 110
| 0
|
from django.apps import AppConfig
class UniversityCreditsConfig(AppConfig):
    name = 'university_credits'
|
vrbagalkote/avocado-misc-tests-1
|
perf/libunwind.py
|
Python
|
gpl-2.0
| 3,140
| 0.000318
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Pooja <pooja@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado import main
from avocado.utils import archive, build, distro, process
from avocado.utils.software_manager import SoftwareManager
class Libunwind(Test):
def setUp(self):
'''
Build Libunwind library
Source:
https://github.com/pathscale/libunwind/archive/vanilla_pathscale.zip
'''
dist = distro.detect()
smm = SoftwareManager()
deps = ['gcc', 'libtool', 'autoconf', 'automake', 'make']
if dist.name == 'Ubuntu':
deps.extend(['dh-autoreconf', 'dh-dist-zilla', 'g++',
'texlive-extra-utils'])
elif dist.name in ['SuSE', 'rhel', 'fedora', 'redhat']:
deps.extend(['gcc-c++'])
else:
            self.cancel('Test not supported in %s' % dist.name)
for package in deps:
if not smm.check_installed(package) and not smm.install(package):
self.cancel("Failed to install %s, which is needed for"
"t
|
he test to be run" % package)
tarball = self.fetch_asset('vanilla_pathscale.zip', locations=[
'https://github.com/pathscale/libunwind/archive/'
'vanilla_pathscale.zip'], expire='7d')
archive.extract(tarball, self.srcdir)
self.sourcedir = os.path.join(self.srcdir, 'libunwind-vanilla_pathscale')
os.chdir(self.sourcedir)
process.run('./autogen.sh', shell=True)
'''
For configure options on different architecture please refer
https://github.com/pathscale/libunwind
'''
configure_option = self.params.get('configure_option',
default='configure_option')
process.run('./configure %s' % configure_option, shell=True)
build.make(self.sourcedir)
build.make(self.sourcedir, extra_args='install')
def test(self):
'''
Execute regression tests for libunwind library
'''
results = build.run_make(self.sourcedir, extra_args='check',
ignore_status=True).stdout
fail_list = ['FAIL', 'XFAIL', 'ERROR']
failures = []
for failure in fail_list:
num_fails = re.compile(r"# %s:(.*)" %
failure).findall(results)[0].strip()
if int(num_fails):
failures.append({failure: num_fails})
if failures:
self.fail('Test failed with following:%s' % failures)
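# The regex in test() parses automake's `make check` summary block, which
# looks like this (illustrative counts only):
#
#   # TOTAL: 24
#   # PASS:  21
#   # FAIL:  2
#   # XFAIL: 1
#   # ERROR: 0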
if __name__ == "__main__":
main()
|
IgnitedAndExploded/pyfire
|
pyfire/contact.py
|
Python
|
bsd-3-clause
| 3,814
| 0.001049
|
"""
pyfire.contact
~~~~~~~~~~
Handles Contact ("roster item") interpretation as per RFC-6121
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from sqlalchemy import Table, Column, Boolean, Integer, String, Enum, ForeignKey
from sqlalchemy.orm import relationship, backref
from pyfire.jid import JID
from pyfire.storage import Base, JIDString
contacts_groups = Table('contacts_groups', Base.metadata,
Column('contact_id', Integer, ForeignKey('contacts.id')),
Column('group_id', Integer, ForeignKey('groups.id'))
)
class Roster(Base):
"""List of contacts for a given jid"""
__tablename__ = 'rosters'
id = Column(Integer, primary_key=True)
jid = Column(JIDString, nullable=False)
def __init__(self, jid):
self.jid = JID(jid)
class Group(Base):
"""Simple group, only providing a name for now"""
__tablename__ = 'groups'
id = Column(Integer, primary_key=True)
name = Column(String(255))
class Contact(Base):
"""Jabber Contact, aka roster item. It has some really strict attribute
setting mechanism as it leads to all kinds of fantastic crashes with
clients which should be avoided in any case.
"""
__tablename__ = 'contacts'
id = Column(Integer, primary_key=True)
approved = Column(Boolean)
ask = Column(Enum('subscribe'))
jid = Column(JIDString, nullable=False)
name = Column(String(255))
subscription = Column(Enum("none", "from", "to", "remove", "both"))
groups = relationship(Group, secondary=contacts_groups)
roster = relationship(Roster, backref=backref('contacts'))
roster_id = Column(Integer, ForeignKey('rosters.id'), nullable=False)
def __init__(self, jid, **kwds):
super(Contact, self).__init__()
# required
if isinstance(jid, basestring):
self.jid = JID(jid)
elif isinstance(jid, JID):
self.jid = jid
self.jid.validate(raise_error=True)
else:
raise AttributeError("Needs valid jid either as string or JID instance")
# optional
self.approved = False
self.ask = None
self.name = None
        self.subscription = "none"
self.groups = []
for k, v in kwds.iteritems():
setattr(self, k, v)
def to_element(self):
"""Formats contact as `class`:ET.Element object"""
element = ET.Element("item")
        if self.approved is not None:
element.set("approved", 'true' if self.approved else 'false')
if self.ask is not None:
element.set("ask", self.ask)
element.set("jid", str(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
for group in self.groups:
group_element = ET.SubElement(element, "group")
group_element.text = group
return element
@staticmethod
def from_element(element):
"""Creates contact instance from `class`:ET.Element"""
if element.tag != "item":
raise ValueError("Invalid element with tag %s" % element.tag)
cont = Contact(element.get('jid'))
cont.ask = element.get('ask')
cont.subscription = element.get('subscription')
approved = element.get('approved')
if approved == 'true':
cont.approved = True
elif approved == 'false':
cont.approved = False
else:
cont.approved = approved
for group in list(element):
if group.tag == "group":
cont.groups.append(group.text)
return cont
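# Illustrative roster-item XML (hypothetical values) as produced by
# to_element() and accepted by from_element(), per RFC-6121:
#
#   <item approved="true" jid="alice@example.com" name="Alice"
#         subscription="both">
#     <group>Friends</group>
#   </item>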
|
tgbugs/pyontutils
|
nifstd/nifstd_tools/parcellation/paxinos.py
|
Python
|
mit
| 40,653
| 0.007306
|
import re
from collections import defaultdict, Counter
from ttlser import natsort
from pyontutils.core import LabelsBase, Collector, Source, resSource, ParcOnt
from pyontutils.core import makePrefixes
from pyontutils.config import auth
from pyontutils.namespaces import nsExact
from pyontutils.namespaces import NIFRID, ilx, ilxtr, TEMP
from pyontutils.namespaces import NCBITaxon, UBERON
from pyontutils.namespaces import PAXMUS, PAXRAT, paxmusver, paxratver
from pyontutils.namespaces import rdf, rdfs, owl
from pyontutils.combinators import annotations
from nifstd_tools.parcellation import log
from nifstd_tools.parcellation import Atlas, Label, LabelRoot, LocalSource, parcCore
from nifstd_tools.parcellation import RegionRoot, RegionsBase
log = log.getChild('pax')
class DupeRecord:
def __init__(self, alt_abbrevs=tuple(), structures=tuple(), figures=None, artiris=tuple()):
self.alt_abbrevs = alt_abbrevs
self.structures = structures
self.artiris = artiris
class Artifacts(Collector):
collects = Atlas
class PaxMouseAt(Atlas):
""" Any atlas artifact with Paxinos as an author for the adult rat. """
iri = ilx['paxinos/uris/mouse'] # ilxtr.paxinosMouseAtlas
class_label = 'Paxinos Mouse Atlas'
PaxMouseAtlas = Atlas(iri=PaxMouseAt.iri,
species=NCBITaxon['10090'],
devstage=UBERON['0000113'], # TODO this is 'Mature' which may not match... RnorDv:0000015 >10 weeks...
region=UBERON['0000955'],
)
PaxMouse2 = PaxMouseAt(iri=paxmusver['2'], # ilxtr.paxm2,
label='The Mouse Brain in Stereotaxic Coordinates 2nd Edition',
synonyms=('Paxinos Mouse 2nd',),
abbrevs=tuple(),
shortname='PAXMOUSE2', # TODO upper for atlas lower for label?
copyrighted='2001',
version='2nd Edition', # FIXME ??? delux edition??? what is this
citation='???????',)
PaxMouse3 = PaxMouseAt(iri=paxmusver['3'], # ilxtr.paxm3,
label='The Mouse Brain in Stereotaxic Coordinates 3rd Edition',
synonyms=('Paxinos Mouse 3rd',),
abbrevs=tuple(),
shortname='PAXMOUSE3', # TODO upper for atlas lower for label?
copyrighted='2008',
version='3rd Edition',
citation='???????',)
PaxMouse4 = PaxMouseAt(iri=paxmusver['4'], # ilxtr.paxm4,
label='The Mouse Brain in Stereotaxic Coordinates 4th Edition',
synonyms=('Paxinos Mouse 4th',),
abbrevs=tuple(),
shortname='PAXMOUSE4', # TODO upper for atlas lower for label?
copyrighted='2012',
version='4th Edition',
citation='???????',)
class PaxRatAt(Atlas):
""" Any atlas artifact with Paxinos as an author for the adult rat. """
iri = ilx['paxinos/uris/rat'] # ilxtr.paxinosRatAtlas
class_label = 'Paxinos Rat Atlas'
PaxRatAtlas = Atlas(iri=PaxRatAt.iri,
species=NCBITaxon['10116'],
devstage=UBERON['0000113'], # TODO this is 'Mature' which may not match... RnorDv:0000015 >10 weeks...
region=UBERON['0000955'],
citation=('Paxinos, George, Charles RR Watson, and Piers C. Emson. '
'"AChE-stained horizontal sections of the rat brain '
'in stereotaxic coordinates." Journal of neuroscience '
'methods 3, no. 2 (1980): 129-149.'),)
PaxRat4 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/4'], # ilxtr.paxr4,
label='The Rat Brain in Stereotaxic Coordinates 4th Edition',
synonyms=('Paxinos Rat 4th',),
abbrevs=tuple(),
shortname='PAXRAT4', # TODO upper for atlas lower for label?
copyrighted='1998',
version='4th Edition',)
PaxRat6 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/6'], # ilxtr.paxr6,
label='The Rat Brain in Stereotaxic Coordinates 6th Edition',
synonyms=('Paxinos Rat 6th',),
abbrevs=tuple(),
shortname='PAXRAT6', # TODO upper for atlas lower for label?
copyrighted='2007',
version='6th Edition',)
PaxRat7 = PaxRatAt(iri=ilx['paxinos/uris/rat/versions/7'], # ilxtr.paxr7,
label='The Rat Brain in Stereotaxic Coordinates 7th Edition',
synonyms=('Paxinos Rat 7th',
                             'Paxinos and Watson\'s The Rat Brain in Stereotaxic Coordinates 7th Edition', # branding >_<
),
abbrevs=tuple(),
shortname='PAXRAT7', # TODO upper for atlas lower for label?
copyrighted='2014',
version='7th Edition',)
class PaxSr_6(resSource):
sourceFile = auth.get_path('resources') / 'paxinos09names.txt'
artifact = Artifacts.PaxRat6
@classmethod
def loadData(cls):
with open(cls.source, 'rt') as f:
lines = [l.rsplit('#')[0].strip() for l in f.readlines() if not l.startswith('#')]
return [l.rsplit(' ', 1) for l in lines]
@classmethod
def processData(cls):
structRecs = []
out = {}
for structure, abrv in cls.raw:
structRecs.append((abrv, structure))
if abrv in out:
out[abrv][0].append(structure)
else:
out[abrv] = ([structure], ())
return structRecs, out
@classmethod
def validate(cls, structRecs, out):
print(Counter(_[0] for _ in structRecs).most_common()[:5])
print(Counter(_[1] for _ in structRecs).most_common()[:5])
assert len(structRecs) == len([s for sl, _ in out.values() for s in sl]), 'There are non-unique abbreviations'
errata = {}
return out, errata
class PaxSrAr(resSource):
artifact = None
@classmethod
def parseData(cls):
a, b = cls.raw.split('List of Structures')
if not a:
los, loa = b.split('List of Abbreviations')
else:
los = b
_, loa = a.split('List of Abbreviations')
sr = []
for l in los.split('\n'):
if l and not l[0] == ';':
if ';' in l:
l, *comment = l.split(';')
l = l.strip()
print(l, comment)
#asdf = l.rsplit(' ', 1)
#print(asdf)
struct, abbrev = l.rsplit(' ', 1)
sr.append((abbrev, struct))
ar = []
for l in loa.split('\n'):
if l and not l[0] == ';':
if ';' in l:
l, *comment = l.split(';')
l = l.strip()
print(l, comment)
#asdf = l.rsplit(' ', 1)
#print(asdf)
abbrev, rest = l.split(' ', 1)
parts = rest.split(' ')
#print(parts)
for i, pr in enumerate(parts[::-1]):
#print(i, pr)
z = pr[0].isdigit()
if not z or i > 0 and z and pr[-1] != ',':
break
struct = ' '.join(parts[:-i])
figs = tuple(tuple(int(_) for _ in p.split('-'))
if '-' in p
else (tuple(f'{nl[:-1]}{l}'
for nl, *ls in p.split(',')
for l in (nl[-1], *ls))
|
Karlon/pychess
|
lib/pychess/Savers/epd.py
|
Python
|
gpl-3.0
| 5,385
| 0.011513
|
from __future__ import absolute_import
from __future__ import print_function
from .ChessFile import ChessFile, LoadingError
from pychess.Utils.GameModel import GameModel
from pychess.Utils.const import WHITE, BLACK, WON_RESIGN, WAITING_TO_START, BLACKWON, WHITEWON, DRAW
from pychess.Utils.logic import getStatus
from pychess.Utils.lutils.leval import evaluateComplete
__label__ = _("Chess Position")
__ending__ = "epd"
__append__ = True
def save (file, model, position=None):
"""Saves game to
|
file in fen f
|
ormat"""
color = model.boards[-1].color
fen = model.boards[-1].asFen().split(" ")
# First four parts of fen are the same in epd
file.write(u" ".join(fen[:4]))
############################################################################
# Repetition count #
############################################################################
rc = model.boards[-1].board.repetitionCount()
############################################################################
# Centipawn evaluation #
############################################################################
if model.status == WHITEWON:
if color == WHITE:
ce = 32766
else: ce = -32766
elif model.status == BLACKWON:
if color == WHITE:
ce = -32766
else: ce = 32766
elif model.status == DRAW:
ce = 0
else: ce = evaluateComplete(model.boards[-1].board, model.boards[-1].color)
############################################################################
# Opcodes #
############################################################################
opcodes = (
("fmvn", fen[5]), # In fen full move number is the 6th field
("hmvc", fen[4]), # In fen halfmove clock is the 5th field
# Email and name of reciever and sender. We don't know the email.
("tcri", "?@?.? %s" % repr(model.players[color]).replace(";","")),
("tcsi", "?@?.? %s" % repr(model.players[1-color]).replace(";","")),
("ce", ce),
("rc", rc),
)
for key, value in opcodes:
file.write(u" %s %s;" % (key, value))
############################################################################
# Resign opcode #
############################################################################
if model.status in (WHITEWON, BLACKWON) and model.reason == WON_RESIGN:
file.write(u" resign;")
print(u"", file=file)
file.close()
def load (file):
return EpdFile ([line for line in map(str.strip, file) if line])
class EpdFile (ChessFile):
def loadToModel (self, gameno, position, model=None):
if not model: model = GameModel()
fieldlist = self.games[gameno].split(" ")
if len(fieldlist) == 4:
fen = self.games[gameno]
opcodestr = ""
elif len(fieldlist) > 4:
fen = " ".join(fieldlist[:4])
opcodestr = " ".join(fieldlist[4:])
else: raise LoadingError("EPD string can not have less than 4 field")
opcodes = {}
for opcode in map(str.strip, opcodestr.split(";")):
space = opcode.find(" ")
if space == -1:
opcodes[opcode] = True
else:
opcodes[opcode[:space]] = opcode[space+1:]
if "hmvc" in opcodes:
fen += " " + opcodes["hmvc"]
else: fen += " 0"
if "fmvn" in opcodes:
fen += " " + opcodes["fmvn"]
else: fen += " 1"
model.boards = [model.variant.board(setup=fen)]
model.variations = [model.boards]
model.status = WAITING_TO_START
# rc is kinda broken
#if "rc" in opcodes:
# model.boards[0].board.rc = int(opcodes["rc"])
if "resign" in opcodes:
if fieldlist[1] == "w":
model.status = BLACKWON
else:
model.status = WHITEWON
model.reason = WON_RESIGN
if model.status == WAITING_TO_START:
status, reason = getStatus(model.boards[-1])
if status in (BLACKWON, WHITEWON, DRAW):
model.status, model.reason = status, reason
return model
def get_player_names (self, gameno):
data = self.games[gameno]
names = {}
for key in "tcri", "tcsi":
keyindex = data.find(key)
if keyindex == -1:
names[key] = _("Unknown")
else:
sem = data.find(";", keyindex)
if sem == -1:
opcode = data[keyindex+len(key)+1:]
else: opcode = data[keyindex+len(key)+1:sem]
email, name = opcode.split(" ", 1)
names[key] = name
color = data.split(" ")[1] == "b" and BLACK or WHITE
if color == WHITE:
return (names["tcri"], names["tcsi"])
else:
return (names["tcsi"], names["tcri"])
|
Lekensteyn/Solaar
|
lib/logitech_receiver/hidpp20.py
|
Python
|
gpl-2.0
| 14,409
| 0.028732
|
# -*- python-mode -*-
# -*- coding: UTF-8 -*-
## Copyright (C) 2012-2013 Daniel Pavel
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Logitech Unifying Receiver API.
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger, DEBUG as _DEBUG
_log = getLogger(__name__)
del getLogger
from .common import (FirmwareInfo as _FirmwareInfo,
ReprogrammableKeyInfo as _ReprogrammableKeyInfo,
ReprogrammableKeyInfoV4 as _ReprogrammableKeyInfoV4,
KwException as _KwException,
NamedInts as _NamedInts,
pack as _pack,
unpack as _unpack)
from . import special_keys
#
#
#
# <FeaturesSupported.xml sed '/LD_FID_/{s/.*LD_FID_/\t/;s/"[ \t]*Id="/=/;s/" \/>/,/p}' | sort -t= -k2
"""Possible features available on a Logitech device.
A particular device might not support all these features, and may support other
unknown features as well.
"""
FEATURE = _NamedInts(
ROOT=0x0000,
FEATURE_SET=0x0001,
FEATURE_INFO=0x0002,
DEVICE_FW_VERSION=0x0003,
DEVICE_NAME=0x0005,
DEVICE_GROUPS=0x0006,
RESET=0x0020, # "Config Change"
DFUCONTROL=0x00C0,
DFUCONTROL_2=0x00C1,
DFU=0x00D0,
BATTERY_STATUS=0x1000,
LED_CONTROL=0x1300,
CHANGE_HOST=0x1814,
BACKLIGHT=0x1981,
REPROG_CONTROLS=0x1B00,
REPROG_CONTROLS_V2=0x1B01,
REPROG_CONTROLS_V2_2=0x1B02, # LogiOptions 2.10.73 features.xml
REPROG_CONTROLS_V3=0x1B03,
REPROG_CONTROLS_V4=0x1B04,
WIRELESS_DEVICE_STATUS=0x1D4B,
LEFT_RIGHT_SWAP=0x2001,
SWAP_BUTTON=0x2005,
VERTICAL_SCROLLING=0x2100,
SMART_SHIFT=0x2110,
HI_RES_SCROLLING=0x2120,
HIRES_WHEEL=0x2121,
LOWRES_WHEEL=0x2130,
MOUSE_POINTER=0x2200,
ADJUSTABLE_DPI=0x2201,
POINTER_SPEED=0x2205,
ANGLE_SNAPPING=0x2230,
SURFACE_TUNING=0x2240,
HYBRID_TRACKING=0x2400,
FN_INVERSION=0x40A0,
NEW_FN_INVERSION=0x40A2,
K375S_FN_INVERSION=0x40A3,
ENCRYPTION=0x4100,
LOCK_KEY_STATE=0x4220,
SOLAR_DASHBOARD=0x4301,
KEYBOARD_LAYOUT=0x4520,
KEYBOARD_DISABLE=0x4521,
DUALPLATFORM=0x4530,
KEYBOARD_LAYOUT_2=0x4540,
TOUCHPAD_FW_ITEMS=0x6010,
TOUCHPAD_SW_ITEMS=0x6011,
TOUCHPAD_WIN8_FW_ITEMS=0x6012,
TOUCHPAD_RAW_XY=0x6100,
TOUCHMOUSE_RAW_POINTS=0x6110,
TOUCHMOUSE_6120=0x6120,
GESTURE=0x6500,
GESTURE_2=0x6501,
GKEY=0x8010,
MKEYS=0x8020,
MR=0x8030,
REPORT_RATE=0x8060,
COLOR_LED_EFECTS=0x8070,
PER_KEY_LIGHTING=0x8080,
ONBOARD_PROFILES=0x8100,
MOUSE_BUTTON_SPY=0x8110,
)
FEATURE._fallback = lambda x: 'unknown:%04X' % x
FEATURE_FLAG = _NamedInts(
internal=0x20,
hidden=0x40,
obsolete=0x80)
DEVICE_KIND = _NamedInts(
keyboard=0x00,
remote_control=0x01,
numpad=0x02,
mouse=0x03,
touchpad=0x04,
trackball=0x05,
presenter=0x06,
receiver=0x07)
FIRMWARE_KIND = _NamedInts(
Firmware=0x00,
Bootloader=0x01,
Hardware=0x02,
Other=0x03)
BATTERY_OK = lambda status: status not in (BATTERY_STATUS.invalid_battery, BATTERY_STATUS.thermal_error)
BATTERY_STATUS = _NamedInts(
discharging=0x00,
recharging=0x01,
almost_full=0x02,
full=0x03,
slow_recharge=0x04,
invalid_battery=0x05,
thermal_error=0x06)
ERROR = _NamedInts(
unknown=0x01,
invalid_argument=0x02,
out_of_range=0x03,
hardware_error=0x04,
logitech_internal=0x05,
invalid_feature_index=0x06,
invalid_function=0x07,
busy=0x08,
unsupported=0x09)
#
#
#
class FeatureNotSupported(_KwException):
"""Raised when trying to request a feature not supported by the device."""
pass
class FeatureCallError(_KwException):
"""Raised if the device replied to a feature call with an error."""
pass
#
#
#
class FeaturesArray(object):
"""A sequence of features supported by a HID++ 2.0 device."""
__slots__ = ('supported', 'device', 'features')
assert FEATURE.ROOT == 0x0000
def __init__(self, device):
assert device is not None
self.device = device
self.supported = True
self.features = None
def __del__(self):
self.supported = False
self.device = None
self.features = None
def _check(self):
# print (self.device, "check", self.supported, self.features, self.device.protocol)
if self.supported:
assert self.device
if self.features is not None:
return True
if not self.device.online:
# device is not connected right now, will have to try later
return False
# I _think_ this is universally true
if self.device.protocol and self.device.protocol < 2.0:
self.supported = False
self.device.features = None
self.device = None
return False
reply = self.device.request(0x0000, _pack('!H', FEATURE.FEATURE_SET))
if reply is None:
self.supported = False
else:
fs_index = ord(reply[0:1])
if fs_index:
count = self.device.request(fs_index << 8)
if count is None:
_log.warn("FEATURE_SET found, but failed to read features count")
# most likely the device is unavailable
return False
else:
count = ord(count[:1])
assert count >= fs_index
self.features = [None] * (1 + count)
self.features[0] = FEATURE.ROOT
self.features[fs_index] = FEATURE.FEATURE_SET
return True
else:
self.supported = False
return False
__bool__ = __nonzero__ = _check
def __getitem__(self, index):
if self._check():
if isinstance(index, int):
if index < 0 or index >= len(self.features):
raise IndexError(index)
if self.features[index] is None:
feature = self.device.feature_request(FEATURE.FEATURE_SET, 0x10, index)
if feature:
feature, = _unpack('!H', feature[:2])
self.features[index] = FEATURE[feature]
return self.features[index]
elif isinstance(index, slice):
indices = index.indices(len(self.features))
return [self.__getitem__(i) for i in range(*indices)]
def __contains__(self, featureId):
"""Tests whether the list contains given Feature ID"""
if self._check():
ivalue = int(featureId)
may_have = False
for f in self.features:
if f is None:
may_have = True
elif ivalue == int(f):
return True
if may_have:
reply = self.device.request(0x0000, _pack('!H', ivalue))
if reply:
index = ord(reply[0:1])
if index:
self.features[index] = FEATURE[ivalue]
return True
def index(self, featureId):
"""Gets the Feature Index for a given Feature ID"""
if self._check():
may_have = False
ivalue = int(featureId)
for index, f in enumerate(self.features):
if f is None:
may_have = True
elif ivalue == int(f):
return index
if may_have:
                reply = self.device.request(0x0000, _pack('!H', ivalue))
if reply:
index = ord(reply[0:1])
self.features[index] = FEATURE[ivalue]
return index
raise ValueError("%r not in list" % featureId)
def __iter__(self):
if self._check():
yield FEATURE.ROOT
index = 1
            last_index = len(self.features)
while index < last_index:
yield self.__getitem__(index)
index += 1
def __len__(self):
return len(self.features) if self._check() else 0
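# Illustrative lookup flow (hypothetical feature), based on the code above:
# querying the ROOT feature (function 0x0000) with a packed 16-bit feature ID
# returns that feature's index on the device, e.g.
#
#   reply = device.request(0x0000, _pack('!H', FEATURE.BATTERY_STATUS))
#   index = ord(reply[0:1])   # 0 means the feature is not supported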
#
#
#
class KeysArray(object):
"""A sequence of key mappings supported by a HID++ 2.0 device."""
__slots__ = ('device', 'keys', 'keyversion')
def __init__(self, device, count):
assert device is not None
self.device = device
self.keyversion = 0
self.keys = [None] * count
def __getitem__(self, index):
if isinstance(index, int):
if index < 0 or index >= len(self.keys):
raise IndexError(index)
# TODO: add here additional variants for other REPROG_CONTROLS
if self.keys[index] is None:
keydata = feature_
|
Diti24/python-ivi
|
ivi/lecroy/lecroyWR44MXIA.py
|
Python
|
mit
| 1,652
| 0.001816
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .lecroyWRXIA import *
class lecroyWR44MXIA(lecroyWRXIA):
"Lecroy WaveRunner 44MXi-A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'WaveRunner 44MXi-A')
        super(lecroyWR44MXIA, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 400e6
self._init_channels()
|
PyQwt/PyQwt3D
|
Doc/sourceforge.py
|
Python
|
gpl-2.0
| 924
| 0.002165
|
#!/usr/bin/env python
import os
import re
import sys
def stamp(html):
"""Stamp a Python HTML documentation page with the S
|
ourceForge logo"""
def replace(m):
return ('<span class="release-info">%s '
'Hosted on <a href="http://sourceforge.net">'
'<img src="http://sourceforge.net/'
'sflogo.php?group_id=82987&type=1" width="88" height="31"'
'border="0" alt="SourceForge Logo"></a></span>' % m.group(1))
    mailRe = re.compile(r'<span class="release-info">(.*)</span>')
return re.sub(mailRe, replace, html)
# stamp()
if __name__ == '__main__':
for name in sys.argv[1:]:
html = open(name, 'r').read()
text = stamp(html)
if text != html:
os.remove(name)
file = open(name, 'w')
file.write(text)
file.close()
# Local Variables: ***
# mode: python ***
# End: ***
|
streed/simpleGossip
|
run.py
|
Python
|
mit
| 211
| 0.009479
|
from simpleGossip.gossiping.gossip import RemoteGossipService
if __name__ == "__main__":
    from rpyc.utils.server import ThreadedServer
t = ThreadedServer( RemoteGossipService, port=18861 )
t.start()
|
rebolinho/liveit.repository
|
script.video.F4mProxy/lib/f4mUtils/pycrypto_aes.py
|
Python
|
gpl-2.0
| 869
| 0
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""PyCrypto AES implementation."""
from .cryptomath import *
from .aes import *
if pycryptoLoaded:
import Crypto.Cipher.AES
def new(key, mode, IV):
return PyCrypto_AES(key, mode, IV)
class PyCrypto_AES(AES):
def __init__(self, key, mode, IV):
AES.__init__(self, key, mode, IV, "pycrypto")
key = bytes(key)
IV = bytes(IV)
self.context = Crypto.Cipher.AES.new(key, mode, IV)
        def encrypt(self, plaintext):
plaintext = bytes(plaintext)
return bytearray(self.context.encrypt(plaintext))
def decrypt(self, ciphertext):
ciphertext = bytes(ciphertext)
return bytearray(self.context.decrypt(ciphertext))
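# Hedged self-check (not part of the original module): exercises the wrapper
# with an all-zero demo key/IV when the file is run directly and pycrypto is
# available. CBC mode requires the plaintext length to be a multiple of the
# 16-byte block size.
if __name__ == "__main__" and pycryptoLoaded:
    demo_key = bytearray(16)  # demo-only 128-bit key
    demo_iv = bytearray(16)   # demo-only IV
    enc = new(demo_key, Crypto.Cipher.AES.MODE_CBC, demo_iv)
    dec = new(demo_key, Crypto.Cipher.AES.MODE_CBC, demo_iv)
    ct = enc.encrypt(bytearray(b"sixteen byte msg"))
    assert dec.decrypt(ct) == bytearray(b"sixteen byte msg")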
|
Trust-Code/trust-addons
|
trust_crm/models/crm_lead.py
|
Python
|
agpl-3.0
| 2,232
| 0
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp import api, models
class CrmLead(models.Model):
_inherit = 'crm.lead'
@api.multi
def handle_partner_assignation(self, action='create',
partner_id=False, context=None):
partner_ids = super(CrmLead, self).handle_partner_assignation(
action=action, partner_id=partner_id, context=context)
for lead in self:
partner_id = partner_ids[lead.id]
partner = self.env['res.partner'].browse(partner_id)
if partner.parent_id:
partner_ids[lead.id] = partner.parent_id.id
lead.partner_id = partner.parent_id.id
return partner_ids
|
battlemidget/conjure-up
|
conjureup/controllers/spellpicker/tui.py
|
Python
|
mit
| 108
| 0.009259
|
class SpellPickerController:
def render(self):
pass
_controller_class = SpellPickerController
|
moskytw/mosql
|
tests/test_query.py
|
Python
|
mit
| 2,404
| 0.000416
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
from nose.tools import eq_, assert_raises
from mosql.query import select, insert, replace
from mosql.util import param, ___, raw, DirectionError, OperatorError, autoparam
def test_select_customize():
gen = select('person', OrderedDict([
('name like', 'Mosky%'), ('age >', 20),
]))
exp = 'SELECT * FROM "person" WHERE "name" LIKE \'Mosky%\' AND "age" > 20'
eq_(gen, exp)
def test_select_customize_operator():
gen = select('person', OrderedDict([
(('name', 'like'), 'Mosky%'), (('age', '>'), 20)
]))
exp = 'SELECT * FROM "person" WHERE "name" LIKE \'Mosky%\' AND "age" > 20'
eq_(gen, exp)
def test_select_operationerror():
with assert_raises(OperatorError) as cxt:
select('person', {"person_id = '' OR true; --": 'mosky'})
exp = "this operator is not allowed: \"= '' OR TRUE; --\""
eq_(str(cxt.exception), exp)
def test_select_directionerror():
with assert_raises(DirectionError) as cxt:
select('person', {'name like': 'Mosky%'},
order_by=('age ; DROP person; --', ))
exp = "this direction is not allowed: '; DROP PERSON; --'"
eq_(str(cxt.exception), exp)
def test_select_param():
gen = select('table', OrderedDict([
        ('custom_param', param('my_param')), ('auto_param', autoparam),
('using_alias', ___),
]))
exp = (
'SELECT * FROM "table" WHERE "custom_param" = %(my_param)s '
'AND "auto_param" = %(auto_param)s AND "using_alias" = %(using_alias)s'
)
eq_(gen, exp)
def test_insert_dict():
gen = insert('person', OrderedDict([
('person_id', 'mosky'), ('name', 'Mosky Liu')
]))
    exp = ('INSERT INTO "person" ("person_id", "name") '
'VALUES (\'mosky\', \'Mosky Liu\')')
eq_(gen, exp)
def test_insert_returing():
gen = insert('person', OrderedDict([
('person_id', 'mosky'), ('name', 'Mosky Liu'),
]), returning=raw('*'))
exp = ('INSERT INTO "person" ("person_id", "name") '
'VALUES (\'mosky\', \'Mosky Liu\') RETURNING *')
eq_(gen, exp)
def test_replace():
gen = replace('person', OrderedDict([
('person_id', 'mosky'), ('name', 'Mosky Liu')
]))
exp = ('REPLACE INTO "person" ("person_id", "name") '
'VALUES (\'mosky\', \'Mosky Liu\')')
eq_(gen, exp)
|
jaeilepp/mne-python
|
tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py
|
Python
|
bsd-3-clause
| 12,088
| 0.000083
|
"""
.. _tut_stats_cluster_source_rANOVA:
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will tests if the differences in evoked responses between
stimulation modality (visual VS auditory) depend on the stimulus
location (left vs right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemannn <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([], int)] # right hemi is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3]) #
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language). Without this also
# the main effects will be returned.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# Note. for further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut_stats_cluster_sensor_rANOVA_tfr>`.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)
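###############################################################################
# Shape sanity check (illustrative note, not part of the original tutorial):
# each element of X is now (n_subjects, n_times, n_vertices_fsave). As the
# comments above explain, the clustering routine passes each condition as a
# flattened array, roughly equivalent to::
#
#     f_vals = stat_fun(*[x.reshape(n_subjects, -1) for x in X])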
|
ToureNPlaner/tourenplaner-web
|
js/lang/po2js.py
|
Python
|
apache-2.0
| 1,277
| 0.022709
|
#!/usr/bin/python
#
# convert .po to .js
#
import json
import optparse
import os
import polib
import re
import string
import sys
parser = optparse.OptionParser(usage="us
|
age: %prog [options] pofile...")
parser.add_option("--callback", default="_.setTranslation", dest="callback", help="callback function to call with data")
parser.add_option("--quiet", action="store_false", default=True, dest="verbose", help="don't
|
print status messages to stdout")
(options, args) = parser.parse_args()
if args == None or len(args) == 0:
print("ERROR: you must specify at least one po file to translate");
sys.exit(1)
paramFix = re.compile("(\\(([0-9])\\))")
for srcfile in args:
destfile = os.path.splitext(srcfile)[0] + ".js"
if options.verbose:
print("INFO: converting %s to %s" % (srcfile, destfile))
xlate_map = {}
po = polib.pofile(srcfile, autodetect_encoding=False, encoding="utf-8", wrapwidth=-1)
for entry in po:
if entry.obsolete or entry.msgstr == '':
continue
xlate_map[entry.msgid] = entry.msgstr;
dest = open(destfile, "w")
dest.write('i18n = ')
encoder = json.JSONEncoder()
for part in encoder.iterencode(xlate_map):
if part.startswith('"function('):
dest.write(part[1:-1]);
else:
dest.write(part);
dest.write(";\n")
dest.close()
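# Illustrative round trip (hypothetical catalog entry): a file de.po containing
#
#   msgid "Save"
#   msgstr "Speichern"
#
# is converted to de.js containing:
#
#   i18n = {"Save": "Speichern"};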
|
eSiUX/siux-python
|
tests/sourceinfo_test.py
|
Python
|
mit
| 4,760
| 0.070168
|
#!/usr/bin/python
import unittest, pprint, sys
sys.path.append( '../siux' )
import siuxlib
class TestSourceInfo(unittest.TestCase):
# config
auth = '<YOUR_API_KEY>'
def checkSourceInfo( self,retList ):
"""
Method tests sourceInfo structure
:param retList: - structure reported by API
"""
self.assertTrue( 'browser' in retList )
self.assertTrue( 'browserFamilyName' in retList )
self.assertTrue( 'browserId' in retList )
self.assertTrue( isinstance( retList['browserId'] ,int ))
self.assertTrue( 'browserName' in retList )
self.assertTrue( 'clientId' in retList )
self.assertTrue( isinstance( retList['clientId'] ,int ))
        self.assertTrue( 'clientName' in retList )
self.assertTrue( 'clientPay' in retList )
self.assertTrue( isinstance( retList['clientPay'] ,int ))
        self.assertTrue( 'domainId' in retList )
self.assertTrue( isinstance( retList['domainId'] ,int ))
self.assertTrue( 'googleGaProfileId' in retList )
self.assertTrue( isinstance( retList['googleGaProfileId'] ,int ))
self.assertTrue( 'googleGaTsCreate' in retList )
self.assertTrue( isinstance( retList['googleGaTsCreate'] ,int ))
self.assertTrue( 'lastChecktime' in retList )
self.assertTrue( isinstance( retList['lastChecktime'] ,int ))
self.assertTrue( 'lastErrChecktime' in retList )
self.assertTrue( isinstance( retList['lastErrChecktime'] ,int ))
self.assertTrue( 'lastErrNo' in retList )
self.assertTrue( isinstance( retList['lastErrNo'] ,int ))
self.assertTrue( 'lastErrStatusCode' in retList )
self.assertTrue( isinstance( retList['lastErrStatusCode'] ,int ))
self.assertTrue( 'lastErrStatusMessage' in retList )
self.assertTrue( 'lastStatusCode' in retList )
self.assertTrue( 'lastStatusMessage' in retList )
self.assertTrue( 'lastStatusText' in retList )
self.assertTrue( 'minAvailability' in retList )
self.assertTrue( isinstance( retList['minAvailability'] ,float ))
self.assertTrue( 'name' in retList )
self.assertTrue( 'paramCookie' in retList )
self.assertTrue( 'paramHeaderOnly' in retList )
self.assertTrue( 'paramPasswd' in retList )
self.assertTrue( 'paramPost' in retList )
self.assertTrue( 'paramSearch' in retList )
self.assertTrue( 'paramServer' in retList )
self.assertTrue( 'paramServerType' in retList )
self.assertTrue( 'paramUsername' in retList )
self.assertTrue( 'parentId' in retList )
self.assertTrue( 'publicStatActive' in retList )
self.assertTrue( isinstance( retList['publicStatActive'] ,int ))
self.assertTrue( 'rumIdent' in retList )
self.assertTrue( 'serviceCheckType' in retList )
self.assertTrue( 'serviceCheckTypeId' in retList )
self.assertTrue( isinstance( retList['serviceCheckTypeId'] ,int ))
self.assertTrue( 'siuxdbId' in retList )
self.assertTrue( isinstance( retList['siuxdbId'] ,int ))
self.assertTrue( 'sourceGroupId' in retList )
self.assertTrue( isinstance( retList['sourceGroupId'] ,int ))
self.assertTrue( 'sourceGroupName' in retList )
self.assertTrue( 'sourceId' in retList )
self.assertTrue( isinstance( retList['sourceId'] ,int ))
self.assertTrue( 'sourceType' in retList )
self.assertTrue( 'sourceTypeName' in retList )
self.assertTrue( 'status' in retList )
self.assertTrue( 'timeSchemeId' in retList )
self.assertTrue( isinstance( retList['timeSchemeId'] ,int ))
self.assertTrue( 'timeSchemeName' in retList )
self.assertTrue( 'timeout' in retList )
self.assertTrue( isinstance( retList['timeout'] ,int ))
self.assertTrue( 'timeoutErr' in retList )
self.assertTrue( isinstance( retList['timeoutErr'] ,int ))
self.assertTrue( 'timeoutWarn' in retList )
self.assertTrue( isinstance( retList['timeoutWarn'] ,int ))
self.assertTrue( 'timezone' in retList )
self.assertTrue( isinstance( retList['timezone'] ,int ))
self.assertTrue( 'timezoneId' in retList )
self.assertTrue( isinstance( retList['timezoneId'] ,int ))
self.assertTrue( 'timezoneName' in retList )
self.assertTrue( 'timezoneNick' in retList )
self.assertTrue( 'url' in retList )
self.assertTrue( 'urlNice' in retList )
def testSourceInfo(self):
"""
Test tests correct api sourceList call
"""
# init
S = siuxlib.SiUXclient( auth = self.auth )
# source.list()
retList = S.sourceList()
#pprint.pprint( retList )
if retList['statusCode'] == 'OK':
if sys.version_info[0] == 2 and sys.version_info[1] >= 7:
self.assertGreater(retList['data'].__len__(),0)
for line in retList['data']:
sourceId = line['sourceId']
self.assertTrue( isinstance( sourceId , int ) )
sourceInfo = S.sourceInfo( sourceId )
self.checkSourceInfo( sourceInfo['data'] )
if __name__ == '__main__':
unittest.main()
|
jda/unifi-tools
|
gen-minrssi.py
|
Python
|
mit
| 828
| 0.018116
|
#!/usr/bin/env python
from pymongo import MongoClient
import json
import sys
# print message and die
def msgDie(msg):
print msg
sys.exit(2)
if len(sys.argv) != 4:
msgDie("usage: unifi-minder.py config.json site-name minSNR")
# load config
cfgFile = sys.argv[1]
siteName = sys.argv[2]
minSNR = sys.argv[3]
with open(cfgFile) as data_file:
cfg = json.load(data_file)
# get database
dbCfg = cfg['database']
client = MongoClient(dbCfg['host'], dbCfg['port'])
db = client[dbCfg['db']]
sites = db['site']
site = sites.find_one({"name": siteName})
sid = str(site["_id"])
devices = db['device']
for device in devices.find({"site_id": sid}):
mac = device["mac"]
mac = mac.replace(":", "")
for radio in device['radio_table']:
radtype = radio['radio']
print "config.minrssi.%s.%s=%s" % (mac, radtype, minSNR)
|
squilter/ardupilot
|
Tools/scripts/decode-ICSR.py
|
Python
|
gpl-3.0
| 2,047
| 0
|
#!/usr/bin/env python
'''
decode an stm32 ICSR register value
'''
import sys
import optparse
def num(s):
try:
return int(s)
except ValueError:
return int(s, 16)
parser = optparse.OptionParser(__file__)
opts, args = parser.parse_args()
if len(args) == 0:
print(parser.usage)
sys.exit(0)
ICSR = num(args[0])
# https://www.st.com/content/ccc/resource/technical/document/programming_manual/6c/3a/cb/e7/e4/ea/44/9b/DM00046982.pdf/files/DM00046982.pdf/jcr:content/translations/en.DM00046982.pdf
# page 225
def decoder_m4_vectactive(value):
exceptions = {
0: "Thread mode",
1: "Reserved",
2: "NMI",
3: "Hard fault",
4: "Memory management fault",
5: "Bus fault",
6: "Usage fault",
7: "Reserved....",
10: "Reserved",
11: "SVCall",
12: "Reserved for Debug",
13: "Reserved",
14: "PendSV",
15: "SysTick",
}
if value in exceptions:
exception = "%s" % str(exceptions[value])
else:
exception = "IRQ%u" % (value - 16)
sys.stdout.write(" (%s)" % exception)
M4_BITS = [
("0-8", "VECTACTIVE", decoder_m4_vectactive),
("9-10", "RESERVED1", None),
("11", "RETOBASE", None),
("12-18", "VECTPENDING", None),
("19-21", "RESERVED2", None),
("22", "ISRPENDING", None),
("23-24", "RESERVED3", None),
("25", "PENDSTCLR", None),
("27", "PENDSVCLR", None),
("28", "PENDSVSET", None),
("29-30", "RESERVED4", None),
("31", "NMIPENDSET", None),
]
for bit in M4_BITS:
(bits, name, decoder) = bit
if "-" in bits:
(start_bit, stop_bit) = bits.split("-")
start_bit = int(start_bit)
stop_bit = int(stop_bit)
else:
start_bit = int(bits)
stop_bit = int(bits)
mask = 0
for i in range(start_bit, stop_bit+1):
mask |= (1 << i)
value = (ICSR & mask) >> start_bit
sys.stdout.write("%s: %u" % (name, value)),
if decoder is not None:
decoder(value)
print("")
|
bsmr-eve/Pyfa
|
gui/utils/numberFormatter.py
|
Python
|
gpl-3.0
| 5,541
| 0.002166
|
import math
def formatAmount(val, prec=3, lowest=0, highest=0, currency=False, forceSign=False):
"""
Add suffix to value, transform value to match new suffix and round it.
Keyword arguments:
val -- value to process
prec -- precision of final number (number of significant positions to show)
lowest -- lowest order for suffixizing for numbers 0 < |num| < 1
highest -- highest order for suffixizing for numbers |num| > 1
currency -- if currency, billion suffix will be B instead of G
forceSign -- if True, positive numbers are signed too
"""
if val is None:
return ""
# Define suffix maps
posSuffixMap = {3: "k", 6: "M", 9: "B" if currency is True else "G"}
negSuffixMap = {-6: '\u03bc', -3: "m"}
# Define tuple of the map keys
# As we're going to go from the biggest order of abs(key), sort
# them differently due to one set of values being negative
# and other positive
posOrders = tuple(sorted(iter(posSuffixMap.keys()), reverse=True))
negOrders = tuple(sorted(iter(negSuffixMap.keys()), reverse=False))
# Find the least abs(key)
posLowest = min(posOrders)
negHighest = max(negOrders)
# By default, mantissa takes just value and no suffix
mantissa, suffix = val, ""
# Positive suffixes
if abs(val) > 1 and highest >= posLowest:
# Start from highest possible suffix
for key in posOrders:
# Find first suitable suffix and check if it's not above highest order
if abs(val) >= 10 ** key and key <= highest:
mantissa, suffix = val / float(10 ** key), posSuffixMap[key]
# Do additional step to eliminate results like 999999 => 1000k
# If we're already using our greatest order, we can't do anything useful
if posOrders.index(key) == 0:
break
else:
# Get order greater than current
prevKey = posOrders[posOrders.index(key) - 1]
# Check if the key to which we potentially can change is greater
# than our highest boundary
if prevKey > highest:
# If it is, bail - we already have acceptable results
break
# Find multiplier to get from one order to another
orderDiff = 10 ** (prevKey - key)
# If rounded mantissa according to our specifications is greater than
# or equal to multiplier
if roundToPrec(mantissa, prec) >= orderDiff:
# Divide mantissa and use suffix of greater order
mantissa, suffix = mantissa / orderDiff, posSuffixMap[prevKey]
# Otherwise consider current results as acceptable
break
# Take numbers between 0 and 1, and matching/below highest possible negative suffix
elif abs(val) < 1 and val != 0 and lowest <= negHighest:
# Start from lowest possible suffix
for key in negOrders:
# Get next order
try:
nextKey = negOrders[negOrders.index(key) + 1]
except IndexError:
nextKey = 0
# Check if mantissa with next suffix is in range [1, 1000)
if abs(val) < 10 ** nextKey and key >= lowest:
mantissa, suffix = val / float(10 ** key), negSuffixMap[key]
# Do additional step to eliminate results like 0.9999 => 1000m
# Check if the key we're potentially switching to is greater than our
# upper boundary
if nextKey > highest:
# If it is, leave loop with results we already have
break
# Find the multiplier between current and next order
orderDiff = 10 ** (nextKey - key)
# If rounded mantissa according to our specifications is greater than
# or equal to multiplier
if roundToPrec(mantissa, prec) >= orderDiff:
                    # Divide mantissa and use suffix of greater order
                    # Use special handling of zero key as it's not on the map
                    mantissa, suffix = mantissa / orderDiff, negSuffixMap[nextKey] if nextKey != 0 else ""
                # Otherwise consider current results as acceptable
                break
# Round mantissa according to our prec variable
mantissa = roundToPrec(mantissa, prec)
sign = "+" if forceSign is True and mantissa > 0 else ""
# Round mantissa and add suffix
result = "{0}{1}{2}".format(sign, mantissa, suffix)
return result
def roundToPrec(val, prec):
# We're not rounding integers anyway
# Also make sure that we do not ask to calculate logarithm of zero
if int(val) == val:
return int(val)
# Find round factor, taking into consideration that we want to keep at least prec
# positions for fractions with zero integer part (e.g. 0.0000354 for prec=3)
roundFactor = int(prec - math.ceil(math.log10(abs(val))))
# But we don't want to round integers
if roundFactor < 0:
roundFactor = 0
# Do actual rounding
val = round(val, roundFactor)
# Make sure numbers with .0 part designating float don't get through
if int(val) == val:
val = int(val)
return val
def roundDec(val, prec):
if int(val) == val:
return int(val)
return round(val, prec)
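# Illustrative calls (the expected values follow from the logic above); this
# block only runs when the module is executed directly.
if __name__ == '__main__':
    print(formatAmount(1234567, prec=3, highest=9))   # -> 1.23M
    print(formatAmount(0.0005, prec=3, lowest=-6))    # -> 500μ
    print(roundToPrec(0.0000354, 3))                  # -> 3.54e-05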
|
ruibarreira/linuxtrail
|
usr/lib/python2.7/dist-packages/reportlab/lib/utils.py
|
Python
|
gpl-3.0
| 45,338
| 0.017491
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# $URI:$
__version__=''' $Id$ '''
__doc__='''Gazillions of miscellaneous internal utility functions'''
import os, sys, imp, time, types
from base64 import decodestring as base64_decodestring, encodestring as base64_encodestring
try:
from cPickle import dumps as pickle_dumps, loads as pickle_loads, dump as pickle_dump, load as pickle_load
except ImportError:
from pickle import dumps as pickle_dumps, loads as pickle_loads, dump as pickle_dump, load as pickle_load
from reportlab import isPy3
from reportlab.lib.logger import warnOnce
from reportlab.lib.rltempfile import get_rl_tempfile, get_rl_tempdir, _rl_getuid
try:
from hashlib import md5
except ImportError:
import md5
def isFunction(v):
return type(v) == type(isFunction)
class c:
def m(self): pass
def isMethod(v,mt=type(c.m)):
return type(v) == mt
del c
def isModule(v):
return type(v) == type(sys)
def isSeq(v,_st=(tuple,list)):
return isinstance(v,_st)
def isNative(v):
return isinstance(v, str)
#isStr is supposed to be for arbitrary stringType
#isBytes for bytes strings only
#isUnicode for proper unicode
if isPy3:
    _rl_NoneType=type(None)
    bytesT = bytes
unicodeT = str
strTypes = (str,bytes)
    def _digester(s):
return md5(s if isBytes(s) else s.encode('utf8')).hexdigest()
def asBytes(v,enc='utf8'):
return v if isinstance(v,bytes) else v.encode(enc)
def asUnicode(v,enc='utf8'):
return v if isinstance(v,str) else v.decode(enc)
def asUnicodeEx(v,enc='utf8'):
return v if isinstance(v,str) else v.decode(enc) if isinstance(v,bytes) else str(v)
def asNative(v,enc='utf8'):
return asUnicode(v,enc=enc)
uniChr = chr
def int2Byte(i):
return bytes([i])
def isStr(v):
return isinstance(v, (str,bytes))
def isBytes(v):
return isinstance(v, bytes)
def isUnicode(v):
return isinstance(v, str)
def isClass(v):
return isinstance(v, type)
def isNonPrimitiveInstance(x):
return not isinstance(x,(float,int,type,tuple,list,dict,str,bytes,complex,bool,slice,_rl_NoneType,
types.FunctionType,types.LambdaType,types.CodeType,
types.MappingProxyType,types.SimpleNamespace,
types.GeneratorType,types.MethodType,types.BuiltinFunctionType,
types.BuiltinMethodType,types.ModuleType,types.TracebackType,
types.FrameType,types.GetSetDescriptorType,types.MemberDescriptorType))
def instantiated(v):
return not isinstance(v,type)
from string import ascii_letters, ascii_uppercase, ascii_lowercase
from io import BytesIO, StringIO
def getBytesIO(buf=None):
'''unified StringIO instance interface'''
if buf:
return BytesIO(buf)
return BytesIO()
_bytesIOType = BytesIO
def getStringIO(buf=None):
'''unified StringIO instance interface'''
if buf:
return StringIO(buf)
return StringIO()
def bytestr(x,enc='utf8'):
if isinstance(x,str):
return x.encode(enc)
elif isinstance(x,bytes):
return x
else:
return str(x).encode(enc)
def encode_label(args):
return base64_encodestring(pickle_dumps(args)).strip().decode('latin1')
def decode_label(label):
return pickle_loads(base64_decodestring(label.encode('latin1')))
def rawUnicode(s):
'''converts first 256 unicodes 1-1'''
return s.decode('latin1') if not isinstance(s,str) else s
def rawBytes(s):
'''converts first 256 unicodes 1-1'''
return s.encode('latin1') if isinstance(s,str) else s
import builtins
rl_exec = getattr(builtins,'exec')
del builtins
def char2int(s):
return s if isinstance(s,int) else ord(s if isinstance(s,str) else s.decode('latin1'))
def rl_reraise(t, v, b=None):
if v.__traceback__ is not b:
raise v.with_traceback(b)
raise v
def rl_add_builtins(**kwd):
import builtins
for k,v in kwd.items():
setattr(builtins,k,v)
else:
bytesT = str
unicodeT = unicode
strTypes = basestring
if sys.hexversion >= 0x02000000:
def _digester(s):
return md5(s).hexdigest()
else:
# hexdigest not available in 1.5
def _digester(s):
return join(["%02x" % ord(x) for x in md5(s).digest()], '')
def asBytes(v,enc='utf8'):
return v if isinstance(v,str) else v.encode(enc)
def asNative(v,enc='utf8'):
return asBytes(v,enc=enc)
def uniChr(v):
return unichr(v)
def isStr(v):
return isinstance(v, basestring)
def isBytes(v):
return isinstance(v, str)
def isUnicode(v):
return isinstance(v, unicode)
def asUnicode(v,enc='utf8'):
return v if isinstance(v,unicode) else v.decode(enc)
def asUnicodeEx(v,enc='utf8'):
return v if isinstance(v,unicode) else v.decode(enc) if isinstance(v,str) else unicode(v)
def isClass(v):
return isinstance(v,(types.ClassType,type))
def isNonPrimitiveInstance(x):
return isinstance(x,types.InstanceType) or not isinstance(x,(float,int,long,type,tuple,list,dict,bool,unicode,str,buffer,complex,slice,types.NoneType,
types.FunctionType,types.LambdaType,types.CodeType,types.GeneratorType,
types.ClassType,types.UnboundMethodType,types.MethodType,types.BuiltinFunctionType,
types.BuiltinMethodType,types.ModuleType,types.FileType,types.XRangeType,
types.TracebackType,types.FrameType,types.EllipsisType,types.DictProxyType,
types.NotImplementedType,types.GetSetDescriptorType,types.MemberDescriptorType
))
def instantiated(v):
return not isinstance(v,type) and hasattr(v,'__class__')
int2Byte = chr
from StringIO import StringIO
def getBytesIO(buf=None):
'''unified StringIO instance interface'''
if buf:
return StringIO(buf)
return StringIO()
getStringIO = getBytesIO
_bytesIOType = StringIO
def bytestr(x,enc='utf8'):
if isinstance(x,unicode):
return x.encode(enc)
elif isinstance(x,str):
return x
else:
return str(x).encode(enc)
from string import letters as ascii_letters, uppercase as ascii_uppercase, lowercase as ascii_lowercase
def encode_label(args):
return base64_encodestring(pickle_dumps(args)).strip()
def decode_label(label):
return pickle_loads(base64_decodestring(label))
def rawUnicode(s):
'''converts first 256 unicodes 1-1'''
return s.decode('latin1') if not isinstance(s,unicode) else s
def rawBytes(s):
'''converts first 256 unicodes 1-1'''
return s.encode('latin1') if isinstance(s,unicode) else s
def rl_exec(obj, G=None, L=None):
if G is None:
frame = sys._getframe(1)
G = frame.f_globals
if L is None:
L = frame.f_locals
del frame
elif L is None:
L = G
exec("""exec obj in G, L""")
rl_exec("""def rl_reraise(t, v, b=None):\n\traise t, v, b\n""")
char2int = ord
def rl_add_builtins(**kwd):
import __builtin__
for k,v in kwd.items():
setattr(__builtin__,k,v)
def zipImported(ldr=None):
try:
if not ldr:
ldr = sys._getframe(1).f_globals['__loader__']
from zipimport import zipimporter
return ldr if isinstance(ldr,zipimporter) else None
except:
return None
def _findFiles(dirList,ext='.ttf'):
from os.path import isfile, isdir, join as path_join
from os import listdir
ext = ext.lower()
R = []
A = R.append
for D in dirList:
if not isdir(D): continue
for fn in listdir(D):
fn = path_join(D,fn)
if isfile(fn) and (not ext or fn.lower().endswith(ext)): A(fn)
return R
class CIDict(
|
tarballs-are-good/sympy
|
sympy/printing/repr.py
|
Python
|
bsd-3-clause
| 4,037
| 0.004706
|
"""
A Printer for generating executable code.
The most important function here is srepr that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from printer import Printer
from sympy.core import Basic
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import prec_to_dps, repr_dps
class ReprPrinter(Printer):
printmethod = "_sympyrepr"
def reprify(self, args, sep):
return sep.join([self.doprint(item) for item in args])
def emptyPrinter(self, expr):
if isinstance(expr, str):
return expr
elif hasattr(expr, "__srepr__"):
return expr.__srepr__()
elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
l = []
for o in expr.args:
l.append(self._print(o))
return expr.__class__.__name__ + '(%s)'%', '.join(l)
elif hasattr(expr, "__module__") and hasattr(expr, "__
|
name__"):
return "<'%s.%s'>"%(expr.__module__, expr.__name_
|
_)
else:
return str(expr)
def _print_Add(self, expr):
args = list(expr.args)
args.sort(Basic._compare_pretty)
args = map(self._print, args)
return "Add(%s)"%", ".join(args)
def _print_Function(self, expr):
r = '%s(%r)' % (expr.func.__base__.__name__, expr.func.__name__)
r+= '(%s)' % ', '.join([self._print(a) for a in expr.args])
return r
def _print_FunctionClass(self, expr):
return 'Function(%r)'%(expr.__name__)
def _print_GeometryEntity(self, expr):
# GeometryEntity is special -- its base is tuple
return repr(expr)
def _print_Infinity(self, expr):
return 'Infinity'
def _print_Integer(self, expr):
return '%s(%s)' % (expr.__class__.__name__, self._print(expr.p))
def _print_list(self, expr):
return "[%s]"%self.reprify(expr, ", ")
def _print_Matrix(self, expr):
l = []
for i in range(expr.rows):
l.append([])
for j in range(expr.cols):
l[-1].append(expr[i,j])
return '%s(%s)' % (expr.__class__.__name__, self._print(l))
def _print_NaN(self, expr):
return "nan"
def _print_NegativeInfinity(self, expr):
return "NegativeInfinity"
def _print_NegativeOne(self, expr):
return "NegativeOne"
def _print_One(self, expr):
return "One"
def _print_Rational(self, expr):
return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.p), self._print(expr.q))
def _print_Fraction(self, expr):
return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.numerator), self._print(expr.denominator))
def _print_Real(self, expr):
dps = prec_to_dps(expr._prec)
r = mlib.to_str(expr._mpf_, repr_dps(expr._prec))
return "%s('%s', prec=%i)" % (expr.__class__.__name__, r, dps)
def _print_Sum2(self, expr):
return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
self._print(expr.a), self._print(expr.b))
def _print_Symbol(self, expr):
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
def _print_Predicate(self, expr):
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
def _print_str(self, expr):
return repr(expr)
def _print_tuple(self, expr):
if len(expr)==1:
return "(%s,)"%self._print(expr[0])
else:
return "(%s)"%self.reprify(expr, ", ")
def _print_WildFunction(self, expr):
return "%s('%s')" % (expr.__class__.__name__, expr.name)
def _print_Zero(self, expr):
return "Zero"
def _print_AlgebraicNumber(self, expr):
return "%s(%s, %s)" % (self.__class__.__name__,
self._print(self.coeffs()), self._print(expr.root))
def srepr(expr, **settings):
"""return expr in repr form"""
return ReprPrinter(settings).doprint(expr)
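# Sketch of the round-trip property stated in the module docstring,
# eval(srepr(expr)) == expr. The wildcard import is an assumption about the
# eval environment: it puts Add/Symbol/Rational in scope for eval().
if __name__ == '__main__':
    from sympy import *
    expr = Symbol('x') + Rational(1, 2)
    s = srepr(expr)         # e.g. "Add(Rational(1, 2), Symbol('x'))"
    print(eval(s) == expr)  # -> True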
|
zacharyvoase/zenqueue
|
zenqueue/client/http/async.py
|
Python
|
mit
| 507
| 0.00789
|
# -*- coding: utf-8 -*-
from eventlet import httpc
from zenqueue.client.http.common import HTTPQueueClient
class QueueClient(HTTPQueueClient):
def send(self, url, data=''):
# Catch non-successful HTTP requests and treat them as if they were.
try:
result = httpc.post(url, data=data,
                                content_type='application/json; charset=utf-8')
        except httpc.ConnectionError, exc:
            result = exc.params.response_body
return result
|
ANKRAJG/movieStats
|
movieGraphs/admin.py
|
Python
|
bsd-3-clause
| 570
| 0.007018
|
from django.contrib import admin
from . import models
from .models import Hollywood, Profession, Artist, Xaxis, Yaxis, MovieImage, ArtistImage
class ArtistInline(admin.StackedInline):
model = Artist
extra = 4
class ProfessionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['prof_name']}),
]
inlines = [ArtistInline]
admin.site.register(Profession, ProfessionAdmin)
admin.site.register(Hollywood)
admin.site.register(Xaxis)
admin.site.register(Yaxis)
admin.site.register(MovieImage)
admin.site.register(ArtistImage)
|
huchoi/edx-platform
|
cms/djangoapps/contentstore/views/public.py
|
Python
|
agpl-3.0
| 2,264
| 0.001325
|
"""
Public views
"""
from django_future.csrf import ensure_csrf_cookie
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.conf import settings
from edxmako.shortcuts import render_to_response
from external_auth.views import (ssl_login_shortcut, ssl_get_cert_from_request,
redirect_with_get)
from microsite_configuration import microsite
__all__ = ['signup', 'login_page', 'howitworks']
@ensure_csrf_cookie
def signup(request):
"""
Display the signup form.
"""
csrf_token = csrf(request)['csrf_token']
if request.user.is_authenticated():
return redirect('/course/')
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to course to login to process their certificate if SSL is enabled
# and registration is disabled.
return redirect_with_get('login', request.GET, False)
return render_to_response('register.html', {'csrf': csrf_token})
@ssl_login_shortcut
@ensure_csrf_cookie
def login_page(request):
"""
Display the login form.
"""
csrf_token = csrf(request)['csrf_token']
    if (settings.FEATURES['AUTH_USE_CERTIFICATES'] and
            ssl_get_cert_from_request(request)):
# SSL login doesn't require a login view, so redirect
# to course now that the user is authenticated via
# the decorator.
next_url = request.GET.get('next')
if next_url:
return redirect(next_url)
else:
return redirect('/course/')
if settings.FEATURES.get('AUTH_USE_CAS'):
        # If CAS is enabled, redirect auth handling to there
return redirect(reverse('cas-login'))
return render_to_response(
'login.html',
{
'csrf': csrf_token,
'forgot_password_link': "//{base}/login#forgot-password-modal".format(base=settings.LMS_BASE),
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
)
def howitworks(request):
"Proxy view"
if request.user.is_authenticated():
return redirect('/course/')
else:
return render_to_response('howitworks.html', {})
|
rocky/python2-trepan
|
test/unit/test-info-files.py
|
Python
|
gpl-3.0
| 1,573
| 0.001271
|
#!/usr/bin/env python
'Unit test for debugger info file'
import inspect, unittest
from trepan import debugger as Mdebugger
from trepan.processor.command import info as Minfo
from trepan.processor.command.info_subcmd import files as MinfoFile
from cmdhelper import dbg_setup
class TestInfoFile(unittest.TestCase):
# FIXME: put in a more common place
# Possibly fix up Mock to include this
def setup_io(self, command):
self.clear_output()
command.msg = self.msg
command.errmsg = self.errmsg
        command.msg_nocr = self.msg_nocr
return
def clear_output(self):
        self.msgs = []
self.errmsgs = []
self.last_was_newline = True
return
def msg_nocr(self, msg):
if len(self.msgs) > 0:
self.msgs[-1] += msg
else:
self.msgs += msg
pass
return
def msg(self, msg):
self.msgs += [msg]
return
def errmsg(self, msg):
self.errmsgs.append(msg)
pass
def test_info_file(self):
d = Mdebugger.Debugger()
d, cp = dbg_setup(d)
command = Minfo.InfoCommand(cp, 'info')
sub = MinfoFile.InfoFiles(command)
self.setup_io(sub)
sub.run([])
self.assertEqual([], self.msgs)
cp.curframe = inspect.currentframe()
for width in (80, 200):
# sub.settings['width'] = width
sub.run(['test-info-file.py', 'lines'])
sub.run([])
pass
pass
if __name__ == '__main__':
unittest.main()
|
econandrew/bandicoot
|
bandicoot/tests/test_group.py
|
Python
|
mit
| 7,063
| 0.004247
|
"""
Test for the bandicoot.helper.group module.
"""
import bandicoot as bc
from bandicoot.core import Record, Position
import unittest
import datetime
from bandicoot.tests.generate_user import random_burst
from bandicoot.helper.group import group_records
from bandicoot.helper.tools import std, mean
from datetime import timedelta
import numpy as np
import os
class TestGroup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestGroup._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestGroup._dir_changed = True
        self.maxDiff = None
self.user = bc.io.read_orange("u_test", "samples", describe=False)
self.random_int_list = np.random.randint(1, 1000, size=9001)
self.sum_stats_list = [bc.helper.tools.SummaryStats(np.random.rand(), np.random.rand(),
np.random.rand(), np.random.rand(), np.random.rand(), np.random.rand(), np.random.rand(), []) for _ in range(9001)]
def test_statistics(self):
self.assertDictEqual(bc.helper.group.statistics(self.random_int_list, summary='default'), {
'mean': mean(self.random_int_list),
'std': std(self.random_int_list),
})
def mean_std(key):
return {
'mean': mean([getattr(s, key) for s in self.sum_stats_list]),
'std': std([getattr(s, key) for s in self.sum_stats_list]),
}
self.assertDictEqual(bc.helper.group.statistics(self.sum_stats_list, summary='extended'), {
'kurtosis': mean_std('kurtosis'),
'mean': mean_std('mean'),
'median': mean_std('median'),
'skewness': mean_std('skewness'),
'std': mean_std('std'),
'min': mean_std('min'),
'max': mean_std('max')
})
self.assertEqual(bc.helper.group.statistics([]).values(), [None] * 2)
def test_statistics_bad_aggregated(self):
def run_bad_aggregated():
try:
bc.helper.group.statistics("bad_aggregated")
except (TypeError, ValueError):
return True
return False
self.assertTrue(run_bad_aggregated())
def test_weekly_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 24), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekday_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 25), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekday')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekend_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 31), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekend')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_daily_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 22, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 7, 11, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18, 2, 00), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='night')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[3]]])
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='day')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0], records[1]], [records[2]]])
def test_none_group(self):
records = [
Record("call", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 5), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 11), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 12), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby=None)
self.assertEqual(records, list(next(grouping)))
self.assertRaises(StopIteration, grouping.next)
class ConsistencyTests(unittest.TestCase):
def setUp(self):
self.user = bc.User()
self.user.records = random_burst(100, delta=timedelta(days=2))
def _group_set(self, method, interaction):
chunks = group_records(self.user, groupby=method,
interaction=interaction)
new_records = set(r for c in chunks for r in c)
return new_records
def test_weekly(self):
old_records = set(self.user.records)
new_records = self._group_set('week', None)
self.assertSetEqual(new_records, old_records)
new_records = self._group_set('week', 'call')
self.assertSetEqual(new_records, {r for r in old_records
if r.interaction == 'call'})
class MissingTests(unittest.TestCase):
def setUp(self):
self.user = bc.read_csv('user_ignored', 'samples')
def test_amount(self):
result = {
'all': 4,
'interaction': 2,
'direction': 2,
'correspondent_id': 0,
'datetime': 0,
'call_duration': 1,
}
self.assertDictEqual(self.user.ignored_records, result)
def test_total_records(self):
self.assertEqual(len(self.user.records), 1)
|
marcosbontempo/inatelos
|
poky-daisy/scripts/lib/mic/utils/oe/misc.py
|
Python
|
mit
| 4,324
| 0.004625
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This module provides a place to collect various mic-related utils
# for the OpenEmbedded Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
from mic import msger
from mic.utils import runner
def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
"""
Execute command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
"""
msger.debug("exec_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
if (as_shell):
rc, out = runner.runtool(cmd_and_args, catch)
else:
rc, out = runner.runtool(args, catch)
out = out.strip()
msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
(cmd_and_args, rc, out))
if rc != 0:
# We don't throw exception when return code is not 0, because
# parted always fails to reload part table with loop devices. This
        # prevents us from distinguishing real errors based on return
# code.
msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))
return (rc, out)
def exec_cmd_quiet(cmd_and_args, as_shell = False):
"""
Execute command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
"""
return exec_cmd(cmd_and_args, as_shell, 0)
def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
"""
Execute native command, catching stderr, stdout
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
native_paths = \
"export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
(native_sysroot, native_sysroot, native_sysroot)
native_cmd_and_args = "%s;%s" % (native_paths, cmd_and_args)
msger.debug("exec_native_cmd: %s" % cmd_and_args)
args = cmd_and_args.split()
msger.debug(args)
rc, out = exec_cmd(native_cmd_and_args, True, catch)
if rc == 127: # shell command-not-found
msger.error("A native (host) program required to build the image "
"was not found (see details above). Please make sure "
"it's installed and try again.")
return (rc, out)
def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
"""
Execute native command, catching nothing in the output
Need to execute as_shell if the command uses wildcards
Always need to execute native commands as_shell
"""
return exec_native_cmd(cmd_and_args, native_sysroot, 0)
# kickstart doesn't support variable substution in commands, so this
# is our current simplistic scheme for supporting that
wks_vars = dict()
def get_wks_var(key):
return wks_vars[key]
def add_wks_var(key, val):
wks_vars[key] = val
BOOTDD_EXTRA_SPACE = 16384
IMAGE_EXTRA_SPACE = 10240
__bitbake_env_lines = ""
def set_bitbake_env_lines(bitbake_env_lines):
global __bitbake_env_lines
__bitbake_env_lines = bitbake_env_lines
def get_bitbake_env_lines():
return __bitbake_env_lines
def get_line_val(line, key):
"""
Extract the value from the VAR="val" string
"""
if line.startswith(key + "="):
stripped_line = line.split('=')[1]
stripped_line = stripped_line.replace('\"', '')
return stripped_line
return None
def get_bitbake_var(key):
for line in __bitbake_env_lines.split('\n'):
if (get_line_val(line, key)):
val = get_line_val(line, key)
return val
return None
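# Small self-test of the bitbake helpers above (runs only when the module is
# executed directly; the variable names and values are illustrative):
if __name__ == '__main__':
    set_bitbake_env_lines('IMAGE_NAME="core-image-minimal"\nMACHINE="qemux86"')
    print(get_bitbake_var('IMAGE_NAME'))   # -> core-image-minimal
    print(get_bitbake_var('DISTRO'))       # -> None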
|
ATIX-AG/foreman-ansible-modules
|
plugins/modules/realm.py
|
Python
|
gpl-3.0
| 2,670
| 0.001498
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Lester R Claudio <claudiol@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: realm
version_added: 1.0.0
short_description: Manage Realms
description:
- Manage Realms
author:
- "Lester R Claudio (@claudiol1)"
options:
name:
description:
- Name of the realm
required: true
type: str
realm_proxy:
description:
- Proxy to use for this realm
required: true
type: str
realm_type:
description:
- Realm type
choices:
- Red Hat Identity Management
- FreeIPA
- Active Directory
required: true
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.entity_state
- theforeman.foreman.foreman.taxonomy
'''
EXAMPLES = '''
- name: "Create EXAMPLE.LOCAL Realm"
theforeman.foreman.realm:
username: "admin"
password: "changeme"
server_url: "https://foreman.example.com"
name: "EXAMPLE.COM"
realm_proxy: "foreman.example.com"
realm_type: "Red Hat Identity Management"
state: present
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
realms:
description: List of realms.
type: list
elements: dict
'''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanTaxonomicEntityAnsibleModule
class ForemanRealmModule(ForemanTaxonomicEntityAnsibleModule):
pass
def main():
module = ForemanRealmModule(
foreman_spec=dict(
name=dict(required=True),
realm_proxy=dict(type='entity', required=True, resource_type='smart_proxies'),
realm_type=dict(required=True, choices=['Red Hat Identity Management', 'FreeIPA', 'Active Directory']),
),
)
with module.api_connection():
module.run()
if __name__ == '__main__':
main()
|
SportySpice/Collections
|
src/gui/EnumButton.py
|
Python
|
gpl-2.0
| 4,537
| 0.020719
|
from src.tools.enum import enum
import pyxbmct.addonwindow as pyxbmct
from src.tools.dialog import dialog
EnumMode = enum(SELECT=0, ROTATE=1)
class EnumButton(object):
def __init__(self, label, values, current, default, changeCallback=None, saveCallback=None, customLabels=None, mode=EnumMode.SELECT, returnValue=False, alignment=pyxbmct.ALIGN_CENTER):
self.label = label
self.values = values
self.customLabels = customLabels
self.mode = mode
self.returnValue = returnValue
self.changeCallback = changeCallback
self.saveCallback = saveCallback
self.currentValue = current
self.defaultValue = default
self.currentIndex = None
self.defaultIndex = None
self.assignedValue = False
if saveCallback is None:
self.onSave = None
if customLabels:
self._findCurrentIndex()
label = str(customLabels[self.currentIndex])
else:
label = str(current)
if alignment is not None:
self.button = pyxbmct.Button(label, alignment=alignment)
else:
self.button = pyxbmct.Button(label)
def update(self, value):
if self.currentValue != value:
self.currentValue = value
if self.customLabels:
self._findCurrentIndex()
label = str(self.customLabels[self.currentIndex])
else:
self.currentIndex = None
label = str(value)
self.button.setLabel(label)
self.assignedValue = True
def onClick(self):
if self.mode == EnumMode.SELECT:
if self.customLabels:
values = self.customLabels
else:
values = self.values
selectedIndex = dialog.select(self.label, list((str(value) for value in values)))
if selectedIndex == -1:
return
index = selectedIndex
else:
if self.currentIndex is None:
self._findCurrentIndex()
if self.currentIndex == len(self.values) - 1:
index = 0
else:
index = self.currentIndex + 1
self.assign(index)
def onDefault(self):
if self.defaultIndex is None:
self._findDefaultIndex()
self.assign(self.defaultIndex)
def onSave(self):
if self.assignedValue:
if self.returnValue:
self.saveCallback(self.currentValue)
else:
self.saveCallback(self.currentIndex)
def assign(self, index):
value = self.values[index]
self.currentIndex = index
self.currentValue = value
if self.customLabels:
label = str(self.customLabels[index])
else:
label = str(value)
self.button.setLabel(label)
self.assignedValue = True
if self.changeCallback:
if self.returnValue:
self.changeCallback(value)
else:
self.changeCallback(index)
    def _findDefaultIndex(self):
        for i in range(0, len(self.values)):
value = self.values[i]
if value == self.defaultValue:
self.defaultIndex = i
if self.defaultIndex is None:
raise ValueError ('Default value not found in value list')
def _findCurrentIndex(self):
for i in range(0, len(self.values)):
value = self.values[i]
if value == self.currentValue:
self.currentIndex = i
if self.currentIndex is None:
raise ValueError ('Current value not found in value list')
|
chatelak/RMG-Py
|
rmgpy/qm/mopacTest.py
|
Python
|
mit
| 7,951
| 0.043013
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as np
from rmgpy import getPath
from rmgpy.qm.main import QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.mopac import MopacMolPM3, MopacMolPM6, MopacMolPM7
mopacEnv = os.getenv('MOPAC_DIR', default="/opt/mopac")
if os.path.exists(os.path.join(mopacEnv , 'MOPAC2012.exe')):
executablePath = os.path.join(mopacEnv , 'MOPAC2012.exe')
elif os.path.exists(os.path.join(mopacEnv , 'MOPAC2009.exe')):
executablePath = os.path.join(mopacEnv , 'MOPAC2009.exe')
else:
executablePath = os.path.join(mopacEnv , '(MOPAC 2009 or 2012)')
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
class TestMopacMolPM3(unittest.TestCase):
"""
Contains unit tests for the Geometry class.
"""
@unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'mopac',
method = 'pm3',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
self.qmmol1 = MopacMolPM3(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly.
"""
try:
fileList = os.listdir(self.qmmol1.settings.fileStore)
for fileName in fileList:
os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
except OSError:
pass
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 0) # to 1 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from a previous run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 0) # to 1 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
class TestMopacMolPM6(unittest.TestCase):
"""
Contains unit tests for the Geometry class.
"""
@unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
        RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'mopac',
method = 'pm6',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
self.qmmol1 = MopacMolPM6(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly.
"""
try:
fileList = os.listdir(self.qmmol1.settings.fileStore)
for fileName in fileList:
os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
except OSError:
pass
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 167704.4270, 0) # to 1 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.0999241, 1) # to 1 decimal place
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from a previous run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertEqual(result.molecularMass.value, 128.173)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 167704.0681, 0) # to 0 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 338.0999241, 1) # to 1 decimal place
class TestMopacMolPM7(unittest.TestCase):
"""
Contains unit tests for the Geometry class.
"""
@unittest.skipIf(os.path.exists(executablePath)==False, "MOPAC not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'mopac',
method = 'pm7',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
self.qmmol1 = MopacMolPM7(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly.
"""
try:
fileList = os.listdir(self.qmmol1.settings.fileStore)
for fileName in fileList:
os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
except OSError:
pass
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.9863, 0) # to 1 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1) # to 1 decimal place
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from a previous run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM MopacMolPM7 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.173,2)
self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 166168.8571, 0) # to 1 decimal place
self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 336.3330406, 1) # to 1 decimal place
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
ageitgey/face_recognition
|
docs/conf.py
|
Python
|
mit
| 8,789
| 0.005461
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# face_recognition documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['face_recognition_models', 'Click', 'dlib', 'numpy', 'PIL']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import face_recognition
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Face Recognition'
copyright = u"2017, Adam Geitgey"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = face_recognition.__version__
# The full version, including alpha/beta/rc tags.
release = face_recognition.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'face_recognitiondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'face_recognition.tex',
u'Face Recognition Documentation',
u'Adam Geitgey', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'face_recognition',
u'Face Recognition Documentation',
[u'Adam Geitgey'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title
|
rogerscristo/BotFWD
|
env/lib/python3.6/site-packages/telegram/files/venue.py
|
Python
|
mit
| 2,201
| 0.000909
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Venue."""
from telegram import TelegramObject, Location
class Venue(TelegramObject):
"""This object represents a venue.
Attributes:
location (:class:`telegram.Location`): Venue location.
title (:obj:`str`): Name of the venue.
address (:obj:`str`): Address of the venue.
foursquare_id (:obj:`str`): Optional. Foursquare identifier of the venue.
Args:
        location (:class:`telegram.Location`): Venue location.
title (:obj:`str`): Name of the venue.
address (:obj:`str`): Address of the venue.
foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue.
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self, location, title, address, foursquare_id=None, **kwargs):
# Required
self.location = location
self.title = title
self.address = address
# Optionals
self.foursquare_id = foursquare_id
self._id_attrs = (self.location, self.title)
@classmethod
def de_json(cls, data, bot):
data = super(Venue, cls).de_json(data, bot)
if not data:
return None
data['location'] = Location.de_json(data.get('location'), bot)
return cls(**data)
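# Usage sketch (runs only when executed directly; the values are illustrative,
# and passing bot=None is an assumption that holds here because this parsing
# path only needs Location.de_json):
if __name__ == '__main__':
    data = {'location': {'longitude': 13.4, 'latitude': 52.52},
            'title': 'Example Cafe', 'address': 'Somestreet 1'}
    venue = Venue.de_json(data, None)
    print(venue.title, venue.foursquare_id)   # -> Example Cafe None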
|
hellhovnd/dentexchange
|
dentexchange/apps/search/strings.py
|
Python
|
bsd-3-clause
| 551
| 0
|
# -*- coding:utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# SearchForm's strings
SEARCH_FORM_KEYWORDS = _(u'Key Words / Profession')
SEARCH_FORM_LOCATION = _(u'City, State or Zip Code')
# SearchFiltersForm's strings
SEARCH_FILTERS_FORM_JOB_POSITION = _(u'Job Position')
SEARCH_FILTERS_FORM_EXPERIENCE_YEARS = _(u'Experience')
SEARCH_FILTERS_FORM_DISTANCE = _(u'Distance')
SEARCH_FILTERS_FORM_FULL_TIME = _(u'Full Time')
SEARCH_FILTERS_FORM_PART_TIME = _(u'Part Time')
SEARCH_FILTERS_FORM_VISA = _(u'Has a Visa / Visa required')
|