| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
from __future__ import unicode_literals
from __future__ import absolute_import
from ..packages.docker import Client
from requests.exceptions import ConnectionError
import errno
import logging
import os
import re
import yaml
from ..packages import six
from ..project import Project
from ..service import ConfigError
from .docopt_command import DocoptCommand
from .utils import docker_url, call_silently, is_mac, is_ubuntu
from . import verbose_proxy
from . import errors
from .. import __version__
log = logging.getLogger(__name__)
class Command(DocoptCommand):
base_dir = '.'
def dispatch(self, *args, **kwargs):
try:
super(Command, self).dispatch(*args, **kwargs)
except ConnectionError:
if call_silently(['which', 'docker']) != 0:
if is_mac():
raise errors.DockerNotFoundMac()
elif is_ubuntu():
raise errors.DockerNotFoundUbuntu()
else:
raise errors.DockerNotFoundGeneric()
elif call_silently(['which', 'docker-osx']) == 0:
raise errors.ConnectionErrorDockerOSX()
else:
raise errors.ConnectionErrorGeneric(self.get_client().base_url)
def perform_command(self, options, handler, command_options):
explicit_config_path = options.get('--file') or os.environ.get('FIG_FILE')
project = self.get_project(
self.get_config_path(explicit_config_path),
project_name=options.get('--project-name'),
verbose=options.get('--verbose'))
handler(project, command_options)
def get_client(self, verbose=False):
client = Client(docker_url())
if verbose:
version_info = six.iteritems(client.version())
log.info("Fig version %s", __version__)
log.info("Docker base_url: %s", client.base_url)
log.info("Docker version: %s",
", ".join("%s=%s" % item for item in version_info))
return verbose_proxy.VerboseProxy('docker', client)
return client
def get_config(self, config_path):
try:
with open(config_path, 'r') as fh:
return yaml.safe_load(fh)
except IOError as e:
if e.errno == errno.ENOENT:
raise errors.FigFileNotFound(os.path.basename(e.filename))
raise errors.UserError(six.text_type(e))
def get_project(self, config_path, project_name=None, verbose=False):
try:
return Project.from_config(
self.get_project_name(config_path, project_name),
self.get_config(config_path),
self.get_client(verbose=verbose))
except ConfigError as e:
raise errors.UserError(six.text_type(e))
def get_project_name(self, config_path, project_name=None):
def normalize_name(name):
return re.sub(r'[^a-zA-Z0-9]', '', name)
if project_name is not None:
return normalize_name(project_name)
project = os.path.basename(os.path.dirname(os.path.abspath(config_path)))
if project:
return normalize_name(project)
return 'default'
def get_config_path(self, file_path=None):
if file_path:
return os.path.join(self.base_dir, file_path)
if os.path.exists(os.path.join(self.base_dir, 'fig.yaml')):
log.warning("Fig just read the file 'fig.yaml' on startup, rather "
"than 'fig.yml'")
log.warning("Please be aware that fig.yml the expected extension "
"in most cases, and using .yaml can cause compatibility "
"issues in future")
return os.path.join(self.base_dir, 'fig.yaml')
return os.path.join(self.base_dir, 'fig.yml')
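# Illustrative resolution order for get_config_path (comments only,
# mirroring the logic above; not part of the original module):
#
#   --file / FIG_FILE set -> that path, joined onto base_dir
#   fig.yaml present      -> used, with the deprecation warnings above
#   otherwise             -> fig.yml
#
# Project names are normalized the same way get_project_name does,
# e.g. "my-project.1" -> "myproject1".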
| waynedovey/fig | fig/cli/command.py | Python | apache-2.0 | 3,878 |
import util
| mtmarsh2/vislab | vislab/ui/__init__.py | Python | bsd-2-clause | 12 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.models import Model
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
class res_users(Model):
_inherit = 'res.users'
def _login(self, db, login, password):
uid = super(res_users, self)._login(db, login, password)
if uid:
self.update_dynamic_groups(uid, db)
return uid
def update_dynamic_groups(self, uid, db):
pool = RegistryManager.get(db)
cr = pool._db.cursor()
user = pool.get('res.users').browse(cr, SUPERUSER_ID, uid)
groups_obj = pool.get('res.groups')
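        # ORM x2many write commands: (4, id) links the group to the user,
        # (3, id) unlinks it, so each dynamic group below is added or
        # removed depending on how its condition evaluates for this user.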
user.write(
{
'groups_id': [
(4, dynamic_group.id)
if dynamic_group.eval_dynamic_group_condition(uid=uid)
else (3, dynamic_group.id)
for dynamic_group in groups_obj.browse(
cr, SUPERUSER_ID,
groups_obj.search(cr, SUPERUSER_ID,
[('is_dynamic', '=', True)]))
],
})
cr.commit()
cr.close()
| be-cloud-be/horizon-addons | server-tools/auth_dynamic_groups/model/res_users.py | Python | agpl-3.0 | 2,115 |
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| Sjors/bitcoin | test/functional/feature_includeconf.py | Python | mit | 4,066 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 René Kijewski <rene.kijewski@fu-berlin.de>
# Copyright (C) 2015 Philipp Rosenkranz <philipp.rosenkranz@fu-berlin.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import re
from itertools import groupby
from os import devnull, environ, listdir
from os.path import abspath, dirname, isfile, join
from subprocess import CalledProcessError, check_call, check_output, PIPE, Popen
from sys import exit, stdout, argv, exc_info
from StringIO import StringIO
from itertools import tee
class Termcolor:
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
blue = '\033[1;34m'
purple = '\033[1;35m'
end = '\033[0m'
def is_tracked(application_folder):
if not isfile(join(application_folder, 'Makefile')):
return False
try:
check_call(('git', 'ls-files', '--error-unmatch', 'Makefile'),
stdin=null, stdout=null, stderr=null, cwd=application_folder)
except CalledProcessError:
return False
else:
return True
def get_results_and_output_from(fd):
results_prefix = 'Building for '
output_prefix = 'Building application '
prev_results = False
result = ['']
output = StringIO()
while 1:
line = fd.readline()
if not line:
if prev_results:
yield (' .. '.join(result[:-1]), result[-1], output)
break
elif line.startswith(results_prefix):
read_more_output = False
if prev_results:
yield (' .. '.join(result[:-1]), result[-1], output)
prev_results = True
output = StringIO()
result = line[len(results_prefix):].rstrip().split(' .. ')[::-1]
if (len(result) > 1) and ('success' in result[0] or 'failed' in result[0]):
stdout.write('.')
stdout.flush()
elif line.startswith(output_prefix):
output.write(line)
read_more_output = True
elif read_more_output:
output.write(line)
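# Illustrative `make buildtest` lines consumed by the parser above
# (board names are examples):
#
#   Building for native .. success
#   Building for msba2 .. failed
#
# The first line yields the tuple ('success', 'native', <StringIO>).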
def _get_common_user(common):
return [f for f in check_output(r'grep -l "{}" cpu/*/Makefile* boards/*/Makefile*'.format(common),
shell=True).split() if 'common' not in f]
def _get_boards_from_files(files):
boards = set()
if any('boards/' in s for s in files):
for f in files:
if 'boards/' not in f:
continue
board = re.sub(r'^boards/([^/]+)/.*$', r'\1', f)
if 'common' in board:
boards |= _get_boards_from_files(_get_common_user(board))
else:
boards |= { board }
return boards
def _get_cpus_from_files(files):
cpus = set()
if any('cpu/' in s for s in files):
for f in files:
if 'cpu/' not in f:
continue
cpu = re.sub(r'^cpu/([^/]+)/.*', r'\1', f)
if 'common' in cpu:
cpus |= _get_cpus_from_files(_get_common_user(cpu))
else:
cpus |= { cpu }
return cpus
def is_updated(application_folder, subprocess_env):
try:
if base_branch == '':
return True
if '.travis.yml' in diff_files or \
any('dist/' in s for s in diff_files):
return True
boards_changes = set()
boards_changes |= _get_boards_from_files(diff_files)
for cpu in _get_cpus_from_files(diff_files):
board_files = check_output(r'grep -l "^\(export \)*CPU[ :?=]\+{}" boards/*/Makefile.include'.format(cpu),
shell=True).split()
boards_changes |= _get_boards_from_files(board_files)
if len(boards_changes) > 0:
app_files = set()
for board in boards_changes:
env = { 'BOARD': board }
env.update(subprocess_env)
tmp = check_output(('make', 'info-files'), stderr=null,
cwd=application_folder, env=env)
app_files |= set(tmp.split())
if (len(diff_files & app_files) > 0):
return True
else:
app_files = check_output(('make', 'info-files'), stderr=null,
cwd=application_folder, env=subprocess_env)
app_files = set(app_files.split())
return (len(diff_files & app_files) > 0)
except CalledProcessError as e:
return True
def build_all():
riotbase = environ.get('RIOTBASE') or abspath(join(dirname(abspath(__file__)), '../' * 3))
for folder in ('examples', 'tests'):
print('Building all applications in: {}'.format(colorize_str(folder, Termcolor.blue)))
applications = listdir(join(riotbase, folder))
applications = filter(lambda app: is_tracked(join(riotbase, folder, app)), applications)
applications = sorted(applications)
subprocess_env = environ.copy()
subprocess_env['RIOT_DO_RETRY'] = '1'
subprocess_env['BUILDTEST_VERBOSE'] = '1'
for nth, application in enumerate(applications, 1):
stdout.write('\tBuilding application: {} ({}/{}) '.format(colorize_str(application, Termcolor.blue), nth, len(applications)))
stdout.flush()
try:
if not is_updated(join(riotbase, folder, application), subprocess_env):
print(colorize_str('(skipped)', Termcolor.yellow))
skipped.append(application)
continue
subprocess = Popen(('make', 'buildtest'),
bufsize=1, stdin=null, stdout=PIPE, stderr=null,
cwd=join(riotbase, folder, application),
env=subprocess_env)
results, results_with_output = tee(get_results_and_output_from(subprocess.stdout))
results = groupby(sorted(results), lambda (outcome, board, output): outcome)
results_with_output = filter(lambda (outcome, board, output): output.getvalue(), results_with_output)
failed_with_output = filter(lambda (outcome, board, output): 'failed' in outcome, results_with_output)
success_with_output = filter(lambda (outcome, board, output): 'success' in outcome, results_with_output)
print()
for group, results in results:
print('\t\t{}: {}'.format(group, ', '.join(sorted(board for outcome, board, output in results))))
returncode = subprocess.wait()
if success_with_output:
warnings.append((application, success_with_output))
if returncode == 0:
success.append(application)
else:
failed.append(application)
errors.append((application, failed_with_output))
except Exception, e:
print('\n\t\tException: {}'.format(e))
exceptions.append(application)
finally:
try:
subprocess.kill()
except:
pass
def colorize_str(string, color):
return '%s%s%s' % (color, string, Termcolor.end)
def print_output_for(buf, name, color):
if buf:
print('%s:' % name)
for application, details in buf:
for outcome, board, output in details:
print()
print(colorize_str('%s:%s:' % (application, board), color))
print('%s' % output.getvalue())
def print_outcome(outputListDescription):
print()
print('Outcome:')
for color, group, name in outputListDescription:
applications = group
if applications:
print('\t{}{}{}: {}'.format(color, name, Termcolor.end, ', '.join(applications)))
def print_num_of_errors_and_warnings():
stdout.write('Errors: ')
if errors:
num_of_errors = sum(map(lambda x: len(x[1]), errors))
stdout.write('%s' % colorize_str(str(num_of_errors), Termcolor.red))
else:
stdout.write('0')
stdout.write(' Warnings: ')
if warnings:
num_of_warnings = sum(map(lambda x: len(x[1]), warnings))
stdout.write('%s' % colorize_str(str(num_of_warnings), Termcolor.yellow))
else:
stdout.write('0')
stdout.write('\n')
if __name__ == '__main__':
success = []
failed = []
skipped = []
exceptions = []
warnings = []
errors = []
null = open(devnull, 'w', 0)
if len(argv) > 1:
base_branch = argv[1]
diff_files = check_output(('git', 'diff', '--name-only', base_branch, 'HEAD'))
diff_files = set(diff_files.split())
else:
base_branch = ''
build_all()
print_output_for(warnings, 'Warnings', Termcolor.yellow)
print_output_for(errors, 'Errors', Termcolor.red)
outputListDescription = [(Termcolor.yellow, skipped, 'skipped'), (Termcolor.green, success, 'success'),
(Termcolor.red, failed, 'failed'), (Termcolor.blue, exceptions, 'exceptions')]
print_outcome(outputListDescription)
print_num_of_errors_and_warnings()
if exceptions:
exit(2)
elif failed:
exit(1)
else:
exit(0)
| daniel-k/RIOT | dist/tools/compile_test/compile_test.py | Python | lgpl-2.1 | 10,074 |
import myhdl
import pihdf
from pihdf import Testable
import os, sys
sys.path.append(os.path.dirname(__file__) + "/../..")
from ParamStruct.ParamStruct import ParamStruct
class t_ParamStruct(Testable):
'''|
| Automatically generated. Do not modify this file.
|________'''
pihdf.head("T E S T S")
pihdf.info("Using myhdl version " + myhdl.__version__)
pihdf.info("Using pihdf version " + pihdf.__version__ + '\n')
def __init__(self):
# call base class constructor
Testable.__init__(self)
self.test_path = os.path.dirname(__file__)
self.cond_tx = []
self.res_tx = []
self.cond_sim_end = {}
self.tst_data = { "cond_tx":self.cond_tx,\
"res_tx":self.res_tx,\
"cond_sim_end": self.cond_sim_end }
self.ref_tx = []
self.ref_data = { "tx":(self.ref_tx, self.res_tx) }
# Automatically executed BEFORE every test case
def setUp(self):
print ""
# Automatically executed AFTER every test case
def tearDown(self):
print ""
self.cond_tx = []
self.res_tx = []
self.ref_tx = []
# Data has been previously generated and written to files
def use_data_from_files(self):
self.res_tx.append({"file" : self.test_path + "/vectors/my_tx.tvr"})
self.ref_tx.append({"file" : self.test_path + "/vectors/tx.tvr"})
self.checkfiles = True
self.run_it()
# Run the simulation and check the results
def run_it(self, checkfiles=False):
self.check_config("ParamStruct")
ParamStruct_dut = ParamStruct(IMPL=self.models)
ParamStruct_dut.Simulate(tb_config=self.tb_config, tst_data=self.tst_data, verbose=self.verbose, dut_params=self.dut_params)
ParamStruct_dut.clean()
self.check_results()
| hnikolov/pihdf | test/ParamStruct/test/t_ParamStruct.py | Python | mit | 1,864 |
# -*- coding: utf-8 -*-
"""
flask.ext.security.core
~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security core module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app
from flask.ext.login import AnonymousUser as AnonymousUserBase, \
UserMixin as BaseUserMixin, LoginManager, current_user
from flask.ext.principal import Principal, RoleNeed, UserNeed, Identity, \
identity_loaded
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from werkzeug.datastructures import ImmutableList
from werkzeug.local import LocalProxy
from .utils import config_value as cv, get_config, md5, url_for_security
from .views import create_blueprint
from .forms import LoginForm, ConfirmRegisterForm, RegisterForm, \
ForgotPasswordForm, ChangePasswordForm, ResetPasswordForm, \
SendConfirmationForm, PasswordlessLoginForm
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
#: Default Flask-Security configuration
_default_config = {
'BLUEPRINT_NAME': 'security',
'URL_PREFIX': None,
'SUBDOMAIN': None,
'FLASH_MESSAGES': True,
'PASSWORD_HASH': 'plaintext',
'PASSWORD_SALT': None,
'LOGIN_URL': '/login',
'LOGOUT_URL': '/logout',
'REGISTER_URL': '/register',
'RESET_URL': '/reset',
'CHANGE_URL': '/change',
'CONFIRM_URL': '/confirm',
'POST_LOGIN_VIEW': '/',
'POST_LOGOUT_VIEW': '/',
'CONFIRM_ERROR_VIEW': None,
'POST_REGISTER_VIEW': None,
'POST_CONFIRM_VIEW': None,
'POST_RESET_VIEW': None,
'POST_CHANGE_VIEW': None,
'UNAUTHORIZED_VIEW': None,
'FORGOT_PASSWORD_TEMPLATE': 'security/forgot_password.html',
'LOGIN_USER_TEMPLATE': 'security/login_user.html',
'REGISTER_USER_TEMPLATE': 'security/register_user.html',
'RESET_PASSWORD_TEMPLATE': 'security/reset_password.html',
'SEND_CONFIRMATION_TEMPLATE': 'security/send_confirmation.html',
'SEND_LOGIN_TEMPLATE': 'security/send_login.html',
'CONFIRMABLE': False,
'REGISTERABLE': False,
'RECOVERABLE': False,
'TRACKABLE': False,
'PASSWORDLESS': False,
'CHANGEABLE': False,
'SEND_REGISTER_EMAIL': True,
'LOGIN_WITHIN': '1 days',
'CONFIRM_EMAIL_WITHIN': '5 days',
'RESET_PASSWORD_WITHIN': '5 days',
'LOGIN_WITHOUT_CONFIRMATION': False,
'EMAIL_SENDER': 'no-reply@localhost',
'TOKEN_AUTHENTICATION_KEY': 'auth_token',
'TOKEN_AUTHENTICATION_HEADER': 'Authentication-Token',
'CONFIRM_SALT': 'confirm-salt',
'RESET_SALT': 'reset-salt',
'LOGIN_SALT': 'login-salt',
'CHANGE_SALT': 'change-salt',
'REMEMBER_SALT': 'remember-salt',
'DEFAULT_HTTP_AUTH_REALM': 'Login Required',
'EMAIL_SUBJECT_REGISTER': 'Welcome',
'EMAIL_SUBJECT_CONFIRM': 'Please confirm your email',
'EMAIL_SUBJECT_PASSWORDLESS': 'Login instructions',
'EMAIL_SUBJECT_PASSWORD_NOTICE': 'Your password has been reset',
'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE': 'Your password has been changed',
'EMAIL_SUBJECT_PASSWORD_RESET': 'Password reset instructions'
}
#: Default Flask-Security messages
_default_messages = {
'UNAUTHORIZED': ('You do not have permission to view this resource.', 'error'),
'CONFIRM_REGISTRATION': ('Thank you. Confirmation instructions have been sent to %(email)s.', 'success'),
'EMAIL_CONFIRMED': ('Thank you. Your email has been confirmed.', 'success'),
'ALREADY_CONFIRMED': ('Your email has already been confirmed.', 'info'),
'INVALID_CONFIRMATION_TOKEN': ('Invalid confirmation token.', 'error'),
'EMAIL_ALREADY_ASSOCIATED': ('%(email)s is already associated with an account.', 'error'),
'PASSWORD_MISMATCH': ('Password does not match', 'error'),
'RETYPE_PASSWORD_MISMATCH': ('Passwords do not match', 'error'),
'INVALID_REDIRECT': ('Redirections outside the domain are forbidden', 'error'),
'PASSWORD_RESET_REQUEST': ('Instructions to reset your password have been sent to %(email)s.', 'info'),
'PASSWORD_RESET_EXPIRED': ('You did not reset your password within %(within)s. New instructions have been sent to %(email)s.', 'error'),
'INVALID_RESET_PASSWORD_TOKEN': ('Invalid reset password token.', 'error'),
'CONFIRMATION_REQUIRED': ('Email requires confirmation.', 'error'),
'CONFIRMATION_REQUEST': ('Confirmation instructions have been sent to %(email)s.', 'info'),
'CONFIRMATION_EXPIRED': ('You did not confirm your email within %(within)s. New instructions to confirm your email have been sent to %(email)s.', 'error'),
'LOGIN_EXPIRED': ('You did not login within %(within)s. New instructions to login have been sent to %(email)s.', 'error'),
'LOGIN_EMAIL_SENT': ('Instructions to login have been sent to %(email)s.', 'success'),
'INVALID_LOGIN_TOKEN': ('Invalid login token.', 'error'),
'DISABLED_ACCOUNT': ('Account is disabled.', 'error'),
'EMAIL_NOT_PROVIDED': ('Email not provided', 'error'),
'INVALID_EMAIL_ADDRESS': ('Invalid email address', 'error'),
'PASSWORD_NOT_PROVIDED': ('Password not provided', 'error'),
'USER_DOES_NOT_EXIST': ('Specified user does not exist', 'error'),
'INVALID_PASSWORD': ('Invalid password', 'error'),
    'PASSWORDLESS_LOGIN_SUCCESSFUL': ('You have successfully logged in.', 'success'),
'PASSWORD_RESET': ('You successfully reset your password and you have been logged in automatically.', 'success'),
'PASSWORD_CHANGE': ('You successfully changed your password.', 'success'),
'LOGIN': ('Please log in to access this page.', 'info'),
'REFRESH': ('Please reauthenticate to access this page.', 'info'),
}
_allowed_password_hash_schemes = [
'bcrypt',
'des_crypt',
'pbkdf2_sha256',
'pbkdf2_sha512',
'sha256_crypt',
'sha512_crypt',
# And always last one...
'plaintext'
]
_default_forms = {
'login_form': LoginForm,
'confirm_register_form': ConfirmRegisterForm,
'register_form': RegisterForm,
'forgot_password_form': ForgotPasswordForm,
'reset_password_form': ResetPasswordForm,
'change_password_form': ChangePasswordForm,
'send_confirmation_form': SendConfirmationForm,
'passwordless_login_form': PasswordlessLoginForm,
}
def _user_loader(user_id):
return _security.datastore.find_user(id=user_id)
def _token_loader(token):
try:
data = _security.remember_token_serializer.loads(token)
user = _security.datastore.find_user(id=data[0])
if user and md5(user.password) == data[1]:
return user
except:
pass
return None
def _identity_loader():
if not isinstance(current_user._get_current_object(), AnonymousUser):
identity = Identity(current_user.id)
return identity
def _on_identity_loaded(sender, identity):
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
for role in current_user.roles:
identity.provides.add(RoleNeed(role.name))
identity.user = current_user
def _get_login_manager(app):
lm = LoginManager()
lm.anonymous_user = AnonymousUser
lm.login_view = '%s.login' % cv('BLUEPRINT_NAME', app=app)
lm.user_loader(_user_loader)
lm.token_loader(_token_loader)
if cv('FLASH_MESSAGES', app=app):
lm.login_message, lm.login_message_category = cv('MSG_LOGIN', app=app)
lm.needs_refresh_message, lm.needs_refresh_message_category = cv('MSG_REFRESH', app=app)
else:
lm.login_message = None
lm.needs_refresh_message = None
lm.init_app(app)
return lm
def _get_principal(app):
p = Principal(app, use_sessions=False)
p.identity_loader(_identity_loader)
return p
def _get_pwd_context(app):
pw_hash = cv('PASSWORD_HASH', app=app)
if pw_hash not in _allowed_password_hash_schemes:
allowed = ', '.join(_allowed_password_hash_schemes[:-1]) + ' and ' + _allowed_password_hash_schemes[-1]
raise ValueError("Invalid hash scheme %r. Allowed values are %s" % (pw_hash, allowed))
return CryptContext(schemes=_allowed_password_hash_schemes, default=pw_hash)
def _get_serializer(app, name):
secret_key = app.config.get('SECRET_KEY')
salt = app.config.get('SECURITY_%s_SALT' % name.upper())
return URLSafeTimedSerializer(secret_key=secret_key, salt=salt)
def _get_state(app, datastore, **kwargs):
for key, value in get_config(app).items():
kwargs[key.lower()] = value
kwargs.update(dict(
app=app,
datastore=datastore,
login_manager=_get_login_manager(app),
principal=_get_principal(app),
pwd_context=_get_pwd_context(app),
remember_token_serializer=_get_serializer(app, 'remember'),
login_serializer=_get_serializer(app, 'login'),
reset_serializer=_get_serializer(app, 'reset'),
confirm_serializer=_get_serializer(app, 'confirm'),
_context_processors={},
_send_mail_task=None
))
for key, value in _default_forms.items():
if key not in kwargs or not kwargs[key]:
kwargs[key] = value
return _SecurityState(**kwargs)
def _context_processor():
return dict(url_for_security=url_for_security, security=_security)
class RoleMixin(object):
"""Mixin for `Role` model definitions"""
def __eq__(self, other):
return (self.name == other or \
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return not self.__eq__(other)
class UserMixin(BaseUserMixin):
"""Mixin for `User` model definitions"""
def is_active(self):
"""Returns `True` if the user is active."""
return self.active
def get_auth_token(self):
"""Returns the user's authentication token."""
data = [str(self.id), md5(self.password)]
return _security.remember_token_serializer.dumps(data)
def has_role(self, role):
"""Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance"""
if isinstance(role, basestring):
return role in (role.name for role in self.roles)
else:
return role in self.roles
class AnonymousUser(AnonymousUserBase):
"""AnonymousUser definition"""
def __init__(self):
super(AnonymousUser, self).__init__()
self.roles = ImmutableList()
def has_role(self, *args):
"""Returns `False`"""
return False
class _SecurityState(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key.lower(), value)
def _add_ctx_processor(self, endpoint, fn):
group = self._context_processors.setdefault(endpoint, [])
fn not in group and group.append(fn)
def _run_ctx_processor(self, endpoint):
rv, fns = {}, []
for g in [None, endpoint]:
for fn in self._context_processors.setdefault(g, []):
rv.update(fn())
return rv
def context_processor(self, fn):
self._add_ctx_processor(None, fn)
def forgot_password_context_processor(self, fn):
self._add_ctx_processor('forgot_password', fn)
def login_context_processor(self, fn):
self._add_ctx_processor('login', fn)
def register_context_processor(self, fn):
self._add_ctx_processor('register', fn)
def reset_password_context_processor(self, fn):
self._add_ctx_processor('reset_password', fn)
def change_password_context_processor(self, fn):
self._add_ctx_processor('change_password', fn)
def send_confirmation_context_processor(self, fn):
self._add_ctx_processor('send_confirmation', fn)
def send_login_context_processor(self, fn):
self._add_ctx_processor('send_login', fn)
def mail_context_processor(self, fn):
self._add_ctx_processor('mail', fn)
def send_mail_task(self, fn):
self._send_mail_task = fn
class Security(object):
"""The :class:`Security` class initializes the Flask-Security extension.
:param app: The application.
:param datastore: An instance of a user datastore.
"""
def __init__(self, app=None, datastore=None, **kwargs):
self.app = app
self.datastore = datastore
if app is not None and datastore is not None:
self._state = self.init_app(app, datastore, **kwargs)
def init_app(self, app, datastore=None, register_blueprint=True,
login_form=None, confirm_register_form=None,
register_form=None, forgot_password_form=None,
reset_password_form=None, change_password_form=None,
send_confirmation_form=None, passwordless_login_form=None):
"""Initializes the Flask-Security extension for the specified
        application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not.
"""
datastore = datastore or self.datastore
for key, value in _default_config.items():
app.config.setdefault('SECURITY_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('SECURITY_MSG_' + key, value)
identity_loaded.connect_via(app)(_on_identity_loaded)
state = _get_state(app, datastore,
login_form=login_form,
confirm_register_form=confirm_register_form,
register_form=register_form,
forgot_password_form=forgot_password_form,
reset_password_form=reset_password_form,
change_password_form=change_password_form,
send_confirmation_form=send_confirmation_form,
passwordless_login_form=passwordless_login_form)
if register_blueprint:
app.register_blueprint(create_blueprint(state, __name__))
app.context_processor(_context_processor)
app.extensions['security'] = state
return state
def __getattr__(self, name):
return getattr(self._state, name, None)
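# Minimal usage sketch (illustrative; `user_datastore` is assumed to be a
# configured datastore instance such as an SQLAlchemy-backed one):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SECRET_KEY'] = 'change-me'
#   security = Security(app, user_datastore)
#
# or, with the application-factory pattern:
#
#   security = Security()
#   security.init_app(app, user_datastore)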
| 100grams/flask-security | flask_security/core.py | Python | mit | 14,213 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import oslo_service.sslutils
import murano.api.middleware.ssl
import murano.common.config
import murano.common.wsgi
def build_list(opt_list):
return list(itertools.chain(*opt_list))
# List of *all* options in [DEFAULT] namespace of murano.
# Any new option list or option needs to be registered here.
_opt_lists = [
('engine', murano.common.config.engine_opts),
('rabbitmq', murano.common.config.rabbit_opts),
('heat', murano.common.config.heat_opts),
('neutron', murano.common.config.neutron_opts),
('murano', murano.common.config.murano_opts),
('glance', murano.common.config.glance_opts),
('mistral', murano.common.config.mistral_opts),
('networking', murano.common.config.networking_opts),
('stats', murano.common.config.stats_opts),
('packages_opts', murano.common.config.packages_opts),
(None, build_list([
murano.common.config.metadata_dir,
murano.common.config.bind_opts,
murano.common.config.file_server,
murano.common.wsgi.wsgi_opts,
murano.api.middleware.ssl.ssl_middleware_opts
])),
]
_opt_lists.extend(oslo_service.sslutils.list_opts())
def list_opts():
"""Return a list of oslo.config options available in Murano.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
This function is also discoverable via the 'murano' entry point
under the 'oslo.config.opts' namespace.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by Murano.
:returns: a list of (group_name, opts) tuples
"""
return [(g, copy.deepcopy(o)) for g, o in _opt_lists]
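# Illustrative consumption of list_opts(), mirroring what the oslo.config
# sample generator does (hypothetical loop):
#
#   for group, opts in list_opts():
#       print(group or 'DEFAULT', [opt.name for opt in opts])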
| olivierlemasle/murano | murano/opts.py | Python | apache-2.0 | 2,494 |
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
def main():
doc_fields = ['name',
'email',
'alt_email',
'city',
'realm',
'coins',
'category',
'achievements',
'year',
'body']
with open("collections/1bucket_1scope_100collections_basic.json", "r")\
as read_file:
index_map = json.load(read_file)
for i in range(100):
collection_indexes = {}
num_coll_indexes = 0
for field1 in doc_fields:
for field2 in doc_fields:
collection_indexes["myindex"+str(i+1)+"-"+str(num_coll_indexes+1)] = \
"{},{}".format(field1, field2)
num_coll_indexes += 1
index_map['bucket-1']['scope-1']['collection-'+str(i+1)] = collection_indexes
outpath = 'tests/gsi/index_defs/'
outfile = '1bucket_1scope_100collections_10k_indexes_1.json'
with open(outpath+outfile, 'w') as outfile:
json.dump(index_map, outfile, indent=4, sort_keys=True)
pp.pprint(index_map)
print("Done")
if __name__ == '__main__':
main()
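# Resulting JSON shape (illustrative excerpt; two of the 100 index entries
# generated for one collection by the loop above):
#
#   {"bucket-1": {"scope-1": {"collection-1": {
#       "myindex1-1": "name,name",
#       "myindex1-2": "name,email", ...}}}}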
| couchbase/perfrunner | scripts/index_mapper.py | Python | apache-2.0 | 1,220 |
#!/usr/bin/python3
from distutils.core import setup
import cmglib.common
setup(name = 'CMGlue',
version = cmglib.common.VERSION,
description = 'Configuration Management Glue: a better git-submodule',
author = 'Dong Yue',
author_email = 'me@dongyue.name',
url = '(TBD)',
platforms = 'all popular platforms',
license = '(TBD)',
long_description = 'CMGlue (Configuration Management Glue) is a better git-submodule, to manage a workspace which may contains multiple components that may from various version control tools such as Git and SVN (Subversion), or even just from a shared folder.',
packages=['cmglib'],
scripts=['cmg', 'cmg.bat', 'cmg_create_demo', 'cmg_create_demo.bat'],
data_files=[('Doc', ['cmg_manual.txt', 'cmg_manual.html'])]
)
| dongyue/cmglue | setup.py | Python | bsd-3-clause | 792 |
import os
import zipfile
from utils import run, dpath, rm, symlink_in_tempdir
def test_fastqc(sample1_se_fq, tmpdir):
snakefile = '''
rule fastqc:
input:
fastq='sample1_R1.fastq.gz'
output:
html='results/sample1_R1.html',
zip='sample1_R1.zip'
wrapper: "file:wrapper"'''
input_data_func=symlink_in_tempdir(
{
sample1_se_fq: 'sample1_R1.fastq.gz'
}
)
def check():
assert '<html>' in open('results/sample1_R1.html').readline()
assert zipfile.ZipFile('sample1_R1.zip').namelist() == [
'sample1_R1_fastqc/',
'sample1_R1_fastqc/Icons/',
'sample1_R1_fastqc/Images/',
'sample1_R1_fastqc/Icons/fastqc_icon.png',
'sample1_R1_fastqc/Icons/warning.png',
'sample1_R1_fastqc/Icons/error.png',
'sample1_R1_fastqc/Icons/tick.png',
'sample1_R1_fastqc/summary.txt',
'sample1_R1_fastqc/Images/per_base_quality.png',
'sample1_R1_fastqc/Images/per_tile_quality.png',
'sample1_R1_fastqc/Images/per_sequence_quality.png',
'sample1_R1_fastqc/Images/per_base_sequence_content.png',
'sample1_R1_fastqc/Images/per_sequence_gc_content.png',
'sample1_R1_fastqc/Images/per_base_n_content.png',
'sample1_R1_fastqc/Images/sequence_length_distribution.png',
'sample1_R1_fastqc/Images/duplication_levels.png',
'sample1_R1_fastqc/Images/adapter_content.png',
'sample1_R1_fastqc/fastqc_report.html',
'sample1_R1_fastqc/fastqc_data.txt',
'sample1_R1_fastqc/fastqc.fo'
]
run(dpath('../wrappers/fastqc'), snakefile, check, input_data_func, tmpdir)
| lcdb/lcdb-wrapper-tests | test/test_fastqc.py | Python | mit | 1,788 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorkit.base import ObjectiveBase
import tensorflow as tf
class Objective(ObjectiveBase):
def loss(self, hypes, logits, labels):
labels = tf.reshape(labels, [-1, 1])
error = tf.subtract(labels, logits['output'])
rmse_loss = tf.sqrt(tf.reduce_mean(tf.square(error)))
loss = rmse_loss
reg_loss_col = tf.GraphKeys.REGULARIZATION_LOSSES
weight_loss = tf.add_n(tf.get_collection(reg_loss_col), name='reg_loss')
total_loss = weight_loss + loss
tf.summary.scalar('loss', loss)
tf.summary.scalar('weight_loss', weight_loss)
tf.summary.scalar('total_loss', total_loss)
losses = {
'total_loss': total_loss,
'mse_loss': rmse_loss,
'weight_loss': weight_loss,
'error': tf.reduce_sum(tf.abs(error))
}
return losses
def evaluate(self, hyp, images, target, logits, losses):
eval_list = []
eval_list.append(('Total loss', losses['total_loss']))
eval_list.append(('RMSE loss', losses['mse_loss']))
eval_list.append(('Error', losses['error']))
eval_list.append(('Weights', losses['weight_loss']))
return eval_list | nghiattran/self-steering | models/objective.py | Python | mit | 1,330 |
from collections import deque
import os.path
import pygame
from pygame.locals import QUIT, KEYDOWN, K_a, K_s, K_d, K_w, KEYUP, K_p, K_ESCAPE, K_y, K_n, K_g
from pygame import time, display, mixer, transform
from img import load_img
from render import update_display
from player import Player
from background import Background
from level import load_level
from collision import is_collides
from monsters import CaptureBot, LaserBot, PatrolBot, WaitBot, PatrolLaserBot, Boss
from events import Spawner
from random import shuffle, randint
def _is_killed_all(monster_list):
"""does not count PatrolBots."""
count = 0
for monster in monster_list:
if type(monster) is not PatrolBot:
count += 1
return count == 0
def _load_level(level):
background.reset()
background.render(screen, update_queue)
player, portal, env_obj_list, monster_list, spawner_list = load_level(os.path.join("levels", level_dict[level]), TILE_SIZE, WINDOW_WIDTH, WINDOW_HEIGHT, background, screen, update_queue)
# bad fix to ensure player renders on top of any background object
if player:
player.render(screen, update_queue)
return player, portal, env_obj_list, monster_list, spawner_list
def update_image(mover, new_image, new_name):
mover.img = new_image
mover.img_name = new_name
def shake_screen(screen):
blank_screen = screen.copy()
blank_screen.fill((0,0,0))
screen_copy = screen.copy()
noise = mixer.Sound(os.path.join("sounds", "explosion_hard1.wav"))
for i in range(1,500):
offset = randint(-10,10)
screen.blit(blank_screen, (0,0))
screen.blit(screen_copy, (offset,0))
time.wait(10)
display.flip()
# play noise
if i % 100 == 0:
noise.play()
screen.blit(blank_screen, (0,0))
screen.blit(screen_copy, (0,0))
display.flip()
if __name__ == "__main__":
# init
running = True
killed = False
won = False
pygame.init()
# allow key repeating for holding them down
# i just pulled these values off internets
KEY_DELAY = 1
KEY_INTERVAL = 50
pygame.key.set_repeat(KEY_DELAY, KEY_INTERVAL)
fps_clock = pygame.time.Clock()
FPS = 30
WINDOW_WIDTH = 1000
WINDOW_HEIGHT = 700
TILE_SIZE = 50
BOMB_LIMIT = 3
# Mouse button codes (pygame specific)
LEFT_MB = 1
# Colours
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREY = (211, 211, 211)
# Art
PLAYER_N = "man_standing3.png"
GBG_CAN_N = "garbage_can.png"
COMP_SAD_N = "computer_sad.png"
COMP_HAP_N = "computer_happy.png"
CUB_MED_N = "cubicle_med.png"
CUB_MED_COM_N = "cubicle_med_computer_happy.png"
LEVEL1_N = "level1.txt"
BOSS_LEVEL_N = "boss_level.txt"
STRT_SCRN_N = "startscreen.txt"
INTRO_SCRN_N = "intro_level.txt"
END_GAME_SCRN_N = "endgame_screen.txt"
# Animation Prerendered Surfaces
PLAYER_RIGHT_1 = load_img("man_2_right1.png")
PLAYER_RIGHT_2 = load_img("man_2_right2.png")
PLAYER_LEFT_1 = load_img("man_2_left1.png")
PLAYER_LEFT_2 = load_img("man_2_left2.png")
move_count = 0
CHASEBOT_LEFT = load_img("chasebot_l.png")
CHASEBOT_RIGHT = load_img("chasebot_r.png")
DOOR_OPEN = load_img("door_open.png")
DOOR_CLOSED = load_img("door_closed.png")
BOSS_SMILE = load_img("boss_smile.png")
BOSS_SHIELD = load_img("boss_smile_shield.png")
BOSS_SMILE2 = load_img("boss_smile2.png")
BOSS_SHIELD2 = load_img("boss_shield2.png")
# clocks for doors
door_times = [0, 0, 0, 0]
door_clocks = [None, None, None, None]
# music
MUSIC_DIR = "music"
MUSIC_N = "robotpoop (ai)_v2-01.ogg"
MUSIC_BOSS_N = "robotpoop (boss)_v1.ogg"
# Create the screen
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
# put desks, garbage cans, etc in here
env_obj_list = deque()
# put the ai stuff in here, but not player
monster_list = deque()
# init rendering
update_queue = deque() # Queue of rects specifying areas of display to update
background = Background(WINDOW_WIDTH, WINDOW_HEIGHT, GREY)
# levels
level_dict = {0: STRT_SCRN_N, 1: INTRO_SCRN_N, 2: LEVEL1_N, 3: BOSS_LEVEL_N, 4: END_GAME_SCRN_N}
# load start screen
level = 0
    # portal is the rect the player must reach, after destroying all computers, to advance to the next level
player, portal, env_obj_list, monster_list, spawner_list = _load_level(level)
# variable for reference to deque of destroyed monsters
destroyed = None
# bombs
bomb_ctr = 0
bomb_list = deque()
# Force update display (generally handled at end of main loop below)
update_display(update_queue)
music_loaded = False
music_advance = False
# for player pausing game
pause = False
# for toggling pause at end of main loop
unpause = False
if level in (0, 1, 4):
# prevent certain unwanted processes for running
pause = True
while running and (not killed):
if won:
level += 1
player, portal, env_obj_list, monster_list, spawner_list = _load_level(level)
time.wait(1000)
update_display(update_queue)
break
# music
if level == 2 and not music_loaded:
pygame.mixer.music.load(os.path.join(MUSIC_DIR, MUSIC_N))
# play indefinitely
pygame.mixer.music.play(-1)
# need variable, seems API get_busy doesn't consider paused as not busy
music_pause = False
music_loaded = True
if level == 3 and not music_loaded:
# stop whatever music that is playing
pygame.mixer.music.stop()
# load new music
pygame.mixer.music.load(os.path.join(MUSIC_DIR, MUSIC_BOSS_N))
# play indefinitely
pygame.mixer.music.play(-1)
# need variable, seems API get_busy doesn't consider paused as not busy
music_pause = False
music_loaded = True
# input loop
for event in pygame.event.get():
if level == 3 and music_advance:
pygame.mixer.music.load(os.path.join(MUSIC_DIR, MUSIC_BOSS_N))
pygame.mixer.music.play(-1)
music_advance = False
if event.type == KEYDOWN:
if event.key and level == 1:
# load next level, first level with enemies
level += 1
unpause = True
player, portal, env_obj_list, monster_list, spawner_list = _load_level(level)
# ~~~~~DEBUG ONLY~~~~~~~~~~~~~~
# kill all (not sure about boss?)
# elif event.key == K_g:
# destroyed = monster_list.__copy__()
elif event.key in (K_a, K_d, K_w, K_s) and not pause:
# player to be moved, clear old location
background.render(screen, update_queue, player.rect.copy())
rect = player.rect.copy()
if event.key == K_a:
rect.x -= player.mov_unit
if is_collides(rect, env_obj_list, monster_list) is None:
player.move_left()
if player.img_name != ("LEFT1" or "LEFT2"):
player.update_image(PLAYER_LEFT_1, "LEFT1")
elif player.img_name != ("LEFT2"):
player.update_image(PLAYER_LEFT_2, "LEFT2")
else:
player.update_image(PLAYER_LEFT_1, "LEFT1")
elif event.key == K_d:
rect.x += player.mov_unit
if is_collides(rect, env_obj_list, monster_list) is None:
player.move_right()
if player.img_name != ("RIGHT1" or "RIGHT2"):
player.update_image(PLAYER_RIGHT_1, "RIGHT1")
elif player.img_name != ("RIGHT2"):
player.update_image(PLAYER_RIGHT_2, "RIGHT2")
else:
player.update_image(PLAYER_RIGHT_1, "RIGHT1")
elif event.key == K_w:
rect.y -= player.mov_unit
if is_collides(rect, env_obj_list, monster_list) is None:
player.move_up()
elif event.key == K_s:
rect.y += player.mov_unit
if is_collides(rect, env_obj_list, monster_list) is None:
player.move_down()
player.render(screen, update_queue)
elif event.type == KEYUP:
if event.key == K_p and not pause:
# player left poop bomb!
if bomb_ctr < BOMB_LIMIT:
bomb_list.append(player.drop_bomb(FPS))
bomb_ctr += 1
elif event.key == K_ESCAPE and not (level in (0, 1, 4)):
# toggle
pause = not pause
if music_pause:
pygame.mixer.music.unpause()
music_pause = False
else:
pygame.mixer.music.pause()
music_pause = True
elif event.key == K_y and level == 0:
# load next level
level += 1
player, portal, env_obj_list, monster_list, spawner_list = _load_level(level)
elif event.key == K_n and level == 0:
# quit game from start menu
running = False
elif event.type == QUIT:
running = False
if not pause:
# close trapdoors if necessary
for trapdoor in spawner_list:
if trapdoor.is_open:
trapdoor.time += trapdoor.clock.tick(FPS)
if trapdoor.time > 2000:
# remove current trapdoor
background.render(screen, update_queue, trapdoor.rect.copy())
trapdoor.close()
trapdoor.render(screen, update_queue)
# monster ai
for monster in monster_list:
# clear old location
background.render(screen, update_queue, monster.rect.copy())
# remove this monster from general list when checking collisions
monster_list_copy = monster_list.__copy__()
monster_list_copy.remove(monster)
monster.move(player, env_obj_list, monster_list_copy)
if type(monster) in (PatrolBot, CaptureBot):
# update image
if monster.move_count > 1:
update_image(monster, CHASEBOT_LEFT, "left")
monster.move_count = 0
else:
update_image(monster, CHASEBOT_RIGHT, "right")
if type(monster) is WaitBot and monster.sighted:
# update image
if monster.move_count > 1:
update_image(monster, CHASEBOT_LEFT, "left")
monster.move_count = 0
else:
update_image(monster, CHASEBOT_RIGHT, "right")
if type(monster) is Boss:
if monster.shield:
monster.time += monster.clock.tick(monster.fps)
# Check if shield should be turned off
if monster.time > monster.shield_time:
monster.shield_off()
if monster.hp < 5:
update_image(monster, BOSS_SMILE2, "smile")
else:
update_image(monster, BOSS_SMILE, "smile")
monster.render(screen, update_queue)
# Check if player died
if type(monster) in (CaptureBot, WaitBot, PatrolBot) and type(monster.adj_obj) is Player:
killed = True
if type(monster) in (LaserBot, PatrolLaserBot) and (monster.shot is not None):
killed = True
# copy bomb list so can remove bomb's from original list when they explode (can't remove things from a deque
# while you are iterating it)
bomb_list_c = bomb_list.__copy__()
for bomb in bomb_list_c:
if not bomb.is_explode():
bomb.render(screen, update_queue)
else:
# remove from screen
background.render(screen, update_queue, bomb.rect.copy())
bomb_list.remove(bomb)
bomb_ctr -= 1
bomb_mess, destroyed = bomb.explode(monster_list)
bomb_mess.render(screen, update_queue)
# check if have to re-render player so not lost under bomb mess
if is_collides(player.rect, [bomb_mess]):
player.render(screen, update_queue)
background.obj_list.append(bomb_mess)
# re-render any player or monster intersecting any bomb's rect so they are rendered on top
if is_collides(player.rect, bomb_list):
player.render(screen, update_queue)
for monster in monster_list:
if is_collides(monster.rect, bomb_list):
monster.render(screen, update_queue)
# remove any destroyed monsters
if destroyed is not None:
for monster in destroyed:
# spawn monsters
if type(monster) is Boss:
if not monster.shield:
monster.hp -= 1
if monster.hp == 9:
spawn_list = [0]
elif monster.hp >=7 and monster.hp <= 8:
spawn_list = [0,1]
elif monster.hp >= 4 and monster.hp <= 6:
spawn_list = [0,1,2]
elif monster.hp >= 2 and monster.hp <= 3:
spawn_list = [0,1,2,3]
shuffle(spawn_list)
spawn_list = spawn_list[0:3]
elif monster.hp == 1:
spawn_list = [0,1,2,3]
else:
spawn_list = []
# spawn monsters and open doors
for i in spawn_list:
# remove current trapdoor
background.render(screen, update_queue, spawner_list[i].rect.copy())
spawner_list[i].open()
spawner_list[i].render(screen, update_queue)
spawner_list[i].spawner.spawn(monster_list, screen, update_queue)
if monster.hp < 1:
# initiate explosion sequence
pygame.mixer.music.fadeout(2000)
shake_screen(screen)
monster_mess = monster.on_death()
w, h = monster_mess.rect.w, monster_mess.rect.h
monster_mess.img = transform.scale(monster_mess.img, (w, h))
monster_mess.render(screen, update_queue)
background.obj_list.append(monster_mess)
monster_list.remove(monster)
# game over, player won
won = True
else:
                            # lazily initialize the boss clock rate on first hit
                            if monster.fps is None:
                                monster.fps = FPS
monster.shield_on()
if monster.hp < 5:
update_image(monster, BOSS_SHIELD2, "shield")
else:
update_image(monster, BOSS_SHIELD, "shield")
# remove from screen
background.render(screen, update_queue, monster.rect.copy())
else:
monster_mess = monster.on_death()
monster_mess.render(screen, update_queue)
background.obj_list.append(monster_mess)
monster_list.remove(monster)
# remove monster from a spawner if it came from one
for event in spawner_list:
if type(event) is Spawner:
if event.contains(monster):
event.remove(monster)
# remove from screen
background.render(screen, update_queue, monster.rect.copy())
destroyed = None
# check if player advanced to next level (at portal and killed all bots)
if portal and player.rect.colliderect(portal.rect) and _is_killed_all(monster_list):
# time.wait(2000)
if level == 2:
pygame.mixer.music.fadeout(2000)
time.wait(2000)
level += 1
player, portal, env_obj_list, monster_list, spawner_list = _load_level(level)
music_advance = True
update_display(update_queue)
fps_clock.tick(FPS)
if unpause:
pause = False
unpause = False
if killed:
# freeze for a sec so player can see how they died (e.g. see laser rendered for a bit)
time.wait(2000)
elif won:
# Let player hit a key to quit game when they are ready
running = True
while running:
for event in pygame.event.get():
if event.type == KEYDOWN:
running = False
elif event.type == QUIT:
running = False
| Roolymoo/comprev | src/main.py | Python | gpl-3.0 | 18,714 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Carleton Coffrin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections import namedtuple
Set = namedtuple("Set", ['index', 'cost', 'items'])
def solve_it(input_data):
# Modify this code to run your optimization algorithm
# parse the input
lines = input_data.split('\n')
parts = lines[0].split()
item_count = int(parts[0])
set_count = int(parts[1])
sets = []
for i in range(1, set_count+1):
parts = lines[i].split()
sets.append(Set(i-1, float(parts[0]), map(int, parts[1:])))
# build a trivial solution
# pick add sets one-by-one until all the items are covered
solution = [0]*set_count
covered = set()
sorted_sets = sorted(sets, key=lambda s: s.cost/len(s.items) if len(s.items) > 0 else s.cost)
for s in sorted_sets:
solution[s.index] = 1
covered |= set(s.items)
if len(covered) >= item_count:
break
# calculate the cost of the solution
obj = sum([s.cost*solution[s.index] for s in sets])
# prepare the solution in the specified output format
output_data = str(obj) + ' ' + str(0) + '\n'
output_data += ' '.join(map(str, solution))
return output_data
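# Illustrative input accepted by solve_it (first line: item_count and
# set_count; then one line per set: cost followed by the items it covers):
#
#   3 2
#   1.5 0 2
#   2.0 1
#
# The greedy pass above selects both sets and returns "3.5 0\n1 1".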
import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
input_data_file = open(file_location, 'r')
input_data = ''.join(input_data_file.readlines())
input_data_file.close()
print 'Solving:', file_location
print solve_it(input_data)
else:
print 'This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/sc_6_1)'
| discreteoptimization/setcover | greedy_001/solver.py | Python | mit | 2,794 |
import collections
from rdflib import URIRef, Graph, Literal
from rdflib.namespace import VOID, RDF
def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
"""
Returns a new graph with a VoID description of the passed dataset
For more info on Vocabulary of Interlinked Datasets (VoID), see:
http://vocab.deri.ie/void
This only makes two passes through the triples (once to detect the types
of things)
The tradeoff is that lots of temporary structures are built up in memory
meaning lots of memory may be consumed :)
I imagine at least a few copies of your original graph.
the distinctForPartitions parameter controls whether
distinctSubjects/objects are tracked for each class/propertyPartition
this requires more memory again
"""
typeMap = collections.defaultdict(set)
classes = collections.defaultdict(set)
for e, c in g.subject_objects(RDF.type):
classes[c].add(e)
typeMap[e].add(c)
triples = 0
subjects = set()
objects = set()
properties = set()
classCount = collections.defaultdict(int)
propCount = collections.defaultdict(int)
classProps = collections.defaultdict(set)
classObjects = collections.defaultdict(set)
propSubjects = collections.defaultdict(set)
propObjects = collections.defaultdict(set)
for s, p, o in g:
triples += 1
subjects.add(s)
properties.add(p)
objects.add(o)
# class partitions
if s in typeMap:
for c in typeMap[s]:
classCount[c] += 1
if distinctForPartitions:
classObjects[c].add(o)
classProps[c].add(p)
# property partitions
propCount[p] += 1
if distinctForPartitions:
propObjects[p].add(o)
propSubjects[p].add(s)
if not dataset:
dataset = URIRef("http://example.org/Dataset")
if not res:
res = Graph()
res.add((dataset, RDF.type, VOID.Dataset))
# basic stats
res.add((dataset, VOID.triples, Literal(triples)))
res.add((dataset, VOID.classes, Literal(len(classes))))
res.add((dataset, VOID.distinctObjects, Literal(len(objects))))
res.add((dataset, VOID.distinctSubjects, Literal(len(subjects))))
res.add((dataset, VOID.properties, Literal(len(properties))))
for i, c in enumerate(classes):
part = URIRef(dataset + "_class%d" % i)
res.add((dataset, VOID.classPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(classCount[c])))
res.add((part, VOID.classes, Literal(1)))
res.add((part, VOID["class"], c))
res.add((part, VOID.entities, Literal(len(classes[c]))))
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
res.add((part, VOID.properties, Literal(len(classProps[c]))))
res.add((part, VOID.distinctObjects, Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
res.add((dataset, VOID.propertyPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(propCount[p])))
res.add((part, VOID.properties, Literal(1)))
res.add((part, VOID.property, p))
if distinctForPartitions:
entities = 0
propClasses = set()
for s in propSubjects[p]:
if s in typeMap:
entities += 1
for c in typeMap[s]:
propClasses.add(c)
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
res.add((part, VOID.distinctSubjects, Literal(len(propSubjects[p]))))
res.add((part, VOID.distinctObjects, Literal(len(propObjects[p]))))
return res, dataset
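if __name__ == "__main__":
    # Illustrative self-test on hypothetical data: build a two-triple
    # graph and print its VoID description.
    from rdflib.namespace import FOAF
    g = Graph()
    alice = URIRef("http://example.org/alice")
    g.add((alice, RDF.type, FOAF.Person))
    g.add((alice, FOAF.name, Literal("Alice")))
    void_graph, ds = generateVoID(g)
    print(void_graph.serialize(format="turtle"))  # str on rdflib >= 6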
| RDFLib/rdflib | rdflib/void.py | Python | bsd-3-clause | 4,001 |
"""Markov Decision Processes (Chapter 17)
First we define an MDP, and the special case of a GridMDP, in which
states are laid out in a 2-dimensional grid. We also represent a policy
as a dictionary of {state:action} pairs, and a Utility function as a
dictionary of {state:number} pairs. We then define the value_iteration
and policy_iteration algorithms."""
from .utils import *
class MDP:
"""A Markov Decision Process, defined by an initial state, transition model,
and reward function. We also keep track of a gamma value, for use by
algorithms. The transition model is represented somewhat differently from
    the text. Instead of P(s' | s, a) being a probability number for each
    state/action/next-state triple, we instead have T(s, a) return a list of (p, s')
pairs. We also keep track of the possible states, terminal states, and
actions for each state. [page 646]"""
def __init__(self, init, actlist, terminals, gamma=.9):
self.init = init
self.actlist = actlist
self.terminals = terminals
if not (0 <= gamma < 1):
raise ValueError("An MDP must have 0 <= gamma < 1")
self.gamma = gamma
self.states = set()
self.reward = {}
def R(self, state):
"Return a numeric reward for this state."
return self.reward[state]
def T(self, state, action):
"""Transition model. From a state and an action, return a list
of (probability, result-state) pairs."""
raise NotImplementedError
def actions(self, state):
"""Set of actions that can be performed in this state. By default, a
fixed list of actions, except for terminal states. Override this
method if you need to specialize by state."""
if state in self.terminals:
return [None]
else:
return self.actlist
class GridMDP(MDP):
"""A two-dimensional grid MDP, as in [Figure 17.1]. All you have to do is
specify the grid as a list of lists of rewards; use None for an obstacle
(unreachable state). Also, you should specify the terminal states.
An action is an (x, y) unit vector; e.g. (1, 0) means move east."""
def __init__(self, grid, terminals, init=(0, 0), gamma=.9):
grid.reverse() # because we want row 0 on bottom, not on top
MDP.__init__(self, init, actlist=orientations,
terminals=terminals, gamma=gamma)
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
for x in range(self.cols):
for y in range(self.rows):
self.reward[x, y] = grid[y][x]
if grid[y][x] is not None:
self.states.add((x, y))
def T(self, state, action):
if action is None:
return [(0.0, state)]
else:
return [(0.8, self.go(state, action)),
(0.1, self.go(state, turn_right(action))),
(0.1, self.go(state, turn_left(action)))]
def go(self, state, direction):
"Return the state that results from going in this direction."
state1 = vector_add(state, direction)
return (state1 if state1 in self.states else state)
def to_grid(self, mapping):
"""Convert a mapping from (x, y) to v into a [[..., v, ...]] grid."""
return list(reversed([[mapping.get((x, y), None)
for x in range(self.cols)]
for y in range(self.rows)]))
def to_arrows(self, policy):
chars = {
(1, 0): '>', (0, 1): '^', (-1, 0): '<', (0, -1): 'v', None: '.'}
return self.to_grid(dict([(s, chars[a]) for (s, a) in list(policy.items())]))
#______________________________________________________________________________
Fig[17, 1] = GridMDP([[-0.04, -0.04, -0.04, +1],
[-0.04, None, -0.04, -1],
[-0.04, -0.04, -0.04, -0.04]],
terminals=[(3, 2), (3, 1)])
#______________________________________________________________________________
def value_iteration(mdp, epsilon=0.001):
"Solving an MDP by value iteration. [Fig. 17.4]"
U1 = dict([(s, 0) for s in mdp.states])
R, T, gamma = mdp.R, mdp.T, mdp.gamma
while True:
U = U1.copy()
delta = 0
for s in mdp.states:
U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])
for a in mdp.actions(s)])
delta = max(delta, abs(U1[s] - U[s]))
if delta < epsilon * (1 - gamma) / gamma:
return U
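# Note (illustrative): the termination test above relies on the standard
# contraction bound for value iteration -- once the largest update satisfies
# delta < epsilon * (1 - gamma) / gamma, the returned utilities are
# guaranteed to be within epsilon of the true utilities.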
def best_policy(mdp, U):
"""Given an MDP and a utility function U, determine the best policy,
as a mapping from state to action. (Equation 17.4)"""
pi = {}
for s in mdp.states:
pi[s] = argmax(
mdp.actions(s), lambda a: expected_utility(a, s, U, mdp))
return pi
def expected_utility(a, s, U, mdp):
"The expected utility of doing a in state s, according to the MDP and U."
return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])
#______________________________________________________________________________
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(
mdp.actions(s), lambda a: expected_utility(a, s, U, mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi
def policy_evaluation(pi, U, mdp, k=20):
"""Return an updated utility mapping U from each state in the MDP to its
utility, using an approximation (modified policy iteration)."""
R, T, gamma = mdp.R, mdp.T, mdp.gamma
for i in range(k):
for s in mdp.states:
U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])
return U
__doc__ += """
>>> pi = best_policy(Fig[17,1], value_iteration(Fig[17,1], .01))
>>> Fig[17,1].to_arrows(pi)
[['>', '>', '>', '.'], ['^', None, '^', '.'], ['^', '>', '^', '<']]
>>> print_table(Fig[17,1].to_arrows(pi))
> > > .
^ None ^ .
^ > ^ <
>>> print_table(Fig[17,1].to_arrows(policy_iteration(Fig[17,1])))
> > > .
^ None ^ .
^ > ^ <
"""
__doc__ += """
Random tests:
>>> pi
{(3, 2): None, (3, 1): None, (3, 0): (-1, 0), (2, 1): (0, 1), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (0, 1), (1, 2): (1, 0), (2, 0): (0, 1), (0, 1): (0, 1), (2, 2): (1, 0)}
>>> value_iteration(Fig[17,1], .01)
{(3, 2): 1.0, (3, 1): -1.0, (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, (2, 2): 0.79536093684710951}
>>> policy_iteration(Fig[17,1])
{(3, 2): None, (3, 1): None, (3, 0): (0, -1), (2, 1): (-1, 0), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), (0, 1): (1, 0), (2, 2): (1, 0)}
"""
| gokul-uf/aima-python | aimaPy/mdp.py | Python | mit | 7,240 |
# packet.py
#
# Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net>
#
# A RADIUS packet as defined in RFC 2138
import struct, types, random, UserDict
try:
import hashlib
md5_constructor = hashlib.md5
except ImportError:
# BBB for python 2.4
import md5
md5_constructor = md5.new
import tools
# Packet codes
AccessRequest = 1
AccessAccept = 2
AccessReject = 3
AccountingRequest = 4
AccountingResponse = 5
AccessChallenge = 11
StatusServer = 12
StatusClient = 13
DisconnectRequest = 40
DisconnectACK = 41
DisconnectNAK = 42
CoARequest = 43
CoAACK = 44
CoANAK = 45
# Current ID
CurrentID = random.randrange(1, 255)
class PacketError(Exception):
pass
class Packet(UserDict.UserDict):
"""Packet acts like a standard python map to provide simple access
to the RADIUS attributes. Since RADIUS allows for repeated
attributes the value will always be a sequence. pyrad makes sure
to preserve the ordering when encoding and decoding packets.
    There are two ways to use the map interface: if attribute
    names are used pyrad takes care of en-/decoding data. If
the attribute type number (or a vendor ID/attribute type
tuple for vendor attributes) is used you work with the
raw data.
Normally you will not use this class directly, but one of the
:obj:`AuthPacket` or :obj:`AcctPacket` classes.
"""
def __init__(self, code=0, id=None, secret="", authenticator=None, **attributes):
"""Constructor
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary class
:param secret: secret needed to communicate with a RADIUS server
:type secret: string
        :param id: packet identification number
:type id: integer (8 bits)
:param code: packet type code
:type code: integer (8bits)
:param packet: raw packet to decode
:type packet: string
"""
UserDict.UserDict.__init__(self)
self.code=code
if id is not None:
self.id=id
else:
self.id=CreateID()
self.secret=secret
self.authenticator=authenticator
if attributes.has_key("dict"):
self.dict=attributes["dict"]
if attributes.has_key("packet"):
self.DecodePacket(attributes["packet"])
for (key,value) in attributes.items():
if key in [ "dict", "fd", "packet"]:
continue
key=key.replace("_", "-")
self.AddAttribute(key, value)
def CreateReply(self, **attributes):
"""Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance.
"""
return Packet(id=self.id, secret=self.secret,
authenticator=self.authenticator, dict=self.dict,
**attributes)
def _DecodeValue(self, attr, value):
if attr.values.HasBackward(value):
return attr.values.GetBackward(value)
else:
return tools.DecodeAttr(attr.type, value)
def _EncodeValue(self, attr, value):
if attr.values.HasForward(value):
return attr.values.GetForward(value)
else:
return tools.EncodeAttr(attr.type, value)
def _EncodeKeyValues(self, key, values):
if not isinstance(key, str):
return (key, values)
attr=self.dict.attributes[key]
if attr.vendor:
key=(self.dict.vendors.GetForward(attr.vendor), attr.code)
else:
key=attr.code
return (key, [self._EncodeValue(attr, v) for v in values])
def _EncodeKey(self, key):
if not isinstance(key, str):
return key
attr=self.dict.attributes[key]
if attr.vendor:
return (self.dict.vendors.GetForward(attr.vendor), attr.code)
else:
return attr.code
def _DecodeKey(self, key):
"Turn a key into a string if possible"
if self.dict.attrindex.HasBackward(key):
return self.dict.attrindex.GetBackward(key)
return key
def AddAttribute(self, key, value):
"""Add an attribute to the packet.
:param key: attribute name or identification
:type key: string, attribute code or (vendor code, attribute code) tuple
:param value: value
:type value: depends on type of attribute
"""
(key,value)=self._EncodeKeyValues(key, [value])
value=value[0]
if self.data.has_key(key):
self.data[key].append(value)
else:
self.data[key]=[value]
def __getitem__(self, key):
if type(key)!=types.StringType:
return self.data[key]
values=self.data[self._EncodeKey(key)]
attr=self.dict.attributes[key]
res=[]
for v in values:
res.append(self._DecodeValue(attr, v))
return res
def __contains__(self, key):
return self.has_key(key)
def has_key(self, key):
try:
return self.data.has_key(self._EncodeKey(key))
except KeyError:
return False
def __delitem__(self, key):
del self.data[self._EncodeKey(key)]
def __setitem__(self, key, item):
if type(key)==types.StringType:
(key,item)=self._EncodeKeyValues(key, [item])
self.data[key]=item
else:
assert isinstance(item, list)
self.data[key]=item
def keys(self):
return [self._DecodeKey(key) for key in self.data.keys()]
def CreateAuthenticator():
"""Create a packet autenticator. All RADIUS packets contain a sixteen
byte authenticator which is used to authenticate replies from the
RADIUS server and in the password hiding algorithm. This function
returns a suitable random string that can be used as an authenticator.
:return: valid packet authenticator
:rtype: string
"""
data=""
for i in range(16):
data+=chr(random.randrange(0,256))
return data
CreateAuthenticator=staticmethod(CreateAuthenticator)
def CreateID(self):
"""Create a packet ID. All RADIUS requests have a ID which is used to
identify a request. This is used to detect retries and replay attacks.
This function returns a suitable random number that can be used as ID.
:return: ID number
:rtype: integer
"""
return random.randrange(0,256)
def ReplyPacket(self):
"""Create a ready-to-transmit authentication reply packet.
Returns a RADIUS packet which can be directly transmitted
to a RADIUS server. This differs with Packet() in how
the authenticator is calculated.
:return: raw packet
:rtype: string
"""
assert(self.authenticator)
assert(self.secret)
attr=self._PktEncodeAttributes()
header=struct.pack("!BBH", self.code, self.id, (20+len(attr)))
authenticator=md5_constructor(header[0:4] + self.authenticator
+ attr + self.secret).digest()
return header + authenticator + attr
def VerifyReply(self, reply, rawreply=None):
if reply.id!=self.id:
return False
if rawreply is None:
rawreply=reply.ReplyPacket()
hash=md5_constructor(rawreply[0:4] + self.authenticator +
rawreply[20:] + self.secret).digest()
if hash!=rawreply[4:20]:
return False
return True
def _PktEncodeAttribute(self, key, value):
if type(key)==types.TupleType:
value=struct.pack("!L", key[0]) + \
self._PktEncodeAttribute(key[1], value)
key=26
return struct.pack("!BB", key, (len(value)+2))+value
def _PktEncodeAttributes(self):
result=""
for (code, datalst) in self.items():
for data in datalst:
result+=self._PktEncodeAttribute(code, data)
return result
def _PktDecodeVendorAttribute(self, data):
# Check if this packet is long enough to be in the
# RFC2865 recommended form
if len(data)<6:
return (26, data)
(vendor, type, length)=struct.unpack("!LBB", data[:6])[0:3]
# Another sanity check
if len(data)!=length+4:
return (26,data)
return ((vendor,type), data[6:])
def DecodePacket(self, packet):
"""Initialize the object from raw packet data. Decode a packet as
received from the network and decode it.
:param packet: raw packet
:type packet: string"""
try:
(self.code, self.id, length, self.authenticator)=struct.unpack("!BBH16s", packet[0:20])
except struct.error:
raise PacketError, "Packet header is corrupt"
if len(packet)!=length:
raise PacketError, "Packet has invalid length"
if length>8192:
raise PacketError, "Packet length is too long (%d)" % length
self.clear()
packet=packet[20:]
while packet:
try:
(key, attrlen)=struct.unpack("!BB", packet[0:2])
except struct.error:
raise PacketError, "Attribute header is corrupt"
if attrlen<2:
raise PacketError, "Attribute length is too small (%d)" % attrlen
value=packet[2:attrlen]
if key==26:
(key,value)=self._PktDecodeVendorAttribute(value)
if self.data.has_key(key):
self.data[key].append(value)
else:
self.data[key]=[value]
packet=packet[attrlen:]
class AuthPacket(Packet):
def __init__(self, code=AccessRequest, id=None, secret="", authenticator=None, **attributes):
"""Constructor
:param code: packet type code
:type code: integer (8bits)
        :param id: packet identification number
:type id: integer (8 bits)
:param secret: secret needed to communicate with a RADIUS server
:type secret: string
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary class
:param packet: raw packet to decode
:type packet: string
"""
Packet.__init__(self, code, id, secret, authenticator, **attributes)
def CreateReply(self, **attributes):
"""Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance.
"""
return AuthPacket(AccessAccept, self.id,
self.secret, self.authenticator, dict=self.dict,
**attributes)
def RequestPacket(self):
"""Create a ready-to-transmit authentication request packet.
Return a RADIUS packet which can be directly transmitted
to a RADIUS server.
:return: raw packet
:rtype: string
"""
attr=self._PktEncodeAttributes()
if self.authenticator is None:
self.authenticator=self.CreateAuthenticator()
if self.id is None:
self.id=self.CreateID()
header=struct.pack("!BBH16s", self.code, self.id,
(20+len(attr)), self.authenticator)
return header+attr
def PwDecrypt(self, password):
"""Unobfuscate a RADIUS password. RADIUS hides passwords in packets by
using an algorithm based on the MD5 hash of the packet authenticator
and RADIUS secret. This function reverses the obfuscation process.
:param password: obfuscated form of password
:type password: string
:return: plaintext password
:rtype: string
"""
buf=password
pw=""
last=self.authenticator
while buf:
hash=md5_constructor(self.secret+last).digest()
for i in range(16):
pw+=chr(ord(hash[i]) ^ ord(buf[i]))
(last,buf)=(buf[:16], buf[16:])
while pw.endswith("\x00"):
pw=pw[:-1]
return pw
def PwCrypt(self, password):
"""Obfuscate password.
RADIUS hides passwords in packets by using an algorithm
based on the MD5 hash of the packet authenticator and RADIUS
secret. If no authenticator has been set before calling PwCrypt
one is created automatically. Changing the authenticator after
setting a password that has been encrypted using this function
will not work.
:param password: plaintext password
:type password: string
:return: obfuscated version of the password
:rtype: string
"""
if self.authenticator is None:
self.authenticator=self.CreateAuthenticator()
buf=password
if len(password)%16!=0:
buf+="\x00" * (16-(len(password)%16))
hash=md5_constructor(self.secret+self.authenticator).digest()
result=""
last=self.authenticator
while buf:
hash=md5_constructor(self.secret+last).digest()
for i in range(16):
result+=chr(ord(hash[i]) ^ ord(buf[i]))
last=result[-16:]
buf=buf[16:]
return result
class AcctPacket(Packet):
"""RADIUS accounting packets. This class is a specialization
of the generic :obj:`Packet` class for accounting packets.
"""
def __init__(self, code=AccountingRequest, id=None, secret="", authenticator=None, **attributes):
"""Constructor
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary class
:param secret: secret needed to communicate with a RADIUS server
:type secret: string
        :param id: packet identification number
:type id: integer (8 bits)
:param code: packet type code
:type code: integer (8bits)
:param packet: raw packet to decode
:type packet: string
"""
Packet.__init__(self, code, id, secret, authenticator, **attributes)
if attributes.has_key("packet"):
self.raw_packet=attributes["packet"]
def CreateReply(self, **attributes):
"""Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance.
"""
return AcctPacket(AccountingResponse, self.id,
self.secret, self.authenticator, dict=self.dict,
**attributes)
def VerifyAcctRequest(self):
"""Verify request authenticator.
        :return: True if verification succeeded else False
:rtype: boolean
"""
assert(self.raw_packet)
hash=md5_constructor(self.raw_packet[0:4] + 16*"\x00" +
self.raw_packet[20:] + self.secret).digest()
return hash==self.authenticator
def RequestPacket(self):
"""Create a ready-to-transmit authentication request packet.
Return a RADIUS packet which can be directly transmitted
to a RADIUS server.
:return: raw packet
:rtype: string
"""
attr=self._PktEncodeAttributes()
if self.id is None:
self.id=self.CreateID()
header=struct.pack("!BBH", self.code, self.id, (20+len(attr)))
self.authenticator=md5_constructor(header[0:4] + 16 * "\x00" + attr
+ self.secret).digest()
return header + self.authenticator + attr
def CreateID():
"""Generate a packet ID.
:return: packet ID
:rtype: 8 bit integer
"""
global CurrentID
CurrentID=(CurrentID+1)%256
return CurrentID
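# Minimal usage sketch (illustrative, not part of the original module).
# Builds an Access-Request using raw numeric attribute codes (1=User-Name,
# 2=User-Password per RFC 2865), so no dictionary object is needed.
if __name__ == "__main__":
    pkt = AuthPacket(secret="s3cret")
    pkt.AddAttribute(1, "bob")
    pkt.AddAttribute(2, pkt.PwCrypt("hello"))
    raw = pkt.RequestPacket()
    print "%d bytes ready to send" % len(raw)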
| shanghai-edu/radius-1xtest | lib/packet.py | Python | apache-2.0 | 15,968 |
import unittest
from nose.tools import assert_equals, assert_true, assert_false
from robotide.robotapi import TestCaseFile
from robotide.controller.filecontrollers import TestCaseFileController
from robotide.controller.macrocontrollers import (
TestCaseController, UserKeywordController)
from robotide.controller.tablecontrollers import (
TestCaseTableController, KeywordTableController)
class _BaseWithSteps(unittest.TestCase):
def _test_copy_empty(self):
for setting in self.ctrl.settings:
assert_false(setting.is_set, 'not empty %s' % setting.__class__)
new = self.ctrl.copy('new name')
for setting in new.settings:
assert_false(setting.is_set, 'not empty %s' % setting.__class__)
def _test_copy_content(self):
for setting in self.ctrl.settings:
assert_false(setting.is_set, 'not empty %s' % setting.__class__)
setting.set_value('boo')
setting.set_comment(['hobo'])
new = self.ctrl.copy('new name')
for setting in new.settings:
assert_true(setting.is_set, 'empty %s' % setting.__class__)
assert_equals(setting.value, 'boo', 'not boo %s' % setting.__class__)
assert_equals(setting.comment.as_list(), ['# hobo'], 'comment not copied %s' % setting.__class__)
class TestCaseControllerTest(_BaseWithSteps):
def setUp(self):
self.tcf = TestCaseFile()
self.testcase = self.tcf.testcase_table.add('Test')
self.testcase.add_step(['Log', 'Hello'])
self.testcase.add_step(['No Operation'])
self.testcase.add_step(['Foo'])
self.tcf.testcase_table.add('Another Test')
tctablectrl = TestCaseTableController(TestCaseFileController(self.tcf),
self.tcf.testcase_table)
self.ctrl = TestCaseController(tctablectrl, self.testcase)
def test_creation(self):
for st in self.ctrl.settings:
assert_true(st is not None)
assert_true(self.ctrl.datafile is self.tcf, self.ctrl.datafile)
def test_rename(self):
self.ctrl.rename('Foo Barness')
assert_equals(self.ctrl.name, 'Foo Barness')
assert_true(self.ctrl.dirty)
def test_rename_strips_whitespace(self):
self.ctrl.rename('\t \n Foo Barness ')
assert_equals(self.ctrl.name, 'Foo Barness')
assert_true(self.ctrl.dirty)
def test_copy_empty(self):
self._test_copy_empty()
def test_copy_content(self):
self._test_copy_content()
def test_add_tag(self):
orig_num_tags = len(self.ctrl.tags.as_list())
self.ctrl.add_tag('Some tag')
assert_true(len(self.ctrl.tags.as_list()) == orig_num_tags + 1)
class UserKeywordControllerTest(_BaseWithSteps):
def setUp(self):
self.tcf = TestCaseFile()
uk = self.tcf.keyword_table.add('UK')
uk.add_step(['No Operation'])
uk2 = self.tcf.keyword_table.add('UK 2')
tablectrl = KeywordTableController(TestCaseFileController(self.tcf),
self.tcf.keyword_table)
self.ctrl = UserKeywordController(tablectrl, uk)
self.ctrl2 = UserKeywordController(tablectrl, uk2)
def test_keyword_settings(self):
labels = [setting.label for setting in self.ctrl.settings]
assert_true('Documentation' in labels)
assert_true('Arguments' in labels)
assert_true('Teardown' in labels)
assert_true('Return Value' in labels)
assert_true('Timeout' in labels)
def test_creation(self):
for st in self.ctrl.settings:
assert_true(st is not None)
assert_equals(self.ctrl.steps[0].keyword, 'No Operation')
assert_true(self.ctrl.datafile is self.tcf)
def test_dirty(self):
self.ctrl.mark_dirty()
assert_true(self.ctrl.dirty)
def test_move_up(self):
assert_false(self.ctrl.move_up())
self._assert_uk_in(0, 'UK')
assert_true(self.ctrl2.move_up())
self._assert_uk_in(0, 'UK 2')
def test_move_down(self):
assert_false(self.ctrl2.move_down())
self._assert_uk_in(1, 'UK 2')
assert_true(self.ctrl.move_down())
self._assert_uk_in(1, 'UK')
def test_delete(self):
self.ctrl.delete()
assert_false('UK' in self.tcf.keyword_table.keywords)
self._assert_uk_in(0, 'UK 2')
def _assert_uk_in(self, index, name):
assert_equals(self.tcf.keyword_table.keywords[index].name, name)
def _assert_step(self, step, exp_assign=[], exp_keyword=None, exp_args=[]):
assert_equals(step.assign, exp_assign)
assert_equals(step.keyword, exp_keyword)
assert_equals(step.args, exp_args)
def test_copy_empty(self):
self._test_copy_empty()
def test_copy_content(self):
self._test_copy_content()
| fingeronthebutton/RIDE | utest/controller/test_macro_controllers.py | Python | apache-2.0 | 4,898 |
import json
import logging
import requests
import os
import pexpect
import yaml
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from teuthology.config import config as teuth_config
from teuthology.exceptions import CommandFailedError
from teuthology.repo_utils import fetch_repo
from . import Task
log = logging.getLogger(__name__)
class LoggerFile(object):
"""
A thin wrapper around a logging.Logger instance that provides a file-like
interface.
Used by Ansible.execute_playbook() when it calls pexpect.run()
"""
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, string):
self.logger.log(self.level, string)
def flush(self):
pass
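# Example (illustrative): pexpect.run(cmd, logfile=LoggerFile(log, logging.INFO))
# streams the child process output into the logger as it arrives.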
class Ansible(Task):
"""
A task to run ansible playbooks
Required configuration parameters:
playbook: Required; can either be a list of plays, or a path/URL to a
playbook. In the case of a path, it may be relative to the
repo's on-disk location (if a repo is provided), or
teuthology's working directory.
Optional configuration parameters:
repo: A path or URL to a repo (defaults to '.'). Given a repo
value of 'foo', ANSIBLE_ROLES_PATH is set to 'foo/roles'
branch: If pointing to a remote git repo, use this branch. Defaults
to 'master'.
hosts: A list of teuthology roles or partial hostnames (or a
combination of the two). ansible-playbook will only be run
against hosts that match.
inventory: A path to be passed to ansible-playbook with the
--inventory-file flag; useful for playbooks that also have
vars they need access to. If this is not set, we check for
/etc/ansible/hosts and use that if it exists. If it does
not, we generate a temporary file to use.
tags: A string including any (comma-separated) tags to be passed
directly to ansible-playbook.
vars: A dict of vars to be passed to ansible-playbook via the
--extra-vars flag
cleanup: If present, the given or generated playbook will be run
again during teardown with a 'cleanup' var set to True.
This will allow the playbook to clean up after itself,
if the playbook supports this feature.
Examples:
tasks:
- ansible:
repo: https://github.com/ceph/ceph-cm-ansible.git
playbook:
- roles:
- some_role
- another_role
hosts:
- client.0
- host1
tasks:
- ansible:
repo: /path/to/repo
inventory: /path/to/inventory
playbook: /path/to/playbook.yml
tags: my_tags
vars:
var1: string_value
var2:
- list_item
var3:
key: value
"""
def __init__(self, ctx, config):
super(Ansible, self).__init__(ctx, config)
self.log = log
self.generated_inventory = False
self.generated_playbook = False
def setup(self):
super(Ansible, self).setup()
self.find_repo()
self.get_playbook()
self.get_inventory() or self.generate_hosts_file()
if not hasattr(self, 'playbook_file'):
self.generate_playbook()
def find_repo(self):
"""
Locate the repo we're using; cloning it from a remote repo if necessary
"""
repo = self.config.get('repo', '.')
if repo.startswith(('http://', 'https://', 'git@', 'git://')):
repo_path = fetch_repo(
repo,
self.config.get('branch', 'master'),
)
else:
repo_path = os.path.abspath(os.path.expanduser(repo))
self.repo_path = repo_path
def get_playbook(self):
"""
If necessary, fetch and read the playbook file
"""
playbook = self.config['playbook']
if isinstance(playbook, list):
# Multiple plays in a list
self.playbook = playbook
elif isinstance(playbook, str) and playbook.startswith(('http://',
'https://')):
response = requests.get(playbook)
response.raise_for_status()
self.playbook = yaml.safe_load(response.text)
elif isinstance(playbook, str):
try:
playbook_path = os.path.expanduser(playbook)
if not playbook_path.startswith('/'):
# If the path is not absolute at this point, look for the
# playbook in the repo dir. If it's not there, we assume
# the path is relative to the working directory
pb_in_repo = os.path.join(self.repo_path, playbook_path)
if os.path.exists(pb_in_repo):
playbook_path = pb_in_repo
self.playbook_file = file(playbook_path)
playbook_yaml = yaml.safe_load(self.playbook_file)
self.playbook = playbook_yaml
except Exception:
log.error("Unable to read playbook file %s", playbook)
raise
else:
raise TypeError(
"playbook value must either be a list, URL or a filename")
log.info("Playbook: %s", self.playbook)
def get_inventory(self):
"""
Determine whether or not we're using an existing inventory file
"""
self.inventory = self.config.get('inventory')
etc_ansible_hosts = '/etc/ansible/hosts'
if self.inventory:
self.inventory = os.path.expanduser(self.inventory)
elif os.path.exists(etc_ansible_hosts):
self.inventory = etc_ansible_hosts
return self.inventory
def generate_hosts_file(self):
"""
Generate a hosts (inventory) file to use. This should not be called if
we're using an existing file.
"""
hosts = self.cluster.remotes.keys()
hostnames = [remote.hostname for remote in hosts]
hostnames.sort()
hosts_str = '\n'.join(hostnames + [''])
hosts_file = NamedTemporaryFile(prefix="teuth_ansible_hosts_",
delete=False)
hosts_file.write(hosts_str)
hosts_file.flush()
self.generated_inventory = True
self.inventory = hosts_file.name
def generate_playbook(self):
"""
Generate a playbook file to use. This should not be called if we're
using an existing file.
"""
for play in self.playbook:
# Ensure each play is applied to all hosts mentioned in the --limit
# flag we specify later
play['hosts'] = 'all'
pb_buffer = StringIO()
pb_buffer.write('---\n')
yaml.safe_dump(self.playbook, pb_buffer)
pb_buffer.seek(0)
playbook_file = NamedTemporaryFile(prefix="teuth_ansible_playbook_",
delete=False)
playbook_file.write(pb_buffer.read())
playbook_file.flush()
self.playbook_file = playbook_file
self.generated_playbook = True
def begin(self):
super(Ansible, self).begin()
self.execute_playbook()
def execute_playbook(self, _logfile=None):
"""
Execute ansible-playbook
:param _logfile: Use this file-like object instead of a LoggerFile for
testing
"""
environ = os.environ
environ['ANSIBLE_SSH_PIPELINING'] = '1'
environ['ANSIBLE_ROLES_PATH'] = "%s/roles" % self.repo_path
args = self._build_args()
command = ' '.join(args)
log.debug("Running %s", command)
out_log = self.log.getChild('out')
out, status = pexpect.run(
command,
logfile=_logfile or LoggerFile(out_log, logging.INFO),
withexitstatus=True,
timeout=None,
)
if status != 0:
raise CommandFailedError(command, status)
def _build_args(self):
"""
Assemble the list of args to be executed
"""
fqdns = [r.hostname for r in self.cluster.remotes.keys()]
# Assume all remotes use the same username
user = self.cluster.remotes.keys()[0].user
extra_vars = dict(ansible_ssh_user=user)
extra_vars.update(self.config.get('vars', dict()))
args = [
'ansible-playbook', '-v',
"--extra-vars", "'%s'" % json.dumps(extra_vars),
'-i', self.inventory,
'--limit', ','.join(fqdns),
self.playbook_file.name,
]
tags = self.config.get('tags')
if tags:
args.extend(['--tags', tags])
return args
def teardown(self):
self._cleanup()
if self.generated_inventory:
os.remove(self.inventory)
if self.generated_playbook:
os.remove(self.playbook_file.name)
super(Ansible, self).teardown()
def _cleanup(self):
"""
If the ``cleanup`` key exists in config the same playbook will be
run again during the teardown step with the var ``cleanup`` given with
a value of ``True``. If supported, this will allow the playbook to
cleanup after itself during teardown.
"""
if self.config.get("cleanup"):
log.info("Running ansible cleanup...")
extra = dict(cleanup=True)
if self.config.get('vars'):
self.config.get('vars').update(extra)
else:
self.config['vars'] = extra
self.execute_playbook()
else:
log.info("Skipping ansible cleanup...")
class CephLab(Ansible):
__doc__ = """
A very simple subclass of Ansible that defaults to:
- ansible:
repo: {git_base}ceph-cm-ansible.git
playbook: cephlab.yml
""".format(git_base=teuth_config.ceph_git_base_url)
# Set the name so that Task knows to look up overrides for
# 'ansible.cephlab' instead of just 'cephlab'
name = 'ansible.cephlab'
def __init__(self, ctx, config):
config = config or dict()
if 'playbook' not in config:
config['playbook'] = 'cephlab.yml'
if 'repo' not in config:
config['repo'] = os.path.join(teuth_config.ceph_git_base_url,
'ceph-cm-ansible.git')
super(CephLab, self).__init__(ctx, config)
task = Ansible
cephlab = CephLab
| ivotron/teuthology | teuthology/task/ansible.py | Python | mit | 10,845 |
import unittest
from IPython.display import Markdown, display
import numpy as np
def printmd(string):
display(Markdown(string))
V_opt = np.zeros((4,12))
V_opt[0] = -np.arange(3, 15)[::-1]
V_opt[1] = -np.arange(3, 15)[::-1] + 1
V_opt[2] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13
pol_opt = np.hstack((np.ones(11), 2, 0))
V_true = np.zeros((4,12))
for i in range(3):
    V_true[i] = -np.arange(3, 15)[::-1] - i
V_true[1][11] = -2
V_true[2][11] = -1
V_true[3][0] = -17
def get_long_path(V):
    return np.array(np.hstack((V[0], V[1][0], V[1][11], V[2][0], V[2][11], V[3][0], V[3][11])))
def get_optimal_path(policy):
return np.array(np.hstack((policy[2][:], policy[3][0])))
class Tests(unittest.TestCase):
def td_prediction_check(self, V):
to_check = get_long_path(V)
soln = get_long_path(V_true)
np.testing.assert_array_almost_equal(soln, to_check)
def td_control_check(self, policy):
to_check = get_optimal_path(policy)
np.testing.assert_equal(pol_opt, to_check)
check = Tests()
def run_check(check_name, func):
try:
getattr(check, check_name)(func)
except check.failureException as e:
printmd('**<span style="color: red;">PLEASE TRY AGAIN</span>**')
return
printmd('**<span style="color: green;">PASSED</span>**') | hetaodie/hetaodie.github.io | assets/media/uda-ml/qinghua/shijianchafenfangfa/迷你项目:时间差分方法(第 0 部分和第 1 部分)/check_test.py | Python | mit | 1,352 |
'''Test animation of a group of objects making a face.
This version replaces moveAllOnLine by moveAllOnLineFlush,
to update the screen only once for each animation step,
after multiple individual graphics instructions are given.'''
from graphics import *
import time
def moveAll(shapeList, dx, dy):
''' Move all shapes in shapeList by (dx, dy).'''
for shape in shapeList:
shape.move(dx, dy)
#NEW Flush version with win parameter
def moveAllOnLineFlush(shapeList, dx, dy, repetitions, delay, win):
'''Animate the shapes in shapeList along a line in win.
Move by (dx, dy) each time.
Repeat the specified number of repetitions.
Have the specified delay (in seconds) after each repeat.
'''
win.autoflush = False # NEW: set before animation
for i in range(repetitions):
moveAll(shapeList, dx, dy)
win.flush() # NEW needed to make all the changes appear
time.sleep(delay)
win.autoflush = True # NEW: set after animation
def main():
win = GraphWin('Back and Forth', 300, 300)
win.yUp() # make right side up coordinates!
rect = Rectangle(Point(200, 90), Point(220, 100))
rect.setFill("blue")
rect.draw(win)
head = Circle(Point(40,100), 25)
head.setFill("yellow")
head.draw(win)
eye1 = Circle(Point(30, 105), 5)
eye1.setFill('blue')
eye1.draw(win)
eye2 = Line(Point(45, 105), Point(55, 105))
eye2.setWidth(3)
eye2.draw(win)
mouth = Oval(Point(30, 90), Point(50, 85))
mouth.setFill("red")
mouth.draw(win)
faceList = [head, eye1, eye2, mouth]
cir2 = Circle(Point(150,125), 25)
cir2.setFill("red")
cir2.draw(win)
moveAllOnLineFlush(faceList, 5, 0, 46, .05, win) # NEW Flush version
moveAllOnLineFlush(faceList, -5, 0, 46, .05, win) # added win
win.promptClose(win.getWidth()/2, 20)
main()
| hwheeler01/comp150 | examples/backAndForth2Flush.py | Python | mit | 1,955 |
"""
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-08 14:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ProfileMaker', '0006_auto_20160902_1308'),
]
operations = [
migrations.DeleteModel(
name='finishedTemplate',
),
migrations.RemoveField(
model_name='templatepackage',
name='generated',
),
migrations.AddField(
model_name='templatepackage',
name='namespace',
field=models.CharField(default='', max_length=20),
),
migrations.AddField(
model_name='templatepackage',
name='root_element',
field=models.CharField(default='', max_length=55),
),
]
| ESSolutions/ESSArch_Core | ESSArch_Core/essxml/ProfileMaker/migrations/0007_auto_20160908_1428.py | Python | gpl-3.0 | 1,696 |
'''
Unit tests for simpletable.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
import unittest
try:
import numpy as np
has_numpy = True
except ImportError:
has_numpy = False
__docformat__ = "restructuredtext en"
from simpletable import Cell, Row, SimpleTable
from simpletable import default_latex_fmt
from simpletable import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class test_Cell(unittest.TestCase):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i%2) for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
self.assertEqual(cell.data, datum)
class test_SimpleTable(unittest.TestCase):
def test_txt_fmt1(self):
"""Limited test of custom txt_fmt"""
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
self.assertEqual(actual, desired)
def test_ltx_fmt1(self):
"""Limited test of custom ltx_fmt"""
desired = r"""
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
self.assertEqual(actual, desired)
def test_html_fmt1(self):
"""Limited test of custom html_fmt"""
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
actual = '\n%s\n' % tbl.as_html()
#print(actual)
#print(desired)
self.assertEqual(actual, desired)
def test_customlabel(self):
"""Limited test of custom custom labeling"""
if has_numpy:
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
print(actual)
print(desired)
self.assertEqual(actual, desired)
def test_csv01(self):
mydata = [[11,12],[21,22]]
myheaders = [ "Column 1", "Column 2" ]
mystubs = [ "Row 1", "Row 2" ]
tbl = SimpleTable(mydata, myheaders, mystubs, title="Title")
actual = '%s' % tbl.as_csv().strip()
desired = """
Title
,Column 1,Column 2
Row 1,11 ,12
Row 2,21 ,22
""".strip()
self.assertEqual(actual, desired)
if __name__=="__main__":
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 autoindent
| alan-isaac/SimpleTable | test_table.py | Python | mit | 4,499 |
# coding: utf-8
from .common import CommonTestCase
class SuggestionsTest(CommonTestCase):
def test_suggestion_url(self):
client = self.client
# self.assertEqual(client.suggestions.address.url, "https://suggestions.dadata.ru/suggestions/api/4_1/rs/suggest/address")
self.assertEqual(client.suggestions.address.url, "https://dadata.ru/api/v2/suggest/address")
def test_that_suggestion_url_is_not_private(self):
self.assertEqual(self.client.suggestions.address.private, False)
def test_that_assigned_data_is_query(self):
self.client.suggest_address = "test"
self.assertEqual(self.client.data, {'query' : 'test'})
| tigrus/dadata-python | tests/test_suggestions.py | Python | mit | 674 |
#!/usr/bin/env python
from tools.load import LoadMatrix
import shogun as sg
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 10.0]]
def kernel_wave (fm_train_real=traindat,fm_test_real=testdat, theta=1.0):
feats_train=sg.create_features(fm_train_real)
feats_test=sg.create_features(fm_test_real)
distance = sg.create_distance('EuclideanDistance')
kernel = sg.create_kernel('WaveKernel', theta=theta, distance=distance)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
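# Note (illustrative, assuming shogun's standard definition): with Euclidean
# distance d = ||x - y||, the wave kernel is k(x, y) = (theta / d) * sin(d / theta).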
if __name__=='__main__':
print('Wave')
kernel_wave(*parameter_list[0])
| shogun-toolbox/shogun | examples/undocumented/python/kernel_wave.py | Python | bsd-3-clause | 846 |
# -*- coding: utf-8 -*-
#
# Test links:
# https://www.androidfilehost.com/?fid=95916177934518197
import re
from module.plugins.internal.SimpleHoster import SimpleHoster
class AndroidfilehostCom(SimpleHoster):
__name__ = "AndroidfilehostCom"
__type__ = "hoster"
__version__ = "0.05"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?androidfilehost\.com/\?fid=\d+'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("fallback" , "bool", "Fallback to free download if premium fails" , True),
("chk_filesize", "bool", "Check file size" , True),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Androidfilehost.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
NAME_PATTERN = r'<br />(?P<N>.*?)</h1>'
SIZE_PATTERN = r'<h4>size</h4>\s*<p>(?P<S>[\d.,]+)(?P<U>[\w^_]+)</p>'
HASHSUM_PATTERN = r'<h4>(?P<H>.*?)</h4>\s*<p><code>(?P<D>.*?)</code></p>'
OFFLINE_PATTERN = r'404 not found'
WAIT_PATTERN = r'users must wait <strong>(\d+) secs'
def setup(self):
self.multiDL = True
self.resume_download = True
self.chunk_limit = 1
def handle_free(self, pyfile):
        wait = re.search(self.WAIT_PATTERN, self.data)
        if wait:
            self.log_debug("Waiting time: %s seconds" % wait.group(1))
fid = re.search(r'id="fid" value="(\d+)" />', self.data).group(1)
self.log_debug("FID: %s" % fid)
html = self.load("https://www.androidfilehost.com/libs/otf/mirrors.otf.php",
post={'submit': 'submit',
'action': 'getdownloadmirrors',
'fid' : fid})
self.link = re.findall('"url":"(.*?)"', html)[0].replace("\\", "")
mirror_host = self.link.split("/")[2]
self.log_debug("Mirror Host: %s" % mirror_host)
html = self.load("https://www.androidfilehost.com/libs/otf/stats.otf.php",
get={'fid' : fid,
'w' : 'download',
'mirror': mirror_host})
| Guidobelix/pyload | module/plugins/hoster/AndroidfilehostCom.py | Python | gpl-3.0 | 2,454 |
from spock.mcp import datautils
from spock.utils import BoundBuffer
def test_unpack_varint():
largebuff = BoundBuffer(b'\x80\x94\xeb\xdc\x03')
smallbuff = BoundBuffer(b'\x14')
assert datautils.unpack_varint(smallbuff) == 20
assert datautils.unpack_varint(largebuff) == 1000000000
def test_pack_varint():
assert datautils.pack_varint(20) == b'\x14'
assert datautils.pack_varint(1000000000) == b'\x80\x94\xeb\xdc\x03'
assert datautils.pack_varint(-10000000000) is None
assert datautils.pack_varint(10000000000) is None
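# Note (illustrative): Minecraft-style varints store 7 payload bits per byte,
# least significant group first, with the high bit as a continuation flag;
# e.g. 1000000000 -> b'\x80\x94\xeb\xdc\x03' as asserted above.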
def test_unpack_varlong():
largebuff = BoundBuffer(b'\x80\xc8\xaf\xa0%')
smallbuff = BoundBuffer(b'\x14')
assert datautils.unpack_varlong(smallbuff) == 20
assert datautils.unpack_varlong(largebuff) == 10000000000
def test_pack_varlong():
assert datautils.pack_varlong(20) == b'\x14'
assert datautils.pack_varlong(10000000000) == b'\x80\xc8\xaf\xa0%'
assert datautils.pack_varlong(10000000000000000000) is None
assert datautils.pack_varlong(-10000000000000000000) is None
| MrSwiss/SpockBot | tests/mcp/test_datautils.py | Python | mit | 1,065 |
'''
Copyright 2015-2016 GoodCrypto.
Last modified: 2016-10-30
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
Manage logging messages encrypted and decrypted by the GoodCrypto server to
prevent others spoofing the security of a message.
'''
import json, os, re
from datetime import datetime
from time import gmtime, strftime
from urllib.parse import unquote
from django.db.models import Q
from goodcrypto.mail.models import MessageHistory
from goodcrypto.mail.utils import gen_password
from goodcrypto.mail.message.constants import SIGNER_VERIFIED
from goodcrypto.mail.message.inspect_utils import get_message_id
from goodcrypto.utils import get_email
from goodcrypto.utils.log_file import LogFile
from syr.exception import record_exception
from syr.mime_constants import DATE_KEYWORD, SUBJECT_KEYWORD
_log = None
DEBUGGING = False
def add_outbound_record(crypto_message, verification_code):
'''
    Add a history record so the user can verify what security measures were applied to an outbound message.
'''
try:
add_record(crypto_message, MessageHistory.OUTBOUND_MESSAGE, verification_code=verification_code)
log_message('added outbound history record from {}'.format(crypto_message.smtp_sender()))
except:
record_exception()
log_message('EXCEPTION 1 - see syr.exception.log for details')
def add_inbound_record(crypto_message, verification_code):
'''
    Add a history record so the user can verify what security measures were applied to an inbound message.
'''
try:
add_record(crypto_message, MessageHistory.INBOUND_MESSAGE, verification_code=verification_code)
log_message('added inbound history record to {}'.format(crypto_message.smtp_recipient()))
except:
record_exception()
log_message('EXCEPTION 2 - see syr.exception.log for details')
def add_record(crypto_message, direction, verification_code=None):
''' Add the message's summary details about its security measures. '''
ok = False
try:
sender = get_email(crypto_message.smtp_sender())
recipient = get_email(crypto_message.smtp_recipient())
message_id = get_message_id(crypto_message.get_email_message())
message_date = crypto_message.get_email_message().get_header(DATE_KEYWORD)
subject = crypto_message.get_email_message().get_header(SUBJECT_KEYWORD)
crypted_with = crypto_message.get_crypted_with()
crypted = crypted_with is not None and len(crypted_with) > 0
metadata_crypted_with = crypto_message.get_metadata_crypted_with()
metadata_crypted = metadata_crypted_with is not None and len(metadata_crypted_with) > 0
# use the encryption for the inner message if possible
if crypted_with is not None and len(crypted_with) > 0:
encryption_programs = crypted_with
else:
encryption_programs = metadata_crypted_with
timestamp = get_isoformat(message_date)
sender_email = get_email(sender)
recipient_email = get_email(recipient)
if sender is None:
log_message(
"unable to record {} message because there's no contact record for {}".format(direction, sender))
elif recipient is None:
log_message(
"unable to record {} message because there's no contact record for {}".format(direction, recipient))
else:
if message_id is None:
message_id = ''
programs = ''
if encryption_programs is not None:
for encryption_program in encryption_programs:
if len(programs) > 0:
programs += ', '
programs += str(encryption_program)
if subject is None:
subject = ''
if timestamp is None:
timestamp = ''
if message_id is None:
message_id = ''
if verification_code is None:
verification_code = gen_verification_code()
if type(verification_code) is list:
verification_code = ' '.join(verification_code)
if DEBUGGING:
log_message("sender: {}".format(sender))
log_message("recipient: {}".format(recipient))
log_message("direction: {}".format(direction))
log_message("timestamp: {}".format(timestamp))
log_message("subject: {}".format(subject))
log_message("message_id: {}".format(message_id))
log_message("verification_code: {}".format(verification_code))
log_message("crypted: {}".format(crypted))
log_message("crypted with: {}".format(crypted_with))
log_message("metadata_crypted: {}".format(metadata_crypted))
log_message("metadatacrypted with: {}".format(metadata_crypted_with))
if encryption_programs: log_message("encryption_programs: {}".format(encryption_programs))
if crypto_message is not None:
log_message("private_signers: {}".format(crypto_message.private_signers_list()))
log_message("clear_signers: {}".format(crypto_message.clear_signers_list()))
log_message("is_dkim_signed: {}".format(crypto_message.is_dkim_signed()))
log_message("is_dkim_sig_verified: {}".format(crypto_message.is_dkim_sig_verified()))
MessageHistory.objects.create(sender=sender_email,
recipient=recipient_email,
direction=direction[:1],
encryption_programs=programs[:MessageHistory.MAX_ENCRYPTION_PROGRAMS],
message_date=timestamp[:MessageHistory.MAX_MESSAGE_DATE],
subject=subject[:MessageHistory.MAX_SUBJECT],
message_id=message_id[:MessageHistory.MAX_MESSAGE_ID],
verification_code=verification_code[:MessageHistory.MAX_VERIFICATION_CODE],
content_protected=crypted,
metadata_protected=metadata_crypted,
private_signers=pack_signers(crypto_message.private_signers_list()),
clear_signers=pack_signers(crypto_message.clear_signers_list()),
dkim_signed=crypto_message.is_dkim_signed(),
dkim_sig_verified=crypto_message.is_dkim_sig_verified(),
)
log_message('created {} history record for {} with {} verification code'.format(
get_direction(direction), sender, verification_code))
ok = True
except:
ok = False
record_exception()
log_message('EXCEPTION 3 - see syr.exception.log for details')
if sender: log_message("sender: {}".format(sender))
if recipient: log_message("recipient: {}".format(recipient))
if direction: log_message("direction: {}".format(direction))
if encryption_programs: log_message("encryption_programs: {}".format(encryption_programs))
if timestamp: log_message("timestamp: {}".format(timestamp))
if subject: log_message("subject: {}".format(subject))
if message_id: log_message("message_id: {}".format(message_id))
if verification_code: log_message("verification_code: {}".format(verification_code))
if crypted: log_message("crypted: {}".format(crypted))
if metadata_crypted: log_message("metadata_crypted: {}".format(metadata_crypted))
if crypto_message is not None:
log_message("private_signers: {}".format(crypto_message.private_signers_list()))
log_message("clear_signers: {}".format(crypto_message.clear_signers_list()))
log_message("is_dkim_signed: {}".format(crypto_message.is_dkim_signed()))
log_message("is_dkim_sig_verified: {}".format(crypto_message.is_dkim_sig_verified()))
return ok
def is_signed(raw_signers):
'''
Returns true if there is at least one signer.
    If raw_signers is packed (json), unpacks it and then checks.
'''
signed = False
try:
if type(raw_signers) is list:
signers = raw_signers
else:
signers = unpack_signers(raw_signers)
if len(signers) > 0:
signed = True
except:
record_exception()
return signed
def is_sig_verified(raw_signers):
''' Returns true if at least one signer was verified. '''
verified_sig = False
try:
if type(raw_signers) is list:
signers = raw_signers
else:
signers = unpack_signers(raw_signers)
if len(signers) > 0:
for signer in signers:
if signer[SIGNER_VERIFIED]:
verified_sig = True
except:
record_exception()
return verified_sig
def pack_signers(signers):
''' Return a packed (i.e., json) version of the signers. '''
try:
log_message('signers: {}'.format(signers))
packed_signers = json.dumps(signers)
        log_message('packed signers: {}'.format(packed_signers))
except:
record_exception()
packed_signers = []
return packed_signers
def unpack_signers(packed_signers):
    ''' Return signers from a packed (json) version. '''
try:
return json.loads(packed_signers)
except:
record_exception()
signers = []
return signers
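# Example (illustrative): pack_signers/unpack_signers round-trip the signer
# list through json, e.g. unpack_signers(pack_signers([])) == [].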
def get_outbound_messages(email):
''' Get the encrypted messages when the email address was the sender. '''
records = []
if email is not None:
address = get_email(email)
try:
sender_records = MessageHistory.objects.filter(sender=address)
records = sender_records.filter(
Q(direction=MessageHistory.OUTBOUND_MESSAGE) )
except MessageHistory.DoesNotExist:
records = []
except Exception:
records = []
record_exception()
log_message('EXCEPTION 4 - see syr.exception.log for details')
else:
address = email
log_message("{} has {} encrypted messages".format(address, len(records)))
return records
def get_inbound_messages(email):
''' Get the decrypted messages when the email address was the recipient. '''
records = []
if email is not None:
address = get_email(email)
try:
recipient_records = MessageHistory.objects.filter(recipient=address)
records = recipient_records.filter(Q(direction=MessageHistory.INBOUND_MESSAGE))
except MessageHistory.DoesNotExist:
records = []
except Exception:
records = []
record_exception()
log_message('EXCEPTION 5 - see syr.exception.log for details')
else:
address = email
log_message("{} has {} decrypted messages".format(address, len(records)))
return records
def get_validated_messages(email, verification_code):
'''
Get the messages with a matching verification code for the email address.
Theoretically, this should just be one message, but we'll remain flexible.
'''
records = []
if email is not None and verification_code is not None:
address = get_email(email)
try:
validated_records = MessageHistory.objects.filter(
verification_code=unquote(verification_code))
records = validated_records.filter(Q(sender=address) | Q(recipient=address) )
except MessageHistory.DoesNotExist:
records = []
except Exception:
records = []
record_exception()
log_message('EXCEPTION 6 - see syr.exception.log for details')
if len(records) <= 0:
try:
validated_records = MessageHistory.objects.filter(verification_code=verification_code)
records = validated_records.filter(Q(sender=address) | Q(recipient=address) )
except MessageHistory.DoesNotExist:
records = []
except Exception:
records = []
record_exception()
log_message('EXCEPTION 7 - see syr.exception.log for details')
log_message("{} has {} crypted messages".format(email, len(records)))
return records
def gen_verification_code():
'''
Generate a verification code.
>>> verification_code = gen_verification_code()
>>> len(verification_code)
24
>>> ' ' not in verification_code
True
'''
verification_code = gen_password(
max_length=MessageHistory.MAX_VERIFICATION_CODE - 1, punctuation_chars='-_.')
return verification_code
def get_direction(direction_code):
'''
Get the direction in words.
>>> direction = get_direction(1)
>>> direction == 'Inbound'
True
'''
try:
code = int(direction_code)
if code > 0 and code <= len(MessageHistory.MESSAGE_DIRECTIONS):
__, direction = MessageHistory.MESSAGE_DIRECTIONS[code-1]
else:
direction = ''
except:
direction = ''
return direction
def get_isoformat(message_date):
''' Get the timestamp in iso format. '''
def get_year(yr):
''' Get the year as a 4 digit number. '''
if len(yr) < 4:
if yr.startswith('7') or yr.startswith('8') or yr.startswith('9'):
yr = '19' + yr
else:
yr = '20' + yr
year = int(yr)
return year
# map month abbrevs to numeric equivalent
MONTH_MAP = {'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12}
if message_date is None:
message_date = strftime("%a, %d %b %Y %H:%M:%S", gmtime())
try:
Date_Format = re.compile(r'''(?P<wk_day>.*,)?\s+(?P<day>\d*) (?P<month>.*) (?P<year>\d*) (?P<hour>\d*):(?P<min>\d*):(?P<sec>\d*)\s*(?P<gmt_offset>.*)''')
m = Date_Format.search(message_date)
if not m:
Date_Format = re.compile(r'''(?P<day>\d*) (?P<month>.*) (?P<year>\d*) (?P<hour>\d*):(?P<min>\d*):(?P<sec>\d*)\s*(?P<gmt_offset>.*)''')
m = Date_Format.search(message_date)
if m:
day = int(m.group('day'))
month = int(MONTH_MAP[m.group('month')])
year = get_year(m.group('year'))
hour = int(m.group('hour'))
minutes = int(m.group('min'))
seconds = int(m.group('sec'))
timestamp = datetime(year, month, day, hour, minutes, seconds).isoformat(str(' '))
if m.group('gmt_offset'):
gmt_offset = m.group('gmt_offset')
if gmt_offset.lower() == 'gmt' or gmt_offset.lower() == 'utc':
gmt_offset = '+0000'
timestamp += ' {}'.format(gmt_offset)
else:
log_message('unable to format date: {}'.format(message_date))
timestamp = message_date.decode()
except:
try:
timestamp = message_date.decode()
except:
timestamp = datetime.today().isoformat(str(' '))
log_message('unable to format date ({}) so defaulting to today'.format(message_date))
return timestamp
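# Example (illustrative):
# get_isoformat('Tue, 01 Mar 2016 14:05:59 +0000') returns
# '2016-03-01 14:05:59 +0000'.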
def log_message(message):
'''
Log a message to the local log.
>>> import os.path
>>> from syr.log import BASE_LOG_DIR
>>> from syr.user import whoami
>>> log_message('test')
>>> os.path.exists(os.path.join(BASE_LOG_DIR, whoami(), 'goodcrypto.mail.message.history.log'))
True
'''
global _log
if _log is None:
_log = LogFile()
_log.write_and_flush(message)
| goodcrypto/goodcrypto-mail | goodcrypto/mail/message/history.py | Python | gpl-3.0 | 16,318 |
from django.contrib import admin
from organization.models import Organization
class OrganizationAdmin(admin.ModelAdmin):
pass
admin.site.register(Organization, OrganizationAdmin)
| KokareIITP/vms | vms/organization/admin.py | Python | gpl-2.0 | 185 |
"""
This module is in charge of handling URLs in lectio.
"""
from .config import BASE_URL
def _make_url(school_id, endpoint):
"""
    Crafts a URL based on the ``school_id`` and ``endpoint``.
``school_id`` is a number as a string object
Example:
``school_id = "248"``
``endpoint`` is a string object containing with the endpoint
Example:
``endpoint = "SkemaNy.aspx"``
"""
url = BASE_URL + school_id + "/" + endpoint
return url
def make_timetable_url(school_id):
"""
    Returns a Lectio timetable URL for a given school.
"""
return _make_url(school_id, "SkemaNy.aspx")
def make_login_url(school_id):
"""
Returns a Lectio Login URL.
"""
return _make_url(school_id, "login.aspx")
def make_frontpage_url(school_id):
"""
Returns a Lectio Frontpage URL.
"""
return _make_url(school_id, "forside.aspx")
def make_assignments_overview_url(school_id):
"""
Returns a Lectio Assignment Overview URL.
"""
return _make_url(school_id, "OpgaverElev.aspx")
| dkkline/pylectio | lectio/urls.py | Python | mit | 1,118 |
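# Replace the sentinel value -99. with NaN (assumes `AR3` is a float NumPy
# array and that `import numpy as np` ran in an earlier notebook cell).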
AR3[AR3 == -99.] = np.nan
AR3 | stijnvanhoey/course_gis_scripting | notebooks/_solutions/02-scientific-python-introduction58.py | Python | bsd-3-clause | 29 |
from z3 import *
import itertools, collections
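# Uncommented in the original; this appears to encode the "three wise men"
# number puzzle: three distinct values in 1..n, where each reasoning round
# asserts that one candidate value lies strictly between the other two, and
# the main loop reports the deepest round i that stays satisfiable.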
class SolverGen:
def __init__(self):
self.varCount = 0
def genVar(self, i):
self.varCount += 1
return Int("%s_%d" % ("abc"[i], self.varCount))
def addSolver(self, solver, i, n, vs):
if i == 0: return
myIndex, unknownIndex = (i + 2) % 3, (i + 1) % 3
oldvs = vs[:unknownIndex] + vs[unknownIndex+1:]
tmpvs = [self.genVar(unknownIndex) for j in range(3)]
for j in range(3):
solver.add(And(1 <= tmpvs[j], tmpvs[j] <= n))
solver.add([tmpvs[j] != y for y in oldvs])
xs1 = vs[:unknownIndex] + [tmpvs[j]] + vs[unknownIndex+1:]
x1, y1, z1 = xs1[j:] + xs1[:j]
solver.add(Or(And(y1 < x1, x1 < z1), And(z1 < x1, x1 < y1)))
for k in range(i):
self.addSolver(solver, k, n, xs1)
return
def makeSolver(self, i, n):
solver = Solver()
vs = [Int(x) for x in 'abc']
solver.add([And(1 <= x, x <= n) for x in vs])
solver.add([x != y for x, y in itertools.combinations(vs, 2)])
for j in range(i + 1):
self.addSolver(solver, j, n, vs)
return solver
n = 20
for i in range(20):
solver = SolverGen().makeSolver(i, n)
if solver.check() == sat:
print(i)
print(solver)
print(solver.model())
else:
break
| u-tokyo-gps-tanaka-lab/three_wise_men | 3.py | Python | gpl-3.0 | 1,400 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from scripttest import TestFileEnvironment
current_dir = os.path.dirname(__file__)
def test_cli():
env = TestFileEnvironment('./test-output')
img1 = os.path.join(current_dir, 'test1.png')
img2 = os.path.join(current_dir, 'test2.png')
imgx = os.path.join(current_dir, '*.png')
res = env.run('bild', '-f', img1)
assert re.findall(r'http://s1.bild.me/bilder/\d+/\d+.*', res.stdout)
res = env.run('bild', '-qf', img1, img2)
assert len(re.findall(r'http://s1.bild.me/bilder/\d+/\d+.*', res.stdout)) == 2
res = env.run('bild', '-lf', img1)
assert re.findall(r'http://s1.bild.me/bilder/\d+/\d+.*', res.stdout)
assert '[URL=http://www.bild.me][IMG]' in res.stdout
res = env.run('bild', '-qlf', img1, img2)
assert re.findall(r'http://s1.bild.me/bilder/\d+/\d+.*', res.stdout)
assert len(re.findall(r'\[URL=http://www.bild.me\]\[IMG\]', res.stdout)) == 2
res = env.run('bild', '-qf', imgx)
assert len(re.findall(r'http://s1.bild.me/bilder/\d+/\d+.*', res.stdout)) == 2
| mozillazg/bild.me-cli | tests/test_cli.py | Python | mit | 1,099 |
# coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2017 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
# singleton
_instance = None
DEFAULT_MANAGER = 'marlin'
# This object is recreated when the driver is changed in the printer profile page.
# DO NOT store a reference to the result of printerManager in any persistent object.
def printerManager(driver = None):
global _instance
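	# a different driver was selected: tear down the old manager so a fresh one is built below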
if driver is not None and _instance is not None and _instance.driverName != driver:
_instance.rampdown()
_instance = None
if _instance is None and driver:
if driver.startswith('plugin:'):
from astroprint.printer.plugin import PrinterWithPlugin, NoPluginException
try:
_instance = PrinterWithPlugin(driver[7:])
except NoPluginException:
#The plugin is gone. Pick the default
from astroprint.printerprofile import printerProfileManager
ppm = printerProfileManager()
ppm.set({'driver': DEFAULT_MANAGER})
ppm.save()
else:
import importlib
try:
# driver name to class map. format is (module, classname)
classInfo = {
'marlin': ('.marlin', 'PrinterMarlin'),
's3g': ('.s3g', 'PrinterS3g')
}[driver]
except KeyError:
classInfo = ('.marlin', 'PrinterMarlin')
module = importlib.import_module(classInfo[0], 'astroprint.printer')
_instance = getattr(module, classInfo[1])()
return _instance
| AstroPrint/AstroBox | src/astroprint/printer/manager.py | Python | agpl-3.0 | 1,517 |
import sys
def count(value, last_coin=50):
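    # Count ways to make `value` cents from coins {50, 25, 10, 5, 1}; coins are
    # emitted in non-increasing order (capped by last_coin) so that each
    # combination is counted exactly once.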
if value == 0:
return 1
result = 0
for coin in [50, 25, 10, 5, 1]:
if coin <= value and coin <= last_coin:
result += count(value - coin, coin)
return result
def main():
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.strip()
if len(test) == 0: continue
test = int(test)
print(count(test))
test_cases.close()
if __name__ == '__main__':
main()
| mpillar/codeeval | 1-moderate/alternative-reality/main.py | Python | unlicense | 507 |
"""Django settings for jiggety project."""
#import os, sys
#sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
COUCHDB_DATABASES = (
('jiggety.task_configs', 'http://ssspiochld:password@127.0.0.1:5984/jiggety_configs'),
('jiggety.task_results', 'http://ssspiochld:password@127.0.0.1:5984/jiggety_results'),
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_qh1kri5k-5+-byuo!bbob)2&b*n5skdo%=aq%8#)^)duo0)0h'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'jiggety.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
# 'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'couchdbkit.ext.django',
'jiggety.task_configs',
'jiggety.task_results',
# 'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Jiggety settings. Leave defaults or make your changes here
#JIGGETY_REFRESH_RATE = '1 * * * *'
#JIGGETY_TEST_FAIL_LOG_LEVEL = "WARN"
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'jiggety_formatter': {
'format': '[%(levelname)s] %(asctime)s %(message)s',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'jiggety_formatter',
},
'jiggety_logfile': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename':'/tmp/jiggety.log',
'formatter': 'jiggety_formatter',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'jiggety.tasks': {
'handlers': ['console', 'jiggety_logfile', 'mail_admins'],
'level': 'DEBUG',
},
}
}
| lukmdo/Jiggety | jiggety/settings.py | Python | mit | 5,912 |
#!/usr/bin/env python
"""t is for people that want do things, not organize their tasks."""
from __future__ import with_statement
import os, re, sys, hashlib
from operator import itemgetter
from optparse import OptionParser, OptionGroup
class InvalidTaskfile(Exception):
"""Raised when the path to a task file already exists as a directory."""
pass
class AmbiguousPrefix(Exception):
"""Raised when trying to use a prefix that could identify multiple tasks."""
def __init__(self, prefix):
super(AmbiguousPrefix, self).__init__()
self.prefix = prefix
class UnknownPrefix(Exception):
"""Raised when trying to use a prefix that does not match any tasks."""
def __init__(self, prefix):
super(UnknownPrefix, self).__init__()
self.prefix = prefix
def _hash(text):
"""Return a hash of the given text for use as an id.
Currently SHA1 hashing is used. It should be plenty for our purposes.
"""
return hashlib.sha1(text).hexdigest()
def _task_from_taskline(taskline):
"""Parse a taskline (from a task file) and return a task.
A taskline should be in the format:
summary text ... | meta1:meta1_value,meta2:meta2_value,...
The task returned will be a dictionary such as:
{ 'id': <hash id>,
'text': <summary text>,
... other metadata ... }
A taskline can also consist of only summary text, in which case the id
and other metadata will be generated when the line is read. This is
supported to enable editing of the taskfile with a simple text editor.
"""
if taskline.strip().startswith('#'):
return None
elif '|' in taskline:
text, _, meta = taskline.rpartition('|')
task = { 'text': text.strip() }
for piece in meta.strip().split(','):
label, data = piece.split(':')
task[label.strip()] = data.strip()
else:
text = taskline.strip()
task = { 'id': _hash(text), 'text': text }
return task
def _tasklines_from_tasks(tasks):
"""Parse a list of tasks into tasklines suitable for writing."""
tasklines = []
for task in tasks:
meta = [m for m in task.items() if m[0] != 'text']
meta_str = ', '.join('%s:%s' % m for m in meta)
tasklines.append('%s | %s\n' % (task['text'], meta_str))
return tasklines
def _prefixes(ids):
"""Return a mapping of ids to prefixes in O(n) time.
Each prefix will be the shortest possible substring of the ID that
can uniquely identify it among the given group of IDs.
If an ID of one task is entirely a substring of another task's ID, the
entire ID will be the prefix.
"""
ps = {}
for id in ids:
id_len = len(id)
for i in range(1, id_len+1):
# identifies an empty prefix slot, or a singular collision
prefix = id[:i]
            if (prefix not in ps) or (ps[prefix] and prefix != ps[prefix]):
break
if prefix in ps:
# if there is a collision
other_id = ps[prefix]
for j in range(i, id_len+1):
if other_id[:j] == id[:j]:
ps[id[:j]] = ''
else:
ps[other_id[:j]] = other_id
ps[id[:j]] = id
break
else:
ps[other_id[:id_len+1]] = other_id
ps[id] = id
else:
# no collision, can safely add
ps[prefix] = id
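    # invert the map: ps now goes id -> shortest unique prefix; collision
    # placeholders collapse into the '' key, which is removed just below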
ps = dict(zip(ps.values(), ps.keys()))
if '' in ps:
del ps['']
return ps
class TaskDict(object):
"""A set of tasks, both finished and unfinished, for a given list.
The list's files are read from disk when the TaskDict is initialized. They
can be written back out to disk with the write() function.
"""
def __init__(self, taskdir='.', name='tasks'):
"""Initialize by reading the task files, if they exist."""
self.tasks = {}
self.done = {}
self.name = name
self.taskdir = taskdir
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
if os.path.exists(path):
with open(path, 'r') as tfile:
tls = [tl.strip() for tl in tfile if tl]
tasks = map(_task_from_taskline, tls)
for task in tasks:
if task is not None:
getattr(self, kind)[task['id']] = task
def __getitem__(self, prefix):
"""Return the unfinished task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
matched = filter(lambda tid: tid.startswith(prefix), self.tasks.keys())
if len(matched) == 1:
return self.tasks[matched[0]]
elif len(matched) == 0:
raise UnknownPrefix(prefix)
else:
matched = filter(lambda tid: tid == prefix, self.tasks.keys())
if len(matched) == 1:
return self.tasks[matched[0]]
else:
raise AmbiguousPrefix(prefix)
def add_task(self, text):
"""Add a new, unfinished task with the given summary text."""
task_id = _hash(text)
self.tasks[task_id] = {'id': task_id, 'text': text}
def edit_task(self, prefix, text):
"""Edit the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
task = self[prefix]
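        # support sed-style substitutions: "s/find/repl/" (or "/find/repl/")
        # rewrites the existing summary instead of replacing it wholesale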
if text.startswith('s/') or text.startswith('/'):
text = re.sub('^s?/', '', text).rstrip('/')
find, _, repl = text.partition('/')
text = re.sub(find, repl, task['text'])
task['text'] = text
def finish_task(self, prefix):
"""Mark the task with the given prefix as finished.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
task = self.tasks.pop(self[prefix]['id'])
self.done[task['id']] = task
def remove_task(self, prefix):
"""Remove the task from tasks list.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
self.tasks.pop(self[prefix]['id'])
def print_list(self, kind='tasks', verbose=False, quiet=False, grep=''):
"""Print out a nicely formatted list of unfinished tasks."""
tasks = dict(getattr(self, kind).items())
label = 'prefix' if not verbose else 'id'
if not verbose:
for task_id, prefix in _prefixes(tasks).items():
tasks[task_id]['prefix'] = prefix
plen = max(map(lambda t: len(t[label]), tasks.values())) if tasks else 0
for _, task in sorted(tasks.items()):
if grep.lower() in task['text'].lower():
p = '%s - ' % task[label].ljust(plen) if not quiet else ''
print p + task['text']
def write(self, delete_if_empty=False):
"""Flush the finished and unfinished tasks to the files on disk."""
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
tasks = sorted(getattr(self, kind).values(), key=itemgetter('id'))
if tasks or not delete_if_empty:
with open(path, 'w') as tfile:
for taskline in _tasklines_from_tasks(tasks):
tfile.write(taskline)
elif not tasks and os.path.isfile(path):
os.remove(path)
def _build_parser():
"""Return a parser for the command-line interface."""
usage = "Usage: %prog [-t DIR] [-l LIST] [options] [TEXT]"
parser = OptionParser(usage=usage)
actions = OptionGroup(parser, "Actions",
"If no actions are specified the TEXT will be added as a new task.")
actions.add_option("-e", "--edit", dest="edit", default="",
help="edit TASK to contain TEXT", metavar="TASK")
actions.add_option("-f", "--finish", dest="finish",
help="mark TASK as finished", metavar="TASK")
actions.add_option("-r", "--remove", dest="remove",
help="Remove TASK from list", metavar="TASK")
parser.add_option_group(actions)
config = OptionGroup(parser, "Configuration Options")
config.add_option("-l", "--list", dest="name", default="tasks",
help="work on LIST", metavar="LIST")
config.add_option("-t", "--task-dir", dest="taskdir", default="",
help="work on the lists in DIR", metavar="DIR")
config.add_option("-d", "--delete-if-empty",
action="store_true", dest="delete", default=False,
help="delete the task file if it becomes empty")
parser.add_option_group(config)
output = OptionGroup(parser, "Output Options")
output.add_option("-g", "--grep", dest="grep", default='',
help="print only tasks that contain WORD", metavar="WORD")
output.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print more detailed output (full task ids, etc)")
output.add_option("-q", "--quiet",
action="store_true", dest="quiet", default=False,
help="print less detailed output (no task ids, etc)")
output.add_option("--done",
action="store_true", dest="done", default=False,
help="list done tasks instead of unfinished ones")
parser.add_option_group(output)
return parser
def _main():
"""Run the command-line interface."""
(options, args) = _build_parser().parse_args()
td = TaskDict(taskdir=options.taskdir, name=options.name)
text = ' '.join(args).strip()
try:
if options.finish:
td.finish_task(options.finish)
td.write(options.delete)
elif options.remove:
td.remove_task(options.remove)
td.write(options.delete)
elif options.edit:
td.edit_task(options.edit, text)
td.write(options.delete)
elif text:
td.add_task(text)
td.write(options.delete)
else:
kind = 'tasks' if not options.done else 'done'
td.print_list(kind=kind, verbose=options.verbose, quiet=options.quiet,
grep=options.grep)
except AmbiguousPrefix, e:
sys.stderr.write('The ID "%s" matches more than one task.\n' % e.prefix)
except UnknownPrefix, e:
sys.stderr.write('The ID "%s" does not match any task.\n' % e.prefix)
if __name__ == '__main__':
_main()
| nafigator/dotfiles | tasks/.tasks/t.py | Python | mit | 11,581 |
from pathlib import Path
from test.yara_signature_testing import SignatureTestingMatching, SignatureTestingMeta
TEST_DATA_DIR = Path(__file__).parent / 'data'
SIGNATURE_PATH = Path(__file__).parent.parent / 'signatures/'
TEST_SIGNATURE_PATH = Path(__file__).parent.parent / 'test/data/signatures/'
class TestSoftwareSignatureMeta:
@classmethod
def setup_class(cls):
cls.sigTest = SignatureTestingMeta()
def test_check_meta_fields(self):
missing_fields = self.sigTest.check_meta_fields(SIGNATURE_PATH)
assert not missing_fields, f'Missing meta fields: {missing_fields.__str__()}'
def test_check_meta_fields_missing(self):
missing_fields = self.sigTest.check_meta_fields(TEST_SIGNATURE_PATH)
assert len(missing_fields) == 3
assert all(
entry in missing_fields
for entry in ['website in missing_meta_1', 'description in missing_meta_1', 'ALL in missing_meta_2']
)
class TestAllSoftwareSignaturesMatched:
def setup_method(self):
self.sig_tester = SignatureTestingMatching() # pylint: disable=attribute-defined-outside-init
def test_all_signatures_matched(self):
diff = self.sig_tester.check(SIGNATURE_PATH, TEST_DATA_DIR / 'software_component_test_list.txt')
assert diff == set(), f'Missing signature for {diff}'
| fkie-cad/FACT_core | src/plugins/analysis/software_components/test/test_plugin_software_components_signatures.py | Python | gpl-3.0 | 1,349 |
# -*- coding: utf-8 -*-
from gitlint.rules import LineRule
class MyUserLineRule(LineRule):
id = "UC2"
name = "my-lïne-rule"
# missing validate method, missing target attribute
| jorisroovers/gitlint | gitlint-core/gitlint/tests/samples/user_rules/incorrect_linerule/my_line_rule.py | Python | mit | 193 |
# Check the behavior of the ALLOW_RETRIES keyword.
# This test uses a counter file that is stable across retries, so the test
# keeps failing and only succeeds on the fourth attempt.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
# CHECK-TEST1: Passed With Retry: 1
# Test that a per-file ALLOW_RETRIES overwrites the config-wide test_retry_attempts property, if any.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
# CHECK-TEST2: Passed With Retry: 1
# This test does not succeed within the allowed retry limit
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
# CHECK-TEST3: Failed Tests (1):
# CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
# This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
# line, and that is not allowed.
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
# CHECK-TEST4: Unresolved Tests (1):
# CHECK-TEST4: allow-retries :: more-than-one-allow-retries-lines.py
# This test does not provide a valid integer to the ALLOW_RETRIES keyword.
# It should be unresolved.
#
# RUN: not %{lit} -j 1 %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
# CHECK-TEST5: Unresolved Tests (1):
# CHECK-TEST5: allow-retries :: not-a-valid-integer.py
# This test checks that the config-wide test_retry_attempts property is used
# when no ALLOW_RETRIES keyword is present.
#
# RUN: rm -f %t.counter
# RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
# CHECK-TEST6: Passed With Retry: 1
| google/llvm-propeller | llvm/utils/lit/tests/allow-retries.py | Python | apache-2.0 | 1,984 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1DaemonSetUpdateStrategy(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rolling_update': 'V1RollingUpdateDaemonSet',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1DaemonSetUpdateStrategy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""Gets the rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
:return: The rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
:rtype: V1RollingUpdateDaemonSet
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""Sets the rolling_update of this V1DaemonSetUpdateStrategy.
:param rolling_update: The rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
:type: V1RollingUpdateDaemonSet
"""
self._rolling_update = rolling_update
@property
def type(self):
"""Gets the type of this V1DaemonSetUpdateStrategy. # noqa: E501
Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate. Possible enum values: - `\"OnDelete\"` Replace the old daemons only when it's killed - `\"RollingUpdate\"` Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. # noqa: E501
:return: The type of this V1DaemonSetUpdateStrategy. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1DaemonSetUpdateStrategy.
Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate. Possible enum values: - `\"OnDelete\"` Replace the old daemons only when it's killed - `\"RollingUpdate\"` Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. # noqa: E501
:param type: The type of this V1DaemonSetUpdateStrategy. # noqa: E501
:type: str
"""
allowed_values = ["OnDelete", "RollingUpdate"] # noqa: E501
if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonSetUpdateStrategy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DaemonSetUpdateStrategy):
return True
return self.to_dict() != other.to_dict()
| kubernetes-client/python | kubernetes/client/models/v1_daemon_set_update_strategy.py | Python | apache-2.0 | 5,285 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LW
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0,
help="Virtual disk to physical disk allocation ratio")
CONF = cfg.CONF
CONF.register_opt(disk_allocation_ratio_opt)
class DiskFilter(filters.BaseHostFilter):
"""Disk Filter with over subscription flag."""
def _get_disk_allocation_ratio(self, host_state, filter_properties):
return CONF.disk_allocation_ratio
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')
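        # requested disk in MB: root/ephemeral flavor sizes are in GB,
        # swap is already in MB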
requested_disk = (1024 * (instance_type['root_gb'] +
instance_type['ephemeral_gb']) +
instance_type['swap'])
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
disk_allocation_ratio = self._get_disk_allocation_ratio(
host_state, filter_properties)
disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
        if usable_disk_mb < requested_disk:
LOG.debug("%(host_state)s does not have %(requested_disk)s MB "
"usable disk, it only has %(usable_disk_mb)s MB usable "
"disk.", {'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': usable_disk_mb})
return False
disk_gb_limit = disk_mb_limit / 1024
host_state.limits['disk_gb'] = disk_gb_limit
return True
class AggregateDiskFilter(DiskFilter):
"""AggregateDiskFilter with per-aggregate disk allocation ratio flag.
Fall back to global disk_allocation_ratio if no per-aggregate setting
found.
"""
def _get_disk_allocation_ratio(self, host_state, filter_properties):
# TODO(uni): DB query in filter is a performance hit, especially for
# system with lots of hosts. Will need a general solution here to fix
# all filters with aggregate DB call things.
aggregate_vals = utils.aggregate_values_from_db(
filter_properties['context'],
host_state.host,
'disk_allocation_ratio')
try:
ratio = utils.validate_num_values(
aggregate_vals, CONF.disk_allocation_ratio, cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
ratio = CONF.disk_allocation_ratio
return ratio
| affo/nova | nova/scheduler/filters/disk_filter.py | Python | apache-2.0 | 3,530 |
"""
Class which embellishes the DataCollectionView with buttons and actions for
editing the data collection
"""
import operator
from ...external.qt.QtGui import (QWidget, QMenu,
QAction, QKeySequence, QFileDialog)
from ...external.qt.QtCore import Qt, Signal, QObject
from ..ui.layertree import Ui_LayerTree
from ... import core
from ..link_editor import LinkEditor
from .. import qtutil
from ..qtutil import get_icon, nonpartial
from .custom_component_widget import CustomComponentWidget
from ..actions import act as _act
from ...core.edit_subset_mode import AndMode, OrMode, XorMode, AndNotMode
from .subset_facet import SubsetFacet
@core.decorators.singleton
class Clipboard(object):
def __init__(self):
self.contents = None
class LayerAction(QAction):
_title = ''
_icon = None
_tooltip = None
_enabled_on_init = False
_shortcut = None
_shortcut_context = Qt.WidgetShortcut
def __init__(self, layer_tree_widget):
self._parent = layer_tree_widget.layerTree
super(LayerAction, self).__init__(self._title.title(), self._parent)
self._layer_tree = layer_tree_widget
if self._icon:
self.setIcon(get_icon(self._icon))
if self._tooltip:
self.setToolTip(self._tooltip)
self.setEnabled(self._enabled_on_init)
if self._shortcut_context is not None:
self.setShortcutContext(self._shortcut_context)
if self._shortcut:
self.setShortcut(self._shortcut)
self._parent.addAction(self)
self._connect()
def _connect(self):
self._parent.selection_changed.connect(
self.update_enabled)
self.triggered.connect(nonpartial(self._do_action))
def selected_layers(self):
return self._layer_tree.selected_layers()
@property
def data_collection(self):
return self._layer_tree.data_collection
def update_enabled(self):
enabled = self._can_trigger()
self.setEnabled(enabled)
self.setVisible(enabled)
def single_selection(self):
return len(self.selected_layers()) == 1
def single_selection_subset(self):
layers = self.selected_layers()
if len(layers) != 1:
return False
return isinstance(layers[0], core.Subset)
def single_selection_subset_group(self):
layers = self.selected_layers()
if len(layers) != 1:
return False
return isinstance(layers[0], core.SubsetGroup)
def _can_trigger(self):
raise NotImplementedError
def _do_action(self):
raise NotImplementedError
class PlotAction(LayerAction):
"""Visualize the selection. Requires GlueApplication"""
_title = "Plot Data"
_tooltip = "Make a plot of this selection"
def __init__(self, tree, app):
super(PlotAction, self).__init__(tree)
self.app = app
def _can_trigger(self):
return self.single_selection()
def _do_action(self):
assert self._can_trigger()
data = self.selected_layers()[0].data
self.app.choose_new_data_viewer(data)
class FacetAction(LayerAction):
"""Add a sequence of subsets which facet a ComponentID"""
_title = "Create faceted subsets"
_tooltip = "Create faceted subsets"
def _can_trigger(self):
return len(self._layer_tree.data_collection) > 0
def _do_action(self):
layers = self.selected_layers()
default = layers[0].data if len(layers) > 0 else None
SubsetFacet.facet(self._layer_tree.data_collection,
parent=self._layer_tree, default=default)
class NewAction(LayerAction):
_title = "New Subset"
_tooltip = "Create a new subset"
_icon = "glue_subset"
_shortcut = QKeySequence('Ctrl+Shift+N')
def _can_trigger(self):
return len(self.data_collection) > 0
def _do_action(self):
assert self._can_trigger()
self.data_collection.new_subset_group()
class ClearAction(LayerAction):
_title = "Clear subset"
_tooltip = "Clear current subset"
_shortcut = QKeySequence('Ctrl+K')
def _can_trigger(self):
return self.single_selection_subset_group()
def _do_action(self):
assert self._can_trigger()
subset = self.selected_layers()[0]
subset.subset_state = core.subset.SubsetState()
class DeleteAction(LayerAction):
_title = "Delete Layer"
_tooltip = "Delete the selected data and/or subset Groups"
_shortcut = QKeySequence(Qt.Key_Backspace)
def _can_trigger(self):
selection = self.selected_layers()
return all(isinstance(s, (core.Data, core.SubsetGroup))
for s in selection)
def _do_action(self):
assert self._can_trigger()
selection = self.selected_layers()
for s in selection:
if isinstance(s, core.Data):
self._layer_tree.data_collection.remove(s)
else:
assert isinstance(s, core.SubsetGroup)
self._layer_tree.data_collection.remove_subset_group(s)
class LinkAction(LayerAction):
_title = "Link Data"
_tooltip = "Define links between data sets"
_data_link_message = "Define links between data sets"
_icon = "glue_link"
def __init__(self, *args, **kwargs):
super(LinkAction, self).__init__(*args, **kwargs)
self._link_icon = get_icon(self._icon)
self._unlink_icon = get_icon('glue_unlink')
def _can_trigger(self):
return len(self.data_collection) > 0
def _do_action(self):
LinkEditor.update_links(self.data_collection)
class SaveAction(LayerAction):
_title = "Save subset"
_tooltip = "Save the mask for this subset to a file"
def _can_trigger(self):
return self.single_selection_subset()
def _do_action(self):
assert self._can_trigger()
subset = self.selected_layers()[0]
save_subset(subset)
class CopyAction(LayerAction):
_title = "Copy subset"
_tooltip = "Copy the definition for the selected subset"
_shortcut = QKeySequence.Copy
def _can_trigger(self):
return self.single_selection_subset_group()
def _do_action(self):
assert self._can_trigger()
subset = self.selected_layers()[0]
Clipboard().contents = subset.subset_state
class PasteAction(LayerAction):
_title = "Paste subset"
_tooltip = "Overwrite selected subset with contents from clipboard"
_shortcut = QKeySequence.Paste
def _can_trigger(self):
if not self.single_selection_subset_group():
return False
cnt = Clipboard().contents
if not isinstance(cnt, core.subset.SubsetState):
return False
return True
def _do_action(self):
assert self._can_trigger()
layer = self.selected_layers()[0]
layer.paste(Clipboard().contents)
class PasteSpecialAction(PasteAction):
_title = "Paste Special..."
_tooltip = "Paste with boolean logic"
_shortcut = None
def __init__(self, *args, **kwargs):
super(PasteSpecialAction, self).__init__(*args, **kwargs)
self.setMenu(self.menu())
def menu(self):
m = QMenu()
a = QAction("Or", m)
a.setIcon(get_icon('glue_or'))
a.triggered.connect(nonpartial(self._paste, OrMode))
m.addAction(a)
a = QAction("And", m)
a.setIcon(get_icon('glue_and'))
a.triggered.connect(nonpartial(self._paste, AndMode))
m.addAction(a)
a = QAction("XOR", m)
a.setIcon(get_icon('glue_xor'))
a.triggered.connect(nonpartial(self._paste, XorMode))
m.addAction(a)
a = QAction("Not", m)
a.setIcon(get_icon('glue_andnot'))
a.triggered.connect(nonpartial(self._paste, AndNotMode))
m.addAction(a)
return m
def _paste(self, mode):
if not self._can_trigger():
return
assert self._can_trigger()
layer = self.selected_layers()[0]
mode(layer, Clipboard().contents)
def _do_action(self):
pass
class Inverter(LayerAction):
_title = "Invert"
_icon = "glue_not"
_tooltip = "Invert selected subset"
def _can_trigger(self):
""" Can trigger iff one subset is selected """
return self.single_selection_subset_group()
def _do_action(self):
"""Replace selected subset with its inverse"""
assert self._can_trigger()
subset, = self.selected_layers()
subset.subset_state = core.subset.InvertState(subset.subset_state)
class LayerCommunicator(QObject):
layer_check_changed = Signal(object, bool)
class LayerTreeWidget(QWidget, Ui_LayerTree):
"""The layertree widget provides a way to visualize the various
data and subset layers in a Glue session.
This widget relies on sending/receiving messages to/from the hub
    to maintain synchronization with the data collection it manages. If
it isn't attached to a hub, interactions may not propagate properly.
"""
def __init__(self, parent=None):
Ui_LayerTree.__init__(self)
QWidget.__init__(self, parent)
self._signals = LayerCommunicator()
self._is_checkable = True
self._layer_check_changed = self._signals.layer_check_changed
self._layer_dict = {}
self._actions = {}
self.setupUi(self)
self._create_actions()
self._connect()
self._data_collection = None
self._hub = None
self.layerTree.setDragEnabled(True)
@property
def data_collection(self):
return self._data_collection
def setup(self, collection):
self._data_collection = collection
self._hub = collection.hub
self.layerTree.set_data_collection(collection)
def unregister(self, hub):
"""Unsubscribe from hub"""
self.layerTree.unregister(hub)
def is_checkable(self):
""" Return whether checkboxes appear next o layers"""
return self.layerTree.checkable
def set_checkable(self, state):
""" Setw hether checkboxes appear next o layers"""
self.layerTree.checkable = state
def selected_layers(self):
""" Return a list of selected layers (subsets and data objects) """
return self.layerTree.selected_layers()
def current_layer(self):
"""Return the layer if a single item is selected, else None """
layers = self.selected_layers()
if len(layers) == 1:
return layers[0]
def actions(self):
""" Return the list of actions attached to this widget """
return self.layerTree.actions()
def _connect(self):
""" Connect widget signals to methods """
self._actions['link'] = LinkAction(self)
self.layerAddButton.clicked.connect(self._load_data)
self.layerRemoveButton.clicked.connect(self._actions['delete'].trigger)
self.linkButton.set_action(self._actions['link'])
self.newSubsetButton.set_action(self._actions['new'], text=False)
rbut = self.layerRemoveButton
def update_enabled():
return rbut.setEnabled(self._actions['delete'].isEnabled())
self.layerTree.selection_changed.connect(update_enabled)
def bind_selection_to_edit_subset(self):
self.layerTree.selection_changed.connect(
self._update_editable_subset)
def _update_editable_subset(self):
"""Update edit subsets to match current selection"""
layers = self.selected_layers()
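        # expand any selected subset groups into their per-dataset subsets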
layers.extend(s for l in layers
if isinstance(l, core.SubsetGroup)
for s in l.subsets)
for data in self.data_collection:
data.edit_subset = [s for s in data.subsets if s in layers]
def _create_component(self):
CustomComponentWidget.create_component(self.data_collection)
def _create_actions(self):
tree = self.layerTree
sep = QAction("", tree)
sep.setSeparator(True)
tree.addAction(sep)
self._actions['save'] = SaveAction(self)
self._actions['copy'] = CopyAction(self)
self._actions['paste'] = PasteAction(self)
self._actions['paste_special'] = PasteSpecialAction(self)
self._actions['invert'] = Inverter(self)
self._actions['new'] = NewAction(self)
self._actions['clear'] = ClearAction(self)
self._actions['delete'] = DeleteAction(self)
self._actions['facet'] = FacetAction(self)
# new component definer
separator = QAction("sep", tree)
separator.setSeparator(True)
tree.addAction(separator)
a = _act("Define new component", self,
tip="Define a new component using python expressions")
tree.addAction(a)
a.triggered.connect(nonpartial(self._create_component))
self._actions['new_component'] = a
# right click pulls up menu
tree.setContextMenuPolicy(Qt.ActionsContextMenu)
def _on_item_change(self, item, column):
"""emit check_state_changed signal when checkbox clicked"""
if item is None or item not in self or column != 0:
return
is_checked = item.checkState(0) == Qt.Checked
layer = self[item]
self._layer_check_changed.emit(layer, is_checked)
def _load_data(self):
""" Interactively loads data from a data set. Adds
as new layer """
layers = qtutil.data_wizard()
for layer in layers:
self.data_collection.append(layer)
def __getitem__(self, key):
raise NotImplementedError()
return self.layerTree[key]
def __setitem__(self, key, value):
raise NotImplementedError()
self.layerTree[key] = value
def __contains__(self, obj):
return obj in self.layerTree
def __len__(self):
return len(self.layerTree)
def save_subset(subset):
assert isinstance(subset, core.subset.Subset)
fname, fltr = QFileDialog.getSaveFileName(caption="Select an output name")
fname = str(fname)
if not fname:
return
subset.write_mask(fname)
| bsipocz/glue | glue/qt/widgets/layer_tree_widget.py | Python | bsd-3-clause | 14,225 |
# -*- coding: UTF-8 -*-
# Copyright (C) 2011 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from itools
from itools.gettext import MSG
# Import from ikaaro
from autoedit import AutoEdit
from config import Configuration
from fields import Char_Field
from resource_ import DBResource
seo_description = MSG(
u'Optimize your website for better ranking in search engine results.')
class SEO(DBResource):
class_id = 'config-seo'
class_title = MSG(u'Search Engine Optimization')
class_description = seo_description
class_icon16 = '/ui/ikaaro/icons/16x16/search.png'
class_icon48 = '/ui/ikaaro/icons/48x48/search.png'
class_icon_css = 'fa-search-plus'
# Fields
google_site_verification = Char_Field(
title=MSG(u'Google site verification key'))
yahoo_site_verification = Char_Field(
title=MSG(u'Yahoo site verification key'))
bing_site_verification = Char_Field(
title=MSG(u'Bing site verification key'))
# Views
class_views = ['edit']
edit = AutoEdit(title=MSG(u'Search engine optimization'),
description=seo_description,
fields=['google_site_verification',
'yahoo_site_verification',
'bing_site_verification'])
# Configuration
config_name = 'seo'
config_group = 'webmaster'
# Register
Configuration.register_module(SEO)
| bepatient-fr/ikaaro | ikaaro/config_seo.py | Python | gpl-3.0 | 2,060 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '11.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, SpatialDropout1D,Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
from numpy import vstack, row_stack, asarray
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from pandas import read_csv
from pymystem3 import Mystem
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from collections import Counter
# the original `import Twarc` fails: the package and client class live in the
# lowercase `twarc` module, whose hydrate() method is used below
from twarc import Twarc
import json
import codecs
import tweepy
auth = tweepy.OAuthHandler('DkNRJdzhUnThKJ7G5C9IftdUp', 'C14fr0ews91xJI8AH1I3BNhZrZ2gdlyz9KqnXFPQOnmZqJUmc7')
auth.set_access_token('866132837082296320-GRx4gxwbRVffxwXgMhjZhXbxgn4RaM0', 'rhtMycE2gFiJchJVIJtlEIf7qgkvqtCbmue9rPDoXEpkt')
api = tweepy.API(auth)
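# `t` is used by t.hydrate() below but was never defined in the original file.
# A minimal sketch (assumption): instantiate twarc's client with the same app
# credentials used for tweepy above. The empty tweet id passed to t.hydrate()
# further down is a placeholder kept exactly as in the original.
t = Twarc('DkNRJdzhUnThKJ7G5C9IftdUp', 'C14fr0ews91xJI8AH1I3BNhZrZ2gdlyz9KqnXFPQOnmZqJUmc7',
          '866132837082296320-GRx4gxwbRVffxwXgMhjZhXbxgn4RaM0', 'rhtMycE2gFiJchJVIJtlEIf7qgkvqtCbmue9rPDoXEpkt')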
from PyQt5 import QtCore, QtGui, QtWidgets
# Create a summary of a tweet, only showing relevant fields.
def summarize(tweet, extra_fields = None):
new_tweet = {}
for field, value in tweet.items():
if field in ["text", "id_str", "screen_name", "retweet_count", "favorite_count", "in_reply_to_status_id_str", "in_reply_to_screen_name", "in_reply_to_user_id_str"] and value is not None:
new_tweet[field] = value
elif extra_fields and field in extra_fields:
new_tweet[field] = value
elif field in ["retweeted_status", "quoted_status", "user"]:
new_tweet[field] = summarize(value)
return new_tweet
# Print out a tweet, with optional colorizing of selected fields.
def dump(tweet, colorize_fields=None, summarize_tweet=True):
colorize_field_strings = []
for line in json.dumps(summarize(tweet) if summarize_tweet else tweet, indent=4, sort_keys=True).splitlines():
colorize = False
for colorize_field in colorize_fields or []:
if "\"{}\":".format(colorize_field) in line:
print("\x1b" + line + "\x1b")
break
else:
print(line)
tweet = list(t.hydrate(['']))[0]
dump(summarize(tweet, extra_fields=['in_reply_to_status_id_str', 'in_reply_to_user_id']), colorize_fields=['in_reply_to_status_id', 'in_reply_to_status_id_str', 'in_reply_to_screen_name', 'in_reply_to_user_id', 'in_reply_to_user_id_str'], summarize_tweet=False)
def stemconvtext(text):
return(''.join(Mystem().lemmatize(text)))
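# The CNN below references these hyperparameters, but the original file never
# defines them; the values here are assumptions chosen to make the model
# buildable (the usual keras imdb_cnn example defaults), not the author's own.
max_features = 5000   # vocabulary size for the Embedding layer
maxlen = 400          # padded sequence length
embedding_dims = 50   # dimensionality of the word embeddings
filters = 250         # number of convolution filters
kernel_size = 3       # width of each 1-D convolution window
hidden_dims = 250     # units in the fully-connected hidden layer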
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='linear',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('linear'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
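# A hedged sketch of how this classifier would typically be compiled and fit;
# the excerpt stops before training, so x_train/y_train are assumed names:
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=32, epochs=2, validation_split=0.1)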
class Ui_MainWindow(object):
def load_tweet(self):
tweet = api.get_status(self.plainTextEdit_2.toPlainText())
self.textBrowser_2.setPlainText(tweet.text)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(911, 597)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.layoutWidget = QtWidgets.QWidget(self.tab)
self.layoutWidget.setGeometry(QtCore.QRect(510, 10, 371, 411))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.checkBox = QtWidgets.QCheckBox(self.layoutWidget)
self.checkBox.setObjectName("checkBox")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.checkBox)
self.dateEdit_2 = QtWidgets.QDateEdit(self.layoutWidget)
self.dateEdit_2.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2000, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit_2.setMaximumDate(QtCore.QDate(2017, 6, 30))
self.dateEdit_2.setObjectName("dateEdit_2")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.dateEdit_2)
self.dateEdit = QtWidgets.QDateEdit(self.layoutWidget)
self.dateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 6, 15), QtCore.QTime(0, 0, 0)))
self.dateEdit.setObjectName("dateEdit")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.dateEdit)
self.label_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_2)
self.spinBox = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBox.setMaximum(3)
self.spinBox.setObjectName("spinBox")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.spinBox)
self.label = QtWidgets.QLabel(self.layoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.label)
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.label_3)
self.verticalLayout_2.addLayout(self.formLayout)
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_2.addWidget(self.label_4)
self.plainTextEdit_2 = QtWidgets.QPlainTextEdit(self.layoutWidget)
self.plainTextEdit_2.setObjectName("plainTextEdit_2")
self.verticalLayout_2.addWidget(self.plainTextEdit_2)
self.label_5 = QtWidgets.QLabel(self.layoutWidget)
self.label_5.setObjectName("label_5")
self.verticalLayout_2.addWidget(self.label_5)
self.textBrowser = QtWidgets.QTextBrowser(self.layoutWidget)
self.textBrowser.setEnabled(True)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout_2.addWidget(self.textBrowser)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_7 = QtWidgets.QLabel(self.layoutWidget)
self.label_7.setObjectName("label_7")
self.horizontalLayout_2.addWidget(self.label_7)
self.lcdNumber_5 = QtWidgets.QLCDNumber(self.layoutWidget)
self.lcdNumber_5.setProperty("intValue", 0)
self.lcdNumber_5.setObjectName("lcdNumber_5")
self.horizontalLayout_2.addWidget(self.lcdNumber_5)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.layoutWidget1 = QtWidgets.QWidget(self.tab)
self.layoutWidget1.setGeometry(QtCore.QRect(0, 0, 481, 451))
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_3.setContentsMargins(1, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.textBrowser_2 = QtWidgets.QTextBrowser(self.layoutWidget1)
self.textBrowser_2.setEnabled(True)
self.textBrowser_2.setObjectName("textBrowser_2")
self.verticalLayout_3.addWidget(self.textBrowser_2)
self.lcdNumber_4 = QtWidgets.QLCDNumber(self.layoutWidget1)
self.lcdNumber_4.setProperty("intValue", 0)
self.lcdNumber_4.setObjectName("lcdNumber_4")
self.verticalLayout_3.addWidget(self.lcdNumber_4)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_2)
self.textBrowser_3.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_3.setObjectName("textBrowser_3")
self.lcdNumber = QtWidgets.QLCDNumber(self.tab_2)
self.lcdNumber.setEnabled(True)
self.lcdNumber.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setProperty("intValue", 0)
self.lcdNumber.setObjectName("lcdNumber")
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.textBrowser_4 = QtWidgets.QTextBrowser(self.tab_3)
self.textBrowser_4.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_4.setObjectName("textBrowser_4")
self.lcdNumber_2 = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber_2.setEnabled(True)
self.lcdNumber_2.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber_2.setProperty("intValue", 0)
self.lcdNumber_2.setObjectName("lcdNumber_2")
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.textBrowser_5 = QtWidgets.QTextBrowser(self.tab_4)
self.textBrowser_5.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_5.setObjectName("textBrowser_5")
self.lcdNumber_3 = QtWidgets.QLCDNumber(self.tab_4)
self.lcdNumber_3.setEnabled(True)
self.lcdNumber_3.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber_3.setProperty("intValue", 0)
self.lcdNumber_3.setObjectName("lcdNumber_3")
self.tabWidget.addTab(self.tab_4, "")
self.horizontalLayout.addWidget(self.tabWidget)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 911, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.pushButton.clicked.connect(self.load_tweet)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.checkBox.setText(_translate("MainWindow", "Анализировать комментарии"))
self.label_2.setText(_translate("MainWindow", "Количество комментариев"))
self.label.setText(_translate("MainWindow", "верхняя граница даты"))
self.label_3.setText(_translate("MainWindow", "нижняя граница даты"))
self.label_4.setText(_translate("MainWindow", "Id на пост"))
self.plainTextEdit_2.setPlainText(_translate("MainWindow", ""))
self.label_5.setText(_translate("MainWindow", "Список первых трех комментариев выбранных по дате"))
self.textBrowser.setHtml(_translate("MainWindow", ""))
self.label_7.setText(_translate("MainWindow", "Средняя тональность всех комментариев "))
self.textBrowser_2.setHtml(_translate("MainWindow", ""))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Post"))
self.textBrowser_3.setHtml(_translate("MainWindow", ""))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "First comment"))
self.textBrowser_4.setHtml(_translate("MainWindow", ""))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Second comment"))
self.textBrowser_5.setHtml(_translate("MainWindow", ""))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Third comment"))
        self.label_6.setText(_translate("MainWindow", "Emotional sentiment: 0 means absolutely negative, 100 absolutely positive"))
        self.pushButton.setText(_translate("MainWindow", "Analyze"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| nstustudent/krotyuk_social_network_sa | Interface.py | Python | apache-2.0 | 14,186 |
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import requests
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20160109.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'musicbrainz'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'musicbrainz.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--lua-script", "musicbrainz.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "musicbrainz-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("musicbrainz-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
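        # Item names are expected to look like '<sort>:<ia-item>:<file>'
        # (the exact values come from the tracker; 'a:some_item:urls.txt'
        # is purely illustrative) -- the middle and last parts select an
        # archive.org item and a file inside it.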
item_sort, item_item, item_file = item_name.split(':', 2)
item['item_item'] = item_item
        item_list_url = 'https://archive.org/download/{0}/{1}'.format(item_item, item_file)
        item_list = requests.get(item_list_url)
        if item_list.status_code != 200:
            raise Exception('You received status code %d with URL %s' % (item_list.status_code, item_list_url))
for url in item_list.text.splitlines():
wget_args.append("{0}".format(url))
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="musicbrainz",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/thumb/a/a9/MusicBrainz_Logo_Transparent.png/320px-MusicBrainz_Logo_Transparent.png" height="50px" title=""/>
<h2>musicbrainz.org <span class="links"><a href="http://musicbrainz.org/">Website</a> · <a href="http://tracker.archiveteam.org/musicbrainz/">Leaderboard</a></span></h2>
<p>Save external links from MusicBrainz!</p>
"""
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="musicbrainz"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_item": ItemValue("item_item"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| ArchiveTeam/musicbrainz-grab | pipeline.py | Python | unlicense | 9,152 |
"""
$Id: __init__.py,v 1.2 2005/02/26 17:56:10 sidnei Exp $
"""
from opencore.interfaces import IProject
| socialplanning/opencore | Products/OpenPlans/interfaces/project.py | Python | gpl-3.0 | 105 |
"""This package contains implementations of models of cache replacement
policies and caching and routing strategies.
"""
from .cache import *
from .cachenet import *
from .strategy import * | FilWisher/distributed-project | icarus/icarus/models/__init__.py | Python | mit | 190 |
"""
Test settings for ``timezone`` app.
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
INSTALLED_APPS = (
'django_nose',
'timezone.tests'
)
SECRET_KEY = 'secret-key'
STATIC_URL = '/static/'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TIME_ZONE = 'Australia/Sydney'
USE_TZ = True
| ixc/django-timezone | timezone/tests/settings.py | Python | mit | 368 |
"""Unit test `MiddlewareComposer()`."""
import twill
import selector
def test_middleware_composer():
"""Middleware stack should alter return in order.."""
def make_middleware(txt):
def middleware(app):
def wrappedapp(environ, start_response):
res = app(environ, start_response)
res.append(txt)
return res
return wrappedapp
return middleware
# Environ predicates
t = lambda x: True
f = lambda x: False
rules = [(t, make_middleware('a')),
(f, make_middleware('b')),
(t, make_middleware('c')),
(f, make_middleware('d')),
(t, make_middleware('e'))]
def app(environ, start_response):
start_response("200 OK", [('Content-type', 'text/plain')])
return ["ok "]
composed = selector.MiddlewareComposer(app, rules)
twill.add_wsgi_intercept('simple-host', 80, lambda: composed)
browser = twill.get_browser()
browser.go('http://simple-host/endpoint')
assert browser.result.page.startswith("ok eca")
assert browser.result.http_code == 200
| lukearno/selector | tests/unit/test_middleware_composer.py | Python | mit | 1,141 |
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
count = 0
var = raw_input("GPIO TO TEST ")
var = int(var)
totaltime = raw_input("HOW MANY SECONDS? ")
print "YOU ENTERED: ", var
print "IT WILL BE ACTIVE FOR: ", totaltime, "SECONDS"
totaltime = int(totaltime)
GPIO.setup(var, GPIO.OUT)
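# Pull the pin low for the requested number of seconds, then release it
# high before cleanup (low-then-high suits active-low wiring; swap the
# two levels if your hardware is active-high).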
GPIO.output(var, GPIO.LOW)
while count < totaltime:
time.sleep(1)
print count, " ", totaltime
count = count + 1
GPIO.output(var, GPIO.HIGH)
GPIO.cleanup()
| will-davis/fishtank | gpio.py | Python | mit | 496 |
from settings.default import *
# neonion specific
ANNOTATION_STORE_URL = "http://127.0.0.1:5100"
ELASTICSEARCH_URL = "http://127.0.0.1:9200"
DEFAULT_USER_ACTIVE_STATE = True
| Taraka16/neonion | settings/demo.py | Python | gpl-2.0 | 175 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Integration Service')
class TestIntegrationService(unittest.TestCase):
pass
| vqw/frappe | frappe/integration_broker/doctype/integration_service/test_integration_service.py | Python | mit | 293 |
__author__ = 'scidb'
| gqueiroz/scigws | src/server/server/management/commands/__init__.py | Python | gpl-3.0 | 21 |
import jingo
from django.test.client import RequestFactory
from nose.tools import eq_
from pyquery import PyQuery as pq
from bedrock.mozorg.tests import TestCase
def render(s, context=None):
t = jingo.env.from_string(s)
return t.render(context or {})
class TestDownloadButtons(TestCase):
def test_thunderbird(self):
"""Should have 4 links on the Thunderbird download button"""
with self.activate('en-US'):
rf = RequestFactory()
get_request = rf.get('/fake')
get_request.locale = 'en-US'
doc = pq(render("{{ download_thunderbird() }}",
{'request': get_request}))
list = doc('.download-list li')
eq_(list.length, 4)
eq_(pq(list[0]).attr('class'), 'os_win')
eq_(pq(list[1]).attr('class'), 'os_osx')
eq_(pq(list[2]).attr('class'), 'os_linux')
eq_(pq(list[3]).attr('class'), 'os_linux64')
# TODO: Support Beta and Earlybird
class TestThunderbirdURL(TestCase):
rf = RequestFactory()
def _render(self, page, channel=None):
with self.activate('en-US'):
req = self.rf.get('/')
req.locale = 'en-US'
if channel:
tmpl = "{{ thunderbird_url('%s', '%s') }}" % (page, channel)
else:
tmpl = "{{ thunderbird_url('%s') }}" % page
return render(tmpl, {'request': req})
def test_thunderbird_all(self):
"""Should return a reversed path for the Thunderbird download page"""
eq_(self._render('all'),
'/en-US/thunderbird/all/')
eq_(self._render('all', 'release'),
'/en-US/thunderbird/all/')
# TODO: Support Beta and Earlybird
def test_thunderbird_sysreq(self):
"""Should return a reversed path for the Thunderbird sysreq page"""
eq_(self._render('sysreq'),
'/en-US/thunderbird/latest/system-requirements/')
eq_(self._render('sysreq', 'release'),
'/en-US/thunderbird/latest/system-requirements/')
# TODO: Support Beta and Earlybird
def test_thunderbird_notes(self):
"""Should return a reversed path for the desktop notes page"""
eq_(self._render('notes'),
'/en-US/thunderbird/latest/releasenotes/')
eq_(self._render('notes', 'release'),
'/en-US/thunderbird/latest/releasenotes/')
# TODO: Support Beta and Earlybird
| Jobava/bedrock | bedrock/thunderbird/tests/test_helpers.py | Python | mpl-2.0 | 2,449 |
import click
import sys
from collections import namedtuple
from random import randint
Ctx = namedtuple('Ctx', ['ctl', 'ssh', 'ssh_cfg'])
@click.group()
@click.pass_context
@click.option('--host', default='vdi.nci.org.au', help='Customize vdi login node')
@click.option('--user', help='SSH user name, if not given will be read from ~/.ssh/config')
@click.option('--no-ask', is_flag=True, help='Do not ask for passwords')
def cli(ctx, host, user, no_ask):
""" Control and query info about VDI sessions
"""
from ._ssh import open_ssh
from .vdi import vdi_ctl
try:
ssh, ssh_cfg = open_ssh(host, user, no_ask=no_ask)
except:
click.echo('Failed to connect to "{}{}"'.format(user+'@' if user else '', host))
ctx.exit()
ctl = vdi_ctl(ssh)
ctx.obj = Ctx(ssh=ssh, ssh_cfg=ssh_cfg, ctl=ctl)
@cli.command('launch')
@click.pass_obj
@click.option('--force', is_flag=True, help='Launch new session even if one is already running')
def launch(ctx, force):
""" Launch session if not running
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) != 0 and not force:
click.echo('Job already running', err=True)
sys.exit(1)
job = ctl('launch', '--partition', 'main')
click.echo(job.get('id'))
return 0
@cli.command('terminate')
@click.pass_obj
def terminate(ctx):
""" Shutdown session (all sessions actually)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
for job in jobs:
jobid = job['id']
click.echo('Terminating {}'.format(jobid))
ctl('terminate', '--jobid', jobid)
@cli.command('host')
@click.pass_obj
def hostname(ctx):
""" Print hostname for every active session
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
click.echo(host)
return 0
@cli.command('get-passwd')
@click.pass_obj
def get_passwd(ctx):
""" Print VNC password
"""
ctl = ctx.ctl
password = ctl('get-passwd').get('passwd')
if password is None:
click.echo('Failed to query VNC password', err=True)
sys.exit(1)
click.echo(password)
return 0
def collect_vnc_info(ctl, job_id, ssh_cfg):
from ._ssh import mk_ssh
from .vdi import vdi_ctl
cfg = dict(**ssh_cfg)
host = ctl('get-host', '--jobid', job_id).get('host')
passwd = ctl('get-passwd').get('passwd')
cfg['hostname'] = host
try:
client_ctl = vdi_ctl(mk_ssh(cfg))
except:
click.echo('Failed to connect to {}'.format(host), err=True)
sys.exit(2)
display = client_ctl('get-display-nbr').get('display')
if display is None:
click.echo('Failed to query display {}'.format(host), err=True)
sys.exit(3)
try:
display = int(display[1:]) # Parse `:2`
except ValueError:
click.echo('Failed to parse display number: "{}"'.format(display))
sys.exit(3)
return dict(host=host,
display=display,
port=display+5900,
passwd=passwd)
def get_vnc_tunnel_cmd(ctx, job_id, local_port):
v_map = {True: 'yes', False: 'no'}
opts = dict(
PasswordAuthentication=False,
ChallengeResponseAuthentication=False,
KbdInteractiveAuthentication=False,
PubkeyAuthentication=True,
StrictHostKeyChecking=True,
)
args = ['-T'] + ['-o{}={}'.format(k, v_map.get(v, v))
for k, v in opts.items()]
cmd = '/opt/vdi/bin/session-ctl --configver=20173552330 tunnel'.split(' ')
user = ctx.ssh_cfg.get('user')
if user is not None:
args.extend(['-l', user])
info = collect_vnc_info(ctx.ctl, job_id, ctx.ssh_cfg)
fwd_args = ['-L',
'{local_port}:127.0.0.1:{remote_port} {host}'.format(
local_port=local_port,
remote_port=info['port'],
host=info['host'])]
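    # The assembled command looks roughly like this (illustrative values):
    #   ssh -T -oPubkeyAuthentication=yes ... -l <user> \
    #       -L 5901:127.0.0.1:5901 <vdi-node> \
    #       /opt/vdi/bin/session-ctl --configver=20173552330 tunnel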
return ['ssh'] + args + fwd_args + cmd
@cli.command('display-nbr')
@click.option('--as-port', is_flag=True, help='Print it as a port number of the VNC server')
@click.pass_obj
def display_nbr(ctx, as_port=False):
""" Print display number for active session (s)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
info = collect_vnc_info(ctl, job['id'], ctx.ssh_cfg)
if as_port:
click.echo('%d' % info['port'])
else:
click.echo(':%d' % info['display'])
@cli.command('vnc-tunnel-cmd')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.pass_obj
def vnc_tunnel_cmd(ctx, local_port=0):
""" Print port forwarding command
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
local_port = local_port or randint(10000, 65000)
for job in jobs:
cmd = get_vnc_tunnel_cmd(ctx, job['id'], local_port)
click.echo(' '.join(cmd))
@cli.command('nbconnect')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.option('--runtime-dir', help='Jupyter runtime dir on a remote `jupyter --runtime-dir`')
@click.pass_obj
def nbconnect(ctx, local_port=0, runtime_dir=None):
""" Connect to notebook on VDI
"""
from ._ssh import mk_ssh
from .nbconnect import run_nb_tunnel
ctl = ctx.ctl
ssh_cfg = ctx.ssh_cfg
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
ssh_cfg['hostname'] = host
try:
ssh = mk_ssh(ssh_cfg)
except:
click.echo('Failed to connect to {}'.format(host))
sys.exit(2)
sys.exit(run_nb_tunnel(ssh, ssh_cfg, runtime_dir=runtime_dir, local_port=local_port))
def _cli():
cli(obj={})
if __name__ == '__main__':
_cli()
| Kirill888/nci-tools | ncitools/_vdimain.py | Python | mit | 6,475 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from polyaxon.lifecycle import V1Stages
from polyaxon.utils.enums_utils import values_to_choices
class StageModel(models.Model):
stage = models.CharField(
max_length=16,
blank=True,
null=True,
db_index=True,
default=V1Stages.TESTING,
choices=values_to_choices(V1Stages.allowable_values),
)
stage_conditions = models.JSONField(
encoder=DjangoJSONEncoder, blank=True, null=True, default=dict
)
class Meta:
abstract = True
| polyaxon/polyaxon | platform/coredb/coredb/abstracts/stage.py | Python | apache-2.0 | 1,201 |
#!/usr/bin/python
import numpy as np
import sys
from getData import *
import matplotlib.pylab as plt
def effEpsilon(epsv1,epsv2,f):
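    """Wiener bounds for a layered two-component medium (a sketch of the
    intent, inferred from the formulas): epsw1 is the volume-weighted
    arithmetic mean (field parallel to the layers) and epsw2 the harmonic
    mean (field perpendicular to the layers), with f the fill fraction of
    material 1."""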
epsw1=[]
epsw2=[]
for eps1,eps2 in zip(epsv1,epsv2):
epsw1.append( f * eps1 + (1-f) * eps2)
epsw2.append( 1./(f/eps1 + (1-f)/eps2))
return (epsw1,epsw2)
if len(sys.argv)!=3:
sys.stderr.write("You have to give exactly 2 arguments, yaml files with material descriptioni\n")
sys.exit(1)
file1=sys.argv[1]
file2=sys.argv[2]
matFile1=open(file1,'r')
matFile2=open(file2,'r')
matRange1=getRange(file1)
matRange2=getRange(file2)
sys.stderr.write("matRange1="+str( matRange1))
sys.stderr.write("matRange2="+str( matRange2))
comRange=np.empty(shape=(2,1))
comRange[0]=matRange1[0] if matRange1[0]>matRange2[0] else matRange2[0]
comRange[1]=matRange1[1] if matRange1[1]<matRange2[1] else matRange2[1]
sys.stderr.write("comRange="+str(comRange))
#Sampling definition:
lambdas=np.linspace(comRange[0],comRange[1],100)
#alternativelly hard specified
lambdas=np.linspace(7,10,100)
#Change refractive index to epsilon
matData1=getData(file1,lambdas)
for i in range(0,len(matData1)):
matData1[i]=matData1[i]*matData1[i]
matData2=getData(file2,lambdas)
for i in range(0,len(matData2)):
matData2[i]=matData2[i]*matData2[i]
R1=np.empty(shape=(0,len(lambdas)))
I1=np.empty(shape=(0,len(lambdas)))
R2=np.empty(shape=(0,len(lambdas)))
I2=np.empty(shape=(0,len(lambdas)))
for f in np.linspace(0,1,100):
(efw1,efw2)=effEpsilon(matData1,matData2,f)
r1 = [x.real for x in efw1]
i1 = [x.imag for x in efw1]
r2 = [x.real for x in efw2]
i2 = [x.imag for x in efw2]
R1=np.vstack((R1,r1))
I1=np.vstack((I1,i1))
R2=np.vstack((R2,r2))
I2=np.vstack((I2,i2))
plt.figure("Materials:"+file1+" "+file2)
plt.subplot(211)
plt.plot(1000*lambdas,[x.real for x in matData1],'r-',label="Real("+file1+")")
plt.plot(1000*lambdas,[x.real for x in matData2],'b--',label="Real("+file2+")")
plt.xlabel(r"$\lambda$ [nm]")
#plt.ylim(0,2)
plt.legend()
plt.subplot(212)
plt.plot(1000*lambdas,[ x.imag for x in matData1],'r-',label="Imag("+file1+")")
plt.plot(1000*lambdas,[ x.imag for x in matData2],'b--',label="Imag("+file2+")")
plt.xlabel(r"$\lambda$ [nm]")
plt.legend()
#number of ticks
TN=5
#formatted list of wavelengths (in nm) to be displayed
lambTicks=[]
for lamb in np.linspace(min(lambdas),max(lambdas),TN):
    lambTicks.append("%0.1f" % (1000*lamb))
fTicks=np.linspace(0,1,TN)
VMIN=-10#min(np.min(R1),np.min(R2))
VMAX=30#max(np.max(R1),np.max(R2))
plt.figure("Effective:"+file1+" "+file2)
plt.subplot(2,2,1)
plt.imshow(R1,vmin=VMIN,vmax=VMAX)
plt.ylabel("f")
plt.title(r'Re{$\epsilon_x$}')
#START:generic settings for all plots
#plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
plt.xticks([])
plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#END:generic settings for all plots
#We can choose the contour position by changing this line:
plt.contour(R1,[0.],colors="k")
plt.subplot(2,2,2)
plt.imshow(R2,vmin=VMIN,vmax=VMAX)
plt.title(r'Re{$\epsilon_z$}')
#START:generic settings for all plots
plt.colorbar()
plt.xticks([])
plt.yticks([])
#END:generic settings for all plots
VMIN=min(np.min(I1),np.min(I2))
VMAX=10#max(np.max(I1),np.max(I2))
#We can choose the contour position by changing this line:
plt.contour(R2,[100.],colors="w")
plt.subplot(2,2,3)
plt.imshow(I1,vmin=VMIN,vmax=VMAX)
plt.xlabel(r"$\lambda$ [nm]")
plt.ylabel("f")
plt.title(r'Im{$\epsilon_x$}')
#START:generic settings for all plots
plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#END:generic settings for all plots
#We can choose the contour position by changing this line:
#plt.contour(I1,[1.])
plt.subplot(2,2,4)
plt.imshow(I2,vmin=VMIN,vmax=VMAX)
plt.xlabel(r"$\lambda$ [nm]")
plt.title(r'Im{$\epsilon_z$}')
#START:generic settings for all plots
plt.colorbar()
plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
plt.yticks([])
#END:generic settings for all plots
#We can choose the contour position by changing this line:
#plt.contour(I2,[1.])
plt.subplots_adjust(left=0.01, bottom=0.1, right=0.99, top=0.9, wspace=0, hspace=0.15)
plt.suptitle("")
#plt.figure("eps1eps2:"+file1+" "+file2)
##C=np.empty(shape=R1.shape,dtype="complex")
#Cr=np.empty(shape=R1.shape)
#Ci=np.empty(shape=R1.shape)
#for i in range(0,R1.shape[1]):
# for j in range(0,R1.shape[0]):
# C=(R1[i][j]+1j*I1[i][j]) * (R2[i][j]+1j*I2[i][j])
# Cr[i][j]=C.real
# Ci[i][j]=C.imag
#
#
#plt.subplot(2,2,1)
#plt.title(r"$\epsilon_1 \cdot \epsilon_2$",size=20)
#plt.imshow(abs(1-Cr),vmin=-50,vmax=50)
#plt.title("Real")
#plt.colorbar(fraction=0.046, pad=0.04)
#plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
#plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#plt.contour(Cr,[1],colors="w")
#
#plt.subplot(2,2,2)
#plt.title("Imag")
#plt.imshow(abs(Ci),vmin=-50,vmax=50)
#plt.colorbar(fraction=0.046, pad=0.04)
#plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
#plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#plt.contour(Ci,[0],colors="w")
#
#plt.subplot(2,2,3)
#
#plt.contour(Cr,[1],colors="b")
#plt.contour(Ci,[0],colors="r")
#plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
#plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#
#plt.subplot(2,2,4)
##Calculated R for normal incidence
#Refle=np.empty(shape=R1.shape)
#n=np.empty(shape=R1.shape,dtype="complex")
#for i in range(0,R1.shape[1]):
# for j in range(0,R1.shape[0]):
# n=cmath.sqrt(R1[i][j]+1j*I1[i][j])
# Refle[i][j]=abs((n-1)/(n+1))*abs((n-1)/(n+1))
#
#plt.imshow(Refle,vmin=0,vmax=1)
#plt.colorbar(fraction=0.046, pad=0.04)
#plt.title("Reflection for normal incidence")
#plt.xticks(np.linspace(0,R1.shape[0],TN),lambTicks)
#plt.yticks(np.linspace(0,R1.shape[1],TN),fTicks)
#
#minRef=np.average(Refle,axis=1)
#print minRef
#if np.any(minRef<0.1):
plt.show()
| cinek810/refractiveindex.info | effEpsilon.py | Python | gpl-2.0 | 5,763 |
# -*- coding: utf-8 -*-
from flask_restful import Resource, reqparse, marshal_with
from flask_restful_swagger import swagger
from app.mod_shared.models import db
from app.mod_profiles.models import *
from app.mod_profiles.resources.fields.measurementUnitFields import MeasurementUnitFields
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True)
parser.add_argument('symbol', type=str, required=True)
parser.add_argument('suffix', type=bool)
class MeasurementUnitView(Resource):
@swagger.operation(
        notes=u'Returns a specific measurement unit instance.'.encode('utf-8'),
responseClass='MeasurementUnitFields',
nickname='measurementUnitView_get',
parameters=[
{
"name": "id",
"description": u'Identificador único de la unidad de medición.'.encode('utf-8'),
"required": True,
"dataType": "int",
"paramType": "path"
}
],
responseMessages=[
{
"code": 200,
"message": "Objeto encontrado."
},
{
"code": 404,
"message": "Objeto inexistente."
}
]
)
@marshal_with(MeasurementUnitFields.resource_fields, envelope='resource')
def get(self, id):
measurement_unit = MeasurementUnit.query.get_or_404(id)
return measurement_unit
@swagger.operation(
        notes=u'Updates a specific measurement unit instance and returns it.'.encode('utf-8'),
responseClass='MeasurementUnitFields',
nickname='measurementUnitView_put',
parameters=[
{
"name": "id",
"description": u'Identificador único de la unidad de medición.'.encode('utf-8'),
"required": True,
"dataType": "int",
"paramType": "path"
},
{
"name": "name",
"description": u'Nombre de la unidad de medición.'.encode('utf-8'),
"required": True,
"dataType": "string",
"paramType": "body"
},
{
"name": "symbol",
"description": u'Símbolo de la unidad de medición.'.encode('utf-8'),
"required": True,
"dataType": "string",
"paramType": "body"
},
{
"name": "suffix",
"description": (u'Variable booleana que indica si el símbolo de '
'la unidad de medición es un sufijo (verdadero) '
'o un prefijo (falso) del valor de la medición.').encode('utf-8'),
"required": False,
"dataType": "boolean",
"paramType": "body"
}
],
responseMessages=[
{
"code": 200,
"message": "Objeto actualizado exitosamente."
},
{
"code": 404,
"message": "Objeto inexistente."
}
]
)
@marshal_with(MeasurementUnitFields.resource_fields, envelope='resource')
def put(self, id):
measurement_unit = MeasurementUnit.query.get_or_404(id)
args = parser.parse_args()
        # Update the object's attributes and relations based on the
        # received arguments.
        # Update the name if it was modified.
if (args['name'] is not None and
measurement_unit.name != args['name']):
measurement_unit.name = args['name']
        # Update the measurement unit symbol if it was modified.
if (args['symbol'] is not None and
measurement_unit.symbol != args['symbol']):
measurement_unit.symbol = args['symbol']
        # Update the suffix flag if it was modified.
if (args['suffix'] is not None and
measurement_unit.suffix != args['suffix']):
measurement_unit.suffix = args['suffix']
db.session.commit()
return measurement_unit, 200 | coco19/salud-api | app/mod_profiles/resources/views/measurementUnitView.py | Python | gpl-2.0 | 4,223 |
from django.conf import settings
PROMOTERSCORE_USER_RANGES_DEFAULT = {
'promoters': [9, 10],
'passive': [7, 8],
'detractors': [1, 2, 3, 4, 5, 6],
'skipped': [-1]
}
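# The defaults roughly mirror the usual NPS split (9-10 promoters, 7-8
# passives, 6 and below detractors), with -1 reserved for skipped answers.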
PROMOTERSCORE_PERMISSION_VIEW = getattr(settings, 'PROMOTERSCORE_PERMISSION_VIEW', lambda u: u.is_staff)
PROMOTERSCORE_USER_RANGES = getattr(settings, 'PROMOTERSCORE_USER_RANGES', PROMOTERSCORE_USER_RANGES_DEFAULT) | epantry/django-netpromoterscore | netpromoterscore/app_settings.py | Python | mit | 396 |
import unittest
import productsvd
import numpy
class TestReduceusv(unittest.TestCase):
def test_1(self):
Ui = numpy.array([[1,0,0],[0,1,0],[0,0,1],[0,0,0]])
si = numpy.array([3,2,1])
Vit = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
p = 2
Ue = numpy.array([[1,0],[0,1],[0,0],[0,0]])
se = numpy.array([3,2])
Vet = numpy.array([[1,0,0],[0,1,0]])
Uo, so, Vot = productsvd.reduceusv(Ui, si, Vit, p)
numpy.testing.assert_array_equal(Ue, Uo)
numpy.testing.assert_array_equal(se, so)
numpy.testing.assert_array_equal(Vet, Vot)
class TestReducematrix(unittest.TestCase):
def test_1(self):
Ui = numpy.array([[1,0,0],[0,1,0],[0,0,1],[0,0,0]])
si = numpy.array([3,2,1])
Vit = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
p = 2
Ai = Ui @ numpy.diag(si) @ Vit
Ue = numpy.array([[1,0],[0,1],[0,0],[0,0]])
se = numpy.array([3,2])
Vet = numpy.array([[1,0,0],[0,1,0]])
Ae = Ue @ numpy.diag(se) @ Vet
Ao = productsvd.reducematrix(Ai, p)
numpy.testing.assert_array_equal(Ae, Ao)
class TestSvdproductsvd(unittest.TestCase):
def test_1(self):
Ai = numpy.random.randint(0, 9, size = (9,5))
Bi = numpy.random.randint(0, 9, size = (9,5))
Ci = Ai @ Bi.T
Ue, se, Vet = numpy.linalg.svd(Ci, full_matrices = False)
Ce = Ci
Ue, se, Vet = productsvd.reduceusv(Ue, se, Vet, 5)
Ue = numpy.absolute(Ue)
Vet = numpy.absolute(Vet)
Uo, so, Vot = productsvd.svdproductsvd(Ai, Bi)
Co = Uo @ numpy.diag(so) @ Vot
Uo = numpy.absolute(Uo)
Vot = numpy.absolute(Vot)
numpy.testing.assert_array_almost_equal(Ue, Uo)
numpy.testing.assert_array_almost_equal(se, so)
numpy.testing.assert_array_almost_equal(Vet, Vot)
numpy.testing.assert_array_almost_equal(Ce, Co)
class TestQrproductsvd(unittest.TestCase):
def test_1(self):
Ai = numpy.random.randint(0, 9, size = (9,5))
Bi = numpy.random.randint(0, 9, size = (9,5))
Ci = Ai @ Bi.T
Ue, se, Vet = numpy.linalg.svd(Ci, full_matrices = False)
Ce = Ci
Ue, se, Vet = productsvd.reduceusv(Ue, se, Vet, 5)
Ue = numpy.absolute(Ue)
Vet = numpy.absolute(Vet)
Uo, so, Vot = productsvd.qrproductsvd(Ai, Bi)
Co = Uo @ numpy.diag(so) @ Vot
Uo = numpy.absolute(Uo)
Vot = numpy.absolute(Vot)
numpy.testing.assert_array_almost_equal(Ue, Uo)
numpy.testing.assert_array_almost_equal(se, so)
numpy.testing.assert_array_almost_equal(Vet, Vot)
numpy.testing.assert_array_almost_equal(Ce, Co)
if __name__ == '__main__':
unittest.main() | phenology/infrastructure | applications/modules/python/productsvd.ut.py | Python | apache-2.0 | 2,494 |
# coding=utf-8
""" Module holding tools for ee.Collection """
import ee
def enumerate(collection):
""" Create a list of lists in which each element of the list is:
[index, element]. For example, if you parse a FeatureCollection with 3
Features you'll get: [[0, feat0], [1, feat1], [2, feat2]]
:param collection: can be an ImageCollection or a FeatureCollection
    :return: ee.List of [index, element] pairs
"""
collist = collection.toList(collection.size())
# first element
ini = ee.Number(0)
first_image = ee.Image(collist.get(0))
first = ee.List([ini, first_image])
start = ee.List([first])
rest = collist.slice(1)
def over_list(im, s):
im = ee.Image(im)
s = ee.List(s)
last = ee.List(s.get(-1))
last_index = ee.Number(last.get(0))
index = last_index.add(1)
return s.add(ee.List([index, im]))
list = ee.List(rest.iterate(over_list, start))
return list
def joinByProperty(primary, secondary, propertyField, outer=False):
""" Join 2 collections by a given property field.
It assumes ids are unique so uses ee.Join.saveFirst.
It drops non matching features.
Example:
fc1 = ee.FeatureCollection([ee.Feature(geom=ee.Geometry.Point([0,0]),
opt_properties={'id': 1, 'prop_from_fc1': 'I am from fc1'})])
fc2 = ee.FeatureCollection([ee.Feature(geom=ee.Geometry.Point([0,0]),
opt_properties={'id': 1, 'prop_from_fc2': 'I am from fc2'})])
        joined = joinByProperty(fc1, fc2, 'id')
print(joined.getInfo())
"""
Filter = ee.Filter.equals(leftField=propertyField,
rightField=propertyField)
join = ee.Join.saveFirst(matchKey='match', outer=outer)
joined = join.apply(primary, secondary, Filter)
def overJoined(feat):
properties = feat.propertyNames()
retain = properties.remove('match')
match = ee.Feature(feat.get('match'))
matchprop = match.toDictionary()
return feat.select(retain).setMulti(matchprop)
return joined.map(overJoined)
| gee-community/gee_tools | geetools/tools/collection.py | Python | mit | 2,120 |
from .lsm import *
| WeKeyPedia/toolkit-python | wekeypedia/metrics/__init__.py | Python | mit | 19 |
"""Provide a way to work with pandas data frames in Spark"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
from functools import reduce
add_pyspark_path()
from sparklingpandas.pstatcounter import PStatCounter
class PRDD:
"""A Pandas Resilient Distributed Dataset (PRDD), is an extension of a
Spark RDD. You can access the underlying RDD at _rdd, but be
careful doing so.
Note: RDDs are lazy, so you operations are not performed until required."""
def __init__(self, rdd):
self._rdd = rdd
@classmethod
def from_rdd(cls, rdd):
"""Construct a PRDD from an RDD. No checking or validation occurs."""
return PRDD(rdd)
def to_spark_sql(self):
"""A Sparkling Pandas specific function to turn a DDF into
something that Spark SQL can query. To use the result you will
need to call sqlCtx.inferSchema(rdd) and then register the result
as a table. Once Spark 1.1 is released this function may be deprecated
        and replaced with to_spark_sql_schema_rdd."""
raise NotImplementedError("Method deprecated, please use "
"to_spark_sql_schema_rdd instead!")
def applymap(self, func, **kwargs):
"""Return a new PRDD by applying a function to each element of each
pandas DataFrame."""
return self.from_rdd(
self._rdd.map(lambda data: data.applymap(func), **kwargs))
def __getitem__(self, key):
"""Returns a new PRDD of elements from that key."""
return self.from_rdd(self._rdd.map(lambda x: x[key]))
def groupby(self, *args, **kwargs):
"""Takes the same parameters as groupby on DataFrame.
Like with groupby on DataFrame disabling sorting will result in an
even larger performance improvement. This returns a Sparkling Pandas
L{GroupBy} object which supports many of the same operations as regular
GroupBy but not all."""
from sparklingpandas.groupby import GroupBy
return GroupBy(self._rdd, *args, **kwargs)
@property
def dtypes(self):
"""
Return the dtypes associated with this object
Uses the types from the first frame.
"""
return self._rdd.first().dtypes
@property
def ftypes(self):
"""
Return the ftypes associated with this object
Uses the types from the first frame.
"""
return self._rdd.first().ftypes
def get_dtype_counts(self):
"""
Return the counts of dtypes in this object
Uses the information from the first frame
"""
return self._rdd.first().get_dtype_counts()
def get_ftype_counts(self):
"""
Return the counts of ftypes in this object
Uses the information from the first frame
"""
return self._rdd.first().get_ftype_counts()
@property
def axes(self):
return (self._rdd.map(lambda frame: frame.axes)
.reduce(lambda xy, ab: [xy[0].append(ab[0]), xy[1]]))
@property
def shape(self):
return (self._rdd.map(lambda frame: frame.shape)
.reduce(lambda xy, ab: (xy[0] + ab[0], xy[1])))
def collect(self):
"""Collect the elements in an PRDD and concatenate the partition."""
# The order of the frame order appends is based on the implementation
# of reduce which calls our function with
# f(valueToBeAdded, accumulator) so we do our reduce implementation.
def append_frames(frame_a, frame_b):
return frame_a.append(frame_b)
return self._custom_rdd_reduce(append_frames)
def _custom_rdd_reduce(self, reduce_func):
"""Provides a custom RDD reduce which preserves ordering if the RDD has
been sorted. This is useful for us because we need this functionality
as many pandas operations support sorting the results. The standard
reduce in PySpark does not have this property. Note that when PySpark
no longer does partition reduces locally this code will also need to
be updated."""
def accumulating_iter(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = reduce_func(acc, obj)
if acc is not None:
yield acc
vals = self._rdd.mapPartitions(accumulating_iter).collect()
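        # Each partition has already been folded locally; combine the
        # per-partition results in partition order so a prior sort is kept.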
        return reduce(reduce_func, vals)
def stats(self, columns):
"""Compute the stats for each column provided in columns.
Parameters
----------
columns : list of str, contains all columns to compute stats on.
"""
def reduce_func(sc1, sc2):
return sc1.merge_pstats(sc2)
return self._rdd.mapPartitions(lambda partition: [
PStatCounter(dataframes=partition, columns=columns)])\
.reduce(reduce_func)
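    # A minimal usage sketch (assuming a SparklingPandas context `psc`
    # that wraps pandas DataFrames into a PRDD; names are illustrative):
    #
    #   prdd = psc.DataFrame(pandas_frame, npartitions=4)
    #   counters = prdd.stats(columns=['age', 'income'])
    #   combined = prdd.collect()  # one ordered pandas DataFrame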
| michalmonselise/sparklingpandas | sparklingpandas/prdd.py | Python | apache-2.0 | 5,745 |
# -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2016, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('game', '0043_auto_20150615_1155'),
]
operations = [
migrations.AlterField(
model_name='episode',
name='r_blocks',
field=models.ManyToManyField(related_name='episodes', to='game.Block'),
),
migrations.AlterField(
model_name='level',
name='shared_with',
field=models.ManyToManyField(related_name='shared', to=settings.AUTH_USER_MODEL, blank=True),
),
]
| CelineBoudier/rapid-router | game/migrations/0044_auto_20150615_1156.py | Python | agpl-3.0 | 2,499 |
import logging
import pandas
from netort.data_manager import DataSession, thread_safe_property
from yandextank.plugins.Phantom.reader import string_to_df_microsec
from yandextank.common.interfaces import AbstractPlugin,\
MonitoringDataListener
logger = logging.getLogger(__name__) # pylint: disable=C0103
class Plugin(AbstractPlugin, MonitoringDataListener):
SECTION = 'neuploader'
importance_high = {
'interval_real',
'proto_code',
'net_code'
}
OVERALL = '__overall__'
def __init__(self, core, cfg, name):
super(Plugin, self).__init__(core, cfg, name)
self._is_telegraf = None
self.clients_cfg = [{'type': 'luna',
'api_address': self.cfg.get('api_address'),
'db_name': self.cfg.get('db_name')}]
self.metrics_objs = {} # map of case names and metric objects
self.monitoring_metrics = {}
self._col_map = None
self._data_session = None
def configure(self):
pass
def start_test(self):
try:
self.reader = self.core.job.generator_plugin.get_reader(parser=string_to_df_microsec)
except TypeError:
logger.error('Generator plugin does not support NeUploader')
self.is_test_finished = lambda: -1
self.reader = []
@thread_safe_property
def col_map(self):
return {
'interval_real': self.data_session.new_true_metric,
'connect_time': self.data_session.new_true_metric,
'send_time': self.data_session.new_true_metric,
'latency': self.data_session.new_true_metric,
'receive_time': self.data_session.new_true_metric,
'interval_event': self.data_session.new_true_metric,
'net_code': self.data_session.new_event_metric,
'proto_code': self.data_session.new_event_metric
}
@thread_safe_property
def data_session(self):
"""
:rtype: DataSession
"""
if self._data_session is None:
self._data_session = DataSession({'clients': self.clients_cfg},
test_start=self.core.status['generator']['test_start'] * 10**6)
self.add_cleanup(self._cleanup)
self._data_session.update_job(dict({'name': self.cfg.get('test_name'),
'__type': 'tank'},
**self.cfg.get('meta', {})))
return self._data_session
def _cleanup(self):
uploader_metainfo = self.map_uploader_tags(self.core.status.get('uploader'))
if self.core.status.get('autostop'):
autostop_rps = self.core.status.get('autostop', {}).get('rps', 0)
autostop_reason = self.core.status.get('autostop', {}).get('reason', '')
self.log.warning('Autostop: %s %s', autostop_rps, autostop_reason)
uploader_metainfo.update({'autostop_rps': autostop_rps, 'autostop_reason': autostop_reason})
uploader_metainfo.update(self.cfg.get('meta', {}))
self.data_session.update_job(uploader_metainfo)
self.data_session.close(test_end=self.core.status.get('generator', {}).get('test_end', 0) * 10**6)
def is_test_finished(self):
df = next(self.reader)
if df is not None:
self.upload(df)
return -1
def monitoring_data(self, data_list):
self.upload_monitoring(data_list)
def post_process(self, retcode):
try:
for chunk in self.reader:
if chunk is not None:
self.upload(chunk)
except KeyboardInterrupt:
logger.warning('Caught KeyboardInterrupt on Neuploader')
self._cleanup()
return retcode
@property
def is_telegraf(self):
return True
def get_metric_obj(self, col, case):
"""
Generator of metric objects:
Checks existent metrics and creates new metric if it does not exist.
:param col: str with column name
:param case: str with case name
:return: metric object
"""
case_metrics = self.metrics_objs.get(case)
if case_metrics is None:
for col, constructor in self.col_map.items():
# args = dict(self.cfg.get('meta', {}),
# name=col,
# case=case,
# raw=False,
# aggregate=True,
# source='tank',
# importance='high' if col in self.importance_high else '',
# )
# if case != self.OVERALL:
# args.update(parent=self.get_metric_obj(col, self.OVERALL))
self.metrics_objs.setdefault(case, {})[col] = constructor(
dict(self.cfg.get('meta', {}),
name=col,
source='tank',
importance='high' if col in self.importance_high else ''),
raw=False, aggregate=True,
parent=self.get_metric_obj(col, self.OVERALL) if case != self.OVERALL else None,
case=case if case != self.OVERALL else None
)
return self.metrics_objs[case][col]
def upload(self, df):
df_cases_set = set()
for row in df.itertuples():
if row.tag and isinstance(row.tag, str):
df_cases_set.add(row.tag)
if '|' in row.tag:
for tag in row.tag.split('|'):
df_cases_set.add(tag)
for column in self.col_map:
overall_metric_obj = self.get_metric_obj(column, self.OVERALL)
df['value'] = df[column]
result_df = self.filter_df_by_case(df, self.OVERALL)
overall_metric_obj.put(result_df)
for case_name in df_cases_set:
case_metric_obj = self.get_metric_obj(column, case_name)
df['value'] = df[column]
result_df = self.filter_df_by_case(df, case_name)
case_metric_obj.put(result_df)
def upload_monitoring(self, data):
for metric_name, df in self.monitoring_data_to_dfs(data).items():
if metric_name not in self.monitoring_metrics:
panel, metric = metric_name.split(':', 1)
try:
group, name = metric.split('_', 1)
except ValueError:
name = metric
group = '_OTHER_'
self.monitoring_metrics[metric_name] = self.data_session.new_true_metric(name,
group=group,
host=panel,
type='monitoring',
**self.cfg.get('meta', {}))
self.monitoring_metrics[metric_name].put(df)
@staticmethod
def monitoring_data_to_dfs(data):
panels = {}
for chunk in data:
for panel_name, content in chunk['data'].items():
if panel_name in panels:
for metric_name, value in content['metrics'].items():
if metric_name in panels[panel_name]:
panels[panel_name][metric_name]['value'].append(value)
panels[panel_name][metric_name]['ts'].append(chunk['timestamp'])
else:
panels[panel_name][metric_name] = {'value': [value], 'ts': [chunk['timestamp']]}
else:
panels[panel_name] = {name: {'value': [value], 'ts': [chunk['timestamp']]} for name, value in content['metrics'].items()}
return {'{}:{}'.format(panelk, name): pandas.DataFrame({'ts': [ts * 1000000 for ts in values['ts']], 'value': values['value']})
for panelk, panelv in panels.items() for name, values in panelv.items()}
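    # Shape note: a chunk such as
    #   {'timestamp': 1500000000,
    #    'data': {'host1': {'metrics': {'cpu_user': 0.5}}}}
    # becomes one row (ts=1500000000 * 10**6, value=0.5) in the frame
    # keyed 'host1:cpu_user'.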
@staticmethod
def filter_df_by_case(df, case):
"""
Filter dataframe by case name. If case is '__overall__', return all rows.
:param df: DataFrame
:param case: str with case name
:return: DataFrame with columns 'ts' and 'value'
"""
case = case.strip()
return df[['ts', 'value']] if case == Plugin.OVERALL else df[df.tag.str.strip() == case][['ts', 'value']]
def map_uploader_tags(self, uploader_tags):
if not uploader_tags:
logger.info('No uploader metainfo found')
return {}
else:
meta_tags_names = ['component', 'description', 'name', 'person', 'task', 'version', 'lunapark_jobno']
meta_tags = {key: uploader_tags.get(key) for key in meta_tags_names if key in uploader_tags}
meta_tags.update({k: v if v is not None else '' for k, v in uploader_tags.get('meta', {}).items()})
return meta_tags
| nettorta/yandex-tank | yandextank/plugins/NeUploader/plugin.py | Python | lgpl-2.1 | 9,248 |
# -*- coding: utf8 -*-
import ConfigParser
import logging
log = logging.getLogger(__name__)
__author__ = "Stinger <neo3land@gmail.com>"
__license__ = "GNU Lesser General Public License (LGPL)"
class MyDict(dict):
def __init__(self, parent, name, seq, **kwargs):
self.name = name
self.parent = parent
seq = [(y, int(x)) if str(x).isdigit() else (y, x) for y, x in seq]
super(MyDict, self).__init__(seq, **kwargs)
def __setitem__(self, key, value):
if key != 'client_flags':
self.parent.cp.set(self.name, key, value)
self.parent.save_config()
super(MyDict, self).__setitem__(key, value)
def __getitem__(self, item):
return super(MyDict, self).__getitem__(item)
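    # Assigning settings.<section>['key'] = value therefore updates the
    # in-memory dict, the ConfigParser section and the file on disk in one
    # step; 'client_flags' is deliberately kept in memory only.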
class Settings(object):
def __init__(self, cfile='settings.cfg'):
self.cp = ConfigParser.RawConfigParser()
self.__config_file = cfile
self.__config_default = \
{
'MYSQL':
{
'user': 'your_mysql_user',
'password': 'your_mysql_pass',
'host': '127.0.0.1',
'database': 'BDPN',
'connection_timeout': '600'
},
'SFTP':
{
'host': 'prod-sftp.numlex.ru',
'user': 'your_numlex_user',
'secret': 'your_numlex_pass',
'port': '3232'
},
'MAIN':
{
'local_dir': '/home/neoland/PyProjects/SftpSync/localstore/',
'log_file': '/home/neoland/PyProjects/SftpSync/bdpnsync.log',
'log_level': 40,
'sync_every_minutes': 120 # Timer step - every even hour
}
}
self._config_cache = {}
self.load_config()
handler = logging.FileHandler(self.main['log_file'], 'a')
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(self.main['log_level'])
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(self.main['log_level'])
def load_config(self):
"""This loads configuration file if one exist or uses default settings otherwise."""
for section in self.cp.sections():
self.cp.remove_section(section)
if not self.cp.read(self.__config_file):
log.warning("Settings file '%s' was not found! Creating one with defaults..." % self.__config_file)
self.__create_default()
if self.__check_config():
for section_name in self.cp.sections():
setattr(self, section_name.lower(), MyDict(self, section_name, self.cp.items(section_name)))
else:
return False
return True
def __check_config(self):
"""Checks if current configuration file has all settings we need."""
for section_name in self.__config_default:
if self.cp.has_section(section_name):
if not sorted(self.cp.options(section_name)) == sorted(self.__config_default[section_name].keys()):
log.critical('Options in section %s are not complete.' % section_name)
raise RuntimeError('Options in section %s are not complete.' % section_name)
else:
log.critical('There is no section %s in settings file.' % section_name)
raise RuntimeError('There is no section %s in settings file.' % section_name)
return True
def __create_default(self):
"""Creates default configuration file."""
for section_name in self.__config_default:
self.cp.add_section(section_name)
for name, value in self.__config_default[section_name].items():
self.cp.set(section_name, name, value)
self.save_config()
def save_config(self):
"""Saves configuration to file."""
log.debug("Saving settings to:%s" % self.__config_file)
with open(self.__config_file, 'wb') as configfile:
self.cp.write(configfile)
| neo4land/numlex | config.py | Python | gpl-3.0 | 4,196 |
#!/usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import torch
import CBIG_pMFM_basic_functions as fc
import warnings
def CBIG_mfm_validation_desikan_fccost(gpu_index=0):
'''
    This function validates the estimated parameters of the mean field
    model.
The objective function is the summation of FC correlation cost and
FCD KS statistics cost.
    Args:
        gpu_index: index of gpu used for optimization
            (the input and output directories are hard-coded inside the
            function rather than passed as arguments)
    Returns:
        None
'''
torch.cuda.set_device(gpu_index)
# Create output folder
input_path = '../output/step1_training_results/'
output_path = '../output/step2_validation_results/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
highest_order = 1
myelin_data = fc.csv_matrix_read('../../../input/Desikan_input/myelin.csv')
myelin_data = myelin_data[:, 0]
gradient_data = fc.csv_matrix_read(
'../../../input/Desikan_input/rsfc_gradient.csv')
gradient_data = gradient_data[:, 0]
n_node = myelin_data.shape[0]
amatrix = np.zeros((n_node, highest_order + 1))
bmatrix = np.zeros((n_node, highest_order + 1))
for i in range(highest_order + 1):
amatrix[:, i] = myelin_data**(i)
bmatrix[:, i] = gradient_data**(i)
template_mat = np.hstack((amatrix, bmatrix[:, 1:highest_order + 1]))
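    # For highest_order=1 the columns of template_mat are [1, myelin,
    # gradient]: the constant column of bmatrix is dropped so the
    # intercept is not duplicated.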
n_trial = 10
vali_dup = 20
for i in range(1, 11):
random_seed_cuda = i + 100
torch.cuda.manual_seed(random_seed_cuda)
load_file = ['random_seed_', str(i), '.csv']
load_path = [input_path] + load_file
xmin = fc.csv_matrix_read(''.join(load_path))
x_mass = xmin[0:-3, :]
result_save = np.zeros((6 + 3 * n_node + 1, xmin.shape[1]))
result_save[0:3, :] = xmin[-3:, :]
para_w = template_mat @ x_mass[0:2 * highest_order + 1, :]
para_I = template_mat @ x_mass[2 * highest_order + 1:2 *
(2 * highest_order + 1), :]
para_sigma = template_mat @ x_mass[2 * (2 * highest_order + 1) +
1:x_mass.shape[0], :]
arx_mass = np.concatenate(
(para_w, para_I, x_mass[2 * (2 * highest_order + 1):2 *
(2 * highest_order + 1) + 1, :],
para_sigma), 0)
result_save[6:, :] = arx_mass
for k in range(n_trial):
in_para = arx_mass[:, 50 * k:50 * (k + 1)]
vali_total, vali_corr, vali_ks = \
fc.CBIG_combined_cost_validation(
in_para, vali_dup)
result_save[3, 50 * k:50 * (k + 1)] = vali_corr
result_save[4, 50 * k:50 * (k + 1)] = vali_ks
result_save[5, 50 * k:50 * (k + 1)] = vali_total
save_path = [output_path] + load_file
np.savetxt(''.join(save_path), result_save, delimiter=',')
if __name__ == '__main__':
warnings.filterwarnings("ignore", category=RuntimeWarning)
CBIG_mfm_validation_desikan_fccost()
| ThomasYeoLab/CBIG | stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/FC_cost/scripts/CBIG_pMFM_step2_validation_fccost.py | Python | mit | 3,265 |
"""Demonstrates the use of callbacks. Press the onboard button S2 to toggle led.
"""
from robovero.LPC17xx import IRQn_Type
from robovero.core import NVIC_EnableIRQ
from robovero.arduino import pinMode, digitalWrite, digitalRead, BTN, LED, OUTPUT
from robovero.extras import heartbeatOff, registerCallback
from robovero.lpc17xx_exti import EXTI_Init, EXTI_ClearEXTIFlag
from robovero.lpc17xx_pinsel import PINSEL_CFG_Type, PINSEL_ConfigPin
from time import sleep
from random import choice
__author__ = "Neil MacMunn"
__email__ = "neil@gumstix.com"
__copyright__ = "Copyright 2010, Gumstix Inc."
__license__ = "BSD 2-Clause"
__version__ = "0.1"
responses = (
"Please", "Hey", "OK I surrender, just", "I said", "Ouch",
"I'm afraid I can't let you do that, Dave. Also,",
"Hammer says"
)
def EINT0Callback():
"""Callback function for EINT0.
"""
while not digitalRead(BTN):
sleep(0)
print "%s don't touch that!" % choice(responses)
state = digitalRead(LED)
digitalWrite(LED, state ^ 1)
EXTI_ClearEXTIFlag(0)
# control the LED manually
heartbeatOff()
pinMode(LED, OUTPUT)
# setup EINT0 on pin 2.10
PinCfg = PINSEL_CFG_Type()
PinCfg.Funcnum = 1
PinCfg.OpenDrain = 0
PinCfg.Pinmode = 0
PinCfg.Pinnum = 10
PinCfg.Portnum = 2
PINSEL_ConfigPin(PinCfg.ptr)
EXTI_Init()
# register the callback
registerCallback(IRQn_Type.EINT0_IRQn, EINT0Callback)
# enable EINT0
NVIC_EnableIRQ(IRQn_Type.EINT0_IRQn)
# the callback does everything from here
while True:
sleep(1)
| robovero/python | callback.py | Python | bsd-2-clause | 1,488 |
# -*- coding: utf-8 -*-
import gitmesh
import six
def test_version_string():
assert isinstance(gitmesh.version, six.text_type)
| smartmob-project/gitmesh | tests/test_version.py | Python | mit | 135 |
# -*- coding: utf-8 -*-
# Copyright 2012 Guewen Baconnier (Camptocamp SA)
# Copyright 2016 Antonio Espinosa <antonio.espinosa@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Web Translate Dialog",
"summary": "Easy-to-use pop-up to translate fields in several languages",
"version": "9.0.1.0.0",
"category": "Web",
"website": "https://odoo-community.org/",
"author": "Camptocamp, "
"Tecnativa, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": [
"web",
],
"data": [
"view/web_translate.xml",
],
"qweb": [
"static/src/xml/base.xml",
]
}
| be-cloud-be/horizon-addons | web/web_translate_dialog/__openerp__.py | Python | agpl-3.0 | 752 |
# -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.tools.translate import _
from openerp.addons.web.http import request
from openerp.addons.website_partner.controllers import main as website_partner
import werkzeug.urls
class WebsiteCrmPartnerAssign(http.Controller):
_references_per_page = 20
@http.route([
'/partners/',
'/partners/page/<int:page>/',
'/partners/country/<int:country_id>',
'/partners/country/<country_name>-<int:country_id>',
'/partners/country/<int:country_id>/page/<int:page>/',
'/partners/country/<country_name>-<int:country_id>/page/<int:page>/',
], type='http', auth="public", website=True, multilang=True)
def partners(self, country_id=0, page=0, **post):
country_obj = request.registry['res.country']
partner_obj = request.registry['res.partner']
post_name = post.get('search', '')
grade_id = post.get('grade', '')
country = None
        # build the base domain: published companies that have a partner grade
base_partner_domain = [('is_company', '=', True), ('grade_id', '!=', False), ('website_published', '=', True)]
partner_domain = list(base_partner_domain)
if post_name:
partner_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
        if grade_id and grade_id != "all":
            try:
                partner_domain += [('grade_id', '=', int(grade_id))]
            except ValueError:
                pass  # ignore a malformed grade filter from the query string
# group by country
countries = partner_obj.read_group(
request.cr, openerp.SUPERUSER_ID, partner_domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_partners = partner_obj.search(
request.cr, openerp.SUPERUSER_ID, partner_domain,
context=request.context, count=True)
if country_id:
country = country_obj.browse(request.cr, request.uid, country_id, request.context)
partner_domain += [('country_id', '=', country_id)]
if not any(x['country_id'][0] == country_id for x in countries):
countries.append({
'country_id_count': 0,
'country_id': (country_id, country.name)
})
countries.sort(key=lambda d: d['country_id'][1])
countries.insert(0, {
'country_id_count': countries_partners,
'country_id': (0, _("All Countries"))
})
# format pager
partner_ids = partner_obj.search(
request.cr, openerp.SUPERUSER_ID, partner_domain,
context=request.context)
pager = request.website.pager(url="/partners/", total=len(partner_ids), page=page, step=self._references_per_page, scope=7, url_args=post)
# search for partners to display
partners_data = partner_obj.search_read(request.cr, openerp.SUPERUSER_ID,
domain=partner_domain,
fields=request.website.get_partner_white_list_fields(),
offset=pager['offset'],
limit=self._references_per_page,
order="grade_id DESC,partner_weight DESC",
context=request.context)
google_map_partner_ids = ",".join([str(p['id']) for p in partners_data])
# group by grade
grades = partner_obj.read_group(
request.cr, openerp.SUPERUSER_ID, base_partner_domain, ["id", "grade_id"],
groupby="grade_id", orderby="grade_id", context=request.context)
grades_partners = partner_obj.search(
request.cr, openerp.SUPERUSER_ID, base_partner_domain,
context=request.context, count=True)
grades.insert(0, {
'grade_id_count': grades_partners,
'grade_id': ("all", _("All Levels"))
})
values = {
'countries': countries,
'current_country_id': country_id,
'current_country': country,
'grades': grades,
'grade_id': grade_id,
'partners_data': partners_data,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'searches': post,
            'search_path': "?%s" % werkzeug.urls.url_encode(post),
}
return request.website.render("website_crm_partner_assign.index", values)
@http.route(['/partners/<int:partner_id>/', '/partners/<partner_name>-<int:partner_id>/'], type='http', auth="public", website=True, multilang=True)
def partners_ref(self, partner_id, **post):
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
values = website_partner.get_partner_template_value(partner)
if not values:
return self.partners(**post)
values['main_object'] = values['partner']
return request.website.render("website_crm_partner_assign.partner", values)
| trabacus-softapps/openerp-8.0-cc | openerp/addons/website_crm_partner_assign/controllers/main.py | Python | agpl-3.0 | 4,993 |
#!/usr/bin/env python
import rospy
from slaw_smach.ArmStates import *
from slaw_smach.MoveStates import *
from slaw_smach.ObjectDetectState import *
def btt():
rospy.init_node('btt_smach_test')
sm = smach.StateMachine(outcomes=['end'])
#sm.userdata.pose = "D2"
locations = rospy.get_param('locations')
sm.userdata.pose = locations[0]['name']
sm.userdata.suffix = "_grip"
    #sm.userdata.pose = "test"
with sm:
# smach.StateMachine.add('Recover', RecoverState(), transitions = {'done':'MoveStateSmart'}, remapping = {'pose_in':'pose', 'pose_out': 'pose'})
#smach.StateMachine.add('MoveBack', MoveBack(), transitions = {'done':'end'})
smach.StateMachine.add('MoveToStart', MoveStateUserData(),
transitions = {'reached' : 'ScanMatcher',
'not_reached': 'RecoverToStart',
'failed' : 'MoveToStart'},
remapping = {'pose_in' :'pose',
'pose_out':'pose'})
smach.StateMachine.add('RecoverToStart', RecoverState(),
transitions = {'done':'MoveToStart'},
remapping = {'pose_in' :'pose',
'pose_out': 'pose'})
smach.StateMachine.add('ScanMatcher', ScanMatcher(),
transitions = {'reached' : 'PreGrip',
'not_reached': 'ScanMatcher',
'failed' : 'MoveToStart'},
remapping = {'pose_in' : 'pose',
'suffix_in': 'suffix',
'pose_out' : 'pose'})
smach.StateMachine.add('PreGrip', PreGrip(),
transitions = {'success': 'Scan',
'failed' : 'TuckArmPreGrip'},
remapping = {'pose_in' :'pose',
'pose_out':'pose'})
smach.StateMachine.add('TuckArmPreGrip', TuckArm(),
transitions = {'success' : 'PreGrip',
'not_reached': 'TuckArmPreGrip',
'failed' : 'end'})
smach.StateMachine.add("Scan", ScanForObjectsState(),
transitions = {'success' : 'Grip',
'failed' : 'TuckArmMoveStart',
'nothing_found': 'TuckArmDelete'},
remapping = {'pose_in' : 'pose',
'pose_out' : 'pose',
'object_out': 'object',
'point_out' : 'point'})
smach.StateMachine.add('TuckArmMoveStart', TuckArm(),
transitions = {'success' : 'MoveToStart',
'not_reached': 'TuckArmMoveStart',
'failed' : 'end'})
smach.StateMachine.add('TuckArmDelete', TuckArm(),
transitions = {'success' : 'DeleteState',
'not_reached': 'TuckArmDelete',
'failed' : 'end'})
#DeleteNode
smach.StateMachine.add("DeleteState", DelReachedGoalStateBTT(),
transitions = {'not_end' : 'MoveToStart',
'move_out': 'MoveToEnd'},
remapping = {'pose_in' : 'pose',
'pose_out': 'pose'})
smach.StateMachine.add("Grip", Grip(),
transitions = {'success' : 'MoveAwayFromPlatform',
'too_far' : 'ScanMatcher',
'failed' : 'TuckArmFailGrip',
'failed_after_grip': 'TuckArmGrip'},
remapping = {'pose_in' : 'pose',
'object_in' : 'object',
'point_in' : 'point',
'pose_out' : 'pose',
'object_out': 'object',
'point_out' : 'point'})
smach.StateMachine.add('TuckArmGrip', TuckArm(),
transitions = {'success' : 'MoveAwayFromPlatform',
'not_reached': 'TuckArmGrip',
'failed' : 'end'})
smach.StateMachine.add('TuckArmFailGrip', TuckArm(),
transitions = {'success' :'MoveToStart',
'not_reached':'TuckArmFailGrip',
'failed' :'end'})
smach.StateMachine.add('MoveAwayFromPlatform', RecoverState(),
transitions = {'done':'MovePrePlace'},
remapping = {'pose_in':'pose',
'pose_out': 'pose'})
smach.StateMachine.add('MovePrePlace', MoveStateUserData(),
transitions = {'reached' : 'ScanMatcherPrePlace',
'not_reached': 'MoveAwayFromPlatform',
'failed' : 'MoveAwayFromPlatform'},
remapping = {'pose_in' : 'pose',
'pose_out': 'pose'})
smach.StateMachine.add('ScanMatcherPrePlace', ScanMatcher(),
transitions = {'reached' : 'Place',
'not_reached': 'ScanMatcherPrePlace',
'failed' : 'Place'},
remapping = {'pose_in' : 'pose',
'suffix_in': 'suffix',
'pose_out' : 'pose'})
#smach.StateMachine.add('MoveBack20', MoveBack(),
# transitions = {'done':'Place'},
# remapping = {'pose_in':'pose',
# 'pose_out':'pose'})
smach.StateMachine.add('Place', Place(),
transitions = {'success': 'RecoverToStart',
'failed' : 'TuckArmFailPlace'},
remapping = {'pose_in' : 'pose',
'pose_out': 'pose'})
smach.StateMachine.add('TuckArmFailPlace', TuckArm(),
transitions = {'success' : 'Place',
'not_reached': 'TuckArmFailPlace',
'failed' : 'end'})
smach.StateMachine.add('RecoverToEnd', RecoverState(),
transitions = {'done':'MoveToEnd'},
remapping = {'pose_in' : 'pose',
'pose_out': 'pose'})
smach.StateMachine.add('MoveToEnd', MoveStateUserData(),
transitions = {'reached' : 'end',
'not_reached': 'RecoverToEnd',
'failed' : 'end'},
remapping = {'pose_in' : 'pose',
'pose_out': 'pose'})
# Create and start the introspection server
sis = smach_ros.IntrospectionServer('server_name', sm, '/SM_TEST')
sis.start()
# Execute SMACH plan
outcome = sm.execute()
#print sm.userdata.object
#print sm.userdata.point
rospy.spin()
sis.stop()
if __name__ == '__main__':
btt()
#main()
| smARTLab-liv/smartlabatwork-release | slaw_btt/scripts/btt.py | Python | mit | 7,123 |
# -*- coding: utf-8 -*-
#
# dipy documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 4 15:23:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Doc generation depends on being able to import dipy
try:
import dipy
except ImportError:
raise RuntimeError('Cannot import dipy, please investigate')
from distutils.version import LooseVersion
import sphinx
if LooseVersion(sphinx.__version__) < LooseVersion('1'):
raise RuntimeError('Need sphinx >= 1 for numpydoc to work correctly')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# We load the nibabel release info into a dict by explicit execution
rel = {}
execfile(os.path.join('..', 'dipy', 'info.py'), rel)
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'math_dollar', # has to go before numpydoc
'numpydoc',
'github']
# ghissue config
github_project_url = "https://github.com/nipy/dipy"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dipy'
copyright = u'2008-2013, %(AUTHOR)s <%(AUTHOR_EMAIL)s>' % rel
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = rel['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# Include common links
# We don't use this any more because it causes conflicts with the gitwash docs
#rst_epilog = open('links_names.inc', 'rt').read()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build', 'examples']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'dipy.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# Setting to false fixes double module listing under header
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dipydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dipy.tex', u'dipy Documentation',
u'Eleftherios Garyfallidis, Ian Nimmo-Smith, Matthew Brett', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsfonts}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| maurozucchelli/dipy | doc/conf.py | Python | bsd-3-clause | 7,939 |
import re
def parse_point_script_output(script_output):
"""
Parse the output from Praat into a dictionary of acoustic measurements.
See docstring of analyze_script for formatting requirements.
    Values that Praat reports as '--undefined--', or that cannot be parsed as
    numbers (usually because the Praat script crashed), are stored as None.
Parameters
----------
script_output : str
output from Praat. (This is what appears in the Info window when using the Praat GUI)
Returns
----------
dict
dictionary of measurement : value, based on the columns output by the Praat script
"""
headers = []
output = {}
for line in script_output.split('\n'):
        if line.strip() != "" and line.strip() != "." and "Warning" not in line and "warning" not in line:
values = line.strip().split()
if not headers:
headers = values
else:
for (measurement, value) in zip(headers, values):
if value == '--undefined--':
value = None
elif value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
else:
try:
value = float(value)
except ValueError:
value = None
output[measurement] = value
return output
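def _example_point_parse():
    """Illustrative sketch only, not part of the pyraat API: shows the
    header-then-values layout this parser expects.
    Returns {'F1': 512.3, 'F2': None} for the sample below ('--undefined--'
    becomes None).
    """
    sample = "F1\tF2\n512.3\t--undefined--"
    return parse_point_script_output(sample)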
def parse_track_script_output(text):
if not text:
return None
lines = text.splitlines()
head = None
while head is None:
try:
l = lines.pop(0)
except IndexError:
print(text)
raise
if l.startswith('time'):
            head = re.sub(r'[(]\w+[)]', '', l)
head = head.split("\t")[1:]
output = {}
for l in lines:
if '\t' in l:
line = l.split("\t")
time = line.pop(0)
values = {}
for j in range(len(line)):
v = line[j]
if v != '--undefined--':
try:
v = float(v)
                    except ValueError:
                        print(text)
                        print(head)
                        v = None  # treat unparseable values as missing
else:
v = None
values[head[j]] = v
if values:
output[float(time)] = values
return output
| mmcauliffe/python-praat-scripts | pyraat/parse_outputs.py | Python | gpl-3.0 | 2,514 |
"""Support for ESPHome climate devices."""
import logging
from typing import List, Optional
from aioesphomeapi import (
ClimateAction,
ClimateFanMode,
ClimateInfo,
ClimateMode,
ClimateState,
ClimateSwingMode,
)
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_OFF,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_DIFFUSE,
FAN_FOCUS,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_MIDDLE,
FAN_OFF,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
TEMP_CELSIUS,
)
from . import (
EsphomeEntity,
esphome_map_enum,
esphome_state_property,
platform_async_setup_entry,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up ESPHome climate devices based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="climate",
info_type=ClimateInfo,
entity_type=EsphomeClimateDevice,
state_type=ClimateState,
)
@esphome_map_enum
def _climate_modes():
return {
ClimateMode.OFF: HVAC_MODE_OFF,
ClimateMode.AUTO: HVAC_MODE_HEAT_COOL,
ClimateMode.COOL: HVAC_MODE_COOL,
ClimateMode.HEAT: HVAC_MODE_HEAT,
ClimateMode.FAN_ONLY: HVAC_MODE_FAN_ONLY,
ClimateMode.DRY: HVAC_MODE_DRY,
}
@esphome_map_enum
def _climate_actions():
return {
ClimateAction.OFF: CURRENT_HVAC_OFF,
ClimateAction.COOLING: CURRENT_HVAC_COOL,
ClimateAction.HEATING: CURRENT_HVAC_HEAT,
ClimateAction.IDLE: CURRENT_HVAC_IDLE,
ClimateAction.DRYING: CURRENT_HVAC_DRY,
ClimateAction.FAN: CURRENT_HVAC_FAN,
}
@esphome_map_enum
def _fan_modes():
return {
ClimateFanMode.ON: FAN_ON,
ClimateFanMode.OFF: FAN_OFF,
ClimateFanMode.AUTO: FAN_AUTO,
ClimateFanMode.LOW: FAN_LOW,
ClimateFanMode.MEDIUM: FAN_MEDIUM,
ClimateFanMode.HIGH: FAN_HIGH,
ClimateFanMode.MIDDLE: FAN_MIDDLE,
ClimateFanMode.FOCUS: FAN_FOCUS,
ClimateFanMode.DIFFUSE: FAN_DIFFUSE,
}
@esphome_map_enum
def _swing_modes():
return {
ClimateSwingMode.OFF: SWING_OFF,
ClimateSwingMode.BOTH: SWING_BOTH,
ClimateSwingMode.VERTICAL: SWING_VERTICAL,
ClimateSwingMode.HORIZONTAL: SWING_HORIZONTAL,
}
class EsphomeClimateDevice(EsphomeEntity, ClimateDevice):
"""A climate implementation for ESPHome."""
@property
def _static_info(self) -> ClimateInfo:
return super()._static_info
@property
def _state(self) -> Optional[ClimateState]:
return super()._state
@property
def precision(self) -> float:
"""Return the precision of the climate device."""
precicions = [PRECISION_WHOLE, PRECISION_HALVES, PRECISION_TENTHS]
for prec in precicions:
if self._static_info.visual_temperature_step >= prec:
return prec
# Fall back to highest precision, tenths
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available operation modes."""
return [
_climate_modes.from_esphome(mode)
for mode in self._static_info.supported_modes
]
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [
_fan_modes.from_esphome(mode)
for mode in self._static_info.supported_fan_modes
]
@property
def preset_modes(self):
"""Return preset modes."""
return [PRESET_AWAY, PRESET_HOME] if self._static_info.supports_away else []
@property
def swing_modes(self):
"""Return the list of available swing modes."""
return [
_swing_modes.from_esphome(mode)
for mode in self._static_info.supported_swing_modes
]
@property
def target_temperature_step(self) -> float:
"""Return the supported step of target temperature."""
# Round to one digit because of floating point math
return round(self._static_info.visual_temperature_step, 1)
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._static_info.visual_min_temperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._static_info.visual_max_temperature
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
features = 0
if self._static_info.supports_two_point_target_temperature:
features |= SUPPORT_TARGET_TEMPERATURE_RANGE
else:
features |= SUPPORT_TARGET_TEMPERATURE
if self._static_info.supports_away:
features |= SUPPORT_PRESET_MODE
if self._static_info.supported_fan_modes:
features |= SUPPORT_FAN_MODE
if self._static_info.supported_swing_modes:
features |= SUPPORT_SWING_MODE
return features
# https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property
# pylint: disable=invalid-overridden-method
@esphome_state_property
def hvac_mode(self) -> Optional[str]:
"""Return current operation ie. heat, cool, idle."""
return _climate_modes.from_esphome(self._state.mode)
@esphome_state_property
def hvac_action(self) -> Optional[str]:
"""Return current action."""
# HA has no support feature field for hvac_action
if not self._static_info.supports_action:
return None
return _climate_actions.from_esphome(self._state.action)
@esphome_state_property
def fan_mode(self):
"""Return current fan setting."""
return _fan_modes.from_esphome(self._state.fan_mode)
@esphome_state_property
def preset_mode(self):
"""Return current preset mode."""
return PRESET_AWAY if self._state.away else PRESET_HOME
@esphome_state_property
def swing_mode(self):
"""Return current swing mode."""
return _swing_modes.from_esphome(self._state.swing_mode)
@esphome_state_property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._state.current_temperature
@esphome_state_property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self._state.target_temperature
@esphome_state_property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach."""
return self._state.target_temperature_low
@esphome_state_property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach."""
return self._state.target_temperature_high
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature (and operation mode if set)."""
data = {"key": self._static_info.key}
if ATTR_HVAC_MODE in kwargs:
data["mode"] = _climate_modes.from_hass(kwargs[ATTR_HVAC_MODE])
if ATTR_TEMPERATURE in kwargs:
data["target_temperature"] = kwargs[ATTR_TEMPERATURE]
if ATTR_TARGET_TEMP_LOW in kwargs:
data["target_temperature_low"] = kwargs[ATTR_TARGET_TEMP_LOW]
if ATTR_TARGET_TEMP_HIGH in kwargs:
data["target_temperature_high"] = kwargs[ATTR_TARGET_TEMP_HIGH]
await self._client.climate_command(**data)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target operation mode."""
await self._client.climate_command(
key=self._static_info.key, mode=_climate_modes.from_hass(hvac_mode)
)
async def async_set_preset_mode(self, preset_mode):
"""Set preset mode."""
away = preset_mode == PRESET_AWAY
await self._client.climate_command(key=self._static_info.key, away=away)
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new fan mode."""
await self._client.climate_command(
key=self._static_info.key, fan_mode=_fan_modes.from_hass(fan_mode)
)
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new swing mode."""
await self._client.climate_command(
key=self._static_info.key, swing_mode=_swing_modes.from_hass(swing_mode)
)
| qedi-r/home-assistant | homeassistant/components/esphome/climate.py | Python | apache-2.0 | 9,432 |
#!/usr/bin/env python2.7
# coding: utf-8
## Use of swig and numpy to compute the evolution of internal energy in a vNR scheme
from IPython import get_ipython
import os
##### We will use numpy extensively
import numpy as np
##### Import the module created by swig as a classical python module
import vnrinternalenergy as vnr_ext
##### Import classical numpy modules for performance comparison purpose
from xvof.solver.functionstosolve.vnrenergyevolutionforveformulation import VnrEnergyEvolutionForVolumeEnergyFormulation
from xvof.solver.newtonraphson import NewtonRaphson
from xvof.equationsofstate.miegruneisen import MieGruneisen
### Creation of the data
pb_size = 1000
old_density = np.ndarray(pb_size, dtype=np.float64, order='c')
new_density = np.ndarray(pb_size, dtype=np.float64, order='c')
pressure= np.ndarray(pb_size, dtype=np.float64, order='c')
internal_energy = np.ndarray(pb_size, dtype=np.float64, order='c')
new_internal_energy = np.ndarray(pb_size, dtype=np.float64, order='c')
new_pressure = np.ndarray(pb_size, dtype=np.float64, order='c')
new_soundspeed = np.ndarray(pb_size, dtype=np.float64, order='c')
mask = np.ndarray(pb_size, dtype=np.bool, order='c')
##### Initialization of the data
old_density[:] = 7500.
new_density[:] = 9500.
pressure[:] = 1e+09
internal_energy[:] = 1e+06
new_internal_energy[:] = 0.
new_pressure[:] = 0.
new_soundspeed[:] = 0.
mask[:] = True
### Test of performance of swig made module
tmp_ie = np.ndarray(pb_size, dtype=np.float64, order='c')
tmp_p = np.ndarray(pb_size, dtype=np.float64, order='c')
tmp_ss = np.ndarray(pb_size, dtype=np.float64, order='c')
print "Swig performance :"
get_ipython().magic(u'''timeit vnr_ext.launch_vnr_resolution(old_density[mask], new_density[mask], pressure[mask], internal_energy[mask], tmp_ie, tmp_p, tmp_ss)''')
new_internal_energy[mask] = tmp_ie
new_pressure[mask] = tmp_p
new_soundspeed[mask] = tmp_ss
c_pressure = np.copy(new_pressure)
c_internal_energy = np.copy(new_internal_energy)
c_soundspeed = np.copy(new_soundspeed)
### Test of performance of classical numpy module
new_internal_energy[:] = 0.
new_pressure[:] = 0.
new_soundspeed[:] = 0.
function_to_vanish = VnrEnergyEvolutionForVolumeEnergyFormulation()
solver = NewtonRaphson(function_to_vanish)
my_variables = {'EquationOfState': MieGruneisen(),
'OldDensity': old_density,
'NewDensity': new_density,
'Pressure': pressure,
'OldEnergy': internal_energy}
function_to_vanish.setVariables(my_variables)
print "Classical numpy module performance :"
get_ipython().magic(u'timeit ref_internal_energy = solver.computeSolution(internal_energy)')
ref_internal_energy = solver.computeSolution(internal_energy[mask])
ref_pressure = np.ndarray(pb_size, dtype=np.float64, order='c')
ref_soundspeed = np.ndarray(pb_size, dtype=np.float64, order='c')
dummy = np.ndarray(pb_size, dtype=np.float64, order='c')
tmp_p[:] = 0.
tmp_ss[:] = 0.
my_variables['EquationOfState'].solveVolumeEnergy(1. / new_density[mask], ref_internal_energy[mask], tmp_p, tmp_ss, dummy[mask])
ref_pressure[mask] = tmp_p
ref_soundspeed[mask] = tmp_ss
print "Pressure difference between reference and swig computations : %15.9g"%(max(abs(ref_pressure - c_pressure)))
print "Internal energy difference between reference and swig computations : %15.9g"%(max(abs(ref_internal_energy - c_internal_energy)))
print "Sound speed difference between reference and swig computations : %15.9g"%(max(abs(ref_soundspeed - c_soundspeed)))
| hippo91/nonlinear_solver | doc/swig_vs_numpy_comparison.py | Python | gpl-2.0 | 3,539 |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XManager."""
| deepmind/xmanager | __init__.py | Python | apache-2.0 | 610 |
# -*- coding: utf-8 -*-
# File: __init__.py
from tensorpack.libinfo import __version__, _HAS_TF
from tensorpack.utils import *
from tensorpack.dataflow import *
# dataflow can be used alone without installing tensorflow
# TODO maybe separate dataflow to a new project if it's good enough
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = _HAS_TF
if STATICA_HACK:
from tensorpack.models import *
from tensorpack.callbacks import *
from tensorpack.tfutils import *
from tensorpack.train import *
from tensorpack.graph_builder import InputDesc, ModelDesc, ModelDescBase
from tensorpack.input_source import *
from tensorpack.predict import *
| eyaler/tensorpack | tensorpack/__init__.py | Python | apache-2.0 | 791 |
# Copyright (C) 2015-2018 East Asian Observatory
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful,but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import, division, print_function, \
unicode_literals
from codecs import ascii_decode
from datetime import datetime
import re
from time import sleep
from xml.etree import ElementTree as etree
import requests
from ..compat import python_version
from ..type.simple import PrevProposalPub
from ..type.util import null_tuple
from ..util import get_logger, list_in_blocks
logger = get_logger(__name__)
if python_version < 3:
# Python 2: ElementTree can return text as either ASCII byte strings or
# as unicode.
def element_text(element):
text = element.text
if isinstance(text, unicode):
return text
return ascii_decode(text, 'replace')[0]
else:
# ElementTree should always return unicode (i.e. str).
def element_text(element):
return element.text
fixed_responses = {}
query_block_size = 10
query_delay = 3
def get_pub_info_arxiv(article_ids):
"""
Get information on a list of article IDs.
:return: a dictionary by article ID
"""
url = 'http://export.arxiv.org/api/query'
xmlns = {'atom': 'http://www.w3.org/2005/Atom'}
ans = {}
n_query = 0
for query in list_in_blocks(article_ids, query_block_size):
if n_query > 0:
sleep(query_delay)
n_query += 1
query_str = ','.join(query)
feed = None
try:
response = fixed_responses.get(query_str)
if response is None:
r = requests.get(url, params={
'max_results': 10,
'id_list': query_str,
}, timeout=30)
r.raise_for_status()
response = r.content
feed = etree.fromstring(response)
except requests.exceptions.RequestException:
logger.exception('Failed to retrive feed from arXiv')
except:
logger.exception('Failed to parse arXiv feed')
if feed is not None:
for entry in feed.findall('atom:entry', xmlns):
try:
# Read elements from the feed entry.
id_element = entry.find('atom:id', xmlns)
if id_element is None:
continue
id_ = element_text(id_element)
title = element_text(entry.find('atom:title', xmlns))
authors = entry.findall('atom:author', xmlns)
author = element_text(authors[0].find('atom:name', xmlns))
if len(authors) > 1:
author += ' et al.'
published = element_text(
entry.find('atom:published', xmlns))
if published.endswith('Z'):
published = published[:-1]
year = datetime.strptime(
published, '%Y-%m-%dT%H:%M:%S').year
# Create publication info tuple.
pub = null_tuple(PrevProposalPub)._replace(
title=re.sub(r'\s+', ' ', title).strip(),
author=re.sub(r'\s+', ' ', author).strip(),
year='{}'.format(year))
# Try to determine to which article ID this entry
# relates and store it in the dictionary.
for query_id in query:
if query_id in id_:
ans[query_id] = pub
break
else:
logger.warning('Got unexpected ID {} from arXiv', id_)
except:
logger.exception('Failed to parse arXiv feed entry')
return ans
| eaobservatory/hedwig | lib/hedwig/publication/arxiv.py | Python | gpl-3.0 | 4,543 |
class LineIntersection(object):
""" Line Intersection Class. initiates with lines
[(x1,y1), (x2, y2)] to be converted to labeled
endpoints [(x,y), label]. Main test for gen.
line intersection uses sweep-line algorithm.
"""
def __init__(self, data):
self.count = 0
self.lines = {}
self.sweeps = []
self.tested = []
self.addLines(data)
################
#Data Properties
################
def addLine(self, line):
self.sweeps.append([line[0], str(self.count)])
self.sweeps.append([line[1], str(self.count)])
self.lines[str(self.count)] = line
self.count += 1
def addLines(self, array):
for line in array:
self.addLine(line)
def sortSweeps(self):
self.sweeps.sort(key = lambda x: (x[0][0], not x[1]))
###################
#Line Intersections
###################
def intersectTwo(self, lineA, lineB):
""" It suffices to check if two points are on
opposite sides of a line segment. To do
this we compute the cross products of
lineA and the endpoints of lineB and take
their product. The product will be negative
if and only if they intersect.
"""
P, Q = lineB[0], lineB[1]
xproductP = (1.0*(lineA[1][0] - lineA[0][0])*(P[1] + lineA[1][1]) -
1.0*(lineA[0][1] - lineA[1][1])*(P[0] - lineA[1][0]))
xproductQ = (1.0*(lineA[1][0] - lineA[0][0])*(Q[1] + lineA[1][1]) -
1.0*(lineA[0][1] - lineA[1][1])*(Q[0] - lineA[1][0]))
return true if xproductP * xproductQ < 0 else false
def checkIntersection(self):
if not self.sweeps:
return False
self.sortSweeps()
self.tested = []
events = []
i = 0
while i < len(self.sweeps):
newvalue = self.sweeps[i][1]
if not events:
events.append(newvalue)
elif newvalue in events:
events.remove(newvalue)
else:
for event in events:
if (event, newvalue) in self.tested:
continue
test = self.intersectTwo(self.lines[event],
self.lines[newvalue])
if test:
return True
else:
self.tested.append((event, newvalue))
i += 1
return False
##############
#Miscellaneous
##############
def __repr__(self):
return 'Line Intersection Class for lines %s' % self.lines
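# Minimal usage sketch (illustrative; not part of the original module).
if __name__ == '__main__':
    # Two segments crossing at (1, 1); the sweep-line check should succeed.
    demo = LineIntersection([[(0, 0), (2, 2)], [(0, 2), (2, 0)]])
    print(demo.checkIntersection())  # expected: True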
| n17r4m/mmm.coffee | old-lib/geometry/xxx-LineIntersection.py | Python | lgpl-3.0 | 2,196 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dci.common import exceptions as dci_exc
import datetime
import uuid
class Mixin(object):
    def serialize(self, ignore_columns=None):
        # None instead of a mutable default argument; None means "keep all".
        ignore_columns = ignore_columns or []
def _get_nested_columns():
_res = {}
for ic in ignore_columns:
if "." in ic:
k, v = ic.split(".")
if k not in _res:
_res[k] = [v]
else:
_res[k].append(v)
return _res
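        # ignore_columns supports dotted paths: "team.created_at" keeps the
        # nested "team" object but drops its "created_at" field
        # (hypothetical column names, for illustration only).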
nested_ignore_columns = []
if ignore_columns:
nested_ignore_columns = _get_nested_columns()
_dict = {}
_attrs = self.__dict__.keys()
for attr in _attrs:
if attr in ignore_columns:
continue
attr_obj = getattr(self, attr)
if isinstance(attr_obj, list):
_dict[attr] = []
for ao in attr_obj:
_ignore_columns = []
if attr in nested_ignore_columns:
_ignore_columns = nested_ignore_columns[attr]
if isinstance(ao, Mixin):
_dict[attr].append(ao.serialize(ignore_columns=_ignore_columns))
else:
_dict[attr].append(ao)
elif isinstance(attr_obj, Mixin):
_ignore_columns = []
if attr in nested_ignore_columns:
_ignore_columns = nested_ignore_columns[attr]
_dict[attr] = attr_obj.serialize(ignore_columns=_ignore_columns)
elif isinstance(attr_obj, uuid.UUID):
_dict[attr] = str(attr_obj)
elif isinstance(attr_obj, datetime.datetime):
_dict[attr] = attr_obj.isoformat()
elif not attr.startswith("_"):
_dict[attr] = self.__dict__[attr]
return _dict
def handle_pagination(query, args):
limit_max = 200
default_limit = 20
default_offset = 0
query = query.offset(args.get("offset", default_offset))
query = query.limit(min(args.get("limit", default_limit), limit_max))
return query
def handle_args(query, model_object, args):
if args.get("sort"):
columns = model_object.__mapper__.columns.keys()
for s in args.get("sort"):
asc = True
if s.startswith("-"):
s = s[1:]
asc = False
if s not in columns:
raise dci_exc.DCIException(
'Invalid sort key: "%s"' % s,
payload={"Valid sort keys": sorted(set(columns))},
)
if asc:
query = query.order_by(getattr(model_object, s).asc())
else:
query = query.order_by(getattr(model_object, s).desc())
else:
query = query.order_by(getattr(model_object, "created_at").desc())
if args.get("where"):
columns = model_object.__mapper__.columns.keys()
for w in args.get("where"):
try:
name, value = w.split(":", 1)
if name not in columns:
raise dci_exc.DCIException(
'Invalid where key: "%s"' % w,
payload={"Valid where keys": sorted(set(columns))},
)
except ValueError:
payload = {
"error": 'where key must have the following form "key:value"'
}
raise dci_exc.DCIException(
'Invalid where key: "%s"' % w, payload=payload
)
m_column = getattr(model_object, name)
if str(m_column.type) == "UUID" and uuid.UUID(value):
query = query.filter(m_column == value)
elif m_column.type.python_type == list:
query = query.filter(m_column.contains([value]))
elif value.endswith("*") and value.count("*") == 1:
query = query.filter(m_column.contains(value.replace("*", "")))
else:
query = query.filter(m_column == value)
if args.get("created_after"):
query = query.filter(
getattr(model_object, "created_at") >= args.get("created_after")
)
if args.get("updated_after"):
query = query.filter(
getattr(model_object, "updated_at") >= args.get("updated_after")
)
return query
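# Illustrative usage (hypothetical model and session, not part of this module):
#   query = handle_args(session.query(Job), Job,
#                       {"sort": ["-created_at"],
#                        "where": ["status:success", "name:deploy*"]})
# sorts newest first, filters status == 'success', and matches names
# containing 'deploy' (a single trailing '*' acts as a contains filter).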
| redhat-cip/dci-control-server | dci/db/declarative.py | Python | apache-2.0 | 5,019 |
#/!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iris Classification Sample Cloud Runner.
"""
import argparse
import datetime
import os
import subprocess
import uuid
import apache_beam as beam
import tensorflow as tf
import trainer.model as iris
import google.cloud.ml as ml
import google.cloud.ml.dataflow.io.tfrecordio as tfrecordio
import google.cloud.ml.io as io
# Model variables
MODEL_NAME = 'iris'
TRAINER_NAME = 'trainer-1.0.tar.gz'
def _default_project():
get_project = ['gcloud', 'config', 'list', 'project',
'--format=value(core.project)']
with open(os.devnull, 'w') as dev_null:
return subprocess.check_output(get_project, stderr=dev_null).strip()
parser = argparse.ArgumentParser(
description='Runs Training on the Iris model data.')
parser.add_argument('--project_id',
help='The project to which the job will be submitted.')
parser.add_argument('--cloud', action='store_true',
help='Run preprocessing on the cloud.')
parser.add_argument('--metadata_path',
help='The path to the metadata file from preprocessing.')
parser.add_argument('--training_data',
default='gs://cloud-ml-data/iris/data_train.csv',
help='Data to analyze and encode as training features.')
parser.add_argument('--eval_data',
default='gs://cloud-ml-data/iris/data_eval.csv',
help='Data to encode as evaluation features.')
parser.add_argument('--predict_data',
default='gs://cloud-ml-data/iris/data_predict.csv',
help='Data to encode as prediction features.')
parser.add_argument('--output_dir', default=None,
help=('Google Cloud Storage or Local directory in which '
'to place outputs.'))
parser.add_argument('--deploy_model_name', default='iris',
help=('If --cloud is used, the model is deployed with this '
'name. The default is iris.'))
parser.add_argument('--deploy_model_version',
default='v' + uuid.uuid4().hex[:4],
help=('If --cloud is used, the model is deployed with this '
'version. The default is four random characters.'))
args, passthrough_args = parser.parse_known_args()
if not args.project_id:
args.project_id = _default_project()
if not args.output_dir:
if args.cloud:
args.output_dir = os.path.join('gs://' + args.project_id + '-ml',
MODEL_NAME)
else:
path = 'output'
if not os.path.isdir(path):
os.makedirs(path)
args.output_dir = path
TRAINER_URI = os.path.join(args.output_dir, TRAINER_NAME)
MODULE_NAME = 'trainer.task'
EXPORT_SUBDIRECTORY = 'model'
def preprocess(pipeline):
feature_set = iris.IrisFeatures()
training_data = beam.io.TextFileSource(
args.training_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns))
eval_data = beam.io.TextFileSource(
args.eval_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns))
predict_data = beam.io.TextFileSource(
args.predict_data, strip_trailing_newlines=True,
coder=io.CsvCoder.from_feature_set(feature_set, feature_set.csv_columns,
has_target_columns=False))
train = pipeline | beam.Read('ReadTrainingData', training_data)
evaluate = pipeline | beam.Read('ReadEvalData', eval_data)
predict = pipeline | beam.Read('ReadPredictData', predict_data)
(metadata, train_features, eval_features, predict_features) = (
(train, evaluate, predict)
| 'Preprocess'
>> ml.Preprocess(feature_set, input_format='csv',
format_metadata={'headers': feature_set.csv_columns}))
# Writes metadata.yaml (text file), features_train, features_eval, and
# features_eval (TFRecord files)
(metadata | 'SaveMetadata'
>> io.SaveMetadata(os.path.join(args.output_dir, 'metadata.yaml')))
# We turn off sharding of the feature files because the dataset is very small.
(train_features | 'SaveTrain'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_train')))
(eval_features | 'SaveEval'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_eval')))
(predict_features | 'SavePredict'
>> io.SaveFeatures(
os.path.join(args.output_dir, 'features_predict')))
return metadata, train_features, eval_features, predict_features
def get_train_parameters(metadata):
job_args = []
return {
'package_uris': [TRAINER_URI],
'python_module': MODULE_NAME,
'export_subdir': EXPORT_SUBDIRECTORY,
'metadata': metadata,
'label': 'Train',
'region': 'us-central1',
'scale_tier': 'STANDARD_1',
'job_args': job_args
}
def train(pipeline, train_features=None, eval_features=None, metadata=None):
if not train_features:
train_features = (
pipeline
| 'ReadTrain'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_train*')))
if not eval_features:
eval_features = (
pipeline
| 'ReadEval'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_eval*')))
trained_model, results = ((train_features, eval_features)
| ml.Train(**get_train_parameters(metadata)))
trained_model | 'SaveModel' >> io.SaveModel(os.path.join(args.output_dir,
'saved_model'))
results | io.SaveTrainingJobResult(os.path.join(args.output_dir,
'train_results'))
return trained_model, results
def evaluate(pipeline, trained_model=None, eval_features=None):
if not eval_features:
eval_features = (
pipeline
| 'ReadEval'
>> io.LoadFeatures(os.path.join(args.output_dir, 'features_eval*')))
if not trained_model:
trained_model = (pipeline
| 'LoadModel'
>> io.LoadModel(os.path.join(args.output_dir,
'saved_model')))
# Run our evaluation data through a Batch Evaluation, then pull out just
# the expected and predicted target values.
evaluations = (eval_features
| 'Evaluate' >> ml.Evaluate(trained_model)
| beam.Map('CreateEvaluations', make_evaluation_dict))
coder = io.CsvCoder(['key', 'target', 'predicted', 'score'],
['target', 'predicted', 'score'])
write_text_file(evaluations, 'WriteEvaluation', 'model_evaluations', coder)
return evaluations
def make_evaluation_dict((example, prediction)):
# When running inside of Dataflow, we don't have our global scope,
# so import tf here so that we can access it.
import numpy
import tensorflow as tf
tf_example = tf.train.Example()
tf_example.ParseFromString(example.values()[0])
feature_map = tf_example.features.feature
scores = prediction['score']
prediction = numpy.argmax(scores)
return {
'key': feature_map['key'].bytes_list.value[0],
'target': feature_map['species'].int64_list.value[0],
'predicted': prediction,
'score': scores[prediction]
}
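# Illustrative: with scores [0.1, 0.7, 0.2], make_evaluation_dict reports
# predicted=1 and score=0.7 alongside the example's key and true target.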
def deploy_model(pipeline, model_name, version_name, trained_model=None):
if not trained_model:
trained_model = (pipeline
| 'LoadModel'
>> io.LoadModel(os.path.join(args.output_dir,
'saved_model')))
return trained_model | ml.DeployVersion(model_name, version_name)
def model_analysis(pipeline, evaluation_data=None, metadata=None):
if not metadata:
metadata = pipeline | io.LoadMetadata(
os.path.join(args.output_dir, "metadata.yaml"))
if not evaluation_data:
coder = io.CsvCoder(['key', 'target', 'predicted', 'score'],
['target', 'predicted', 'score'])
evaluation_data = read_text_file(pipeline, 'ReadEvaluation',
'model_evaluations', coder=coder)
confusion_matrix, precision_recall, logloss = (
evaluation_data | 'AnalyzeModel' >> ml.AnalyzeModel(metadata))
confusion_matrix | io.SaveConfusionMatrixCsv(
os.path.join(args.output_dir, 'analyzer_cm.csv'))
precision_recall | io.SavePrecisionRecallCsv(
os.path.join(args.output_dir, 'analyzer_pr.csv'))
write_text_file(logloss, 'Write Log Loss', 'analyzer_logloss.csv')
return confusion_matrix, precision_recall, logloss
def get_pipeline_name():
if args.cloud:
return 'BlockingDataflowPipelineRunner'
else:
return 'DirectPipelineRunner'
def dataflow():
"""Run Preprocessing, Training, Eval, and Prediction as a single Dataflow."""
  print 'Building', TRAINER_NAME, 'package.'
subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])
subprocess.check_call(['gsutil', '-q', 'cp',
os.path.join('dist', TRAINER_NAME),
TRAINER_URI])
opts = None
if args.cloud:
options = {
'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(args.output_dir, 'tmp'),
'job_name': ('cloud-ml-sample-iris' + '-'
+ datetime.datetime.now().strftime('%Y%m%d%H%M%S')),
'project': args.project_id,
# Dataflow needs a copy of the version of the cloud ml sdk that
# is being used.
'extra_packages': [ml.sdk_location, TRAINER_URI],
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
else:
# For local runs, the trainer must be installed as a module.
subprocess.check_call(['pip', 'install', '--upgrade', '--force-reinstall',
'--user', os.path.join('dist', TRAINER_NAME)])
p = beam.Pipeline(get_pipeline_name(), options=opts)
  # Every function below writes its output to a file. The inputs to these
# functions are also optional; if they are missing, the input values are read
# from a file. Therefore if running this script multiple times, some steps can
# be removed to prevent recomputing values.
metadata, train_features, eval_features, predict_features = preprocess(p)
trained_model, results = train(p, train_features, eval_features, metadata)
evaluations = evaluate(p, trained_model, eval_features)
confusion_matrix, precision_recall, logloss = (
model_analysis(p, evaluations, metadata))
if args.cloud:
deployed = deploy_model(p, args.deploy_model_name,
args.deploy_model_version, trained_model)
# Use our deployed model to run a batch prediction.
output_uri = os.path.join(args.output_dir, 'batch_prediction_results')
deployed | "Batch Predict" >> ml.Predict([args.predict_data], output_uri,
region='us-central1',
data_format='TEXT')
print 'Deploying %s version: %s' % (args.deploy_model_name,
args.deploy_model_version)
p.run()
if args.cloud:
print 'Deployed %s version: %s' % (args.deploy_model_name,
args.deploy_model_version)
def write_text_file(pcollection, label, output_name,
coder=beam.coders.ToStringCoder()):
return pcollection | label >> beam.Write(beam.io.TextFileSink(
os.path.join(args.output_dir, output_name),
shard_name_template='',
coder=coder))
def read_text_file(pipeline, label, input_name,
coder=beam.coders.StrUtf8Coder()):
return pipeline | label >> beam.Read(beam.io.TextFileSource(
os.path.join(args.output_dir, input_name),
strip_trailing_newlines=True,
coder=coder))
def main():
dataflow()
if __name__ == '__main__':
main()
| obulpathi/cloud | ml/tensorflow/iris/pipeline.py | Python | apache-2.0 | 12,718 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TaskResult file download handler.
This module uses blobstore_handlers, which is webapp1 stuff.
No available AppEngine documentation talks about having a webapp2
equivalent for doing blobstore_handlers.BlobstoreDownloadHandler.send_blob.
This should all be converted to webapp2 when send_blob becomes available.
"""
__author__ = 'jeff.carollo@gmail.com (Jeff Carollo)'
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp.util import run_wsgi_app
import webapp2
class TaskResultFileDownloadHandler(
blobstore_handlers.BlobstoreDownloadHandler):
def get(self, file_key):
blob = blobstore.get(file_key)
if not blob:
self.error(404)
else:
# TODO(jeff.carollo): Actually keep track of mimetypes.
# TODO(jeff.carollo): Paginate output.
if 'text' in self.request.headers.get('Accept', ''):
self.response.headers['Content-Type'] = 'text/plain'
data = blobstore.fetch_data(file_key, 0,
blobstore.MAX_BLOB_FETCH_SIZE-1)
self.response.out.write(data)
if len(data) >= blobstore.MAX_BLOB_FETCH_SIZE-1:
self.response.out.write('\n----RESULT-TRUNCATED----\n')
else:
self.send_blob(file_key)
app = webapp2.WSGIApplication([
('/taskresultfiles/(.+)', TaskResultFileDownloadHandler),
], debug=True)
| Eseoghene/bite-project | deps/mrtaskman/server/handlers/taskresultfiles.py | Python | apache-2.0 | 2,001 |
# -*- coding: utf-8 -*-
from tests.functional_tests import isolate
from tuttle.resource import FileResource
import os
# TODO what about symlinks ?
class TestFile():
@isolate
def test_directory_should_be_removable(self):
""" if a file resource is a directory it should be removable """
os.mkdir('a_dir')
assert os.path.isdir('a_dir')
r = FileResource("file://a_dir")
r.remove()
assert not os.path.exists('a_dir')
@isolate
def test_directory_should_be_removable_even_if_not_empty(self):
""" if a file resource is a directory it should be removable even if it contains files """
os.mkdir('a_dir')
open('a_dir/A', 'w').write('A')
assert os.path.isdir('a_dir')
r = FileResource("file://a_dir")
r.remove()
assert not os.path.exists('a_dir')
@isolate
def test_directory_should_have_a_signature(self):
""" if a file resource is a directory it should be removable """
os.mkdir('a_dir')
assert os.path.isdir('a_dir')
r = FileResource("file://a_dir")
sig = r.signature()
# TODO should a directory have a signature resulting of its content ?
assert sig.startswith("sha1:"), sig
| lexman/tuttle | tests/test_file.py | Python | mit | 1,257 |
from grappa.engine import Engine, isoperator
def test_engine(should):
Engine | should.be.a('class')
Engine | should.have.property('register') > should.be.a('function')
class FakeOperator(object):
operators = tuple()
kind = 'accessor'
def run(self):
pass
def test_isoperator(should):
FakeOperator() | should.pass_function(isoperator)
| grappa-py/grappa | tests/engine_test.py | Python | mit | 373 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry points for YAPF.
The main APIs that YAPF exposes to drive the reformatting.
FormatFile(): reformat a file.
FormatCode(): reformat a string of code.
These APIs have some common arguments:
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
print_diff: (bool) Instead of returning the reformatted source, return a
    unified diff that turns the original source into the reformatted source.
verify: (bool) True if reformatted code should be verified for syntax.
"""
import difflib
import re
import sys
from lib2to3.pgen2 import tokenize
from yapf.yapflib import blank_line_calculator
from yapf.yapflib import comment_splicer
from yapf.yapflib import continuation_splicer
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_unwrapper
from yapf.yapflib import pytree_utils
from yapf.yapflib import reformatter
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import subtype_assigner
def FormatFile(filename,
style_config=None,
lines=None,
print_diff=False,
verify=True,
in_place=False,
logger=None):
"""Format a single Python file and return the formatted code.
Arguments:
filename: (unicode) The file to reformat.
in_place: (bool) If True, write the reformatted code back to the file.
logger: (function) A function or lambda that takes a string and emits it.
remaining arguments: see comment at the top of this module.
Returns:
Pair of (reformatted_code, encoding). reformatted_code is None if the file
is sucessfully written to (having used in_place). reformatted_code is a
diff if print_diff is True.
Raises:
IOError: raised if there was an error reading the file.
ValueError: raised if in_place and print_diff are both specified.
"""
_CheckPythonVersion()
if in_place and print_diff:
raise ValueError('Cannot pass both in_place and print_diff.')
original_source, encoding = ReadFile(filename, logger)
reformatted_source = FormatCode(original_source,
style_config=style_config,
filename=filename,
lines=lines,
print_diff=print_diff,
verify=verify)
if in_place:
with py3compat.open_with_encoding(filename,
mode='w',
encoding=encoding) as fd:
fd.write(reformatted_source)
return None, encoding
return reformatted_source, encoding
def FormatCode(unformatted_source,
filename='<unknown>',
style_config=None,
lines=None,
print_diff=False,
verify=True):
"""Format a string of Python code.
This provides an alternative entry point to YAPF.
Arguments:
unformatted_source: (unicode) The code to format.
filename: (unicode) The name of the file being reformatted.
remaining arguments: see comment at the top of this module.
Returns:
The code reformatted to conform to the desired formatting style.
"""
_CheckPythonVersion()
style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))
if not unformatted_source.endswith('\n'):
unformatted_source += '\n'
tree = pytree_utils.ParseCodeToTree(unformatted_source)
# Run passes on the tree, modifying it in place.
comment_splicer.SpliceComments(tree)
continuation_splicer.SpliceContinuations(tree)
subtype_assigner.AssignSubtypes(tree)
split_penalty.ComputeSplitPenalties(tree)
blank_line_calculator.CalculateBlankLines(tree)
uwlines = pytree_unwrapper.UnwrapPyTree(tree)
for uwl in uwlines:
uwl.CalculateFormattingInformation()
_MarkLinesToFormat(uwlines, lines)
reformatted_source = reformatter.Reformat(uwlines, verify)
if unformatted_source == reformatted_source:
return '' if print_diff else reformatted_source
code_diff = _GetUnifiedDiff(unformatted_source, reformatted_source,
filename=filename)
if print_diff:
return code_diff
return reformatted_source
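# Minimal usage sketch of the two entry points above (illustrative; the file
# name and code snippet are hypothetical):
#   reformatted, encoding = FormatFile('example.py', style_config='pep8')
#   diff = FormatCode('x =1\n', filename='example.py', print_diff=True)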
def _CheckPythonVersion():
errmsg = 'yapf is only supported for Python 2.7 or 3.4+'
if sys.version_info[0] == 2:
if sys.version_info[1] < 7:
raise RuntimeError(errmsg)
elif sys.version_info[0] == 3:
if sys.version_info[1] < 4:
raise RuntimeError(errmsg)
def ReadFile(filename, logger=None):
"""Read the contents of the file.
An optional logger can be specified to emit messages to your favorite logging
stream; if specified, errors are logged there before the exception is re-raised. This is external so that it
can be used by third-party applications.
Arguments:
filename: (unicode) The name of the file.
logger: (function) A function or lambda that takes a string and emits it.
Returns:
The contents of filename.
Raises:
IOError: raised if there was an error reading the file.
"""
try:
with open(filename, 'rb') as fd:
encoding = tokenize.detect_encoding(fd.readline)[0]
except IOError as err:
if logger:
logger(err)
raise
try:
with py3compat.open_with_encoding(filename, mode='r',
encoding=encoding) as fd:
source = fd.read()
return source, encoding
except IOError as err:
if logger:
logger(err)
raise
DISABLE_PATTERN = r'^#+ +yapf: *disable$'
ENABLE_PATTERN = r'^#+ +yapf: *enable$'
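# Illustrative input for the markers above (hypothetical snippet): everything
# between the two comments is left untouched by _MarkLinesToFormat below.
#   # yapf: disable
#   matrix = [1, 0,
#             0, 1]
#   # yapf: enable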
def _MarkLinesToFormat(uwlines, lines):
"""Skip sections of code that we shouldn't reformat."""
if lines:
for uwline in uwlines:
uwline.disable = True
for start, end in sorted(lines):
for uwline in uwlines:
if uwline.lineno > end:
break
if uwline.lineno >= start:
uwline.disable = False
index = 0
while index < len(uwlines):
uwline = uwlines[index]
if uwline.is_comment:
if re.search(DISABLE_PATTERN, uwline.first.value.strip(), re.IGNORECASE):
while index < len(uwlines):
uwline = uwlines[index]
uwline.disable = True
if (uwline.is_comment and
re.search(ENABLE_PATTERN, uwline.first.value.strip(),
re.IGNORECASE)):
break
index += 1
elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE):
uwline.disable = True
index += 1
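# Illustrative call (hypothetical line ranges): format only lines 1-2 and
# 10-12, leaving every other unwrapped line disabled.
#   _MarkLinesToFormat(uwlines, lines=[(1, 2), (10, 12)])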
def _GetUnifiedDiff(before, after, filename='code'):
"""Get a unified diff of the changes.
Arguments:
before: (unicode) The original source code.
after: (unicode) The reformatted source code.
filename: (unicode) The code's filename.
Returns:
The unified diff text.
"""
before = before.splitlines()
after = after.splitlines()
return '\n'.join(difflib.unified_diff(before, after, filename, filename,
'(original)', '(reformatted)',
lineterm='')) + '\n'
| elviswf/yapf | yapf/yapflib/yapf_api.py | Python | apache-2.0 | 7,978 |
from typing import Dict, List, Union
from typeguard import check_argument_types
import tensorflow as tf
import numpy as np
from neuralmonkey.decoders.autoregressive import AutoregressiveDecoder
from neuralmonkey.decoders.sequence_labeler import SequenceLabeler
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
SupportedDecoders = Union[AutoregressiveDecoder, SequenceLabeler]
class XentRunner(BaseRunner[SupportedDecoders]):
# pylint: disable=too-few-public-methods
# Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
class Executable(BaseRunner.Executable["XentRunner"]):
def collect_results(self, results: List[Dict]) -> None:
xents = np.mean([res["xents"] for res in results], axis=0)
self.set_runner_result(outputs=xents.tolist(),
losses=[float(np.mean(xents))])
# pylint: enable=too-few-public-methods
def __init__(self,
output_series: str,
decoder: SupportedDecoders) -> None:
check_argument_types()
super().__init__(output_series, decoder)
@tensor
def fetches(self) -> Dict[str, tf.Tensor]:
return {"xents": self.decoder.train_xents}
@property
def loss_names(self) -> List[str]:
return ["xent"]
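# Illustrative construction (hypothetical decoder object; in practice runners
# are usually wired up from a configuration file):
#   runner = XentRunner(output_series="xents", decoder=my_decoder)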
| ufal/neuralmonkey | neuralmonkey/runners/xent_runner.py | Python | bsd-3-clause | 1,344 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from erpnext.stock.utils import get_incoming_rate
from erpnext.controllers.queries import get_match_cond
from frappe.utils import flt
def execute(filters=None):
if not filters: filters = frappe._dict()
filters.currency = frappe.get_cached_value('Company', filters.company, "default_currency")
gross_profit_data = GrossProfitGenerator(filters)
data = []
group_wise_columns = frappe._dict({
"invoice": ["parent", "customer", "customer_group", "posting_date","item_code", "item_name","item_group", "brand", "description", \
"warehouse", "qty", "base_rate", "buying_rate", "base_amount",
"buying_amount", "gross_profit", "gross_profit_percent", "project"],
"item_code": ["item_code", "item_name", "brand", "description", "qty", "base_rate",
"buying_rate", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
"warehouse": ["warehouse", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"brand": ["brand", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"item_group": ["item_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"customer": ["customer", "customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"customer_group": ["customer_group", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"sales_person": ["sales_person", "allocated_amount", "qty", "base_rate", "buying_rate", "base_amount", "buying_amount",
"gross_profit", "gross_profit_percent"],
"project": ["project", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"],
"territory": ["territory", "base_amount", "buying_amount", "gross_profit", "gross_profit_percent"]
})
columns = get_columns(group_wise_columns, filters)
for src in gross_profit_data.grouped_data:
row = []
for col in group_wise_columns.get(scrub(filters.group_by)):
row.append(src.get(col))
row.append(filters.currency)
data.append(row)
return columns, data
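# Illustrative invocation (hypothetical company and dates; "group_by" must
# scrub to one of the keys above, e.g. "Invoice" or "Item Code"):
#   columns, data = execute({"company": "My Company", "group_by": "Item Code",
#                            "from_date": "2019-01-01", "to_date": "2019-12-31"})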
def get_columns(group_wise_columns, filters):
columns = []
column_map = frappe._dict({
"parent": _("Sales Invoice") + ":Link/Sales Invoice:120",
"posting_date": _("Posting Date") + ":Date",
"posting_time": _("Posting Time"),
"item_code": _("Item Code") + ":Link/Item",
"item_name": _("Item Name"),
"item_group": _("Item Group") + ":Link/Item Group",
"brand": _("Brand"),
"description": _("Description"),
"warehouse": _("Warehouse") + ":Link/Warehouse",
"qty": _("Qty") + ":Float",
"base_rate": _("Avg. Selling Rate") + ":Currency/currency",
"buying_rate": _("Avg. Buying Rate") + ":Currency/currency",
"base_amount": _("Selling Amount") + ":Currency/currency",
"buying_amount": _("Buying Amount") + ":Currency/currency",
"gross_profit": _("Gross Profit") + ":Currency/currency",
"gross_profit_percent": _("Gross Profit %") + ":Percent",
"project": _("Project") + ":Link/Project",
"sales_person": _("Sales person"),
"allocated_amount": _("Allocated Amount") + ":Currency/currency",
"customer": _("Customer") + ":Link/Customer",
"customer_group": _("Customer Group") + ":Link/Customer Group",
"territory": _("Territory") + ":Link/Territory"
})
for col in group_wise_columns.get(scrub(filters.group_by)):
columns.append(column_map.get(col))
columns.append({
"fieldname": "currency",
"label" : _("Currency"),
"fieldtype": "Link",
"options": "Currency"
})
return columns
class GrossProfitGenerator(object):
def __init__(self, filters=None):
self.data = []
self.average_buying_rate = {}
self.filters = frappe._dict(filters)
self.load_invoice_items()
self.load_stock_ledger_entries()
self.load_product_bundle()
self.load_non_stock_items()
self.get_returned_invoice_items()
self.process()
def process(self):
self.grouped = {}
self.grouped_data = []
for row in self.si_list:
if self.skip_row(row, self.product_bundles):
continue
row.base_amount = flt(row.base_net_amount)
product_bundles = []
if row.update_stock:
product_bundles = self.product_bundles.get(row.parenttype, {}).get(row.parent, frappe._dict())
elif row.dn_detail:
product_bundles = self.product_bundles.get("Delivery Note", {})\
.get(row.delivery_note, frappe._dict())
row.item_row = row.dn_detail
# get buying amount
if row.item_code in product_bundles:
row.buying_amount = self.get_buying_amount_from_product_bundle(row,
product_bundles[row.item_code])
else:
row.buying_amount = self.get_buying_amount(row, row.item_code)
# get buying rate
if row.qty:
row.buying_rate = row.buying_amount / row.qty
row.base_rate = row.base_amount / row.qty
else:
row.buying_rate, row.base_rate = 0.0, 0.0
# calculate gross profit
row.gross_profit = row.base_amount - row.buying_amount
if row.base_amount:
row.gross_profit_percent = (row.gross_profit / row.base_amount) * 100.0
else:
row.gross_profit_percent = 0.0
# add to grouped
self.grouped.setdefault(row.get(scrub(self.filters.group_by)), []).append(row)
if self.grouped:
self.get_average_rate_based_on_group_by()
def get_average_rate_based_on_group_by(self):
# sum buying / selling totals for group
for key in list(self.grouped):
if self.filters.get("group_by") != "Invoice":
for i, row in enumerate(self.grouped[key]):
if i==0:
new_row = row
else:
new_row.qty += row.qty
new_row.buying_amount += row.buying_amount
new_row.base_amount += row.base_amount
new_row = self.set_average_rate(new_row)
self.grouped_data.append(new_row)
else:
for i, row in enumerate(self.grouped[key]):
if row.parent in self.returned_invoices \
and row.item_code in self.returned_invoices[row.parent]:
returned_item_rows = self.returned_invoices[row.parent][row.item_code]
for returned_item_row in returned_item_rows:
row.qty += returned_item_row.qty
row.base_amount += returned_item_row.base_amount
row.buying_amount = row.qty * row.buying_rate
if row.qty or row.base_amount:
row = self.set_average_rate(row)
self.grouped_data.append(row)
def set_average_rate(self, new_row):
new_row.gross_profit = new_row.base_amount - new_row.buying_amount
new_row.gross_profit_percent = ((new_row.gross_profit / new_row.base_amount) * 100.0) \
if new_row.base_amount else 0
new_row.buying_rate = (new_row.buying_amount / new_row.qty) if new_row.qty else 0
new_row.base_rate = (new_row.base_amount / new_row.qty) if new_row.qty else 0
return new_row
def get_returned_invoice_items(self):
returned_invoices = frappe.db.sql("""
select
si.name, si_item.item_code, si_item.stock_qty as qty, si_item.base_net_amount as base_amount, si.return_against
from
`tabSales Invoice` si, `tabSales Invoice Item` si_item
where
si.name = si_item.parent
and si.docstatus = 1
and si.is_return = 1
""", as_dict=1)
self.returned_invoices = frappe._dict()
for inv in returned_invoices:
self.returned_invoices.setdefault(inv.return_against, frappe._dict())\
.setdefault(inv.item_code, []).append(inv)
def skip_row(self, row, product_bundles):
if self.filters.get("group_by") != "Invoice":
if not row.get(scrub(self.filters.get("group_by", ""))):
return True
elif row.get("is_return") == 1:
return True
def get_buying_amount_from_product_bundle(self, row, product_bundle):
buying_amount = 0.0
for packed_item in product_bundle:
if packed_item.get("parent_detail_docname")==row.item_row:
buying_amount += self.get_buying_amount(row, packed_item.item_code)
return buying_amount
def get_buying_amount(self, row, item_code):
# IMPORTANT NOTE
# stock_ledger_entries should already be filtered by item_code and warehouse and
# sorted by posting_date desc, posting_time desc
if item_code in self.non_stock_items:
# Issue 6089: get last purchase rate for non-stock item
item_rate = self.get_last_purchase_rate(item_code)
return flt(row.qty) * item_rate
else:
my_sle = self.sle.get((item_code, row.warehouse))
if (row.update_stock or row.dn_detail) and my_sle:
parenttype, parent = row.parenttype, row.parent
if row.dn_detail:
parenttype, parent = "Delivery Note", row.delivery_note
for i, sle in enumerate(my_sle):
# find the stock valuation rate from the stock ledger entry
if sle.voucher_type == parenttype and parent == sle.voucher_no and \
sle.voucher_detail_no == row.item_row:
previous_stock_value = len(my_sle) > i+1 and \
flt(my_sle[i+1].stock_value) or 0.0
if previous_stock_value:
return (previous_stock_value - flt(sle.stock_value)) * flt(row.qty) / abs(flt(sle.qty))
else:
return flt(row.qty) * self.get_average_buying_rate(row, item_code)
else:
return flt(row.qty) * self.get_average_buying_rate(row, item_code)
return 0.0
def get_average_buying_rate(self, row, item_code):
args = row
if not item_code in self.average_buying_rate:
if item_code in self.non_stock_items:
self.average_buying_rate[item_code] = flt(frappe.db.sql("""
select sum(base_net_amount) / sum(qty * conversion_factor)
from `tabPurchase Invoice Item`
where item_code = %s and docstatus=1""", item_code)[0][0])
else:
args.update({
'voucher_type': row.parenttype,
'voucher_no': row.parent,
'allow_zero_valuation': True,
'company': self.filters.company
})
average_buying_rate = get_incoming_rate(args)
self.average_buying_rate[item_code] = flt(average_buying_rate)
return self.average_buying_rate[item_code]
def get_last_purchase_rate(self, item_code):
if self.filters.to_date:
last_purchase_rate = frappe.db.sql("""
select (a.base_rate / a.conversion_factor)
from `tabPurchase Invoice Item` a
where a.item_code = %s and a.docstatus=1
and modified <= %s
order by a.modified desc limit 1""", (item_code,self.filters.to_date))
else:
last_purchase_rate = frappe.db.sql("""
select (a.base_rate / a.conversion_factor)
from `tabPurchase Invoice Item` a
where a.item_code = %s and a.docstatus=1
order by a.modified desc limit 1""", item_code)
return flt(last_purchase_rate[0][0]) if last_purchase_rate else 0
def load_invoice_items(self):
conditions = ""
if self.filters.company:
conditions += " and company = %(company)s"
if self.filters.from_date:
conditions += " and posting_date >= %(from_date)s"
if self.filters.to_date:
conditions += " and posting_date <= %(to_date)s"
if self.filters.group_by=="Sales Person":
sales_person_cols = ", sales.sales_person, sales.allocated_amount, sales.incentives"
sales_team_table = "left join `tabSales Team` sales on sales.parent = `tabSales Invoice`.name"
else:
sales_person_cols = ""
sales_team_table = ""
self.si_list = frappe.db.sql("""
select
`tabSales Invoice Item`.parenttype, `tabSales Invoice Item`.parent,
`tabSales Invoice`.posting_date, `tabSales Invoice`.posting_time,
`tabSales Invoice`.project, `tabSales Invoice`.update_stock,
`tabSales Invoice`.customer, `tabSales Invoice`.customer_group,
`tabSales Invoice`.territory, `tabSales Invoice Item`.item_code,
`tabSales Invoice Item`.item_name, `tabSales Invoice Item`.description,
`tabSales Invoice Item`.warehouse, `tabSales Invoice Item`.item_group,
`tabSales Invoice Item`.brand, `tabSales Invoice Item`.dn_detail,
`tabSales Invoice Item`.delivery_note, `tabSales Invoice Item`.stock_qty as qty,
`tabSales Invoice Item`.base_net_rate, `tabSales Invoice Item`.base_net_amount,
`tabSales Invoice Item`.name as "item_row", `tabSales Invoice`.is_return
{sales_person_cols}
from
`tabSales Invoice` inner join `tabSales Invoice Item`
on `tabSales Invoice Item`.parent = `tabSales Invoice`.name
{sales_team_table}
where
`tabSales Invoice`.docstatus=1 and `tabSales Invoice`.is_opening!='Yes' {conditions} {match_cond}
order by
`tabSales Invoice`.posting_date desc, `tabSales Invoice`.posting_time desc"""
.format(conditions=conditions, sales_person_cols=sales_person_cols,
sales_team_table=sales_team_table, match_cond = get_match_cond('Sales Invoice')), self.filters, as_dict=1)
def load_stock_ledger_entries(self):
res = frappe.db.sql("""select item_code, voucher_type, voucher_no,
voucher_detail_no, stock_value, warehouse, actual_qty as qty
from `tabStock Ledger Entry`
where company=%(company)s
order by
item_code desc, warehouse desc, posting_date desc,
posting_time desc, name desc""", self.filters, as_dict=True)
self.sle = {}
for r in res:
if (r.item_code, r.warehouse) not in self.sle:
self.sle[(r.item_code, r.warehouse)] = []
self.sle[(r.item_code, r.warehouse)].append(r)
def load_product_bundle(self):
self.product_bundles = {}
for d in frappe.db.sql("""select parenttype, parent, parent_item,
item_code, warehouse, -1*qty as total_qty, parent_detail_docname
from `tabPacked Item` where docstatus=1""", as_dict=True):
self.product_bundles.setdefault(d.parenttype, frappe._dict()).setdefault(d.parent,
frappe._dict()).setdefault(d.parent_item, []).append(d)
def load_non_stock_items(self):
self.non_stock_items = frappe.db.sql_list("""select name from tabItem
where is_stock_item=0""")
| ESS-LLP/erpnext-medical | erpnext/accounts/report/gross_profit/gross_profit.py | Python | gpl-3.0 | 13,761 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
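# Illustrative use of the severity key (hypothetical statuses): the most
# severe upstream status wins when aggregating over dependencies.
#   max(['', UPSTREAM_RUNNING, UPSTREAM_FAILED], key=UPSTREAM_SEVERITY_KEY)
#   # -> UPSTREAM_FAILED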
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
# TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
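# Minimal sketch of the sliding-window behaviour (illustrative values):
#   f = Failures(window=60.0)  # count failures over the last 60 seconds
#   f.add_failure()
#   f.num_failures()           # -> 1 while that failure is inside the window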
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
tracking_url=None):
self.id = task_id
self.stakeholders = set() # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
self.workers = set() # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.tracking_url = tracking_url
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
class Worker(object):
"""
Structure for tracking worker activity and keeping references to workers.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
A worker is trivial if it is not an assistant and all of its pending
tasks are without resource requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
Keep track of the current state and handle persistence.
The point of this class is to enable other ways to keep state, e.g. by using a database
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def get_state(self):
return self._tasks, self._active_workers
def set_state(self, state):
self._tasks, self._active_workers = state
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# Prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker or Task class, this
# code needs to be updated
# Compatibility since 2014-06-02
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
# Compatibility since 2015-05-28
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
# Compatibility since 2015-04-28
if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
for t in six.itervalues(self._tasks):
t.disable_hard_timeout = None
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None and new_status != DISABLED:
return
if new_status == FAILED and task.can_disable() and task.status != DISABLED:
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def prune(self, task, config):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time is not None:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING once their retry time has passed (only when retry_delay >= 0)
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = getattr(worker, 'last_get_work', None)
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which case the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
if task.id not in necessary_tasks and self._state.prune(task, self._config):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
# Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value
# (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
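# Illustrative check (hypothetical numbers): with self._resources = {"api": 2},
# a task needing {"api": 1} still fits while one unit is in use, but not when
# both are taken:
#   _has_resources({"api": 1}, defaultdict(int, {"api": 1}))  # -> True
#   _has_resources({"api": 1}, defaultdict(int, {"api": 2}))  # -> False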
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return the rank of a task, used to order tasks for scheduling.
:return: a (priority, -time) tuple, so higher-priority and earlier-added tasks sort first.
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
# TODO: remove any expired nodes
# Algo: iterate over all nodes, find the highest priority node no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_running_tasks(), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True, deps=None):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps if deps is None else deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _traverse_graph(self, root_task_id, seen=None, dep_func=None):
""" Returns the dependency graph rooted at root_task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
dep_func = lambda t: t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# NOTE: If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family, params = UNKNOWN, {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
deps = dep_func(task)
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
def dep_graph(self, task_id, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id)
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(task_id, dep_func=lambda t: inverse_graph[t.id])
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str: substring to match against task ids
:return: a dict mapping task status to {task id: serialized task}
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
| bmaggard/luigi | luigi/scheduler.py | Python | apache-2.0 | 42,021 |
from datetime import datetime, timedelta
from corehq.apps.locations.dbaccessors import get_users_by_location_id
from corehq.apps.sms.api import send_sms_to_verified_number
from corehq.util.translation import localize
from dimagi.utils.dates import get_business_day_of_month_before
from corehq.apps.locations.models import SQLLocation
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, SupplyPointStatusValues
from custom.ilsgateway.tanzania.reminders import TEST_HANDLER_HELP, TEST_HANDLER_BAD_CODE, SOH_HELP_MESSAGE, \
SUPERVISION_REMINDER, SUBMITTED_REMINDER_DISTRICT, SUBMITTED_REMINDER_FACILITY, \
DELIVERY_REMINDER_FACILITY, DELIVERY_REMINDER_DISTRICT, DELIVERY_LATE_DISTRICT, TEST_HANDLER_CONFIRM, \
REMINDER_MONTHLY_RANDR_SUMMARY, reports, REMINDER_MONTHLY_SOH_SUMMARY, REMINDER_MONTHLY_DELIVERY_SUMMARY, \
SOH_THANK_YOU, LOSS_ADJUST_HELP
class MessageInitiatior(KeywordHandler):
def help(self):
self.respond(TEST_HANDLER_HELP)
return True
def get_district_by_name(self, name):
try:
return SQLLocation.objects.get(domain=self.domain, name=name)
except SQLLocation.DoesNotExist:
return None
def send_message(self, sql_location, message, **kwargs):
for user in get_users_by_location_id(self.domain, sql_location.location_id):
verified_number = user.get_verified_number()
if verified_number:
with localize(user.get_language_code()):
send_sms_to_verified_number(verified_number, message % kwargs)
def handle(self):
if len(self.args) < 2:
return self.help()
command = self.args[0]
rest = " ".join(self.args[1:])
msd_code = self.args[1].lower()
fw_message = " ".join(self.args[2:])
try:
sql_location = SQLLocation.objects.get(domain=self.domain, site_code__iexact=msd_code)
except SQLLocation.DoesNotExist:
sql_location = self.get_district_by_name(rest)
if not sql_location:
self.respond(TEST_HANDLER_BAD_CODE, code=msd_code)
return True
if command in ['soh', 'hmk']:
self.send_message(sql_location, SOH_HELP_MESSAGE)
now = datetime.utcnow()
SupplyPointStatus.objects.create(location_id=sql_location.location_id,
status_type=SupplyPointStatusTypes.SOH_FACILITY,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=now)
elif command in ['la']:
self.send_message(sql_location, LOSS_ADJUST_HELP)
now = datetime.utcnow()
SupplyPointStatus.objects.create(location_id=sql_location.location_id,
status_type=SupplyPointStatusTypes.LOSS_ADJUSTMENT_FACILITY,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=now)
elif command in ['supervision']:
self.send_message(sql_location, SUPERVISION_REMINDER)
now = datetime.utcnow()
SupplyPointStatus.objects.create(location_id=sql_location.location_id,
status_type=SupplyPointStatusTypes.SUPERVISION_FACILITY,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=now)
elif command in ['randr']:
if sql_location.location_type.name == 'DISTRICT':
self.send_message(sql_location, SUBMITTED_REMINDER_DISTRICT)
status_type = SupplyPointStatusTypes.R_AND_R_DISTRICT
else:
self.send_message(sql_location, SUBMITTED_REMINDER_FACILITY)
status_type = SupplyPointStatusTypes.R_AND_R_FACILITY
now = datetime.utcnow()
SupplyPointStatus.objects.create(location_id=sql_location.location_id,
status_type=status_type,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=now)
elif command in ['delivery']:
if sql_location.location_type.name == 'DISTRICT':
self.send_message(sql_location, DELIVERY_REMINDER_DISTRICT)
status_type = SupplyPointStatusTypes.DELIVERY_DISTRICT
else:
self.send_message(sql_location, DELIVERY_REMINDER_FACILITY)
status_type = SupplyPointStatusTypes.DELIVERY_FACILITY
now = datetime.utcnow()
SupplyPointStatus.objects.create(location_id=sql_location.location_id,
status_type=status_type,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=now)
elif command in ['fw']:
if fw_message:
self.send_message(sql_location, fw_message)
elif command in ["latedelivery"]:
self.send_message(sql_location, DELIVERY_LATE_DISTRICT, group_name="changeme", group_total=1,
not_responded_count=2, not_received_count=3)
elif command in ["randr_report"]:
now = datetime.utcnow()
self.respond(REMINDER_MONTHLY_RANDR_SUMMARY, **reports.construct_summary(
sql_location.couch_location,
SupplyPointStatusTypes.R_AND_R_FACILITY,
[SupplyPointStatusValues.SUBMITTED, SupplyPointStatusValues.NOT_SUBMITTED],
get_business_day_of_month_before(now.year, now.month, 5)
))
elif command in ["soh_report"]:
now = datetime.utcnow()
last_month = datetime(now.year, now.month, 1) - timedelta(days=1)
self.respond(
REMINDER_MONTHLY_SOH_SUMMARY,
**reports.construct_summary(
sql_location.couch_location,
SupplyPointStatusTypes.SOH_FACILITY,
[SupplyPointStatusValues.SUBMITTED],
get_business_day_of_month_before(last_month.year, last_month.month, -1)
)
)
elif command in ["delivery_report"]:
now = datetime.utcnow()
self.respond(REMINDER_MONTHLY_DELIVERY_SUMMARY,
**reports.construct_summary(sql_location.couch_location,
SupplyPointStatusTypes.DELIVERY_FACILITY,
[SupplyPointStatusValues.RECEIVED,
SupplyPointStatusValues.NOT_RECEIVED],
get_business_day_of_month_before(now.year, now.month, 15)))
elif command in ["soh_thank_you"]:
self.send_message(sql_location, SOH_THANK_YOU)
self.respond(TEST_HANDLER_CONFIRM)
return True
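
# A hedged usage sketch (keyword and MSD code are illustrative, not taken from
# this module): an operator texting "<keyword> soh d31049" triggers
# SOH_HELP_MESSAGE for every verified user at the facility whose site code is
# d31049, and a REMINDER_SENT SupplyPointStatus row is written for that location.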
| qedsoftware/commcare-hq | custom/ilsgateway/tanzania/handlers/messageinitiator.py | Python | bsd-3-clause | 7,307 |
'''
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Luigi Pirelli (luipir@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Oct 7, 2013
@author: Luigi Pirelli (luipir@gmail.com)
'''
import os, json, traceback
from collections import OrderedDict
from qgis.core import *
from PyQt4.QtCore import *
# SpatiaLite imports
from pyspatialite import dbapi2 as db
# SpatiaLite DB settings
DATABASE_SRID = 32632 # <- has to match the source DB and the createdb script
class ExportDBThread(QThread):
# signals
procDone = pyqtSignal(bool)
procMessage = pyqtSignal(str, int)
def __init__(self, pgconnection, selectedComuni, outDb):
QThread.__init__(self)
self.pgconnection = pgconnection
self.cursor = pgconnection.cursor()
self.selectedComuni = selectedComuni
self.stopThread = False
self.DATABASE_NAME = os.path.splitext( os.path.basename(outDb) )[0]
self.DATABASE_OUTNAME = outDb
self.DATABASE_OUTNAME_SCHEMAFILE = os.path.dirname(os.path.realpath(__file__))+'/schemas/' + self.DATABASE_NAME + ".sql"
def run(self):
try:
# create db
self.createDB()
# populate db
self.populateDB(self.selectedComuni)
# end
self.procDone.emit(True)
except Exception as e:
traceback.print_exc()
self.procDone.emit(False)
self.procMessage.emit(e.message, QgsMessageLog.CRITICAL)
raise e
def smoothlyStop(self):
self.stopThread = True
def createDB(self):
if self.stopThread:
return
if os.path.exists(self.DATABASE_OUTNAME):
os.unlink(self.DATABASE_OUTNAME)
# read
geosisma_geo_schema = ""
with open(self.DATABASE_OUTNAME_SCHEMAFILE, 'r') as fs:
geosisma_geo_schema += fs.read()
# connect spatialite db
conn = db.connect(self.DATABASE_OUTNAME)
# create spatial metadata... depending on SL3 or SL4
try:
conn.cursor().execute("SELECT InitSpatialMetadata(1);")
except:
conn.cursor().execute("SELECT InitSpatialMetadata();")
# create DB
try:
self.procMessage.emit("Inizializza il DB Spatialite temporaneo", QgsMessageLog.INFO)
conn.cursor().executescript(geosisma_geo_schema)
except db.Error as e:
self.procMessage.emit(e.message, QgsMessageLog.CRITICAL)
raise e
def populateDB(self, selectedComuni):
if self.stopThread:
return
# connect spatialite db
conn = db.connect(self.DATABASE_OUTNAME)
try:
# copy tables
tables = ["istat_regioni", "istat_province", "codici_belfiore", "istat_loc_tipi", "istat_comuni"]
for table in tables:
self.copyTable(conn, table)
# copy table with geom
tables = ["istat_loc"]
for table in tables:
self.copyGeomTable(conn, table)
# get fab_catasto poligons only related to selectedComuni
for comune in selectedComuni:
self.copyCatastoPolygons(conn, comune)
# get fab_10k poligons only related to selectedComuni
for comune in selectedComuni:
self.copyFab10kPolygons(conn, comune)
#commit population
conn.commit()
except db.Error as e:
self.procMessage.emit(e.message, QgsMessageLog.CRITICAL)
raise e
def copyTable(self, spliteconn, tableName):
'''Copy a table from PostGIS to Spatialite'''
if self.stopThread:
return
try:
if (not self.cursor.closed):
self.cursor.close()
self.cursor = self.pgconnection.cursor()
self.procMessage.emit("Copia tabella: "+tableName, QgsMessageLog.INFO)
# get PostGIS values
sqlquery = u"SELECT * FROM "+tableName+";"
self.cursor.execute( sqlquery )
# create query string
            fields = ['?'] * len(self.cursor.description)  # one '?' placeholder per column
sql = 'INSERT INTO '+tableName+' VALUES '
sql += "( " + ",".join(fields) + " );"
# copy on SpatiaLite
for record in self.cursor.fetchall():
spliteconn.cursor().execute(sql, record)
except db.Error as e:
self.procMessage.emit(e.message, QgsMessageLog.CRITICAL)
raise e
def copyGeomTable(self, spliteconn, tableName):
'''Copy a geom table from PostGIS to Spatialite'''
if self.stopThread:
return
        # get the table's field types
        # working with OrderedDict to maintain ordering among fields and values
try:
self.procMessage.emit("Copia tabella: "+tableName + ". Attenzione operazione lunga!", QgsMessageLog.INFO)
records = spliteconn.cursor().execute("PRAGMA table_info("+tableName+")")
columnNameTypes = {}
for record in records:
columnNameTypes[record[1]] = record[2]
# create query
temp = {k:k for k in columnNameTypes.keys()}
temp["the_geom"] = "ST_AsText(" + temp["the_geom"] + ")"
temp = OrderedDict(sorted(temp.items(), key=lambda x:x[0]))
sqlcolumns = temp.values()
columnames = temp.keys()
# do postgis query
sqlquery = "SELECT "+",".join(sqlcolumns) + " "
sqlquery += "FROM "+ tableName + ";"
self.cursor.execute( sqlquery )
self.procMessage.emit("%s: Copiando n: %d records" % (tableName, self.cursor.rowcount), QgsMessageLog.INFO)
# create query string for spatialite
sql = 'INSERT INTO '+tableName+'(' + ','.join(columnames) + ') VALUES '
# copy on SpatiaLite
for record in self.cursor.fetchall():
# modify geompetry element
valuesDict = OrderedDict(zip(columnames,record))
#valuesDict["the_geom"] = "GeomFromText('%s',%d)" % (valuesDict["the_geom"], DATABASE_SRID)
                fields = ['?'] * len(valuesDict)  # one '?' placeholder per column
index = valuesDict.keys().index("the_geom")
fields[index] = "GeomFromText( ? ,%d)" % DATABASE_SRID
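                # e.g. with hypothetical columns (gid, the_geom) the statement becomes:
                #   INSERT INTO istat_loc(gid,the_geom) VALUES ( ?,GeomFromText( ? ,32632) );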
newsql = sql + "( " + ",".join(fields) + " );"
#newsql = sql + "(" +",".join( valuesDict.values() ) + ")"
spliteconn.cursor().execute(newsql, tuple(valuesDict.values()))
if self.stopThread:
return
except db.Error as e:
self.procMessage.emit(e.message, QgsMessageLog.CRITICAL)
raise e
def copyCatastoPolygons(self, spliteconn, comuneDict):
if self.stopThread:
return
# get fab_catasto field types
        # working with OrderedDict to maintain ordering among fields and values
self.procMessage.emit("Copia tabella fab_catasto per il comune: "+comuneDict["toponimo"] + ". Attenzione operazione lunga!", QgsMessageLog.INFO)
try:
records = spliteconn.cursor().execute("PRAGMA table_info(fab_catasto)")
columnNameTypes = OrderedDict()
for record in records:
columnNameTypes[record[1]] = record[2]
columnNameTypes = OrderedDict( sorted(columnNameTypes.items(), key=lambda x:x[0]) )
except db.Error as e:
raise e
# create query
temp = OrderedDict()
for k in columnNameTypes.iterkeys():
temp[str(k)] = "fab_catasto."+str(k)
temp["the_geom"] = "ST_AsText(" + temp["the_geom"] + ")"
sqlcolumns = temp.values()
columnames = columnNameTypes.keys()
sqlquery = "SELECT "+",".join(sqlcolumns) + " "
sqlquery += """
FROM
public.fab_catasto,
public.codici_belfiore,
public.istat_comuni
WHERE
fab_catasto.belfiore = codici_belfiore.id AND
codici_belfiore.id_comune = istat_comuni.id_istat AND
codici_belfiore.id_provincia = istat_comuni.idprovincia AND
codici_belfiore.toponimo = istat_comuni.toponimo AND
istat_comuni.id_istat = '{id_istat}' AND
istat_comuni.idprovincia = '{idprovincia}' AND
istat_comuni.toponimo = '{toponimo}';
""".format(**comuneDict)
# query all poligons
self.cursor.execute( sqlquery )
self.procMessage.emit(self.tr("fab_catasto: Copiando n: %d records" % self.cursor.rowcount), QgsMessageLog.INFO)
# add record to spatialite db
for poligons in self.cursor.fetchall():
sql=""
try:
# create a dict for this provincia
valueByName = zip(columnames, poligons)
poligonsDict = OrderedDict( sorted( valueByName, key=lambda x:x[0] ) )
# modify values to match spatialite type
for column in columnames:
#print column, poligonsDict[column]
# None values
                    if poligonsDict[column] is None:
poligonsDict[column] = ''
# the_geom values
if (columnNameTypes[column] == "MULTIPOLYGON"):
poligonsDict[column] = "GeomFromText('%s', %d)" % ( poligonsDict[column], DATABASE_SRID)
#poligonsDict[column] = "GeomFromText('MULTIPOLYGON(((0 0,10 20,30 40,0 0),(1 1,2 2,3 3,1 1)),((100 100,110 110,120 120,100 100)))',DATABASE_SRID)"
if (columnNameTypes[column] == "text"):
# using json.dumps to create strings without ' or " problems
poligonsDict[column] = json.dumps(str(poligonsDict[column]))
if (columnNameTypes[column] == "real"):
if poligonsDict[column] != "":
poligonsDict[column] = float(poligonsDict[column])
else:
poligonsDict[column] = 'NULL'
if (columnNameTypes[column] == "numeric" or columnNameTypes[column] == "integer"):
if poligonsDict[column] != "":
poligonsDict[column] = int(poligonsDict[column])
else:
poligonsDict[column] = 'NULL'
if (columnNameTypes[column] != "MULTIPOLYGON" and
columnNameTypes[column] != "text"):
poligonsDict[column] = str(poligonsDict[column])
# do insert
sql = 'INSERT INTO fab_catasto ('+ ",".join(columnames) +') VALUES '
sql += "(" + ",".join(poligonsDict.values()) + ");"
spliteconn.cursor().execute(sql)
except Exception as ex:
self.procMessage.emit(self.tr("Error executing query: %s" % sql), QgsMessageLog.CRITICAL)
raise ex
if self.stopThread:
return
def copyFab10kPolygons(self, spliteconn, comuneDict):
if self.stopThread:
return
        # get fab_10k field types
        # working with OrderedDict to maintain ordering among fields and values
self.procMessage.emit("Copia fab_10k per il comune: "+comuneDict["toponimo"] + ". Attenzione operazione lunga!", QgsMessageLog.INFO)
try:
records = spliteconn.cursor().execute("PRAGMA table_info(fab_10k)")
columnNameTypes = OrderedDict()
for record in records:
columnNameTypes[record[1]] = record[2]
columnNameTypes = OrderedDict( sorted(columnNameTypes.items(), key=lambda x:x[0]) )
except db.Error as e:
raise e
# create query
temp = OrderedDict()
for k in columnNameTypes.iterkeys():
temp[str(k)] = str(k)
temp["the_geom"] = "ST_AsText(" + temp["the_geom"] + ")"
sqlcolumns = temp.values()
columnames = columnNameTypes.keys()
sqlquery = "SELECT "+",".join(sqlcolumns) + " "
sqlquery += """
FROM
fab_10k
WHERE
cod_com = '{id_istat}' AND
nomemin LIKE '{toponimo}';
""".format(**comuneDict)
# query all poligons
self.cursor.execute( sqlquery )
self.procMessage.emit(self.tr("fab_10k: Copiando n: %d records" % self.cursor.rowcount), QgsMessageLog.INFO)
# add record to spatialite db
for poligons in self.cursor.fetchall():
sql=""
try:
# create a dict for this provincia
valueByName = zip(columnames, poligons)
poligonsDict = OrderedDict( sorted( valueByName, key=lambda x:x[0] ) )
# modify values to match spatialite type
for column in columnames:
#print column, poligonsDict[column]
# None values
                    if poligonsDict[column] is None:
poligonsDict[column] = ''
# the_geom values
if (columnNameTypes[column] == "MULTIPOLYGON"):
poligonsDict[column] = "GeomFromText('%s', %d)" % ( poligonsDict[column], DATABASE_SRID)
#poligonsDict[column] = "GeomFromText('MULTIPOLYGON(((0 0,10 20,30 40,0 0),(1 1,2 2,3 3,1 1)),((100 100,110 110,120 120,100 100)))',DATABASE_SRID)"
if (columnNameTypes[column] == "text"):
# using json.dumps to create strings without ' or " problems
poligonsDict[column] = json.dumps(str(poligonsDict[column]))
if (columnNameTypes[column] == "real"):
if poligonsDict[column] != "":
poligonsDict[column] = float(poligonsDict[column])
else:
poligonsDict[column] = 'NULL'
if (columnNameTypes[column] == "numeric" or columnNameTypes[column] == "integer"):
if poligonsDict[column] != "":
poligonsDict[column] = int(poligonsDict[column])
else:
poligonsDict[column] = 'NULL'
if (columnNameTypes[column] != "MULTIPOLYGON" and
columnNameTypes[column] != "text"):
poligonsDict[column] = str(poligonsDict[column])
# do insert
sql = 'INSERT INTO fab_10k ('+ ",".join(columnames) +') VALUES '
sql += "(" + ",".join(poligonsDict.values()) + ");"
spliteconn.cursor().execute(sql)
except Exception as ex:
self.procMessage.emit(self.tr("Error executing query: %s" % sql), QgsMessageLog.CRITICAL)
raise ex
if self.stopThread:
return
# def updateExtent(self, spliteconn, comuneDict):
# if self.stopThread:
# return
#
    #     # get extent from the postgis db... better than doing it in spatialite, which is too slow!
# sqlquery = ""
# sqlquery += """
# SELECT
# ST_AsText(
# ST_Envelope(
# ST_Union(
# ST_Envelope(fab_catasto.the_geom)
# )
# )
# )
# FROM
# public.fab_catasto,
# public.codici_belfiore,
# public.istat_comuni
# WHERE
# fab_catasto.belfiore = codici_belfiore.id AND
# codici_belfiore.id_comune = istat_comuni.id_istat AND
# codici_belfiore.id_provincia = istat_comuni.idprovincia AND
# codici_belfiore.toponimo = istat_comuni.toponimo AND
# istat_comuni.id_istat = '{id_istat}' AND
# istat_comuni.idprovincia = '{idprovincia}' AND
# istat_comuni.toponimo = '{toponimo}';
# """.format(**comuneDict)
#
# # query extent in WKT format
# self.procMessage.emit(self.tr("Generando l'extent della tabella fab_catasto e aggionando il DB spatialite"), QgsMessageLog.INFO)
# self.cursor.execute( sqlquery )
#
# wkt = self.cursor.fetchone()
# geom = QgsGeometry.fromWkt( wkt[0] )
# if not geom:
# raise Exception( self.tr("Extent errato: %s" % wkt[0]) )
#
# # add in spatialite db
# extentDict = comuneDict;
# extentDict["the_geom"] = wkt[0]
#
# sqlquery = """
# INSERT INTO
# geosisma_extent
# VALUES
# ('{id_istat}', '{toponimo}', '{idprovincia}', GeomFromText('{the_geom}', %d) );
# """.format(**extentDict)
# sqlquery = sqlquery % (DATABASE_SRID)
# spliteconn.cursor().execute(sqlquery)
| faunalia/rt_geosisma_inizializzaevento | ExportDBThread.py | Python | gpl-3.0 | 18,122 |
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from ..lib import devices
def init():
devices.add_gi({
"gi01": {
"label": "Illumination, Playfield Top",
"device": "G01",
},
"gi02": {
"label": "Illumination, Playfield Right",
"device": "G02",
},
"gi03": {
"label": "Illumination, Playfield Left",
"device": "G03",
},
"gi04": {
"label": "Illumination, Insert Title",
"device": "G04",
},
"gi05": {
"label": "Illumination, Insert Background",
"device": "G05",
}
})
| town-hall-pinball/project-omega | pin/machine/gi.py | Python | mit | 1,728 |
from werkzeug.wrappers import Response
import sqlite3
def page(config, template_name, context={}, mimetype='text/html'):
t = config['jinja_env'].get_template(template_name)
return Response(t.render(context), mimetype=mimetype)
def db_connect(config):
    conn = sqlite3.connect(config['db'])
conn.row_factory = sqlite3.Row
return conn
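
# Minimal usage sketch (the 'jinja_env' and 'db' keys are the only assumptions
# this module makes about config; the values below are illustrative):
#
#   import jinja2
#   config = {
#       'jinja_env': jinja2.Environment(loader=jinja2.FileSystemLoader('templates')),
#       'db': 'landfill.db',
#   }
#   response = page(config, 'index.html', {'title': 'Landfill'})
#   conn = db_connect(config)  # rows come back as sqlite3.Row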
| zbanks/landfill | landfill/common.py | Python | mit | 333 |
#! /usr/bin/env python
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import unittest
from problem.numeric.sqrt import sqrt_binary_search as sqrt
# from problem.numeric.sqrt import sqrt_newton_raphson as sqrt
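# For reference, a minimal bisection sqrt looks roughly like this (a sketch,
# not the implementation under test; the eps and bracketing are assumptions):
#
#   def sqrt(x, eps=1e-4):
#       assert x >= 0
#       lo, hi = 0.0, max(1.0, x)
#       while hi - lo > eps:
#           mid = (lo + hi) / 2.0
#           lo, hi = (mid, hi) if mid * mid < x else (lo, mid)
#       return (lo + hi) / 2.0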
class SqrtTest(unittest.TestCase):
def test_big_sqrt(self):
self.assertAlmostEqual(3, sqrt(9), 4)
self.assertAlmostEqual(3.1464, sqrt(9.9), 4)
def test_small_sqrt(self):
self.assertAlmostEqual(.5, sqrt(.25), 4)
self.assertAlmostEqual(.7071, sqrt(.5), 4)
self.assertAlmostEqual(.001, sqrt(0), 4)
def test_not_i(self):
with self.assertRaises(AssertionError):
sqrt(-1)
if __name__ == '__main__':
unittest.main()
| jhanley634/testing-tools | problem/numeric/sqrt_test.py | Python | mit | 1,740 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
---
module: ec2_lc
short_description: Create or delete AWS Autoscaling Launch Configurations
description:
- Can create or delete AWS Autoscaling Configurations
- Works with the ec2_asg module to manage Autoscaling Groups
notes:
- "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration
after it is changed will not modify the launch configuration on AWS. You must create a new config and assign
it to the ASG instead."
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for configuration
required: true
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
image_id:
description:
- The AMI unique identifier to be used for the group
required: false
key_name:
description:
- The SSH key name to be used for access to managed instances
required: false
security_groups:
description:
- A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security group names or IDs.
required: false
volumes:
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
user_data:
description:
- opaque blob of data which is made available to the ec2 instance. Mutually exclusive with I(user_data_path).
required: false
user_data_path:
description:
- Path to the file that contains userdata for the ec2 instances. Mutually exclusive with I(user_data).
required: false
version_added: "2.3"
kernel_id:
description:
- Kernel id for the EC2 instance
required: false
spot_price:
description:
- The spot price you are bidding. Only applies for an autoscaling group with spot instances.
required: false
instance_monitoring:
description:
- whether instances in group are launched with detailed monitoring.
default: false
assign_public_ip:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
required: false
version_added: "1.8"
ramdisk_id:
description:
- A RAM disk id for the instances.
required: false
version_added: "1.8"
instance_profile_name:
description:
- The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
required: false
version_added: "1.8"
ebs_optimized:
description:
- Specifies whether the instance is optimized for EBS I/O (true) or not (false).
required: false
default: false
version_added: "1.8"
classic_link_vpc_id:
description:
- Id of ClassicLink enabled VPC
required: false
version_added: "2.0"
classic_link_vpc_security_groups:
description:
- A list of security group id's with which to associate the ClassicLink VPC instances.
required: false
version_added: "2.0"
extends_documentation_fragment:
- aws
- ec2
requirements:
- "boto >= 2.39.0"
"""
EXAMPLES = '''
- ec2_lc:
name: special
image_id: ami-XXX
key_name: default
security_groups: ['group', 'group2' ]
instance_type: t1.micro
volumes:
- device_name: /dev/sda1
volume_size: 100
device_type: io1
iops: 3000
delete_on_termination: true
- device_name: /dev/sdb
ephemeral: ephemeral0
'''
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_block_device(module, volume):
    # Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
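    # (informational only here: the ratio is not validated by the checks below)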
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
def create_launch_config(connection, module):
name = module.params.get('name')
image_id = module.params.get('image_id')
key_name = module.params.get('key_name')
security_groups = module.params['security_groups']
user_data = module.params.get('user_data')
user_data_path = module.params.get('user_data_path')
volumes = module.params['volumes']
instance_type = module.params.get('instance_type')
spot_price = module.params.get('spot_price')
instance_monitoring = module.params.get('instance_monitoring')
assign_public_ip = module.params.get('assign_public_ip')
kernel_id = module.params.get('kernel_id')
ramdisk_id = module.params.get('ramdisk_id')
instance_profile_name = module.params.get('instance_profile_name')
ebs_optimized = module.params.get('ebs_optimized')
classic_link_vpc_id = module.params.get('classic_link_vpc_id')
classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
bdm = BlockDeviceMapping()
if user_data_path:
try:
with open(user_data_path, 'r') as user_data_file:
user_data = user_data_file.read()
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if volumes:
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, volume)
lc = LaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
security_groups=security_groups,
user_data=user_data,
block_device_mappings=[bdm],
instance_type=instance_type,
kernel_id=kernel_id,
spot_price=spot_price,
instance_monitoring=instance_monitoring,
associate_public_ip_address=assign_public_ip,
ramdisk_id=ramdisk_id,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
classic_link_vpc_security_groups=classic_link_vpc_security_groups,
classic_link_vpc_id=classic_link_vpc_id,
)
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = False
if not launch_configs:
try:
connection.create_launch_configuration(lc)
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = True
except BotoServerError as e:
module.fail_json(msg=str(e))
result = dict(
((a[0], a[1]) for a in vars(launch_configs[0]).items()
if a[0] not in ('connection', 'created_time', 'instance_monitoring', 'block_device_mappings'))
)
result['created_time'] = str(launch_configs[0].created_time)
# Looking at boto's launchconfig.py, it looks like this could be a boolean
# value or an object with an enabled attribute. The enabled attribute
# could be a boolean or a string representation of a boolean. Since
# I can't test all permutations myself to see if my reading of the code is
# correct, have to code this *very* defensively
if launch_configs[0].instance_monitoring is True:
result['instance_monitoring'] = True
else:
try:
result['instance_monitoring'] = module.boolean(launch_configs[0].instance_monitoring.enabled)
except AttributeError:
result['instance_monitoring'] = False
if launch_configs[0].block_device_mappings is not None:
result['block_device_mappings'] = []
for bdm in launch_configs[0].block_device_mappings:
result['block_device_mappings'].append(dict(device_name=bdm.device_name, virtual_name=bdm.virtual_name))
if bdm.ebs is not None:
result['block_device_mappings'][-1]['ebs'] = dict(snapshot_id=bdm.ebs.snapshot_id, volume_size=bdm.ebs.volume_size)
if user_data_path:
result['user_data'] = "hidden" # Otherwise, we dump binary to the user's terminal
module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'],
image_id=result['image_id'], arn=result['launch_configuration_arn'],
security_groups=result['security_groups'],
instance_type=result['instance_type'],
result=result)
def delete_launch_config(connection, module):
name = module.params.get('name')
launch_configs = connection.get_all_launch_configurations(names=[name])
if launch_configs:
launch_configs[0].delete()
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
image_id=dict(type='str'),
key_name=dict(type='str'),
security_groups=dict(type='list'),
user_data=dict(type='str'),
user_data_path=dict(type='path'),
kernel_id=dict(type='str'),
volumes=dict(type='list'),
instance_type=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
spot_price=dict(type='float'),
ramdisk_id=dict(type='str'),
instance_profile_name=dict(type='str'),
ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(type='bool'),
classic_link_vpc_security_groups=dict(type='list'),
classic_link_vpc_id=dict(type='str')
)
)
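    # Note: the spec accepts both assign_public_ip and associate_public_ip_address,
    # but only assign_public_ip is read by create_launch_config above.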
module = AnsibleModule(
argument_spec=argument_spec,
        mutually_exclusive=[['user_data', 'user_data_path']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
state = module.params.get('state')
if state == 'present':
create_launch_config(connection, module)
elif state == 'absent':
delete_launch_config(connection, module)
if __name__ == '__main__':
main()
| cmelange/ansible | lib/ansible/modules/cloud/amazon/ec2_lc.py | Python | gpl-3.0 | 13,170 |
#
# rpclib - Copyright (C) rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from rpclib.model.primitive import String
from rpclib.service import ServiceBase
from rpclib.decorator import rpc
class DefaultPortService(ServiceBase):
@rpc(String, _returns=String)
def echo_default_port_service(self, string):
return string
class DefaultPortServiceMultipleMethods(ServiceBase):
@rpc(String, _returns=String)
def echo_one(self, string):
return string
@rpc(String, _returns=String)
def echo_two(self, string):
return string
@rpc(String, _returns=String)
def echo_three(self, string):
return string
| martijnvermaat/rpclib | src/rpclib/test/wsdl/defult_services.py | Python | lgpl-2.1 | 1,359 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'sum'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 9.5),
'type': 'DateEncoder'
},
u'timestamp_dayOfWeek': None,
u'timestamp_weekend': None,
u'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'maxval': 100.0,
'minval': 0.0,
'n': 50,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21
},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of columns in the SP (must be same as in TP)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.8,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.0001,
'synPermInactiveDec': 0.0005,
'maxBoost': 1.0,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 9,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 12,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 3,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.005,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
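
# A minimal consumption sketch (assuming the standard OPF entry point used by
# the hotgym examples; the predicted field name matches the encoder above):
#
#   from nupic.frameworks.opf.modelfactory import ModelFactory
#   model = ModelFactory.create(MODEL_PARAMS)
#   model.enableInference({'predictedField': 'consumption'})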
| 0x0all/nupic | examples/opf/clients/hotgym/anomaly/model_params.py | Python | gpl-3.0 | 9,110 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from time import time
from sklearn.datasets import fetch_openml
from sklearn_extra.kernel_methods import EigenProClassifier
from sklearn.svm import SVC
rng = np.random.RandomState(1)
# Generate sample data from mnist
mnist = fetch_openml("mnist_784")
mnist.data = mnist.data / 255.0
p = rng.permutation(60000)
x_train = mnist.data[p][:60000]
y_train = np.int32(mnist.target[p][:60000])
x_test = mnist.data[60000:]
y_test = np.int32(mnist.target[60000:])
# randomize 20% of labels
p = rng.choice(len(y_train), np.int32(len(y_train) * 0.2), False)
y_train[p] = rng.choice(10, np.int32(len(y_train) * 0.2))
p = rng.choice(len(y_test), np.int32(len(y_test) * 0.2), False)
y_test[p] = rng.choice(10, np.int32(len(y_test) * 0.2))
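# Both models now see the same corrupted data: 20% of train and test labels
# have been replaced with uniformly random digits.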
# Run tests comparing EigenProClassifier to SVC
eig_fit_times = []
eig_pred_times = []
eig_err = []
svc_fit_times = []
svc_pred_times = []
svc_err = []
train_sizes = [500, 1000, 2000, 5000, 10000, 20000, 40000, 60000]
gamma = 0.02
# Fit models to data
for train_size in train_sizes:
for name, estimator in [
(
"EigenPro",
EigenProClassifier(n_epoch=2, gamma=gamma, random_state=rng),
),
("SupportVector", SVC(C=5, gamma=gamma)),
]:
stime = time()
estimator.fit(x_train[:train_size], y_train[:train_size])
fit_t = time() - stime
stime = time()
y_pred_test = estimator.predict(x_test)
pred_t = time() - stime
err = 100.0 * np.sum(y_pred_test != y_test) / len(y_test)
if name == "EigenPro":
eig_fit_times.append(fit_t)
eig_pred_times.append(pred_t)
eig_err.append(err)
else:
svc_fit_times.append(fit_t)
svc_pred_times.append(pred_t)
svc_err.append(err)
print(
"%s Classification with %i training samples in %0.2f seconds. "
"Test error %.4f" % (name, train_size, fit_t + pred_t, err)
)
# set up grid for figures
fig = plt.figure(num=None, figsize=(6, 4), dpi=160)
ax = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
train_size_labels = ["500", "1k", "2k", "5k", "10k", "20k", "40k", "60k"]
# Graph fit(train) time
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.plot(train_sizes, svc_fit_times, "o--", color="g", label="SVC")
ax.plot(train_sizes, eig_fit_times, "o-", color="r", label="EigenPro")
ax.set_xscale("log")
ax.set_yscale("log", nonposy="clip")
ax.set_xlabel("train size")
ax.set_ylabel("time (seconds)")
ax.legend()
ax.set_title("Train set")
ax.set_xticks(train_sizes)
ax.set_xticks([], minor=True)
ax.set_xticklabels(train_size_labels)
# Graph prediction(test) time
ax = plt.subplot2grid((2, 2), (0, 1), rowspan=1)
ax.plot(train_sizes, eig_pred_times, "o-", color="r")
ax.plot(train_sizes, svc_pred_times, "o--", color="g")
ax.set_xscale("log")
ax.set_yscale("log", nonposy="clip")
ax.set_ylabel("time (seconds)")
ax.set_title("Test set")
ax.set_xticks(train_sizes)
ax.set_xticks([], minor=True)
ax.set_xticklabels(train_size_labels)
# Graph test classification error
ax = plt.subplot2grid((2, 2), (1, 1), rowspan=1)
ax.plot(train_sizes, eig_err, "o-", color="r")
ax.plot(train_sizes, svc_err, "o-", color="g")
ax.set_xscale("log")
ax.set_xticks(train_sizes)
ax.set_xticklabels(train_size_labels)
ax.set_xticks([], minor=True)
ax.set_xlabel("train size")
ax.set_ylabel("classification error %")
plt.tight_layout()
plt.show()
| scikit-learn-contrib/scikit-learn-extra | benchmarks/_bench/eigenpro_plot_noisy_mnist.py | Python | bsd-3-clause | 3,479 |
import unittest
from parser import parseFeatures
from writers.baseWriter import AbstractFeatureWriter
class TestFeatureWriter(AbstractFeatureWriter):
def __init__(self, name=None):
self._name = name
self._instructions = []
def getData(self):
data = []
for token, obj in self._instructions:
if token == "feature" or token == "lookup":
obj = (obj._name, obj.getData())
data.append((token, obj))
return data
def feature(self, name):
self._instructions.append(("feature", TestFeatureWriter(name)))
token, obj = self._instructions[-1]
return obj
def lookup(self, name):
self._instructions.append(("lookup", TestFeatureWriter(name)))
token, obj = self._instructions[-1]
return obj
def table(self, name, data):
self._instructions.append(("table", (name, data)))
def classDefinition(self, name, contents):
self._instructions.append(("class", (name, contents)))
def lookupFlag(self, rightToLeft=False, ignoreBaseGlyphs=False, ignoreLigatures=False, ignoreMarks=False):
self._instructions.append(("lookup flag", (rightToLeft, ignoreBaseGlyphs, ignoreLigatures, ignoreMarks)))
def gsubType1(self, target, replacement):
self._instructions.append(("gsub type 1", (target, replacement)))
def gsubType3(self, target, replacement):
self._instructions.append(("gsub type 3", (target, replacement)))
def gsubType4(self, target, replacement):
self._instructions.append(("gsub type 4", (target, replacement)))
def gsubType6(self, precedingContext, target, trailingContext, replacement):
self._instructions.append(("gsub type 6", (precedingContext, target, trailingContext, replacement)))
def gposType1(self, target, value):
self._instructions.append(("gpos type 1", (target, value)))
def gposType2(self, target, value):
self._instructions.append(("gpos type 2", (target, value)))
def languageSystem(self, languageTag, scriptTag):
self._instructions.append(("language system", (languageTag, scriptTag)))
def script(self, scriptTag):
self._instructions.append(("script", (scriptTag)))
def language(self, languageTag, includeDefault=True):
self._instructions.append(("language", (languageTag, includeDefault)))
def include(self, path):
self._instructions.append(("include", (path)))
def subtableBreak(self):
self._instructions.append(("subtable break", None))
def lookupReference(self, name):
self._instructions.append(("lookup reference", name))
def featureReference(self, name):
self._instructions.append(("feature reference", name))
class TestRead(unittest.TestCase):
def testLanguageSystem(self):
test = "languagesystem DFLT dflt;"
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [('language system', ('dflt', 'DFLT'))]
self.assertEqual(result, expected)
def testStrings(self):
test = """
"feature test { sub foo by bar; } test;"
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = []
self.assertEqual(result, expected)
def testFeatureBlocks(self):
test = """
feature test {
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", []))
]
self.assertEqual(result, expected)
#
test = """
feature test{}test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", []))
]
self.assertEqual(result, expected)
#
test = """
feature test {
sub foo by bar;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 1", ("foo", "bar"))
]))
]
self.assertEqual(result, expected)
def testLookupBlocks(self):
test = """
lookup test {
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup", ("test", []))
]
self.assertEqual(result, expected)
#
test = """
lookup test{}test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup", ("test", []))
]
self.assertEqual(result, expected)
#
test = """
feature test {
lookup TEST {} TEST;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("lookup", ("TEST", []))
]))]
self.assertEqual(result, expected)
#
test = """
feature test {
lookup TEST {
sub foo by bar;
} TEST;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("lookup", ("TEST", [
("gsub type 1", ("foo", "bar"))
]))
]))]
self.assertEqual(result, expected)
#
test = """
feature test{lookup TEST{}TEST;}test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("lookup", ("TEST", []))
]))]
self.assertEqual(result, expected)
def testTableBlocks(self):
test = """
table test {
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = []
self.assertEqual(result, expected)
#
test = """
table test{}test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = []
self.assertEqual(result, expected)
#
test = """
table test {
lookup TEST {} TEST;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = []
self.assertEqual(result, expected)
#
test = """
table OS/2 {
FSType 0;
Panose 0 1 2 3 4 5 6 7 8 9;
UnicodeRange 0 1 2 3 4 5;
CodePageRange 0 1 2 3 4 5;
TypoAscender 750;
TypoDescender -250;
TypoLineGap 200;
winAscent 750;
winDescent -250;
XHeight 400;
CapHeight 750;
WeightClass 500;
WidthClass 3;
Vendor "test";
} OS/2;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("table", ("OS/2", [
("FSType", 0),
("Panose", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
("UnicodeRange", [0, 1, 2, 3, 4, 5]),
("CodePageRange", [0, 1, 2, 3, 4, 5]),
("TypoAscender", 750.0),
("TypoDescender", -250.0),
("TypoLineGap", 200.0),
("winAscent", 750.0),
("winDescent", -250.0),
("XHeight", 400.0),
("CapHeight", 750.0),
("WeightClass", 500.0),
("WidthClass", 3.0)
]))]
self.assertEqual(result, expected)
#
test = """
table head {
FontRevision 1.1;
} head;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("table", ("head", [
("FontRevision", 1.1),
]))]
self.assertEqual(result, expected)
#
test = """
table hhea {
CaretOffset 1;
Ascender 2;
Descender 3;
LineGap 4;
} hhea;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("table", ("hhea", [
("CaretOffset", 1),
("Ascender", 2),
("Descender", 3),
("LineGap", 4),
]))]
self.assertEqual(result, expected)
#
test = """
table vhea {
VertTypoAscender 2;
VertTypoDescender 3;
VertTypoLineGap 4;
} vhea;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("table", ("vhea", [
("VertTypoAscender", 2.0),
("VertTypoDescender", 3.0),
("VertTypoLineGap", 4.0),
]))]
self.assertEqual(result, expected)
def testLookupFlag(self):
test = """lookupflag RightToLeft;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (True, False, False, False))
]
self.assertEqual(result, expected)
#
test = """lookupflag IgnoreBaseGlyphs;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (False, True, False, False))
]
self.assertEqual(result, expected)
#
test = """lookupflag IgnoreLigatures;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (False, False, True, False))
]
self.assertEqual(result, expected)
#
test = """lookupflag IgnoreMarks;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (False, False, False, True))
]
self.assertEqual(result, expected)
#
test = """lookupflag RightToLeft, IgnoreBaseGlyphs, IgnoreLigatures, IgnoreMarks;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (True, True, True, True))
]
self.assertEqual(result, expected)
#
test = """lookupflag 0;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup flag", (False, False, False, False))
]
self.assertEqual(result, expected)
def testGSUBType1(self):
test = """sub foo by bar;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 1", ("foo", "bar"))
]
self.assertEqual(result, expected)
#
test = """sub [foo] by [bar];"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 1", (["foo"], ["bar"]))
]
self.assertEqual(result, expected)
#
test = """sub [foo foo.alt] by [bar];"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 1", (["foo", "foo.alt"], ["bar"]))
]
self.assertEqual(result, expected)
#
test = """sub @foo by @bar;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 1", ("@foo", "@bar"))
]
self.assertEqual(result, expected)
#
test = """
sub foo1 by bar1;
sub foo2 by bar2;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 1", ("foo1", "bar1")),
("gsub type 1", ("foo2", "bar2"))
]
self.assertEqual(result, expected)
#
test = """
feature test {
sub foo by bar;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 1", ("foo", "bar"))
]))
]
self.assertEqual(result, expected)
#
test = """
feature test {sub foo1 by bar1;sub foo2 by bar2;} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 1", ("foo1", "bar1")),
("gsub type 1", ("foo2", "bar2"))
]))
]
self.assertEqual(result, expected)
def testGSUBType3(self):
test = """sub foo from [bar];"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 3", ("foo", ["bar"]))
]
self.assertEqual(result, expected)
#
test = """
sub foo1 from [bar1];
sub foo2 from [bar2];
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 3", ("foo1", ["bar1"])),
("gsub type 3", ("foo2", ["bar2"]))
]
self.assertEqual(result, expected)
#
test = """
feature test {
sub foo from bar;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 3", ("foo", ["bar"]))
]))
]
self.assertEqual(result, expected)
#
test = """
feature test {sub foo1 from [bar1];sub foo2 from [bar2];} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 3", ("foo1", ["bar1"])),
("gsub type 3", ("foo2", ["bar2"]))
]))
]
self.assertEqual(result, expected)
def testGSUBType4(self):
test = """sub f o o by f_o_o;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 4", (["f", "o", "o"], "f_o_o"))
]
self.assertEqual(result, expected)
#
test = """sub [f] o o by f_o_o;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 4", ([["f"], "o", "o"], "f_o_o"))
]
self.assertEqual(result, expected)
#
test = """sub @f o o by f_o_o;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 4", (["@f", "o", "o"], "f_o_o"))
]
self.assertEqual(result, expected)
#
test = """
sub f o o by f_o_o;
sub b a r by b_a_r;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 4", (["f", "o", "o"], "f_o_o")),
("gsub type 4", (["b", "a", "r"], "b_a_r"))
]
self.assertEqual(result, expected)
#
test = """
feature test {
sub f o o by f_o_o;
} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 4", (["f", "o", "o"], "f_o_o"))
]))
]
self.assertEqual(result, expected)
#
test = """
feature test {sub f o o by f_o_o;sub b a r by b_a_r;} test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature", ("test", [
("gsub type 4", (["f", "o", "o"], "f_o_o")),
("gsub type 4", (["b", "a", "r"], "b_a_r"))
]))
]
self.assertEqual(result, expected)
def testGSUBType6(self):
test = """sub f o' by o.alt;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o"], [], "o.alt"))
]
self.assertEqual(result, expected)
#
test = """sub f o' o by o.alt;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o"], ["o"], "o.alt"))
]
self.assertEqual(result, expected)
#
test = """sub f o' o' by o_o.alt;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o", "o"], [], "o_o.alt"))
]
self.assertEqual(result, expected)
#
test = """sub f o' o' b by o_o.alt;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o", "o"], ["b"], "o_o.alt"))
]
self.assertEqual(result, expected)
#
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#test = """sub [f] [o]' [o] by o.alt;"""
#writer = TestFeatureWriter()
#parseFeatures(writer, test)
#result = writer.getData()
#expected = [
# ("gsub type 6", ([["f"]], [["o"]], [["o"]], "o.alt"))
# ]
#self.assertEqual(result, expected)
##
#
test = """sub [foo bar]' bar by [foo.alt bar.alt];"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", ([], [["foo", "bar"]], ["bar"], ["foo.alt", "bar.alt"]))
]
self.assertEqual(result, expected)
def testIgnoreGSUBType6(self):
# ("gsub type 6", (precedingContext, target, trailingContext, replacement))
test = """ignore sub f o';"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o"], [], None))
]
self.assertEqual(result, expected)
#
test = """ignore sub f o' o;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o"], ["o"], None))
]
self.assertEqual(result, expected)
#
test = """ignore sub f o' o';"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o", "o"], [], None))
]
self.assertEqual(result, expected)
#
test = """ignore sub f o' o' b;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", (["f"], ["o", "o"], ["b"], None))
]
self.assertEqual(result, expected)
#
test = """ignore sub [foo bar]' bar;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gsub type 6", ([], [["foo", "bar"]], ["bar"], None))
]
self.assertEqual(result, expected)
def testGPOSType1(self):
test = """pos foo <0 0 0 0>;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 1", ("foo", (0.0, 0.0, 0.0, 0.0)))
]
self.assertEqual(result, expected)
#
test = """pos foo <-10 -10 -10 -10>;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 1", ("foo", (-10.0, -10.0, -10.0, -10.0)))
]
self.assertEqual(result, expected)
#
test = """
pos foo <0 0 0 0>;
pos foo <-10 -10 -10 -10>;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 1", ("foo", (0.0, 0.0, 0.0, 0.0))),
("gpos type 1", ("foo", (-10.0, -10.0, -10.0, -10.0)))
]
self.assertEqual(result, expected)
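# GPOS LookupType 2 (pair adjustment, i.e. kerning). A rule positions two
# glyphs, glyph classes (@name) or inline classes ([a b]); "enum pos" asks
# for a class-based pair to be enumerated into individual glyph pairs.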
def testGPOSType2(self):
test = """pos foo bar 100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 2", (["foo", "bar"], 100.0))
]
self.assertEqual(result, expected)
#
test = """pos foo bar -100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 2", (["foo", "bar"], -100.0))
]
self.assertEqual(result, expected)
#
test = """enum pos foo [bar bar.alt] -100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 2", (["foo", ["bar", "bar.alt"]], -100.0))
]
self.assertEqual(result, expected)
#
test = """pos @foo @bar -100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 2", (["@foo", "@bar"], -100.0))
]
self.assertEqual(result, expected)
#
test = """pos [foo foo.alt] [bar bar.alt] -100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("gpos type 2", ([["foo", "foo.alt"], ["bar", "bar.alt"]], -100.0))
]
self.assertEqual(result, expected)
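# "script" statements set the script tag for the rules that follow. Tags are
# passed through verbatim, so case is preserved. The final case checks that a
# glyph name merely containing "script" (foo.subscript) is not mistaken for
# the keyword.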
def testScript(self):
test = """script test;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("script", "test")
]
self.assertEqual(result, expected)
#
test = """
script test;
script TEST;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("script", "test"),
("script", "TEST"),
]
self.assertEqual(result, expected)
#
test = """
script TEST;script test;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("script", "TEST"),
("script", "test"),
]
self.assertEqual(result, expected)
#
test = """
pos foo.subscript bar 100;
"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("gpos type 2", (["foo.subscript", "bar"], 100.0))]
self.assertEqual(result, expected)
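# include(path) pulls in an external feature file; the trailing semicolon is
# optional here. The last case checks that a glyph named "include" still
# parses as an ordinary glyph.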
def testInclude(self):
test = """include(../foo.fea)"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("include", "../foo.fea")
]
self.assertEqual(result, expected)
#
test = """include(../foo.fea);"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("include", "../foo.fea")
]
self.assertEqual(result, expected)
#
test = """pos include bar 100;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("gpos type 2", (["include", "bar"], 100.0))]
self.assertEqual(result, expected)
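# "subtable;" forces a subtable break in the lookup being built.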
def testSubtableBreak(self):
test = """subtable;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("subtable break", None)
]
self.assertEqual(result, expected)
#
test = """subtable;subtable;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("subtable break", None),
("subtable break", None)
]
self.assertEqual(result, expected)
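# A bare "feature test;" statement is a reference to another feature,
# typically used inside a feature block (e.g. aalt) to pull in its rules.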
def testFeatureReference(self):
test = """feature test;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("feature reference", "test")
]
self.assertEqual(result, expected)
#
test = """feature TEST {feature test;} TEST;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("feature", ("TEST", [("feature reference", "test")]))]
self.assertEqual(result, expected)
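# A bare "lookup test;" statement re-applies a previously defined lookup.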
def testLookupReference(self):
test = """lookup test;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [
("lookup reference", "test")
]
self.assertEqual(result, expected)
#
test = """lookup TEST {lookup test;} TEST;"""
writer = TestFeatureWriter()
parseFeatures(writer, test)
result = writer.getData()
expected = [("lookup", ("TEST", [("lookup reference", "test")]))]
self.assertEqual(result, expected)
if __name__ == "__main__":
unittest.main()
| jamesgk/feaTools | Lib/feaTools/test.py | Python | mit | 28,027 |