max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
scripts/slave/recipe_modules/chromium_android/examples/full.py | mithro/chromium-build | 0 | 12770551 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'adb',
'build',
'chromium',
'chromium_android',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
BUILDERS = freeze({
'basic_builder': {
'target': 'Release',
'build': True,
},
'restart_usb_builder': {
'restart_usb': True,
'target': 'Release',
'build': True,
},
'coverage_builder': {
'coverage': True,
'target': 'Debug',
'build': True,
},
'tester': {},
'perf_runner': {
'perf_config': 'sharded_perf_tests.json',
},
'perf_runner_user_build': {
'perf_config': 'sharded_perf_tests.json',
'skip_wipe': True,
},
'perf_runner_disable_location': {
'perf_config': 'sharded_perf_tests.json',
'disable_location': True,
},
'perf_runner_allow_low_battery': {
'perf_config': 'sharded_perf_tests.json',
'min_battery_level': 50,
},
'perf_adb_vendor_keys': {
'adb_vendor_keys': True,
},
'perf_runner_allow_high_battery_temp': {
'perf_config': 'sharded_perf_tests.json',
'max_battery_temp': 500,
},
'gerrit_try_builder': {
'build': True,
'skip_wipe': True,
},
'webview_tester': {
'android_apply_config': ['remove_all_system_webviews'],
},
'slow_tester': {
'timeout_scale': 2,
},
'downgrade_install_tester': {
'specific_install': True,
'downgrade': True,
},
'keep_data_install_tester': {
'specific_install': True,
'keep_data': True,
},
'no_strict_mode_tester': {
'strict_mode': 'off',
},
'resource_size_builder': {
'resource_size': True,
},
'webview_cts': {
'run_webview_cts': True,
},
'last_known_devices': {
'perf_config': 'sharded_perf_tests.json',
'last_known_devices': '.last_devices',
},
'device_flags_builder': {
'device_flags': 'device_flags_file',
},
'no_cache_builder': {
'use_git_cache': False,
},
'json_results_file': {
'json_results_file': 'json_results_file',
},
'render_results': {
'render_results_dir': 'chrome/test/data/android/render_tests',
},
'result_details': {
'result_details': True,
'store_tombstones': True,
},
'enable_platform_mode': {
'perf_config': 'sharded_perf_tests.json',
'enable_platform_mode': True,
'write_buildbot_json': True,
},
'timestamp_as_point_id': {
'perf_config': 'sharded_perf_tests.json',
'timestamp_as_point_id': True
},
'telemetry_browser_tests_tester': {
'run_telemetry_browser_tests': True,
},
'use_devil_adb': {
'android_apply_config': ['use_devil_adb'],
},
'remove_system_vrcore': {
'android_apply_config': ['remove_system_vrcore'],
},
'stackwalker': {
'run_stackwalker': True,
},
'asan': {
'chromium_apply_config': ['chromium_asan'],
}
})
from recipe_engine.recipe_api import Property
PROPERTIES = {
'buildername': Property(),
}
def RunSteps(api, buildername):
config = BUILDERS[buildername]
api.chromium_android.configure_from_properties(
'base_config',
REPO_URL='svn://svn.chromium.org/chrome/trunk/src',
REPO_NAME='src/repo',
INTERNAL=True,
BUILD_CONFIG='Release')
api.chromium_android.c.get_app_manifest_vars = True
api.chromium_android.c.coverage = config.get('coverage', False)
api.chromium_android.c.asan_symbolize = True
if config.get('adb_vendor_keys'):
api.chromium.c.env.ADB_VENDOR_KEYS = api.path['start_dir'].join('.adb_key')
for c in config.get('chromium_apply_config', []):
api.chromium.apply_config(c)
for c in config.get('android_apply_config', []):
api.chromium_android.apply_config(c)
api.chromium_android.init_and_sync(
use_bot_update=False, use_git_cache=config.get('use_git_cache', True))
if config.get('build', False):
api.chromium.ensure_goma()
api.chromium.runhooks()
api.chromium_android.run_tree_truth(additional_repos=['foo'])
assert 'MAJOR' in api.chromium.get_version()
api.chromium_android.host_info()
if config.get('build', False):
api.chromium.compile(use_goma_module=True)
api.chromium_android.make_zip_archive(
'zip_build_product', 'archive.zip', include_filters=['*.apk'],
exclude_filters=['*.so', '*.a'])
else:
api.chromium_android.download_build('build-bucket',
'build_product.zip')
api.chromium_android.git_number()
if config.get('specific_install'):
api.chromium_android.adb_install_apk(
'Chrome.apk',
devices=['abc123'],
allow_downgrade=config.get('downgrade', False),
keep_data=config.get('keep_data', False),
)
api.adb.root_devices()
api.chromium_android.spawn_logcat_monitor()
failure = False
try:
# TODO(luqui): remove redundant cruft, need one consistent API.
api.chromium_android.device_status_check()
api.path.mock_add_paths(api.chromium_android.known_devices_file)
api.chromium_android.device_status_check(
restart_usb=config.get('restart_usb', False))
api.chromium_android.provision_devices(
skip_wipe=config.get('skip_wipe', False),
disable_location=config.get('disable_location', False),
min_battery_level=config.get('min_battery_level'),
max_battery_temp=config.get('max_battery_temp'),
reboot_timeout=1800)
api.chromium_android.common_tests_setup_steps(skip_wipe=True)
except api.step.StepFailure as f:
failure = f
api.chromium_android.monkey_test()
try:
if config.get('perf_config'):
api.chromium_android.run_sharded_perf_tests(
config='fake_config.json',
flaky_config='flake_fakes.json',
upload_archives_to_bucket='archives-bucket',
known_devices_file=config.get('last_known_devices', None),
enable_platform_mode=config.get('enable_platform_mode', None),
write_buildbot_json=config.get('write_buildbot_json', False),
timestamp_as_point_id=config.get('timestamp_as_point_id', False))
except api.step.StepFailure as f:
failure = f
api.chromium_android.run_instrumentation_suite(
name='WebViewInstrumentationTest',
apk_under_test=api.chromium_android.apk_path(
'WebViewInstrumentation.apk'),
test_apk=api.chromium_android.apk_path('WebViewInstrumentationTest.apk'),
flakiness_dashboard='test-results.appspot.com',
annotation='SmallTest',
except_annotation='FlakyTest',
screenshot=True,
timeout_scale=config.get('timeout_scale'),
strict_mode=config.get('strict_mode'),
additional_apks=['Additional.apk'],
device_flags=config.get('device_flags'),
json_results_file=config.get('json_results_file'),
result_details=config.get('result_details'),
store_tombstones=config.get('store_tombstones'),
render_results_dir=config.get('render_results_dir'))
api.chromium_android.run_test_suite(
'unittests',
gtest_filter='WebRtc*',
result_details=config.get('result_details'),
store_tombstones=config.get('store_tombstones'),
tool='asan')
if not failure:
api.chromium_android.run_bisect_script(extra_src='test.py',
path_to_config='test.py')
if config.get('resource_size'):
api.chromium_android.resource_sizes(
apk_path=api.chromium_android.apk_path('Example.apk'),
chartjson_file=True,
upload_archives_to_bucket='Bucket')
api.chromium_android.create_supersize_archive(
apk_path=api.chromium_android.apk_path('Example.apk'),
size_path=api.chromium_android.apk_path('Example.apk.size'))
if config.get('run_webview_cts'):
api.chromium_android.run_webview_cts(command_line_args=[
'--webview_arg_1', '--webview_arg_2'])
if config.get('run_telemetry_browser_tests'):
api.chromium_android.run_telemetry_browser_test('PopularUrlsTest')
api.chromium_android.logcat_dump()
api.chromium_android.stack_tool_steps()
if config.get('coverage', False):
api.chromium_android.coverage_report()
if config.get('run_stackwalker'):
chrome_breakpad_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'lib.unstripped', 'libchrome.so')
webview_breakpad_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'lib.unstripped',
'libwebviewchromium.so')
dump_syms_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'dump_syms')
microdump_stackwalk_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'microdump_stackwalk')
api.path.mock_add_paths(chrome_breakpad_binary)
api.path.mock_add_paths(webview_breakpad_binary)
api.path.mock_add_paths(dump_syms_binary)
api.path.mock_add_paths(microdump_stackwalk_binary)
api.chromium_android.common_tests_final_steps(
checkout_dir=api.path['checkout'])
if failure:
# pylint: disable=raising-bad-type
raise failure
def GenTests(api):
def properties_for(buildername):
return api.properties.generic(
buildername=buildername,
bot_id='tehslave',
repo_name='src/repo',
issue='123456789',
patchset='1',
rietveld='http://rietveld.example.com',
repo_url='svn://svn.chromium.org/chrome/trunk/src',
revision='4f4b02f6b7fa20a3a25682c457bbc8ad589c8a00',
internal=True)
for buildername in BUILDERS:
yield api.test('%s_basic' % buildername) + properties_for(buildername)
yield (api.test('tester_no_devices_during_recovery') +
properties_for('tester') +
api.step_data('device_recovery', retcode=1))
yield (api.test('tester_no_devices_during_status') +
properties_for('tester') +
api.step_data('device_status', retcode=1))
yield (api.test('tester_other_device_failure_during_recovery') +
properties_for('tester') +
api.step_data('device_recovery', retcode=2))
yield (api.test('tester_other_device_failure_during_status') +
properties_for('tester') +
api.step_data('device_status', retcode=2))
yield (api.test('tester_with_step_warning') +
properties_for('tester') +
api.step_data('unittests', retcode=88))
yield (api.test('tester_failing_host_info') +
properties_for('tester') +
api.step_data(
'Host Info',
api.json.output({'failures': ['foo', 'bar']}),
retcode=1))
yield (api.test('tester_blacklisted_devices') +
properties_for('tester') +
api.override_step_data('provision_devices',
api.json.output(['abc123', 'def456'])))
yield (api.test('tester_offline_devices') +
properties_for('tester') +
api.override_step_data('device_status',
api.json.output([{}, {}])))
yield (api.test('perf_tests_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo', retcode=1))
yield (api.test('perf_tests_infra_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo', retcode=87))
yield (api.test('perf_tests_reference_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo.reference', retcode=1))
yield (api.test('perf_tests_infra_reference_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo.reference', retcode=87))
yield (api.test('gerrit_refs') +
api.properties.generic(
buildername='gerrit_try_builder',
bot_id='testslave',
repo_name='src/repo',
issue='123456789',
patchset='1',
rietveld='http://rietveld.example.com',
repo_url='svn://svn.chromium.org/chrome/trunk/src',
revision='4f4b02f6b7fa20a3a25682c457bbc8ad589c8a00',
internal=True, **({'event.patchSet.ref':'refs/changes/50/176150/1'})))
yield (api.test('tombstones_m53') +
properties_for('tester') +
api.override_step_data(
'get version (2)',
api.raw_io.output_text(
'MAJOR=53\nMINOR=0\nBUILD=2800\nPATCH=0\n')))
yield (api.test('telemetry_browser_tests_failures') +
properties_for('telemetry_browser_tests_tester') +
api.override_step_data('Run telemetry browser_test PopularUrlsTest',
api.json.output({'successes': ['passed_test1', 'passed_test2'],
'failures': ['failed_test_1', 'failed_test_2']}),
retcode=1))
yield (api.test('upload_result_details_failures') +
properties_for('result_details') +
api.override_step_data('unittests: generate result details',
retcode=1))
yield (api.test('asan_setup_failure') +
properties_for('asan') +
api.override_step_data('Set up ASAN on devices.wait_for_devices',
retcode=87))
| 1.71875 | 2 |
benchmarks/PCA_multiprocessing_transform.py | tirkarthi/odin-ai | 7 | 12770552 | # ===========================================================================
# Single process:
# 0.0003s
# Multiprocessing:
# ncpu = 1: ~0.16s
# ncpu = 2: ~0.07s
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
from odin import fuel as F, visual
from odin.ml import MiniBatchPCA
from sklearn.manifold import TSNE
from odin.utils import UnitTimer, TemporaryDirectory
iris = F.load_iris()
print(iris)
pca = MiniBatchPCA()
X = iris['X'][:]
i = 0
while i < X.shape[0]:
x = X[i:i + 20]
i += 20
pca.partial_fit(x)
print("Fitting PCA ...")
with UnitTimer():
for i in range(8):
x = pca.transform(X)
with UnitTimer():
for i in range(8):
x = pca.transform_mpi(X, keep_order=True, ncpu=1, n_components=2)
print("Output shape:", x.shape)
colors = ['r' if i == 0 else ('b' if i == 1 else 'g')
for i in iris['y'][:]]
visual.plot_scatter(x[:, 0], x[:, 1], color=colors, size=8)
visual.plot_save('/tmp/tmp.pdf')
# bananab
| 2.359375 | 2 |
app/__init__.py | azhou5211/recipeat | 3 | 12770553 | # import the flask library
from flask import Flask
from config import Config
# configure an object of class Flask with __name__
app = Flask(__name__)
app.config.from_object(Config)
# here app is a package not to be confused with directory app
from app import routes
| 2.15625 | 2 |
nemo_nowcast/workers/clear_checklist.py | douglatornell/nemo_nowcast | 1 | 12770554 | <reponame>douglatornell/nemo_nowcast<gh_stars>1-10
# Copyright 2016-2021 <NAME>, 43ravens
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO_Nowcast framework clear_checklist worker.
Send a message to the nowcast system manager requesting that it clear its
system state checklist.
This worker is normally launched in automation at the end of a nowcast
processing cycle (e.g. end of the day), just prior to launching the
:py:mod:`nemo_nowcast.workers.rotate_logs` worker.
It can also be launched from the command-line by the nowcast administrator
as necessary for system maintenance.
"""
import logging
from nemo_nowcast import NowcastWorker
NAME = "clear_checklist"
logger = logging.getLogger(NAME)
def main():
    """Set up and run the worker.

    For command-line usage see:

    :command:`python -m nemo_nowcast.workers.clear_checklist --help`
    """
    worker = NowcastWorker(NAME, description=__doc__, package="nemo_nowcast.workers")
    worker.init_cli()
    # Run the worker loop: `clear_checklist` does the work; `success`/`failure`
    # translate its outcome into the message type reported to the manager.
    worker.run(clear_checklist, success, failure)
def success(parsed_args):
    """Log the successful outcome and return the ``success`` message type."""
    logger.info("nowcast system checklist cleared")
    return "success"
def failure(parsed_args):
    """Log the failed outcome and return the ``failure`` message type."""
    logger.critical("failed to clear nowcast system checklist")
    return "failure"
def clear_checklist(parsed_args, config, tell_manager):
    """Ask the manager, via ``tell_manager``, to clear the system state checklist."""
    logger.info("requesting that manager clear system state checklist")
    tell_manager("clear checklist")
    # Deliberately returns nothing: there is no checklist entry to report
    # because the checklist was just cleared.
if __name__ == "__main__":
main() # pragma: no cover
| 2.15625 | 2 |
ForLoop/VowelsSum.py | Rohitm619/Softuni-Python-Basic | 1 | 12770555 | <filename>ForLoop/VowelsSum.py
text = input()
sum = 0
for symbol in text:
if symbol == "a":
sum += 1
elif symbol == "e":
sum += 2
elif symbol == "i":
sum += 3
elif symbol == "o":
sum += 4
elif symbol == "u":
sum += 5
print(sum) | 3.875 | 4 |
main.py | wwwins/BrainWavePy | 0 | 12770556 | <filename>main.py
# -*- coding: utf-8 -*-
# main.py
# 分析 EEG 轉換成聲音
import os
from subprocess import Popen, PIPE
from time import sleep
from random import randrange
from random import random
from player import MPG123Player
ENABLE_MPG123 = False
ENABLE_MPLAYER = False
ENABLE_MPG123PLAYER = True
files = []
p = None
doremi = None
eegArray = ['A3', 'A4', 'B3', 'B4', 'C3', 'C4', 'D3', 'D4', 'E3', 'E4', 'F3', 'F4', 'G3', 'G4']
attentionArray = ['2', '4', '8', '16']
folder = './music/lead'+str(randrange(1,4))+'/'
def mpg3player(music, frame=None):
    """Spawn an ``mpg123`` subprocess playing *music* and return its Popen handle.

    If *frame* is given (a string), playback is limited to that many MPEG
    frames via mpg123's ``-n`` option.  stdin/stdout are piped so callers
    can ``poll()`` the process to detect when playback finishes.
    """
    # Removed dead code: the original had an unreachable bare `return` and a
    # commented-out mplayer invocation after the first `return` statement.
    if frame is None:
        return Popen(['mpg123', '-q', music], shell=False, stdout=PIPE, stdin=PIPE)
    # -n <frame>: decode only the first `frame` frames of the file.
    return Popen(['mpg123', '-q', '-n', frame, music], shell=False, stdout=PIPE, stdin=PIPE)
def playAll():
fn = './music/loop '+str(randrange(1,21))+'.mp3'
# p = mpg3player(fn)
p = Popen(['mpg123', '-q', '-f', '22768', fn], shell=False, stdout=PIPE, stdin=PIPE)
print "bg music:", fn
print "start:", p.pid
fn = processEEG()
if ENABLE_MPG123:
doremi = mpg3player(fn)
if ENABLE_MPG123PLAYER:
doremi = MPG123Player(fn)
doremi.start()
print "doremi:", fn
# 檢查背景音是否播完
while p.poll() is None:
# 檢查前一個單音是否播完,播完才播下一個
if ENABLE_MPG123:
if doremi.poll() is not None:
# 下一個單音
fn = processEEG()
# doremi = mpg3player(fn)
doremi.play(fn)
print "doremi:", fn
if ENABLE_MPG123PLAYER:
if doremi.playing is False:
# 下一個單音
fn = processEEG()
# doremi = mpg3player(fn)
doremi.play(fn)
print "doremi:", fn
sleep(0.1)
if ENABLE_MPG123PLAYER:
doremi.quitAll()
print "done"
def processEEG():
    """Map the current (mock) EEG reading to the note file that should be played.

    ``attention`` selects the bit-depth variant and ``delta`` selects one of
    the 14 notes; both are scaled from their 0-99 range onto the index range
    of the corresponding module-level array.  Returns the full file path
    inside the randomly chosen module-level ``folder`` (lead1..lead3).
    """
    [attention, meditation, delta, theta, lowAlpha, heighAlpha, lowBeta, highBeta, lowGamma, highGamma] = getEEGData()
    # The lead folder (lead1..lead3) was picked at random once, at module level.
    # Available notes: A[3,4], B[3,4], C[3,4], D[3,4], E[3,4], F[3,4], G[3,4] -- 14 notes.
    # Available bit depths: 2, 4, 8, 16 bit variants of each note.
    # attention in [0, 100) -> /25 maps onto the 4 entries of attentionArray.
    bit = attentionArray[int(attention/25)]+'Bit.mp3'
    # delta in [0, 100) -> /7.2 maps onto the 14 entries of eegArray.
    filename = folder + eegArray[int(delta/7.2)] + ' ' + bit
    return filename
def getEEGData():
    """Return a mock EEG reading: ten random ints, each in [0, 100).

    Order: [attention, meditation, delta, theta, lowAlpha, heighAlpha,
    lowBeta, highBeta, lowGamma, highGamma].
    """
    # `range` instead of the Python-2-only `xrange`: valid on both 2 and 3.
    return [int(100 * random()) for _ in range(10)]
def main():
    # Entry point: play one randomly chosen background loop while layering
    # EEG-driven single notes on top until the loop finishes.
    playAll()

if __name__ == '__main__':
    main()
| 2.515625 | 3 |
SMPyBandits/Policies/Posterior/DiscountedBeta.py | balbok0/SMPyBandits | 309 | 12770557 | # -*- coding: utf-8 -*-
r""" Manipulate posteriors of Bernoulli/Beta experiments., for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# Local imports
try:
from .Beta import Beta, bernoulliBinarization
from .with_proba import with_proba
except (ImportError, SystemError):
from Beta import Beta, bernoulliBinarization
from with_proba import with_proba
try:
from numpy.random import beta as betavariate # Faster! Yes!
except ImportError:
from random import betavariate
from scipy.special import btdtri
# --- Constants
#: Default value for the discount factor :math:`\gamma\in(0,1)`.
#: ``0.95`` is empirically a reasonable value for short-term non-stationary experiments.
GAMMA = 0.95
# --- Class
class DiscountedBeta(Beta):
    r""" Manipulate posteriors of Bernoulli/Beta experiments, for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).

    - It keeps :math:`\tilde{S}(t)` and :math:`\tilde{F}(t)` the *discounted* counts of successes and failures (S and F).

    Internally ``self.N == [failures, successes]`` (index 0 = F, index 1 = S),
    while ``self._a`` / ``self._b`` hold the fixed Beta prior parameters.
    """

    def __init__(self, gamma=GAMMA, a=1, b=1):
        r""" Create a Beta posterior :math:`\mathrm{Beta}(\alpha, \beta)` with no observation, i.e., :math:`\alpha = 1` and :math:`\beta = 1` by default."""
        assert a >= 0, "Error: parameter 'a' for Beta posterior has to be >= 0."  # DEBUG
        self._a = a
        assert b >= 0, "Error: parameter 'b' for Beta posterior has to be >= 0."  # DEBUG
        self._b = b
        self.N = [0, 0]  #: List of two parameters [a, b]
        assert 0 < gamma <= 1, "Error: for a DiscountedBayesianIndexPolicy policy, the discount factor has to be in (0,1], but it was {}.".format(gamma)  # DEBUG
        if gamma == 1:
            print("Warning: gamma = 1 is stupid, just use a regular Beta posterior!")  # DEBUG
        self.gamma = gamma  #: Discount factor :math:`\gamma\in(0,1)`.

    def __str__(self):
        return r"DiscountedBeta(\alpha={:.3g}, \beta={:.3g})".format(self.N[1], self.N[0])

    def reset(self, a=None, b=None):
        """Reset alpha and beta, both to 0 as when creating a new default DiscountedBeta."""
        # NOTE(review): `a` and `b` are resolved from the stored priors below
        # but never used -- the counts are always zeroed regardless. Confirm
        # whether the parameters were meant to re-seed self._a / self._b.
        if a is None:
            a = self._a
        if b is None:
            b = self._b
        self.N = [0, 0]

    def sample(self):
        """Get a random sample from the DiscountedBeta posterior (using :func:`numpy.random.betavariate`).

        - Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.
        """
        # Prior (a, b) is added to the discounted counts at sampling time.
        return betavariate(self._a + self.N[1], self._b + self.N[0])

    def quantile(self, p):
        """Return the p quantile of the DiscountedBeta posterior (using :func:`scipy.stats.btdtri`).

        - Used only by :class:`BayesUCB` and :class:`AdBandits` so far.
        """
        return btdtri(self._a + self.N[1], self._b + self.N[0], p)
        # Bug: do not call btdtri with (0.5,0.5,0.5) in scipy version < 0.9 (old)

    def forget(self, obs):
        """Forget the last observation, and undiscount the count of observations."""
        # Exact inverse of update(): subtract the reward, then divide by gamma.
        binaryObs = bernoulliBinarization(obs)
        self.N[binaryObs] = (self.N[binaryObs] - 1) / self.gamma
        otherObs = 1 - binaryObs
        self.N[otherObs] = self.N[otherObs] / self.gamma

    def update(self, obs):
        r""" Add an observation, and discount the previous observations.

        - If obs is 1, update :math:`\alpha` the count of positive observations,
        - If it is 0, update :math:`\beta` the count of negative observations.

        - But instead of using :math:`\tilde{S}(t) = S(t)` and :math:`\tilde{N}(t) = N(t)`, they are updated at each time step using the discount factor :math:`\gamma`:

        .. math::

            \tilde{S}(t+1) &= \gamma \tilde{S}(t) + r(t),
            \tilde{F}(t+1) &= \gamma \tilde{F}(t) + (1 - r(t)).

        .. note:: Otherwise, a trick with :func:`bernoulliBinarization` has to be used.
        """
        # bernoulliBinarization maps a float reward in [0, 1] onto {0, 1},
        # so fractional rewards are also supported.
        binaryObs = bernoulliBinarization(obs)
        self.N[binaryObs] = self.gamma * self.N[binaryObs] + 1
        otherObs = 1 - binaryObs
        self.N[otherObs] = self.gamma * self.N[otherObs]

    def discount(self):
        r""" Simply discount the old observation, when no observation is given at this time.

        .. math::

            \tilde{S}(t+1) &= \gamma \tilde{S}(t),
            \tilde{F}(t+1) &= \gamma \tilde{F}(t).
        """
        # max(0, ...) clamps tiny negative values left over by forget().
        self.N[0] = max(0, self.gamma * self.N[0])
        self.N[1] = max(0, self.gamma * self.N[1])

    def undiscount(self):
        r""" Simply cancel the discount on the old observation, when no observation is given at this time.

        .. math::

            \tilde{S}(t+1) &= \frac{1}{\gamma} \tilde{S}(t),
            \tilde{F}(t+1) &= \frac{1}{\gamma} \tilde{F}(t).
        """
        self.N[0] = max(0, self.N[0] / self.gamma)
        self.N[1] = max(0, self.N[1] / self.gamma)
| 2.75 | 3 |
src/weblayer/route.py | thruflo/weblayer | 3 | 12770558 | <reponame>thruflo/weblayer<filename>src/weblayer/route.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" :py:mod:`weblayer.route` provides :py:class:`RegExpPathRouter`, an
implementation of :py:class:`~weblayer.interfaces.IPathRouter` that uses
`regular expression`_ patterns. Say, for example, you have some request
handlers::
>>> class DummyIndex(object):
... implements(IRequestHandler)
...
>>> class Dummy404(object):
... implements(IRequestHandler)
...
You can then map request paths to them using a list of two item tuples::
>>> mapping = [(
... # string or compiled regexp pattern to match
... # against the request path
... r'/',
... # class the request should be handled by
... DummyIndex
... ), (
... r'/(.*)',
... Dummy404
... )
... ]
>>> path_router = RegExpPathRouter(mapping)
And use the path router to get handlers for request paths::
>>> path_router.match('/') == (DummyIndex, (), {})
True
Returning the handler and the match groups if any::
>>> path_router.match('/foobar') == (Dummy404, ('foobar',), {})
True
The mapping items are looked up in order::
>>> mapping.reverse()
>>> path_router = RegExpPathRouter(mapping)
>>> path_router.match('/') == (Dummy404, ('',), {})
True
If the path doesn't match, returns ``(None, None, None)``::
>>> path_router = RegExpPathRouter([])
>>> path_router.match('/')
(None, None, None)
.. _`regular expression`: http://docs.python.org/library/re.html
"""
__all__ = [
'RegExpPathRouter'
]
import re
from zope.interface import implements
from interfaces import IPathRouter, IRequestHandler
_RE_TYPE = type(re.compile(r''))
def _compile_top_and_tailed(string_or_compiled_pattern):
""" If ``string_or_compiled_pattern`` is a compiled pattern,
just return it::
>>> p = r'^foobar$'
>>> c = re.compile(p)
>>> _compile_top_and_tailed(c) == c
True
Otherwise if it's a ``basestring`` compile and return it::
>>> _compile_top_and_tailed(p) == c
True
>>> _compile_top_and_tailed({})
Traceback (most recent call last):
...
TypeError: `{}` must be string or compiled pattern
Prepending ``'^'`` if ``pattern`` doesn't already start with it::
>>> p2 = r'foobar$'
>>> _compile_top_and_tailed(p2) == c
True
Appending ``'$'`` if it doesn't already start with it::
>>> p3 = r'^foobar'
>>> _compile_top_and_tailed(p3) == c
True
"""
if isinstance(string_or_compiled_pattern, _RE_TYPE):
return string_or_compiled_pattern
s = string_or_compiled_pattern
if not isinstance(s, basestring):
error_msg = u'`%s` must be string or compiled pattern' % s
raise TypeError(error_msg)
if not s.startswith('^'):
s = r'^%s' % s
if not s.endswith('$'):
s = r'%s$' % s
return re.compile(s)
class RegExpPathRouter(object):
""" Routes paths to request handlers using regexp patterns.
"""
implements(IPathRouter)
def __init__(self, raw_mapping, compile_=None):
""" Takes a list of raw regular expressions mapped to request
handler classes, compiles the regular expressions and
provides ``self._mapping``.
>>> from mock import Mock
>>> mock_compile = Mock()
>>> mock_compile.return_value = re.compile(r'^/foo$')
>>> class MockHandler(object):
... implements(IRequestHandler)
...
>>> raw_mapping = [(
... r'/foo',
... MockHandler
... )
... ]
>>> path_router = RegExpPathRouter(
... raw_mapping,
... compile_=mock_compile
... )
>>> isinstance(path_router._mapping, list)
True
>>> isinstance(path_router._mapping[0], tuple)
True
>>> path_router._mapping[0][1] == MockHandler
True
The first item of each pair is passed to ``compile()``::
>>> mock_compile.call_args[0][0] == r'/foo'
True
>>> path_router._mapping[0][0] == mock_compile.return_value
True
As long as ``raw_mapping`` can be unpacked into pairs of items::
>>> raw_mapping = [('a')]
>>> RegExpPathRouter(raw_mapping) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: need more than 1 value to unpack
And the second item implements
:py:class:`~weblayer.interfaces.IRequestHandler`::
>>> RegExpPathRouter([(r'/foo', Mock)]) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: `<class ... must implement ....IRequestHandler>`
"""
compile_ = compile_ is None and _compile_top_and_tailed or compile_
self._mapping = []
for regexp, handler_class in raw_mapping:
if not IRequestHandler.implementedBy(handler_class):
error_msg = u'`%s` must implement `%s`' % (
handler_class,
IRequestHandler
)
raise TypeError(error_msg)
self._mapping.append((compile_(regexp), handler_class))
def match(self, path):
""" If the ``path`` matches, return the handler class, the
`regular expression`_ match object's `groups`_ (as ``args`` to pass
to the handler) and an empty dict (as ``kwargs`` to pass to the
handler), as per::
>>> path_router = RegExpPathRouter([])
>>> handler_class, args, kwargs = path_router.match('/foo')
Otherwise return ``(None, None, None)``.
.. _`regular expression`: http://docs.python.org/library/re.html
.. _`groups`: http://docs.python.org/library/re.html#re.MatchObject.groups
"""
for regexp, handler_class in self._mapping:
match = regexp.match(path)
if match:
return handler_class, match.groups(), {}
return None, None, None
| 2.671875 | 3 |
Intro to pygame/Animation/thelonghardtruth.py | badateverything/python-projects-for-school | 0 | 12770559 | import turtle as player
wn = player.Screen()
wn.title("disney level animation")
wn.bgcolor("Black")
#create new shapes
wn.register_shape("invader.gif")
wn.register_shape("invader2.gif")
player.shape("invader.gif")
player.frame = 0
#copying the video
player.frames = ["invader.gif", "invader2.gif"]
def player_animate():
player.frame += 1
if player.frame >= len(player.frames):
player.frame = 0
player.shape(player.frames[player.frame])
#TIMER STARTO
wn.ontimer(player_animate, 500)
player_animate()
while True:
wn.update()
print("space")
print("vaidable")
wn.mainloop() | 3.46875 | 3 |
src/utils.py | samsungnlp/semeval2022-task9 | 0 | 12770560 | <filename>src/utils.py
import errno
import json
import os
from typing import Dict
def _create_directory_if_not_exist(file_path: str) -> None:
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def read_dict(file_path: str) -> Dict:
    """Load and return the JSON dictionary stored at *file_path*."""
    with open(file_path) as handle:
        data = json.load(handle)
    return data
| 3.1875 | 3 |
dec16.py | bloy/adventofcode-2015 | 0 | 12770561 | #!/usr/bin/env python
import re
import pprint
def parse_sue_input(lines):
    """Parse puzzle-input lines into a list of fact dicts, one per Sue.

    Each line looks like ``Sue 3: cars: 9, akitas: 3``; the result for it is
    ``{'number': 3, 'cars': 9, 'akitas': 3}`` with every value an int.
    """
    parsed = []
    for raw in lines:
        header, _, fact_text = raw.strip().partition(': ')
        facts = {}
        for fact in fact_text.split(', '):
            key, _, value = fact.partition(': ')
            facts[key] = int(value)
        # The header is "Sue <n>"; keep the sequence number with the facts.
        facts['number'] = int(header.split(' ')[1])
        parsed.append(facts)
    return parsed
def deduce_part1(sues, clues):
    """Return the Sues whose known facts all match *clues* exactly.

    A Sue that does not mention a clued fact is kept (the fact is unknown,
    not contradicted).  Replaces the Python-2-only ``dict.has_key`` with
    the ``in`` operator, which works on both Python 2 and 3.
    """
    for clue, value in clues.items():
        sues = [sue for sue in sues
                if clue not in sue or sue[clue] == value]
    return sues
def valid_part2_sue(sue, clue, value):
    """Return True if *sue* is consistent with one clue under part-2 rules.

    Unknown facts are consistent.  'cats'/'trees' readings are lower bounds
    (the Sue must have strictly more than *value*); 'pomeranians'/'goldfish'
    are upper bounds (strictly fewer); every other fact must match exactly.

    Fixes two defects of the original: ``dict.has_key`` is Python-2-only
    (``in`` works on both), and several branches fell off the end of the
    function returning None instead of False -- harmless for truthiness
    callers, now normalized to real booleans.
    """
    if clue not in sue:
        return True
    if clue in ('cats', 'trees'):
        return sue[clue] > value
    if clue in ('pomeranians', 'goldfish'):
        return sue[clue] < value
    return sue[clue] == value
def deduce_part2(sues, clues):
    """Successively filter *sues*, keeping those consistent with every clue."""
    remaining = sues
    for clue, value in clues.items():
        remaining = [sue for sue in remaining
                     if valid_part2_sue(sue, clue, value)]
    return remaining
if __name__ == '__main__':
with open('input/day_16') as lines:
sues = parse_sue_input(lines)
clues = {
"children": 3,
"cats": 7,
"samoyeds": 2,
"pomeranians": 3,
"akitas": 0,
"vizslas": 0,
"goldfish": 5,
"trees": 3,
"cars": 2,
"perfumes": 1
}
pprint.pprint(deduce_part1(sues, clues))
pprint.pprint(deduce_part2(sues, clues))
| 3.8125 | 4 |
src/main.py | w1png/finance-tracker-telegram-bot | 1 | 12770562 | # Mockup of a plan:
#
# Keyboard buttons:
# get my id
# if in family -> leave family
# if in family and is_family_creator -> invite a person by id | kick a person
# if not in family -> create a family | join a family
#
# Database
# user -> user_id | family_id
# family -> family_id | user_list | creator_id
# bills -> bill_id | family_id | user_id | price | message
from datetime import datetime
from math import ceil
import sqlite3
from tokenize import String
from aiogram import Bot, Dispatcher, executor, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.contrib.fsm_storage.memory import MemoryStorage
import user as usr
import markups as mk
import family as fam
import bill as b
import text_templates as tt
import state_handler as sh
# Open (or create) the bot database and make sure all tables exist.
conn = sqlite3.connect("data.db")
c = conn.cursor()
# users: which family (if any) a user belongs to, plus a display name.
c.execute("CREATE TABLE IF NOT EXISTS users (user_id INTEGER, family_id INTEGER, username TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS families (family_id INTEGER, creator_id INTEGER)")
c.execute("CREATE TABLE IF NOT EXISTS invites (user_id INTEGER, family_id INTEGER)")
# Fixed typo: bills.user_id was declared "INGEGER", which SQLite silently
# accepts but gives the column NUMERIC affinity instead of INTEGER.
c.execute("CREATE TABLE IF NOT EXISTS bills (bill_id INTEGER, family_id INTEGER, user_id INTEGER, price REAL, message TEXT, date TEXT)")
conn.commit()
with open("token.txt", "r") as token_file:
token = token_file.readline().rstrip()
storage = MemoryStorage()
bot = Bot(token=token)
dp = Dispatcher(bot, storage=storage)
@dp.message_handler(commands=['start'])
async def welcome(message: types.Message):
    """Greet the user on /start with the keyboard matching their family status."""
    sender = usr.User(message.chat.id)
    keyboard = mk.get_markup_start(sender)
    await bot.send_message(
        chat_id=sender.get_user_id(),
        text=tt.greeting,
        reply_markup=keyboard
    )
@dp.message_handler()
async def msg(message: types.Message):
    """Catch-all text handler: dispatches on the reply-keyboard button pressed,
    otherwise treats the text as a new bill in the form "<price> - <message>"."""
    user = usr.User(message.chat.id)
    if message.text == tt.get_my_id:
        # NOTE(review): text= receives the raw user id; presumably an int -- confirm
        # aiogram accepts non-str text here.
        await bot.send_message(
            chat_id=user.get_user_id(),
            text=user.get_user_id(),
        )
    elif message.text == tt.change_my_name:
        # Ask for the new name and enter the changeName FSM state.
        await bot.send_message(
            chat_id=user.get_user_id(),
            text=f"Введите новое имя или нажмите на кнопку \"{tt.back}\"",
            reply_markup=mk.single_button(mk.btnCancelDelete)
        )
        await sh.changeName.name.set()
    elif message.text == tt.create_family:
        # Only users without a family may create one; otherwise silently ignore.
        if not user.is_in_family():
            fam.create_family(user.get_user_id())
            await bot.send_message(
                chat_id=user.get_user_id(),
                text=f"Семья с ID {user.get_family().get_family_id()} была создана.",
                reply_markup=mk.get_markup_start(user)
            )
    elif message.text == tt.leave_family:
        if user.is_in_family():
            family = user.get_family()
            family.remove_user(user.get_user_id())
            await bot.send_message(
                chat_id=user.get_user_id(),
                text=f"Вы вышли из семьи.",
                reply_markup=mk.get_markup_start(user)
            )
    elif message.text == tt.my_invites:
        # List pending invites (only meaningful while not in a family).
        if not user.is_in_family():
            if user.get_invites():
                text = tt.my_invites
                markup = mk.get_markup_myInvites(user)
            else:
                text = tt.no_active_invites
                markup = types.InlineKeyboardMarkup()
            await bot.send_message(
                chat_id=user.get_user_id(),
                text=text,
                reply_markup=markup
            )
    elif message.text == tt.invite_to_family:
        # Ask for the invitee's id and enter the InviteToFamily FSM state.
        await bot.send_message(
            chat_id=user.get_user_id(),
            text=f"Введите ID пользователя или нажмите на кнопку \"{tt.back}\".",
            reply_markup=mk.single_button(mk.btnCancelDelete)
        )
        await sh.InviteToFamily.invited_id.set()
    elif message.text == tt.kick_from_family:
        # Only the family creator gets the kick-member keyboard.
        if user.is_in_family():
            if user.get_family().get_creator().get_user_id() == user.get_user_id():
                await bot.send_message(
                    chat_id=user.get_user_id(),
                    text=f"Выберите пользователя, которого хотите выгнать из семьи: ",
                    reply_markup=mk.get_markup_kickFromFamily(user.get_family())
                )
    elif message.text in [tt.family_bills_last_30_days, tt.my_bills_last_30_days]:
        if user.is_in_family():
            text = f"{tt.line_separator}\n"
            # `own` selects between the whole family's bills and only this user's.
            if message.text == tt.family_bills_last_30_days:
                bill_list = user.get_family().get_bills_30_days()
                own = False
            else:
                bill_list = user.get_family().get_bills_30_days(user.get_user_id())
                own = True
            markup = types.InlineKeyboardMarkup()
            # More than 30 bills -> paginate; only the first page is rendered here.
            if len(bill_list) > 30:
                markup = mk.get_markup_billsPage(pagenum=1, maxpages=ceil(len(bill_list) / 30), own=own)
                bill_list = bill_list[:30]
            # Newest-first listing, then the 30-day total as the last line.
            for bill in bill_list[::-1]:
                text += f"{'{:.2f}'.format(bill.get_price())} руб. - \"{bill.get_message()}\"\nДобавлено {bill.get_user().get_name()} {datetime.strftime(bill.get_date(), '%d-%m-%y в %H:%M')}\n{tt.line_separator}\n"
            text += f"{tt.family_bills_last_30_days if message.text == tt.family_bills_last_30_days else tt.my_bills_last_30_days}: {'{:.2f}'.format(user.get_family().get_total_30_days(None if message.text == tt.family_bills_last_30_days else user.get_user_id()))}руб."
            await bot.send_message(
                chat_id=user.get_user_id(),
                text=text,
                reply_markup=markup
            )
    else:
        # Free-form text: try to parse it as "<price> - <message>" and record a bill.
        if user.is_in_family():
            try:
                price = float(message.text.split(" - ")[0])
                msg = message.text.split(" - ")[1]
                if len(msg) > 120:
                    text = f"Сообщение не может быть больше 120 символов!"
                else:
                    b.create_bill(user.get_family().get_family_id(), user.get_user_id(), price, msg)
                    text = f"Счет на {price}руб. был добавлен с сообщением \"{msg}\"."
            except:  # any parse/DB failure falls back to a generic error message
                text = tt.error
        else:
            text = "Вступите в семью для добавления трат."
        await bot.send_message(
            chat_id=user.get_user_id(),
            text=text,
        )
@dp.callback_query_handler()
async def process_callback(callback_query: types.CallbackQuery):
    """Inline-keyboard dispatcher: the callback data encodes an action prefix
    plus, for most actions, a family or user id appended to it."""
    user = usr.User(callback_query.message.chat.id)
    call_data = callback_query.data
    message_id = callback_query.message.message_id
    if call_data.startswith("acceptFamily"):
        # call_data[12:] is the family id following the "acceptFamily" prefix.
        if fam.family_exists(call_data[12:]):
            family = fam.Family(call_data[12:])
            if not user.is_in_family() and user.is_invited(family.get_family_id()):
                family.add_user(user.get_user_id())
                user.delete_invite(family.get_family_id())
                text = f"Вы вступили в семью с ID {user.get_family().get_family_id()}."
                markup = mk.get_markup_start(user)
            # NOTE(review): if the family exists but the user is already in a family
            # or not invited, `text`/`markup` stay unbound and the edit below raises
            # UnboundLocalError -- confirm and guard.
        else:
            text = f"Семьи с ID {call_data[12:]} больше не существует."
            user.delete_invite(call_data[12:])
            markup = mk.single_button(mk.btnBackMyInvites)
        await bot.edit_message_text(
            chat_id=user.get_user_id(),
            message_id=message_id,
            text=text,
            reply_markup=markup
        )
    elif call_data == "myInvites":
        # Re-render the invites list in place (same content as the tt.my_invites button).
        if not user.is_in_family():
            if user.get_invites():
                text = tt.my_invites
                markup = mk.get_markup_myInvites(user)
            else:
                text = tt.no_active_invites
                markup = types.InlineKeyboardMarkup()
            await bot.edit_message_text(
                chat_id=user.get_user_id(),
                message_id=message_id,
                text=text,
                reply_markup=markup
            )
    elif call_data.startswith("declineFamily"):
        # call_data[13:] is the family id following the "declineFamily" prefix.
        family = fam.Family(call_data[13:])
        if not user.is_in_family() and user.is_invited(family.get_family_id()):
            user.delete_invite(family.get_family_id())
            if user.get_invites():
                text = tt.my_invites
                markup = mk.get_markup_myInvites(user)
            else:
                text = tt.no_active_invites
                markup = types.InlineKeyboardMarkup()
            await bot.edit_message_text(
                chat_id=user.get_user_id(),
                message_id=message_id,
                text=text,
                reply_markup=markup
            )
    elif call_data == "kickFromFamily":
        # Exact match (no id suffix): show the creator the member-selection keyboard.
        if user.is_in_family():
            if user.get_family().get_creator().get_user_id() == user.get_user_id():
                await bot.edit_message_text(
                    chat_id=user.get_user_id(),
                    message_id=message_id,
                    text=f"Выберите пользователя, которого хотите выгнать из семьи: ",
                    reply_markup=mk.get_markup_kickFromFamily(user.get_family())
                )
    elif call_data.startswith("kickFromFamily"):
        # Prefix match with an id suffix: actually kick that member.
        kicked_user = usr.User(call_data[14:])
        family = kicked_user.get_family()
        if user.get_user_id() == kicked_user.get_user_id():
            text = "Вы не можете выгнать самого себя!"
        elif user.is_in_family() and kicked_user.is_in_family():
            # Only the creator of the *same* family may kick.
            if kicked_user.get_family().get_creator().get_user_id() == user.get_user_id() and kicked_user.get_family().get_family_id() == user.get_family().get_family_id():
                await bot.send_message(
                    chat_id=kicked_user.get_user_id(),
                    text=f"Вы были выгнаны из семьи с ID {family.get_family_id()}.",
                )
                text = f"Пользователь {kicked_user.get_name()} был выгнан из семьи."
                family.remove_user(kicked_user.get_user_id())
        # NOTE(review): `text` may be unbound here when neither branch above
        # assigned it (e.g. requester not in a family) -- confirm and guard.
        await bot.edit_message_text(
            chat_id=user.get_user_id(),
            message_id=message_id,
            text=text,
            reply_markup=mk.single_button(mk.btnBackKickFromFamily)
        )
    elif call_data.startswith("billsPage") or call_data.startswith("ownbillsPage"):
        # Pagination of the 30-day bill listing; "own..." restricts to the caller's bills.
        if call_data.startswith("ownbillsPage"):
            pagenum = int(call_data[12:])
            bill_list = user.get_family().get_bills_30_days(user_id=user.get_user_id())
            own = True
        else:
            pagenum = int(call_data[9:])
            bill_list = user.get_family().get_bills_30_days()
            own = False
        # 30 bills per page.
        bill_offset_start = 0 if pagenum == 1 else 30*(pagenum-1)
        maxpages = ceil(len(bill_list) / 30)
        bill_list = bill_list[bill_offset_start:bill_offset_start+30]
        text = ""
        for bill in bill_list[::-1]:
            text += f"{'{:.2f}'.format(bill.get_price())} руб. - \"{bill.get_message()}\"\nДобавлено {bill.get_user().get_name()} {datetime.strftime(bill.get_date(), '%d-%m-%y в %H:%M')}\n{tt.line_separator}\n"
        text += f"{tt.family_bills_last_30_days if not own else tt.my_bills_last_30_days}: {'{:.2f}'.format(user.get_family().get_total_30_days(None if not own else user.get_user_id()))}руб."
        await bot.edit_message_text(
            text=text,
            chat_id=user.get_user_id(),
            message_id=message_id,
            reply_markup=mk.get_markup_billsPage(pagenum, maxpages=maxpages, own=own)
        )
@dp.message_handler(state=sh.InviteToFamily.invited_id)
async def inviteToFamilySetInvitedID(message: types.Message, state: FSMContext):
    """FSM step: the message text is the id of the user to invite to the
    sender's family; validates, records the invite, and notifies both sides."""
    user = usr.User(message.chat.id)
    # NOTE(review): assumes the sender is in a family; if not, get_family()
    # presumably returns None and get_family_id() below would fail -- confirm.
    family = user.get_family()
    if usr.user_exists(message.text):
        invited_user = usr.User(message.text)
        if invited_user.is_in_family():
            text = f"Пользователь {invited_user.get_name()} уже находится в семье."
        elif invited_user.is_invited(family.get_family_id()):
            text = f"Пользователь {invited_user.get_name()} уже был приглашен в семью с ID {family.get_family_id()}."
        else:
            try:
                invited_user.create_invite(user.get_family().get_family_id())
                # Tell the invitee, with a shortcut button to their invite list.
                await bot.send_message(
                    chat_id=invited_user.get_user_id(),
                    text=f"Вы были приглашены в семью с ID {user.get_family().get_family_id()}.",
                    reply_markup=mk.single_button(mk.btnMyInvites)
                )
                text = f"Пользователь {invited_user.get_name()} был приглашен в семью с ID {user.get_family().get_family_id()}."
            except:  # NOTE(review): bare except hides the real failure; narrow if possible
                text = tt.error
    else:
        text = f"Пользователя с ID {message.text} не существует."
    # Confirmation (or error) back to the inviter, then leave the FSM state.
    await bot.send_message(
        chat_id=user.get_user_id(),
        text=text
    )
    await state.finish()
@dp.message_handler(state=sh.changeName.name)
async def changeNameSetName(message: types.Message, state: FSMContext):
    """FSM step: store the display name the user just typed, confirm, leave the state."""
    new_name = message.text
    usr.User(message.chat.id).set_name(new_name)
    confirmation = f"Ваше имя было изменено на \"{new_name}\"."
    await bot.send_message(
        chat_id=message.chat.id,
        text=confirmation,
    )
    await state.finish()
@dp.callback_query_handler(state='*')
async def cancelState(callback_query: types.CallbackQuery, state: FSMContext):
    """Catch-all callback while any FSM state is active: on "cancelDelete",
    best-effort delete the prompt message, then always leave the state."""
    requester = usr.User(callback_query.message.chat.id)
    if callback_query.data == "cancelDelete":
        try:
            await bot.delete_message(
                chat_id=requester.get_user_id(),
                message_id=callback_query.message.message_id
            )
        except:  # noqa: E722 -- message may already be gone; deletion is best-effort
            pass
    await state.finish()
if __name__ == '__main__':
    # Long-poll Telegram; skip_updates drops messages received while the bot was offline.
    executor.start_polling(dp, skip_updates=True)
| 2.53125 | 3 |
codes/course1/hw02.py | BigShuang/big-shuang-python-introductory-course | 0 | 12770563 | <reponame>BigShuang/big-shuang-python-introductory-course
# Print a right-aligned triangle of 'A's with rows of width 1, 3, 5, 7, 9.
for width in range(1, 10, 2):
    padding = (11 - width) // 2
    print(' ' * padding + 'A' * width)
| 3.21875 | 3 |
tests/communication/utils.py | yangboz/maro | 598 | 12770564 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import socket
from maro.communication import Proxy
def get_random_port():
    """Ask the OS for a currently-free TCP port and return its number."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", 0))  # port 0 -> kernel picks an ephemeral free port
        return probe.getsockname()[1]
    finally:
        probe.close()
def proxy_generator(component_type, redis_port):
    """Build a unit-test Proxy for the given component type, wired to a local Redis."""
    # Peer expectations for each role in the communication tests.
    expected_peers_by_type = {
        "receiver": {"sender": 1},
        "sender": {"receiver": 1},
        "master": {"worker": 5},
        "worker": {"master": 1}
    }
    return Proxy(
        component_type=component_type,
        expected_peers=expected_peers_by_type[component_type],
        group_name="communication_unit_test",
        redis_address=("localhost", redis_port),
        log_enable=False
    )
| 2.578125 | 3 |
utils/simulation/endUser_sim.py | jeffmacinnes/pyneal | 24 | 12770565 | <filename>utils/simulation/endUser_sim.py
""" Tool to simulate and demo how an end-user may request results from Pyneal
during a real-time scan.
In a neurofeedback context, for example, the end-user may be the software that
is controlling the experimental task. In this case, anytime the task wants to
present feedback to the participant, it must request the output of the
real-time analysis for a specific set of timepoints (or volumes).
This is an example of how requests should be formatted and sent to Pyneal.
Requests are made on a per-volume basis, and each request should take the form
of a 4-character string representing the desired volume index (using a 0-based
index). For example, to request the first volume in the series, the string
would be '0000'; to request the 25th volume in the series the string would be
'0024', and so on...
Pyneal will send back a response message that contains all of the analysis
results that were calculated for that volume.
The results message starts off as a python dictionary, and then converted to
JSON and encoded as a byte array before sending over the socket. The end-user
should reencode the byte array as a JSON object in order to access the results.
At a minimum, each results message will contain an entry called 'foundResults'
that stores a boolean value indicating whether Pyneal has a result for this
volume (True) or not (False). If 'foundResults' is True, there will also be
additional entries containing the results for that volume. How those results
are formatted and named depends on the analysis option chosen. For instance,
for basic ROI averaging, the results may look like 'average':1423, indicating
the ROI had an average value of 1423 on this volume.
Usage
-----
python endUser_sim.py [-sh] [-sp] volIdx
e.g.
python endUser_sim.py 0024
python endUser_sim.py -sh 10.0.0.1 -sp 9999 0024
Parameters
----------
volIdx : int
the index (0-based) of the volume you'd like to request results from
sh : string, optional
i.p. address of the result server. defaults to 127.0.0.1
sp : int, optional
port number to use for communication with result server. defaults to 5556
Returns
-------
The returned result from the Pyneal Result server will be printed to stdOut
"""
import argparse
import socket
import json
import sys
def requestResult(host, port, volIdx):
    """ send request to pyneal results server for specific result

    Parameters
    ----------
    volIdx : int
        the index (0-based) of the volume you'd like to request results from
    host : string
        i.p. address of the result server
    port : int
        port number to use for communication with result server

    """
    # connect to the results server of Pyneal; the `with` block guarantees the
    # socket is closed even if the connection or transfer fails part-way
    # (previously the socket leaked on any exception before close()).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as clientSocket:
        clientSocket.connect((host, port))

        # send request for volume number. Request must be a 4-char string
        # representing the volume number requested
        request = str(volIdx).zfill(4)
        print('Sending request to {}:{} for vol {}'.format(host, port, request))
        # sendall (not send) retries until the entire request is transmitted
        clientSocket.sendall(request.encode())

        # now read the full response from the server until it closes the stream
        resp = b''
        while True:
            serverData = clientSocket.recv(1024)
            if not serverData:
                break
            resp += serverData

    # decode the byte array as JSON
    resp = json.loads(resp.decode())
    print('client received:')
    print(resp)
if __name__ == '__main__':
    # Command-line entry point: optional server address plus the requested volume index.
    parser = argparse.ArgumentParser()
    parser.add_argument('-sh', '--socketHost', type=str, default='127.0.0.1',
                        help='Pyneal Result Server host')
    parser.add_argument('-sp', '--socketPort', type=int, default=5556,
                        help='Pyneal Result Server port')
    parser.add_argument('volIdx')

    cli = parser.parse_args()
    requestResult(cli.socketHost, cli.socketPort, cli.volIdx)
| 2.890625 | 3 |
PyTorch/ImageNet/validate-VGG16BN.py | csyhhu/L-OBS | 73 | 12770566 | """
This code validates the performance of VGG16 after L-OBS prunning
"""
import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from models.vgg import vgg16_bn
from utils import validate, adjust_mean_var
import numpy as np
import os
from datetime import datetime
use_cuda = torch.cuda.is_available()

# -------------------------------------------- User Config ------------------------------------
# Specify parameters path
traindir = '/home/shangyu/imagenet-train'
valdir = '/home/shangyu/imagenet-val'
pruned_weight_root = './VGG16/pruned_weight'
pruned_parameter_root = './VGG16/pruned_param'
if not os.path.exists(pruned_parameter_root):
    os.makedirs(pruned_parameter_root)
pretrain_model_path = './VGG16/vgg16_bn-6c64b313.pth'
n_validate_batch = 100 # Number of batches used for validation
validate_batch_size = 50 # Batch size of validation
adjust_batch_size = 128  # batch size used when re-estimating BN mean/var
n_adjust_batch = 500     # number of batches used for the BN adjustment pass
# Prune data is retrieved from
# Learning bothWeights and Connections for Efficient Neural Networks (Song Han NIPS2015)
# Maps layer name -> compression-rate bucket; the value selects the
# CR_<value> subdirectory the pruned .npy weights are loaded from.
layer_name_list = {
    'features.0': 55,
    'features.3': 20,
    'features.7': 35,
    'features.10': 35,
    'features.14': 55,
    'features.17': 25,
    'features.20': 40,
    'features.24': 30,
    'features.27': 30,
    'features.30': 35,
    'features.34': 35,
    'features.37': 30,
    'features.40': 35,
    'classifier.0': 5,
    'classifier.3': 5,
    'classifier.6': 25
}
# -------------------------------------------- User Config ------------------------------------
# Build the model and start from the pretrained parameter dict; pruned layer
# weights are spliced in below before validation.
net = vgg16_bn()
# net.load_state_dict(torch.load(pretrain_model_path))
# param = net.state_dict()
param = torch.load(pretrain_model_path)

total_nnz = 0        # non-zero parameters across all pruned layers
total_nelements = 0  # total parameters across all pruned layers
n_weight_used = 0
n_total_weight = len(os.listdir('%s/CR_5' %(pruned_weight_root))) # It should be 16 * 2 = 32
for layer_name, CR in layer_name_list.items():
    pruned_weight = np.load('%s/CR_%d/%s.weight.npy' %(pruned_weight_root, CR, layer_name))
    pruned_bias = np.load('%s/CR_%d/%s.bias.npy' %(pruned_weight_root, CR, layer_name))
    # Per-layer sparsity report (nnz / total).
    this_sparsity = np.count_nonzero(pruned_weight) + np.count_nonzero(pruned_bias)
    this_total = pruned_weight.size + pruned_bias.size
    print ('%s CR: %f' %(layer_name, float(this_sparsity)/float(this_total)))
    total_nnz += this_sparsity
    total_nelements += this_total
    # Overwrite the dense parameters with the pruned versions.
    param['%s.weight' %layer_name] = torch.FloatTensor(pruned_weight)
    param['%s.bias' %layer_name] = torch.FloatTensor(pruned_bias)
    n_weight_used += 2
# assert(n_weight_used == n_total_weight)
print ('Prune weights used: %d/%d' %(n_weight_used, n_total_weight))
overall_CR = float(total_nnz) / float(total_nelements)
print ('Overall compression rate (nnz/total): %f' %overall_CR)
net.load_state_dict(param)
# BUG FIX: torch.save was previously handed a file object opened in *text* mode
# ('w'), which fails under Python 3 (binary payload) and leaked the handle.
# Passing the path lets torch open/close the file itself in binary mode.
torch.save(param, '%s/CR-%.3f.pth' %(pruned_parameter_root, overall_CR))
if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

# Load training dataset used to re-estimate BatchNorm running mean/var
# after the weights were pruned.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
trainDataset = datasets.ImageFolder(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
train_loader = torch.utils.data.DataLoader(trainDataset, batch_size = adjust_batch_size, shuffle=True)
print ('[%s] Begin adjust.' %(datetime.now()))
adjust_mean_var(net, train_loader, None, n_adjust_batch, use_cuda)
print ('[%s] Adjust finish. Now saving parameters' %(datetime.now()))

# Load validation dataset and report accuracy of the pruned model.
print('==> Preparing data..')
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size = validate_batch_size, shuffle=True)

validate(net, val_loader, None, None, n_validate_batch, use_cuda)
| 2.3125 | 2 |
chapter5_operations/prediction_monitoring_pattern/src/ml/data.py | sudabon/ml-system-in-actions | 133 | 12770567 | <reponame>sudabon/ml-system-in-actions
from typing import List
from pydantic import BaseModel
class Data(BaseModel):
    """Prediction request body: a batch of feature rows.

    Defaults to a single 4-feature iris sample so the endpoint is easy to try.
    """
    data: List[List[float]] = [[5.1, 3.5, 1.4, 0.2]]
| 2.234375 | 2 |
50_List_Remove_item/main.py | jmmedel/Python-Tutorials- | 0 | 12770568 |
"""
Remove Item
There are several methods to remove items from a list:
"""
thislist = ["apple", "banana", "cherry"]
# remove() deletes the first matching value; it raises ValueError if absent.
thislist.remove("banana")
print(thislist)
| 4 | 4 |
controller/modules/NetworkOperations.py | saumitraaditya/ControllersMulticast | 0 | 12770569 | <filename>controller/modules/NetworkOperations.py
from controller.modules.NetworkGraph import ConnEdgeAdjacenctList
from controller.modules.NetworkGraph import ConnectionEdge
from controller.modules.NetworkGraph import EdgeTypesOut
class OperationsModel():
    """A single pending topology operation: which edge, what to do, and its rank."""

    def __init__(self, conn_edge, op_type, priority):
        self.conn_edge = conn_edge    # the connection edge the operation applies to
        self.op_type = op_type        # e.g. "opTypeAdd" / "opTypeRemove" / "opTypeUpdate"
        self.op_priority = priority   # lower value == executed earlier

    def __repr__(self):
        return "connEdge = %s, opType = %s, opPriority=%s>" % (
            self.conn_edge, self.op_type, self.op_priority)
class NetworkOperations():
    """Diffs the current connection-edge adjacency list against the desired one
    and records the add/remove/update operations needed to converge.

    Iterating the instance yields the recorded operations in ascending
    priority order (lower priority value first).
    """

    # Priority for creating an edge present only in the desired state, keyed by
    # edge type. Types absent from this table are never scheduled for addition
    # (same behavior as the original if/elif chain).
    _ADD_PRIORITY = {
        "CETypeEnforced": 1,
        "CETypeSuccessor": 2,
        "CETypeOnDemand": 4,
        "CETypeLongDistance": 7,
    }
    # Priority for tearing down a connected outgoing edge that is no longer
    # desired. Types absent here (e.g. CETypeEnforced) are never removed.
    _REMOVE_PRIORITY = {
        "CETypeOnDemand": 3,
        "CETypeSuccessor": 5,
        "CETypeLongDistance": 6,
    }

    def __init__(self, current_Network_State, desired_Network_State):
        self.current_Network_State = current_Network_State
        self.desired_Network_State = desired_Network_State
        self.operations = {}  # edge key -> OperationsModel

    def __iter__(self):
        # Yield operations ordered by priority (lowest first).
        for edge_key in sorted(self.operations,
                               key=lambda k: self.operations[k].op_priority):
            yield self.operations[edge_key]

    def __repr__(self):
        msg = "currentNetworkState = %s, desiredNetworkState = %s, numOfOperations=%d, " \
              "Operations=%s>" % \
              (self.current_Network_State, self.desired_Network_State,
               len(self.operations), self.operations)
        return msg

    def find_Difference(self):
        """Populate self.operations with the add/update/remove set.

        Refactor note: the original repeated if/elif ladders are replaced by
        the class-level priority tables above; the produced operations are
        identical.
        """
        # Desired edges: missing from current -> add (if type is addable);
        # already present -> update with top priority 0.
        for edge in self.desired_Network_State.conn_edges:
            conn_edge = self.desired_Network_State.conn_edges[edge]
            if edge not in self.current_Network_State.conn_edges:
                priority = self._ADD_PRIORITY.get(conn_edge.edge_type)
                if priority is not None:
                    self.operations[edge] = OperationsModel(conn_edge, "opTypeAdd", priority)
            else:
                self.operations[edge] = OperationsModel(conn_edge, "opTypeUpdate", 0)

        # Current edges no longer desired: remove only connected outgoing edges
        # whose type has a removal priority.
        for edge in self.current_Network_State.conn_edges:
            if edge in self.desired_Network_State.conn_edges:
                continue
            conn_edge = self.current_Network_State.conn_edges[edge]
            if conn_edge.edge_type in EdgeTypesOut and conn_edge.edge_state == "CEStateConnected":
                priority = self._REMOVE_PRIORITY.get(conn_edge.edge_type)
                if priority is not None:
                    self.operations[edge] = OperationsModel(conn_edge, "opTypeRemove", priority)
| 2.546875 | 3 |
app/main.py | agstack/weather-forecast | 0 | 12770570 | from flask import Flask, render_template, jsonify, request, url_for
from shapely.geometry import Point as Shapely_point, mapping
from geojson import Point as Geoj_point, Polygon as Geoj_polygon, Feature, FeatureCollection
from datetime import datetime
from sqlalchemy import *
import pandas as pd
import geopandas as gpd
import numpy as np
import psycopg2 as pg
import json
import leaflet as L
from elastic_app_search import Client
from elasticsearch import Elasticsearch
from elasticapm.contrib.flask import ElasticAPM
import matplotlib.colors as cl
import h3
import h3.api.basic_int as h3int
import json
import h3pandas
import cmasher as cmr
import plotly
import plotly.express as px
from scipy.stats import percentileofscore
from scipy import stats
import plotly.graph_objects as go
import os
import datetime
from netCDF4 import Dataset
import shapely.wkt
import folium
import ftplib
from ftplib import FTP
from pathlib import Path
from os import path, walk
############ globals
# Directory of downloaded NCEP/GFS NetCDF files and the flag file watched for auto-reload.
outDir = '/home/sumer/my_project_dir/ncep/'
updated_data_available_file = '/home/sumer/weather/weather-forecast/updated_data_available.txt'
#outDir = '/root/ncep/data/'
#updated_data_available_file = '/root/ncep/scripts/updated_data_available.txt'

list_of_ncfiles = sorted(name for name in os.listdir(outDir) if name.endswith('.nc'))
time_dim = len(list_of_ncfiles)

# NetCDF variable name -> human-readable label served by the API.
varDict = {
    'TMP_2maboveground': 'Air Temp [C] (2 m above surface)',
    'TSOIL_0D1M0D4mbelowground': 'Soil Temperature [C] - 0.1-0.4 m below ground',
    'SOILW_0D1M0D4mbelowground': 'Volumetric Soil Moisture Content [Fraction] - 0.1-0.4 m below ground',
    'CRAIN_surface': 'Rainfall Boolean [1/0]',
}
varList = list(varDict.keys())

var_val3D = []
var_val4D = []
# NOTE: var_val4D axes are [lat, lon, forecast_time_index, variable]; the
# variable axis is in *reverse* varList order (index 0 = CRAIN_surface,
# index 3 = TMP_2maboveground) because each slab is prepended below.

# Forecast start time: first component of the first file name.
updatedDtStr = list_of_ncfiles[0].split('__')[0]
updatedDt = datetime.datetime.strptime(updatedDtStr, '%Y%m%d_%H%M%S')
updatedDtDisplay = datetime.datetime.strftime(updatedDt, '%Y-%m-%dT%H%M%S')
# Forecast end time: second component of the last file name.
forecastEndDtStr = list_of_ncfiles[-1].split('__')[1].split('__')[0]
forecastEndDt = datetime.datetime.strptime(forecastEndDtStr, '%Y%m%d_%H%M%S')
forecastEndDtDisplay = datetime.datetime.strftime(forecastEndDt, '%Y-%m-%dT%H%M%S')

# Load every variable across every forecast file into the in-memory 4-D cube.
i = 0
for varName in varList:
    tm_arr = []
    print('Reading data for :' + varName)
    j = 0
    for f in list_of_ncfiles:
        ncin = Dataset(outDir + f, "r")
        titleStr = varDict[varName]
        var_mat = ncin.variables[varName][:]
        if 'Temp' in titleStr:
            var_val = var_mat.squeeze() - 273.15  # Kelvin -> Celsius
        else:
            var_val = var_mat.squeeze()
        lons = ncin.variables['longitude'][:]
        lats = ncin.variables['latitude'][:]
        tms = ncin.variables['time'][:]
        if j > 0:
            var_val3D = np.dstack((var_val3D, var_val.data))
        else:
            var_val3D = var_val.data
        tm_arr.append(tms.data[0])
        ncin.close()
        j = j + 1
    if i > 0:
        var_val3D_rshp = np.reshape(var_val3D, (720, 1440, time_dim, 1))
        var_val4D = np.append(var_val3D_rshp, var_val4D, axis=3)
    else:
        var_val4D = np.reshape(var_val3D, (720, 1440, time_dim, 1))
    i = i + 1
def getWeatherForecastVars():
    """Describe the loaded forecast dataset (source, variables, time span)."""
    # Key order matters for display: app.config['JSON_SORT_KEYS'] is False.
    return {
        'source': 'United States NOAA - NOMADS Global Forecast Model',
        'variables': list(varDict.values()),
        'updated at time [UTC]': updatedDt,
        'forecast start time [UTC]': updatedDtDisplay,
        'forecast end time [UTC]': forecastEndDtDisplay,
        'forecast type': 'hourly',
        'Number of time intervals': time_dim,
    }
def getWeatherForecast(lon, lat):
    """Legacy per-request loader: re-reads every NetCDF file from disk and
    returns a long-format DataFrame (one row per variable per forecast time)
    for the grid cell nearest the given lon/lat. On any failure the partial
    (possibly empty) DataFrame is returned. See get4DWeatherForecast for the
    in-memory variant actually used by the /weatherForecast route."""
    df = pd.DataFrame()
    try:
        lat = float(lat)
        lon = float(lon)
        varList = list(varDict.keys())
        df = pd.DataFrame()
        idx=0
        # Forecast-run timestamp from the first file name component.
        updated_dtStr = list_of_ncfiles[0].split('__')[0]
        updated_dt = datetime.datetime.strptime(updated_dtStr, '%Y%m%d_%H%M%S')
        for f in list_of_ncfiles:
            dtStr = f.split('__')[1]
            forecast_dt = datetime.datetime.strptime(dtStr, '%Y%m%d_%H%M%S')
            #print([f,updated_dt, forecast_dt])
            ncin = Dataset(outDir+f, "r")
            #valList = list(ncin.variables.keys())
            #extract the variable of interest from the list
            for varName in varList:
                titleStr = varDict[varName]
                var_mat = ncin.variables[varName][:]
                if 'Temp' in titleStr:
                    var_val = var_mat.squeeze() - 273.15 #convert to DegC
                else:
                    var_val = var_mat.squeeze()
                lons = ncin.variables['longitude'][:]
                lats = ncin.variables['latitude'][:]
                # First grid coordinate >= the requested lon/lat.
                # NOTE(review): raises IndexError if the request is beyond the
                # grid's last coordinate -- swallowed by the except below.
                lon_ind = [i for i,v in enumerate(lons.data) if v >= lon][0]
                lat_ind = [i for i,v in enumerate(lats.data) if v >= lat][0]
                vv = var_val[lat_ind, lon_ind]
                df.loc[idx,'UPDATED_DATE_UTC']=updated_dt
                df.loc[idx,'FORECAST_DATE_UTC']=forecast_dt
                df.loc[idx,'MEASURE']=titleStr
                df.loc[idx,'lon']=lon
                df.loc[idx,'lat']=lat
                df.loc[idx,'VALUE']=vv
                idx=idx+1
            ncin.close()
    except Exception as e:
        print(e)
    return df
def get4DWeatherForecast(lon, lat):
    """Fast forecast lookup from the preloaded var_val4D cube: returns a
    wide-format DataFrame (one column per variable, one row per forecast hour)
    for the grid cell nearest the given lon/lat. On any failure the partial
    (possibly empty) DataFrame is returned."""
    df_all = pd.DataFrame()
    try:
        lat = float(lat)
        lon = float(lon)
        # The variable axis of var_val4D is in reverse varList order, so start
        # at index 3 and count down. NOTE(review): assumes len(varList) == 4.
        idx=3
        updated_dtStr = list_of_ncfiles[0].split('__')[0]
        updated_dt = datetime.datetime.strptime(updated_dtStr, '%Y%m%d_%H%M%S')
        df_all = pd.DataFrame()
        updated_dts = [updated_dt for x in range(0,len(tm_arr))]
        forecast_dts = [datetime.datetime.utcfromtimestamp(int(x)) for x in tm_arr]
        df_all['UPDATED_DATE_UTC']=updated_dts
        df_all['FORECAST_DATE_UTC']=forecast_dts
        for varName in varList:
            df = pd.DataFrame()
            print(varName)
            #try:
            titleStr = varDict[varName]
            # First grid coordinate >= the requested lon/lat (uses the module-level
            # lons/lats left over from the load loop).
            lon_ind = [i for i,v in enumerate(lons.data) if v >= lon][0]
            lat_ind = [i for i,v in enumerate(lats.data) if v >= lat][0]
            vv = var_val4D[lat_ind, lon_ind,:,idx]
            df[titleStr]=vv
            df_all = pd.concat([df_all, df],axis=1)
            idx=idx-1
    except Exception as e:
        print(e)
    return df_all
############
#create the app
app = Flask(__name__)
app.config['JSON_SORT_KEYS']=False
error_res = {}
#rendering the entry using any of these routes:
@app.route('/')
@app.route('/index')
@app.route('/home')
def index():
    # Landing page; the same template serves /, /index and /home.
    return render_template('index.html')
#global weather forecast implementation
@app.route('/weatherForecastVariables')
def weatherForecastVariables():
try:
weatherForcastVars = getWeatherForecastVars()
except ValueError:
error_res['db function call error'] = 'function call failed for getWeatherForecastVars'
error_msg = jsonify(error_res)
return jsonify(weatherForcastVars)
#global weather forecast implementation
@app.route('/weatherForecast')
def weatherForecast():
lat = request.args.get('lat')
lon = request.args.get('lon')
try:
weatherForcast_df = get4DWeatherForecast(lon, lat)
except ValueError:
error_res['db function call error'] = 'DB function call failed for getWeatherForecast'
error_res['value given'] = 'lat='+str(lat)+', lon='+(str(lon))
error_msg = jsonify(error_res)
if len(weatherForcast_df)>0:
res = jsonify(weatherForcast_df.to_dict(orient='records'))
else:
res = "{'Error': 'WeatherForecast function returned no data'}"
return res
#main to run the app
if __name__ == '__main__':
    # Watch the flag file so the dev server reloads when new data arrives.
    extra_files = [updated_data_available_file,]
    # Inert scratch code kept as a string literal (never executed):
    """
    #For auto-reload if anyhing changes in the entire directory do the following:
    extra_dirs = [outDir,]
    extra_files = extra_dirs[:]
    for extra_dir in extra_dirs:
        for dirname, dirs, files in walk(extra_dir):
            for filename in files:
                filename = path.join(dirname, filename)
                if path.isfile(filename):
                    extra_files.append(filename)
    """
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # to the network -- confirm this never runs in production.
    app.run(host='0.0.0.0' , port=5000, debug=True, extra_files=extra_files)
tests/testutils/repo/__init__.py | gtristan/buildstream-plugins | 0 | 12770571 | from .bzrrepo import Bzr
from .gitrepo import Git
| 1.03125 | 1 |
cogs/help.py | Mrmagicpie/LineSMP-Bot | 2 | 12770572 | <reponame>Mrmagicpie/LineSMP-Bot<filename>cogs/help.py
#
# LineSMP help.py | 2020 :copyright: Mrmagicpie
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
import discord
from discord import Embed as e
from discord.ext import commands
from discord.ext.commands import BucketType
import datetime
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
class help(commands.Cog):
    """Cog exposing the bot's single ;help command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.cooldown(1, 3, type=BucketType.user)
    async def help(self, ctx):
        """Send the help embed (one use per user every 3 seconds)."""
        # FIX: local renamed from `help`, which shadowed both the builtin and
        # this command method itself.
        embed = e(
            title="LineSMP Help",
            colour=discord.Colour.green(),
            timestamp=datetime.datetime.utcnow()
        )
        embed.add_field(
            name="Uh-Oh Commands:",
            value="``;-;``\n``;=;``\n``;__=__;``"
        )
        embed.set_footer(
            icon_url=self.bot.user.avatar_url,
            text="LineSMP "
        )
        await ctx.send(embed=embed)
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
def setup(bot):
    # discord.py extension entry point: registers the cog on load_extension().
    bot.add_cog(help(bot))
| 2.34375 | 2 |
utils.py | PervasiveWellbeingTech/Popbots-Engine | 1 | 12770573 | <reponame>PervasiveWellbeingTech/Popbots-Engine
import os
import logging
import time
from functools import wraps
"""
Time wrapper from https://gist.github.com/bradmontgomery/bd6288f09a24c06746bbe54afe4b8a82
"""
# Module logger: DEBUG and up to console + two files (errors, full debug).
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s %(levelname)-4s %(message)s')

# BUG FIX: this previously called logging.getLogger(__name__) again, which
# returns the *same* logger object, so both names shared one handler set and
# timing records were duplicated into every file. A distinct child name keeps
# the execution-time log separate.
time_logger = logging.getLogger(__name__ + '.timing')

file_handler = logging.FileHandler('logs/global.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)

file_handler2 = logging.FileHandler('logs/global_debug.log')
file_handler2.setLevel(logging.DEBUG)
file_handler2.setFormatter(formatter)

file_handler_time = logging.FileHandler('logs/execution_time.log')
file_handler_time.setFormatter(formatter)
file_handler_time.setLevel(logging.WARNING)

stream_handler = logging.StreamHandler()
stream_handler_formatter = logging.Formatter('[%(levelname)s] %(message)s')
stream_handler.setFormatter(stream_handler_formatter)
stream_handler.setLevel(logging.DEBUG)

logger.addHandler(stream_handler)
logger.addHandler(file_handler)
logger.addHandler(file_handler2)

time_logger.addHandler(stream_handler)
time_logger.addHandler(file_handler_time)
def log(log_type, string):
    """Route *string* to the module loggers according to *log_type*.

    "AUTHORING ERROR", "FATAL ERROR" and "ERROR" are logged as errors,
    "INFO" as info; "TIME TOOK" is logged as debug and additionally sent to
    the timing logger as a warning; anything else (including "DEBUG") is
    logged as debug.
    """
    if log_type in ("AUTHORING ERROR", "FATAL ERROR", "ERROR"):
        logger.error(string)
    elif log_type == "INFO":
        logger.info(string)
    elif log_type == "TIME TOOK":
        logger.debug(string)
        time_logger.warning(string)
    else:
        # covers "DEBUG" and any unknown log type
        logger.debug(string)
def timed(func):
    """Decorator that logs the wall-clock execution time of *func*.

    The elapsed time is reported via log('TIME TOOK', ...) together with the
    function name and the base name of the file defining it.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and higher resolution than time.time(),
        # so short calls are timed reliably
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        file_path = func.__code__.co_filename  # path of the decorated function's file
        # os.path.basename is portable; the previous rfind("/") failed on
        # Windows paths (as the original comment itself warned)
        file_name = os.path.basename(file_path)
        log('TIME TOOK', "Time took for function {} in file {} is {} s".format(
            func.__name__, file_name, round(elapsed, 2)))
        return result
    return wrapper
def flatten(l):
    """Flatten one nesting level of a 2-D list.

    Parameters:
        l (list) -- input 2-D list (list of lists)
    Returns:
        (list) -- flat list containing all items of the sublists, in order
    """
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
if __name__ == "__main__":
log("AUTHORING ERROR",'@Thierry Lincoln Test')
"""
import slack
import asyncio
import tracemalloc
tracemalloc.start()
#SLACK_API_DEBUG_TOKEN = os.getenv("SLACK_API_DEBUG_TOKEN")
#client = slack.WebClient(token=SLACK_API_DEBUG_TOKEN,run_async=True)
#loop = asyncio.get_event_loop()
#loop.run_until_complete(post_slack_message(string))
async def post_slack_message(text):
response = await client.chat_postMessage(
channel="popbots-bugs-report",
text=text
)
""" | 2.46875 | 2 |
pandapower/shortcircuit/results.py | ZhengLiu1119/pandapower | 0 | 12770574 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandas as pd
from pandapower.shortcircuit.idx_brch import IKSS_F, IKSS_T, IP_F, IP_T, ITH_F, ITH_T
from pandapower.shortcircuit.idx_bus import IKSS1, IP, ITH, IKSS2, R_EQUIV_OHM, X_EQUIV_OHM, SKSS
from pandapower.pypower.idx_bus import BUS_TYPE, BASE_KV
# keys under which per-branch short-circuit result matrices are stored in
# ppc["internal"] when "return_all_currents" is enabled (_f = from side,
# _t = to side of each branch)
BRANCH_RESULTS_KEYS = ("branch_ikss_f", "branch_ikss_t",
                       "branch_ip_f", "branch_ip_t",
                       "branch_ith_f", "branch_ith_t")
def _copy_result_to_ppci_orig(ppci_orig, ppci, ppci_bus, calc_options):
    """Merge short-circuit results of a (partial) ppci run back into the original ppci.

    Bus results for ppci_bus are copied over; branch results are either
    concatenated column-wise (return_all_currents) or merged element-wise,
    keeping the maximum ("max" case) or minimum ("min" case) per branch.
    """
    # nothing to do when the calculation was done in place
    if ppci_orig is ppci:
        return

    ppci_orig["bus"][ppci_bus, :] = ppci["bus"][ppci_bus, :]
    if calc_options["branch_results"]:
        if calc_options["return_all_currents"]:
            # remember which ppci buses the columns of the result matrices
            # belong to (appending on repeated partial runs)
            ppci_orig["internal"]["br_res_ks_ppci_bus"] =\
                ppci_bus if "br_res_ks_ppci_bus" not in ppci_orig["internal"]\
                else np.r_[ppci_orig["internal"]["br_res_ks_ppci_bus"], ppci_bus]

            for res_key in BRANCH_RESULTS_KEYS:
                # Skip not required data points
                if res_key not in ppci["internal"]:
                    continue

                if res_key not in ppci_orig["internal"]:
                    ppci_orig["internal"][res_key] = ppci["internal"][res_key]
                else:
                    # append the new bus columns to the existing matrix
                    ppci_orig["internal"][res_key] = np.c_[ppci_orig["internal"][res_key],
                                                           ppci["internal"][res_key]]
        else:
            case = calc_options["case"]
            branch_results_cols = [IKSS_F, IKSS_T, IP_F, IP_T, ITH_F, ITH_T]
            # NaNs are neutralized before the comparison (0 for max, a huge
            # number for min) so missing values never win the merge
            if case == "max":
                ppci_orig["branch"][:, branch_results_cols] =\
                    np.maximum(np.nan_to_num(ppci["branch"][:, branch_results_cols]),
                               np.nan_to_num(ppci_orig["branch"][:, branch_results_cols]))
            else:
                ppci_orig["branch"][:, branch_results_cols] =\
                    np.minimum(np.nan_to_num(ppci["branch"][:, branch_results_cols], nan=1e10),
                               np.nan_to_num(ppci_orig["branch"][:, branch_results_cols], nan=1e10))
def _get_bus_ppc_idx_for_br_all_results(net, ppc, bus):
    """Map pandapower bus indices to ppc bus indices.

    Defaults to all buses of the net when *bus* is None. Buses of ppc type 4
    (isolated / out of service) are marked with -1 in the returned index array.
    """
    if bus is None:
        bus = net.bus.index
    lookup = net._pd2ppc_lookups["bus"]
    ppc_index = lookup[bus]
    # flag out-of-service (type 4) buses so callers can skip them
    ppc_index[ppc["bus"][ppc_index, BUS_TYPE] == 4] = -1
    return bus, ppc_index
def _extract_results(net, ppc, ppc_0, bus):
    """Fill the net.res_*_sc result tables from the short-circuit ppc data."""
    _get_bus_results(net, ppc, ppc_0, bus)
    if not net._options["branch_results"]:
        return
    if net._options['return_all_currents']:
        # one result row per (element, fault bus) combination
        _get_line_all_results(net, ppc, bus)
        _get_trafo_all_results(net, ppc, bus)
        _get_trafo3w_all_results(net, ppc, bus)
    else:
        _get_line_results(net, ppc)
        _get_trafo_results(net, ppc)
        _get_trafo3w_results(net, ppc)
def _get_bus_results(net, ppc, ppc_0, bus):
    """Write short-circuit bus results from the ppc arrays into net.res_bus_sc.

    For single-phase ("1ph") faults the zero-sequence ppc (ppc_0) provides the
    equivalent impedances; otherwise the positive-sequence ppc is used.
    Internally, very large impedance values (>= ~1e10, used e.g. in trafo3w
    modelling to block current paths) are replaced by np.inf in the results.
    """
    bus_lookup = net._pd2ppc_lookups["bus"]
    ppc_index = bus_lookup[net.bus.index]
    if net["_options"]["fault"] == "1ph":
        net.res_bus_sc["ikss_ka"] = ppc_0["bus"][ppc_index, IKSS1] + ppc["bus"][ppc_index, IKSS2]
        net.res_bus_sc["rk0_ohm"] = ppc_0["bus"][ppc_index, R_EQUIV_OHM]
        net.res_bus_sc["xk0_ohm"] = ppc_0["bus"][ppc_index, X_EQUIV_OHM]
        # in trafo3w, we add very high numbers (1e10) as impedances to block current
        # here, we need to replace such high values by np.inf
        baseZ = ppc_0["bus"][ppc_index, BASE_KV] ** 2 / ppc_0["baseMVA"]
        # FIX: use a single frame-level .loc instead of chained indexing
        # (df["col"].loc[mask] = ...), which pandas may apply to a temporary
        # copy and silently drop (SettingWithCopyWarning)
        net.res_bus_sc.loc[net.res_bus_sc["xk0_ohm"] / baseZ > 1e9, "xk0_ohm"] = np.inf
        net.res_bus_sc.loc[net.res_bus_sc["rk0_ohm"] / baseZ > 1e9, "rk0_ohm"] = np.inf
    else:
        net.res_bus_sc["ikss_ka"] = ppc["bus"][ppc_index, IKSS1] + ppc["bus"][ppc_index, IKSS2]
        net.res_bus_sc["skss_mw"] = ppc["bus"][ppc_index, SKSS]
    if net._options["ip"]:
        net.res_bus_sc["ip_ka"] = ppc["bus"][ppc_index, IP]
    if net._options["ith"]:
        net.res_bus_sc["ith_ka"] = ppc["bus"][ppc_index, ITH]

    # Export also equivalent rk, xk on the calculated bus
    net.res_bus_sc["rk_ohm"] = ppc["bus"][ppc_index, R_EQUIV_OHM]
    net.res_bus_sc["xk_ohm"] = ppc["bus"][ppc_index, X_EQUIV_OHM]

    # if for some reason (e.g. contribution of ext_grid set close to 0) we used very high values for rk, xk, we replace them by np.inf
    baseZ = ppc["bus"][ppc_index, BASE_KV] ** 2 / ppc["baseMVA"]
    net.res_bus_sc.loc[net.res_bus_sc["rk_ohm"] / baseZ > 1e9, "rk_ohm"] = np.inf
    net.res_bus_sc.loc[net.res_bus_sc["xk_ohm"] / baseZ > 1e9, "xk_ohm"] = np.inf

    # restrict the result frame to the requested buses
    net.res_bus_sc = net.res_bus_sc.loc[bus, :]
def _get_line_results(net, ppc):
    """Write per-line short-circuit currents into net.res_line_sc.

    Of the two line ends, the decisive value is kept: the maximum for the
    "max" case, the minimum otherwise.
    """
    lookup = net._pd2ppc_lookups["branch"]
    case = net._options["case"]
    if "line" not in lookup:
        return
    first, last = lookup["line"]
    reduce_sides = np.max if case == "max" else np.min
    branch = ppc["branch"]
    net.res_line_sc["ikss_ka"] = reduce_sides(branch[first:last, [IKSS_F, IKSS_T]].real, axis=1)
    if net._options["ip"]:
        net.res_line_sc["ip_ka"] = reduce_sides(branch[first:last, [IP_F, IP_T]].real, axis=1)
    if net._options["ith"]:
        net.res_line_sc["ith_ka"] = reduce_sides(branch[first:last, [ITH_F, ITH_T]].real, axis=1)
def _get_line_all_results(net, ppc, bus):
    """Write per-line short-circuit currents for a fault at every requested bus.

    The result frame is re-indexed with a (line, bus) MultiIndex; values come
    from the dense per-bus result matrices in ppc["internal"] and are reduced
    to the decisive branch side ("max" case: maximum of both ends, else min).
    """
    case = net._options["case"]
    bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
    branch_lookup = net._pd2ppc_lookups["branch"]
    multindex = pd.MultiIndex.from_product([net.res_line_sc.index, bus], names=['line','bus'])
    net.res_line_sc = net.res_line_sc.reindex(multindex)
    if "line" in branch_lookup:
        f, t = branch_lookup["line"]
        minmax = np.maximum if case == "max" else np.minimum
        # rows f:t select the lines, columns ppc_index the fault buses; the
        # matrix is flattened into one column aligned with the MultiIndex
        net.res_line_sc["ikss_ka"] = minmax(ppc["internal"]["branch_ikss_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
                                            ppc["internal"]["branch_ikss_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
        if net._options["ip"]:
            net.res_line_sc["ip_ka"] = minmax(ppc["internal"]["branch_ip_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
                                              ppc["internal"]["branch_ip_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
        if net._options["ith"]:
            net.res_line_sc["ith_ka"] = minmax(ppc["internal"]["branch_ith_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
                                               ppc["internal"]["branch_ith_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
def _get_trafo_results(net, ppc):
    """Write hv/lv-side short-circuit currents of 2-winding transformers."""
    lookup = net._pd2ppc_lookups["branch"]
    if "trafo" not in lookup:
        return
    first, last = lookup["trafo"]
    net.res_trafo_sc["ikss_hv_ka"] = ppc["branch"][first:last, IKSS_F].real
    net.res_trafo_sc["ikss_lv_ka"] = ppc["branch"][first:last, IKSS_T].real
def _get_trafo_all_results(net, ppc, bus):
    """Write 2-winding trafo hv/lv currents for faults at all requested buses.

    The result frame is re-indexed with a (trafo, bus) MultiIndex; values come
    from the dense per-bus matrices in ppc["internal"].
    """
    bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
    branch_lookup = net._pd2ppc_lookups["branch"]
    multindex = pd.MultiIndex.from_product([net.res_trafo_sc.index, bus], names=['trafo', 'bus'])
    net.res_trafo_sc = net.res_trafo_sc.reindex(multindex)
    if "trafo" in branch_lookup:
        f, t = branch_lookup["trafo"]
        # rows f:t select the trafos, columns ppc_index the fault buses
        net.res_trafo_sc["ikss_hv_ka"] = ppc["internal"]["branch_ikss_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1)
        net.res_trafo_sc["ikss_lv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1)
def _get_trafo3w_results(net, ppc):
    """Write hv/mv/lv-side short-circuit currents of 3-winding transformers.

    The trafo3w entries occupy three consecutive, equally sized sections of
    ppc["branch"] (hv, mv and lv legs).
    """
    lookup = net._pd2ppc_lookups["branch"]
    if "trafo3w" not in lookup:
        return
    f, t = lookup["trafo3w"]
    # section boundaries of the three legs within [f, t)
    hv = int(f + (t - f) / 3)
    mv = int(f + 2 * (t - f) / 3)
    lv = t
    net.res_trafo3w_sc["ikss_hv_ka"] = ppc["branch"][f:hv, IKSS_F].real
    net.res_trafo3w_sc["ikss_mv_ka"] = ppc["branch"][hv:mv, IKSS_T].real
    net.res_trafo3w_sc["ikss_lv_ka"] = ppc["branch"][mv:lv, IKSS_T].real
def _get_trafo3w_all_results(net, ppc, bus):
    """Write trafo3w hv/mv/lv currents for faults at all requested buses.

    The result frame is re-indexed with a (trafo3w, bus) MultiIndex. The
    trafo3w entries occupy three consecutive, equally sized sections of the
    branch result matrices (hv, mv and lv legs).
    """
    bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
    branch_lookup = net._pd2ppc_lookups["branch"]
    multindex = pd.MultiIndex.from_product([net.res_trafo3w_sc.index, bus], names=['trafo3w', 'bus'])
    net.res_trafo3w_sc = net.res_trafo3w_sc.reindex(multindex)
    if "trafo3w" in branch_lookup:
        f, t = branch_lookup["trafo3w"]
        # section boundaries of the three legs within [f, t)
        hv = int(f + (t - f) / 3)
        mv = int(f + 2 * (t - f) / 3)
        lv = t
        net.res_trafo3w_sc["ikss_hv_ka"] = ppc["internal"]["branch_ikss_f"].iloc[f:hv,:].loc[:, ppc_index].values.real.reshape(-1, 1)
        net.res_trafo3w_sc["ikss_mv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[hv:mv, :].loc[:, ppc_index].values.real.reshape(-1, 1)
        net.res_trafo3w_sc["ikss_lv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[mv:lv, :].loc[:, ppc_index].values.real.reshape(-1, 1)
| 2.046875 | 2 |
mipengine/node/monetdb_interface/views.py | ThanKarab/MIP-Engine | 0 | 12770575 | <reponame>ThanKarab/MIP-Engine
from typing import List
from mipengine.filters import build_filter_clause
from mipengine.node import config as node_config
from mipengine.node.monetdb_interface.common_actions import get_table_names
from mipengine.node.monetdb_interface.monet_db_connection import MonetDB
from mipengine.node_tasks_DTOs import InsufficientDataError
from mipengine.node_tasks_DTOs import TableType
MINIMUM_ROW_COUNT = node_config.privacy.minimum_row_count
def get_view_names(context_id: str) -> List[str]:
    """Return the names of all views belonging to the given context id."""
    view_names = get_table_names(TableType.VIEW, context_id)
    return view_names
def create_view(
    view_name: str,
    table_name: str,
    columns: List[str],
    filters: dict,
    enable_min_rows_threshold=False,
):
    """Create a database VIEW named *view_name* selecting *columns* from *table_name*.

    :param view_name: name of the view to create
    :param table_name: source table the view selects from
    :param columns: column names exposed by the view
    :param filters: filter definition translated by build_filter_clause into a
        SQL WHERE clause; a falsy value means "no filter"
    :param enable_min_rows_threshold: when True and the created view contains
        fewer rows than MINIMUM_ROW_COUNT, the view is dropped again and
        InsufficientDataError is raised (privacy guard)

    NOTE(review): identifiers and the filter clause are interpolated into the
    SQL via f-strings; presumably only engine-internal, validated values reach
    this function -- confirm inputs are sanitized upstream.
    """
    filter_clause = ""
    if filters:
        filter_clause = f"WHERE {build_filter_clause(filters)}"
    columns_clause = ", ".join(columns)

    view_creation_query = f"""
        CREATE VIEW {view_name}
        AS SELECT {columns_clause}
        FROM {table_name}
        {filter_clause}
        """
    MonetDB().execute(view_creation_query)

    if enable_min_rows_threshold:
        # count the rows actually visible through the freshly created view
        view_rows_query_result = MonetDB().execute_and_fetchall(
            f"""
            SELECT COUNT(*)
            FROM {view_name}
            """
        )
        view_rows_result_row = view_rows_query_result[0]
        view_rows_count = view_rows_result_row[0]

        if view_rows_count < MINIMUM_ROW_COUNT:
            # too little data: remove the view again before raising
            MonetDB().execute(f"""DROP VIEW {view_name}""")
            raise InsufficientDataError(
                f"The following view has less rows than the PRIVACY_THRESHOLD({MINIMUM_ROW_COUNT}): {view_creation_query}"
            )
| 2.484375 | 2 |
shop/migrations/0010_auto_20210911_0602.py | Johirul300/BookShop-Project | 0 | 12770576 | <reponame>Johirul300/BookShop-Project<filename>shop/migrations/0010_auto_20210911_0602.py
# Generated by Django 3.2.5 on 2021-09-11 06:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rework CartItem relations: drop the order FK and link items to a cart.

    Both new/changed foreign keys use SET_NULL so deleting a cart or a book
    keeps the cart items around with a null reference.
    """

    dependencies = [
        ('shop', '0009_rename_customer_cart_user'),
    ]

    operations = [
        # cart items are no longer attached to an order ...
        migrations.RemoveField(
            model_name='cartitem',
            name='order',
        ),
        # ... but to a cart instead
        migrations.AddField(
            model_name='cartitem',
            name='cart',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.cart'),
        ),
        # the product FK now points at shop.book and tolerates deletion
        migrations.AlterField(
            model_name='cartitem',
            name='product',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.book'),
        ),
    ]
| 1.492188 | 1 |
include/wxPython/pubsub/core/callables.py | SWEN-712/screen-reader-brandonp728 | 1 | 12770577 | <reponame>SWEN-712/screen-reader-brandonp728
"""
Low level functions and classes related to callables.
The AUTO_TOPIC
is the "marker" to use in callables to indicate that when a message
is sent to those callables, the topic object for that message should be
added to the data sent via the call arguments. See the docs in
CallArgsInfo regarding its autoTopicArgName data member.
:copyright: Copyright since 2006 by <NAME>, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
from inspect import ismethod, isfunction, signature, Parameter
import sys
from types import ModuleType
from typing import Tuple, List, Sequence, Callable, Any
# Opaque constant used to mark a kwarg of a listener as one to which pubsub should assign the topic of the
# message being sent to the listener. This constant should be used by reference; its value is "unique" such that
# pubsub can find such kwarg.
class AUTO_TOPIC: pass
# In the user domain, a listener is any callable, regardless of signature. The return value is ignored,
# i.e. the listener will be treated as though it is a Callable[..., None]. Also, the args, "...", must be
# consistent with the MDS of the topic to which listener is being subscribed.
UserListener = Callable[..., Any]
def getModule(obj: Any) -> ModuleType:
"""
Get the module in which an object was defined.
:param obj: the object for which to get module
:return: the module object, or the string '__main__' if no module defined for obj
(which usually indicates either a builtin, or a definition within main script).
"""
if hasattr(obj, '__module__'):
module = obj.__module__
else:
module = '__main__'
return module
def getID(callable_obj: UserListener) -> Tuple[str, str]:
    """
    Get "ID" of a callable, in the form of its name and the name of the module
    in which it is defined.
    E.g. getID(Foo.bar) returns ('Foo.bar', 'a.b') if Foo.bar was defined in module a.b.
    Note: getModule() returns a module *name* string, so the return type is
    Tuple[str, str] (the previous ``Tuple[str, ModuleType]`` annotation was wrong).
    :param callable_obj: a callable, ie function, bound method or callable instance
    """
    if ismethod(callable_obj):
        # bound method: qualify the function name with the class of the receiver
        module = getModule(callable_obj.__self__)
        obj_name = '%s.%s' % (callable_obj.__self__.__class__.__name__,
                              callable_obj.__func__.__name__)
    elif isfunction(callable_obj):
        module = getModule(callable_obj)
        obj_name = callable_obj.__name__
    else:  # must be a functor (instance of a class that has __call__ method)
        module = getModule(callable_obj)
        obj_name = callable_obj.__class__.__name__
    return obj_name, module
def getRawFunction(callable_obj: UserListener) -> Callable:
    """
    Get the raw function underlying a callable.

    :param callable_obj: any object that can be called
    :return: the callable itself for plain functions and (bound) methods, or
        its __call__ method for a functor (callable instance). The previous
        ``Tuple[Callable]`` annotation and the docstring's mention of an
        "offset" return value were leftovers from an older two-value API.
    :raise ValueError: if callable_obj is not of a recognized type (function,
        method or object with a __call__ method).
    """
    if isfunction(callable_obj) or ismethod(callable_obj):
        return callable_obj
    if hasattr(callable_obj, '__call__'):
        # functor: inspect its __call__ method
        return callable_obj.__call__
    raise ValueError('type "%s" not supported' % type(callable_obj).__name__)
class ListenerMismatchError(ValueError):
    """
    Raised when an attempt is made to subscribe a listener to
    a topic, but listener does not satisfy the topic's message data
    specification (MDS). This specification is inferred from the first
    listener subscribed to a topic, or from an imported topic tree
    specification (see pub.addTopicDefnProvider()).
    """

    def __init__(self, msg: str, listener: UserListener, *args):
        # prefix the raw message with the listener's identity for diagnostics
        idStr, module = getID(listener)
        fullMsg = 'Listener "%s" (from module "%s") inadequate: %s' % (idStr, module, msg)
        super().__init__(fullMsg)
        self.args = args
        self.msg = fullMsg
        self.idStr = idStr
        self.module = module

    def __str__(self):
        return self.msg
class CallArgsInfo:
    """
    Represent the "signature" of a listener of topic messages: which arguments are
    required vs optional.
    """

    def __init__(self, func: UserListener, ignoreArgs: Sequence[str] = ()):
        """
        :param func: the callable for which to get paramaters info
        :param ignoreArgs: do not include the given names in the get*Args() return values

        After construction,
        - self.acceptsAllKwargs = True if the listener has a **kwargs arg
        - self.autoTopicArgName will be the name of argument in which to put the Topic
          object for which pubsub message is sent, or None if auto off. This is identified
          by a parameter that has a default value of AUTO_TOPIC.

        For instance,
        - listener(self, arg1, arg2=AUTO_TOPIC, arg3=None) will have self.allParams = (arg1, arg3),
          self.numRequired=1, and self.autoTopicArgName = 'arg2', whereas
        - listener(self, arg1, arg3=None) will have self.allParams = (arg1, arg3), self.numRequired=1, and
          self.autoTopicArgName = None.
        """
        requiredArgs = []
        optionalArgs = []
        self.autoTopicArgName = None
        self.acceptsAllKwargs = False

        for argName, param in signature(func).parameters.items():
            # *args and explicitly ignored names never count as message data
            if argName in ignoreArgs or param.kind == Parameter.VAR_POSITIONAL:
                continue

            if param.kind == Parameter.VAR_KEYWORD:
                self.acceptsAllKwargs = True
                continue

            if param.default == Parameter.empty:
                requiredArgs.append(argName)
            else:
                if param.default == AUTO_TOPIC:
                    # this parameter automatically receives the Topic object
                    self.autoTopicArgName = argName
                else:
                    optionalArgs.append(argName)

        self.requiredArgs = tuple(requiredArgs)
        self.optionalArgs = tuple(optionalArgs)
        self.allParams = self.requiredArgs + self.optionalArgs

    def getAllArgs(self) -> Tuple[str]:
        """
        Return a tuple of names indicating the complete set of message data
        (keyword args) that can be given to this listener
        """
        # BUG FIX: previously returned only self.optionalArgs, contradicting
        # the docstring; the complete set is required + optional params.
        return self.allParams

    def getOptionalArgs(self) -> Tuple[str]:
        """
        Return a tuple of names indicating which message data (keyword args)
        are optional when this listener is called.
        """
        return self.optionalArgs

    def getRequiredArgs(self) -> Tuple[str]:
        """
        Return a tuple of names indicating which message data (keyword args)
        are required when this listener is called.
        """
        return self.requiredArgs
def getArgs(callable_obj: UserListener, ignoreArgs: Sequence[str] = ()) -> CallArgsInfo:
    """
    Get the call parameters of a callable to be used as listener.
    :param callable_obj: the callable for which to get call parameters
    :param ignoreArgs: optional list of names of parameters of callable_obj that should not be in the returned object
    :return: an instance of CallArgsInfo for the given callable_obj
    :raise ListenerMismatchError: if callable_obj is not a callable, or ignoreArgs has an item that is not a call
        param of callable
    """
    # figure out what is the actual function object to inspect:
    try:
        func = getRawFunction(callable_obj)
    except ValueError as exc:
        # modern exception capture instead of sys.exc_info()[1]; keep the
        # original cause attached for easier debugging
        raise ListenerMismatchError(str(exc), callable_obj) from exc

    return CallArgsInfo(func, ignoreArgs=ignoreArgs)
| 2.84375 | 3 |
makahiki/apps/widgets/status/migrations/0002_auto__chg_field_dailystatus_date__add_unique_dailystatus_date.py | justinslee/Wai-Not-Makahiki | 1 | 12770578 | <reponame>justinslee/Wai-Not-Makahiki
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make DailyStatus.date a unique CharField(50).

    Forwards converts the auto-populated DateTimeField into a unique string
    column; backwards restores the original DateTimeField.
    """

    def forwards(self, orm):

        # Changing field 'DailyStatus.date'
        db.alter_column('status_dailystatus', 'date', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50))
        # Adding unique constraint on 'DailyStatus', fields ['date']
        db.create_unique('status_dailystatus', ['date'])

    def backwards(self, orm):

        # Removing unique constraint on 'DailyStatus', fields ['date']
        db.delete_unique('status_dailystatus', ['date'])

        # Changing field 'DailyStatus.date'
        db.alter_column('status_dailystatus', 'date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))

    # frozen ORM snapshot used by South to reconstruct the model state
    models = {
        'status.dailystatus': {
            'Meta': {'object_name': 'DailyStatus'},
            'daily_visitors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['status']
| 2.203125 | 2 |
app/models/vehicle.py | TataneInYourFace/wifill-api | 0 | 12770579 | from django.db import models
from app.models.user import User
class Vehicle(models.Model):
    """A user's registered vehicle, identified by its licence plate."""

    # nullable so a vehicle record can exist without an owning account;
    # on_delete=models.CASCADE makes the pre-Django-2.0 implicit default
    # explicit (the keyword is mandatory from Django 2.0 on)
    user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
    plate = models.CharField(max_length=255, blank=False)
    brand = models.CharField(max_length=255, blank=False)
    name = models.CharField(max_length=255, blank=False)
    color = models.CharField(max_length=255, blank=True)

    class Meta:
        ordering = ('user', 'brand',)
| 2.3125 | 2 |
src/cone/firebase/management.py | conestack/cone.firebase | 0 | 12770580 | from cone.app import ugm_backend
from cone.ugm.events import UserCreatedEvent
from cone.ugm.events import UserDeletedEvent
from cone.ugm.events import UserModifiedEvent
from firebase_admin import auth
from firebase_admin.auth import UserNotFoundError
from pyramid.security import remember
from typing import List
from typing import Tuple
from yafowil.base import ExtractionError
from zope.event import classhandler
import cone.firebase
FIREBASE_DEVICE_TOKENS = "firebase_device_tokens"
@classhandler.handler(UserCreatedEvent)
def on_user_created(event: UserCreatedEvent):
    """Mirror a newly created local user into firebase.

    Users are pushed to firebase unless an installed ``firebase_user``
    checkbox explicitly opts them out; accounts without an email address are
    only logged, since firebase accounts are keyed by email here.
    """
    user = event.principal
    uid = user.attrs["id"]
    email = user.attrs["mail"]
    # if the firebase_user checkbox is not installed, all users go to firebase
    wants_firebase = user.attrs["firebase_user"] if "firebase_user" in user.attrs else True
    if not wants_firebase:
        return
    if not email:
        cone.firebase.logger.warn(f"user {uid} has no email -> not added to firebase")
        return
    user_record = create_firebase_user(user, event.password)
    cone.firebase.logger.info(f"user {uid} added to firebase with email {email} -> {user_record.__dict__}")
def create_firebase_user(user, password):
    """Create a firebase account for *user* with the given plain-text password.

    Side effect: sets the user's ``login`` attribute to "mail", i.e. the user
    will subsequently log in via email. Returns the firebase UserRecord.
    """
    fullname = user.attrs["fullname"]
    user_record = auth.create_user(
        uid=user.attrs["id"],
        email=user.attrs["mail"],
        # phone_number='+15555550100',
        # email_verified=True,
        password=password,
        display_name=fullname,
        # photo_url='http://www.example.com/12345678/photo.png',
        disabled=False
    )
    user.attrs["login"] = "mail"
    return user_record
@classhandler.handler(UserModifiedEvent)
def on_user_modified(event: UserModifiedEvent):
    """Propagate changes of a local user to its firebase account.

    If the account is missing in firebase and the user is flagged as a
    firebase user, it is created there first. Only the plain password carried
    by the event may be pushed -- never the locally hashed one.
    """
    user = event.principal
    email = user.attrs["mail"]
    uid = user.attrs["id"]
    try:
        fbuser = auth.get_user(uid)
    except UserNotFoundError:
        # user does not exist in firebase, lets push it to fb, the password has to be specified by hand, otherwise the
        # hashed password will be set in fb!
        if user.attrs.get("firebase_user", False):
            cone.firebase.logger.warn(f"user wth id {uid} not found in firebase, creating it in fb")
            if not user.attrs["fullname"]:
                raise ExtractionError("Fullname not given")
            fbuser = create_firebase_user(user, event.password)
            cone.firebase.logger.warn(f"created user wth id {uid} in fb")
        else:
            fbuser = None
            cone.firebase.logger.warn(f"user wth id {uid} not found in firebase")
    if email and fbuser:
        fullname = user.attrs["fullname"]
        params = dict(
            email=email,
            # phone_number='+15555550100',
            display_name=fullname,
            disabled=False)
        # FIX: the original contained corrupted placeholder assignments
        # ("password = <PASSWORD>"); the intended value is the plain password
        # from the event, and it is only sent when one was actually (re)set
        if event.password:
            params["password"] = event.password
        res = auth.update_user(
            uid,
            **params
        )
        cone.firebase.logger.info(f"user {uid} changes promoted to firebase with email {email} -> {res}")
    else:
        cone.firebase.logger.warn(f"user {uid} has no email -> not added to firebase")
@classhandler.handler(UserDeletedEvent)
def on_user_deleted(event):
    """Delete the corresponding firebase account when a local user is removed."""
    user = event.principal
    uid = user.attrs["id"]
    try:
        # raises UserNotFoundError if no such firebase account exists
        # (the returned record was never used, so it is not bound anymore)
        auth.get_user(uid)
        auth.delete_user(uid)
        cone.firebase.logger.info(f"user with id {uid} deleted in firebase")
    except UserNotFoundError:
        cone.firebase.logger.warn(f"user with id {uid} not found in firebase -> user not deleted in fb")
def authenticate_with_id_token(request, id_token: str) -> Tuple[str, str]:
    """
    uses the firebase ID token to login without password
    needs installed UGM user folder located at AppRoot()["users"]

    If the token's user is unknown locally, a local UGM user record is
    created on the fly from the token claims (email, name, phone, ...).

    :return: tuple (user_id, auth token)
    """
    # users = AppRoot()["users"]
    cone.firebase.logger.info(f"loggin in with firebase id token: {id_token}")
    ugm = ugm_backend.ugm
    users = ugm.users
    # verify_id_token raises if the token is invalid/expired
    res = auth.verify_id_token(id_token)
    user_id = res["user_id"]
    if user_id not in users:
        # TODO: creation shall take place in security.authenticate()
        cone.firebase.logger.info(f"user with id {user_id} not stored locally - creating")
        users.create(
            user_id,
            login="email",
            email=res["email"],
            fullname=res.get("name", ""),
            email_verified=res["email_verified"],
            phone=res.get("phone_number", ""),
            idtoken=id_token
        )
        cone.firebase.logger.info(f"user with {user_id} successfully created")
    # remember() returns the response headers establishing the pyramid session
    return user_id, remember(request, user_id)
def register_device_token_for_user(login: str, token: str) -> List[str]:
    """
    Register a firebase device token for a given user.

    :param login: email or uid
    :param token: firebase device token
    :return: the user's stored device token list after registration
    """
    users = ugm_backend.ugm.users
    # the caller may pass either a uid or a login name (email)
    uid = login if login in users else users.id_for_login(login)
    user = users[uid]
    existing = user.attrs.get(FIREBASE_DEVICE_TOKENS, []) or []
    if token not in existing:
        user.attrs[FIREBASE_DEVICE_TOKENS] = list(existing) + [token]
    return user.attrs[FIREBASE_DEVICE_TOKENS]
def unregister_device_token_for_user(login: str, token: str):
    """
    Remove a firebase device token from a given user (no-op if absent).

    :param login: email or uid
    :param token: firebase device token
    """
    users = ugm_backend.ugm.users
    # the caller may pass either a uid or a login name (email)
    uid = login if login in users else users.id_for_login(login)
    user = users[uid]
    existing = user.attrs.get(FIREBASE_DEVICE_TOKENS, []) or []
    if token in existing:
        user.attrs[FIREBASE_DEVICE_TOKENS] = [t for t in existing if t != token]
def get_device_tokens_for_user(login: str) -> List[str]:
    """Return the registered firebase device tokens for a user.

    :param login: email or uid
    :return: the stored token list, or an empty list for unknown users
    """
    users = ugm_backend.ugm.users
    # the caller may pass either a uid or a login name (email)
    uid = login if login in users else users.id_for_login(login)
    if uid not in users:
        return []
    return users[uid].attrs.get(FIREBASE_DEVICE_TOKENS, []) or []
| 2.0625 | 2 |
DobotDemoV2.0/Python/DobotDemoForPython/DobotDemoForPython/DobotControl.py | nanusefue/dobotMagician | 5 | 12770581 | import threading
import DobotDllType as dType
# map Dobot connection status codes to readable names for the status printout
CON_STR = {
    dType.DobotConnect.DobotConnect_NoError: "DobotConnect_NoError",
    dType.DobotConnect.DobotConnect_NotFound: "DobotConnect_NotFound",
    dType.DobotConnect.DobotConnect_Occupied: "DobotConnect_Occupied"}

# Load the Dobot DLL and obtain an API handle
api = dType.load()

# Connect to the Dobot (auto-detected port, 115200 baud)
state = dType.ConnectDobot(api, "", 115200)[0]
print("Connect status:",CON_STR[state])

if (state == dType.DobotConnect.DobotConnect_NoError):

    # Clean any previously queued commands
    dType.SetQueuedCmdClear(api)

    # Async motion parameters (velocities/accelerations of 200, ratios of 100)
    dType.SetHOMEParams(api, 200, 200, 200, 200, isQueued = 1)
    dType.SetPTPJointParams(api, 200, 200, 200, 200, 200, 200, 200, 200, isQueued = 1)
    dType.SetPTPCommonParams(api, 100, 100, isQueued = 1)

    # Queue the homing routine
    dType.SetHOMECmd(api, temp = 0, isQueued = 1)

    # Queue 5 PTP moves, alternating the offset sign each iteration;
    # lastIndex keeps the queue index of the final queued command
    for i in range(0, 5):
        if i % 2 == 0:
            offset = 50
        else:
            offset = -50
        lastIndex = dType.SetPTPCmd(api, dType.PTPMode.PTPMOVLXYZMode, 200 + offset, offset, offset, offset, isQueued = 1)[0]

    # Start executing the queued commands
    dType.SetQueuedCmdStartExec(api)

    # Poll until the last queued command has been executed
    while lastIndex > dType.GetQueuedCmdCurrentIndex(api)[0]:
        dType.dSleep(100)

    # Stop executing queued commands
    dType.SetQueuedCmdStopExec(api)

# Disconnect from the Dobot (also when the connection attempt failed)
dType.DisconnectDobot(api)
| 2.15625 | 2 |
tests/analysis/file_hashes.py | SamuelePilleri/plaso | 0 | 12770582 | <filename>tests/analysis/file_hashes.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the unique hashes analysis plugin."""
from __future__ import unicode_literals
import unittest
from dfvfs.path import fake_path_spec
from plaso.analysis import file_hashes
from tests.analysis import test_lib
class UniqueHashesTest(test_lib.AnalysisPluginTestCase):
  """Test for the unique hashes analysis plugin."""

  # Fake path-spec based events: several files sharing hash '4', one file with
  # an additional alternate hash and one file without any hash attribute.
  _TEST_EVENTS = [
      {'pathspec': fake_path_spec.FakePathSpec(
          location='/var/testing directory with space/file.txt'),
       'test_hash': '4'},
      {'pathspec': fake_path_spec.FakePathSpec(
          location='C:\\Windows\\a.file.txt'),
       'test_hash': '4'},
      {'pathspec': fake_path_spec.FakePathSpec(
          location='/opt/dfvfs'),
       'test_hash': '4'},
      {'pathspec': fake_path_spec.FakePathSpec(
          location='/opt/2hash_file'),
       'test_hash': '4',
       'alternate_test_hash': '5'},
      {'pathspec': fake_path_spec.FakePathSpec(
          location='/opt/no_hash_file')}
  ]

  def testExamineEventAndCompileReport(self):
    """Tests the ExamineEvent and CompileReport functions."""
    events = []
    for event_dictionary in self._TEST_EVENTS:
      event = self._CreateTestEventObject(event_dictionary)
      events.append(event)

    plugin = file_hashes.FileHashesPlugin()
    storage_writer = self._AnalyzeEvents(events, plugin)

    self.assertEqual(len(storage_writer.analysis_reports), 1)

    analysis_report = storage_writer.analysis_reports[0]

    # paths are listed sorted, with all hash attributes per path
    expected_text = (
        'Listing file paths and hashes\n'
        'FAKE:/opt/2hash_file: alternate_test_hash=5 test_hash=4\n'
        'FAKE:/opt/dfvfs: test_hash=4\n'
        'FAKE:/opt/no_hash_file:\n'
        'FAKE:/var/testing directory with space/file.txt: test_hash=4\n'
        'FAKE:C:\\Windows\\a.file.txt: test_hash=4\n')
    self.assertEqual(expected_text, analysis_report.text)
    self.assertEqual(analysis_report.plugin_name, 'file_hashes')
# allow running this test module directly
if __name__ == '__main__':
  unittest.main()
| 2.53125 | 3 |
Data Science With Python/02-intermediate-python-for-data-science/1-matplotlib/scatter_plot-(1).py | aimanahmedmoin1997/DataCamp | 3 | 12770583 | <reponame>aimanahmedmoin1997/DataCamp<filename>Data Science With Python/02-intermediate-python-for-data-science/1-matplotlib/scatter_plot-(1).py
'''
Scatter Plot (1)
100xp
When you have a time scale along the horizontal axis, the line plot is your friend.
But in many other cases, when you're trying to assess if there's a correlation between
two variables, for example, the scatter plot is the better choice. Below is an example
of how to build a scatter plot.
import matplotlib.pyplot as plt
plt.scatter(x,y)
plt.show()
Let's continue with the gdp_cap versus life_exp plot, the GDP and life expectancy data
for different countries in 2007. Maybe a scatter plot will be a better alternative?
Again, the matplotlib.pyplot package is available as plt.
Instructions
-Change the line plot that's coded in the script to a scatter plot.
-A correlation will become clear when you display the GDP per capita on a logarithmic scale.
Add the line plt.xscale('log').
-Finish off your script with plt.show() to display the plot.
'''
import matplotlib.pyplot as plt
# Change the line plot below to a scatter plot
# NOTE: gdp_cap and life_exp are provided by the surrounding DataCamp
# exercise environment; this snippet is not runnable standalone.
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale (GDP per capita spans orders of magnitude)
plt.xscale('log')
# Show plot
plt.show()
| 4.46875 | 4 |
plotting/output_mpl_colormap_to_RGB.py | mnky9800n/python-data-analysis-tools | 2 | 12770584 | <reponame>mnky9800n/python-data-analysis-tools
# output colormap to txt file
#
# dependencies: matplotlib
from matplotlib import cm
import csv
# csv.writer requires a text-mode file in Python 3 ('wb' raises TypeError);
# newline='' keeps the csv module from inserting blank lines on Windows.
with open('ylgnbu.txt', 'w', newline='') as f:
    writer = csv.writer(f, delimiter=' ')
    # a matplotlib colormap has 256 entries (0..255); range(0, 255) used to
    # drop the last one. The redundant f.close() inside the with-block is gone.
    for c in range(256):
        # in this case I picked YlGnBu but you can pick any
        # color map you want; cm.YlGnBu(c) returns an (r, g, b, a) tuple
        writer.writerow(cm.YlGnBu(c))
| 2.890625 | 3 |
forecast.py | Amirktb1994/infamous-werewolves | 0 | 12770585 | <reponame>Amirktb1994/infamous-werewolves
import os
import pickle
from features import *
# silence pandas SettingWithCopyWarning for the feature-engineering steps below
pd.options.mode.chained_assignment = None
# per-city models trained offline; pickle is only safe because the file is
# produced by our own training run (never load untrusted pickles)
with open('model_dict.pickle' , 'rb') as f:
    model = pickle.load(f)
data_dir = '/hkfs/work/workspace/scratch/bh6321-energy_challenge/data/'
save_dir = 'forecasts/'
# dataloader: prefer the test split, fall back to validation
test_file = os.path.join(data_dir, 'test.csv')
valid_file = os.path.join(data_dir, 'valid.csv')
data_file = test_file if os.path.exists(test_file) else valid_file
df = pd.read_csv(data_file)
# feature pipeline from the project's features module
df = preprocess(df)
df = add_time_features(df, drop = True)
df.drop(columns='day_name', inplace=True)
df_list = add_ts_features(df, return_as_list=True)
# predict per city with the matching model
forecast_dict = dict()
for df_tmp in df_list:
    city = df_tmp.city.unique()[0]
    print(city)
    df_tmp.drop(columns='city', inplace=True)
    pred = model[city].predict(df_tmp)
    forecast_dict[city] = pred
# build 168-hour (one week) sliding windows per forecast origin;
# NOTE(review): 8424 is presumably the number of forecast origins in the
# evaluation period -- confirm against the challenge specification
y_pred = np.array([])
for f in forecast_dict.values():
    f = f[:-24*7]
    for i in range(8424):
        y_pred = np.concatenate([y_pred, f[i : i+168]])
# one submission row per origin, 168 hourly values each
submission = pd.DataFrame(np.reshape(y_pred, (len(y_pred)//(24*7), 24*7)))
# save to csv
result_path = os.path.join(save_dir, 'forecasts.csv')
submission.to_csv(result_path, header=False, index=False)
print(f"Done! The result is saved in {result_path}")
vip/apis.py | zhongmei57485/SwiperPro | 0 | 12770586 | from django.shortcuts import render
from libs.http import render_json
from vip.models import Vip
def info(request):
    """Return every VIP level above 0, with its permissions, as a JSON response."""
    levels = []
    for vip in Vip.objects.exclude(level=0).order_by('level'):
        entry = vip.to_dict()
        entry['perms'] = [perm.to_dict() for perm in vip.perms]
        levels.append(entry)
    return render_json(data=levels)
src/python/pants/backend/native/subsystems/libc_dev.py | StephanErb/pants | 0 | 12770587 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import HostLibcDev
from pants.backend.native.subsystems.utils.parse_search_dirs import ParseSearchDirs
from pants.base.hash_utils import hash_file
from pants.option.custom_types import dir_option
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_property
class LibcDev(Subsystem):
  """Subsystem to detect and provide the host's installed version of a libc "dev" package.
  A libc "dev" package is provided on most Linux systems by default, but may not be located at any
  standardized path. We define a libc dev package as one which provides crti.o, an object file which
  is part of any libc implementation and is required to create executables (more information
  available at https://wiki.osdev.org/Creating_a_C_Library).
  NB: This is currently unused except in CI, because we have no plans to support creating native
  executables from C or C++ sources yet (PRs welcome!). It is used to provide an "end-to-end" test
  of the compilation and linking toolchain in CI by creating and invoking a "hello world"
  executable.
  """
  options_scope = 'libc'

  # Raised when no directory containing crti.o can be located on the host.
  class HostLibcDevResolutionError(Exception): pass

  @classmethod
  def subsystem_dependencies(cls):
    return super(LibcDev, cls).subsystem_dependencies() + (ParseSearchDirs.scoped(cls),)

  @memoized_property
  def _parse_search_dirs(self):
    # Helper that parses the output of `<compiler> -print-search-dirs`.
    return ParseSearchDirs.scoped_instance(self)

  @classmethod
  def register_options(cls, register):
    super(LibcDev, cls).register_options(register)
    register('--enable-libc-search', type=bool, default=False, fingerprint=True, advanced=True,
             help="Whether to search for the host's libc installation. Set to False if the host "
                  "does not have a libc install with crti.o -- this file is necessary to create "
                  "executables on Linux hosts.")
    register('--libc-dir', type=dir_option, default=None, fingerprint=True, advanced=True,
             help='A directory containing a host-specific crti.o from libc.')
    register('--host-compiler', type=str, default='gcc', fingerprint=True, advanced=True,
             help='The host compiler to invoke with -print-search-dirs to find the host libc.')

  # NB: crti.o is required to create executables on Linux. Our provided gcc and clang can find it if
  # the containing directory is within the LIBRARY_PATH environment variable when we invoke the
  # compiler.
  _LIBC_INIT_OBJECT_FILE = 'crti.o'

  def _get_host_libc_from_host_compiler(self):
    """Locate the host's libc-dev installation using a specified host compiler's search dirs.

    Returns a HostLibcDev, or raises HostLibcDevResolutionError if no search
    dir of the configured --host-compiler contains crti.o.
    """
    compiler_exe = self.get_options().host_compiler
    # Implicitly, we are passing in the environment of the executing pants process to
    # `get_compiler_library_dirs()`.
    # These directories are checked to exist!
    library_dirs = self._parse_search_dirs.get_compiler_library_dirs(compiler_exe)
    libc_crti_object_file = None
    for libc_dir_candidate in library_dirs:
      maybe_libc_crti = os.path.join(libc_dir_candidate, self._LIBC_INIT_OBJECT_FILE)
      if os.path.isfile(maybe_libc_crti):
        # First match wins; search dirs are in the compiler's own precedence order.
        libc_crti_object_file = maybe_libc_crti
        break
    if not libc_crti_object_file:
      raise self.HostLibcDevResolutionError(
        "Could not locate {fname} in library search dirs {dirs} from compiler: {compiler!r}. "
        "You may need to install a libc dev package for the current system. "
        "For many operating systems, this package is named 'libc-dev' or 'libc6-dev'."
        .format(fname=self._LIBC_INIT_OBJECT_FILE, dirs=library_dirs, compiler=compiler_exe))
    # Fingerprint the file contents so cached results invalidate when libc changes.
    return HostLibcDev(crti_object=libc_crti_object_file,
                       fingerprint=hash_file(libc_crti_object_file))

  @memoized_property
  def _host_libc(self):
    """Use the --libc-dir option if provided, otherwise invoke a host compiler to find libc dev."""
    libc_dir_option = self.get_options().libc_dir
    if libc_dir_option:
      maybe_libc_crti = os.path.join(libc_dir_option, self._LIBC_INIT_OBJECT_FILE)
      if os.path.isfile(maybe_libc_crti):
        return HostLibcDev(crti_object=maybe_libc_crti,
                           fingerprint=hash_file(maybe_libc_crti))
      # An explicit --libc-dir that lacks crti.o is a hard error (no fallback).
      raise self.HostLibcDevResolutionError(
        "Could not locate {} in directory {} provided by the --libc-dir option."
        .format(self._LIBC_INIT_OBJECT_FILE, libc_dir_option))
    return self._get_host_libc_from_host_compiler()

  def get_libc_dirs(self, platform):
    # Searching is opt-in; with --no-enable-libc-search no extra dirs are contributed.
    if not self.get_options().enable_libc_search:
      return []
    return platform.resolve_platform_specific({
      'darwin': lambda: [],
      'linux': lambda: [self._host_libc.get_lib_dir()],
    })
| 2.015625 | 2 |
data_preprocessing/map_root.py | skelemoa/mugl | 21 | 12770588 | import numpy as np
import ipyvolume as ipv
import h5py
import os
import matplotlib.pyplot as plt
import sys
from tqdm import tqdm
# Locations of the raw Kinect skeletons and the processed dataset.
# NOTE(review): `dir` shadows the builtin of the same name.
kinect_dir = '../dataset/kinect/'
dir = '../dataset/data/'
# Cache the directory listing once so per-sample lookups are cheap.
kinect_files = os.listdir(kinect_dir)
missing_file_count = 0
def get_vibe_dir(x):
    """Facing normal of a VIBE skeleton: cross of (joint16 - joint0) and (joint17 - joint0)."""
    edge_a = x[16, :] - x[0, :]
    edge_b = x[17, :] - x[0, :]
    return np.cross(edge_a, edge_b)
def get_kinect_dir(x):
    """Facing normal of a Kinect skeleton: cross of (joint8 - joint0) and (joint4 - joint0)."""
    edge_a = x[8, :] - x[0, :]
    edge_b = x[4, :] - x[0, :]
    return np.cross(edge_a, edge_b)
def get_kinect_peron(i):
    """Return the Kinect skeletons for sample id *i* as a (256, 2, 25, 3) array.

    Missing files or empty recordings yield all zeros; a single-person
    recording fills only the first person slot.
    """
    fname = i + '.skeleton.npy'
    out = np.zeros((300, 2, 25, 3))
    if fname in kinect_files:
        kp = np.load(os.path.join(kinect_dir, fname))
        if kp.shape[0] != 0:
            out[:, 0, :, :] = kp[0, :, :, :]
            if kp.shape[0] != 1:
                out[:, 1, :, :] = kp[1, :, :, :]
    # Only the first 256 frames are used downstream.
    return out[:256, :, :, :]
def order_root(kinect_person, vibe):
    """Return the two Kinect root trajectories, ordered to match VIBE's person order.

    Args:
        kinect_person: (256, 2, 25, 3) Kinect skeletons.
        vibe: VIBE skeletons, reshapeable to (256, 2, 24, 3).

    Returns:
        (first, second): two (256, 1, 3) root-position arrays.  They are
        swapped when the relative orientation of the two persons disagrees
        between the two sources.
    """
    vibe = vibe.reshape((256, 2, 24, 3))
    left = kinect_person[:, 0, 0, :].reshape((256, 1, 3))
    right = kinect_person[:, 1, 0, :].reshape((256, 1, 3))
    # Compare the relative orientation of the two persons in both sources,
    # using the first frame only.  (The previous version also computed
    # root-shifted person poses here, but never used them -- dead code removed.)
    v_cross = np.cross(get_vibe_dir(vibe[0, 0, :, :]),
                       get_vibe_dir(vibe[0, 1, :, :]))
    k_cross = np.cross(get_kinect_dir(kinect_person[0, 0, :, :]),
                       get_kinect_dir(kinect_person[0, 1, :, :]))
    dot_prod = np.sum(v_cross * k_cross)
    # Negative dot product: the sources list the persons in opposite order,
    # so swap.  Zero (e.g. a missing/degenerate person) keeps the original
    # order, same as a positive dot product.
    if dot_prod < 0:
        return right, left
    return left, right
def get_root(x, y, train_file_names):
    """Collect ordered root trajectories for all two-person samples.

    Args:
        x: VIBE skeleton data, one row per sample.
        y: class label per sample.
        train_file_names: per-sample Kinect file identifiers.

    Returns:
        (n_two_person_samples, 256, 2, 3) array of root positions.
    """
    count = 0
    root_list = []
    # person_2_cls = [50,51,52,53,54,55,56,57,58,59,60]
    # NTU-120 mutual (two-person) action class ids.
    person_2_cls = [50,51,52,53,54,55,56,57,58,59,60,106, 107, 108, 109
                    ,110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120]
    for i in tqdm(range(train_file_names.shape[0])):
        file_name = train_file_names[i]
        # Some entries come wrapped in arrays/lists; unwrap to the bare id string
        # (a plain id string has length 20).
        if len(file_name) >= 2 and len(file_name) != 20:
            file_name = file_name[0]
        if str(file_name)[0] == '[':
            file_name = file_name[0]
        root = np.zeros((256, 2, 3))
        if y[i] in person_2_cls:
            p = get_kinect_peron(file_name)
            left, right = order_root(p, x[i])
            # NOTE(review): class 60 is special-cased with swapped persons --
            # confirm this matches the dataset's annotation convention.
            if y[i] == 60:
                root[:,0:1,:] = right
                root[:,1:,:] = left
            else:
                root[:,0:1,:] = left
                root[:,1:,:] = right
            root_list.append(root)
    return np.array(root_list)
if __name__ == '__main__':
    # Load the VIBE training split and save the ordered per-sample roots.
    f = h5py.File(os.path.join(dir, 'NTU_VIBE_CSet_120.h5'), 'r')
    # train data
    x = f['x'][:]
    y = f['y'][:]
    train_file_names = np.load(os.path.join(dir, 'Train_File_order.npy'), allow_pickle=True)
    # print(x.shape)
    train_root = get_root(x, y, train_file_names)
    print(train_root.shape)
    np.save(dir + 'Train_root.npy', train_root)
    # test data (disabled mirror of the training pass above)
    # test_x = f['test_x'][:]
    # test_y = f['test_y'][:]
    # test_file_names = np.load(os.path.join(dir, 'Test_File_order.npy'), allow_pickle=True)
    # test_root = get_root(test_x, test_y, test_file_names)
    # print(test_root.shape)
    # np.save(dir + 'Test_root.npy', test_root)
oslo_messaging/tests/rpc/test_dispatcher.py | sapcc/oslo.messaging | 131 | 12770589 | <gh_stars>100-1000
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
import time
import oslo_messaging
from oslo_messaging import rpc
from oslo_messaging import serializer as msg_serializer
from oslo_messaging.tests import utils as test_utils
from unittest import mock
load_tests = testscenarios.load_tests_apply_scenarios
class _FakeEndpoint(object):
    """Minimal RPC endpoint with a public, an explicitly exposed and a
    private method, used as the mock spec in the dispatcher tests."""

    def __init__(self, target=None):
        self.target = target

    def foo(self, ctxt, **kwargs):
        pass

    # Explicitly exposed for the ExplicitRPCAccessPolicy scenarios.
    @rpc.expose
    def bar(self, ctxt, **kwargs):
        pass

    # Leading underscore: must never be dispatchable.
    def _foobar(self, ctxt, **kwargs):
        pass
class TestDispatcher(test_utils.BaseTestCase):
    """Scenario-driven tests of RPCDispatcher method routing.

    Each scenario defines: the Target kwargs of the registered endpoints,
    the access policy, the incoming message, the endpoint/method the
    dispatch is expected to reach (``dispatch_to``, or None), which methods
    the policy treats as callable (``exposed_methods``), and the expected
    success flag / exception type.
    """

    scenarios = [
        ('no_endpoints',
         dict(endpoints=[],
              access_policy=None,
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo'),
              exposed_methods=['foo', 'bar', '_foobar'],
              success=False, ex=oslo_messaging.UnsupportedVersion)),
        ('default_target',
         dict(endpoints=[{}],
              access_policy=None,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo'),
              exposed_methods=['foo', 'bar', '_foobar'],
              success=True, ex=None)),
        ('default_target_ctxt_and_args',
         dict(endpoints=[{}],
              access_policy=oslo_messaging.LegacyRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='bar'),
              ctxt=dict(user='bob'), msg=dict(method='bar',
                                              args=dict(blaa=True)),
              exposed_methods=['foo', 'bar', '_foobar'],
              success=True, ex=None)),
        ('default_target_namespace',
         dict(endpoints=[{}],
              access_policy=oslo_messaging.LegacyRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace=None),
              exposed_methods=['foo', 'bar', '_foobar'],
              success=True, ex=None)),
        ('default_target_version',
         dict(endpoints=[{}],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', version='1.0'),
              exposed_methods=['foo', 'bar'],
              success=True, ex=None)),
        ('default_target_no_such_method',
         dict(endpoints=[{}],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=None,
              ctxt={}, msg=dict(method='foobar'),
              exposed_methods=['foo', 'bar'],
              success=False, ex=oslo_messaging.NoSuchMethod)),
        ('namespace',
         dict(endpoints=[{}, dict(namespace='testns')],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace='testns'),
              exposed_methods=['foo', 'bar'],
              success=True, ex=None)),
        ('namespace_mismatch',
         dict(endpoints=[{}, dict(namespace='testns')],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', namespace='nstest'),
              exposed_methods=['foo', 'bar'],
              success=False, ex=oslo_messaging.UnsupportedVersion)),
        ('version',
         dict(endpoints=[dict(version='1.5'), dict(version='3.4')],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', version='3.2'),
              exposed_methods=['foo', 'bar'],
              success=True, ex=None)),
        ('version_mismatch',
         dict(endpoints=[dict(version='1.5'), dict(version='3.0')],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', version='3.2'),
              exposed_methods=['foo', 'bar'],
              success=False, ex=oslo_messaging.UnsupportedVersion)),
        ('message_in_null_namespace_with_multiple_namespaces',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=[None])],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace=None),
              exposed_methods=['foo', 'bar'],
              success=True, ex=None)),
        ('message_in_wrong_namespace_with_multiple_namespaces',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=['second', None])],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', namespace='wrong'),
              exposed_methods=['foo', 'bar'],
              success=False, ex=oslo_messaging.UnsupportedVersion)),
        ('message_with_endpoint_no_private_and_public_method',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=['second', None])],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace='testns'),
              exposed_methods=['foo', 'bar'],
              success=True, ex=None)),
        ('message_with_endpoint_no_private_and_private_method',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=['second', None], )],
              access_policy=oslo_messaging.DefaultRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='_foobar'),
              ctxt={}, msg=dict(method='_foobar', namespace='testns'),
              exposed_methods=['foo', 'bar'],
              success=False, ex=oslo_messaging.NoSuchMethod)),
        ('message_with_endpoint_explicitly_exposed_without_exposed_method',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=['second', None], )],
              access_policy=oslo_messaging.ExplicitRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace='testns'),
              exposed_methods=['bar'],
              success=False, ex=oslo_messaging.NoSuchMethod)),
        ('message_with_endpoint_explicitly_exposed_with_exposed_method',
         dict(endpoints=[dict(namespace='testns',
                              legacy_namespaces=['second', None], )],
              access_policy=oslo_messaging.ExplicitRPCAccessPolicy,
              dispatch_to=dict(endpoint=0, method='bar'),
              ctxt={}, msg=dict(method='bar', namespace='testns'),
              exposed_methods=['bar'],
              success=True, ex=None)),
    ]

    def test_dispatcher(self):
        def _set_endpoint_mock_properties(endpoint):
            endpoint.foo = mock.Mock(spec=dir(_FakeEndpoint.foo))
            # mock doesn't pick up the decorated method.
            endpoint.bar = mock.Mock(spec=dir(_FakeEndpoint.bar))
            endpoint.bar.exposed = mock.PropertyMock(return_value=True)
            endpoint._foobar = mock.Mock(spec=dir(_FakeEndpoint._foobar))
            return endpoint

        # Build one mock endpoint per scenario entry, targeted accordingly.
        endpoints = [_set_endpoint_mock_properties(mock.Mock(
            spec=_FakeEndpoint, target=oslo_messaging.Target(**e)))
            for e in self.endpoints]
        serializer = None
        dispatcher = oslo_messaging.RPCDispatcher(endpoints, serializer,
                                                  self.access_policy)
        incoming = mock.Mock(ctxt=self.ctxt, message=self.msg,
                             client_timeout=0)
        res = None
        try:
            res = dispatcher.dispatch(incoming)
        except Exception as ex:
            # Failure path: the exception type/details must match the scenario.
            self.assertFalse(self.success, ex)
            self.assertIsNotNone(self.ex, ex)
            self.assertIsInstance(ex, self.ex, ex)
            if isinstance(ex, oslo_messaging.NoSuchMethod):
                self.assertEqual(self.msg.get('method'), ex.method)
            elif isinstance(ex, oslo_messaging.UnsupportedVersion):
                self.assertEqual(self.msg.get('version', '1.0'),
                                 ex.version)
                if ex.method:
                    self.assertEqual(self.msg.get('method'), ex.method)
        else:
            self.assertTrue(self.success,
                            "Unexpected success of operation during testing")
            self.assertIsNotNone(res)
        # Exactly the expected endpoint method is invoked, exactly once;
        # every other exposed method stays untouched.
        for n, endpoint in enumerate(endpoints):
            for method_name in self.exposed_methods:
                method = getattr(endpoint, method_name)
                if self.dispatch_to and n == self.dispatch_to['endpoint'] and \
                        method_name == self.dispatch_to['method'] and \
                        method_name in self.exposed_methods:
                    method.assert_called_once_with(
                        self.ctxt, **self.msg.get('args', {}))
                else:
                    self.assertEqual(0, method.call_count,
                                     'method: {}'.format(method))
class TestDispatcherWithPingEndpoint(test_utils.BaseTestCase):
    """Behaviour of the built-in oslo_rpc_server_ping method."""

    def test_dispatcher_with_ping(self):
        self.config(rpc_ping_enabled=True)
        dispatcher = oslo_messaging.RPCDispatcher([], None, None)
        incoming = mock.Mock(ctxt={},
                             message=dict(method='oslo_rpc_server_ping'),
                             client_timeout=0)

        res = dispatcher.dispatch(incoming)
        self.assertEqual('pong', res)

    def test_dispatcher_with_ping_already_used(self):
        # An endpoint that defines its own oslo_rpc_server_ping takes
        # precedence over the built-in ping handler.
        class MockEndpoint(object):
            def oslo_rpc_server_ping(self, ctxt, **kwargs):
                return 'not_pong'

        mockEndpoint = MockEndpoint()
        self.config(rpc_ping_enabled=True)
        dispatcher = oslo_messaging.RPCDispatcher([mockEndpoint], None, None)
        incoming = mock.Mock(ctxt={},
                             message=dict(method='oslo_rpc_server_ping'),
                             client_timeout=0)

        res = dispatcher.dispatch(incoming)
        self.assertEqual('not_pong', res)
class TestSerializer(test_utils.BaseTestCase):
    """Check that dispatch deserializes the context and each argument, and
    serializes the return value, through the configured serializer."""

    scenarios = [
        ('no_args_or_retval',
         dict(ctxt={}, dctxt={}, args={}, retval=None)),
        ('args_and_retval',
         dict(ctxt=dict(user='bob'),
              dctxt=dict(user='alice'),
              args=dict(a='a', b='b', c='c'),
              retval='d')),
    ]

    def test_serializer(self):
        endpoint = _FakeEndpoint()
        serializer = msg_serializer.NoOpSerializer()
        dispatcher = oslo_messaging.RPCDispatcher([endpoint], serializer)
        endpoint.foo = mock.Mock()

        # The endpoint must receive the deserialized ('d'-prefixed) args.
        args = dict([(k, 'd' + v) for k, v in self.args.items()])
        endpoint.foo.return_value = self.retval

        serializer.serialize_entity = mock.Mock()
        serializer.deserialize_entity = mock.Mock()
        serializer.deserialize_context = mock.Mock()

        serializer.deserialize_context.return_value = self.dctxt
        expected_side_effect = ['d' + arg for arg in self.args]
        serializer.deserialize_entity.side_effect = expected_side_effect
        # Serialized return value is 's'-prefixed (None stays None).
        serializer.serialize_entity.return_value = None
        if self.retval:
            serializer.serialize_entity.return_value = 's' + self.retval

        incoming = mock.Mock()
        incoming.ctxt = self.ctxt
        incoming.message = dict(method='foo', args=self.args)
        incoming.client_timeout = 0
        retval = dispatcher.dispatch(incoming)
        if self.retval is not None:
            self.assertEqual('s' + self.retval, retval)
        endpoint.foo.assert_called_once_with(self.dctxt, **args)
        serializer.deserialize_context.assert_called_once_with(self.ctxt)
        expected_calls = [mock.call(self.dctxt, arg) for arg in self.args]
        self.assertEqual(expected_calls,
                         serializer.deserialize_entity.mock_calls)
        serializer.serialize_entity.assert_called_once_with(self.dctxt,
                                                            self.retval)
class TestMonitorFailure(test_utils.BaseTestCase):
    """Test what happens when the call monitor watchdog hits an exception when
    sending the heartbeat.
    """

    class _SleepyEndpoint(object):
        def __init__(self, target=None):
            self.target = target

        def sleep(self, ctxt, **kwargs):
            # Block long enough for several heartbeat attempts to fire.
            time.sleep(kwargs['timeout'])
            return True

    def test_heartbeat_failure(self):
        endpoints = [self._SleepyEndpoint()]
        dispatcher = oslo_messaging.RPCDispatcher(endpoints,
                                                  serializer=None)

        # sleep long enough for the client_timeout to expire multiple times
        # the timeout is (client_timeout/2) and must be > 1.0
        message = {'method': 'sleep',
                   'args': {'timeout': 3.5}}
        ctxt = {'test': 'value'}
        incoming = mock.Mock(ctxt=ctxt, message=message, client_timeout=2.0)
        incoming.heartbeat = mock.Mock(side_effect=Exception('BOOM!'))

        # The dispatch itself must still succeed despite the failing watchdog.
        res = dispatcher.dispatch(incoming)
        self.assertTrue(res)

        # only one call to heartbeat should be made since the watchdog thread
        # should exit on the first exception thrown
        self.assertEqual(1, incoming.heartbeat.call_count)
| 2 | 2 |
notebooks/96.0-BDP-try-leiden.py | zeou1/maggot_models | 0 | 12770590 | # %% [markdown]
# #
import os
import pickle
import warnings
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
import colorcet as cc
import community as cm
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from matplotlib.cm import ScalarMappable
from sklearn.model_selection import ParameterGrid
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import symmetrize
from src.data import load_everything, load_metagraph, load_networkx
from src.embed import lse, preprocess_graph
from src.graph import MetaGraph, preprocess
from src.hierarchy import signal_flow
from src.io import savefig, saveobj, saveskels, savecsv
from src.utils import get_blockmodel_df, get_sbm_prob
from src.visualization import (
CLASS_COLOR_DICT,
CLASS_IND_DICT,
barplot_text,
bartreeplot,
draw_networkx_nice,
get_color_dict,
get_colors,
palplot,
probplot,
sankey,
screeplot,
stacked_barplot,
random_names,
)
FNAME = os.path.basename(__file__)[:-3]  # script name without the .py suffix
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2020-03-02"
BLIND = True
SAVEFIGS = False
SAVESKELS = False
SAVEOBJS = True
# Fixed seed so community-detection runs are reproducible.
np.random.seed(9812343)
sns.set_context("talk")
def stashfig(name, **kws):
    """Save the current matplotlib figure into this script's output folder, then close it."""
    savefig(name, foldername=FNAME, save_on=True, **kws)
    plt.close()
def stashcsv(df, name, **kws):
    """Save a dataframe as CSV into this script's output folder."""
    savecsv(df, name, foldername=FNAME, save_on=True, **kws)
def stashskel(name, ids, labels, colors=None, palette=None, **kws):
    """Save skeleton annotations (no-op unless SAVESKELS is True).

    NOTE(review): `palette=None` is always forwarded, so the `palette`
    parameter is silently ignored -- possibly a bug; confirm intent.
    """
    saveskels(
        name,
        ids,
        labels,
        colors=colors,
        palette=None,
        foldername=FNAME,
        save_on=SAVESKELS,
        **kws,
    )
def stashobj(obj, name, **kws):
    """Persist an arbitrary object into this script's output folder (gated by SAVEOBJS)."""
    saveobj(obj, name, foldername=FNAME, save_on=SAVEOBJS, **kws)
graph_type = "G"
threshold = 3
binarize = True
# load and preprocess the data
mg = load_metagraph(graph_type, version=BRAIN_VERSION)
mg = preprocess(
mg, threshold=threshold, sym_threshold=True, remove_pdiff=True, binarize=binarize
)
#%%
import leidenalg as la
import igraph as ig
def _process_metagraph(mg, temp_loc):
    """Symmetrize the metagraph's adjacency (averaging) and write it to GraphML at temp_loc."""
    adj = mg.adj
    adj = symmetrize(adj, method="avg")
    mg = MetaGraph(adj, mg.meta)
    nx.write_graphml(mg.g, temp_loc)
def run_leiden(
    mg,
    temp_loc=None,
    implementation="igraph",
    partition_type=la.CPMVertexPartition,
    **kws,
):
    """Run Leiden community detection on a metagraph.

    The graph is round-tripped through a temporary GraphML file so igraph can
    read it.  ``implementation`` selects igraph's built-in community_leiden
    or the leidenalg package (with ``partition_type``); extra kwargs are
    forwarded to the chosen implementation.

    Returns:
        (partition, modularity): partition is a pd.Series mapping node id to
        community label.
    """
    if temp_loc is None:
        # Random suffix to avoid collisions between concurrent runs.
        temp_loc = f"maggot_models/data/interim/temp-{np.random.randint(1e8)}.graphml"
    else:
        temp_loc = f"maggot_models/data/interim/{temp_loc}.graphml"
    _process_metagraph(mg, temp_loc)
    g = ig.Graph.Read_GraphML(temp_loc)
    os.remove(temp_loc)
    nodes = [int(v["id"]) for v in g.vs]
    if implementation == "igraph":
        vert_part = g.community_leiden(**kws)
    elif implementation == "leidenalg":
        vert_part = la.find_partition(g, partition_type, **kws)
    labels = vert_part.membership
    partition = pd.Series(data=labels, index=nodes)
    return partition, vert_part.modularity
# %% [markdown]
# # Baseline: igraph multilevel (Louvain) partition via the same GraphML round-trip.
temp_loc = f"maggot_models/data/interim/temp-{np.random.randint(1e8)}.graphml"
_process_metagraph(mg, temp_loc)
g = ig.Graph.Read_GraphML(temp_loc)
os.remove(temp_loc)
nodes = [int(v["id"]) for v in g.vs]
vert_part = g.community_multilevel()
labels = vert_part.membership
partition = pd.Series(data=labels, index=nodes)
# %% [markdown]
# # Leiden partition (overwrites the Louvain partition above).
partition, modularity = run_leiden(
    mg,
    implementation="igraph",
    resolution_parameter=0.1,
    beta=0.1,
    partition_type=la.CPMVertexPartition,
    weights="weight",
    n_iterations=-1,
)
print(partition.nunique())
# %% [markdown]
# # Align the partition with the metagraph metadata and pull plotting labels.
pred_labels = partition
pred_labels = pred_labels[pred_labels.index.isin(mg.meta.index)]
partition = pred_labels.astype(int)
class_labels = mg["Merge Class"]
lineage_labels = mg["lineage"]
basename = ""
title = ""
def augment_classes(class_labels, lineage_labels, fill_unk=True):
    """Merge class and lineage labels and build color/hatch maps for plotting.

    When fill_unk is True, cells labeled "unk" take their lineage label
    instead; lineages get colors from the unused part of the glasbey
    palette, and lineage labels (those prefixed "~") are hatched.

    NOTE(review): when fill_unk is False, ``classlin_labels`` is never
    assigned, so the final return raises NameError; also ``color_dict``
    becomes the string "class" rather than a dict -- confirm downstream
    handles that mode, or remove it.
    """
    if fill_unk:
        classlin_labels = class_labels.copy()
        fill_inds = np.where(class_labels == "unk")[0]
        classlin_labels[fill_inds] = lineage_labels[fill_inds]
        used_inds = np.array(list(CLASS_IND_DICT.values()))
        # Palette entries not already claimed by known classes.
        unused_inds = np.setdiff1d(range(len(cc.glasbey_light)), used_inds)
        lineage_color_dict = dict(
            zip(np.unique(lineage_labels), np.array(cc.glasbey_light)[unused_inds])
        )
        color_dict = {**CLASS_COLOR_DICT, **lineage_color_dict}
        hatch_dict = {}
        for key, val in color_dict.items():
            if key[0] == "~":
                hatch_dict[key] = "//"
            else:
                hatch_dict[key] = ""
    else:
        color_dict = "class"
        hatch_dict = None
    return classlin_labels, color_dict, hatch_dict
# Prefix lineage labels with "~" so they are distinguishable (and hatched) in plots.
lineage_labels = np.vectorize(lambda x: "~" + x)(lineage_labels)
classlin_labels, color_dict, hatch_dict = augment_classes(class_labels, lineage_labels)
# TODO then sort all of them by proportion of sensory/motor
# barplot by merge class and lineage
_, _, order = barplot_text(
    partition,
    classlin_labels,
    color_dict=color_dict,
    plot_proportions=False,
    norm_bar_width=True,
    figsize=(24, 18),
    title=title,
    hatch_dict=hatch_dict,
    return_order=True,
)
stashfig(basename + "barplot-mergeclasslin-props")
plt.close()
# Reuse the category ordering from the first plot for all subsequent plots.
category_order = np.unique(partition)[order]
fig, axs = barplot_text(
    partition,
    class_labels,
    color_dict=color_dict,
    plot_proportions=False,
    norm_bar_width=True,
    figsize=(24, 18),
    title=title,
    hatch_dict=None,
    category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-props")
# Same plot with raw counts instead of normalized bar widths.
fig, axs = barplot_text(
    partition,
    class_labels,
    color_dict=color_dict,
    plot_proportions=False,
    norm_bar_width=False,
    figsize=(24, 18),
    title=title,
    hatch_dict=None,
    category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-counts")
plt.close()
# TODO add gridmap
# Blockmodel connection-probability matrix between the detected communities.
counts = False
weights = False
prob_df = get_blockmodel_df(
    mg.adj, partition, return_counts=counts, use_weights=weights
)
prob_df = prob_df.reindex(category_order, axis=0)
prob_df = prob_df.reindex(category_order, axis=1)
probplot(100 * prob_df, fmt="2.0f", figsize=(20, 20), title=title, font_scale=0.7)
stashfig(basename + f"probplot-counts{counts}-weights{weights}")
plt.close()
tools/map_extractor.py | swiss-avalanches/swiss-avalanches.github.io | 4 | 12770591 | <reponame>swiss-avalanches/swiss-avalanches.github.io<gh_stars>1-10
import numpy as np
import argparse
import json
from pylab import contour
from PIL import ImageFilter, Image, ImageDraw
import cv2
from skimage import measure
import os
import json
import visvalingamwyatt as vw
import glob
import sys
from pathlib import Path
import pandas as pd
from scipy.spatial import distance
from scipy.misc import imsave
from pathos.multiprocessing import ProcessingPool as Pool
# Color definitions (RGB); the bulletin GIFs use these exact palette values.
black = np.array([0, 0, 0])
white = np.array([255, 255, 255])
green = np.array([204, 255, 102])
yellow = np.array([255, 255, 0])
orange = np.array([255, 153, 0])
red = np.array([255, 0, 0])
# Danger scale from lowest (green) to highest (red); index = danger level - 1.
color_scale = [green, yellow, orange, red]

# Pure primary/secondary colors used to mark landmarks in the mask files.
# (A duplicated raw_pink assignment was removed here.)
raw_red = np.array([255, 0, 0])
raw_green = np.array([0, 255, 0])
raw_blue = np.array([0, 0, 255])
raw_pink = np.array([255, 0, 255])
raw_cyan = np.array([0, 255, 255])
raw_yellow = np.array([255, 255, 0])

# Geographic reference points (lon, lat) used to calibrate the pixel->map transform.
leman_west = (6.148131, 46.206042)
quatre_canton_north = (8.435177, 47.082150)
majeur_east = (8.856851, 46.151857)
east_end = (10.472221, 46.544303)
constance_nw = (9.035247, 47.812716)

landmarks_colors = {
    leman_west: raw_red,
    quatre_canton_north: raw_green,
    majeur_east: raw_blue,
    constance_nw: raw_pink,
    east_end: raw_yellow,
}

# remove contour areas that have more than 60% of white
WHITE_RATIO_THRESHOLD = .6
GRAY_STD_THRESHOLD = 10
SMOOTHING_THRESHOLD = 0.0001
def keep_colors(img, colors, replace_with=white):
    """Return a copy of *img* where every pixel not matching one of *colors*
    is overwritten with *replace_with* (white by default)."""
    mask = np.zeros(img.shape[:2], dtype=bool)
    for color in colors:
        mask |= (color == img).all(axis=-1)
    result = img.copy()
    result[~mask] = replace_with
    return result
def remove_colors(img, colors, replace_with=white):
    """Return a copy of *img* where every pixel matching one of *colors*
    is overwritten with *replace_with* (white by default)."""
    mask = np.zeros(img.shape[:2], dtype=bool)
    for color in colors:
        mask |= (color == img).all(axis=-1)
    result = img.copy()
    result[mask] = replace_with
    return result
def replace_color(img, color_map):
    """return a new image replacing the image colors which will be mapped to their corresponding colors in `color_map`"""
    # NOTE(review): this definition is shadowed by an identical `replace_color`
    # defined further down in this module; one of the two should be removed.
    new_img = img.copy()
    for _, (source, target) in color_map.iterrows():
        new_img[(img == source).all(axis=-1)] = target
    return new_img
def numpify(o):
    """Return *o* unchanged if it is already an ndarray, otherwise wrap it with np.array."""
    return o if isinstance(o, np.ndarray) else np.array(o)
def remove_grey(img):
    """Whiten near-grey pixels (per-pixel channel std below GRAY_STD_THRESHOLD)."""
    grey_mask = np.std(img, axis=-1) < GRAY_STD_THRESHOLD
    result = img.copy()
    result[grey_mask] = 255
    return result
def coord_color(img, color):
    """Return the (row, col) coordinates of pixels exactly matching *color*, as an (n, 2) array."""
    rows, cols = (img == color).all(-1).nonzero()
    return np.array(list(zip(rows, cols)))
def open_mask(height, width):
    """Load the landmark/clipping mask matching a map of the given pixel size.

    Returns:
        binary_mask: (height, width) bool array, True where the mask differs
            from white (i.e. inside the usable map area).
        landmarks_pix: dict mapping each (lon, lat) landmark to its
            (x_pix, y_pix) position found by marker color in the mask.

    Raises:
        FileNotFoundError: when no '<height>x<width>.gif' mask exists.
    """
    masks_path = args.masks_directory
    mask_name = '{}x{}.gif'.format(height, width)
    mask_path = os.path.join(masks_path, mask_name)
    mask = Image.open(mask_path)
    mask = mask.convert('RGB')
    mask = np.array(mask)
    # coord_color yields (row, col); stored as (x, y) = (col, row).
    # NOTE: the comprehension re-binds height/width locally, shadowing the parameters.
    landmarks_pix = {
        geo_point: (width, height)
        for geo_point, color in landmarks_colors.items()
        for height, width in coord_color(mask, color)
    }
    binary_mask = (mask != 255).any(-1)  # any channel differs from white
    return binary_mask, landmarks_pix
def replace_color(img, color_map):
    """Return a copy of *img* where each color listed in color_map['source']
    is replaced by the matching color in color_map['target'] (a DataFrame)."""
    result = img.copy()
    for _, (src, dst) in color_map.iterrows():
        result[(img == src).all(axis=-1)] = dst
    return result
def build_color_map(img_arr, image_shades):
    """Map every distinct color of *img_arr* to its nearest color in *image_shades*.

    Returns a DataFrame with 'source' (original color, as a list) and
    'target' (closest shade by squared euclidean distance, as a list of
    floats) columns, one row per distinct color, in first-occurrence order.
    """
    shades = np.asarray(image_shades)
    # Collect distinct pixel colors in row-major scan order; this replaces
    # the old per-pixel np.ndindex loop with a single reshape.
    flat = img_arr.reshape(-1, img_arr.shape[-1])
    image_colors = pd.DataFrame(flat).drop_duplicates().to_numpy()
    # (DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement.)
    dist = distance.cdist(image_colors, shades, 'sqeuclidean')
    # Nearest shade per distinct color; cast to float to keep the historical
    # output dtype (targets were stored into a float array).
    colors = shades[dist.argmin(axis=1)].astype(float)
    color_map = pd.DataFrame(
        {'source': image_colors.tolist(),
         'target': colors.tolist()
         })
    return color_map
def color_contours(img, color):
    """Find closed contours around the regions of *img* having exactly *color*.

    Contours whose enclosed area is mostly white (ratio above
    WHITE_RATIO_THRESHOLD) are discarded as artifacts such as the map legend.
    Returns a list of (n, 2) arrays of (row, col) contour points.
    """
    img = numpify(img)
    color = numpify(color)
    mask = (img == color[:3]).all(axis=-1)
    # Image with only the target color kept; used to measure whiteness inside contours.
    monocholor = img.copy()
    monocholor[~mask] = 255
    contours = measure.find_contours(mask, 0.5)
    # heuristic filter for contours
    filter_contours = []
    for c in contours:
        # Rasterize the contour polygon (PIL wants (x, y), hence the swap).
        region = Image.new("L", [img.shape[1], img.shape[0]], 0)
        ImageDraw.Draw(region).polygon(list(map(lambda t: (t[1],t[0]), c)), fill=1)
        region = np.array(region).astype(bool)
        white_ratio = (monocholor == 255).all(axis=-1)[region].mean()
        if white_ratio <= WHITE_RATIO_THRESHOLD:
            filter_contours.append(c)
    return filter_contours
def main(args):
    """Convert every avalanche-bulletin GIF under args.maps_directory into a
    GeoJSON file of danger-level polygons written to args.out_path."""
    def function(year, args=args):
        # Process all maps of a single year (executed in a worker process).
        extensions = ['nbk', 'gk']
        language = ['de', 'en', 'fr']
        file_type = ['gif']
        origin_paths = []
        url = "https://www.slf.ch/fileadmin/user_upload/import/lwdarchiv/public"
        urls = []
        # for y in years:
        y = year
        # Collect the existing <year>/<ext>/<lang>/gif directories and the
        # matching source URLs (kept index-aligned with origin_paths).
        for ext in extensions:
            for lan in language:
                for f_type in file_type:
                    origin = os.path.join(*[args.maps_directory,y, ext, lan, f_type])#,"*."+f_type])
                    if(Path(origin).exists()):
                        origin_paths.append(os.path.join(*[origin, "*."+f_type]))
                        urls.append("/".join([url, y, ext, lan, f_type]))
        for i, origin in enumerate(origin_paths):
            for file_map in glob.glob(origin):
                basename = os.path.basename(file_map)
                filename = '{}.json'.format(os.path.splitext(os.path.basename(file_map))[0])
                destination = os.path.join(args.out_path, filename)
                file_url = urls[i]+"/"+basename
                if Path(destination).exists() and not args.f:
                    print('Skip {} because {} already exists'.format(file_map, destination))
                    continue
                img = Image.open(file_map)
                img = img.convert('RGB')
                img_arr = np.array(img)
                # Snap every pixel to the nearest palette color (white + danger scale).
                img_no_gray = remove_grey(img_arr)
                color_map = build_color_map(img_no_gray, [white] + color_scale)
                img_projected = replace_color(img_no_gray, color_map)
                # load mask of this size
                try:
                    binary_mask, landmarks_pix = open_mask(*img_arr.shape[:2])
                except FileNotFoundError:
                    print('Missing mask "{}x{}.gif" for file "{}"'.format(*img_arr.shape[:2], file_map), file=sys.stderr)
                    continue
                # keep useful colors
                regions_only = keep_colors(img_projected, color_scale)
                # clip the binary mask to remove color key
                regions_only[~binary_mask] = 255
                regions_only = Image.fromarray(regions_only).convert('RGB')
                smoothed = regions_only.filter(ImageFilter.MedianFilter(7))
                # Least-squares affine fit from landmark pixel positions to (lon, lat).
                pix = np.array(list(map(numpify, landmarks_pix.values())))
                coord = np.array(list(map(numpify, landmarks_pix.keys())))
                # add 1 bias raw
                pix_ext = np.vstack([np.ones((1,pix.shape[0])), pix.T])
                coord_ext = np.vstack([np.ones((1,pix.shape[0])), coord.T])
                T = np.linalg.lstsq(pix_ext.T, coord_ext.T)[0]
                def transform_pix2map(points):
                    """n x 2 array"""
                    points_ext = np.hstack([np.ones((points.shape[0], 1)), points])
                    points_map = points_ext.dot(T)
                    return points_map[:, 1:]
                geo_json = {
                    "type": "FeatureCollection",
                    "features": []
                }
                for danger_level, color in enumerate([green, yellow, orange, red]):
                    for contour in color_contours(smoothed, color):
                        # Swap (row, col) to (x, y) before mapping to geo coordinates.
                        contour_right = contour.copy()
                        contour_right[:,0] = contour[:,1]
                        contour_right[:,1] = contour[:,0]
                        contour_right = transform_pix2map(contour_right)
                        # Simplify the polygon to keep the GeoJSON small.
                        simplifier = vw.Simplifier(contour_right)
                        contour_right = simplifier.simplify(threshold=SMOOTHING_THRESHOLD)
                        geo_json['features'].append({
                            "type": "Feature",
                            "properties": {
                                # Filename starts with yyyymmdd; emit dd.mm.yyyy.
                                "date": ".".join([basename[6:8], basename[4:6], basename[0:4]]),
                                "danger_level": danger_level + 1,
                                "url": file_url
                            },
                            "geometry": {
                                "type": "Polygon",
                                "coordinates": [ list(reversed(contour_right.tolist())) ]
                            }
                        })
                with open(destination, 'w') as f:
                    print('{} -> {}'.format(file_map, destination))
                    json.dump(geo_json, f)
    # Four worker processes; each processes one year at a time.
    with Pool(4) as p:
        p.map(function, [str(i) for i in range(2002, 2018)])
if __name__ == '__main__':
    # CLI entry point: parse paths and flags, then run the extraction.
    parser = argparse.ArgumentParser(description='Extract danger map to JSON.')
    parser.add_argument('maps_directory', type=str, help='directory of GIF file of the map')
    parser.add_argument('masks_directory', type=str, help='directory of GIF file of the mask')
    parser.add_argument('out_path', type=str, help='destination directory')
    parser.add_argument('-f', action='store_true', help='override existing JSON files')
    args = parser.parse_args()
    main(args)
| 2 | 2 |
charfred.py | jinkhya/Charfred_Bot | 0 | 12770592 | <filename>charfred.py
import asyncio
import click
import logging
import coloredlogs
import traceback
import datetime
import aiohttp
import os
from pathlib import Path
from discord.ext import commands
from discord import ClientException, Intents
from utils import Config, CharfredContext
# Module-level logger for the bot.
log = logging.getLogger('charfred')

# Use uvloop's faster event loop when available; fall back silently otherwise.
try:
    import uvloop
except ImportError:
    pass
else:
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    log.info('uvloop imported, oh yeah! *high five*')

# Shown by the help command as the bot description.
description = """
Charfred is a gentleman and a scholar,
he will do whatever you ask of him to the best of his abilities,
however he can be quite rude sometimes.
"""
def _admincogs(direc):
for dirpath, _, filenames in os.walk(direc):
if '__' in dirpath:
continue
else:
for filename in filenames:
if filename.endswith('.py'):
yield os.path.join(dirpath, filename[:-3])
def _get_prefixes(bot, msg):
bot_id = bot.user.id
prefixes = [f'<@{bot_id}> ', f'<@!{bot_id}> ']
if msg.guild:
try:
prefixes.extend(bot.cfg['prefix'][str(msg.guild.id)])
except KeyError:
pass
return prefixes
class Charfred(commands.Bot):
    """Discord bot with TOML-backed configuration, auto-loaded admin cogs and
    a custom command context."""

    def __init__(self):
        super().__init__(command_prefix=_get_prefixes, description=description,
                         pm_help=False, intents=Intents.all())
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.dir = Path(__file__).parent
        self.cfg = Config(f'{self.dir}/configs/botCfg.toml',
                          load=True, loop=self.loop)
        # Ensure all mandatory config sections exist before first use.
        if 'prefix' not in self.cfg:
            self.cfg['prefix'] = {}
        if 'nodes' not in self.cfg:
            self.cfg['nodes'] = {}
        if 'hierarchy' not in self.cfg:
            self.cfg['hierarchy'] = []
        if 'cogcfgs' not in self.cfg:
            self.cfg['cogcfgs'] = {}
        self.cfg._save()
        self.keywords = Config(f'{self.dir}/configs/keywords.json',
                               load=True, loop=self.loop,
                               default=f'{self.dir}/configs/keywords.json_default')
        try:
            # Load every cog found under ./admincogs as a dotted module path.
            os.chdir(self.dir)
            for admincog in _admincogs('admincogs'):
                self.load_extension(admincog.replace('/', '.').replace('\\', '.'))
        except ClientException:
            log.critical('Could not load administrative cogs!')
        except ImportError:
            log.critical('Administrative cogs could not be imported!')
            traceback.print_exc()

    def register_nodes(self, nodes):
        # Register permission nodes; value stays None until configured.
        for node in nodes:
            if node not in self.cfg['nodes']:
                self.cfg['nodes'][node] = None

    def register_cfg(self, cfg, prompt=None, defaultvalue=None):
        # Register a cog config entry as a (default value, prompt) pair.
        if cfg not in self.cfg['cogcfgs']:
            self.cfg['cogcfgs'][cfg] = (defaultvalue, prompt)

    async def get_context(self, message, *, cls=CharfredContext):
        # Use the project's custom context class for every command.
        return await super().get_context(message, cls=cls)

    async def on_command(self, ctx):
        # Log every invoked command with its author.
        log.info(f'[{ctx.author.name}]: {ctx.message.content}')

    async def on_ready(self):
        log.info(f'{self.user} reporting for duty!')
        log.info(f'ID: {self.user.id}')
        # Record startup time once; reconnects keep the original uptime.
        if not hasattr(self, 'uptime'):
            self.uptime = datetime.datetime.now()

    async def on_message(self, message):
        # Ignore bots; in DMs, only the owner may issue commands.
        if message.author.bot:
            return
        if message.guild is None:
            is_owner = await self.is_owner(message.author)
            if not is_owner:
                return
        ctx = await self.get_context(message)
        await self.invoke(ctx)

    async def close(self):
        # Shut down the gateway connection first, then the HTTP session.
        log.info('Shutting down, this may take a couple seconds...')
        await super().close()
        log.info('Client disconnected.')
        await self.session.close()
        log.info('Session closed.')
        log.info('All done, goodbye sir!')

    def run(self, token=None):
        # Fall back to the stored token; persist a newly supplied one.
        if token is None:
            log.info('Using pre-configured Token...')
            try:
                token = self.cfg['botToken']
            except KeyError:
                log.error('No token given, no token saved, abort!')
                return
        else:
            self.cfg['botToken'] = token
            self.cfg._save()
            log.info('Token saved for future use!')
        super().run(token, reconnect=True)
@click.command()
@click.option('--loglvl', default="DEBUG", help='Logging Level')
@click.option('--token', default=None, help='Discord Bot Token')
def run(loglvl, token):
    """CLI entry point: configure colored logging and start the bot."""
    coloredlogs.install(level=loglvl,
                        logger=log,
                        fmt='%(asctime)s:%(msecs)03d [%(name)s]: %(levelname)s %(message)s')
    log.info('Initializing Charfred!')
    char = Charfred()
    char.run(token)
| 2.15625 | 2 |
examples/example_time_course_kspace_affines.py | GReguig/torchio | 0 | 12770593 | <reponame>GReguig/torchio
from torchio.transforms.augmentation.intensity.random_motion_kspace_time_course import RandomMotionTimeCourseAffines
from torchio import Image, ImagesDataset, transforms, INTENSITY, LABEL
from torchvision.transforms import Compose
import numpy as np
from nibabel.viewers import OrthoSlicer3D as ov
from copy import deepcopy
np.random.seed(12)

out_dir = '/data/ghiles/'
# One subject: a T1 image (intensity) plus its brain mask (label).
subject = [[
    Image('T1', '/data/romain/HCPdata/suj_100307/T1w_1mm.nii.gz', INTENSITY),
    Image('mask', '/data/romain/HCPdata/suj_100307/brain_mT1w_1mm.nii', LABEL)
]]

subjects_list = [subject]
dataset = ImagesDataset(subject)
sample = dataset[0]
#sample = deepcopy(sample_orig)

# Motion time course: 100 time points; parameter index 1 jumps to -15 after
# 55% of the course (units depend on the transform's convention -- TODO confirm).
nT = 100
time_points = [.55, 1.0]
fitpars = np.zeros((6, nT))
fitpars[1, 55:] = -15
#fitpars[dim_modif, :45] = -7.5
#fitpars[dim_modif, 45:] = 7.5
#ov(sample["T1"]["data"][0], sample["T1"]["affine"])

# Apply the same motion transform once per combine_axis value and save each result.
transform = RandomMotionTimeCourseAffines(fitpars=fitpars, time_points=time_points, pct_oversampling=0.30, verbose=True, combine_axis=0)
transformed = transform(sample)
dataset.save_sample(transformed, dict(T1='/home/romain.valabregue/tmp/mot/t1_motion_axis0.nii.gz'))

# Reload a fresh sample for each axis so transforms do not compound.
sample = dataset[0]
transform = RandomMotionTimeCourseAffines(fitpars=fitpars, time_points=time_points, pct_oversampling=0.30, verbose=True, combine_axis=1)
transformed = transform(sample)
dataset.save_sample(transformed, dict(T1='/home/romain.valabregue/tmp/mot/t1_motion_axis1.nii.gz'))

sample = dataset[0]
transform = RandomMotionTimeCourseAffines(fitpars=fitpars, time_points=time_points, pct_oversampling=0.30, verbose=True, combine_axis=2)
transformed = transform(sample)
dataset.save_sample(transformed, dict(T1='/home/romain.valabregue/tmp/mot/t1_motion_axis2.nii.gz'))
#ov(transformed["T1"]["data"][0], sample["T1"]["affine"])
main.py | ryokamoi/sg_langevin | 9 | 12770594 | import os
import shutil
import argparse
import datetime
import tensorflow as tf
import model
from get_dataset import get_dataset
from visualizer import Visualizer
# TF1-style eager mode so tensors can be evaluated with .numpy() below.
tf.enable_eager_execution()

parser = argparse.ArgumentParser(description='Stochastic Gradient Langevin Dynamics')
parser.add_argument('--hparams', type=str, default=None,
                    help='The name of a file containing comma separated list of "name=value" pairs.')
args = parser.parse_args()

# Fix the graph-level random seed for reproducibility.
tf.set_random_seed(1202)
def main():
    """Train logistic regression with stochastic gradient Langevin dynamics,
    then keep sampling at a fixed step size and save the visualized results."""
    if not os.path.isdir("log/"):
        os.makedirs("log/")
    now = datetime.datetime.today()
    logdir = "log/log%s/" % now.strftime("%Y%m%d-%H%M")
    os.makedirs(logdir)

    # tensorboard summary writer
    writer = tf.contrib.summary.create_file_writer(logdir)
    global_step = tf.train.get_or_create_global_step()
    writer.set_as_default()

    # read hyperparameters from file (defaults below; a file overrides them)
    hparams = tf.contrib.training.HParams(
        lr=0.1,
        model="SGLD_LR",
        epoch=10,
        batch_size=10)
    if args.hparams:
        shutil.copyfile(args.hparams, logdir + "params")
        hparams_from_file = ""
        with open(args.hparams, "r") as f:
            for l in f.readlines():
                hparams_from_file += l
        hparams.parse(hparams_from_file)

    # choose model
    if hparams.model == "SGLD_LR":
        nn = model.SGLD_LR(hparams)
        train_dataset, train_dataset_size = get_dataset(hparams.model, "train")
        val_dataset, val_dataset_size = get_dataset(hparams.model, "validation")
    else:
        # NOTE(review): raising a plain string raises TypeError, not this
        # message; should be `raise ValueError(...)`. Left unchanged here.
        raise "Invalid parameter for hparams.model"

    visualizer = Visualizer()

    # train with a decaying step size: epsilon_t = lr / (1 + 0.05 * t)
    epsilon_ = hparams.lr
    step = 0
    for epoch in range(hparams.epoch):
        train_dataset_iter = train_dataset.shuffle(train_dataset_size).batch(hparams.batch_size)
        for batch, data in enumerate(train_dataset_iter):
            global_step.assign_add(1)
            step += 1
            epsilon_ = hparams.lr / (1 + 0.05 * step)
            epsilon = tf.convert_to_tensor(epsilon_, tf.float32)
            loss = nn.loss(data["data"], data["label"]).numpy()
            accuracy = nn.accuracy(data["data"], data["label"]).numpy()
            # Store parameter samples before each update step.
            visualizer.store_results(nn)
            nn.update(data["data"], data["label"], epsilon, train_dataset_size)
            with tf.contrib.summary.record_summaries_every_n_global_steps(10):
                tf.contrib.summary.scalar('loss', loss)
                tf.contrib.summary.scalar('accuracy', accuracy)
                tf.contrib.summary.scalar('epsilon', epsilon)
                grads_vars = nn.grads_variances()
                for i in range(len(grads_vars)):
                    tf.contrib.summary.scalar('grads_var%d' % (i+1), grads_vars[i])
            print("epoch %3d\tbatch %4d\tloss %.4f\taccuracy %.4f" % (epoch+1, batch+1, loss, accuracy))

    # Langevin sampling phase: keep updating at the last epsilon and store
    # the samples for visualization (no summaries recorded here).
    for l_epoch in range(100):
        print("langevin epoch %3d" % (l_epoch+1))
        train_dataset_iter = train_dataset.shuffle(train_dataset_size).batch(hparams.batch_size)
        for batch, data in enumerate(train_dataset_iter):
            visualizer.store_results(nn)
            nn.update(data["data"], data["label"], epsilon, train_dataset_size)

    # visualize
    visualizer.save_results(logdir, train_dataset)
if __name__ == "__main__":
main()
| 2.375 | 2 |
plus/model/mlp.py | bioembeddings/PLUS | 36 | 12770595 | # Written by <NAME>, Seoul National University (<EMAIL>)
# Some parts of the code were referenced from or inspired by below
# - <NAME>'s code (https://github.com/tbepler/protein-sequence-embedding-iclr2019)
# PLUS
""" MLP model classes and functions """
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
    """Two-layer MLP head used for fine-tuning prediction tasks.

    When *per_seq* is True, an attention layer pools each sequence of
    embeddings into a single vector before classification; otherwise every
    position in a sequence is classified independently.
    """

    def __init__(self, cfg, per_seq=False):
        super(MLP, self).__init__()
        self.drop = nn.Dropout(cfg.dropout)
        self.relu = nn.ReLU()
        self.per_seq = per_seq
        if self.per_seq:
            # Scores each position; softmax over positions gives pooling weights.
            self.attention = nn.Linear(cfg.input_dim, 1)
        self.hidden = nn.Linear(cfg.input_dim, cfg.hidden_dim)
        self.output = nn.Linear(cfg.hidden_dim, cfg.num_classes)

    def forward(self, X):
        """Return a list of logits, one entry per input sequence in X."""
        logits = []
        for seq in X:
            if self.per_seq:
                weights = F.softmax(self.attention(seq), 1).expand_as(seq)
                seq = torch.sum(seq * weights, 0)
            out = self.output(self.drop(self.relu(self.hidden(seq))))
            logits.append(out if self.per_seq else out.unsqueeze(0))
        return logits

    def load_weights(self, pretrained_model):
        """Load a CPU-mapped checkpoint, stripping any DataParallel 'module.' prefix."""
        state_dict = {
            (key[7:] if key.startswith("module") else key): value
            for key, value in torch.load(pretrained_model, map_location=torch.device('cpu')).items()
        }
        self.load_state_dict(state_dict)
| 2.734375 | 3 |
Calculators/College/debt calc.py | EnderBro1000/Scripts | 2 | 12770596 | <reponame>EnderBro1000/Scripts<gh_stars>1-10
# Student-loan payoff estimator: months to repay and total interest paid,
# given a salary and the share of income put toward the debt.

debt = 95000                     # starting principal [$]
interest_percent = .059          # nominal yearly interest rate
yearly_salary = None             # set EITHER yearly OR hourly salary
hourly_salary = 20
debt_repayment_percent = .2      # share of monthly income used for repayment
debt_repayment_length = 0        # months until paid off (computed below)
work_days = 261                  # working days per year
work_hours = 8                   # working hours per day
interest = 0                     # total interest accrued (computed below)

# The original check was `if yearly_salary and hourly_salary is not None:` --
# operator precedence made it a no-op whenever yearly_salary was falsy.
if yearly_salary is not None and hourly_salary is not None:
    if yearly_salary != hourly_salary * work_days * work_hours:
        raise ValueError('only yearly or hourly salary can be defined')
if yearly_salary is None:
    yearly_salary = hourly_salary * (work_days * work_hours)
else:
    hourly_salary = yearly_salary / (work_days * work_hours)

while debt > 0:
    debt_repayment_length += 1
    # Accrue one month of interest, then make the monthly payment.
    # (The original added interest computed on the already-compounded
    # balance, over-counting the accrued interest.)
    accrued = debt * interest_percent * (1 / 12)
    interest += accrued
    debt += accrued
    debt -= yearly_salary / 12 * debt_repayment_percent

print(f'{debt_repayment_length} months, paying ${round(interest)} in interest')
cli/preprocess.py | Guitaricet/new-semantic-parsing | 0 | 12770597 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Preprocess text data and save binary Dataset objects along with tokenizers to a directory.
Should NOT be used for class-incremental scenario, because of arbitrary vocabulary order.
"""
import os
import sys
import logging
import argparse
from functools import reduce
from os.path import join as path_join
from random import shuffle
import toml
import torch
import pandas as pd
import transformers
import new_semantic_parsing as nsp
from new_semantic_parsing import utils
# Log to stdout with timestamps; module logger is named after this file.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
    stream=sys.stdout,
)
logger = logging.getLogger(os.path.basename(__file__))

# HuggingFace tokenizers: disable parallelism to avoid fork warnings/deadlocks.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def parse_args(args=None):
    """Parse command-line arguments for the preprocessing script.

    Args:
        args: optional list of argument strings (defaults to sys.argv).

    Returns:
        argparse.Namespace with the parsed options.

    Raises:
        ValueError: if --split-amount is outside (0, 1), or --split-class
            is given without --split-amount.
    """
    parser = argparse.ArgumentParser()
    # fmt: off
    parser.add_argument("--data", required=True,
                        help="path to TOP dataset directory")
    parser.add_argument("--text-tokenizer", required=True,
                        help="pretrained tokenizer name or path to a saved tokenizer")
    parser.add_argument("--output-dir", required=True,
                        help="directory to save preprocessed data")
    # type=int so a CLI override does not arrive as a string (the default
    # was an int, but argparse would have passed "--seed 7" through as "7").
    parser.add_argument("--seed", default=34, type=int)

    # splitting parameters
    parser.add_argument("--split-class", default=None,
                        help="remove --split-ratio of the class from the training dataset and make a finetune_data; "
                             "do not perform split by default")
    parser.add_argument("--split-amount", default=None, type=float,
                        help="0 < --split-amount < 1, amount of data to remove from the training dataset")
    # fmt: on

    args = parser.parse_args(args)

    if args.split_amount is not None:
        if not 0.0 < args.split_amount < 1.0:
            raise ValueError("--split-amount should be between 0. and 1.")

    if args.split_class is not None:
        if args.split_amount is None:
            raise ValueError("--split-amount should be specified if --split-class is provided")

    return args
def train_finetune_split(train_data, schema_vocab, split_amount, split_class=None):
    """Split train_data into train and finetune parts with ratio split_amount.

    Train part should contain all classses from the original train_data.

    If split_class is provided, split across examples containing this class.
    E.i. split_amount of data with split_class goes to finetune set.

    Args:
        train_data: pd.DataFrame
        schema_vocab: set of tokens
        split_amount: float
        split_class: if provided, split across the specified class
    """
    # Get a small set of examples that contains all classes from schema_vocab
    required_example_ids = utils.get_required_example_ids(schema_vocab, train_data)

    # Candidate ids for the finetune subset: all rows, or only the rows
    # whose schema mentions split_class.
    ids = set(range(len(train_data)))
    if split_class is not None:
        ids = set(train_data.index[train_data.schema.str.contains(split_class)])
        logger.info(f"Moving {100 * split_amount}% of {split_class} into a finetuning subset")
        _take = int(len(ids) * split_amount)
        _leave = len(ids) - _take
        logger.info(
            f"Take {_take} class examples to finetuning set and leave {_leave} class examles in"
            " training set."
        )
        # NOTE(review): this emptiness check runs after the log lines above;
        # it raises only when the class never occurs in the data.
        if len(ids) == 0:
            raise RuntimeError(f"Cannot find specified class {split_class} in the data.")

    # Never move the vocabulary-covering examples into the finetune subset.
    split_ids = list(ids - required_example_ids)
    take = int(len(split_ids) * split_amount)
    leave = len(train_data) - take
    assert take > 0
    logger.info(f"Taking {take} examples and leaving {leave} examples")

    # Random selection of the finetune ids.
    shuffle(split_ids)
    subset_ids = split_ids[:take]
    subset_ids_set = set(subset_ids)

    all_ids = set(range(len(train_data)))
    assert len(subset_ids_set.intersection(required_example_ids)) == 0
    train_data_ids = list(all_ids - subset_ids_set | required_example_ids)

    finetune_data = train_data.iloc[subset_ids]
    train_data = train_data.iloc[train_data_ids]
    return train_data, finetune_data
def main(args):
    """Preprocess a TOP-format dataset: optionally split off a finetuning
    subset, build tokenizers, tokenize all splits, and save everything
    (config, tokenizer, binary datasets) into args.output_dir."""
    utils.set_seed(args.seed)

    # Refuse to overwrite an existing preprocessing run.
    if os.path.exists(args.output_dir):
        raise ValueError(f"output_dir {args.output_dir} already exists")

    # File structure:
    # that's text\tthat 's text\t[IN:UNSUPPORTED that 's text]
    train_path = path_join(path_join(args.data, "train.tsv"))
    train_data = pd.read_table(train_path, names=["text", "tokens", "schema"])
    full_train_data_size = len(train_data)  # used to check the train/finetune split

    finetune_data, finetune_path = None, None

    # NOTE: Do not use this for class-incremental scenario, where vocab order is important
    schema_vocab = list(reduce(set.union, map(utils.get_vocab_top_schema, train_data.schema)))

    if args.split_amount is not None:
        # finetune part is not used by train script, but used by retrain script
        logger.info("Splitting the training dataset")
        train_data, finetune_data = train_finetune_split(
            train_data, schema_vocab, args.split_amount, args.split_class
        )
        os.makedirs(args.output_dir)

        finetune_path = path_join(args.output_dir, "finetune.tsv")
        logger.info(f"Saving the finetune_data to {finetune_path}")
        finetune_data.to_csv(finetune_path, sep="\t", index=False, header=False)

        # The reduced train set replaces the original path for tokenization below.
        train_path = path_join(args.output_dir, "train.tsv")
        logger.info(f"Saving the modified training set to {train_path}")
        train_data.to_csv(train_path, sep="\t", index=False, header=False)

    logger.info("Getting schema vocabulary")

    if args.split_amount is not None:
        # Warn if the finetune subset contains schema tokens the (reduced)
        # training set never saw.
        # NOTE: Do not use this for class-incremental scenario, where vocab order is important
        finetune_schema_vocab = list(reduce(
            set.union, map(utils.get_vocab_top_schema, finetune_data.schema)
        ))
        vocab_delta = set(finetune_schema_vocab) - set(schema_vocab)
        if len(vocab_delta) > 0:
            logger.warning(f"Finetuning subset contains vocabulary elements not from the training subset")
            logger.warning(f"New elements: {', '.join(vocab_delta)}")

    logger.info(f"Schema vocabulary size: {len(schema_vocab)}")

    logger.info("Building tokenizers")
    text_tokenizer = transformers.AutoTokenizer.from_pretrained(args.text_tokenizer, use_fast=True)
    schema_tokenizer = nsp.TopSchemaTokenizer(schema_vocab, text_tokenizer)

    logger.info("Tokenizing train dataset")
    train_dataset = nsp.data.make_dataset(train_path, schema_tokenizer)

    logger.info("Tokenizing validation and test datasets")
    valid_dataset = nsp.data.make_dataset(path_join(args.data, "eval.tsv"), schema_tokenizer)
    test_dataset = nsp.data.make_dataset(path_join(args.data, "test.tsv"), schema_tokenizer)

    finetune_dataset = None
    if args.split_amount is not None:
        logger.info("Tokenizing finetune set")
        finetune_dataset = nsp.data.make_dataset(finetune_path, schema_tokenizer)

        logger.info(f"Original train set size: {full_train_data_size}")
        logger.info(f"Reduced train set size: {len(train_dataset)}")
        logger.info(f"Finetune set size: {len(finetune_dataset)}")
        # Sanity check: the split must partition the original training data.
        train_finetune_data_size = len(train_dataset) + len(finetune_dataset)
        if train_finetune_data_size != full_train_data_size:
            raise RuntimeError(f"{train_finetune_data_size} != {full_train_data_size}")

    logger.info(f"Saving config, data and tokenizer to {args.output_dir}")
    os.makedirs(args.output_dir, exist_ok=True)

    with open(path_join(args.output_dir, "args.toml"), "w") as f:
        args_dict = {"version": nsp.SAVE_FORMAT_VERSION, **vars(args)}
        toml.dump(args_dict, f)

    # text tokenizer is saved along with schema_tokenizer
    model_type = None
    if not os.path.exists(args.text_tokenizer):
        model_type = utils.get_model_type(args.text_tokenizer)

    schema_tokenizer.save(path_join(args.output_dir, "tokenizer"), encoder_model_type=model_type)

    data_state = {
        "train_dataset": train_dataset,
        "valid_dataset": valid_dataset,
        "test_dataset": test_dataset,
        "finetune_dataset": finetune_dataset,
        "version": nsp.SAVE_FORMAT_VERSION,
    }
    torch.save(data_state, path_join(args.output_dir, "data.pkl"))
if __name__ == "__main__":
args = parse_args()
main(args)
| 1.789063 | 2 |
signal_processing.py | antonyvm1102/Redbox | 0 | 12770598 | import numpy as np
from Redbox_v2 import file_manager as fm
import pandas as pd
from scipy.fftpack import rfft, rfftfreq
import matplotlib.pyplot as plt
import os
import math
def rms_time_dom(signal):
    """Root-mean-square value of a time-domain signal."""
    squared_total = np.sum(np.power(signal, 2))
    return math.sqrt(squared_total / len(signal))
def rms_freq_dom(amplitude):
    """RMS value computed from single-sided spectrum amplitudes (Parseval)."""
    energy = np.sum(np.power(amplitude, 2))
    return math.sqrt(2 * energy) / 2
def n_minutes_max(signal, dt, n=5):
    """Collect the maximum and minimum of *signal* over consecutive n-minute windows.

    Args:
        signal: sequence of samples (np.array or list).
        dt: sample spacing in seconds.
        n: window length in minutes.

    Returns:
        float np.array of [max, min] pairs, one pair per window, in window
        order. The time of occurrence is not returned.
    """
    samples = int((n * 60) / dt)
    # Accumulate in a Python list: the original grew an array with np.append
    # in a loop (O(n^2)) and used a zeros(1)/delete placeholder hack.
    extrema = []
    start = 0
    while start < len(signal):
        window = signal[start:start + samples]
        extrema.extend((np.amax(window), np.amin(window)))
        start += samples
    return np.asarray(extrema, dtype=float)
def n_seconds_min_max(data, dt, n):
    """Collect the min/max rows of *data* over consecutive n-second windows.

    Args:
        data: 2D array; column 0 = time [s], column 1 = velocity.
        dt: sample spacing in seconds.
        n: window length in seconds over which extrema are determined.

    Returns:
        2D array of [time, value] rows (the max and min sample of each
        window), sorted by time.
    """
    samples = int(1 / dt * n)
    # Accumulate rows in a list instead of the original zeros/concatenate/
    # delete hack, which also copied the growing array every iteration.
    rows = []
    for start in range(0, data.shape[0], samples):
        end = start + samples
        index_max = start + np.argmax(data[start:end, 1])
        index_min = start + np.argmin(data[start:end, 1])
        rows.append([data[index_max, 0], data[index_max, 1]])
        rows.append([data[index_min, 0], data[index_min, 1]])
    if not rows:
        # Empty input: return an empty (0, 2) array, as the original did.
        return np.zeros((0, 2))
    min_max_array = np.array(rows)
    return min_max_array[min_max_array[:, 0].argsort()]
def FFT(signal, dT):
    """Single-sided FFT of *signal*.

    Args:
        signal: 1-D array of samples.
        dT: sample spacing [s].

    Returns:
        (amplitude, frequency): amplitudes scaled by 2/N and the matching
        frequency axis (scipy.fftpack packed-real layout).
    """
    n = len(signal)
    spectrum = rfft(signal)
    ampl = (2.0 / n) * np.abs(spectrum)
    freq = rfftfreq(ampl.size, d=dT)
    return ampl, freq
def FFT_amplitude(signal):
    """Single-sided FFT amplitudes of *signal*, scaled by 2/N.

    Args:
        signal: 1-D array of samples.
    """
    n = len(signal)
    return (2.0 / n) * np.abs(rfft(signal))
def OneThird_octave(low, high):
    """Build a one-third-octave band spectrum ending exactly at *high*.

    Bands are spaced by a factor 2**(1/3), counted downward from *high*;
    the lowest band returned is the first one still greater than *low*.

    Args:
        low: lowest required frequency band.
        high: highest required frequency band.

    Returns:
        np.array of center frequencies in ascending order.
    """
    factor = 2 ** (1 / 3)
    # Step down from `high` until we drop at or below `low`, counting bands.
    band = high
    n_bands = 0
    while band > low:
        band = band / factor
        n_bands += 1
    # One step back up: the smallest band that is still above `low`.
    band = band * factor
    return band * np.logspace(0, n_bands, endpoint=False, num=n_bands, base=factor)
def FFT_to_OneThird_Octave(amplitude, df, low, high):
    """Aggregate FFT amplitudes into one-third-octave band RMS values.

    Args:
        amplitude: amplitudes of the FFT [array]
        df: frequency resolution of the FFT [Hz per bin]
        low: lowest band center frequency
        high: highest band center frequency

    Returns:
        (rms_amplitude, spectrum) on success; None (with an error print)
        when the FFT does not reach the highest band edge.
    """
    one_third_octave = 2 ** (1 / 3)
    spectrum = OneThird_octave(low, high)
    rms_amplitude = np.empty(len(spectrum))
    # check if the maximum available frequency exceeds the upper band edge
    if (df * len(amplitude)) * one_third_octave ** 0.5 > high:
        # Band edges sit half a third-octave below/above each center frequency.
        lower_bound = spectrum[0] / one_third_octave ** 0.5
        upper_bound = spectrum[0] * one_third_octave ** 0.5
        for n in range(rms_amplitude.size):
            # NOTE(review): the `* 2` on the bin indices presumably accounts
            # for scipy.fftpack.rfft's packed layout (two entries per
            # frequency bin) -- TODO confirm against the producer of
            # `amplitude`.
            rms_amplitude[n] = rms_freq_dom(amplitude[int(lower_bound // df) * 2:int(upper_bound // df) * 2])
            lower_bound = lower_bound * one_third_octave
            upper_bound = upper_bound * one_third_octave
        return rms_amplitude, spectrum
    else:
        print("ERROR frequency range is not large enough")
        return
def FFT_to_OneThird_Octave2(amplitude, df, spectrum):
    """Aggregate FFT amplitudes into RMS values for a given band spectrum.

    Variant of FFT_to_OneThird_Octave that takes the band center
    frequencies directly instead of (low, high).

    Args:
        amplitude: amplitudes of the FFT [array]
        df: frequency resolution of the FFT [Hz per bin]
        spectrum: ascending one-third-octave band center frequencies

    Returns:
        rms_amplitude array on success; None (with an error print) when the
        FFT does not reach the highest band edge.
    """
    one_third_octave = 2 ** (1 / 3)
    spectrum = spectrum  # kept from the original; has no effect
    rms_amplitude = np.empty(len(spectrum))
    high = spectrum[-1]
    # check if the maximum available frequency exceeds the upper band edge
    if (df * len(amplitude)) * one_third_octave ** 0.5 > high:
        lower_bound = spectrum[0] / one_third_octave ** 0.5
        upper_bound = spectrum[0] * one_third_octave ** 0.5
        for n in range(rms_amplitude.size):
            # NOTE(review): `* 2` on bin indices presumably matches
            # scipy.fftpack.rfft's packed real layout -- TODO confirm.
            rms_amplitude[n] = rms_freq_dom(amplitude[int(lower_bound // df) * 2:int(upper_bound // df) * 2])
            lower_bound = lower_bound * one_third_octave
            upper_bound = upper_bound * one_third_octave
        return rms_amplitude
    else:
        print("ERROR frequency range is not large enough")
        return
"""
integration and differentiation
"""
def integ_to_disp(vel, dt):
    """Integrate velocity samples to displacement (trapezoidal rule).

    Args:
        vel: velocity samples (np.array or list).
        dt: sample spacing [s].

    Returns:
        np.array of len(vel) - 1 displacements; disp[0] is taken as 0.
    """
    vel = np.asarray(vel, dtype=float)
    disp = np.zeros(len(vel) - 1)
    for i in range(1, len(disp)):
        # Trapezoidal step: average velocity over the interval times dt.
        # (The original accumulated (vel[i+1] - vel[i]) * dt, i.e. a
        # velocity *difference* times dt, which is not an integral.)
        disp[i] = disp[i - 1] + 0.5 * (vel[i - 1] + vel[i]) * dt
    return disp
def diff_to_acc(vel, dt):
    """Differentiate velocity samples to acceleration (forward differences).

    Args:
        vel: velocity samples (np.array or list).
        dt: sample spacing [s].

    Returns:
        np.array of len(vel) - 1 accelerations.
    """
    vel = np.asarray(vel, dtype=float)
    return np.diff(vel) / dt
def select_part(start, stop, to_select, dt=1.0):
    """Select the samples of *to_select* between times *start* and *stop*.

    Args:
        start: start time of the selection [s] (flt/int).
        stop: end time of the selection [s] (flt/int).
        to_select: sequence of samples.
        dt: sample spacing [s]. The original relied on an undefined global
            `dt` and built the time axis with wrong np.linspace arguments
            (np.linspace(start, dt, stop)); both are fixed here.

    Returns:
        (times, values): np.array of sample times and the selected samples.
    """
    i = int(start / dt)
    j = int(stop / dt)
    values = list(to_select[i:j])
    times = start + dt * np.arange(len(values))
    return times, values
""" SBR methods"""
def compute_veff_sbr(v, T, Ts=0.125, a=8):
    """Compute the SBR-B effective (running RMS) velocity of a vibration signal.

    Args:
        v: velocity samples [mm/s] (np.array).
        T: sample spacing [s].
        Ts: RMS integration time constant [s] (0.125 s per SBR-B -- TODO confirm).
        a: decimation factor; every a-th sample is used for the running RMS.

    Returns:
        (max v_eff, decimated time axis, weighted signal, running v_eff array).
    """
    # Zero-pad to the next power of two for the FFT.
    l = int(np.log2(v.size) + 1)  # nth-power
    N_org = v.size
    N = 2 ** l
    t = np.linspace(0, N * T, N, endpoint=False)
    v = np.pad(v, (0, N - v.size), 'constant')
    vibrations_fft = np.fft.fft(v)
    f = np.linspace(0, 1 / T, N, endpoint=False)
    # NOTE(review): f_mod aliases f, so the clamp below mutates f in place.
    f_mod = f
    f_mod[f < 1.0] = 0.1
    # Frequency weighting: high-pass with 5.6 Hz corner (SBR weighting -- TODO confirm).
    weight = 1 / np.sqrt(1 + (5.6 / f_mod) ** 2)
    vibrations_fft_w = weight * vibrations_fft
    vibrations_w = np.fft.ifft(vibrations_fft_w).real
    # Decimate back to the original duration, every a-th sample.
    t_sel = t[:N_org:a]
    vibrations_w = vibrations_w[:N_org:a]
    v_sqrd_w = vibrations_w ** 2
    v_eff = np.zeros(t_sel.size)
    dt = t_sel[1] - t_sel[0]
    print('compute v_eff')
    # Running RMS with exponential forgetting: convolve the squared signal
    # with exp(-tau/Ts) up to each time point. O(n^2) but explicit.
    for i in range(t_sel.size - 1):
        g_xi = np.exp(-t_sel[:i + 1][::-1] / Ts)
        v_eff[i] = np.sqrt(1 / Ts * np.trapz(g_xi * v_sqrd_w[:i + 1], dx=dt))
        fm.progress(i, t_sel.size - 1, "processing %s of %s" % (i + 1, t_sel.size))
    idx = np.argmax(v_eff)
    return v_eff[idx], t_sel, vibrations_w, v_eff
def plot_SBR_B(save_to_path, vibrations, vibrations_w, v_eff, t_sel):
    """Plot the (weighted) velocity signal and the SBR-B effective velocity.

    Args:
        save_to_path: format string for the output file, e.g. "plot.{}".
        vibrations: raw velocity signal, or None to skip.
        vibrations_w: frequency-weighted signal, or None to skip.
        v_eff: running effective velocity, or None to skip.
        t_sel: time axis shared by all three signals.
    """
    plt.figure(figsize=(10, 6))
    # `is not None` instead of truthiness: numpy arrays raise on bool().
    if vibrations is not None:
        plt.plot(t_sel, vibrations, label="signal")
    if vibrations_w is not None:
        plt.plot(t_sel, vibrations_w, label="weighted_signal")
    if v_eff is not None:
        plt.plot(t_sel, v_eff, label="v_eff")
        # Annotate the maximum (the original referenced undefined `t`/`idx`).
        idx = np.argmax(v_eff)
        plt.text(t_sel[idx], v_eff[idx], "max v_eff: {}".format(round(v_eff[idx], 3)), color="r")
    plt.xlabel("t [s]")
    plt.ylabel("v [mm/s]")
    plt.title("velocity")
    plt.legend()
    plt.savefig(save_to_path.format("png"))
    plt.show()
def plot_SBR_B_xyz(save_to_path, vibrations, vibrations_w, v_eff, t_sel):
    """Plot x/y/z signals, weighted signals and effective velocities.

    Args:
        save_to_path: format string for the output file, e.g. "plot.{}".
        vibrations: tuple of three raw signals (x, y, z), or None to skip.
        vibrations_w: tuple of three weighted signals, or None to skip.
        v_eff: tuple of three running effective velocities, or None to skip.
        t_sel: time axis shared by all signals.
    """
    fig = plt.figure(figsize=(10, 18))
    axes = [fig.add_subplot(3, 1, k) for k in range(1, 4)]
    for k, ax in enumerate(axes):
        # `is not None` instead of truthiness: tuples of arrays may be falsy
        # in surprising ways and numpy arrays raise on bool().
        if vibrations is not None:
            ax.plot(t_sel, vibrations[k], label="signal")
        if vibrations_w is not None:
            ax.plot(t_sel, vibrations_w[k], label="weighted_signal")
        if v_eff is not None:
            # Plot the per-axis running v_eff and annotate its maximum
            # (the original used undefined `t` and indexed v_eff with a list).
            idx = np.argmax(v_eff[k])
            ax.plot(t_sel, v_eff[k], label="v_eff")
            ax.text(t_sel[idx], v_eff[k][idx], "max v_eff: {}".format(round(v_eff[k][idx], 3)), color="r")
    plt.xlabel("t [s]")
    plt.ylabel("v [mm/s]")
    plt.title("velocity")
    plt.legend()
    plt.savefig(save_to_path.format("png"))
    plt.show()
apphub/contrastive_learning/simclr/simclr_tf.py | DwijayDS/fastestimator | 57 | 12770599 | # Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The FastEstimator implementation of SimCLR with ResNet9 on CIFAIR10.
This code took reference from google implementation (https://github.com/google-research/simclr).
Note that we use the ciFAIR10 dataset instead (https://cvjena.github.io/cifair/)
"""
import tempfile
import tensorflow as tf
from tensorflow.keras import layers
import fastestimator as fe
from fastestimator.dataset.data.cifair10 import load_data
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop
from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray
from fastestimator.op.tensorop import LambdaOp, TensorOp
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import ModelSaver
from fastestimator.trace.metric import Accuracy
def ResNet9(input_size=(32, 32, 3), head_len=128, classes=10):
    """A small 9-layer ResNet backbone with two heads for SimCLR on ciFAIR-10.

    The backbone architecture is from https://github.com/davidcpage/cifar10-fast

    Args:
        input_size: The size of the input tensor (height, width, channels).
        head_len: Dimensionality of the contrastive projection head.
        classes: The number of outputs of the supervised (fine-tuning) head.

    Returns:
        (model_con, model_finetune): two Keras models sharing the same
        backbone -- the contrastive model ends in a linear projection head of
        size `head_len`; the fine-tuning model ends in a float32 softmax over
        `classes`.
    """
    # prep layers
    inp = layers.Input(shape=input_size)
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1: conv + pool, then a residual unit
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layers4: global pooling produces the shared representation `code`
    x = layers.GlobalMaxPool2D()(x)
    code = layers.Flatten()(x)
    # Contrastive projection head.
    p_head = layers.Dense(head_len)(code)
    model_con = tf.keras.Model(inputs=inp, outputs=p_head)
    # Supervised head; softmax kept in float32 for mixed-precision stability.
    s_head = layers.Dense(classes)(code)
    s_head = layers.Activation('softmax', dtype='float32')(s_head)
    model_finetune = tf.keras.Model(inputs=inp, outputs=s_head)
    return model_con, model_finetune
def residual(x, num_channel):
    """A two-convolution residual unit for ResNet9.

    Args:
        x: Input Keras tensor.
        num_channel: Number of channels of both convolutions.

    Return:
        Output Keras tensor of the same spatial size (the caller adds it
        back onto `x`).
    """
    x = layers.Conv2D(num_channel, 3, padding='same')(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Conv2D(num_channel, 3, padding='same')(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    return x
class NTXentOp(TensorOp):
    """FastEstimator TensorOp computing NT-Xent loss between two embedding batches.

    `arg1` and `arg2` are the keys of the two augmented-view embeddings; the op
    forwards them to `NTXent`, which returns (loss, logits, labels).
    """
    def __init__(self, arg1, arg2, outputs, temperature=1.0, mode=None):
        super().__init__(inputs=(arg1, arg2), outputs=outputs, mode=mode)
        self.temperature = temperature  # softmax temperature for the similarity logits

    def forward(self, data, state):
        arg1, arg2 = data
        # NTXent returns a (loss, logits, labels) tuple; all three are emitted
        # under the keys given in `outputs`.
        loss = NTXent(arg1, arg2, self.temperature)
        return loss
def NTXent(A, B, temperature):
    """Normalized temperature-scaled cross entropy (SimCLR) loss.

    Args:
        A: Embeddings of augmented view 1, shape (batch, dim).
        B: Embeddings of augmented view 2, shape (batch, dim).
        temperature: Softmax temperature applied to cosine similarities.
    Returns:
        Tuple of (scalar mean loss, A-vs-B similarity logits `ab`, one-hot labels).
    """
    large_number = 1e9  # subtracted from self-similarity logits to exclude them
    batch_size = tf.shape(A)[0]
    # L2-normalize so matmul products are cosine similarities
    A = tf.math.l2_normalize(A, -1)
    B = tf.math.l2_normalize(B, -1)
    mask = tf.one_hot(tf.range(batch_size), batch_size)
    # For row i the positive is column i of the cross-view block, which is
    # placed first in the concat below — hence labels over 2*batch_size columns.
    labels = tf.one_hot(tf.range(batch_size), 2 * batch_size)
    aa = tf.matmul(A, A, transpose_b=True) / temperature
    aa = aa - mask * large_number  # mask out i==i self pairs
    ab = tf.matmul(A, B, transpose_b=True) / temperature
    bb = tf.matmul(B, B, transpose_b=True) / temperature
    bb = bb - mask * large_number  # mask out i==i self pairs
    ba = tf.matmul(B, A, transpose_b=True) / temperature
    # Cross-view block first so `labels` marks the positive pair in both terms.
    loss_a = tf.nn.softmax_cross_entropy_with_logits(labels, tf.concat([ab, aa], 1))
    loss_b = tf.nn.softmax_cross_entropy_with_logits(labels, tf.concat([ba, bb], 1))
    loss = tf.reduce_mean(loss_a + loss_b)
    return loss, ab, labels
def pretrain_model(epochs, batch_size, train_steps_per_epoch, save_dir):
    """Contrastively pretrain the ResNet9 backbone with SimCLR-style augmentation.

    Builds a pipeline that produces two independent augmentations of each image,
    trains the projection model with NT-Xent loss, saves it to `save_dir`, and
    returns both the projection model and the (untrained) classification model.
    """
    # step 1: prepare dataset
    train_data, test_data = load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        batch_size=batch_size,
        ops=[
            # pad to 40x40 so the later 32x32 random crops can translate
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x"),
            # augmentation 1
            RandomCrop(32, 32, image_in="x", image_out="x_aug"),
            Sometimes(HorizontalFlip(image_in="x_aug", image_out="x_aug"), prob=0.5),
            Sometimes(
                ColorJitter(inputs="x_aug", outputs="x_aug", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
                prob=0.8),
            Sometimes(ToGray(inputs="x_aug", outputs="x_aug"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug", outputs="x_aug", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ToFloat(inputs="x_aug", outputs="x_aug"),
            # augmentation 2 (independent draw of the same augmentation chain)
            RandomCrop(32, 32, image_in="x", image_out="x_aug2"),
            Sometimes(HorizontalFlip(image_in="x_aug2", image_out="x_aug2"), prob=0.5),
            Sometimes(
                ColorJitter(inputs="x_aug2", outputs="x_aug2", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
                prob=0.8),
            Sometimes(ToGray(inputs="x_aug2", outputs="x_aug2"), prob=0.2),
            Sometimes(GaussianBlur(inputs="x_aug2", outputs="x_aug2", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
                      prob=0.5),
            ToFloat(inputs="x_aug2", outputs="x_aug2")
        ])
    # step 2: prepare network — both views go through the model in one batch,
    # then are split back apart for the contrastive loss
    model_con, model_finetune = fe.build(model_fn=ResNet9, optimizer_fn=["adam", "adam"])
    network = fe.Network(ops=[
        LambdaOp(lambda x, y: tf.concat([x, y], axis=0), inputs=["x_aug", "x_aug2"], outputs="x_com"),
        ModelOp(model=model_con, inputs="x_com", outputs="y_com"),
        LambdaOp(lambda x: tf.split(x, 2, axis=0), inputs="y_com", outputs=["y_pred", "y_pred2"]),
        NTXentOp(arg1="y_pred", arg2="y_pred2", outputs=["NTXent", "logit", "label"]),
        UpdateOp(model=model_con, loss_name="NTXent")
    ])
    # step 3: prepare estimator
    traces = [
        # how often the positive pair gets the max logit — a proxy metric
        Accuracy(true_key="label", pred_key="logit", mode="train", output_name="contrastive_accuracy"),
        ModelSaver(model=model_con, save_dir=save_dir),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    estimator.fit()
    return model_con, model_finetune
def finetune_model(model, epochs, batch_size, train_steps_per_epoch, save_dir):
    """Supervised finetuning of the classification head on a labeled subset.

    Trains `model` (the softmax-head model from `ResNet9`) with cross entropy,
    evaluating on the full test set.  Note: `save_dir` is currently unused here.
    """
    train_data, test_data = load_data()
    # keep only 10% of the labeled training data (simulating scarce labels)
    train_data = train_data.split(0.1)
    pipeline = fe.Pipeline(train_data=train_data,
                           eval_data=test_data,
                           batch_size=batch_size,
                           ops=[
                               ToFloat(inputs="x", outputs="x"),
                           ])
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=["y_pred", "y"], outputs="ce"),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred"),
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch)
    estimator.fit()
def fastestimator_run(epochs_pretrain=50,
                      epochs_finetune=10,
                      batch_size=512,
                      train_steps_per_epoch=None,
                      save_dir=None):
    """Run SimCLR pretraining followed by supervised finetuning.

    Args:
        epochs_pretrain: Number of contrastive pretraining epochs.
        epochs_finetune: Number of supervised finetuning epochs.
        batch_size: Batch size for both phases.
        train_steps_per_epoch: Optional cap on steps per epoch (None = full data).
        save_dir: Checkpoint directory; when None a fresh temporary directory is
            created per call.  (Previously the default was `tempfile.mkdtemp()`
            in the signature, which is evaluated once at import time — every run
            shared one directory, created even when the function was never called.)
    """
    if save_dir is None:
        save_dir = tempfile.mkdtemp()
    model_con, model_finetune = pretrain_model(epochs_pretrain, batch_size, train_steps_per_epoch, save_dir)
    finetune_model(model_finetune, epochs_finetune, batch_size, train_steps_per_epoch, save_dir)


if __name__ == "__main__":
    fastestimator_run()
| 1.898438 | 2 |
part-1/process_strings.py | pt-dev/python-newbie | 0 | 12770600 | <gh_stars>0
# Report whether a repeated sentence fits within a 79-character budget.
max_len = 79
str_plier = 2
foo = "Oh oh oh oh you don't know, Joe." * str_plier
curr_len = len(foo)
excess = curr_len - max_len
if excess > 0:
    # over budget: warn and report the overshoot
    print("Danger, <NAME>!!!")
    print("Your sentiment is {} ".format(excess) + "characters too long!")
else:
    print("What a lovely sentiment!\n" + foo + "\nIn only {} characters!".format(curr_len))
| 3.703125 | 4 |
src/rawsalad/rsdbapi.py | CCLab/Raw-Salad | 0 | 12770601 | <reponame>CCLab/Raw-Salad
# -*- coding: utf-8 -*-
"""
project: Raw Salad
function: classes representing the API to data and meta-data
requirements: mongod, conf file (see conf_filename)
"""
from time import time
from ConfigParser import ConfigParser
import pymongo
import re
import os
meta_src= "md_budg_scheme"  # Mongo collection holding per-perspective metadata
state_counter= "md_sta_cnt"  # Mongo collection holding the persistent state-id counter
nav_schema= "ms_nav"  # Mongo collection holding the navigation tree
dir_path = os.path.dirname( __file__ )
# configuration file shipped alongside this module
conf_filename = os.path.join(dir_path, 'site_media', 'rawsdata.conf')
class Response:
    """
    response object
    Maps internal status codes to an HTTP status plus a human-readable
    description; `get_response` returns that dict and caches the pieces
    on the instance (`code`, `http_resp`, `descr`).
    """
    def __init__(self):
        self.code= 0 # Response class is optimistic
        # internal code (as string) -> {'httpresp': HTTP status, 'descr': message}
        self.response_dict= {
            '0': {
                'httpresp': 200,
                'descr': 'OK'
                },
            '1': {
                'httpresp': 200,
                'descr': 'OK: Data successfully updated'
                },
            '2': {
                'httpresp': 200,
                'descr': 'OK: Data successfully inserted'
                },
            '10': {
                'httpresp': 404,
                'descr': 'ERROR: No such data!'
                },
            '20': {
                'httpresp': 404,
                'descr': 'ERROR: No such meta-data!'
                },
            '30': {
                'httpresp': 400,
                'descr': 'ERROR: Bad request!'
                },
            '31': {
                'httpresp': 400,
                'descr': 'ERROR: Scope +TO+ is applicable to the codes on the same level!'
                },
            '32': {
                'httpresp': 400,
                'descr': 'ERROR: Wrong sequence in the scope +TO+!'
                },
            '33': {
                'httpresp': 400,
                'descr': 'ERROR: Scope +TO+ should include only 2 elements!'
                },
            '34': {
                'httpresp': 400,
                'descr': 'ERROR: Syntax error in scope definition!',
                },
            '35': {
                'httpresp': 400,
                'descr': 'ERROR: Format not specified!'
                },
            '36': {
                'httpresp': 400,
                'descr': 'ERROR: Search string not given!'
                },
            '37': {
                'httpresp': 404,
                'descr': 'ERROR: No such collection(s)!'
                },
            '40': {
                'httpresp': 404,
                'descr': 'ERROR: No data for specified state id!'
                },
            '41': {
                'httpresp': 500,
                'descr': 'ERROR: Cannot insert data into the db!'
                },
            '42': {
                'httpresp': 400,
                'descr': 'ERROR: Wrong state id!'
                },
            '43': {
                'httpresp': 404,
                'descr': 'ERROR: No data specified!'
                },
            '44': {
                'httpresp': 500,
                'descr': 'ERROR: Cannot update document!'
                },
            }

    def __del__(self):
        pass

    def get_response(self, code):
        """Return the {'httpresp', 'descr'} dict for `code`, caching it on self."""
        self.code= code
        self.http_resp= self.response_dict[str(code)]['httpresp']
        self.descr= self.response_dict[str(code)]['descr']
        return self.response_dict[str(code)]
class DBconnect:
    """Open an authenticated connection to the configured database.

    Only 'mongodb' is implemented (legacy pymongo `Connection` API);
    'postgresql' is a placeholder yielding a None connection.
    NOTE(review): any other `db_type` leaves `dbase` unassigned and raises
    NameError at `self.dbconnect= dbase` — confirm callers only pass the
    two supported values.
    """
    def __init__(self, db_type):
        if db_type == 'mongodb':
            self.fill_connection(db_type)
            self.connect= pymongo.Connection(self.host, self.port)
            dbase= self.connect[self.database]
            dbase.authenticate(self.username, self.password)
        elif db_type == 'postgresql':
            dbase= None # not yet realized
        self.dbconnect= dbase

    def __del__(self):
        pass

    def fill_connection(self, db_type):
        """Read host/port/database/credentials for `db_type` from the conf file."""
        cfg= ConfigParser({ 'basedir': conf_filename })
        cfg.read(conf_filename)
        self.host= cfg.get(db_type,'host')
        self.port= cfg.getint(db_type,'port')
        self.database= cfg.get(db_type,'database')
        self.username= cfg.get(db_type,'username')
        try:
            pssw= cfg.get(db_type,'password')
        except Exception: # was a bare except: — that also swallowed KeyboardInterrupt/SystemExit
            pssw = None
        if pssw is not None:
            self.password= pssw
        else:
            self.password= '' # must be instance of basestring
class Navtree:
    """ Navigator tree: read-only queries against the `ms_nav` collection. """
    def __init__(self, **parms):
        """
        **parms are:
        - fields_aux - {} specified keys from the structure
        - query_aux - {} additional query conditions
        """
        self.fields= parms.pop("fields_aux", {}) # before match against metadata
        self.query= parms.pop("query_aux", {}) # before update from metadata
        self.response= Response().get_response(0) # Navtree class is optimistic

    def __del__(self):
        pass

    def get_nav_full(self, datasrc):
        """Return all navigation documents (minus _id), honoring ctor fields/query."""
        out= []
        self.request= 'navigator'
        nav_fields= {'_id':0} # _id is never returned
        nav_fields.update(self.fields)
        query= {} # query conditions
        query.update(self.query) # additional query, depends on the call
        cursor_data= datasrc[nav_schema].find(query, nav_fields)
        # NOTE(review): find() never returns None, so the error branch below is
        # effectively dead — an empty result still reports response 0; confirm.
        if cursor_data is not None:
            self.response= Response().get_response(0)
            for row in cursor_data:
                out.append(row)
        else: # error
            self.response= Response().get_response(10)
        return out

    def get_dataset(self, datasrc):
        """Return all datasets without their nested `perspectives`."""
        out= []
        self.request= 'dataset'
        cursor_data= datasrc[nav_schema].find({}, { '_id':0, 'perspectives':0 })
        if cursor_data.count() > 0:
            self.response= Response().get_response(0) # no error
            for row in cursor_data:
                out.append(row)
        else:
            self.response= Response().get_response(10)
        return out

    def get_view(self, datasrc, dataset_idef):
        """Return the perspective (view) summaries of one dataset."""
        out= []
        self.request= 'view'
        nav_fields= {
            '_id':0,
            'perspectives.idef':1,
            'perspectives.name':1,
            'perspectives.description':1,
            'perspectives.long_description':1
            }
        query= { 'idef': int(dataset_idef) }
        cursor_data= datasrc[nav_schema].find_one(query, nav_fields)
        if cursor_data is not None:
            self.response= Response().get_response(0)
            out= cursor_data['perspectives']
        else: # error
            self.response= Response().get_response(10)
        return out

    def get_issue(self, datasrc, dataset_idef, view_idef):
        """Return the issue list of one view of one dataset."""
        out= []
        self.request= 'issue'
        nav_fields= { '_id':0, 'perspectives.issues':1 }
        query={
            'idef': int(dataset_idef),
            'perspectives': { '$elemMatch': { 'idef': int(view_idef) } }
            }
        cursor_data= datasrc[nav_schema].find_one(query, nav_fields)
        if cursor_data is not None:
            self.response= Response().get_response(0)
            # NOTE(review): indexes perspectives by position `int(view_idef)` —
            # assumes idef equals the array index; confirm against the data.
            out= cursor_data['perspectives'][int(view_idef)]['issues']
        else: # error
            self.response= Response().get_response(10)
        return out

    def get_count(self, datasrc, dataset_idef= None, view_idef= None):
        """Count datasets / views / issues depending on which args are given."""
        count= 0
        if dataset_idef is None and view_idef is None: # datasets count
            element_list= self.get_dataset(datasrc)
        elif dataset_idef is not None and view_idef is None: # views count
            element_list= self.get_view(datasrc, dataset_idef)
        else: # issues count
            element_list= self.get_issue(datasrc, dataset_idef, view_idef)
        if self.response['httpresp'] == 200:
            count= len(element_list)
        else:
            self.response= Response().get_response(20)
        return count

    def get_max_dataset(self, datasrc):
        """
        the max dataset id in the nav tree
        """
        # sort descending by idef, take one — i.e. the current maximum
        dsmax_dict= datasrc[nav_schema].find(
            fields= { '_id': 0, 'idef':1 },
            sort= [('idef', -1)],
            limit= 1
            )
        for ii in dsmax_dict:
            result= int(ii['idef']) # it is anyway only one record
        return result

    def get_max_view(self, datasrc, dataset):
        """
        the max view number of the given dataset in the nav tree
        """
        vwmax_dict= datasrc[nav_schema].find_one(
            spec_or_id= { 'idef': int(dataset) },
            fields= { '_id':0, 'perspectives.idef':1}
            )
        vwid_list= []
        for vw in vwmax_dict['perspectives']:
            vwid_list.append( int(vw['idef']) )
        return max(vwid_list)
class Collection:
    """
    extraction of the imformation from the db
    params: dataset, view, issue
    additional params: query, user defined list of fields
    Metadata for a (dataset, view, issue) triple lives in `meta_src`; the
    actual data lives in the collection named by that metadata's `ns` key.
    """
    def __init__(self, **parms):
        """
        **parms are URL params:
        - fields - [] or None (fields to return)
        - query - {} or None (query to db before defined in meta-data)
        """
        self.raw_usrdef_fields= parms.pop("fields", []) # before match against metadata
        self.request_fields= {}
        if len(self.raw_usrdef_fields) > 0:
            self.set_fields(self.raw_usrdef_fields) # for queries
        self.raw_query= parms.pop("query", {}) # before update from metadata
        self.warning= None # non-critical errors and typos
        self.response= Response().get_response(0) # Collection class is optimistic
        self.count= 0

    def __del__(self):
        pass

    def set_query(self, query):
        """Replace the user query (ignored when `query` is None)."""
        if query is not None:
            self.raw_query= query

    def set_fields(self, field_list= None):
        """Build a Mongo field-projection dict from a list of field names."""
        if field_list is not None:
            self.request_fields= { k:1 for k in field_list }
        else:
            self.request_fields= { }

    def get_metadata(self, datasrc, dataset_id, view_id, issue):
        """Return cleaned metadata for the triple, with a row `count` added."""
        metadata= {}
        metadata_complete= self.get_complete_metadata(
            int(dataset_id), int(view_id), str(issue), datasrc
        )
        if metadata_complete is None: # no such source
            self.response= Response().get_response(20)
            self.request= "unknown"
        else:
            self.response= Response().get_response(0)
            self.request= metadata_complete['name']
            count_query= metadata_complete['query'] # used for counting
            # define useless keys
            useless_keys= ['ns', 'aux', 'batchsize', 'sort', 'query', 'explorable', 'name', 'dataset', 'idef', 'issue']
            if len(self.raw_query) != 0: # the query is on the specific elements
                useless_keys.append('max_level') # so, max_level is also useless
                count_query.update(self.raw_query)
            # but before delete useless keys - counting children of a given parent
            count= self.get_count(datasrc, metadata_complete['ns'], count_query)
            if count == 0:
                self.response= Response().get_response(10)
            else:
                metadata_complete['count']= count
                for curr in useless_keys: # now delete useless keys
                    if curr in metadata_complete:
                        del metadata_complete[curr]
                field_list_complete= metadata_complete.pop('columns')
                field_list= []
                field_names_complete= []
                if len(self.raw_usrdef_fields) > 0: # describe only user defined columns
                    for fld in field_list_complete:
                        field_names_complete.append(fld['key']) # for future check
                        if fld['key'] in self.raw_usrdef_fields:
                            field_list.append(fld)
                    self.fill_warning(field_names_complete) # fill self.warning
                else:
                    field_list= field_list_complete # substitute 'columns' for 'fields'
                metadata_complete['fields']= field_list # to match the name of URL parameter
                metadata= metadata_complete
        return metadata

    def get_complete_metadata(self, ds_id, ps_id, iss, dbase, use_fields= False):
        """Fetch the raw metadata document; cached on self.metadata_complete."""
        field_dict= { '_id' : 0 }
        if use_fields: # return only the fields specified in self.request_fields
            field_dict.update(self.request_fields)
        self.metadata_complete= dbase[meta_src].find_one(
            { 'dataset': ds_id, 'idef' : ps_id, 'issue': iss },
            field_dict
            )
        return self.metadata_complete

    def save_complete_metadata(self, new_object, dbase):
        """
        saves metadata defined by a user
        into the db collection for metadata
        Returns (dataset, view, issue, updated?) and sets self.response.
        """
        ds_id, ps_id, iss= new_object['dataset'], new_object['idef'], new_object['issue']
        update_status= True # true - update, false - insert
        if new_object is not None:
            current_object= dbase[meta_src].find_one({ 'dataset': ds_id, 'idef': ps_id, 'issue': iss })
            if current_object is None: # is dataset-view-issue already in the db?
                current_object= {}
                update_status= False # insert instead of update
            else:
                current_object= { '_id': current_object['_id'] } # only _id is required for save()
            current_object.update(new_object)
            try:
                dbase[meta_src].save(current_object)
                if update_status:
                    self.response= Response().get_response(1) # OK, updated
                else:
                    self.response= Response().get_response(2) # OK, inserted
            except Exception as e:
                self.response= Response().get_response(41) # ERROR, can't insert into the db
                self.response['descr']= ' '.join([ self.response['descr'], str(e) ])
        else:
            self.response= Response().get_response(43) # ERROR, bad request - data is empty
        return ds_id, ps_id, iss, update_status

    def save_doc(self, new_doc, dataset_id, view_id, issue, idef, dbase):
        """
        saves new_dict (a dictionary) into specified doc in the db
        Merges `new_doc` over the existing record with the given `idef`.
        """
        qry= { 'idef': idef }
        self.set_query(qry)
        orig_doc= self.get_data(dbase, dataset_id, view_id, issue)
        coll_name= self.metadata_complete['ns'] # get_data calls for complete metadata
        if self.response['httpresp'] == 200: # record found
            orig_doc= orig_doc[0] # expecting only one element
            orig_doc.update(new_doc)
            try:
                dbase[coll_name].update(qry, orig_doc) # update doc by its idef
            except Exception as e:
                self.response= Response().get_response(44) # ERROR, can't insert into the db
                self.response['descr']= ' '.join([ self.response['descr'], str(e) ])
        return self.response

    def get_data(self, datasrc, dataset_id, view_id, issue):
        """Return the flat record list for the triple, applying metadata query/sort."""
        data= []
        elm_count= 0
        metadata_complete= self.get_complete_metadata(
            int(dataset_id), int(view_id), str(issue), datasrc
        )
        if metadata_complete is None: # no such source
            self.response= Response().get_response(20)
            self.request= "unknown"
        else:
            self.response= Response().get_response(0)
            self.request= metadata_complete['name']
            conn_coll= metadata_complete['ns'] # collection name
            cursor_fields= self.get_fields(metadata_complete) # full columns list
            cursor_sort= self.get_sort_list(metadata_complete) # list of sort columns
            try: # batch size
                cursor_batchsize= metadata_complete['batchsize']
            except:
                cursor_batchsize= 'default'
            cursor_query= {}
            if 'query' in metadata_complete:
                cursor_query.update( metadata_complete['query'] ) # initial query
            if len(self.raw_query) != 0:
                cursor_query.update(self.raw_query) # additional query build on the path argument
            # EXTRACT data (rows)
            if cursor_batchsize in ['default', None]:
                cursor_data= datasrc[conn_coll].find(cursor_query, cursor_fields, sort=cursor_sort)
            else:
                cursor_data= datasrc[conn_coll].find(cursor_query, cursor_fields, sort=cursor_sort).batch_size(cursor_batchsize)
            if cursor_data.count() > 0:
                elm_count= cursor_data.count()
                for row in cursor_data:
                    data.append(row)
            else:
                self.response= Response().get_response(10)
        self.count= elm_count
        return data

    def get_tree(self, datasrc, dataset_id, view_id, issue):
        """Return the records as nested trees (children under their parents)."""
        tree= []
        metadata_complete= self.get_complete_metadata(
            int(dataset_id), int(view_id), str(issue), datasrc
        )
        if metadata_complete is None: # no such source
            self.response= Response().get_response(20)
            self.request= "unknown"
        else:
            self.response= Response().get_response(0)
            self.request= metadata_complete['name']
            conn_coll= metadata_complete['ns'] # collection name
            cursor_fields= self.get_fields(metadata_complete) # full columns list
            cursor_sort= self.get_sort_list(metadata_complete) # list of sort columns
            cursor_query= metadata_complete['query'] # initial query
            clean_query= cursor_query.copy() # saving initial query for iteration
            if len(self.raw_query) == 0: # no additional query, extract the whole collection in a form of a tree
                cursor_query.update({ 'level': 'a' })
                cursor_data= datasrc[conn_coll].find(cursor_query, cursor_fields, sort=cursor_sort)
                for curr_root in cursor_data:
                    if 'idef' in clean_query: del clean_query['idef'] # clean the clean_query before it starts working
                    if 'parent' in clean_query: del clean_query['parent']
                    curr_branch= self.build_tree(datasrc[conn_coll], clean_query, cursor_fields, cursor_sort, curr_root['idef'])
                    tree.append(curr_branch)
            else:
                if 'idef' in self.raw_query: # root element
                    result_tree= self.build_tree(datasrc[conn_coll], cursor_query, cursor_fields, cursor_sort, self.raw_query['idef'])
                    if result_tree is not None:
                        tree.append(result_tree)
                    else: # error
                        self.response= Response().get_response(10)
                else: # means we deal with URL like /a/X/b/ or /a/X/b/Y/c - which is nonesense for a tree
                    self.response= Response().get_response(30)
        return tree

    def build_tree(self, cl, query, columns, sortby, root):
        """Return the record `root` with all descendants attached recursively."""
        out= {}
        query['idef']= root
        root_elt= cl.find_one(query, columns, sort=sortby)
        if root_elt is not None:
            if not root_elt['leaf']: # there are children
                if 'idef' in query: del query['idef'] # don't need this anymore
                self._get_children_recurse(root_elt, cl, query, columns, sortby)
            else: # no children, just leave root_elt as it is
                pass
            out.update(root_elt)
        else: # error - no such data!
            out= None
        return out

    def _get_children_recurse(self, parent, coll, curr_query, columns, srt):
        """Attach `children` lists depth-first; stops at records marked leaf."""
        if not parent['leaf']:
            parent['children']= []
            curr_query['parent']= parent['idef']
            crs= coll.find(curr_query, columns, sort=srt)
            if crs.count() > 0:
                for elm in crs:
                    parent['children'].append(elm)
                    self._get_children_recurse(elm, coll, curr_query, columns, srt)

    def get_count(self, datasrc, collection, count_query= {}):
        # NOTE(review): mutable default argument — harmless here only because
        # count_query is read, never mutated.
        self.count= datasrc[collection].find(count_query).count()
        return self.count

    def get_fields(self, meta_data):
        """Build the Mongo projection dict from metadata columns / user fields."""
        fields_dict= {'_id':0} # _id is never returned
        if len(self.request_fields) > 0:
            fields_dict.update(self.request_fields)
            field_names_complete= [] # reverse check
            for fl in meta_data['columns']:
                field_names_complete.append(fl['key'])
            aux_fields= []
            if 'aux' in meta_data:
                aux_fields= [k for k,v in meta_data['aux'].iteritems()]
            self.fill_warning( field_names_complete + aux_fields ) # fill self.warning
        else:
            if 'aux' in meta_data:
                fields_dict.update(meta_data['aux'])
            md_fields= meta_data['columns'] # list of main columns to be returned
            for fld in md_fields:
                fields_dict[fld['key']]= 1
        return fields_dict

    def get_sort_list(self, meta_data):
        """Convert metadata's numbered sort spec into a pymongo sort list."""
        sort_list= []
        try:
            cond_sort= meta_data['sort']
        except:
            cond_sort= None
        if cond_sort is not None:
            # keys are stringified priorities; sort numerically to keep order
            srt= [int(k) for k, v in cond_sort.iteritems()]
            srt.sort()
            for sort_key in srt:
                sort_list.append((cond_sort[str(sort_key)].keys()[0], cond_sort[str(sort_key)].values()[0]))
        return sort_list

    def fill_warning(self, field_names_list):
        """
        check if there are user defined fields
        that are not listed in metadata
        """
        warning_list= []
        for fld in self.raw_usrdef_fields:
            if fld not in field_names_list:
                warning_list.append( fld )
        if len(warning_list) == 0:
            pass
        elif len(warning_list) == 1:
            self.warning= "There is no such column as '%s' in meta-data!" % warning_list[0]
        elif len(warning_list) > 1:
            self.warning= "There are no such columns as ['%s'] in meta-data!" % "', '".join(warning_list)
class State:
    """
    the class saves to and restores from mongo
    the current state of open datasheets
    Each state lives in its own collection named sd_NNNNNNN; a counter
    document in `state_counter` tracks the last issued id.
    """
    def __init__(self):
        self.response= Response().get_response(0) # CollectionState class is optimistic

    def __del__(self):
        pass

    def get_state(self, state_id, datasrc):
        """ extracts user view (string) from the db """
        data= ''
        success= True
        if state_id == 0 or state_id is None:
            self.response= Response().get_response(42) # state id not specified
            success= False
        if success:
            state_coll_name= ''
            try:
                state_coll_name= "_".join(["sd", "%07d" % state_id])
            except:
                success= False
                self.response= Response().get_response(42) # wrong state id
        if success:
            state_dict= datasrc[state_coll_name].find_one() # state is always a single object
            if state_dict is not None:
                data= state_dict['content']
            else:
                self.response= Response().get_response(40) # no state data
        return data

    def save_state(self, state_object, datasrc):
        """
        saves state compiled by a user
        into the db collection sd_0000xxxx
        returns xxxx (id for permalink)
        """
        state_id= 0 # generate state id
        state_id_dict= datasrc[state_counter].find_one( {'curr_state_id': True } )
        if state_id_dict is None: # not yet created
            state_id, state_id_inc= 0, 1
            state_id_dict= {
                "curr_state_id":True,
                "curr_id": state_id,
                "increment": state_id_inc
                }
        if state_object is not None: # save object to the db
            state_id= int(state_id_dict['curr_id']) + state_id_dict['increment']
            state_collection_name= "_".join(["sd", "%07d" % state_id]) # sd - state data
            success= True
            try:
                datasrc[state_collection_name].insert({ 'content': state_object })
            except Exception as e:
                success= False
            if success: # incrementing state counter & saving it into the db
                state_id_dict['curr_id']= state_id
                datasrc[state_counter].save(state_id_dict)
            else:
                self.response= Response().get_response(41) # can't insert into the db
        else:
            self.response= Response().get_response(40) # bad request - data is empty
        return state_id
class Search:
    """
    the class searches through data in mongo
    Two entry points: `search_data` (regex over string columns, strict pass
    plus optional per-word loose pass) and `search_text` (prefix regex over a
    precomputed `_keywords` array).
    """
    def __init__(self):
        self.set_query( None )
        self.set_scope( None )
        self.strict= False
        self.found= {} # results in short form
        self.response= Response().get_response(0) # Search class is optimistic

    def __del__(self):
        self.found= None

    def set_query(self, query):
        self.qrystr= query

    def set_scope(self, scope):
        # scope: list of "dataset-view-issue" strings selecting the collections
        self.scope= scope

    def set_lookup(self, lookup):
        # lookup: optional whitelist of column keys to search in
        self.lookup= lookup
        if lookup is None:
            self.lookup= []

    def switch_strict(self, strict):
        # one-way switch: can enable strict mode, never disables it
        if strict:
            self.strict= True

    def do_search(self, regx, dbconn, collect):
        """
        TO-DO:
        - search with automatic substitution of specific polish letters
          (lowercase & uppercase): user can enter 'lodz', but the search
          should find 'Łódż' as well
        - search with flexible processing of prefixes and suffixes
          (see str.endswith and startswith)
        - search in 'info' keys (???)
        Runs `regx` against every searchable string column of every scoped
        collection; dedupes via self.found so repeated passes skip known hits.
        """
        ns_list= [] # list of results
        error_list= []
        found_num= 0 # number of records found
        exclude_fields= ['idef', 'idef_sort', 'parent', 'parent_sort', 'level'] # not all fields are searchable!
        for sc in self.scope: # fill the list of collections
            sc_list= sc.split('-')
            dataset, idef, issue= int(sc_list[0]), int(sc_list[1]), str(sc_list[2])
            collect.set_fields( ["perspective", "ns", "columns"] )
            metadata= collect.get_complete_metadata(dataset, idef, issue, dbconn)
            if metadata is None:
                error_list.append('collection not found %s' % sc)
            else:
                curr_coll_dict= {
                    'perspective': metadata['perspective'],
                    'dataset': dataset,
                    'view': idef,
                    'issue': issue,
                    'data': []
                    }
                collect.set_fields(None) # presumed to search through all fields
                for fld in metadata['columns']:
                    if 'processable' in fld:
                        check_str= fld['type'] == 'string'
                        if len(self.lookup) > 0:
                            check_valid= fld['key'] in self.lookup
                        else:
                            check_valid= fld['key'] not in exclude_fields
                        if fld['processable'] and check_str and check_valid:
                            search_query= { fld['key']: regx }
                            collect.set_query(search_query)
                            # actual query to the db
                            found= collect.get_data(dbconn, dataset, idef, issue)
                            for found_elt in found:
                                # control of what is already found
                                if sc not in self.found:
                                    self.found[sc]= []
                                if found_elt['idef'] not in self.found[sc]:
                                    self.found[sc].append(found_elt['idef'])
                                    curr_coll_dict['data'].append({
                                        'key': fld['key'],
                                        'text': found_elt[str(fld['key'])],
                                        'idef': found_elt['idef'],
                                        'parent': found_elt['parent']
                                        })
                                    found_num += 1
                if len(curr_coll_dict['data']) > 0:
                    ns_list.append(curr_coll_dict)
        out_dict= { 'records_found': found_num, 'result': ns_list }
        if len(error_list) > 0:
            out_dict['errors']= error_list
        return out_dict

    def build_regexp(self, searchline, strict):
        """ construct regexp for search: strict mode matches whole words only """
        if strict:
            # version 1 - have problems
            # searchline= "^%(lookupstr)s([^a-z][^A-Z][^0-9]|\s)|([^a-z][^A-Z][^0-9]|\s)%(lookupstr)s([^a-z][^A-Z][^0-9]|\s)|([^a-z][^A-Z][^0-9]|\s)%(lookupstr)s$" % { "lookupstr": searchline }
            # version 2
            searchline= r"^%(lookupstr)s(\s)+|^%(lookupstr)s$|\s+%(lookupstr)s\s+|\s+%(lookupstr)s$" % { "lookupstr": searchline }
        return searchline

    def search_data(self, datasrc, qrystr, scope, strict, lookup= None):
        """Two-pass column search: full phrase first, then (if not strict) word by word."""
        self.set_query(qrystr)
        self.set_scope(scope)
        self.switch_strict(strict)
        self.set_lookup(lookup)
        regxsearch= self.build_regexp( self.qrystr, self.strict )
        regx= re.compile(regxsearch, re.IGNORECASE)
        coll= Collection()
        out= { }
        total_rec= 0
        self.found= {}
        # 1st pass
        tl1= time() # starting to search
        result_1= self.do_search(regx, datasrc, coll)
        total_rec += result_1['records_found']
        tlap1= time()-tl1 # 1st pass finished
        result_1.update( { "search_time": "%0.6f" % tlap1 } )
        out['strict']= result_1
        # 2nd pass
        second_pass_list= []
        if not self.strict: # second pass makes sense
            result_2= { 'records_found': 0 } # blank dict for 2nd pass
            tl2= time() # starting to search
            second_pass_list= self.qrystr.split(' ')
            if len(second_pass_list) > 1: # 2nd pass makes sense only if search str consists of >1 words
                for wrd in second_pass_list:
                    lookup= self.build_regexp(wrd, True) # we look for separate words using strict
                    regx= re.compile(lookup, re.IGNORECASE)
                    result_2_curr= self.do_search(regx, datasrc, coll)
                    if result_2_curr['records_found'] > 0:
                        # NOTE(review): 'result' is overwritten (not merged) each
                        # word, so only the last matching word's hits survive —
                        # confirm whether that is intended.
                        result_2['result']= result_2_curr['result']
                        result_2['records_found'] += result_2_curr['records_found']
                        total_rec += result_2_curr['records_found']
            tlap2= time()-tl2 # 2nd pass finished
            result_2.update( { "search_time": "%0.6f" % tlap2 } )
            out['loose']= result_2
        tlap= time()-tl1
        out.update( {
            'search_time_total': "%0.6f" % tlap,
            'records_found_total': total_rec
            } )
        return out

    def search_text(self, datasrc, qrystr, scope, strict, display= None):
        """Keyword search over the `_keywords` array (prefix match, AND of words)."""
        self.set_query(qrystr)
        self.set_scope(scope)
        self.switch_strict(strict)
        out= { 'result': [] }
        self.found= {}
        qry_dict= { }
        collect= Collection()
        words_list= qrystr.strip().lower().split(' ')
        if len(words_list) > 1: # multiple words
            kwds_list= []
            for word in words_list:
                query_regx= r'^%s' % word
                # query_regx= r'%s' % word # WARNING! it works, but extremely slow!
                if strict:
                    query_regx += '$'
                    # query_regx = '^' + query_regx + '$'
                kwds_list.append({ '_keywords': re.compile(query_regx) })
            qry_dict.update({ '$and': kwds_list })
        elif len(words_list) == 1: # one word
            query_regx= r'^%s' % qrystr
            # query_regx= r'%s' % qrystr # WARNING! it works, but extremely slow!
            if strict:
                query_regx += '$'
                # query_regx = '^' + query_regx + '$'
            qry_dict.update( { '_keywords': re.compile(query_regx) } )
        collect.set_query(qry_dict)
        tl1= time() # starting to search
        found_total= 0
        # iterating through collections
        error_list= []
        for sc in self.scope: # fill the list of collections
            found_num= 0
            sc_list= sc.split('-')
            dataset, idef, issue= int(sc_list[0]), int(sc_list[1]), str(sc_list[2])
            collect.set_fields( ["perspective", "ns"] ) # WARNING! eventually we can add "columns" here for indication where the result is found
            metadata= collect.get_complete_metadata(dataset, idef, issue, datasrc)
            if metadata is None:
                error_list.append('collection not found %s' % sc)
            else:
                curr_coll_dict= {
                    'perspective': metadata.get('perspective', ''),
                    'dataset': dataset,
                    'view': idef,
                    'issue': issue,
                    'data': []
                    }
                collect.set_fields(display)
                # actual query to the db
                found= collect.get_data(datasrc, dataset, idef, issue)
                for rec in found:
                    curr_coll_dict['data'].append(rec)
                    found_num += 1
                if found_num > 0:
                    found_total += found_num
                    out['result'].append(curr_coll_dict)
        tlap= time()-tl1
        out.update( {
            'search_time_total': "%0.6f" % tlap,
            'records_found_total': found_total
            } )
        return out
| 2.25 | 2 |
simple/nim/nimnega_v2.py | feiooo/games-puzzles-algorithms | 0 | 12770602 | <filename>simple/nim/nimnega_v2.py
#!/usr/bin/env python3
# compute nim values using negamax and a dictionary
# that holds values already computed RBH 2019
# version 2.0 December 2020 new features
# - verbose option, showing win/loss value once it is known
# - move initialization of the start position outside the main loop
# - tidying, e.g. pad variable instead of ' '
def get_piles():
    """Prompt until the user enters valid nim pile sizes; return them as a tuple."""
    while True:
        raw = input('nim game pile sizes (eg. 3 5 7) ')
        try:
            piles = tuple(int(tok) for tok in raw.split())
        except ValueError:
            pass  # non-integer token: fall through to the retry message
        else:
            if piles and min(piles) >= 0:  # at least one pile, none negative
                return piles
        print('invalid, try again')
def win_loss(b):
    """Render a boolean game value as the string 'win' or 'loss'."""
    if b:
        return 'win'
    return 'loss'
"""
sd dictionary(state: boolean), true if player-to-move wins
"""
def winning(nim_psn, sd, depth, verbose):
    """Negamax for nim with memoization.

    Returns True iff the player to move from position `nim_psn` (a tuple of
    pile sizes) wins.  `sd` maps positions to win/loss booleans and is updated
    in place; `depth` controls root-only/indented printing; `verbose` enables
    a full search trace.
    """
    indent = ' ' * depth
    if nim_psn in sd:  # memo hit
        if depth == 0: print('solved before search')
        if verbose: print(indent, nim_psn, win_loss(sd[nim_psn]), 'dict')
        return sd[nim_psn]
    if verbose: print(indent, nim_psn)
    piles = tuple(sorted(nim_psn))  # children are explored in sorted form
    for pile, size in enumerate(piles):
        for remain in range(size):  # leave `remain` stones in this pile
            child = tuple(sorted(piles[:pile] + (remain,) + piles[pile + 1:]))
            if not winning(child, sd, depth + 1, verbose):
                # a child that loses for the opponent makes this a win
                if verbose: print(indent, nim_psn, win_loss(True), 'losing child')
                sd[nim_psn] = True  # memoize before return
                if depth == 0: print('\nwin: move to ', child, len(sd), 'states')  # show a winning move
                return True
    # every move hands the opponent a win
    if verbose: print(indent, nim_psn, win_loss(False), 'no win')
    sd[nim_psn] = False  # memoize before return
    if depth == 0: print('\nloss,', len(sd), 'states')
    return False
# Driver: read a position, seed the memo with the terminal loss, solve verbosely.
v = get_piles()
S = dict()  # memo: position tuple -> True iff the player to move wins
empty = tuple([0]*len(v))# position (0 0 ... )
S.update({empty: False}) # position (0 0 ... ) loses
w = winning(v, S, 0, True)
| 3.0625 | 3 |
process_run_chicago.py | Multiscale-Genomics/C-HiC | 0 | 12770603 | #!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from CHiC.tool.run_chicago import ChicagoTool
#################################################
class process_run_chicago(Workflow):
    """
    Workflow for processing capture Hi-C files with the CHiCAGO peak
    caller. Delegates the actual work to ChicagoTool (runChicago.R).
    """
    def __init__(self, configuration=None):
        """
        Initiate the class.

        Parameters:
        -----------
        configuration: dict
            dictionary with parameters for different tools, indicating
            how to run each of them

        NOTE(review): Workflow.__init__ is never called here and
        self.configuration is updated without being initialised in this
        class — this relies on the Workflow base class providing a
        `configuration` attribute. Confirm against basic_modules.workflow.
        """
        logger.info("Initiating process_runChicago")
        if configuration is None:
            configuration = {}
        self.configuration.update(configuration)
    def run(self, input_files, metadata, output_files):
        """
        Main function that runs the chicago pipeline with the
        runChicago.R wrapper.

        Parameters
        ----------
        input_files: dict
            location with the .chinput files.
            chinput_file: str in case there is one input file
            chinput_file: comma separated list in case there
            is more than one input file.
        metadata: dict
            Input metadata, str
        output_files: dict
            output file locations

        Returns
        -------
        output_files : dict
            Folder location with the output files
        output_metadata: dict
            Output metadata for the associated files in output_files

        NOTE(review): on IOError this method logs and implicitly returns
        None (not a 2-tuple) — callers that unpack the result will fail.
        """
        try:
            chicago_caller = ChicagoTool(self.configuration)
            output_files_generated, output_metadata = chicago_caller.run(
                input_files, metadata, output_files)
            return output_files_generated, output_metadata
        except IOError:
            logger.info("chicago failed to generate output files =(")
################################################################
def main_json(config, in_metadata, out_metadata):
    """Launch the CHiCAGO workflow from JSON configuration files.

    config / in_metadata point at config.json- and metadata.json-style
    files; out_metadata is where the output metadata is written.
    Returns whatever JSONApp.launch returns.
    """
    # Instantiate and launch the app (imported lazily to keep module
    # import light).
    print("1. Instantiate and launch the App")
    from apps.jsonapp import JSONApp
    json_app = JSONApp()
    results = json_app.launch(
        process_run_chicago, config, in_metadata, out_metadata)
    # Report completion and pass the results back to the caller.
    print("2. Execution finished; see " + out_metadata)
    print(results)
    return results
###############################################################
if __name__ == "__main__":
#set up the command line parameters
PARSER = argparse.ArgumentParser(
description="Chicago algorithm for capture Hi-C peak detection")
PARSER.add_argument("--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
#Get matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
| 1.929688 | 2 |
# Bit masks for the individual flag bits of a status byte.
TEMPERATURE = 0x01
GAS = 0x02
VOLTAGE = 0x04
DELAY = 0x10
TEST_SUCCESS = 0x3F
ERROR_RELAY_OPEN = 0x04
ERROR_RELAY_CLOSED = 0x08
# The mode is encoded in the two high bits (mask 0xC0).
MODE_CONFIG = 0x80
MODE_RUN = 0x40
MODE_ERROR = 0xC0
MODE_TIMEOUT = 0x00
WRITE = 0x20
def print_status(state):
    """Print a human-readable decoding of a status byte, one item per line."""
    # The two high bits select exactly one of the four modes.
    mode = state & MODE_ERROR
    if mode == MODE_ERROR:
        print("ERROR MODE")
    elif mode == MODE_TIMEOUT:
        print("TIMEOUT")
    elif mode == MODE_CONFIG:
        print("CONFIG MODE")
    else:  # mode == MODE_RUN
        print("RUN MODE")
    # All test/error flag bits clear -> everything passed.
    if not (state & TEST_SUCCESS):
        print("OK")
    # Pick the label table: a WRITE operation reports which values were
    # written; otherwise the set bits are error conditions.
    if state & WRITE:
        print("WRITE OPERATION:")
        labels = (
            (TEMPERATURE, "TEMPERATURE"),
            (GAS, "GAS LEVEL"),
            (ERROR_RELAY_OPEN, "VOLTAGE LEVEL"),
            (ERROR_RELAY_CLOSED, "VOLTAGE LEVEL"),
            (DELAY, "DELAY"),
        )
    else:
        labels = (
            (TEMPERATURE, "TEMPERATURE EXCEEDED"),
            (GAS, "GAS LEVEL EXCEEDED"),
            (ERROR_RELAY_OPEN, "RELAY STUCK OPEN"),
            (ERROR_RELAY_CLOSED, "RELAY STUCK CLOSED"),
        )
    for mask, label in labels:
        if state & mask:
            print(label)
def mode_is_run(state):
    """Return True when the mode bits signal RUN."""
    return state & MODE_ERROR == MODE_RUN
def mode_is_config(state):
    """Return True when the mode bits signal CONFIG."""
    return state & MODE_ERROR == MODE_CONFIG
def mode_is_timeout(state):
    """Return True when the mode bits signal TIMEOUT (both clear)."""
    return state & MODE_ERROR == MODE_TIMEOUT
examples/beta/cdc-postgres/main.py | beneath-hq/beneath | 65 | 12770605 | import asyncio
import beneath
import psycopg2
import json
import yaml
from datetime import datetime
from schemas import get_schema, check_for_and_encode_ts
with open(".development.yaml", "r") as ymlfile:
config = yaml.safe_load(ymlfile)
POLLING_INTERVAL = 5
SCHEMA = """
type Change @schema {
table: String! @key
timestamp: Timestamp! @key
operation: String! @key
value: String!
}
"""
def connect_to_source_db():
    """Connect to the source Postgres database and return a cursor.

    Connection parameters come from the loaded .development.yaml config.
    autocommit is enabled — presumably so replication-slot reads are not
    held inside a transaction (confirm).
    """
    conn = psycopg2.connect(
        database=config["postgres"]["database"],
        user=config["postgres"]["username"],
        password=config["postgres"]["password"],
        host=config["postgres"]["host"],
        port="5432",
    )
    conn.autocommit = True
    cursor = conn.cursor()
    return cursor
# Module-level cursor shared by the polling generator and fan_out.
cursor = connect_to_source_db()
async def get_all_changes(p):
    """Async generator: poll the logical replication slot forever and
    yield one record per change with keys table/timestamp/operation/value.

    p: the Beneath pipeline object (only referenced by the commented-out
    checkpointing code below).
    """
    while True:
        cursor.execute(
            f"""
            SELECT data FROM pg_logical_slot_get_changes('{config['postgres']['replication_slot']}', NULL, NULL,
            'include-lsn', 'True', 'include-timestamp', 'True', 'add-tables', '{','.join(config['postgres']['tables'])}');
        """
        )
        txns = cursor.fetchall()
        for txn in txns:
            txn_json = json.loads(txn[0])
            changes = txn_json["change"]
            for change in changes:
                yield {
                    "table": change["table"],
                    # "00" is appended before %z parsing — presumably the
                    # slot's timestamp carries a two-digit UTC offset that
                    # must be widened to four digits; confirm.
                    "timestamp": datetime.strptime(
                        f"{txn_json['timestamp']}00", "%Y-%m-%d %H:%M:%S.%f%z"
                    ),
                    "operation": change["kind"],
                    "value": json.dumps(
                        {
                            k: change.get(k)
                            for k in (
                                "columnnames",
                                "columntypes",
                                "columnvalues",
                                "oldkeys",
                            )
                        }
                    ),
                }
        # TODO: consider checkpointing the LSN (but pg_logical_slot_get_changes() doesn't let us choose LSN position)
        # p.checkpoints.set("nextlsn", txn_json["nextlsn"])
        await asyncio.sleep(POLLING_INTERVAL)
def filter_for_table(table):
    """Return an async filter that passes through only changes for *table*.

    The returned coroutine-generator takes a raw change record (as yielded
    by get_all_changes) and, when the table matches, yields one flattened
    row: column values plus _updated_at (and _deleted_at for deletes).
    """
    async def _only_this_table(in_record):
        # Ignore changes that belong to a different table.
        if in_record["table"] != table:
            return
        payload = json.loads(in_record["value"])
        operation = in_record["operation"]
        if operation in ("insert", "update"):
            # TODO: test more types, might have to do more type conversions
            out_record = {}
            for idx, col in enumerate(payload["columnnames"]):
                out_record[col] = check_for_and_encode_ts(
                    payload["columntypes"][idx], payload["columnvalues"][idx]
                )
            out_record["_updated_at"] = in_record["timestamp"]
        if operation == "delete":
            # TODO: Handle required non-key columns.
            # - Option1: Set Replica Identity to FULL for all tables
            # -- not ideal, since a) requires more user setup and b) passes more data
            # - Option2: Just generate synthetic data here
            # -- the row is getting deleted anyways, so the values don't matter
            old_keys = payload["oldkeys"]
            out_record = dict(zip(old_keys["keynames"], old_keys["keyvalues"]))
            out_record["_updated_at"] = in_record["timestamp"]
            out_record["_deleted_at"] = in_record["timestamp"]
        yield out_record
    return _only_this_table
def fan_out(p, all_changes, list_of_tables):
    """Create one filtered, per-table output stream on the pipeline.

    list_of_tables: ["schemaA.table1", "schemaA.table2", "schemaB.table1", ...]
    """
    for schema_table in list_of_tables:
        schema = schema_table.split(".")[0]  # a Postgres "schema" (a namespace)
        table = schema_table.split(".")[1]
        table_changes = p.apply(all_changes, filter_for_table(table))
        p.write_table(
            table_changes,
            f"{config['beneath']['username']}/{config['beneath']['project']}/{config['postgres']['database']}-{schema}-{table}",
            schema=get_schema(cursor, table),  # a Beneath "schema" (type info)
            description=f"{table} table replicated from Postgres",
        )
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True, disable_checkpoints=True)
p.description = "Postgres CDC"
all_changes = p.generate(get_all_changes)
p.write_table(
all_changes,
f"{config['beneath']['username']}/{config['beneath']['project']}/{config['postgres']['database']}-cdc",
schema=SCHEMA,
description="Raw data captured from a Postgres CDC service",
)
fan_out(p, all_changes, config["postgres"]["tables"])
p.main()
| 2.375 | 2 |
spotify_terminal/display.py | marcdjulien/spotifyterminal | 0 | 12770606 | <reponame>marcdjulien/spotifyterminal
import time
import datetime
from . import common
from . import unicurses as uc
from .gui import WindowManager
from .periodic import PeriodicCallback, PeriodicDispatcher
# Module-level logger scoped to this module's dotted name.
logger = common.logging.getLogger(__name__)
class CursesDisplay(object):
    """Curses front end for the terminal Spotify client.

    Runs the main loop: forwards key presses to the SpotifyState object,
    recomputes an activity level (active / idle / sleep) used to throttle
    the loop, and redraws every panel each cycle via the WindowManager.
    """
    # How often to clear the screen if it gets garbled.
    CLEAR_PERIOD = 60 * 15
    # Max amount of time to dispatch each cycle when the program is active.
    ACTIVE_PROGRAM_DISPATCH_TIME = 0.01
    # Max amount of time to dispatch each cycle when the program is idle.
    IDLE_PROGRAM_DISPATCH_TIME = 0.1
    # Max amount of time to dispatch each cycle when the program is sleeping.
    SLEEP_PROGRAM_DISPATCH_TIME = 0.4
    # How long to wait before declaring the program is not active and is idle.
    ACTIVE_TO_IDLE_TIMEOUT = 0.5
    # How long to wait before declaring the program is not idle and is sleeping.
    IDLE_TO_SLEEP_TIMEOUT = 5 * 60
    # Overlay windows that stay hidden unless they currently have focus
    # (see set_popup_window).
    POP_UP_WINDOW_NAMES = ["search", "help", "popup", "select_device"]
    # Activity levels used to pick the dispatch period.
    ACTIVE_STATE, IDLE_STATE, SLEEP_STATE = (1, 2, 3)
    def __init__(self, sp_state):
        """Build the display around the given SpotifyState object."""
        self.state = sp_state
        """The SpotifyState object."""
        self.activity_state = self.ACTIVE_STATE
        self.dispatch_time = self.ACTIVE_PROGRAM_DISPATCH_TIME
        """Active, idle, or sleep."""
        self.wm = WindowManager()
        """The WindowManager."""
        self.create_all_windows()
        self._running = True
        """Whether to continue running."""
        self.other_tasks = PeriodicDispatcher([
            PeriodicCallback(self.CLEAR_PERIOD, self.wm.clear)
        ])
        """Other tasks to run periodically."""
        # This increments each control loop. The initial value of -200 gives
        # roughly 2s (200 * ACTIVE_PROGRAM_DISPATCH_TIME) before the footer
        # begins to roll.
        # TODO: Not valid for all activity states
        self._footer_roll_index = -200
        # Timestamp of the last key press; drives the activity level.
        self.last_pressed_time = time.time()
        # Dispatch period for each activity level.
        self.dispatch_times = {
            self.ACTIVE_STATE: self.ACTIVE_PROGRAM_DISPATCH_TIME,
            self.IDLE_STATE: self.IDLE_PROGRAM_DISPATCH_TIME,
            self.SLEEP_STATE: self.SLEEP_PROGRAM_DISPATCH_TIME
        }
    def start(self):
        """Run the main loop until the state stops, then tear down curses."""
        # Initial render.
        self.wm.render()
        logger.info("="*50)
        logger.info("Starting main loop")
        logger.info("="*50)
        while self._running:
            with common.ContextDuration() as t:
                self.process()
                self.render()
                self.other_tasks.dispatch()
            # Sleep away whatever is left of this cycle's time budget.
            time.sleep(max(0, self.dispatch_time - t.duration))
        # Tear down the display.
        self.wm.exit()
        common.clear()
    def process(self):
        """Dispatch main program logic."""
        # Handle user input.
        self.process_input()
        # Do any calculations related to rendering.
        self.render_calcs()
        # Are we still running?
        self._running = self.state.is_running()
    def process_input(self):
        """Process all keyboard input."""
        # Gather all key inputs.
        key_pressed = False
        keys = []
        while True:
            k = self.wm.get_input()
            if k != -1:
                keys.append(k)
            else:
                break
        # For now, we only process individual key commands.
        # Something like "Shift + Left Arrow" will result in multiple
        # keys and could trigger unintentional commands.
        # Disallow this until we support these kinds of key combinations.
        if len(keys) == 1:
            key_pressed = True
            key = keys[0]
            self.last_pressed_time = time.time()
            self.state.process_key(key)
        # If we didn't press a key, kick the state anyway.
        if not key_pressed:
            self.state.process_key(None)
    def render_calcs(self):
        """Perform any calculations related to rendering."""
        # TODO: Make this state based?
        # Pick the activity level from how long ago a key was pressed.
        key_timeout = time.time() - self.last_pressed_time
        if key_timeout <= self.ACTIVE_TO_IDLE_TIMEOUT:
            self.activity_state = self.ACTIVE_STATE
        elif self.ACTIVE_TO_IDLE_TIMEOUT < key_timeout and key_timeout <= self.IDLE_TO_SLEEP_TIMEOUT:
            self.activity_state = self.IDLE_STATE
        elif self.IDLE_TO_SLEEP_TIMEOUT < key_timeout:
            self.activity_state = self.SLEEP_STATE
        self.dispatch_time = self.dispatch_times[self.activity_state]
        self.set_active_window()
        self.set_popup_window()
    def render(self):
        """Redraw every panel and push the result to the screen."""
        # Check if we need to resize.
        if self.wm.resize_requested():
            self.resize()
        # Draw all of the panels.
        self.render_user_panel()
        self.render_tracks_panel()
        self.render_player_panel()
        self.render_other_panel()
        self.render_footer()
        self.render_search_panel()
        self.render_select_device_panel()
        self.render_popup_panel()
        self.render_help_panel()
        # Render!
        self.wm.render()
    def render_user_panel(self):
        """Draw the left panel: title, display name and playlist list."""
        win = self.wm.get_window("user")
        rows, cols = win.get_size()
        win.erase()
        # Draw border.
        win.draw_box()
        # Show the title.
        title_start_line = 1
        win.draw_text(
            "[Spotify Terminal]",
            title_start_line, 2,
            cols-3,
            style=uc.A_BOLD,
            centered=True
        )
        # Show the display_name.
        display_name_start_line = title_start_line + 1
        win.draw_text(
            self.state.get_display_name(),
            display_name_start_line, 2,
            cols-3,
            style=uc.A_BOLD,
            centered=True
        )
        # Bar.
        win.draw_text(
            "_"*cols,
            display_name_start_line+1, 1,
            cols-2,
            style=uc.A_NORMAL
        )
        # Show the playlists.
        playlists = [str(playlist) for playlist in self.state.user_list]
        selected_i = self.state.user_list.i
        playlist_start_line = display_name_start_line + 2
        nplaylist_rows = rows-(playlist_start_line+1)
        win.draw_list(
            playlists,
            playlist_start_line, nplaylist_rows,
            2, cols-4,
            selected_i,
            scroll_bar=(playlist_start_line+1, cols-2, nplaylist_rows-1)
        )
    def render_tracks_panel(self):
        """Draw the track list panel, marking the currently playing track."""
        win = self.wm.get_window("tracks")
        rows, cols = win.get_size()
        win.erase()
        # Draw border.
        win.draw_box()
        # Show the title of the context.
        title_start_row = 1
        win.draw_text(
            self.state.tracks_list.header,
            title_start_row, 2,
            cols-3,
            style=uc.A_BOLD
        )
        # Show the tracks.
        selected_i = self.state.tracks_list.i
        track_start_line = title_start_row + 2
        text_disp_width = cols-3
        tracks = []
        for track in self.state.tracks_list:
            track_str = track.str(text_disp_width-1)  # -1 to leave room for the ">" marker
            if track == self.state.get_currently_playing_track():
                track_str = ">"+track_str
            else:
                track_str = " "+track_str
            tracks.append(track_str)
        win.draw_list(
            tracks,
            track_start_line, rows - 4,
            1, text_disp_width,
            selected_i,
            scroll_bar=(2, cols-2, rows-3)
        )
    def render_player_panel(self):
        """Draw the player panel: current track, progress, device, controls."""
        win = self.wm.get_window("player")
        rows, cols = win.get_size()
        win.erase()
        # Draw border.
        win.draw_box()
        # Display currently playing track
        current_track = self.state.get_currently_playing_track()
        win.draw_text(current_track.track, 1, 2, cols-3, style=uc.A_BOLD)
        win.draw_text(current_track.album, 2, 2, cols-3, style=uc.A_BOLD)
        win.draw_text(current_track.artist, 3, 2, cols-3, style=uc.A_BOLD)
        if self.state.progress is not None:
            dur, total_dur = self.state.progress
            # Only show hours when the track is at least an hour long.
            fmt = "%H:%M:%S" if total_dur >= 60 * 60 * 1000 else "%M:%S"
            dur = time.strftime(fmt, time.gmtime(dur//1000))
            total_dur = time.strftime(fmt, time.gmtime(total_dur//1000))
            win.draw_text("{} // {}".format(dur, total_dur), 4, 2, cols-3, style=uc.A_BOLD)
        # Display the current device
        device_info = "{} ({}%)".format(self.state.current_device, self.state.volume)
        win.draw_text(device_info, 8, 2, cols-3, style=uc.A_NORMAL)
        # Display the media icons
        col = 2
        for i, action in enumerate(self.state.player_list):
            # Highlight the selected control when the player list has focus.
            if ((i == self.state.player_list.i)
                    and self.state.current_state.get_list().name == "player"):
                style = uc.A_BOLD | uc.A_STANDOUT
            else:
                style = uc.A_NORMAL
            icon = action.title
            win.draw_text(icon, 6, col, style=style)
            col += len(icon) + 2
    def render_other_panel(self):
        """Draw the panel listing the other (misc) actions."""
        win = self.wm.get_window("other")
        rows, cols = win.get_size()
        win.erase()
        # Draw border.
        win.draw_tab_box()
        # Display other actions
        win.draw_list(
            self.state.other_actions_list,
            1, rows-2,
            1, cols-1,
            self.state.other_actions_list.i
        )
    def render_footer(self):
        """Draw the footer: loading bar, prompts, rolling entry text,
        alerts and the track progress bar."""
        win = self.wm.get_window("footer")
        rows, cols = win.get_size()
        win.erase()
        if self.state.is_loading():
            percent = self.state.get_loading_progress()
            if percent is not None:
                text = " " * int(cols * percent)
                win.draw_text(text, rows-1, 0, style=uc.A_STANDOUT)
        elif self.state.is_adding_track_to_playlist():
            text = "Select a playlist to add this track"
            win.draw_text(text, rows-1, 0, style=uc.A_BOLD)
        elif self.state.is_creating_command():
            # Show the command being typed plus a block cursor.
            start_col = 1
            query = self.state.get_command_query()
            text = str(query) + " "
            win.draw_text(text, rows-1, start_col)
            win.draw_text(
                query.get_current_index() or " ",
                rows-1, start_col+query.get_cursor_index(),
                style=uc.A_STANDOUT
            )
        else:
            entry = self.state.current_state.get_list().get_current_entry()
            if entry:
                ncols = cols-1
                long_str = common.ascii(str(entry))
                short_str = entry.str(ncols) if hasattr(entry, "str") else long_str
                # Check if we need to scroll or not.
                if "".join(short_str.split()) == "".join(long_str.split()):
                    win.draw_text(short_str, rows-1, 0, style=uc.A_BOLD)
                    # This ensures that we always start from the same position
                    # when we go from a static footer to a long footer that needs rolling.
                    self._footer_roll_index = -200
                else:
                    self._footer_roll_index += 1
                    footer_roll_index = max(0, self._footer_roll_index)
                    footer_roll_index //= 10
                    # Double the string length so that we always uniformly roll
                    # even in the case the entire string length is less than the terminal width.
                    # Also, add a border to easily identify the end.
                    long_str = 2 * (long_str + " | ")
                    text = list(long_str)
                    for _ in range(footer_roll_index):
                        text.append(text.pop(0))
                    text = "".join(text)
                    text = text[0:ncols]
                    win.draw_text(text, rows-1, 0, style=uc.A_BOLD)
        if self.state.alert.is_active():
            text = self.state.alert.get_message()
            text = "[{}]".format(text)
            win.draw_text(text, rows-1, 0, style=uc.A_STANDOUT)
        # Track progress bar
        progress = self.state.get_track_progress()
        if progress:
            percent = float(progress[0])/progress[1]
            text = "-"*int(cols*percent)
            win.draw_text(text, rows-2, 0, cols, style=uc.A_BOLD)
    def render_search_panel(self):
        """Draw the search overlay: header plus result list."""
        win = self.wm.get_window("search")
        rows, cols = win.get_size()
        win.erase()
        win.draw_box()
        n_display_cols = cols - 4
        # Show the title of the context.
        title_start_row = 1
        win.draw_text(
            self.state.search_list.header,
            title_start_row, 2,
            n_display_cols,
            style=uc.A_BOLD
        )
        # Show the results.
        results = [r.str(n_display_cols) for r in self.state.search_list]
        selected_i = self.state.search_list.i
        win.draw_list(
            results,
            3, rows-4,
            2, n_display_cols,
            selected_i
        )
    def render_select_device_panel(self):
        """Draw the device-selection overlay with an animated header."""
        win = self.wm.get_window("select_device")
        rows, cols = win.get_size()
        win.erase()
        win.draw_box()
        # Show the title of the context (the dots animate with wall time).
        title_start_row = 1
        win.draw_text(
            "Searching." + ("."*(int(time.time())%3)),
            title_start_row,
            2, cols-3,
            style=uc.A_BOLD
        )
        win.draw_text(
            "Open Spotify on your computer or any device",
            title_start_row+1,
            2, cols-3
        )
        selected_i = self.state.device_list.i
        win.draw_list(
            self.state.device_list,
            title_start_row+3, rows-4, 2,
            cols-3,
            selected_i
        )
    def render_popup_panel(self):
        """Draw the generic popup overlay from the current state's list."""
        win = self.wm.get_window("popup")
        rows, cols = win.get_size()
        win.erase()
        win.draw_box()
        current_popup_list = self.state.current_state.get_list()
        # Show the title of the context.
        prompt = current_popup_list.header
        title_start_row = 1
        win.draw_text(
            prompt,
            title_start_row,
            (cols//2) - (len(prompt)//2) - 1,
            cols-3,
            style=uc.A_BOLD
        )
        selected_i = current_popup_list.i
        list_start_row = title_start_row + 2
        win.draw_list(
            current_popup_list,
            list_start_row, rows - list_start_row - 1,
            2, cols-4,
            selected_i,
            centered=True
        )
    def render_help_panel(self):
        """Draw the shortcuts (help) overlay."""
        win = self.wm.get_window("help")
        rows, cols = win.get_size()
        win.erase()
        win.draw_box()
        current_help_list = self.state.current_state.get_list()
        # Show the title of the context.
        prompt = "Shortcuts"
        title_start_row = 1
        win.draw_text(
            prompt,
            title_start_row,
            (cols//2) - (len(prompt)//2) - 1,
            cols-3,
            style=uc.A_BOLD
        )
        selected_i = current_help_list.i
        list_start_row = title_start_row + 2
        win.draw_list(
            current_help_list,
            list_start_row, rows - list_start_row - 1,
            2, cols-4,
            selected_i
        )
    def set_active_window(self):
        """Give focus to the window matching the current UI state."""
        popup_states = [
            self.state.a2p_confirm_state,
            self.state.remove_track_confirm_state,
            self.state.remove_playlist_confirm_state,
            self.state.select_artist_state
        ]
        playlist_states =[
            self.state.user_state,
            self.state.a2p_select_state
        ]
        # First match wins: overlays before the main layout panels.
        if self.state.in_search_menu():
            window_name = "search"
        elif self.state.in_select_device_menu():
            window_name = "select_device"
        elif self.state.is_in_state(popup_states):
            window_name = "popup"
        elif self.state.is_in_state(self.state.help_state):
            window_name = "help"
        elif self.state.is_in_state(self.state.player_state):
            window_name = "player"
        elif self.state.is_in_state(self.state.other_actions_state):
            window_name = "other"
        elif self.state.is_in_state(self.state.tracks_state):
            window_name = "tracks"
        elif self.state.is_in_state(playlist_states):
            window_name = "user"
        else:
            window_name = "footer"
        self.wm.set_focus(window_name)
    def set_popup_window(self):
        """Show each overlay window only while it has focus."""
        for window_name in self.POP_UP_WINDOW_NAMES:
            window = self.wm.get_window(window_name)
            if window.get_focus():
                window.show()
            else:
                window.hide()
    def resize(self):
        """Recompute every window's geometry after a terminal resize."""
        self.wm.resize(self.get_window_sizes())
    def create_all_windows(self):
        """Create every window with its initial geometry; overlays start hidden."""
        sizes = self.get_window_sizes()
        self.wm.create_window("user", *sizes["user"])
        self.wm.create_window("tracks", *sizes["tracks"])
        self.wm.create_window("player", *sizes["player"])
        self.wm.create_window("other", *sizes["other"])
        self.wm.create_window("search", *sizes["search"])
        self.wm.create_window("select_device", *sizes["select_device"])
        self.wm.create_window("help", *sizes["help"])
        self.wm.create_window("popup", *sizes["popup"])
        self.wm.create_window("footer", *sizes["footer"])
        for name in self.POP_UP_WINDOW_NAMES:
            self.wm.get_window(name).hide()
    def get_window_sizes(self):
        """Return {window name: (rows, cols, start_row, start_col)} computed
        from the current terminal size."""
        rows, cols = self.wm.get_size()
        user = (rows-2,
                cols//4,
                0,
                0)
        tracks = (rows*2//3,
                  cols-(user[1])-1,
                  0,
                  user[1]+user[3])
        player = (rows-tracks[0]-2,
                  tracks[1],
                  tracks[0],
                  tracks[3])
        start = user[1]+(player[1]*2//3)
        other = (rows - tracks[0] - 2 - 3,
                 cols - start - 3,
                 player[2],
                 start)
        search = (rows*8//10,
                  cols*8//10,
                  rows//10,
                  cols//10)
        select_device = (rows*6//10,
                         cols*6//10,
                         rows*2//10,
                         cols*2//10)
        help = (rows*8//10,
                cols*8//10,
                rows//10,
                cols//10)
        popup = (rows//4,
                 cols//4,
                 rows*3//8,
                 cols*3//8)
        footer = (2, cols, rows-2, 0)
        return {
            "user": user,
            "tracks": tracks,
            "player": player,
            "other": other,
            "search": search,
            "select_device": select_device,
            "help": help,
            "popup": popup,
            "footer": footer
        }
pytools/log.py | alexfikl/pytools | 52 | 12770607 | from warnings import warn
warn("pytools.log was moved to https://github.com/illinois-ceesd/logpyle/. "
"I will try to import that for you. If the import fails, say "
"'pip install logpyle', and change your imports from 'pytools.log' "
"to 'logpyle'.", DeprecationWarning)
from logpyle import * # noqa # pylint: disable=import-error
| 1.476563 | 1 |
chapter_3/wheres_waldorf.py | prabal1997/uva_judge | 1 | 12770608 | import sys
# Read the whole input file, dropping blank lines; the first remaining
# line holds the number of test cases, the rest is consumed below.
input_file = open("input_test_case", "r");
input_file = [ line.strip() for line in input_file if (line.strip() != "") ];
case_count = int(input_file[0]);
input_file = input_file[1:];
# access data
def access_data(input_matrix, start_range, end_range):
    """Read the characters on the straight line from start_range to
    end_range (inclusive), stepping one cell at a time.

    input_matrix -- sequence of equal-length strings (the grid)
    start_range, end_range -- (row, col) tuples; the line may be
        horizontal, vertical or diagonal
    Returns a string; cells outside the grid read as '#', and if the two
    axes run out at different times the remainder is padded with '#'.
    For a single cell (start == end) the single character is returned.
    """
    def get_data(matrix, addr):
        # Character at addr, or '#' when addr is out of bounds.
        row, col = addr;
        if ( ( row >= len(matrix) or col >= len(matrix[0]) ) or ( row < 0 or col < 0 ) ):
            return '#';
        return matrix[row][col];
    # handle special case
    if (start_range == end_range):
        return get_data(input_matrix, start_range);
    # define start, end, and increment variables
    start_row_idx, start_col_idx = start_range[0], start_range[1];
    end_row_idx, end_col_idx = end_range[0], end_range[1];
    # BUGFIX: the original sign used x/abs(x), which yields a float in
    # Python 3; the float increment then produced float indices and a
    # TypeError on the second step. (x > 0) - (x < 0) stays an int.
    sign = lambda x: (x > 0) - (x < 0);
    row_inc, col_inc = [ sign(end_range[idx] - start_range[idx]) for idx in range(0, 1+1)];
    # Walk from start towards end; stop as soon as either axis passes its
    # end value (a zero increment never terminates its axis, hence the
    # +1 via (inc == 0) so the other axis controls the loop).
    output_list = ['#'] * max(abs(end_row_idx - start_row_idx)+1, abs(end_col_idx - start_col_idx)+1);
    counter = 0;
    while(start_row_idx != end_row_idx + row_inc + (row_inc==0) and start_col_idx != end_col_idx + col_inc + (col_inc == 0) ):
        output_list[counter] = str(get_data(input_matrix, (start_row_idx, start_col_idx)));
        counter += 1;
        start_col_idx += col_inc;
        start_row_idx += row_inc;
    return "".join(output_list);
# gives the preferred index out of two
def give_preferred_index(first, second):
    """Return whichever (row, col) pair comes first in reading order.

    Comparison is lexicographic: smaller row wins, then smaller column.
    On a full tie the second argument is returned (both are equal in
    value, so the choice is unobservable), matching the original
    branch-by-branch logic.
    """
    return first if first < second else second;
# store all the cases in ram as [(grid, word_list)] — parse the flattened
# input: grid dimensions, grid rows, dictionary size, dictionary words.
input_cases = [[] for idx in range(case_count)];
case_counter, file_read_counter = 0, 0;
row_size, col_size, dict_size = 0, 0, 0;
row_read, word_read = 0, 0;
grid, input_dict = [], [];
while(case_counter < case_count):
    # read grid dimensions ("rows cols")
    grid_size = input_file[file_read_counter]; file_read_counter += 1;
    grid_size = grid_size.split(' ');
    row_size, col_size = int(grid_size[0]), int(grid_size[1]);
    # read in the grid, one row per line
    grid = [0]*row_size;
    for row_idx in range(row_size):
        grid[row_idx] = input_file[file_read_counter]; file_read_counter += 1;
    # read in the dict_size
    dict_size = int(input_file[file_read_counter]); file_read_counter += 1;
    # read in the dictionary (the words to search for)
    input_dict = [0]*dict_size;
    for dict_idx in range(dict_size):
        input_dict[dict_idx] = input_file[file_read_counter]; file_read_counter += 1;
    # store case data, reset everything
    input_cases[case_counter] = (tuple(grid), tuple(input_dict));
    row_size, col_size, dict_size, row_read, word_read = 0, 0, 0, 0, 0;
    grid, input_dict = [], [];
    # increment case count
    case_counter +=1;
# For each case, brute-force every start cell and every direction
# (row_sgn, col_sgn), keeping the first match in reading order.
# NOTE(review): the (0, 0) direction is included; it can only ever match
# one-character words (start == end), so it is harmless but redundant.
for case in input_cases:
    output_list = [(float("inf"), float("inf"))] * len(case[1]);
    row_size, col_size = len(case[0]), len(case[0][0]);
    # iterate through each position in grid, and look for word in EVERY orientation
    for word_idx, word in enumerate(case[1]):
        current_word_loc = (float("inf"), float("inf"));
        for row_sgn in [-1, 0, 1]:
            for col_sgn in [-1, 0, 1]:
                for col_idx in range(col_size):
                    for row_idx in range(row_size):
                        start_addr = (row_idx, col_idx);
                        end_addr = (row_idx+row_sgn*(len(word)-1), col_idx+col_sgn*(len(word)-1));
                        found_word = access_data(case[0], start_addr, end_addr);
                        # case-insensitive comparison, per the problem statement
                        if (word.lower() == found_word.lower()):
                            current_word_loc = give_preferred_index(current_word_loc, start_addr);
        output_list[word_idx] = current_word_loc;
    # print 1-based coordinates, one word per line, blank line between cases
    for start_addr in output_list:
        print( str(start_addr[0]+1) + " " + str(start_addr[1]+1) );
    print("");
| 3.40625 | 3 |
toleranceinterval/__init__.py | jedludlow/tolerance_interval_py | 9 | 12770609 | <filename>toleranceinterval/__init__.py
from . import oneside # noqa F401
from . import twoside # noqa F401
from . import hk # noqa F401
from . import checks # noqa F401
import os as _os # noqa F401
# add rudimentary version tracking: read the version string from the
# package's VERSION file, closing the handle promptly (the previous
# open(...).read() left the file to be closed by garbage collection,
# which raises ResourceWarning under -W error).
__VERSION_FILE__ = _os.path.join(_os.path.dirname(__file__), 'VERSION')
with open(__VERSION_FILE__) as _version_file:
    __version__ = _version_file.read().strip()
| 1.578125 | 2 |
generate/generate_all_data_augmentation.py | prise-3d/Thesis-NoiseDetection-metrics | 0 | 12770610 | # main imports
import sys, os, argparse
import numpy as np
import random
import time
import json
# image processing imports
from PIL import Image
from ipfml.processing import transform, segmentation
from ipfml import utils
# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
from data_attributes import get_image_features
# getting configuration information
zone_folder = cfg.zone_folder
min_max_filename = cfg.min_max_filename_extension
# define all scenes values
scenes_list = cfg.scenes_names
scenes_indexes = cfg.scenes_indices
choices = cfg.normalization_choices
zones = cfg.zones_indices
seuil_expe_filename = cfg.seuil_expe_filename
features_choices = cfg.features_choices_labels
output_data_folder = cfg.output_data_folder
data_augmented_filename = cfg.data_augmented_filename
generic_output_file_svd = '_random.csv'
def generate_data_svd(data_type, mode, path):
    """
    @brief Method which generates all .csv files from scenes
    @param data_type, feature choice
    @param mode, normalization choice ('svd', 'svdn' or 'svdne')
    @param path, data augmented path
    @return nothing
    """
    scenes = os.listdir(path)
    # remove min max file (and previously generated output) from scenes folder.
    # BUGFIX: the original condition `min_max_filename and
    # generic_output_file_svd not in s` only tested the second substring
    # because of operator precedence; both substrings are now excluded.
    # NOTE(review): `scenes` is not used further below — confirm whether
    # it is still needed at all.
    scenes = [s for s in scenes
              if min_max_filename not in s and generic_output_file_svd not in s]
    # keep in memory min and max data found from data_type
    min_val_found = sys.maxsize
    max_val_found = 0
    data_min_max_filename = os.path.join(path, data_type + min_max_filename)
    data_filename = os.path.join(path, data_augmented_filename)
    # getting output filename
    output_svd_filename = data_type + "_" + mode + generic_output_file_svd
    # context manager so the output CSV is flushed/closed even on error
    # (the original handle was never closed)
    with open(os.path.join(path, output_svd_filename), 'w') as current_file:
        with open(data_filename, 'r') as f:
            lines = f.readlines()
            number_of_images = len(lines)
            for index, line in enumerate(lines):
                data = line.split(';')
                scene_name = data[0]
                number_of_samples = data[2]
                label_img = data[3]
                img_path = data[4].replace('\n', '')
                block = Image.open(os.path.join(path, img_path))
                ############################
                # feature computation part #
                ############################
                data = get_image_features(data_type, block)
                ##################
                # Data mode part #
                ##################
                # modify data depending mode
                if mode == 'svdne':
                    # getting max and min information from min_max_filename
                    # (renamed handle: the original shadowed the outer `f`)
                    with open(data_min_max_filename, 'r') as min_max_file:
                        min_val = float(min_max_file.readline())
                        max_val = float(min_max_file.readline())
                    data = utils.normalize_arr_with_range(data, min_val, max_val)
                if mode == 'svdn':
                    data = utils.normalize_arr(data)
                # save min and max found from dataset in order to normalize data using whole data known
                if mode == 'svd':
                    current_min = data.min()
                    current_max = data.max()
                    if current_min < min_val_found:
                        min_val_found = current_min
                    if current_max > max_val_found:
                        max_val_found = current_max
                # write one CSV row: scene;samples;label;v1;v2;...
                current_file.write(scene_name + ';' + number_of_samples + ';' + label_img + ';')
                for val in data:
                    current_file.write(str(val) + ";")
                # progress indicator, rewritten in place via the ANSI "cursor up" code
                print(data_type + "_" + mode + " - " + "{0:.2f}".format((index + 1) / number_of_images * 100.) + "%")
                sys.stdout.write("\033[F")
                current_file.write('\n')
            print('\n')
    # save current information about min file found
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')
    print("%s_%s : end of data generation\n" % (data_type, mode))
def main():
    """CLI entry point: generate feature CSVs (all modes) for one feature
    or for every known feature."""
    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes (keep in memory min and max value found)")
    parser.add_argument('--feature', type=str,
                        help="feature choice in order to compute data (use 'all' if all features are needed)")
    parser.add_argument('--folder', type=str, help="folder which contains the whole dataset")
    args = parser.parse_args()
    feature_choice = args.feature
    dataset_folder = args.folder
    # Resolve which features to process, validating a specific choice.
    if feature_choice == 'all':
        selected_features = features_choices
    else:
        if feature_choice not in features_choices:
            raise ValueError('Unknown feature choice : ', features_choices)
        selected_features = [feature_choice]
    # Generate every normalization variant for each selected feature.
    for feature_name in selected_features:
        for mode in ('svd', 'svdn', 'svdne'):
            generate_data_svd(feature_name, mode, dataset_folder)
if __name__== "__main__":
    main()
| 2.1875 | 2 |
PedSimulation/gui/drawer/scene_drawer.py | HDL951236874/PedSimulation | 1 | 12770611 | <gh_stars>1-10
class SceneDrawer(object):
    """Draws every entity of a scene using each entity's own drawer.

    Entities that do not yet have a drawer attached are first registered
    with the entity register, which provides one, and are then drawn.
    """
    def __init__(self, device, register):
        self.device = device
        self.background_color = "black"
        self.entity_register = register
    def draw(self, scene):
        """Draw all entities of *scene*, lazily attaching missing drawers."""
        for entity in scene.entities:
            if entity.drawer is None:
                # First encounter: let the register attach a drawer.
                self.entity_register.add_drawer_support(entity)
            entity.drawer.draw(entity)
| 3.0625 | 3 |
nrm_django/contacts/models.py | 18F/NRM-Grants-Agreements | 5 | 12770612 | <reponame>18F/NRM-Grants-Agreements
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
from grants.choices import BOOL_CHOICES
# TO-DO: either figure out this whole in_instance thing or remove it.
FAKE_INSTANCE = "10602"
class AdminUnit(models.Model):
    """Unmanaged model over the legacy ``admin_units`` table.

    The schema is owned by the legacy NRM database, so Django must not
    create/alter/drop it (``managed = False``).
    """
    admin_unit_cn = models.CharField(max_length=40, primary_key=True)
    fs_unit_id = models.CharField(max_length=4)
    name = models.CharField(max_length=255)
    short_name = models.CharField(max_length=255)
    # CN of the parent unit, if any -- stored as a raw key, not a ForeignKey.
    parent_unit_fk = models.CharField(max_length=40, blank=True, null=True)
    last_update = models.DateField()
    class Meta:
        managed = False
        db_table = "admin_units"
class Contact(models.Model):
    """Unmanaged model over the legacy ``ii_contacts`` table.

    A contact is either a person or an organization (distinguished by
    ``obj_name`` -- see ``__str__``) that can be associated with
    grants/agreements.  The schema is owned by the legacy NRM database.
    """
    # Control number: the legacy primary key.
    cn = models.CharField(
        primary_key=True,
        max_length=34,
        editable=False,
    )
    id = models.CharField(
        "Contact ID",
        max_length=30,
        editable=False,
        help_text="""
        For new Contacts, this field is a system generated number.
        For older contacts it will be a shortened version of the contact's full name or organization title.
        """,
    )
    obj_tech = models.CharField(max_length=30, editable=False, default="ORACLE")
    obj_name = models.CharField(max_length=30, editable=False, default="ORGANIZATION")
    obj_class = models.CharField(max_length=30, editable=False, default="CONTACT")
    # TO-DO: Capture request.user on save()
    created_by = models.CharField(
        max_length=30, editable=False, help_text="Should capture username."
    )
    created_date = models.DateField(auto_now_add=True)
    # Need to capture the instance, when we have one.
    created_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, editable=False, default=FAKE_INSTANCE
    )
    trans_id = models.CharField("Trans ID", max_length=34, blank=True, null=True)
    # User guide says this is display only. Unclear how new values are created.
    # TO-DO: Find out how
    name = models.CharField(
        max_length=120,
        blank=True,
        null=True,
        editable=False,
        help_text="Contact's Last Name, First Name",
    )
    master_site = models.DecimalField(
        max_digits=6, decimal_places=0, blank=True, null=True
    )
    remarks = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text="Any comments regarding the contact or cooperator.",
    )
    modified_by = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        editable=False,
        help_text="Should capture username.",
    )
    modified_date = models.DateField(blank=True, null=True, auto_now=True)
    modified_in_instance = models.DecimalField(
        max_digits=6,
        decimal_places=0,
        blank=True,
        null=True,
        editable=False,
        default=FAKE_INSTANCE,
    )
    security_id = models.CharField(max_length=30, blank=True, null=True, editable=False)
    agency_code = models.CharField(max_length=4)
    admin_org_ind = models.CharField("Admin Org", max_length=1, choices=BOOL_CHOICES)
    # ein and duns are noted as display only in the User Guide
    # TO-DO: determine how new entries can be created
    ein = models.CharField(
        "EIN",
        max_length=40,
        blank=True,
        null=True,
        editable=False,
        help_text="Employer Identification Number.",
    )
    duns = models.CharField(
        "DUNS",
        max_length=40,
        blank=True,
        null=True,
        editable=False,
        help_text="""
        The identification number assigned to the cooperator/recipient by the
        Dun and Bradstreet Data Universal Numbering System (DUNS).
        Call 1-866-705-5711 or visit the DUNS Number Request Form to request and register for a DUNS number.
        """,
    )
    faads_cn = models.CharField(max_length=34, blank=True, null=True, editable=False)
    ffis_vendor_status = models.CharField(
        "FFIS vendor status", max_length=40, blank=True, null=True
    )
    alc_code = models.CharField(
        "ALC",
        max_length=40,
        blank=True,
        null=True,
        help_text="The Agency Location Code.",
    )
    parent_duns = models.CharField(
        "Parent DUNS",
        max_length=40,
        blank=True,
        null=True,
        help_text="""
        The identification number assigned to the cooperator/recipient by the
        Dun and Bradstreet Data Universal Numbering System (DUNS).""",
    )
    # Choices
    duns_confidence_cd = models.CharField(
        "DUNS Confidence Score",
        max_length=2,
        blank=True,
        null=True,
        help_text="Confidence Code assigned via SAM validation.",
    )
    international = models.CharField(
        max_length=1, blank=True, null=True, default="N", choices=BOOL_CHOICES
    )
    archived_flag = models.CharField(max_length=1, default="N", choices=BOOL_CHOICES)
    office_hours = models.CharField(max_length=300, blank=True, null=True)
    class Meta:
        managed = False
        db_table = "ii_contacts"
        unique_together = (("obj_tech", "obj_name", "obj_class", "id", "trans_id"),)
    def __str__(self):
        # Organizations show contact id plus title; people show just their name.
        if self.obj_name == "ORGANIZATION":
            return "{} - {}".format(self.id, self.name)
        return self.name
class AccomplishmentInstrument(models.Model):
    """Unmanaged model over the legacy ``accplishment_instruments`` table.

    Ties a Grant (one-to-one via its ``gid``) to its managing Contact,
    plus legacy timber-sale (``tim_*``) and audit columns.
    """
    cn = models.CharField(primary_key=True, max_length=34)
    grant = models.OneToOneField(
        "grants.Grant", to_field="gid", on_delete=models.CASCADE, db_column="id"
    )
    name = models.CharField(max_length=200, blank=True, null=True)
    obj_tech = models.CharField(max_length=30)
    obj_name = models.CharField(max_length=30)
    obj_class = models.CharField(max_length=30)
    managing_contact = models.ForeignKey(
        Contact, on_delete=models.CASCADE, db_column="managing_cont_cn"
    )
    # CN of the instrument that superseded this one, if any.
    replaced_by_ai_cn = models.CharField(max_length=34, blank=True, null=True)
    description = models.CharField(max_length=4000, blank=True, null=True)
    exp_expiration_date = models.DateField(blank=True, null=True)
    tim_allow_updates = models.CharField(max_length=1, blank=True, null=True)
    tim_contract_no = models.CharField(max_length=15, blank=True, null=True)
    tim_region = models.CharField(max_length=2, blank=True, null=True)
    tim_forest = models.CharField(max_length=2, blank=True, null=True)
    tim_district = models.CharField(max_length=2, blank=True, null=True)
    acbladd_bill_address_id = models.DecimalField(
        max_digits=10, decimal_places=0, blank=True, null=True
    )
    trans_id = models.CharField(max_length=34, blank=True, null=True)
    created_by = models.CharField(max_length=30)  # TO-DO: set this
    created_date = models.DateField(auto_now_add=True)
    created_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, default=FAKE_INSTANCE
    )
    modified_by = models.CharField(
        max_length=30, blank=True, null=True
    )  # TO-DO: set this
    modified_date = models.DateField(blank=True, null=True, auto_now=True)
    modified_in_instance = models.DecimalField(
        max_digits=6, decimal_places=0, blank=True, null=True, default=FAKE_INSTANCE
    )
    master_site = models.DecimalField(
        max_digits=6, decimal_places=0, blank=True, null=True
    )
    security_id = models.CharField(max_length=30)
    agency_code = models.CharField(max_length=4, blank=True, null=True)
    parent_cn = models.CharField(max_length=34, blank=True, null=True)
    class Meta:
        managed = False
        db_table = "accplishment_instruments"
        unique_together = (("obj_name", "grant", "security_id", "trans_id"),)
class AccinstContLink(models.Model):
    """Unmanaged join table (``ii_accinst_cont_links``).

    Links an AccomplishmentInstrument to a Contact, recording the role
    ("link type"/"sub type") the contact plays on that instrument plus
    FFIS/FMMI vendor bookkeeping columns.
    """
    accinst = models.ForeignKey(
        AccomplishmentInstrument, models.DO_NOTHING, db_column="accinst_cn"
    )
    contact = models.ForeignKey(Contact, models.DO_NOTHING, db_column="cont_cn")
    link_type_name = models.CharField(max_length=40)
    start_date = models.DateField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    remarks = models.CharField(max_length=255, blank=True, null=True)
    leading_ind = models.CharField(max_length=1, blank=True, null=True)
    department = models.CharField(max_length=40, blank=True, null=True)
    division = models.CharField(max_length=40, blank=True, null=True)
    # Choices
    ffis_vendor_id_pre_fmmi = models.CharField(
        "FMMI Vendor ID",
        max_length=11,
        blank=True,
        null=True,
        help_text="The cooperator/recipient's VEND number as assigned by FMMI.",
    )
    # TO-DO: Find out what this field is for
    faads_addr_cn = models.CharField(max_length=34, blank=True, null=True)
    # CHOICES
    institution_code = models.CharField(
        max_length=40,
        blank=True,
        null=True,
        help_text="""
        The institution code for State Controlled Institution of Higher Learning only.
        Required if FFATA reported.
        """,
    )
    fed_debt_delnqnt_ind = models.CharField(
        "Delinquent on Fed. Debt",
        max_length=1,
        default="N",
        choices=BOOL_CHOICES,
        blank=True,
        null=True,
        help_text="""
        Check this box to indicate if the applicant is delinquent on any federal debt.
        """,
    )
    payee_ind = models.CharField(
        "Payee", max_length=1, blank=True, null=True, choices=BOOL_CHOICES, default="N"
    )
    applicant_ind = models.CharField(max_length=1, blank=True, null=True)
    link_sub_type = models.CharField(
        "Sub Type",
        max_length=40,
        blank=True,
        null=True,
        help_text="""
        Indicates the type of contact, for example, FS Signatory Official (SO) or Reviewer (RW),
        depending on the responsibility/role the contact has with the instrument.
        """,
    )
    # Control number: the legacy primary key (declared mid-table in the legacy schema).
    cn = models.CharField(primary_key=True, max_length=34)
    ffis_can_number_status = models.CharField(max_length=40, blank=True, null=True)
    payer_ind = models.CharField(max_length=1, blank=True, null=True)
    fmmi_customer_number = models.CharField(max_length=10, blank=True, null=True)
    ffis_vendor_id = models.CharField(max_length=10, blank=True, null=True)
    fmmi_customer_number_status = models.CharField(max_length=40, blank=True, null=True)
    sam_expiration_date = models.DateField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = "ii_accinst_cont_links"
        unique_together = (("accinst", "contact", "link_type_name", "link_sub_type"),)
| 1.945313 | 2 |
integration_tests/test_manila_dalian_over_kuiper/step_3_run.py | KaushikChavali/hypatia | 70 | 12770613 | # The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import exputil
import time
try:
from .run_list import *
except (ImportError, SystemError):
from run_list import *
local_shell = exputil.LocalShell()
max_num_processes = 4  # cap on concurrent ns-3 runs (one detached screen each)
# Check that no screen is running
if local_shell.count_screens() != 0:
    print("There is a screen already running. "
          "Please kill all screens before running this analysis script (killall screen).")
    exit(1)
# Generate the commands
# Each run gets a fresh logs_ns3 directory; its waf invocation tees the
# console output into <run>/logs_ns3/console.txt.
commands_to_run = []
for run in get_tcp_run_list():
    logs_ns3_dir = "temp/runs/" + run["name"] + "/logs_ns3"
    local_shell.remove_force_recursive(logs_ns3_dir)
    local_shell.make_full_dir(logs_ns3_dir)
    commands_to_run.append(
        "cd ../../ns3-sat-sim/simulator; "
        "./waf --run=\"main_satnet "
        "--run_dir='../../integration_tests/test_manila_dalian_over_kuiper/temp/runs/" + run["name"] + "'\" "
        "2>&1 | "
        "tee '../../integration_tests/test_manila_dalian_over_kuiper/" + logs_ns3_dir + "/console.txt'"
    )
# Run the commands
print("Running commands (at most %d in parallel)..." % max_num_processes)
for i in range(len(commands_to_run)):
    print("Starting command %d out of %d: %s" % (i + 1, len(commands_to_run), commands_to_run[i]))
    local_shell.detached_exec(commands_to_run[i])
    # Throttle: wait for a screen slot to free up before launching the next run.
    while local_shell.count_screens() >= max_num_processes:
        time.sleep(2)
# Awaiting final completion before exiting
print("Waiting completion of the last %d..." % max_num_processes)
while local_shell.count_screens() > 0:
    time.sleep(2)
print("Finished.")
| 1.75 | 2 |
theory/lib/app_globals.py | ralfonso/theory | 4 | 12770614 | <reponame>ralfonso/theory
"""The application's Globals object"""
from theory.model.mpdpool import MPDPool
from theory.model.tconfig import TConfig
from pylons import config
class Globals(object):
    """Container for objects available throughout the life of the application.

    One instance of Globals is created during application initialization
    and is available during requests via the 'g' variable.
    """
    # Fields the search UI allows the user to query on.
    searchterms = ['Artist','Title','Album','Genre','Any']
    def __init__(self):
        """One instance of Globals is created during application
        initialization and is available during requests via the 'g'
        variable
        """
        self.tc = TConfig()
        self.p = MPDPool(self)
        self.get_genres()
    def get_genres(self):
        """Load all tracks and build the set of every unique genre in the MPD database."""
        self.genres = set()
        # MPD may not be configured/reachable yet (e.g. before initial setup),
        # so any failure simply leaves ``self.genres`` empty.
        try:
            m = self.p.connect()
            try:
                for track in m.listallinfo():
                    if 'genre' not in track:
                        continue
                    genre_value = track['genre']
                    # MPD returns either a single genre string or a list of them.
                    if isinstance(genre_value, list):
                        self.genres.update(genre_value)
                    else:
                        self.genres.add(genre_value)
            finally:
                # Always release the connection, even when listallinfo fails
                # (the original leaked it on error).
                m.disconnect()
        except Exception:
            # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
            # still propagate; genre loading stays best-effort.
            pass
| 2.875 | 3 |
data.py | AlejandroCatalina/lstm_wind_energy | 1 | 12770615 | <reponame>AlejandroCatalina/lstm_wind_energy
import torch
import torchvision
import torch.utils.data.dataset as data
import os
import gzip
class Sotavento(data.Dataset):
    """Sotavento wind-farm hourly measurements as a PyTorch ``Dataset``.

    Loads one of three pre-processed splits (train / validation / test)
    stored as ``torch.save``'d ``(data, targets)`` tensor pairs under
    ``<root>/Sotavento/processed``.
    """
    # Serialized (data, targets) pairs for each split.
    train_file = 'stv_h_train.pt'
    validation_file = 'stv_h_val.pt'
    test_file = 'stv_h_test.pt'
    def __init__(self,
                 root,
                 train=True,
                 validation=False,
                 transform=None,
                 target_transform=None,
                 download=False):
        """
        Args:
            root (str): Dataset root directory (``~`` is expanded).
            train (bool): Load the training split when True.
            validation (bool): When ``train`` is False, pick the validation
                split (True) or the test split (False).
            transform: Optional callable applied to each sample.
            target_transform: Optional callable applied to each target.
            download (bool): Kept for torchvision-style API parity; actual
                downloading is not implemented (see ``download``).
        """
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.validation = validation  # validation set
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            data_file = self.train_file
        else:
            if self.validation:
                data_file = self.validation_file
            else:
                data_file = self.test_file
        self.data, self.targets = torch.load(
            os.path.join(self.processed_folder, data_file))
        # set float32 type
        self.data = self.data.type(torch.Tensor)
        self.targets = self.targets.type(torch.Tensor)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (data, target) where target is index of the target class.
        """
        data, target = self.data[index], self.targets[index]
        if self.transform is not None:
            data = self.transform(data)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return data, target
    def __len__(self):
        """Return the number of samples in the loaded split."""
        return len(self.data)
    @property
    def raw_folder(self):
        """Directory expected to hold raw (unprocessed) files."""
        return os.path.join(self.root, self.__class__.__name__, 'raw')
    @property
    def processed_folder(self):
        """Directory holding the serialized split files."""
        return os.path.join(self.root, self.__class__.__name__, 'processed')
    @property
    def class_to_idx(self):
        # NOTE(review): relies on a ``self.classes`` attribute that is never
        # defined in this class -- presumably set by a subclass; verify.
        return {_class: i for i, _class in enumerate(self.classes)}
    def _check_exists(self):
        """Return True when both the train and test split files are present."""
        return os.path.exists(os.path.join(self.processed_folder, self.train_file)) and \
            os.path.exists(os.path.join(self.processed_folder, self.test_file))
    @staticmethod
    def extract_gzip(gzip_path, remove_finished=False):
        """Decompress ``gzip_path`` next to itself, optionally deleting the archive."""
        print('Extracting {}'.format(gzip_path))
        with open(gzip_path.replace('.gz', ''), 'wb') as out_f, \
                gzip.GzipFile(gzip_path) as zip_f:
            out_f.write(zip_f.read())
        if remove_finished:
            os.unlink(gzip_path)
    def download(self):
        # Intentionally a no-op: the dataset must be provided locally.
        print('Nothing to download!')
    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train is True else 'test'
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp,
                                     self.transform.__repr__().replace(
                                         '\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp,
                                   self.target_transform.__repr__().replace(
                                       '\n', '\n' + ' ' * len(tmp)))
        return fmt_str
| 2.796875 | 3 |
aws_toolbox/emails/email_sender.py | and-computers/aws_pytools | 0 | 12770616 | #!/usr/bin/env python3
import boto3
class EmailSender():
def __init__(self, from_address, session=None):
self._from_address = from_address
self._client = boto3.client('ses', region_name='us-east-1')
def send_email(self, to_address, subject, message):
# type checking
if isinstance(to_address, str):
# check if commas are contained
if "," in to_address:
# if commas contained split into multiple address
addresses_to_pass = to_address.split(",")
else:
# if there is no comma, its one address
addresses_to_pass = [to_address]
elif isinstance(to_address, list):
addresses_to_pass = to_address
else:
raise ValueError()
res = self._client.send_email(
Source=self._from_address,
Destination={
'ToAddresses': addresses_to_pass,
'CcAddresses': [],
'BccAddresses': []
},
Message={
'Subject': {
'Data': subject,
'Charset': 'utf-8'
},
'Body': {
'Text': {
'Data': message,
'Charset': 'utf-8'
},
# TODO: check if you can have both
'Html': {
'Data': message,
'Charset': 'utf-8'
}
}
}
)
return res
| 2.703125 | 3 |
nodes/roboclaw_node.py | utahrobotics/phidgets_ros | 0 | 12770617 | #!/usr/bin/env python
from __future__ import division
from math import pi, cos, sin
import sys
import diagnostic_msgs
import diagnostic_updater
sys.path.insert(1, '/home/usr2020/usr_ws/src/phidgets_ros/src')
from roboclaw_driver.roboclaw_driver import Roboclaw
import rospy
import tf
from geometry_msgs.msg import Quaternion, Twist
from nav_msgs.msg import Odometry
import threading
from motion_control.msg import Mobility
__author__ = "<EMAIL> (<NAME>)"
class EncoderOdom:
    """Integrates left/right wheel encoder ticks into a planar odometry
    estimate and publishes it on ``/roboclaw/odom``.
    """
    def __init__(self, ticks_per_meter, base_width):
        self.TICKS_PER_METER = ticks_per_meter  # encoder ticks per meter of wheel travel
        self.BASE_WIDTH = base_width  # distance between the wheel tracks (m)
        self.odom_pub = rospy.Publisher('/roboclaw/odom', Odometry, queue_size=10)
        # Integrated pose: x, y in meters; heading in radians.
        self.cur_x = 0
        self.cur_y = 0
        self.cur_theta = 0.0
        # Last raw encoder counts, used to compute per-update deltas.
        self.last_enc_left = 0
        self.last_enc_right = 0
        self.last_enc_time = rospy.Time.now()
    @staticmethod
    def normalize_angle(angle):
        """Wrap *angle* into the interval (-pi, pi]."""
        while angle > pi:
            angle -= 2.0 * pi
        while angle < -pi:
            angle += 2.0 * pi
        return angle
    def update(self, enc_left, enc_right):
        """Integrate new encoder readings into the pose.

        Returns:
            (vel_x, vel_theta): linear (m/s) and angular (rad/s) velocity
            over the interval since the previous update.
        """
        # NOTE(review): the left delta is negated -- presumably the left
        # encoder counts backwards on this robot; confirm against hardware.
        left_ticks = -(enc_left - self.last_enc_left)
        right_ticks = enc_right - self.last_enc_right
        self.last_enc_left = enc_left
        self.last_enc_right = enc_right
        dist_left = left_ticks / self.TICKS_PER_METER
        dist_right = right_ticks / self.TICKS_PER_METER
        #rospy.loginfo(self.TICKS_PER_METER)
        dist = (dist_right + dist_left) / 2.0
        current_time = rospy.Time.now()
        d_time = (current_time - self.last_enc_time).to_sec()
        self.last_enc_time = current_time
        # TODO find better way to determine if going straight, this means slight deviation is accounted for
        if left_ticks == right_ticks:
            # Straight-line motion: heading unchanged.
            d_theta = 0.0
            self.cur_x += dist * cos(self.cur_theta)
            self.cur_y += dist * sin(self.cur_theta)
        else:
            # Arc model: the robot followed a circle of radius r.
            d_theta = (dist_right - dist_left) / self.BASE_WIDTH
            r = dist / d_theta
            self.cur_x += r * (sin(d_theta + self.cur_theta) - sin(self.cur_theta))
            self.cur_y -= r * (cos(d_theta + self.cur_theta) - cos(self.cur_theta))
            self.cur_theta = self.normalize_angle(self.cur_theta + d_theta)
        if abs(d_time) < 0.000001:
            # Guard against division by (near-)zero elapsed time.
            vel_x = 0.0
            vel_theta = 0.0
        else:
            vel_x = dist / d_time
            vel_theta = d_theta / d_time
        #rospy.loginfo("enocder left: %d", self.last_enc_left)
        #rospy.loginfo("enocder right: %d", self.last_enc_right)
        #rospy.loginfo(vel_x)
        #rospy.loginfo(vel_theta)
        return vel_x, vel_theta
    def update_publish(self, enc_left, enc_right):
        """Integrate new encoder readings and publish the resulting odometry."""
        vel_x, vel_theta = self.update(enc_left, enc_right)
        self.publish_odom(self.cur_x, self.cur_y, self.cur_theta, vel_x, vel_theta)
    def publish_odom(self, cur_x, cur_y, cur_theta, vx, vth):
        """Publish an ``Odometry`` message for the given pose and velocities."""
        quat = tf.transformations.quaternion_from_euler(0, 0, cur_theta)
        current_time = rospy.Time.now()
        #br = tf.TransformBroadcaster()
        #br.sendTransform((cur_x, cur_y, 0),
        #                 tf.transformations.quaternion_from_euler(0, 0, cur_theta),
        #                 current_time,
        #                 "base_link",
        #                 "odom")
        odom = Odometry()
        odom.header.stamp = current_time
        odom.header.frame_id = 'odom'
        odom.pose.pose.position.x = cur_x
        odom.pose.pose.position.y = cur_y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = Quaternion(*quat)
        # Small covariance on x, y, yaw; very large values on z, roll,
        # pitch, which wheel odometry cannot observe.
        odom.pose.covariance[0] = 0.01
        odom.pose.covariance[7] = 0.01
        odom.pose.covariance[14] = 99999
        odom.pose.covariance[21] = 99999
        odom.pose.covariance[28] = 99999
        odom.pose.covariance[35] = 0.01
        odom.child_frame_id = 'base_link'
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = 0
        odom.twist.twist.angular.z = vth
        odom.twist.covariance = odom.pose.covariance
        self.odom_pub.publish(odom)
class Node:
    """ Class for running roboclaw ros node for 2 motors in a diff drive setup"""
    def __init__(self):
        """Read ROS params, open the front/rear Roboclaw controllers and set up pub/sub."""
        self.lock = threading.Lock()
        # Roboclaw status bit -> (diagnostic level, human-readable message).
        self.ERRORS = {0x0000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "Normal"),
                       0x0001: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M1 over current"),
                       0x0002: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M2 over current"),
                       0x0004: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Emergency Stop"),
                       0x0008: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Temperature1"),
                       0x0010: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Temperature2"),
                       0x0020: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Main batt voltage high"),
                       0x0040: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Logic batt voltage high"),
                       0x0080: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Logic batt voltage low"),
                       0x0100: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M1 driver fault"),
                       0x0200: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M2 driver fault"),
                       0x0400: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Main batt voltage high"),
                       0x0800: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Main batt voltage low"),
                       0x1000: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Temperature1"),
                       0x2000: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Temperature2"),
                       0x4000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "M1 home"),
                       0x8000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "M2 home")}
        rospy.init_node("roboclaw_node")
        #rospy.init_node("roboclaw_node", log_level=rospy.DEBUG)
        rospy.on_shutdown(self.shutdown)
        rospy.loginfo("Connecting to roboclaw")
        self.dev_name = rospy.get_param("~dev")
        self.baud_rate = int(rospy.get_param("~baud"))
        self.frontaddr = int(rospy.get_param("~frontaddr"))
        self.backaddr = int(rospy.get_param("~backaddr"))
        self.accel = int(rospy.get_param("~accel"))
        self._has_showed_message = False
        self.last_motor1_command = 0.0
        self.last_motor2_command = 0.0
        # self.accel_limit = 635 # ramp up to full speed (127) in 1/period * 127. current setting is 0.2 seconds
        self.roboclaw = Roboclaw(self.dev_name, self.baud_rate)
        status = self.roboclaw.Open()
        self.roboclaw.SetM1DefaultAccel(self.frontaddr, self.accel) # default is 655360
        self.roboclaw.SetM2DefaultAccel(self.frontaddr, self.accel)
        self.roboclaw.SetM1DefaultAccel(self.backaddr, self.accel)
        self.roboclaw.SetM2DefaultAccel(self.backaddr, self.accel)
        rospy.loginfo("Roboclaw Port Status: " + str(status))
        self.updater = diagnostic_updater.Updater()
        self.updater.setHardwareID("Roboclaw")
        self.updater.add(diagnostic_updater.FunctionDiagnosticTask("Vitals", self.check_vitals))
        # Log firmware versions of both controllers (best-effort).
        try:
            version = self.roboclaw.ReadVersion(self.frontaddr)
        except Exception as e:
            rospy.logwarn("Problem getting front roboclaw version")
            rospy.logdebug(e)
        if version is None:
            rospy.logwarn("Could not get version from front roboclaw")
        else:
            rospy.logdebug("Version " + str(repr(version[1])))
        try:
            version = self.roboclaw.ReadVersion(self.backaddr)
        except Exception as e:
            rospy.logwarn("Problem getting rear roboclaw version")
            rospy.logdebug(e)
        if version is None:
            rospy.logwarn("Could not get version from rear roboclaw")
        else:
            rospy.logdebug("Version "+ str(repr(version[1])))
        # Stop both controllers and zero the encoders before starting.
        self.roboclaw.SpeedM1M2(self.frontaddr, 0, 0)
        self.roboclaw.ResetEncoders(self.frontaddr)
        self.roboclaw.SpeedM1M2(self.backaddr, 0, 0)
        self.roboclaw.ResetEncoders(self.backaddr)
        self.LINEAR_MAX_SPEED = float(rospy.get_param("~linear/x/max_velocity"))
        self.ANGULAR_MAX_SPEED = float(rospy.get_param("~angular/z/max_velocity"))
        self.TICKS_PER_METER = float(rospy.get_param("~ticks_per_meter"))
        self.BASE_WIDTH = float(rospy.get_param("~base_width"))
        self.encodm = EncoderOdom(self.TICKS_PER_METER, self.BASE_WIDTH)
        self.last_set_speed_time = rospy.get_rostime()
        self.sub = rospy.Subscriber("motor_vel", Mobility, self.cmd_vel_callback, queue_size=5)
        # Seconds without a command before the motors are stopped (dead-man switch).
        self.TIMEOUT = 2
        rospy.sleep(1)
        rospy.logdebug("dev %s", self.dev_name)
        rospy.logdebug("baud %d", self.baud_rate)
        rospy.logdebug("front address %d", self.frontaddr)
        rospy.logdebug("back address %d", self.frontaddr)
        rospy.logdebug("max_speed %f", self.LINEAR_MAX_SPEED)
        rospy.logdebug("ticks_per_meter %f", self.TICKS_PER_METER)
        rospy.logdebug("base_width %f", self.BASE_WIDTH)
    def run(self):
        """Run the main ros loop"""
        rospy.loginfo("Starting motor drive")
        r_time = rospy.Rate(30)
        while not rospy.is_shutdown():
            with self.lock:
                # Dead-man switch: stop all four motors if no command arrived recently.
                if (rospy.get_rostime() - self.last_set_speed_time).to_sec() > self.TIMEOUT:
                    try:
                        self.roboclaw.ForwardM1(self.frontaddr, 0)
                        self.roboclaw.ForwardM2(self.frontaddr, 0)
                        self.roboclaw.ForwardM1(self.backaddr, 0)
                        self.roboclaw.ForwardM2(self.backaddr, 0)
                    except OSError as e:
                        rospy.logerr("Could not stop")
                        rospy.logdebug(e)
                    if (not self._has_showed_message):
                        rospy.loginfo("Did not get command for 1 second, stopping")
                        self._has_showed_message = True
                else:
                    self._has_showed_message = False
                # TODO need find solution to the OSError11 looks like sync problem with serial
                # NOTE(review): only enc1/enc2 are pre-initialized below; status1/crc1
                # are bound solely inside the try blocks and unused afterwards.
                status2, enc1, enc2, crc2 = None, None, None, None
                try:
                    status1, enc1, crc1 = self.roboclaw.ReadEncM1(self.frontaddr)
                except ValueError:
                    pass
                except OSError as e:
                    rospy.logwarn("ReadEncM1 OSError: %d", e.errno)
                    rospy.logdebug(e)
                try:
                    status2, enc2, crc2 = self.roboclaw.ReadEncM2(self.frontaddr)
                except ValueError:
                    pass
                except OSError as e:
                    rospy.logwarn("ReadEncM2 OSError: %d", e.errno)
                    rospy.logdebug(e)
                if ((enc1 is not None) and (enc2 is not None)):
                    rospy.logdebug(" Encoders %d %d" % (enc1, enc2))
                    self.encodm.update_publish(enc1, enc2)
                self.updater.update()
            r_time.sleep()
    def cmd_vel_callback(self, mobility):
        """Command the motors based on the incoming twist message"""
        with self.lock:
            now_time = rospy.get_rostime()
            dt = (now_time - self.last_set_speed_time).to_sec()
            self.last_set_speed_time = now_time
            rospy.logdebug("Front Left: %d Front Right: %d Rear Left: %d Rear Right: %d", mobility.front_left, mobility.front_right, mobility.rear_left, mobility.rear_left)
            # linear_x = -twist.linear.x
            # angular_z = twist.angular.z
            velocities = [mobility.front_left, mobility.front_right, mobility.rear_left, mobility.rear_right]
            #clamp
            velocities = [max(-self.LINEAR_MAX_SPEED, min(x, self.LINEAR_MAX_SPEED)) for x in velocities]
            #scale to motor pwm
            velocities = [int((x/self.LINEAR_MAX_SPEED)*127) for x in velocities]
            # Enforce acceleration limits
            # TODO: test this block. it has not been tested. if it doesn't work, just comment it out for now
            # TODO: use dt, the time since last command to enfore better acceleration limits
            #if (motor1_command - self.last_motor1_command)/dt > self.accel_limit:
            #    motor1_command = self.last_motor1_command + (self.accel_limit)*dt
            #    rospy.logdebug("Motor command exceeded acceleration limits, was clipped to %d", motor1_command)
            #elif (motor1_command - self.last_motor1_command)/dt < -self.accel_limit:
            #    motor1_command = self.last_motor1_command - (self.accel_limit)*dt
            #    rospy.logdebug("Motor command exceeded acceleration limits, was clipped to %d", motor1_command)
            #if (motor2_command - self.last_motor2_command)/dt > self.accel_limit:
            #    motor2_command = self.last_motor2_command + (self.accel_limit)*dt
            #    rospy.logdebug("Motor command exceeded acceleration limits, was clipped to %d", motor2_command)
            #elif (motor2_command - self.last_motor2_command)/dt < -self.accel_limit:
            #    motor2_command = self.last_motor2_command - (self.accel_limit)*dt
            #    rospy.logdebug("Motor command exceeded acceleration limits, was clipped to %d", motor2_command)
            # Clip commands to within bounds (-127,127)
            #motor1_command = int(max(-127, min(127, motor1_command)))
            #motor2_command = int(max(-127, min(127, motor2_command)))
            rospy.loginfo("%d, %d, %d, %d",int(velocities[0]),int(velocities[1]),int(velocities[2]),int(velocities[3]))
            # Roboclaw Forward*/Backward* take unsigned magnitudes, so the
            # sign of each command selects the call and the value is negated
            # for the backward direction.
            try:
                if(velocities[0]>=0):
                    rospy.loginfo("setting front left wheel to " + str(velocities[0]))
                    self.roboclaw.ForwardM1(self.frontaddr, velocities[0])
                else:
                    self.roboclaw.BackwardM1(self.frontaddr, (-velocities[0]))
                if(velocities[1]>=0):
                    rospy.loginfo("setting front right wheel to " + str(velocities[1]))
                    self.roboclaw.ForwardM2(self.frontaddr, velocities[1])
                else:
                    self.roboclaw.BackwardM2(self.frontaddr, -velocities[1])
                if(velocities[2]>=0):
                    rospy.loginfo("setting back left wheel to " + str(velocities[2]))
                    self.roboclaw.ForwardM1(self.backaddr, velocities[2])
                else:
                    self.roboclaw.BackwardM1(self.backaddr, -velocities[2])
                if(velocities[3]>=0):
                    rospy.loginfo("setting back right wheel to " + str(velocities[3]))
                    self.roboclaw.ForwardM2(self.backaddr, velocities[3])
                else:
                    self.roboclaw.BackwardM2(self.backaddr, -velocities[3])
            except OSError as e:
                rospy.logwarn("Roboclaw OSError: %d", e.errno)
                rospy.logdebug(e)
            #self.last_motor1_command = motor1_command
            #self.last_motor2_command = motor2_command
    def check_vitals(self, stat):
        """Check battery voltage and temperatures from roboclaw"""
        try:
            statusfront = self.roboclaw.ReadError(self.frontaddr)[1]
            statusrear = self.roboclaw.ReadError(self.backaddr)[1]
        except OSError as e:
            rospy.logwarn("Diagnostics OSError: %d", e.errno)
            rospy.logdebug(e)
            return
        # NOTE(review): both lookups use ``statusfront``; ``statusrear`` is
        # read but never used -- looks like a copy/paste bug to confirm.
        statefront, messagefront = self.ERRORS[statusfront]
        staterear, messagerear = self.ERRORS[statusfront]
        stat.summary(statefront, messagefront)
        stat.summary(staterear, messagerear)
        try:
            # Raw controller readings are tenths (volts/deg C) or hundredths (amps).
            stat.add("Front Main Batt V:", float(self.roboclaw.ReadMainBatteryVoltage(self.frontaddr)[1] / 10))
            stat.add("Front Logic Batt V:", float(self.roboclaw.ReadLogicBatteryVoltage(self.frontaddr)[1] / 10))
            stat.add("Front Temp1 C:", float(self.roboclaw.ReadTemp(self.frontaddr)[1] / 10))
            stat.add("Front Temp2 C:", float(self.roboclaw.ReadTemp2(self.frontaddr)[1] / 10))
            front_currents = self.roboclaw.ReadCurrents(self.frontaddr)
            stat.add("Front Left Current:", float(front_currents[1] / 100))
            stat.add("Front Right Current:", float(front_currents[2] / 100))
            back_currents = self.roboclaw.ReadCurrents(self.backaddr)
            stat.add("Back Left Current:", float(back_currents[1] / 100))
            stat.add("Back Right Current:", float(back_currents[2] / 100))
            stat.add("Back Main Batt V:", float(self.roboclaw.ReadMainBatteryVoltage(self.backaddr)[1] / 10))
            stat.add("Back Logic Batt V:", float(self.roboclaw.ReadLogicBatteryVoltage(self.backaddr)[1] / 10))
            stat.add("Back Temp1 C:", float(self.roboclaw.ReadTemp(self.backaddr)[1] / 10))
            stat.add("Back Temp2 C:", float(self.roboclaw.ReadTemp2(self.backaddr)[1] / 10))
        except OSError as e:
            rospy.logwarn("Diagnostics OSError: %d", e.errno)
            rospy.logdebug(e)
        return stat
    def shutdown(self):
        """Handle shutting down the node"""
        rospy.loginfo("Shutting down")
        if hasattr(self, "sub"):
            self.sub.unregister()  # so it doesn't get called after we're dead
        # Best-effort stop of all four motors; retry once on serial failure.
        try:
            self.roboclaw.ForwardM1(self.frontaddr, 0)
            self.roboclaw.ForwardM2(self.frontaddr, 0)
            self.roboclaw.ForwardM1(self.backaddr, 0)
            self.roboclaw.ForwardM2(self.backaddr, 0)
            rospy.loginfo("Closed Roboclaw serial connection")
        except OSError:
            rospy.logerr("Shutdown did not work trying again")
            try:
                self.roboclaw.ForwardM1(self.frontaddr, 0)
                self.roboclaw.ForwardM2(self.frontaddr, 0)
                self.roboclaw.ForwardM1(self.backaddr, 0)
                self.roboclaw.ForwardM2(self.backaddr, 0)
            except OSError as e:
                rospy.logerr("Could not shutdown motors!!!!")
                rospy.logdebug(e)
        #quit()
if __name__ == "__main__":
    try:
        node = Node()
        node.run()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS is interrupted (e.g. Ctrl-C).
        pass
    rospy.loginfo("Exiting")
| 2.03125 | 2 |
lib/cmd/cmd.py | sachio222/socketchat_v3 | 1 | 12770618 | import socket
import subprocess
from chatutils import utils
from chatutils.chatio2 import ChatIO
# Shared configuration loaded once at import time.
configs = utils.JSONLoader()
# Fixed-width header length used when framing messages on the socket.
HEADER_LEN = configs.dict["system"]["headerLen"]
def commands(client_socket):
    """Prompt the remote peer for a shell command, run it locally, and
    send the command's output back over the socket.
    """
    # Send the command-prompt marker so the client knows to reply.
    ChatIO().pack_n_send(client_socket, "C", b"<<cmd:#>>")
    # Receive the raw command bytes and return their execution output verbatim.
    received = ChatIO.unpack_data(client_socket)
    client_socket.send(run_cmd(received))
def run_cmd(command) -> bytes:
    """Execute *command* in a shell and return its combined stdout/stderr.

    Args:
        command: Raw command bytes as received from the socket; trailing
            whitespace/newline is stripped before execution.

    Returns:
        The command's output as bytes, or an error message when execution
        fails (unknown command, non-zero exit status, decode error).

    SECURITY NOTE: the command comes straight from the remote peer and is
    executed with ``shell=True`` -- this is an intentional remote-shell
    feature; never expose it on an untrusted network.
    """
    # Trim the \n char.
    command = command.rstrip().decode()
    try:
        output = subprocess.check_output(command,
                                         stderr=subprocess.STDOUT,
                                         shell=True)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any execution failure yields the error message.
        output = f"Command not found: {command} \r\n"
        output = output.encode()
    return output
| 2.578125 | 3 |
S4/S4 Library/simulation/venues/park_zone_director.py | NeonOcean/Environment | 1 | 12770619 | <gh_stars>1-10
from situations.complex.yoga_class import YogaClassScheduleMixin
from venues.relaxation_center_zone_director import VisitorSituationOnArrivalZoneDirectorMixin
from venues.scheduling_zone_director import SchedulingZoneDirector
class ParkZoneDirector(YogaClassScheduleMixin, VisitorSituationOnArrivalZoneDirectorMixin, SchedulingZoneDirector):
    """Zone director for the Park venue.

    All behavior is composed from the mixins: yoga-class scheduling,
    visitor situations spawned on arrival, and the base situation
    scheduling of SchedulingZoneDirector. No Park-specific overrides.
    """
    pass
| 1.21875 | 1 |
day-12/part-2/youyoun.py | evqna/adventofcode-2020 | 12 | 12770620 | <reponame>evqna/adventofcode-2020<gh_stars>10-100
from tool.runners.python import SubmissionPy
# Maps the waypoint's sign pattern (way_x > 0, way_y > 0) -- booleans hash
# equal to 0/1 -- onto a quadrant index 0..3.  Adding 1 to the index (an 'R'
# turn, see YouyounSubmission.turn) steps through the quadrants
# (-,-) -> (-,+) -> (+,+) -> (+,-).
dir_plane_map = {
    (0, 0): 0,
    (0, 1): 1,
    (1, 1): 2,
    (1, 0): 3,
}
# Sign multipliers (sign_x, sign_y) for each quadrant index above.
planes = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
class YouyounSubmission(SubmissionPy):
    """AoC 2020 day 12 part 2: move a ship toward a waypoint.

    The waypoint (way_x, way_y) is relative to the ship; 'F' moves the
    ship toward it, 'L'/'R' rotate the waypoint about the ship in 90
    degree steps.  Rotation is implemented by tracking only the
    waypoint's quadrant (via dir_plane_map/planes) and swapping the
    coordinate magnitudes -- valid for multiples of 90 degrees.
    """

    def __init__(self):
        super().__init__()
        # Ship position (absolute) and waypoint offset (relative to ship).
        self.ship_x = 0
        self.ship_y = 0
        self.way_x = 10
        self.way_y = 1

    def reinit(self):
        # Reset state so run() can be called more than once per instance.
        self.ship_x = 0
        self.ship_y = 0
        self.way_x = 10
        self.way_y = 1

    def run(self, s):
        """
        :param s: input in string format
        :return: solution flag (Manhattan distance from the origin)
        """
        s = s.splitlines()
        self.reinit()
        for e in s:
            # Each line is a single letter followed by an integer argument.
            if e[0] == 'L' or e[0] == 'R':
                self.turn(e[0], int(e[1:]))
            else:
                self.process(e[0], int(e[1:]))
        return abs(self.ship_x) + abs(self.ship_y)

    def process(self, dir_, steps):
        # N/S/E/W translate the *waypoint*; F advances the ship ``steps``
        # times along the waypoint vector.
        if dir_ == 'N':
            self.way_y += steps
        elif dir_ == 'S':
            self.way_y -= steps
        elif dir_ == 'E':
            self.way_x += steps
        elif dir_ == 'W':
            self.way_x -= steps
        elif dir_ == 'F':
            self.ship_y = self.ship_y + steps * self.way_y
            self.ship_x = self.ship_x + steps * self.way_x

    def turn(self, degrees, steps):
        # NOTE(review): parameter names are swapped relative to their use --
        # ``degrees`` receives the letter 'L'/'R' and ``steps`` the angle.
        # Also ``dir`` shadows the builtin and ``turn`` shadows this method.
        dir = dir_plane_map[((self.way_x > 0), (self.way_y > 0))]
        turn = 1 if degrees == 'R' else -1
        # One quadrant step per 90 degrees.
        for _ in range(steps // 90):
            dir = (dir + turn) % 4
        # A 90-degree rotation swaps |x| and |y|; the new signs come from
        # the target quadrant.  Assumes way_x/way_y are nonzero (a zero
        # coordinate is treated as negative) -- TODO confirm inputs.
        self.way_y, self.way_x = planes[dir][1] * abs(self.way_x), planes[dir][0] * abs(self.way_y)
if __name__ == '__main__':
    # Run against the puzzle input; NOTE(review): the file handle returned
    # by open() is never closed (fine for a one-shot script).
    print(YouyounSubmission().run(open('../input/youyoun.txt').read()))
tests/test_example.py | lig/pyventory | 28 | 12770621 | import json
import os
import shlex
import subprocess
import pytest
@pytest.fixture(scope='session')
def example_dir(tests_dir):
    """Path to the bundled ``example`` project used by these tests."""
    return tests_dir / 'example'
@pytest.fixture(scope='session')
def anisble_inventory(example_dir):
    """Open handle on the expected Ansible inventory JSON.

    NOTE(review): the handle is never closed (session-scoped fixture with
    no teardown), and the fixture name carries an ``anisble`` typo that is
    kept because tests request it by this exact name.
    """
    return open(example_dir / 'ansible.json', 'r')
@pytest.fixture(scope='session')
def terraform_config(example_dir):
    """Open handle on the expected Terraform config JSON (never closed)."""
    return open(example_dir / 'terraform.tf.json', 'r')
def test_ansible_inventory(tests_dir, example_dir, anisble_inventory):
    """Run the example inventory script and compare its stdout JSON with
    the reference ``ansible.json``."""
    project_dir = tests_dir.parent
    inventory_exe = example_dir / 'ansible_hosts.py'
    result = subprocess.run(
        shlex.split(str(inventory_exe)),
        stdout=subprocess.PIPE,
        check=True,
        # PYTHONPATH lets the script import both the package under test and
        # the example project.  NOTE(review): ':' as separator is
        # POSIX-only; os.pathsep would be portable -- confirm Windows is
        # out of scope.
        env=dict(os.environ, PYTHONPATH='{}:{}'.format(project_dir, example_dir)),
    ).stdout
    assert json.loads(result.decode()) == json.load(anisble_inventory)
def test_terraform_vars(tests_dir, example_dir, terraform_config):
    """Run the example Terraform-vars script, compare the file it writes
    against the reference config, then delete the generated file."""
    project_dir = tests_dir.parent
    inventory_exe = example_dir / 'terraform_vars.py'
    subprocess.run(
        shlex.split(str(inventory_exe)),
        check=True,
        # NOTE(review): ':' separator is POSIX-only (see sibling test).
        env=dict(os.environ, PYTHONPATH='{}:{}'.format(project_dir, example_dir)),
    )
    result_path = example_dir / 'terraform_result.tf.json'
    result = open(result_path, 'r')
    # NOTE(review): ``result`` is never closed, and if the assert fails the
    # generated file is left behind.
    assert json.load(result) == json.load(terraform_config)
    result_path.unlink()
| 1.90625 | 2 |
ms2ldaviz/add_parentmass.py | RP0001/ms2ldaviz | 6 | 12770622 | import os
import pickle
import numpy as np
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
import jsonpickle
from basicviz.models import Experiment,Document
if __name__ == '__main__':
    # One-off maintenance script: for every document of the experiment
    # named on the command line, copy the 'm/z' metadata value (a string)
    # into a float 'parentmass' field.
    experiment_name = sys.argv[1]
    experiment = Experiment.objects.get(name = experiment_name)
    documents = Document.objects.filter(experiment = experiment)
    for document in documents:
        # Metadata is stored as a jsonpickle-encoded string.
        md = jsonpickle.decode(document.metadata)
        if 'm/z' in md:
            md['parentmass'] = float(md['m/z'])
            document.metadata = jsonpickle.encode(md)
            document.save()
torchnlp/utils.py | JiaqiLiu/PyTorch-NLP | 0 | 12770623 | import logging
import inspect
import collections
import random
import torch
logger = logging.getLogger(__name__)
def get_tensors(object_):
    """ Get all tensors associated with ``object_``

    Recursively walks mappings, iterables and (as a last resort) object
    attributes via ``inspect.getmembers``.  Strings and plain numbers are
    leaves with no tensors.

    Args:
        object_ (any): Any object to look for tensors.

    Returns:
        (list of torch.tensor): List of tensors that are associated with
        ``object_``.  Each tensor appears once even if reachable through
        several paths.
    """
    if torch.is_tensor(object_):
        return [object_]
    elif isinstance(object_, (str, float, int)):
        return []

    # Collect into a set so each tensor is reported only once.
    tensors = set()

    if isinstance(object_, collections.abc.Mapping):
        for value in object_.values():
            tensors.update(get_tensors(value))
    elif isinstance(object_, collections.abc.Iterable):
        for value in object_:
            tensors.update(get_tensors(value))
    else:
        # Fall back to inspecting attributes, skipping callables and None.
        members = [
            value for key, value in inspect.getmembers(object_)
            if not isinstance(value, (collections.abc.Callable, type(None)))
        ]
        tensors.update(get_tensors(members))

    # BUGFIX: previously the raw set escaped here, contradicting the
    # documented list return type of the tensor/leaf branches above.
    return list(tensors)
def sampler_to_iterator(dataset, sampler):
    """ Given a batch sampler or sampler returns examples instead of indices

    Args:
        dataset (torch.utils.data.Dataset): Dataset to sample from.
        sampler (torch.utils.data.sampler.Sampler): Sampler over the dataset.

    Returns:
        generator over dataset examples
    """
    for index_or_batch in sampler:
        if isinstance(index_or_batch, (list, tuple)):
            # A batch of indices -> a list of examples.
            yield [dataset[index] for index in index_or_batch]
        else:
            # A single index -> a single example.
            yield dataset[index_or_batch]
def datasets_iterator(*datasets):
    """Iterate over the rows of several datasets, one dataset at a time.

    Args:
        *datasets (:class:`list` of :class:`torch.utils.data.Dataset`)

    Returns:
        generator over rows in ``*datasets``
    """
    for dataset in datasets:
        yield from dataset
def flatten_parameters(model):
    """ ``flatten_parameters`` of a RNN model loaded from disk. """
    def _maybe_flatten(module):
        # Only RNN modules expose ``flatten_parameters``; skip the rest.
        if hasattr(module, 'flatten_parameters'):
            module.flatten_parameters()

    model.apply(_maybe_flatten)
def shuffle(list_, random_seed=123):
    """ Shuffle list deterministically based on ``random_seed``.

    Two equal lists shuffled with the same seed end up in the same order.

    **Reference:**
    https://stackoverflow.com/questions/19306976/python-shuffling-with-a-parameter-to-get-the-same-result

    Example:
        >>> a = [1, 2, 3, 4, 5]
        >>> b = [1, 2, 3, 4, 5]
        >>> shuffle(a, random_seed=456)
        >>> shuffle(b, random_seed=456)
        >>> a == b
        True

    Args:
        list_ (list): List to be shuffled.
        random_seed (int): Random seed used to shuffle.
    Returns:
        None: the list is shuffled in place.
    """
    rng = random.Random(random_seed)
    rng.shuffle(list_)
def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):
    """Deterministic shuffle and split algorithm.

    Given the same two datasets and the same ``random_seed``, the split
    happens the same exact way every call.

    Args:
        dataset (lib.datasets.Dataset): First dataset.
        other_dataset (lib.datasets.Dataset): Another dataset.
        random_seed (int, optional): Seed to control the shuffle of both
            datasets.
        split (float, optional): If defined it is the percentage of rows
            that first dataset gets after split otherwise the original
            proportions are kept.

    Returns:
        :class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`:
        Resplit datasets.
    """
    # Prevent circular dependency
    from torchnlp.datasets import Dataset

    rows = dataset.rows + other_dataset.rows
    shuffle(rows, random_seed=random_seed)

    if split is None:
        # Keep the original proportions.
        pivot = len(dataset)
    else:
        # ``split`` is the fraction given to the first dataset,
        # clamped into [0, len(rows)].
        pivot = max(min(round(len(rows) * split), len(rows)), 0)
    return Dataset(rows[:pivot]), Dataset(rows[pivot:])
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    """
    Compute ``torch.equal`` with the optional mask parameter.

    Args:
        ignore_index (int, optional): Specifies a ``tensor`` index that is
            ignored.

    Returns:
        (bool) Returns ``True`` if target and prediction are equal.
    """
    if ignore_index is None:
        return torch.equal(tensor, tensor_other)

    assert tensor.size() == tensor_other.size()
    # Keep only the positions of ``tensor`` not equal to ``ignore_index``.
    keep = tensor.ne(ignore_index)
    return torch.equal(tensor.masked_select(keep),
                       tensor_other.masked_select(keep))
def is_namedtuple(object_):
    # A namedtuple is a tuple subclass that grows an ``_asdict`` method.
    if not isinstance(object_, tuple):
        return False
    return hasattr(object_, '_asdict')
def lengths_to_mask(*lengths, **kwargs):
    """ Given a list of lengths, create a batch mask.

    Example:
        >>> lengths_to_mask([1, 2, 3])
        tensor([[1, 0, 0],
                [1, 1, 0],
                [1, 1, 1]], dtype=torch.uint8)

    Args:
        *lengths (list of int or torch.Tensor)
        **kwargs: Keyword arguments passed to ``torch.zeros`` upon initially
            creating the returned tensor.

    Returns:
        torch.ByteTensor
    """
    # Normalize every argument to a plain python list of ints:
    # squeeze away stray dimensions of tensors, wrap scalars in a list.
    as_lists = []
    for length in lengths:
        if torch.is_tensor(length):
            length = length.squeeze().tolist()
        if not isinstance(length, list):
            length = [length]
        as_lists.append(length)

    # All arguments must describe the same batch.
    assert all(len(length) == len(as_lists[0]) for length in as_lists)
    batch_size = len(as_lists[0])

    # One trailing dimension per argument, sized by its largest length.
    shape = tuple(int(max(length)) for length in as_lists)
    mask = torch.zeros(batch_size, *shape, **kwargs)
    for row, bounds in enumerate(zip(*as_lists)):
        region = (row,) + tuple(slice(int(bound)) for bound in bounds)
        mask[region] = 1
    return mask.byte()
def collate_tensors(batch, stack_tensors=torch.stack):
    """ Collate a list of type ``k`` (dict, namedtuple, list, etc.) with
    tensors.

    Inspired by:
    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31

    Args:
        batch (list of k): List of rows of type ``k``.
        stack_tensors (callable): Function to stack tensors into a batch.

    Returns:
        k: Collated batch of type ``k``.

    Example:
        >>> import torch
        >>> batch = [
        ...     { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ...     { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ... ]
        >>> collated = collate_tensors(batch)
        >>> {k: t.size() for (k, t) in collated.items()}
        {'column_a': torch.Size([2, 5]), 'column_b': torch.Size([2, 5])}
    """
    if all(torch.is_tensor(item) for item in batch):
        return stack_tensors(batch)

    if (all(isinstance(item, dict) for item in batch)
            and all(item.keys() == batch[0].keys() for item in batch)):
        # Collate each column independently.
        return {
            key: collate_tensors([item[key] for item in batch], stack_tensors)
            for key in batch[0]
        }
    if all(hasattr(item, '_asdict') and isinstance(item, tuple)
           for item in batch):
        # Handle ``namedtuple``: rebuild it from its collated dict form.
        return batch[0].__class__(
            **collate_tensors([item._asdict() for item in batch],
                              stack_tensors))
    if all(isinstance(item, list) for item in batch):
        # Handle list of lists such each list has some column to be batched,
        # similar to: [['a', 'b'], ['a', 'b']] → [['a', 'a'], ['b', 'b']]
        return [
            collate_tensors(column, stack_tensors)
            for column in zip(*batch)
        ]
    return batch
def tensors_to(tensors, *args, **kwargs):
    """ Apply ``torch.Tensor.to`` to tensors in a generic data structure.

    Inspired by:
    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31

    Args:
        tensors (tensor, dict, list, namedtuple or tuple): Data structure
            with tensor values to move.
        *args: Arguments passed to ``torch.Tensor.to``.
        **kwargs: Keyword arguments passed to ``torch.Tensor.to``.

    Returns:
        The inputted ``tensors`` with ``torch.Tensor.to`` applied.
    """
    if torch.is_tensor(tensors):
        return tensors.to(*args, **kwargs)

    def _convert(item):
        # Recurse with the same ``.to`` arguments.
        return tensors_to(item, *args, **kwargs)

    if isinstance(tensors, dict):
        return {key: _convert(value) for key, value in tensors.items()}
    if hasattr(tensors, '_asdict') and isinstance(tensors, tuple):
        # Handle ``namedtuple`` by rebuilding it field by field.
        return tensors.__class__(**_convert(tensors._asdict()))
    if isinstance(tensors, list):
        return [_convert(item) for item in tensors]
    if isinstance(tensors, tuple):
        return tuple(_convert(item) for item in tensors)
    # Anything else passes through untouched.
    return tensors
| 2.59375 | 3 |
tofu/data/_inversions_algos.py | WinstonLHS/tofu | 6 | 12770624 |
import warnings
import numpy as np
import scipy.linalg as scplin
import scipy.optimize as scpop
import scipy.sparse as scpsp
# Probe for optional acceleration libraries; each failure is recorded in
# ``dfail`` and turned into a single install-suggestion warning below.
# ``sksp``/``skumf`` are set to False when unavailable so the solvers can
# branch on them at runtime.
dfail = {}
try:
    import sksparse as sksp
except Exception as err:
    sksp = False
    # NOTE(review): "cholesk" typo in this message -- should read
    # "cholesky factorizations".
    dfail['sksparse'] = "For cholesk factorizations"
try:
    import scikits.umfpack as skumf
except Exception as err:
    skumf = False
    dfail['umfpack'] = "For faster sparse matrices"
if len(dfail) > 0:
    lstr = [f"\t- {k0}: {v0}" for k0, v0 in dfail.items()]
    msg = (
        "Consider installing the following for faster inversions:\n"
        + "\n".join(lstr)
    )
    warnings.warn(msg)
# #############################################################################
# #############################################################################
# Basic routines - augmented tikhonov
# #############################################################################
def inv_linear_augTikho_dense(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    nbs=None,
    mu0=None,
    conv_crit=None,
    a0bis=None,
    b0=None,
    a1bis=None,
    b1=None,
    d=None,
    conv_reg=True,
    verb=None,
    verb2head=None,
    **kwdargs,
):
    """
    Linear algorithm for Phillips-Tikhonov regularisation
    Called "Augmented Tikhonov", dense matrix version

    Iteratively solves (TTn + mu*R) sol = Tyn, re-estimating the
    regularisation parameter mu from the bayesian hyper-parameters
    (a0bis, b0) / (a1bis, b1) after each solve, until the relative change
    of mu (or of the solution, when conv_reg is False) drops below
    conv_crit, with at least 2 iterations.  sol0 is updated in place.

    Returns (sol, mu, chi2n, reg, niter, [tau, lamb]) where chi2n is the
    normalized residual res2/nchan and reg = sol.R.sol.
    """
    conv = 0.           # convergence variable
    niter = 0           # number of iterations
    mu1 = 0.            # regularisation param

    # verb
    if verb >= 2:
        chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
            end='\n',
        )

    # loop
    # Continue until convergence criterion, and at least 2 iterations
    while niter < 2 or conv > conv_crit:
        # call solver
        sol = scplin.solve(
            TTn + mu0*R, Tyn,
            assume_a='pos',     # faster than 'sym'
            overwrite_a=True,   # no significant gain
            overwrite_b=False,  # True is faster, but a copy of Tyn is needed
            check_finite=False, # small speed gain compared to True
            transposed=False,
        )   # 3

        # compute residu, regularity...
        res2 = np.sum((Tn.dot(sol)-yn)**2)      # residu**2
        reg = sol.dot(R.dot(sol))               # regularity term

        # update lamb, tau
        lamb = a0bis/(0.5*reg + b0)             # Update reg. param. estimate
        tau = a1bis/(0.5*res2 + b1)             # Update noise coef. estimate
        mu1 = (lamb/tau) * (2*a1bis/res2)**d    # rescale mu with noise estimate

        # Compute convergence variable
        if conv_reg:
            conv = np.abs(mu1 - mu0) / mu1
        else:
            # Relative solution change, with small components floored to
            # 0.1% of the max to avoid division blow-up.
            sol2 = sol**2
            sol2max = np.max(sol2)
            sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
            conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)

        # verb
        if verb >= 2:
            temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
            temp2 = f"{res2 + mu1*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")

        # update sol0, mu0 for next iteration
        sol0[:] = sol[:]
        mu0 = mu1
        niter += 1

    return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_sparse(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    nbs=None,
    mu0=None,
    conv_crit=None,
    a0bis=None,
    b0=None,
    a1bis=None,
    b1=None,
    d=None,
    conv_reg=True,
    verb=None,
    verb2head=None,
    maxiter=None,
    tol=None,
    precond=None,       # test
    **kwdargs,
):
    """
    Linear algorithm for Phillips-Tikhonov regularisation
    Called "Augmented Tikhonov", sparse matrix version

    Same iteration as inv_linear_augTikho_dense but the inner solve uses
    scipy's conjugate-gradient (warm-started from sol0, optionally
    preconditioned with ``precond``).
    see InvLin_AugTikho_V1.__doc__ for details
    """
    conv = 0.           # convergence variable
    niter = 0           # number of iterations
    mu1 = 0.            # regularisation param

    # verb
    if verb >= 2:
        chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
            end='\n',
        )

    # loop
    # Continue until convergence criterion, and at least 2 iterations
    while niter < 2 or conv > conv_crit:

        # sol = scpsp.linalg.spsolve(
        #     TTn + mu0*R, Tyn,
        #     permc_spec=None,
        #     use_umfpack=True,
        # )

        # seems faster
        # NOTE(review): the CG convergence flag ``itconv`` is ignored.
        sol, itconv = scpsp.linalg.cg(
            TTn + mu0*R, Tyn,
            x0=sol0,
            tol=tol,
            maxiter=maxiter,
            M=precond,
        )

        res2 = np.sum((Tn.dot(sol)-yn)**2)      # residu**2
        reg = sol.dot(R.dot(sol))               # regularity term
        lamb = a0bis/(0.5*reg + b0)             # Update reg. param. estimate
        tau = a1bis/(0.5*res2 + b1)             # Update noise coef. estimate
        mu1 = (lamb/tau) * (2*a1bis/res2)**d    # rescale mu with noise estimate

        # Compute convergence variable
        if conv_reg:
            conv = np.abs(mu1 - mu0) / mu1
        else:
            sol2 = sol**2
            sol2max = np.max(sol2)
            sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
            conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)

        # verb
        if verb >= 2:
            temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
            temp2 = f"{res2 + mu1*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(
                f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}"
            )

        sol0[:] = sol[:]            # Update reference solution
        niter += 1                  # Update number of iterations
        mu0 = mu1
    return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_chol_dense(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    nbs=None,
    mu0=None,
    conv_crit=None,
    a0bis=None,
    b0=None,
    a1bis=None,
    b1=None,
    d=None,
    conv_reg=True,
    verb=None,
    verb2head=None,
    **kwdargs,
):
    """
    Augmented-Tikhonov iteration, dense version using a Cholesky
    factorization of (TTn + mu*R) for the inner solve, with a fallback to
    a plain symmetric solve when the factorization fails (matrix not
    positive-definite).  Same convergence logic and return values as
    inv_linear_augTikho_dense.
    """
    conv = 0.           # convergence variable
    niter = 0           # number of iterations
    mu1 = 0.            # regularisation param

    # verb
    if verb >= 2:
        chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
            end='\n',
        )

    # loop
    # Continue until convergence criterion, and at least 2 iterations
    while niter < 2 or conv > conv_crit:
        try:
            # choleski decomposition requires det(TT + mu0*LL) != 0
            # (chol(A).T * chol(A) = A
            chol = scplin.cholesky(
                TTn + mu0*R,
                lower=False,
                check_finite=False,
                overwrite_a=False,
            )
            # Use np.linalg.lstsq for double-solving the equation
            sol = scplin.cho_solve(
                (chol, False), Tyn,
                overwrite_b=None,
                check_finite=True,
            )
        except Exception as err:
            # call solver
            sol = scplin.solve(
                TTn + mu0*R, Tyn,
                assume_a='sym',     # chol failed => not 'pos'
                overwrite_a=True,   # no significant gain
                overwrite_b=False,  # True faster, but a copy of Tyn needed
                check_finite=False, # small speed gain compared to True
                transposed=False,
            )   # 3

        # compute residu, regularity...
        res2 = np.sum((Tn.dot(sol)-yn)**2)      # residu**2
        reg = sol.dot(R.dot(sol))               # regularity term

        # update lamb, tau
        lamb = a0bis/(0.5*reg + b0)             # Update reg. param. estimate
        tau = a1bis/(0.5*res2 + b1)             # Update noise coef. estimate
        mu1 = (lamb/tau) * (2*a1bis/res2)**d    # mu rescale with noise estimate

        # Compute convergence variable
        if conv_reg:
            conv = np.abs(mu1 - mu0) / mu1
        else:
            sol2 = sol**2
            sol2max = np.max(sol2)
            sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
            conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)

        # verb
        if verb >= 2:
            temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
            temp2 = f"{res2 + mu1*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")

        # update sol0, mu0 for next iteration
        sol0[:] = sol[:]
        mu0 = mu1
        niter += 1

    return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_chol_sparse(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    nbs=None,
    mu0=None,
    conv_crit=None,
    a0bis=None,
    b0=None,
    a1bis=None,
    b1=None,
    d=None,
    conv_reg=True,
    verb=None,
    verb2head=None,
    **kwdargs,
):
    """
    Linear algorithm for Phillips-Tikhonov regularisation
    Called "Augmented Tikhonov"
    Augmented in the sense that bayesian statistics are combined
        with standard Tikhonov regularisation
    Determines both noise (common multiplicative coefficient) and
        regularisation parameter automatically
    We assume here that all arrays are scaled (noise, conditioning...)
    Sparse matrixes are also prefered to speed-up the computation

    In this method:
      tau is an approximation of the inverse of the noise coefficient
      lamb is an approximation of the regularisation parameter

    N.B.: The noise and reg. param. have probability densities of the form:
        f(x) = x^(a-1) * exp(-bx)
    This function's maximum is in x = (a-1)/b, so a = b+1 gives a maximum at 1.
        (a0, b0) for the reg. param.
        (a1, b1) for the noise estimate

    Uses a sparse Cholesky factorization via scikit-sparse when available
    (re-using the symbolic factorization across iterations), an LU
    factorization otherwise, and falls back to spsolve on failure.

    Ref:
      [1] <NAME>., <NAME>., Inverse Problems, vol.25, nb.2, 025001, 2009
      [2] http://www.math.uni-bremen.de/zetem/cms/media.php/250/nov14talk_jin%20bangti.pdf
      [3] <NAME>, <NAME>, <NAME>,
          "A New Choice Rule for Regularization Parameters in Tikhonov
          Regularization", Research report, University of Hong Kong, 2008
    """
    conv = 0.           # convergence variable
    niter = 0           # number of iterations
    mu1 = 0.            # regularisation param

    # verb
    if verb >= 2:
        chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
            end='\n',
        )

    # loop
    # Continue until convergence criterion, and at least 2 iterations
    factor = None
    while niter < 2 or conv > conv_crit:
        try:
            # choleski decomposition requires det(TT + mu0*LL) != 0
            # A = (chol(A).T * chol(A)
            # optimal if matrix is csc
            if sksp is False:
                factor = scpsp.linalg.factorized(TTn + mu0*R)
                sol = factor(Tyn)
            else:
                # NOTE(review): assumes ``sksparse.cholmod`` is importable
                # as an attribute of the bare ``sksparse`` import above --
                # confirm on the target environment.
                if factor is None:
                    factor = sksp.cholmod.cholesky(
                        TTn + mu0*R,
                        beta=0,
                        mode='auto',
                        ordering_method='default',
                        use_long=False,
                    )
                else:
                    # re-use same factor
                    factor.cholesky_inplace(TTn + mu0*R, beta=0)
                sol = factor.solve_A(Tyn)
        except Exception as err:
            # call solver
            sol = scpsp.linalg.spsolve(
                TTn + mu0*R, Tyn,
                permc_spec=None,
                use_umfpack=True,
            )

        # compute residu, regularity...
        res2 = np.sum((Tn.dot(sol)-yn)**2)      # residu**2
        reg = sol.dot(R.dot(sol))               # regularity term

        # update lamb, tau
        lamb = a0bis/(0.5*reg + b0)             # Update reg. param. estimate
        tau = a1bis/(0.5*res2 + b1)             # Update noise coef. estimate
        mu1 = (lamb/tau) * (2*a1bis/res2)**d    # Update reg. param. rescaling

        # Compute convergence variable
        if conv_reg:
            conv = np.abs(mu1 - mu0) / mu1
        else:
            sol2 = sol**2
            sol2max = np.max(sol2)
            sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
            conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)

        # verb
        if verb >= 2:
            temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
            temp2 = f"{res2 + mu1*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")

        # update sol0, mu0 for next iteration
        sol0[:] = sol[:]
        mu0 = mu1
        niter += 1

    return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_pos_dense(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    nbs=None,
    mu0=None,
    conv_crit=None,
    a0bis=None,
    b0=None,
    a1bis=None,
    b1=None,
    d=None,
    conv_reg=True,
    verb=None,
    verb2head=None,
    # specific
    method=None,
    options=None,
    bounds=None,
    func_val=None,
    func_jac=None,
    func_hess=None,
    **kwdargs,
):
    """
    Quadratic algorithm for Phillips-Tikhonov regularisation
    Alternative to the linear version with positivity constraint

    The inner solve is replaced by a bound-constrained minimization
    (scipy.optimize.minimize) of the user-supplied objective ``func_val``
    with gradient/hessian ``func_jac``/``func_hess``; ``bounds`` carries
    the positivity constraint.
    see TFI.InvLin_AugTikho_V1.__doc__ for details
    """
    conv = 0.           # convergence variable
    niter = 0           # number of iterations
    mu1 = 0.            # regularisation param

    # verb
    if verb >= 2:
        chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
            end='\n',
        )

    while niter < 2 or conv > conv_crit:
        # quadratic method for positivity constraint
        sol = scpop.minimize(
            func_val, sol0,
            args=(mu0, Tn, yn, TTn, Tyn),
            jac=func_jac,
            hess=func_hess,
            method=method,
            bounds=bounds,
            options=options,
        ).x

        # compute residu, regularity...
        res2 = np.sum((Tn.dot(sol)-yn)**2)      # residu**2
        reg = sol.dot(R.dot(sol))               # regularity term

        # update lamb, tau
        lamb = a0bis/(0.5*reg + b0)             # Update reg. param. estimate
        tau = a1bis/(0.5*res2 + b1)             # Update noise coef. estimate
        mu1 = (lamb/tau) * (2*a1bis/res2)**d    # Update reg. param. rescaling

        # Compute convergence variable
        if conv_reg:
            conv = np.abs(mu1 - mu0) / mu1
        else:
            sol2 = sol**2
            sol2max = np.max(sol2)
            sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
            conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)

        # verb
        if verb >= 2:
            temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
            temp2 = f"{res2 + mu1*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")

        # update sol0, mu0 for next iteration
        sol0[:] = sol[:]
        mu0 = mu1
        niter += 1

    return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
# #############################################################################
# #############################################################################
# Basic routines - discrepancy principle
# #############################################################################
def inv_linear_DisPrinc_sparse(
    Tn=None,
    TTn=None,
    Tyn=None,
    R=None,
    yn=None,
    sol0=None,
    nchan=None,
    mu0=None,
    precond=None,
    verb=None,
    verb2head=None,
    # specific
    chi2n_tol=None,
    chi2n_obj=None,
    maxiter=None,
    tol=None,
    **kwdargs,
):
    """
    Discrepancy principle: find mu such that chi2n = 1 +/- tol

    Tracks the history of (mu, chi2n) pairs: the first iterations bracket
    the target by multiplying/dividing mu by 50, then mu is obtained by
    log-log interpolation of the history toward ``chi2n_obj``.  If two
    consecutive mu values are identical the best mu seen so far is reused
    and the loop breaks (anti-stall guard).
    """
    niter = 0
    lchi2n = np.array([np.sum((Tn.dot(sol0) - yn)**2) / nchan])
    lmu = np.array([mu0])
    chi2n_obj_log = np.log(chi2n_obj)

    # verb
    if verb >= 2:
        reg = sol0.dot(R.dot(sol0))
        temp = f"{nchan} * {lchi2n[0]:.3e} + {mu0:.3e} * {reg:.3e}"
        print(
            f"{verb2head}\n\t\t\t {temp} = {nchan*lchi2n[0] + mu0*reg:.3e}",
            end='\n',
        )

    while niter == 0 or np.abs(lchi2n[-1] - chi2n_obj) > chi2n_tol:
        sol, itconv = scpsp.linalg.cg(
            TTn + lmu[-1]*R, Tyn,
            x0=sol0,
            tol=tol,
            maxiter=maxiter,
            M=precond,
        )
        lchi2n = np.append(lchi2n, np.sum((Tn.dot(sol) - yn)**2) / nchan)

        if niter == 0:
            if lchi2n[-1] >= chi2n_obj + chi2n_tol:
                lmu = np.append(lmu, lmu[-1] / 50.)
            elif lchi2n[-1] <= chi2n_obj - chi2n_tol:
                lmu = np.append(lmu, lmu[-1] * 50.)
            else:
                lmu = np.append(lmu, lmu[-1])
        elif niter == 1 or (
            np.all(lchi2n >= chi2n_obj + chi2n_tol)
            or np.all(lchi2n <= chi2n_obj - chi2n_tol)
        ):
            # Target not yet bracketed: keep scaling mu by 50.
            if lchi2n[-1] >= chi2n_obj + chi2n_tol:
                lmu = np.append(lmu, lmu[-1] / 50.)
            else:
                lmu = np.append(lmu, lmu[-1] * 50.)
        else:
            if lmu[-2] == lmu[-1]:
                # if the algo is stuck => break to avoid infinite loop
                # NOTE(review): ``ind`` comes from argmin over lchi2n[1:]
                # but indexes lmu directly -- confirm the intended
                # alignment of the two histories.
                ind = np.argmin(lchi2n[1:] - chi2n_obj)
                lmu[-1] = lmu[ind]
                lchi2n[-1] = lchi2n[ind]
                sol, itconv = scpsp.linalg.cg(
                    TTn + lmu[-1]*R, Tyn,
                    x0=sol0,
                    tol=tol,
                    maxiter=maxiter,
                    M=precond,
                )
                break
            else:
                # Interpolate log(mu) vs log(chi2n) toward the target.
                indsort = np.argsort(lchi2n[1:])
                lmu = np.append(lmu, np.exp(np.interp(
                    chi2n_obj_log,
                    np.log(lchi2n[1:])[indsort],
                    np.log(lmu)[indsort]
                )))

        # verb
        if verb >= 2:
            reg = sol.dot(R.dot(sol))
            res2 = np.sum((Tn.dot(sol)-yn)**2)
            temp1 = f"{nchan} * {lchi2n[-1]:.3e} + {lmu[-1]:.3e} * {reg:.3e}"
            temp2 = f"{res2 + lmu[-1]*reg:.3e}"
            temp = f"{temp1} = {temp2}"
            print(f"\t\t{niter} \t {temp}")

        sol0[:] = sol
        niter += 1

    reg = sol.dot(R.dot(sol))               # regularity term
    return sol, lmu[-1], lchi2n[-1], reg, niter, None
| 1.976563 | 2 |
bdd/setup_losses.py | feedbackward/bdd | 1 | 12770625 | <filename>bdd/setup_losses.py
'''Setup: loss functions used for training and evaluation.'''
## External modules.
from copy import deepcopy
from numpy import absolute, expand_dims, sign
## Internal modules.
from mml.losses import Loss
from mml.losses.absolute import Absolute
from mml.losses.classification import Zero_One
from mml.losses.cvar import CVaR
from mml.losses.dro import DRO_CR
from mml.losses.logistic import Logistic
from mml.losses.quadratic import Quadratic
from mml.losses.tilted import Tilted
from setup_dispersions import get_dispersion
###############################################################################
## Special loss class definitions.
class R_Risk(Loss):
    '''
    A special loss class, which takes a base loss as
    an input, and returns a modified loss which is
    an unbiased estimate of the R-risk (regularized risk).
    '''
    def __init__(self, loss_base, dispersion, dispersion_d1,
                 sigma=None, eta=None, name=None):
        loss_name = "R_Risk x {}".format(str(loss_base))
        super().__init__(name=loss_name)
        self.loss = loss_base                   # underlying base loss.
        self.dispersion = dispersion            # dispersion function rho.
        self.dispersion_d1 = dispersion_d1      # first derivative of rho.
        self.sigma = sigma                      # scale passed to rho.
        self.eta = eta                          # weight of the dispersion term.
        return None

    def func(self, model, X, y):
        '''
        Pointwise R-risk losses: loss + eta * rho(loss - theta), where
        theta is the scalar threshold stored in model.paras["theta"].
        '''
        losses = self.loss(model=model, X=X, y=y) # compute losses.
        theta = model.paras["theta"].item() # extract scalar.
        return losses + self.eta * self.dispersion(
            x=losses-theta,
            sigma=self.sigma
        )

    def grad(self, model, X, y):
        '''
        Gradients of func() with respect to the model parameters and the
        threshold theta (returned under key "theta").
        '''
        ## Initial computations.
        losses = self.loss(model=model, X=X, y=y) # compute losses.
        loss_grads = self.loss.grad(model=model, X=X, y=y) # loss gradients.
        theta = model.paras["theta"].item() # extract scalar.
        dispersion_grads = self.dispersion_d1(
            x=losses-theta,
            sigma=self.sigma
        ) # evaluate the derivative of the dispersion term.
        ddim = dispersion_grads.ndim
        tdim = model.paras["theta"].ndim

        ## Main gradient computations.
        # Chain rule: each base-loss gradient is scaled in place by
        # (1 + eta * rho'(loss - theta) / sigma), broadcasting the
        # per-sample factor over any extra parameter axes.
        for pn, g in loss_grads.items():
            gdim = g.ndim
            if ddim > gdim:
                raise ValueError("Axis dimensions are wrong; ddim > gdim.")
            elif ddim < gdim:
                dispersion_grads_exp = expand_dims(
                    a=dispersion_grads,
                    axis=tuple(range(ddim,gdim))
                )
                g *= 1.0 + self.eta * dispersion_grads_exp / self.sigma
            else:
                g *= 1.0 + self.eta * dispersion_grads / self.sigma

        ## Compute the derivative with respect to threshold theta.
        ## (be careful to note the minus sign)
        loss_grads["theta"] = -self.eta * expand_dims(
            a=dispersion_grads,
            axis=tuple(range(ddim,1+tdim))
        ) / self.sigma

        ## Return gradients for all parameters being optimized.
        return loss_grads
class T_Risk(Loss):
    '''
    A special loss class, which takes a base loss as an input and returns
    a modified loss implementing the T-risk objective
        etatilde * theta + rho(loss - theta),
    minimized jointly in the model parameters and the threshold theta
    (stored in model.paras["theta"]).
    '''
    def __init__(self, loss_base, dispersion, dispersion_d1,
                 sigma=None, etatilde=None, name=None):
        # BUGFIX: the display name used to read "R_Risk x ..." (copy-paste
        # from the R_Risk class above); it now identifies this class.
        loss_name = "T_Risk x {}".format(str(loss_base))
        super().__init__(name=loss_name)
        self.loss = loss_base                   # underlying base loss.
        self.dispersion = dispersion            # dispersion function rho.
        self.dispersion_d1 = dispersion_d1      # first derivative of rho.
        self.sigma = sigma                      # scale passed to rho.
        self.etatilde = etatilde                # weight on the theta term.
        return None

    def func(self, model, X, y):
        '''
        Pointwise T-risk objective: etatilde * theta + rho(loss - theta).
        '''
        losses = self.loss(model=model, X=X, y=y) # compute losses.
        theta = model.paras["theta"].item() # extract scalar.
        return self.etatilde * theta + self.dispersion(
            x=losses-theta,
            sigma=self.sigma
        )

    def grad(self, model, X, y):
        '''
        Gradients of func() with respect to the model parameters and the
        threshold theta (returned under key "theta").
        '''
        ## Initial computations.
        losses = self.loss(model=model, X=X, y=y) # compute losses.
        loss_grads = self.loss.grad(model=model, X=X, y=y) # loss gradients.
        theta = model.paras["theta"].item() # extract scalar.
        dispersion_grads = self.dispersion_d1(
            x=losses-theta,
            sigma=self.sigma
        ) # evaluate the derivative of the dispersion term.
        ddim = dispersion_grads.ndim
        tdim = model.paras["theta"].ndim

        ## Main gradient computations (chain rule through the base loss).
        for pn, g in loss_grads.items():
            gdim = g.ndim
            if ddim > gdim:
                raise ValueError("Axis dimensions are wrong; ddim > gdim.")
            elif ddim < gdim:
                dispersion_grads_exp = expand_dims(
                    a=dispersion_grads,
                    axis=tuple(range(ddim,gdim))
                )
                g *= dispersion_grads_exp
            else:
                g *= dispersion_grads

        ## Compute the derivative with respect to threshold theta.
        ## (be careful to note the minus sign)
        ## NOTE(review): unlike R_Risk.grad, the model-parameter gradients
        ## above are not divided by sigma while the theta derivative is --
        ## confirm this asymmetry is intended.
        loss_grads["theta"] = self.etatilde - expand_dims(
            a=dispersion_grads,
            axis=tuple(range(ddim,1+tdim))
        ) / self.sigma

        ## Return gradients for all parameters being optimized.
        return loss_grads
class ConvexPolynomial(Loss):
    '''
    Convex polynomial regression loss |model(X) - y|^p / p with exponent
    p >= 1 (p == 1 reduces to the absolute loss).  Only single-output
    models with residuals of shape (n, 1) are supported.
    '''
    def __init__(self, exponent, name=None):
        super().__init__(name=name)
        self.exponent = exponent
        # Convexity requires exponent >= 1.
        if self.exponent < 1.0:
            raise ValueError("This class only takes exponent >= 1.0.")
        return None

    def func(self, model, X, y):
        '''
        Pointwise losses |model(X) - y|^p / p (plain |.| when p == 1).
        '''
        abdiffs = absolute(model(X=X)-y)
        if self.exponent == 1.0:
            return abdiffs
        else:
            return abdiffs**self.exponent / self.exponent

    def grad(self, model, X, y):
        '''
        Gradients of func() w.r.t. the model parameters: the model
        gradients scaled by |diff|^(p-1) * sign(diff) per sample.
        '''
        # deepcopy so the model's own gradient buffers are not mutated
        # by the in-place scaling below.
        loss_grads = deepcopy(model.grad(X=X)) # start with model grads.
        diffs = model(X=X)-y
        if self.exponent == 1.0:
            factors = sign(diffs)
        else:
            factors = absolute(diffs)**(self.exponent-1.0) * sign(diffs)
        ## Shape check to be safe.
        if factors.ndim != 2:
            raise ValueError("Require model(X)-y to have shape (n,1).")
        elif factors.shape[1] != 1:
            raise ValueError("Only implemented for single-output models.")
        else:
            for pn, g in loss_grads.items():
                g *= expand_dims(a=factors,
                                 axis=tuple(range(2,g.ndim)))
            return loss_grads
## Parser function for setting the DRO_CR parameters.
def parse_dro(atilde):
shape = 2.0
bound = ((1.0/(1.0-atilde))-1.0)**2.0 / 2.0
return (bound, shape)
## Grab the desired loss object.
# Registry of supported base losses, keyed by the names accepted by
# get_loss() below.
dict_losses = {
    "absolute": Absolute(name="absolute"),
    "logistic": Logistic(name="logistic"),
    "quadratic": Quadratic(name="quadratic"),
    "zeroone": Zero_One(name="zeroone")
}
def get_loss(name, **kwargs):
    '''
    A simple parser that takes a base loss and risk name,
    and returns the loss object that amounts to an unbiased
    estimator of the specified risk.

    Raises ValueError if the loss name is unknown, the risk
    name is missing, or the risk name is invalid.
    '''
    ## First grab the loss and risk name, with a check.
    # Fix: the original caught KeyError, printed a message, and then fell
    # through to use the undefined names below, crashing with a NameError.
    # Raise a clear exception instead.
    try:
        loss_base = dict_losses[name]
    except KeyError:
        raise ValueError("Invalid loss name: {}".format(name))
    try:
        risk_name = kwargs["risk_name"]
    except KeyError:
        raise ValueError("Missing required keyword argument: risk_name")
    ## Prepare and return the modified loss object as requested.
    if risk_name == "erm":
        loss = loss_base
    elif risk_name in ["rrisk", "trisk"]:
        dispersion_kwargs = {"interpolate": kwargs["interpolate"],
                             "alpha": kwargs["alpha"],
                             "beta": kwargs["beta"]}
        dispersion, dispersion_d1 = get_dispersion(
            name=kwargs["dispersion"], **dispersion_kwargs
        )
        if risk_name == "rrisk":
            loss = R_Risk(loss_base=loss_base,
                          dispersion=dispersion,
                          dispersion_d1=dispersion_d1,
                          sigma=kwargs["sigma"],
                          eta=kwargs["eta"])
        else:
            loss = T_Risk(loss_base=loss_base,
                          dispersion=dispersion,
                          dispersion_d1=dispersion_d1,
                          sigma=kwargs["sigma"],
                          etatilde=kwargs["etatilde"])
    elif risk_name == "cvar":
        loss = CVaR(loss_base=loss_base,
                    alpha=1.0-kwargs["prob"])
    elif risk_name == "entropic":
        loss = Tilted(loss_base=loss_base,
                      tilt=kwargs["gamma"])
    elif risk_name == "dro":
        bound, shape = parse_dro(atilde=kwargs["atilde"])
        loss = DRO_CR(loss_base=loss_base, bound=bound, shape=shape)
    else:
        raise ValueError("Invalid risk name.")
    ## Finally, return both the base loss and the modified loss.
    return (loss_base, loss)
###############################################################################
| 2.5 | 2 |
automol/extern/pybel_.py | snelliott/automol | 2 | 12770626 | <filename>automol/extern/pybel_.py
""" pybel interface
"""
import pybel
from phydat import ptab
import automol.geom.base
# geometry
def to_geometry(pbm):
    """ Build an automol geometry data structure from a Pybel molecule object.

        :param pbm: Pybel molecule object
        :type pbm: Pybel object
        :rtype: automol geometry data structure
    """
    # Saturate with hydrogens and generate 3D coordinates before reading.
    pbm.addh()
    pbm.make3D()
    atomic_numbers = [atom.atomicnum for atom in pbm.atoms]
    symbols = [ptab.to_symbol(num) for num in atomic_numbers]
    coordinates = tuple(tuple(atom.coords) for atom in pbm.atoms)
    return automol.geom.base.from_data(symbols, coordinates, angstrom=True)
# inchi
def from_inchi(ich):
    """ Build a Pybel molecule object from an InChI string.

        :param ich: InChI string for a species
        :type ich: str
        :rtype: Pybel molecule object
    """
    input_format = 'inchi'
    return pybel.readstring(input_format, ich)
def to_inchi(pbm):
    """ Build an InChI string from a Pybel molecule object.

        :param pbm: Pybel molecule object
        :type pbm: Pybel object
        :rtype: str
    """
    raw_string = pbm.write('inchi')
    return raw_string.strip()
| 2.515625 | 3 |
PAL/Cross/pupy/packages/all/portscan.py | infosecsecurity/OSPTF | 2 | 12770627 | <reponame>infosecsecurity/OSPTF
# -*- coding: UTF8 -*-
# Copyright (c) 2015, <NAME> (<EMAIL>)
# Pupy is under the BSD 3-Clause license. see the LICENSE file at the root of the project for the detailed licence terms
from scapy.all import *
def format_response(pkt):
    # Render one SYN-probe reply as a human-readable line:
    #   RST in the TCP flags   -> port closed
    #   flags exactly SYN+ACK  -> port open
    #   anything else          -> filtered
    # The sprintf templates append the reply flags, or the ICMP source/type
    # when the answer was an ICMP error instead of a TCP segment.
    res=""
    if "R" in pkt.sprintf("%TCP.flags%"):
        res+="TCP/{:<7} closed {}".format(pkt[TCP].sport, pkt.sprintf("{TCP:%TCP.flags%}{ICMP:%IP.src% - %ICMP.type%}"))
    elif pkt.sprintf("%TCP.flags%")=="SA":
        res+="TCP/{:<7} open {}".format(pkt[TCP].sport, pkt.sprintf("{TCP:%TCP.flags%}{ICMP:%IP.src% - %ICMP.type%}"))
    else:
        res+="TCP/{:<7} filtered {}".format(pkt[TCP].sport, pkt.sprintf("{TCP:%TCP.flags%}{ICMP:%IP.src% - %ICMP.type%}"))
    return res+"\n"
class PortScanner(object):
    # Thin wrapper around scapy's sr() that SYN-scans a list of TCP ports
    # and returns a formatted, newline-separated report.
    def __init__(self):
        pass
    def scan(self, address, ports, timeout=4, iface=None):
        # Send one SYN per port in a single sr() call; only answered probes
        # appear in the report (unanswered ones in `unans` are dropped).
        res=""
        ans,unans=sr(IP(dst=address)/TCP(flags="S",dport=list(ports)), verbose=False, iface=iface, timeout=timeout)
        for req,resp in ans:
            res+=format_response(resp)
        return res
# Ad-hoc smoke test against a hard-coded LAN host.
# NOTE: Python 2 module (print statement below).
if __name__=='__main__':
    p=PortScanner()
    print p.scan("192.168.2.133",[443,80,22])
| 2.390625 | 2 |
thespiae/conf/exception.py | apleshakov/thespiae | 0 | 12770628 | #
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from dataclasses import dataclass, field
from gettext import translation
from typing import TYPE_CHECKING
from thespiae.exception import ThespiaeError, generate___str__
if TYPE_CHECKING:
from typing import Sequence, Collection, Mapping, Any
from .data import AppEntry, ConfigPath
t = translation('thespiae', fallback=True)
_ = t.gettext
@generate___str__
@dataclass(frozen=True)
class AppEntryMixin:
    # Mixin that attaches the offending application entry to an error's
    # rendered message (the 'format' metadata is consumed by generate___str__).
    app_entry: AppEntry = field(metadata={'format': _('configuration entry for: {0.ref}')})
@generate___str__
@dataclass(frozen=True)
class _ConfigPathsMixin:
    # Mixin carrying every config path involved in the error.
    config_paths: Sequence[ConfigPath] = field(metadata={'format': _('corresponding paths: {}')})
@generate___str__
@dataclass(frozen=True)
class _ConfigPathMixin:
    # Mixin carrying the single offending config path.
    # NOTE(review): annotated Sequence[ConfigPath] although the field name and
    # message are singular -- confirm whether this should be ConfigPath.
    config_path: Sequence[ConfigPath] = field(metadata={'format': _('corresponding path: {}')})
@generate___str__
@dataclass(frozen=True)
class _ExpectedValueTypeMixin:
    # Mixin reporting an expected-vs-received type mismatch.
    expected: type = field(metadata={'format': _('expected value type: {0.__name__}')})
    received: type = field(metadata={'format': _('received value type: {0.__name__}')})
@generate___str__
@dataclass(frozen=True)
class _FieldMixin:
    # Mixin naming the config field and its position within a collection.
    field_name: str = field(metadata={'format': _('config field name: {}')})
    field_index: str = field(metadata={'format': _('collection index: {}')})
# Concrete error types. The decorator argument is the human-readable summary
# line; the mixin fields supply the per-error details.
@generate___str__(_('Required field is missing'))
@dataclass(frozen=True)
class AppDataMissingFieldError(_FieldMixin, _ConfigPathsMixin, ThespiaeError):
    pass
@generate___str__(_('Unexpected config value'))
@dataclass(frozen=True)
class AppDataFieldValueTypeError(_ExpectedValueTypeMixin, _FieldMixin, _ConfigPathsMixin, ThespiaeError):
    pass
@generate___str__(_('Circular field reference'))
@dataclass(frozen=True)
class AppDataCircularReferenceError(_FieldMixin, _ConfigPathsMixin, ThespiaeError):
    pass
@generate___str__(_('Invalid configuration field value'))
@dataclass(frozen=True)
class ConfigElementTypeError(_ExpectedValueTypeMixin, _ConfigPathMixin, ThespiaeError):
    pass
@generate___str__(_('Excessive configuration attribute found'))
@dataclass(frozen=True)
class ConfigExcessiveAttributeError(_ConfigPathMixin, ThespiaeError):
    pass
@generate___str__(_('Required config attributes not found'))
@dataclass(frozen=True)
class ConfigRequiredAttributesNotFoundError(_ConfigPathMixin, ThespiaeError):
    attributes: Collection[str] = field(metadata={'format': _('missing field names: {}')})
@generate___str__(_('Another configuration entry with the same identity has been found'))
@dataclass(frozen=True)
class ConfigDuplicatedEntryIdentityError(_ConfigPathsMixin, ThespiaeError):
    another_paths: Sequence[ConfigPath] = field(metadata={'format': _('another paths: {}')})
    identity_values: Mapping[str, Any] = field(metadata={'format': _('identity field values: {}')})
@generate___str__(_('Unable to complete config branches with required identity attributes'))
@dataclass(frozen=True)
class ConfigIncompleteBranchesError(_ConfigPathsMixin, ThespiaeError):
    attributes: Collection[str] = field(metadata={'format': _('missing field names: {}')})
# NOTE(review): presumably an internal marker raised while detecting reference
# cycles; no details attached -- confirm intended usage.
class _CircularReferenceError(ThespiaeError):
    pass
| 1.945313 | 2 |
tabnine-vim/third_party/ycmd/third_party/frozendict/setup.py | MrMonk3y/vimrc | 239 | 12770629 | from distutils.core import setup
# Minimal distutils packaging metadata for the frozendict library.
setup(
    name = 'frozendict',
    version = '0.3',
    url = 'https://github.com/slezica/python-frozendict',
    author = '<NAME>',
    author_email = '<EMAIL>',
    packages = ['frozendict'],
    license = 'MIT License',
    description = 'An immutable dictionary',
    long_description = open('README.txt').read()  # NOTE(review): handle left to GC; fails if README.txt is missing
)
| 1.03125 | 1 |
Python/016.py | jaimeliew1/Project_Euler_Solutions | 0 | 12770630 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 16
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
def run():
    """Return the sum of the decimal digits of 2**1000 (Project Euler 16)."""
    digits = str(2 ** 1000)
    return sum(map(int, digits))


if __name__ == "__main__":
    print(run())
| 2.796875 | 3 |
dependencies/src/4Suite-XML-1.0.2/Ft/Xml/Sax.py | aleasims/Peach | 0 | 12770631 | ########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/Sax.py,v 1.8 2006/04/12 21:06:09 uogbuji Exp $
"""
Abstraction module for Domlette SAX usage.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
import sys
from Ft.Xml.cDomlettec import CreateParser
# Alias function definition to allow for use with xml.sax.make_parser()
# To create a parser using this method, use the following:
# parser = xml.sax.make_parser(['Ft.Xml.Sax'])
create_parser = CreateParser
# 4Suite-specific SAX features
from Ft.Xml.cDomlettec import FEATURE_PROCESS_XINCLUDES
from Ft.Xml.cDomlettec import FEATURE_GENERATOR
# 4Suite-specific SAX properties
from Ft.Xml.cDomlettec import PROPERTY_WHITESPACE_RULES
from Ft.Xml.cDomlettec import PROPERTY_YIELD_RESULT
from Ft.Xml import XMLNS_NAMESPACE
from Ft.Xml.Lib.XmlPrinter import XmlPrinter
class ContentHandler:
    """Interface for receiving logical document content events.
    This is the main callback interface for the Parser. The order of
    events in this interface mirrors the order of the information in the
    document."""
    # NOTE: every method below is a deliberate no-op stub (body is just the
    # docstring); subclasses override the events they care about. The
    # startElementNS/endElementNS signatures use Python 2 tuple parameter
    # unpacking, so this module is Python 2 only.
    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.
        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.
        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
    def startDocument(self):
        """Receive notification of the beginning of a document.
        The parser will invoke this method only once, before any
        other methods in this interface."""
    def endDocument(self):
        """Receive notification of the end of a document.
        The parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""
    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.
        The information from this event is not necessary for normal
        Namespace processing: the XmlParser will automatically replace
        prefixes for element and attribute names.
        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.
        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElementNS event, and all endPrefixMapping events will occur
        after the corresponding endElementNS event, but their order is
        not guaranteed."""
    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.
        See startPrefixMapping for details. This event will always
        occur after the corresponding endElementNS event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""
    def startElementNS(self, (uri, localName), qualifiedName, atts):
        """Signals the start of an element.
        The uri parameter is None for elements which have no namespace,
        the qualifiedName parameter is the raw XML name used in the source
        document, and the atts parameter holds an instance of the
        Attributes class containing the attributes of the element.
        """
    def endElementNS(self, (uri, localName), qualifiedName):
        """Signals the end of an element.
        The uri parameter is None for elements which have no namespace,
        the qualifiedName parameter is the raw XML name used in the source
        document."""
    def characters(self, content):
        """Receive notification of character data.
        The parser will call this method to report each chunk of
        character data. The parser will return all contiguous
        character data in a single chunk."""
class Locator:
    """Interface for associating a parse event with a document
    location. A locator object will return valid results only during
    calls to ContentHandler methods; at any other time, the results are
    unpredictable."""
    # All methods are stubs that implicitly return None; concrete parsers
    # supply real implementations.
    def getColumnNumber(self):
        """Return the column number where the current event ends."""
    def getLineNumber(self):
        """Return the line number where the current event ends."""
    def getSystemId(self):
        """Return the system identifier for the current event."""
class Attributes:
    """Interface for a set of XML attributes.
    Contains a set of XML attributes, accessible by expanded name."""
    # NOTE(review): all methods except __len__ are documentation-only stubs.
    # __len__ reads self._values, which this interface never defines --
    # presumably concrete implementations are expected to provide it; confirm.
    def getValue(self, name):
        """Returns the value of the attribute with the given name."""
    def getQNameByName(self, name):
        """Returns the qualified name of the attribute with the given name."""
    def __len__(self):
        """Returns the number of attributes in the list."""
        return len(self._values)
    def __getitem__(self, name):
        """Alias for getValue."""
    def __delitem__(self, name):
        """Removes the attribute with the given name."""
    def __contains__(self, name):
        """Alias for has_key."""
    def has_key(self, name):
        """Returns True if the attribute name is in the list,
        False otherwise."""
    def get(self, name, alternative=None):
        """Return the value associated with attribute name; if it is not
        available, then return the alternative."""
    def keys(self):
        """Returns a list of the names of all attribute in the list."""
    def items(self):
        """Return a list of (attribute_name, value) pairs."""
    def values(self):
        """Return a list of all attribute values."""
class DomBuilder(ContentHandler):
    """
    A ContentHandler that assembles a Domlette Document from SAX events.
    """
    def __init__(self):
        self._ownerDoc = None
        return
    def getDocument(self):
        """
        Return the Document instance built during parsing.
        """
        return self._ownerDoc
    def startDocument(self):
        from Ft.Xml.Domlette import implementation
        self._ownerDoc = implementation.createRootNode()
        self._namespaces = {}
        self._nodeStack = [self._ownerDoc]
        return
    def endDocument(self):
        self._nodeStack.pop()
        assert not self._nodeStack, "orphaned node stack"
        return
    def startPrefixMapping(self, prefix, uri):
        # Remember the binding; it is materialized on the next element.
        self._namespaces[prefix] = uri
        return
    def startElementNS(self, expandedName, qualifiedName, attributes):
        namespaceURI, localName = expandedName
        element = self._ownerDoc.createElementNS(namespaceURI, qualifiedName)
        # Emit the pending namespace bindings as xmlns attributes.
        for prefix, uri in self._namespaces.items():
            xmlns_name = u'xmlns:' + prefix if prefix else u'xmlns'
            element.setAttributeNS(XMLNS_NAMESPACE, xmlns_name, uri)
        self._namespaces = {}
        # Copy over the element's own attributes.
        for attr_key in attributes:
            attr_ns, attr_local = attr_key
            element.setAttributeNS(attr_ns,
                                   attributes.getQNameByName(attr_key),
                                   attributes[attr_key])
        self._nodeStack.append(element)
        return
    def endElementNS(self, expandedName, qualifiedName):
        finished = self._nodeStack.pop()
        self._nodeStack[-1].appendChild(finished)
        return
    def characters(self, data):
        text_node = self._ownerDoc.createTextNode(data)
        self._nodeStack[-1].appendChild(text_node)
        return
class SaxPrinter(ContentHandler):
    """
    A ContentHandler that serializes the result using a 4Suite printer
    """
    def __init__(self, printer=XmlPrinter(sys.stdout, 'utf-8')):
        # NOTE(review): the default printer is a single instance created at
        # import time and shared by every SaxPrinter() call -- confirm that
        # this sharing is intentional.
        self._printer = printer
        try:
            # Some printers lack reset(); ignore the attribute if absent.
            self._printer.reset()
        except AttributeError:
            pass
        self._namespaces = {}
        return
    def startDocument(self):
        self._printer.startDocument()
        return
    def endDocument(self):
        self._printer.endDocument()
        return
    def startPrefixMapping(self, prefix, uri):
        # Buffer the declaration; flushed on the next startElementNS call.
        self._namespaces[prefix] = uri
        return
    def startElementNS(self, (namespaceURI, localName), qualifiedName,
                       attributes):
        # Convert the SAX Attributes object into a {qname: value} mapping
        # and hand it to the printer with any pending namespace bindings.
        attributes = dict([ (attributes.getQNameByName(name), value)
                            for name, value in attributes.items() ])
        self._printer.startElement(namespaceURI, qualifiedName,
                                   self._namespaces, attributes)
        self._namespaces = {}
        return
    def endElementNS(self, (namespaceURI, localName), qualifiedName):
        self._printer.endElement(namespaceURI, qualifiedName)
        return
    def characters(self, data):
        self._printer.text(data)
        return
app/forms.py | mihalw28/library_books | 1 | 12770632 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from app.models import Book, Author, Category
class AddBook(FlaskForm):
    """Form for adding a book to the library (Polish UI labels)."""
    title = StringField("Tytuł książki", validators=[DataRequired()])
    author = StringField(
        "Autor/Autorzy",
        validators=[DataRequired()],
        description='Np. "<NAME>, <NAME>"',
    )
    category = StringField(
        "Kategoria",
        validators=[DataRequired()],
        description="Przynajmniej jedna kategoria.",
    )
    description = TextAreaField("Opis", validators=[DataRequired()])
    submit = SubmitField("Dodaj do biblioteki!")
    def validate_title(self, title):
        """Basic validator. Assumption/simplification: two books with one title do not exist."""
        # In a commercial project a better solution will be making validation based on
        # books' ISBN number.
        # WTForms passes the field object as ``title``; ``self.title.data``
        # used below is the same value as ``title.data``.
        book = Book.query.filter_by(title=self.title.data).first()
        if book is not None:
            raise ValidationError("Książka o takim tytule już znajduje się w bazie.")
class ImportBooks(FlaskForm):
    """Search criteria for importing books from an external catalogue."""
    intitle = StringField("Tytuł książki")
    inauthor = StringField("Autor")
    inpublisher = StringField("Wydawca")
    subject = StringField("Kategoria")
    isbn = StringField("ISBN")
    submit = SubmitField("Zatwierdź dane.")
class FilterBooks(FlaskForm):
    """Filter the book list by author and/or category."""
    filter_a = StringField("Autor")
    filter_c = StringField("Kategoria")
    submit = SubmitField("Filtruj")
| 3.203125 | 3 |
replay_buffer.py | Jekyll1021/hindsight-experience-replay | 0 | 12770633 | <reponame>Jekyll1021/hindsight-experience-replay<filename>replay_buffer.py
import threading
import numpy as np
import time
"""
the replay buffer here is basically from the openai baselines code
"""
class replay_buffer:
    # HER-style replay buffer that stores whole episodes; layout and logic
    # adapted from the OpenAI baselines implementation.
    def __init__(self, env_params, buffer_size, sample_func, image=False):
        # env_params is assumed to provide 'max_timesteps', 'obs', 'goal',
        # 'action' (and 'two_cam' when image=True) -- confirm with callers.
        self.env_params = env_params
        self.T = env_params['max_timesteps']
        # Capacity is expressed in whole episodes of length T.
        self.size = buffer_size // self.T
        # memory management
        self.current_size = 0
        self.n_transitions_stored = 0
        self.sample_func = sample_func
        self.image = image
        # create the buffer to store info
        self.buffers = {'obs': np.empty([self.size, self.T, self.env_params['obs']]),
                        'obs_next': np.empty([self.size, self.T, self.env_params['obs']]),
                        'ag': np.empty([self.size, self.T, self.env_params['goal']]),
                        'ag_next': np.empty([self.size, self.T, self.env_params['goal']]),
                        'g': np.empty([self.size, self.T, self.env_params['goal']]),
                        'actions': np.empty([self.size, self.T, self.env_params['action']]),
                        'sg': np.empty([self.size, self.T, self.env_params['action']]),
                        'sg_next': np.empty([self.size, self.T, self.env_params['action']]),
                        'hidden': np.empty([self.size, self.T, 64]),
                        'hidden_next': np.empty([self.size, self.T, 64])
                        }
        if image:
            self.buffers["image"] = np.empty([self.size, self.T, 224, 224, 3 * (int(env_params['two_cam'])+1)])
            self.buffers["image_next"] = np.empty([self.size, self.T, 224, 224, 3 * (int(env_params['two_cam'])+1)])
        # thread lock
        self.lock = threading.Lock()
    # store the episode
    def store_episode(self, episode_batch):
        if self.image:
            mb_obs, mb_ag, mb_g, mb_actions, mb_sg, mb_hidden, mb_image = episode_batch
        else:
            mb_obs, mb_ag, mb_g, mb_actions, mb_sg, mb_hidden = episode_batch
        # Each sequence of length T+1 is split into "current" ([:-1]) and
        # "next" ([1:]) views so transitions line up index-by-index.
        mb_obs_next, mb_ag_next, mb_sg_next, mb_hidden_next = mb_obs[:, 1:, :], mb_ag[:, 1:, :], mb_sg[:, 1:, :], mb_hidden[:, 1:, :]
        mb_obs, mb_ag, mb_sg, mb_hidden = mb_obs[:, :-1, :], mb_ag[:, :-1, :], mb_sg[:, :-1, :], mb_hidden[:, :-1, :]
        if self.image:
            mb_image_next = mb_image[:, 1:, :]
            mb_image = mb_image[:, :-1, :]
        batch_size = mb_obs.shape[0]
        with self.lock:
            idxs = self._get_storage_idx(inc=batch_size)
            # store the informations
            self.buffers['obs'][idxs] = mb_obs
            self.buffers['obs_next'][idxs] = mb_obs_next
            self.buffers['ag'][idxs] = mb_ag
            self.buffers['ag_next'][idxs] = mb_ag_next
            self.buffers['g'][idxs] = mb_g
            self.buffers['sg'][idxs] = mb_sg
            self.buffers['sg_next'][idxs] = mb_sg_next
            self.buffers['actions'][idxs] = mb_actions
            self.buffers['hidden'][idxs] = mb_hidden
            self.buffers['hidden_next'][idxs] = mb_hidden_next
            self.n_transitions_stored += self.T * batch_size
            if self.image:
                self.buffers['image'][idxs] = mb_image
                self.buffers['image_next'][idxs] = mb_image_next
    # sample the data from the replay buffer
    def sample(self, batch_size):
        # Copy the filled prefix of each buffer under the lock, then sample
        # outside the critical section via the injected sample_func.
        temp_buffers = {}
        with self.lock:
            for key in self.buffers.keys():
                temp_buffers[key] = self.buffers[key][:self.current_size].copy()
        #
        # # sample transitions
        # if self.ee_reward:
        #     transitions = self.sample_func(temp_buffers, batch_size, info="precise")
        # else:
        #     transitions = self.sample_func(temp_buffers, batch_size)
        transitions = self.sample_func(temp_buffers, batch_size)
        return transitions
    def _get_storage_idx(self, inc=None):
        # Return `inc` episode slots: sequential while there is free space,
        # then random overwrites of existing episodes once the buffer is full.
        inc = inc or 1
        if self.current_size+inc <= self.size:
            idx = np.arange(self.current_size, self.current_size+inc)
        elif self.current_size < self.size:
            overflow = inc - (self.size - self.current_size)
            idx_a = np.arange(self.current_size, self.size)
            idx_b = np.random.randint(0, self.current_size, overflow)
            idx = np.concatenate([idx_a, idx_b])
        else:
            idx = np.random.randint(0, self.size, inc)
        self.current_size = min(self.size, self.current_size+inc)
        if inc == 1:
            idx = idx[0]
        return idx
| 2.90625 | 3 |
tools/Polygraphy/tests/tools/args/onnxrt/test_loader.py | borisfom/TensorRT | 1 | 12770634 | <filename>tools/Polygraphy/tests/tools/args/onnxrt/test_loader.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnxruntime as onnxrt
from polygraphy.tools.args import ModelArgs, OnnxLoaderArgs, OnnxrtSessionArgs
from tests.models.meta import ONNX_MODELS
from tests.tools.args.helper import ArgGroupTestHelper
class TestOnnxrtSessionArgs(object):
    # Integration test: the --providers CLI flag should propagate into the
    # ONNX Runtime session constructed by the argument group.
    def test_execution_providers(self):
        arg_group = ArgGroupTestHelper(OnnxrtSessionArgs(), deps=[ModelArgs(), OnnxLoaderArgs()])
        arg_group.parse_args([ONNX_MODELS["identity_identity"].path, "--providers", "cpu"])
        sess = arg_group.load_onnxrt_session()
        assert sess
        assert isinstance(sess, onnxrt.InferenceSession)
        assert sess.get_providers() == ["CPUExecutionProvider"]
| 2 | 2 |
jobs/reverse_strings.py | mtahmed/antnest | 1 | 12770635 | <filename>jobs/reverse_strings.py<gh_stars>1-10
def processor(self, string):
    """Return *string* with its characters in reverse order."""
    return ''.join(reversed(string))
def split(self, input_data, processor):
    """Yield one TaskUnit per newline-separated line of *input_data*."""
    import taskunit
    for line in input_data.split('\n'):
        yield taskunit.TaskUnit(data=line, processor=processor)
def combine(self):
    """Print the result of every collected task unit; returns None."""
    for unit in self.taskunits:
        print(unit.result)
    return
# Sample job input: two lines, each reversed independently by split()/processor().
input_data = 'hello\nworld'
| 3.375 | 3 |
OLD/agentModel/run.py | adamdboult/predictions | 0 | 12770636 | #!/usr/bin/env python3
#############
# Libraries #
#############
import random
import math
###########
# Classes #
###########
class person:
    """An agent with Cobb-Douglas preferences over two goods plus money."""
    instances = []

    def __init__(self):
        endowment = random.random()
        self.prefs = [0.5, 0.5]
        # data = [units of good 0, units of good 1, money held]
        self.data = [endowment, 1 - endowment, 0]
        self.utilityUpdate()
        person.instances.append(self)

    def trade(self, market):
        """Move holdings one step towards the target bundle, when feasible."""
        self.marginalUtilityUpdate()
        self.goalUpdate(market)
        for good in (0, 1):
            desired = self.goal[good] - self.data[good]
            unit_price = market.price[good]
            # Execute only if neither side ends up with negative goods or money.
            feasible = (
                self.data[good] + desired >= 0
                and self.data[2] - desired * unit_price >= 0
                and market.data[good] - desired >= 0
                and market.data[2] + desired * unit_price >= 0
            )
            if feasible:
                market.data[good] -= desired
                market.data[2] += desired * unit_price
                self.data[good] += desired
                self.data[2] -= desired * unit_price
        self.utilityUpdate()

    def goalUpdate(self, market):
        """Nudge the target bundle towards the good with the better MU/price."""
        self.goal = [self.data[0], self.data[1]]
        bang_0 = self.marginalUtility[0] / market.price[0]
        bang_1 = self.marginalUtility[1] / market.price[1]
        if bang_0 > bang_1:
            self.goal[0] += 0.01
            self.goal[1] -= 0.01
        elif bang_0 < bang_1:
            self.goal[0] -= 0.01
            self.goal[1] += 0.01

    def utilityUpdate(self):
        """Cobb-Douglas utility over the two goods (money gives no utility)."""
        good_0, good_1 = self.data[0], self.data[1]
        self.utility = (good_0 ** self.prefs[0]) * (good_1 ** self.prefs[1])

    def marginalUtilityUpdate(self):
        """Partial derivatives of the Cobb-Douglas utility in each good."""
        good_0, good_1 = self.data[0], self.data[1]
        self.marginalUtility = [
            self.prefs[0] * (good_0 ** (self.prefs[0] - 1)) * (good_1 ** self.prefs[1]),
            self.prefs[1] * (good_0 ** self.prefs[0]) * (good_1 ** (self.prefs[1] - 1)),
        ]
class market:
    """A market holding stock of two goods plus money, with adaptive prices."""
    instances = []

    def __init__(self):
        # data = [stock of good 0, stock of good 1, money held]
        self.data = [0, 0, 100]
        self.lastStock = self.data[:]
        self.price = [10.0, 10.0]
        market.instances.append(self)

    def updatePrices(self):
        """Raise the price of any good whose stock fell since last tick; otherwise lower it."""
        for good in (0, 1):
            if self.data[good] < self.lastStock[good]:
                self.price[good] += 0.001
            else:
                self.price[good] -= 0.001
        self.lastStock = self.data[:]
#############
# Functions #
#############
def printStatus(entity):
    """Print the data list of each registered instance of *entity*."""
    print("Printing")
    for member in entity.instances:
        print("Data: ", member.data)
    print()
#################
# Configuration #
#################
population = 3
##############
# Initialise #
##############
# NOTE(review): "Intialising" is a typo, but it is a runtime string.
print ("Intialising...")
print ("--------------")
# Each person() registers itself on person.instances as a side effect.
for i in range (population):
    person()
auctioneer = market()
#######
# Run #
#######
printStatus(person)
printStatus(market)
# Interactive loop: every agent trades once per tick, then prices adapt.
# Enter "q" at the prompt to quit.
var = ""
i = 0
while (var != "q"):
    print ("STARTING")
    print (i, ".......")
    for instance in person.instances:
        instance.trade(auctioneer)
    auctioneer.updatePrices()
    var = input("Please enter something: ")
    print ("you entered", var)
    printStatus(person)
    printStatus(market)
    print (auctioneer.price)
    i+=1
mode/examples/Basics/Structure/Loop/Loop.pyde | timgates42/processing.py | 1,224 | 12770637 | <reponame>timgates42/processing.py
"""
Loop.
The loop() function causes draw() to execute
repeatedly. If noLoop is called in setup()
the draw() is only executed once. In this example
click the mouse to execute loop(), which will
cause the draw() to execute repeatedly.
"""
# Vertical position of the horizontal line; decremented each frame in draw().
y = 100
def setup():
    """
    The statements in the setup() function
    run once when the program begins.
    """
    # Fix: without this declaration, the assignment below bound a local name
    # and the module-level y stayed at 100 instead of being re-centered.
    global y
    size(640, 360)  # Size should be the first statement
    stroke(255)     # Set stroke color to white
    noLoop()
    y = height * 0.5
def draw():
    """
    The statements in draw() are run until the
    program is stopped. Each statement is run in
    sequence and after the last line is read, the first
    line is run again.
    """
    global y
    background(0) # Set the background to black
    # Horizontal line scrolls upward one pixel per frame.
    line(0, y, width, y)
    y = y - 1
    # Wrap back to the bottom once the line leaves the top edge.
    if y < 0:
        y = height
def mousePressed():
    # Re-enable continuous drawing; setup() disabled it with noLoop().
    loop()
| 3.78125 | 4 |
secure_mail/migrations/0001_initial.py | Imbernix/django-secure-mail | 14 | 12770638 | <reponame>Imbernix/django-secure-mail
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: creates the Key and Address
    # tables for the secure_mail app. Do not hand-edit applied migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Key',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.TextField()),
                ('fingerprint', models.CharField(blank=True, editable=False, max_length=200)),
                ('use_asc', models.BooleanField(default=False, help_text="If True, an '.asc' extension will be added to email attachments sent to the address for this key.")),
            ],
            options={
                'verbose_name': 'Key',
                'verbose_name_plural': 'Keys',
            },
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.EmailField(blank=True, max_length=254)),
                ('key', models.ForeignKey(editable=False, null=True, on_delete=models.deletion.CASCADE, to='secure_mail.Key')),
                ('use_asc', models.BooleanField(default=False, editable=False)),
            ],
            options={
                'verbose_name': 'Address',
                'verbose_name_plural': 'Addresses',
            },
        ),
    ]
| 1.929688 | 2 |
model_zoo/YoloFastest/model/detector.py | danieltao1993/YoloAll | 190 | 12770639 | <reponame>danieltao1993/YoloAll
import torch
import torch.nn as nn
from model.fpn import *
from model.backbone.shufflenetv2 import *
class Detector(nn.Module):
    # Yolo-Fastest style detector: ShuffleNetV2 backbone plus a light FPN,
    # with one shared 1x1 conv head applied at two feature scales.
    def __init__(self, classes, anchor_num, load_param):
        super(Detector, self).__init__()
        out_depth = 112
        stage_out_channels = [-1, 24, 48, 96, 192]
        self.backbone = ShuffleNetV2(stage_out_channels, load_param)
        self.fpn = LightFPN(stage_out_channels[-2] + stage_out_channels[-1], stage_out_channels[-1], out_depth)
        # NOTE(review): the literal 3 duplicates anchor_num (3 anchors, each
        # with 5 box/objectness terms + class scores) -- confirm intent.
        self.output_layers = nn.Conv2d(out_depth, (5 + classes) * 3, 1, 1, 0, bias=True)
    def forward(self, x):
        C2, C3 = self.backbone(x)
        P2, P3 = self.fpn(C2, C3)
        out_2 = self.output_layers(P2)
        out_3 = self.output_layers(P3)
        return out_2, out_3
if __name__ == "__main__":
    # Smoke test: build the detector and export it to ONNX.
    # Fix: Detector takes three positional args (classes, anchor_num,
    # load_param); the original call Detector(80, 3) omitted load_param and
    # raised TypeError. Pass False so no pretrained weights are loaded.
    model = Detector(80, 3, False)
    test_data = torch.rand(1, 3, 320, 320)
    torch.onnx.export(model,                     # model being run
                      test_data,                 # model input (or a tuple for multiple inputs)
                      "test.onnx",               # where to save the model (can be a file or file-like object)
                      export_params=True,        # store the trained parameter weights inside the model file
                      opset_version=11,          # the ONNX version to export the model to
                      do_constant_folding=True)  # whether to execute constant folding for optimization
| 2.640625 | 3 |
qsiprep/interfaces/denoise.py | arokem/qsiprep | 36 | 12770640 | <filename>qsiprep/interfaces/denoise.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Interfaces for image denoising
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import numpy as np
import nibabel as nb
import pandas as pd
from nilearn.image import load_img, threshold_img, iter_img
from nipype import logging
from nipype.interfaces.base import traits, isdefined
from nipype.interfaces.mixins import reporting
from ..niworkflows.viz.utils import cuts_from_bbox, compose_view, plot_denoise
LOGGER = logging.getLogger('nipype.interface')
class SeriesPreprocReportInputSpec(reporting.ReportCapableInputSpec):
    # Path for the per-volume NMSE table; derived from ``in_file`` (which the
    # concrete subclass's input spec is presumed to declare -- confirm).
    nmse_text = traits.File(
        name_source='in_file',
        keep_extension=False,
        name_template='%s_nmse.txt')
class SeriesPreprocReportOutputSpec(reporting.ReportCapableOutputSpec):
    # CSV with pre/post mean intensity and scaled change per volume.
    nmse_text = traits.File(desc='nmse between input and output volumes')
class SeriesPreprocReport(reporting.ReportCapableInterface):
    """Mixin for 4D-series preprocessing interfaces that also produce a
    before/after visual report and a per-volume NMSE table."""
    input_spec = SeriesPreprocReportInputSpec
    # Fix: this attribute was misspelled ``output_spce``, so nipype fell back
    # to the parent class's output spec and never exposed ``nmse_text``.
    output_spec = SeriesPreprocReportOutputSpec
    _n_cuts = 7
    def __init__(self, **kwargs):
        """Instantiate SeriesPreprocReportlet, honoring an optional n_cuts kwarg."""
        self._n_cuts = kwargs.pop('n_cuts', self._n_cuts)
        super(SeriesPreprocReport, self).__init__(generate_report=True, **kwargs)
    def _calculate_nmse(self, original_nii, corrected_nii):
        """Calculate NMSE from the applied preprocessing operation."""
        outputs = self._list_outputs()
        output_file = outputs.get('nmse_text')
        pres = []
        posts = []
        differences = []
        # Per-volume mean intensity before/after, plus the mean absolute
        # change scaled by the pre-processing baseline.
        for orig_img, corrected_img in zip(iter_img(original_nii), iter_img(corrected_nii)):
            orig_data = orig_img.get_fdata()
            corrected_data = corrected_img.get_fdata()
            baseline = orig_data.mean()
            pres.append(baseline)
            posts.append(corrected_data.mean())
            scaled_diff = np.abs(corrected_data - orig_data).mean() / baseline
            differences.append(scaled_diff)
        # Column prefix is the concrete subclass name (e.g. "MyDenoiser_pre").
        title = str(self.__class__)[:-2].split('.')[-1]
        pd.DataFrame({title+"_pre": pres,
                      title+"_post": posts,
                      title+"_change": differences}).to_csv(output_file, index=False)
    def _generate_report(self):
        """Generate a reportlet comparing raw and denoised low-b/high-b volumes."""
        LOGGER.info('Generating denoising visual report')
        input_dwi, denoised_nii, field_nii = self._get_plotting_images()
        # find an image to use as the background
        image_data = input_dwi.get_fdata()
        image_intensities = np.array([img.mean() for img in image_data.T])
        lowb_index = int(np.argmax(image_intensities))
        highb_index = int(np.argmin(image_intensities))
        # Original images
        orig_lowb_nii = input_dwi.slicer[..., lowb_index]
        orig_highb_nii = input_dwi.slicer[..., highb_index]
        # Denoised images
        denoised_lowb_nii = denoised_nii.slicer[..., lowb_index]
        denoised_highb_nii = denoised_nii.slicer[..., highb_index]
        # Find spatial extent of the image
        contour_nii = mask_nii = None
        # NOTE(review): ``self.inputs.mask`` is not declared on
        # SeriesPreprocReportInputSpec; subclasses presumably add it -- confirm.
        if isdefined(self.inputs.mask):
            contour_nii = load_img(self.inputs.mask)
        else:
            mask_nii = threshold_img(denoised_lowb_nii, 50)
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
        # What image should be contoured?
        if field_nii is None:
            # No field image available: contour the denoising residual instead.
            lowb_field_nii = nb.Nifti1Image(denoised_lowb_nii.get_fdata()
                                            - orig_lowb_nii.get_fdata(),
                                            affine=denoised_lowb_nii.affine)
            highb_field_nii = nb.Nifti1Image(denoised_highb_nii.get_fdata()
                                             - orig_highb_nii.get_fdata(),
                                             affine=denoised_highb_nii.affine)
        else:
            lowb_field_nii = highb_field_nii = field_nii
        # Call composer
        compose_view(
            plot_denoise(orig_lowb_nii, orig_highb_nii, 'moving-image',
                         estimate_brightness=True,
                         cuts=cuts,
                         label='Raw Image',
                         lowb_contour=lowb_field_nii,
                         highb_contour=highb_field_nii,
                         compress=False),
            plot_denoise(denoised_lowb_nii, denoised_highb_nii, 'fixed-image',
                         estimate_brightness=True,
                         cuts=cuts,
                         label="Denoised",
                         lowb_contour=lowb_field_nii,
                         highb_contour=highb_field_nii,
                         compress=False),
            out_file=self._out_report
        )
        self._calculate_nmse(input_dwi, denoised_nii)
    def _get_plotting_images(self):
        """Implemented in subclasses to return the original image, the denoised image,
        and optionally an image created during the denoising step."""
        raise NotImplementedError()
| 2.140625 | 2 |
swarm5 - Copy.py | bobmitch/swarm | 0 | 12770641 | from psyco.classes import *
from heapq import heappush, heappop
import math
import operator
import pyglet
pyglet.options['debug_gl'] = False # TURN ON FOR DEBUGGIN GL!!!
import random
from pyglet.window import key
from pyglet.window import mouse
from pyglet.gl import *
from pyglet import clock
import copy
# --- window and global GL state ---------------------------------------------
#window = pyglet.window.Window(800, 600,"test",True, style='borderless')
window = pyglet.window.Window(1024, 768,"test",True, style='borderless')
window.set_vsync (False)  # vsync off: frame rate not tied to display refresh
#window.set_fullscreen (True)
# Standard alpha blending for all sprites and particles.
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable (GL_TEXTURE_2D)
#window.set_minimum_size (width=1024, height=768)
#
# LOAD TEXTURES
#
def tex_border (tex):
    """ takes a pyglet texture/region and insets the texture coordinates by half a texel
    allowing for sub-pixel blitting without interpolation with nearby regions within
    same texture atlas (mutates tex.tex_coords in place) """
    # pyglet tex_coords is a 12-tuple of (u, v, r) triples for the
    # bottom-left, bottom-right, top-right and top-left corners in that order.
    coord_width = tex.tex_coords[3] - tex.tex_coords[0]
    # BUG FIX: the vertical span is bottom-left -> top-left, i.e. [7] - [1].
    # The old code used [4] - [1] (bottom-right v minus bottom-left v), which
    # is zero for an unrotated region, so no vertical inset was ever applied.
    coord_height = tex.tex_coords[7] - tex.tex_coords[1]
    x_adjust = (coord_width / tex.width) / 2.0   # half a texel, in u units
    y_adjust = (coord_height / tex.height) / 2.0 # half a texel, in v units
    # create new 12-tuple texture coordinate, inset on every side
    tex.tex_coords = ( tex.tex_coords[0]+x_adjust, tex.tex_coords[1]+y_adjust, 0,
                       tex.tex_coords[3]-x_adjust, tex.tex_coords[4]+y_adjust, 0,
                       tex.tex_coords[6]-x_adjust, tex.tex_coords[7]-y_adjust, 0,
                       tex.tex_coords[9]+x_adjust, tex.tex_coords[10]-y_adjust, 0)
# All sprites are packed into a single atlas so they can be drawn from one
# bound texture.
sprite_atlas = pyglet.image.atlas.TextureAtlas (width=1024, height=1024)
batch = pyglet.graphics.Batch()
small_red_bug_pic = pyglet.image.load('medium_red_bug.png')
small_red_bug_tex = sprite_atlas.add (small_red_bug_pic)
# centre anchors so rotation/blitting happens about the sprite's middle
small_red_bug_tex.anchor_x = small_red_bug_pic.width / 2
small_red_bug_tex.anchor_y = small_red_bug_pic.height / 2
tex_border (small_red_bug_tex)
valid_placement_pic = pyglet.image.load('valid_placement.png')
valid_placement_tex = sprite_atlas.add (valid_placement_pic)
selected_pic = pyglet.image.load('selected.png')
selected_tex = sprite_atlas.add (selected_pic)
money_pic = pyglet.image.load('money.png')
money_tex = sprite_atlas.add (money_pic)
money_tex.anchor_x = money_pic.width / 2
# NOTE(review): anchor_y computed from width, not height - harmless only if
# the image is square; confirm or switch to money_pic.height / 2
money_tex.anchor_y = money_pic.width / 2
invalid_placement_pic = pyglet.image.load('invalid_placement.png')
invalid_placement_tex = sprite_atlas.add (invalid_placement_pic)
invalid_placement_tex.anchor_x = invalid_placement_pic.width / 2
# NOTE(review): same width-for-height pattern as money_tex above - verify
invalid_placement_tex.anchor_y = invalid_placement_pic.width / 2
cannon_1_base_pic = pyglet.image.load('base.png')
cannon_1_base_sprite = sprite_atlas.add (cannon_1_base_pic)
cannon_1_turret_pic = pyglet.image.load('cannon_1_turret.png')
cannon_1_turret_sprite = sprite_atlas.add (cannon_1_turret_pic)
cannon_1_turret_sprite.anchor_x = cannon_1_turret_pic.width / 2
# NOTE(review): same width-for-height pattern as money_tex above - verify
cannon_1_turret_sprite.anchor_y = cannon_1_turret_pic.width / 2
tex_border (cannon_1_turret_sprite)
sell_outline_pic = pyglet.image.load ('sell_outline.png')
sell_outline_sprite = sprite_atlas.add (sell_outline_pic)
bullet_1_pic = pyglet.image.load('bullet_1.png')
bullet_1_sprite = sprite_atlas.add (bullet_1_pic)
bullet_1_sprite.anchor_x = bullet_1_pic.width / 2
bullet_1_sprite.anchor_y = bullet_1_pic.height / 2
spark_pic = pyglet.image.load('spark.png')
spark_sprite = sprite_atlas.add (spark_pic)
spark_sprite.anchor_x = spark_pic.width / 2
spark_sprite.anchor_y = spark_pic.height / 2
range_pic = pyglet.image.load('range.png')
range_sprite = sprite_atlas.add (range_pic)
# UI button images are kept as plain images (not added to the atlas).
frame_test_pic = pyglet.image.load ('frame_test.png')
button_ai_weak_off_pic = pyglet.image.load ('ai_weak_off.png')
button_ai_weak_on_pic = pyglet.image.load ('ai_weak_on.png')
button_cannon_tower_pic = pyglet.image.load ('cannon.png')
button_cannon_tower_mouseover_pic = pyglet.image.load ('cannon_mouseover.png')
button_cannon_tower_selected_pic = pyglet.image.load ('cannon_selected.png')
button_start_pic = pyglet.image.load ('start.png')
button_start_mouseover_pic = pyglet.image.load ('start_mouseover.png')
upgrade_pic = pyglet.image.load('upgrade.png')
sell_pic = pyglet.image.load('sell_ok.png')
map_bg = pyglet.image.load ('wp.jpg')
#
# END OF LOAD TEXTURES
#
class amouse:
    """Application mouse state: raw screen position, derived map-grid
    position, one-shot click flags, and the cursor overlay texture."""
    def __init__ (self):
        self.x = 0
        self.y = 0
        self.mapx = 0
        self.mapy = 0
        self.dx = 0 # deploy x and y - used rather than mapx and mapy to give more natural feel to deploying towers
        self.dy = 0
        self.overmap = False
        self.lmb_clicked=False
        self.rmb_clicked=False
        self.tex = None
    def update_pos (self,x,y):
        """Record the latest raw screen coordinates."""
        self.x = x
        self.y = y
    def draw (self):
        """Draw the cursor overlay; during a valid placement, draw a faint
        copy under the cursor plus a solid grid-snapped footprint."""
        if self.tex:
            if self.tex is valid_placement_tex:
                glColor4f (1,1,1,0.3)
                self.tex.blit (self.x-game.map.cellsize, self.y-game.map.cellsize)
                glColor4f (1,1,1,1)
                self.tex.blit (game.map.llx + (self.dx*game.map.cellsize), game.map.lly + (self.dy*game.map.cellsize) )
            else:
                self.tex.blit (self.x, self.y)
    def update (self):
        """Refresh map coordinates, pick the cursor texture for the current
        deploy state, and consume any clicks made since the last frame."""
        # get map coords and display appropriate mouse icons
        if game.state & game.states['ingame']:
            if self.x > game.map.llx and self.x < game.map.llx + game.map.width_pixels and self.y > game.map.lly and self.y < game.map.lly + game.map.height_pixels:
                self.overmap = True
                self.mapx = (self.x - game.map.llx ) / game.map.cellsize
                self.mapy = (self.y - game.map.lly ) / game.map.cellsize
                #if game.deploying and game.map.placement_valid (self.mapx, self.mapy):
                if game.deploying:
                    # deploy coords snap to the cell under the tower's centre
                    self.dx = int( ((self.x - game.map.llx )-(game.map.cellsize*0.5)) / game.map.cellsize)
                    self.dy = int( ((self.y - game.map.lly )-(game.map.cellsize*0.5)) / game.map.cellsize)
                    if game.credits >= game.deploying.cost:
                        if game.map.placement_valid (self.dx, self.dy):
                            self.tex = valid_placement_tex
                        else:
                            self.tex = invalid_placement_tex
                    else:
                        # cannot afford the tower currently being deployed
                        self.tex = money_tex
            else:
                self.overmap = False
                self.tex = None
                self.mapx = -1
                self.mapy = -1
        # reset mouse clicks from last frame and handle mouse clicks made
        if self.lmb_clicked:
            self.lmb_clicked=False
            if game.state & game.states['ingame']:
                if self.tex is valid_placement_tex and game.deploying:
                    if game.deploying.deploy():
                        game.credits-=game.deploying.cost
                        credits_label.text = 'Credits: '+str(game.credits)
                        game.deploying=copy.copy(game.deploying) # ready to deploy next tower of same type
                    else:
                        pass # could not deploy for whatever reason
                if self.tex is invalid_placement_tex and game.deploying:
                    # handle playing of error noise or something
                    pass
        if self.rmb_clicked:
            # right-click cancels any pending deploy and clears the selection
            game.deploying=None
            self.tex = None
            game.selected=None
            self.rmb_clicked=False
    def over (self, ll, tr):
        """Return True if the cursor is inside the box from ll to tr
        (inclusive), where ll/tr are (x, y) screen-coordinate tuples."""
        if self.x >= ll[0] and self.x <= tr[0] and self.y >= ll[1] and self.y <=tr[1]:
            return True
        else:
            return False
class GUI:
    """Root widget container spanning the whole window.

    Holds top-level frames in ``widgets`` and fans the per-frame
    update/draw calls out to each of them.
    """
    def __init__ (self):
        # cache window dimensions once, then record the four corners
        win_w = window.width
        win_h = window.height
        self.ll = (0, 0)
        self.lr = (win_w, 0)
        self.tr = (win_w, win_h)
        self.tl = (0, win_h)
        self.widgets = []
    def update (self, dt):
        """Tick every contained frame."""
        for child in self.widgets:
            child.update(dt)
    def draw (self):
        """Render every contained frame."""
        for child in self.widgets:
            child.draw()
class Frame:
    """ a widget container """
    def __init__ (self, pos=(250,250), width=100, height=100, scale=False, border=None, background=None, background_colour=None):
        # ``pos`` is the TOP-LEFT corner of the frame in screen coordinates
        self.pos=pos
        self.scale=scale
        self.enabled = True
        # if background texture supplied, use that to determine area of frame, unless scale is True
        if background and not scale:
            self.width = background.width
            self.height = background.height
        else:
            self.width=width
            self.height=height
        # get coords of corners
        self.ll = (pos[0],pos[1]-self.height)
        self.lr = (pos[0]+self.width,pos[1]-self.height)
        self.tr = (pos[0]+self.width,pos[1])
        self.tl = (pos[0], pos[1])
        self.widgets=[]
        self.background = background
        # if no background colour given: opaque white tint when a texture was
        # supplied, otherwise translucent white (alpha 0.1)
        if background_colour is None:
            if background:
                self.background_colour = (1,1,1,1)
            else:
                self.background_colour = (1,1,1,0.1)
        else:
            self.background_colour = background_colour
        self.border = border
        # logic
        self.mouseover = False
        # callbacks
        self.on_mouseover = None
        # add self to gui
        gui.widgets.append (self)
    def update(self,dt):
        """Refresh the mouseover flag, then tick every child widget."""
        #if mymouse.over ( (self.pos[0],self.pos[1]-self.height) , (self.pos[0]+self.width,self.pos[1]) ):
        if mymouse.over ( self.ll , self.tr ):
            self.mouseover = True
        else:
            self.mouseover = False
        # update widgets in frame
        for widget in self.widgets:
            widget.update(dt)
    def draw (self):
        """Draw the frame background (tinted texture, or a flat tinted quad
        when no texture is set), then draw every child widget."""
        if self.background:
            if self.scale:
                glColor4f (*self.background_colour)
                self.background.blit (self.ll[0], self.ll[1], width=self.width, height=self.height)
                glColor4f (1,1,1,1)
            else:
                glColor4f (*self.background_colour)
                self.background.blit (self.ll[0], self.ll[1])
                glColor4f (1,1,1,1)
        else:
            # untextured: disable texturing so the quad gets the flat colour
            glColor4f (*self.background_colour)
            glDisable (GL_TEXTURE_2D)
            pyglet.graphics.draw(4, pyglet.gl.GL_QUADS, ('v2f', ( self.ll[0], self.ll[1],
                                                                  self.lr[0], self.lr[1],
                                                                  self.tr[0], self.tr[1],
                                                                  self.tl[0], self.tl[1])))
            glEnable (GL_TEXTURE_2D)
        for widget in self.widgets:
            widget.draw()
class Button:
    """ a button widget """
    def __init__ (self, parent, pos=(10, 10), width=10, height=10, scale=False, rmb_clears=True, background=None, background_colour=None, background_selected=None, background_mouseover=None):
        self.parent=parent # pointer to parent frame
        self.pos=(parent.tl[0]+pos[0], parent.tl[1]-pos[1]) # relative to parent top left
        self.scale=scale
        self.enabled=True
        self.rmb_clears=rmb_clears  # right-click deselects this button
        # if background texture supplied, use that to determine area of frame
        if background and not scale:
            self.width = background.width
            self.height = background.height
        else:
            self.width=width
            self.height=height
        # get coords of corners
        self.ll = (self.pos[0], self.pos[1]-self.height)
        self.lr = (self.pos[0]+self.width, self.pos[1]-self.height)
        self.tr = (self.pos[0]+self.width, self.pos[1])
        self.tl = (self.pos[0], self.pos[1])
        self.background = background
        self.background_mouseover = background_mouseover
        self.background_selected = background_selected
        # if no background colour given: opaque white tint when a texture was
        # supplied, otherwise translucent white (alpha 0.1)
        if background_colour is None:
            if background:
                self.background_colour = (1,1,1,1)
            else:
                self.background_colour = (1,1,1,0.1)
        else:
            self.background_colour = background_colour
        # logic
        self.mouseover = False
        self.click = False
        self.selected = False
        # callbacks
        self.on_mouseover = None
        self.on_click = None
        self.on_right_click = None
        # add self to parent frame
        parent.widgets.append (self)
        # push handlers: registers on_mouse_motion/on_mouse_release below
        # with the pyglet window's event stack
        window.push_handlers(self)
    def update(self,dt):
        # no per-frame logic yet; placeholder for future behaviour
        if self.enabled:
            pass
    def draw (self):
        """Draw the selected / mouseover / normal background, whichever
        matches the current state (mouseover without a dedicated texture
        falls back to a brighter tint of the normal one)."""
        if self.enabled:
            if self.selected:
                glColor4f (*self.background_colour)
                self.background_selected.blit (self.ll[0], self.ll[1])
                glColor4f (1,1,1,1)
            else:
                if self.mouseover and self.background_mouseover:
                    glColor4f (*self.background_colour)
                    self.background_mouseover.blit (self.ll[0], self.ll[1])
                    glColor4f (1,1,1,1)
                elif self.mouseover:
                    glColor4f (1,1,1,0.8)
                    self.background.blit (self.ll[0], self.ll[1])
                    glColor4f (1,1,1,1)
                else:
                    glColor4f (*self.background_colour)
                    self.background.blit (self.ll[0], self.ll[1])
                    glColor4f (1,1,1,1)
    def on_mouse_motion(self, x, y, dx, dy):
        """pyglet event handler: track whether the cursor is over the button."""
        if (x>self.ll[0] and x<self.tr[0] and y>self.ll[1] and y<self.tl[1]):
            self.mouseover=True
        else:
            self.mouseover=False
    def on_mouse_release(self, x, y, buttons, modifiers):
        """pyglet event handler: fire on_click on left release over the
        button; right release clears selection when rmb_clears is set."""
        if self.mouseover and buttons & mouse.LEFT:
            if self.on_click:
                self.on_click(self)
        if buttons & mouse.RIGHT and self.rmb_clears:
            self.selected = False
class aSell:
    """Animated tower demolition: occupies the sold tower's 2x2 footprint for
    ``time`` seconds (drawn as a fading quad), then frees the map cells and
    recalculates all routes."""
    def __init__ (self, x, y, time):
        # x/y are map-grid coordinates of the footprint's lower-left cell
        self.x = copy.copy(x)
        self.y = copy.copy(y)
        # pre-computed screen coordinates of the footprint's lower-left corner
        self.screenx = game.map.llx + x*game.map.cellsize
        self.screeny = game.map.lly + y*game.map.cellsize
        self.time=time
        self.time_alive=0
        self.scale = 1.0 / time  # converts elapsed time to a 0..1 fade factor
    def update(self,dt):
        self.time_alive+=dt
        if self.time_alive>self.time:
            # sale complete - update map and recalc routes
            game.towers.remove (self)
            game.map.cell[self.x][self.y].passable=True
            game.map.cell[self.x+1][self.y].passable=True
            game.map.cell[self.x+1][self.y+1].passable=True
            game.map.cell[self.x][self.y+1].passable=True
            for route in game.map.routes:
                route.recalc_no_priority()
    def draw_selection (self):
        # no need
        pass
    def draw(self):
        """Draw a yellow quad that fades out over the sell duration, plus the
        sell outline sprite on top."""
        glColor4f (1,1,0.5, 1-(self.time_alive * self.scale))
        glDisable (GL_TEXTURE_2D)
        pyglet.graphics.draw(4, pyglet.gl.GL_QUADS, ('v2f', ( self.screenx, self.screeny,
                                                              self.screenx+game.cw*2, self.screeny,
                                                              self.screenx+game.cw*2, self.screeny+game.cw*2,
                                                              self.screenx, self.screeny+game.cw*2)))
        glEnable (GL_TEXTURE_2D)
        glColor4f (1,1,1,1)
        sell_outline_sprite.blit (self.screenx, self.screeny)
class aCell:
    """A single map grid square."""
    def __init__ (self):
        # A fresh cell is open ground: walkable, with no tower occupying it.
        self.passable = True
        self.tower = None
class aSwarm:
    """One enemy wave: spawns ``no`` critters of ``type`` at the start of
    every route, then promotes the next wave when its timer expires."""
    def __init__(self, type=0, no=10, health=100, time=60, credits=10):
        """ create a swarm """
        self.type=type
        self.no=no
        self.count=0            # critters spawned so far
        self.health=health      # hit points per critter
        self.credits=credits    # credits awarded per kill
        self.time=time          # seconds until the next wave is released
        # BUG FIX: always give the spawn interval a default, so update()
        # cannot raise AttributeError for swarm types other than 0
        # (previously the attribute was only assigned when type == 0).
        self.time_between_spawns = 0.8
        if type==0:
            self.time_between_spawns = 0.8
        self.time_since_last_spawn=0
    def update (self, tick):
        """Advance timers, spawn any due critters, and hand control to the
        next wave once this one's timer runs out."""
        self.time -= tick
        self.time_since_last_spawn+=tick
        if self.count < self.no and self.time_since_last_spawn > self.time_between_spawns:
            if self.type == 0:
                # create critter type 0 at start of routes
                for route in game.map.routes:
                    yadda = normalEnemy(route, self.health, self.credits )
                    yadda.health=self.health
                    game.enemies.append ( yadda )
                self.count+=1
                self.time_since_last_spawn=0
        if self.time < 0:
            # do next wave
            log_label.text += "Next wave...\n"
            if game.map.swarms and len(game.map.active_swarms)==1:
                # if this is only current swarm, and still have swarms left, pop next from list onto actives
                game.map.active_swarms.append ( game.map.swarms.pop(0) )
            # remove self from active swarm list
            game.map.active_swarms.remove (self)
            # else do nothing
class aNode:
    """A single grid node in a route's flow field."""
    def __init__ (self,pos):
        self.pos=pos            # [x, y] grid coordinate
        self.next=None          # next node toward the route's exit
        self.distance=99999     # path distance to the exit (sentinel = unreached)
    # Ordering for heapq: nodes compare by remaining distance.  heapq orders
    # items with '<', which in Python 3 requires __lt__ (the old __cmp__-only
    # implementation is ignored there and relied on the removed cmp builtin).
    def __lt__(self, other):
        return self.distance < other.distance
    # Python 2 fallback; __lt__ above takes precedence when both are defined.
    def __cmp__(self, other): return cmp(self.distance, other.distance)
class aRoute:
    """A flow field from ``start`` to ``end`` across the map: every reachable
    cell stores its distance to the exit and a link to the next cell."""
    def __init__ (self, start, end, map):
        """ route class - takes start, end tuples of (x,y), and the map it belongs to """
        self.map = map
        self.start=start
        self.end=end
        self.route={}
        # one aNode per map cell, keyed by (x, y)
        for a in xrange(0,self.map.width):
            for b in xrange(0,self.map.height):
                self.route[(a,b)]=aNode([a,b])
    def reset (self):
        """Clear all node links/distances back to the unreached state."""
        for a in xrange(0,self.map.width):
            for b in xrange(0,self.map.height):
                self.route[(a,b)].next=None
                self.route[(a,b)].distance=99999
    def recalc (self):
        """ dijkstra algorithm - works from exit to start fills all reachable nodes in map
        (uniform edge cost of 1; returns True if the start is reachable) """
        path_found = False
        self.reset()
        Q = []
        visited = {}
        self.route[(self.end[0],self.end[1])].next=None
        self.route[(self.end[0],self.end[1])].distance=0 # dist to exit (end) node is going to be zero
        # create starting set Q, (binary heap) containing all nodes with exit node set to 0
        heappush (Q, self.route[(self.end[0],self.end[1])] )
        map = self.map
        nb = self.map.get_neighbours   # local alias: hot loop
        while Q:
            curnode = heappop (Q) # pop next nearest node off top of binary heap
            if curnode not in visited:
                visited[curnode]=True
                neighbours = nb (curnode.pos)
                for pos in neighbours:
                    if pos == self.start:
                        path_found = True
                    neighbour_node = self.route[(pos[0],pos[1])]
                    if neighbour_node.distance > curnode.distance+1:
                        neighbour_node.next = curnode
                        neighbour_node.distance = curnode.distance+1
                        if neighbour_node not in visited:
                            # if neighbour node not already fully processed, stick on priority queue for consideration
                            heappush (Q, neighbour_node)
        return path_found
    def recalc_no_priority (self):
        """ dijkstra-style relaxation without a priority queue - works from exit
        to start and fills all reachable nodes in map; diagonal steps cost 1.4
        instead of 1 (returns True if the start is reachable) """
        path_found = False
        self.reset()
        Q = []
        visited = {}
        self.route[(self.end[0],self.end[1])].next=None
        self.route[(self.end[0],self.end[1])].distance=0 # dist to exit (end) node is going to be zero
        # seed the worklist with the exit node
        Q.append (self.route[(self.end[0],self.end[1])] )
        map = self.map
        nb = self.map.get_neighbours   # local alias: hot loop
        distance = 1 # default distance - changed to 1.4 for diagonals
        while Q:
            curnode = Q.pop() # pop next node off the plain LIFO worklist (no priority ordering)
            if curnode not in visited:
                visited[curnode]=True
                neighbours = nb (curnode.pos)
                for pos in neighbours:
                    if pos == self.start:
                        path_found = True
                    neighbour_node = self.route[(pos[0],pos[1])]
                    # a step that changes both x and y is a diagonal move
                    if pos[0]<>curnode.pos[0] and pos[1]<>curnode.pos[1]:
                        distance = 1.4
                    else:
                        distance = 1
                    if neighbour_node.distance > curnode.distance+distance:
                        neighbour_node.next = curnode
                        neighbour_node.distance = curnode.distance+distance
                        if neighbour_node not in visited:
                            # if neighbour node not already fully processed, stick on worklist for consideration
                            Q.append (neighbour_node)
        return path_found
class aMap:
def __init__ (self, width=28, height=24, cellsize=24, llx=148, lly=148):
""" aMap - takes width, height, cellsize and llx and lly for offset of lower left hand corner from screen edges """
# create 2d array of Cells
self.cell = []
self.width = width
self.height = height
self.width_pixels = width * cellsize
self.height_pixels = height * cellsize
self.current_swarm=0
self.llx = llx
self.lly = lly
self.cellsize=cellsize
self.sell_time = 2
self.sell_time_multiplier = 1.05
self.active_swarms = []
self.swarms = [] # type, number of critters, health of each critter, time till next swarm, credits per kill
self.swarms.append ( aSwarm (0, 5, 10, 10, 1) )
self.swarms.append ( aSwarm (0, 10, 15, 10, 1) )
self.swarms.append ( aSwarm (0, 15, 15, 20, 1) )
self.swarms.append ( aSwarm (0, 20, 20, 20, 1) )
self.swarms.append ( aSwarm (0, 20, 40, 20, 1) )
self.swarms.append ( aSwarm (0, 20, 60, 30, 1) )
self.swarms.append ( aSwarm (0, 20, 80, 30, 1) )
self.swarms.append ( aSwarm (0, 30, 90, 30, 2) )
self.swarms.append ( aSwarm (0, 30, 100, 40, 3) )
self.swarms.append ( aSwarm (0, 30, 120, 40, 4) )
self.swarms.append ( aSwarm (0, 40, 150, 50, 5) )
self.routes=[]
self.routes.append ( aRoute([0,height/2],[width-1,height/2], self) )
self.routes.append ( aRoute([width/2,height-1],[width/2,0], self) )
# create map
for a in xrange(0,width):
row=[]
for b in xrange(0,height):
row.append ( aCell() )
self.cell.append ( row )
# TEMPORARY WALLS - TODO: implement custom map loading
for a in range(width):
if a < (width/2)-4 or a > (width/2)+3:
self.cell[a][0].passable = False
self.cell[a][height-1].passable = False
for a in range(height):
if a < (height/2)-3 or a > (height/2)+2:
self.cell[0][a].passable = False
self.cell[width-1][a].passable = False
self.create_display_list()
def swarm_update (self, tick):
if self.active_swarms:
for swarm in self.active_swarms:
swarm.update(tick)
def create_display_list (self):
glNewList(1,GL_COMPILE)
# draw map background here if needed
map_bg.blit (self.llx, self.lly, width=(self.width*self.cellsize), height=(self.height*self.cellsize))
glDisable (GL_TEXTURE_2D)
# GRID
glColor4f (0.7,0.7,1,0.05)
glLineWidth(1)
glBegin (GL_LINES)
for x in range (0,self.width+1):
glVertex2f (self.llx + (x*self.cellsize), self.lly)
glVertex2f (self.llx + (x*self.cellsize), self.lly + (self.height * self.cellsize))
for y in range (0,self.height+1):
glVertex2f (self.llx, self.lly + (y*self.cellsize))
glVertex2f (self.llx + (self.width * self.cellsize), self.lly + (y*self.cellsize))
glEnd()
# WALLS
glColor4f (1,1,1,0.2)
glBegin (GL_QUADS)
for x in range (0,self.width):
for y in range (0,self.height):
if self.cell[x][y].passable != True:
glVertex2f ( (x*self.cellsize)+self.llx, (y*self.cellsize)+self.lly)
glVertex2f ((x*self.cellsize)+self.llx, (y*self.cellsize)+self.lly+self.cellsize)
glVertex2f ((x*self.cellsize)+self.llx+self.cellsize, (y*self.cellsize)+self.lly+self.cellsize)
glVertex2f ((x*self.cellsize)+self.llx+self.cellsize, (y*self.cellsize)+self.lly)
glEnd()
glColor4f (1,1,1,1)
glEnable (GL_TEXTURE_2D)
glEndList()
def draw (self):
pass
glCallList(1)
def draw_selection (self):
pass
def debug_check_cells (self):
for x in range(self.width):
for y in range(self.height):
print "Cell ",x,",",y," is passable? ", self.cell[x][y].passable
def recalc_routes (self):
valid = True
for route in self.routes:
if not route.recalc_no_priority():
valid = False
return valid
def get_neighbours (self, pos):
""" for pos (tuple of x,y) returns passable neightbours in map as a list of coordinate tuples - empty list if none """
l=[]
px=pos[0]
py=pos[1]
for x in range (pos[0]-1, pos[0]+2):
for y in range (pos[1]-1,pos[1]+2):
if x is not -1 and y is not -1 and x is not self.width and y is not self.height:
# in bounds
#print "debug: getting neighbours for cell at ",x,",",y
if self.cell[x][y].passable:
corner_walkable=True
if x == px-1:
if y == py-1:
if self.cell[px-1][py].passable is False or self.cell[px][py-1].passable is False:
corner_walkable=False
elif y == py+1:
if self.cell[px-1][py].passable is False or self.cell[px][py+1].passable is False:
corner_walkable=False
elif x == px+1:
if y == py-1:
if self.cell[px][py-1].passable is False or self.cell[px+1][py].passable is False:
corner_walkable=False
elif y == py+1:
if self.cell[px+1][py].passable is False or self.cell[px][py+1].passable is False:
corner_walkable=False
if corner_walkable:
l.append ([x,y])
return l
def placement_valid (self, x, y):
""" takes map x and y and determines if 2x2 placement is valid """
# check against map itself
if x<0 or y<0 or x>self.width-2 or y>self.height-2:
return False
if not self.cell[x][y].passable or not self.cell[x+1][y].passable or not self.cell[x+1][y+1].passable or not self.cell[x][y+1].passable:
return False
# map is ok, check against all beasties
for enemy in game.enemies:
if int(enemy.pos[0]) == x or int(enemy.pos[0]) == x+1:
if int(enemy.pos[1]) == y or int(enemy.pos[1]) == y+1:
return False
return True
class normalEnemy:
    """ normal enemy class. takes: route, health, credits
    Positions are in map-grid units; +0.5 offsets centre the critter in a
    cell.  ``dir`` is in degrees, fed straight to glRotatef (0 = facing up,
    per set_target_dir). """
    def __init__ (self, route=None, health = 10, credits = 5):
        self.map = game.map
        self.pos=[0,0]
        self.pos[0]=route.start[0]+0.5
        self.pos[1]=route.start[1]+0.5
        self.route = route
        self.diag = False        # currently moving diagonally?
        self.health = health
        self.credits = credits   # credits awarded to the player on kill
        self.alive = True
        self.dir = None          # current facing, degrees
        self.target_dir = None   # facing we are rotating towards, degrees
        self.speed = 1.8         # cells per second
        # centre of the next flow-field cell to walk towards
        self.next_position = copy.copy(self.route.route[(int(self.pos[0]),int(self.pos[1]))].next.pos)
        self.next_position[0] += 0.5
        self.next_position[1] += 0.5
        self.previous_position = self.next_position
        # a step that differs in both axes by more than 0.6 is diagonal
        if abs(self.pos[0]-self.next_position[0]) > 0.6 and abs(self.pos[1]-self.next_position[1]) > 0.6:
            self.diag = True
        else:
            self.diag = False
        # make sure initial direction is facing initial target path
        self.set_target_dir()
        self.dir = copy.copy (self.target_dir)
    def draw (self):
        """Blit the bug sprite at its screen position, rotated to ``dir``."""
        glPushMatrix()
        x = self.map.llx + (self.pos[0]*self.map.cellsize)
        y = self.map.lly + (self.pos[1]*self.map.cellsize)
        glTranslatef (x, y, 0)
        glRotatef (self.dir, 0, 0, 1)
        small_red_bug_tex.blit (0, 0)
        glPopMatrix()
    def set_target_dir (self):
        """Set target_dir (degrees) from the grid direction of travel toward
        next_position; eight compass headings in 45-degree steps."""
        if int(self.next_position[0]) > int(self.pos[0]):
            # right
            if int(self.next_position[1]) > int(self.pos[1]):
                # up right
                self.target_dir = 315
            elif int(self.next_position[1]) == int(self.pos[1]):
                # right
                self.target_dir = 270
            else:
                #down right
                self.target_dir = 225
        elif int(self.next_position[0]) < int(self.pos[0]):
            # left
            if int(self.next_position[1]) > int(self.pos[1]):
                # up left
                self.target_dir = 45
            elif int(self.next_position[1]) == int(self.pos[1]):
                # left
                self.target_dir = 90
            else:
                #down left
                self.target_dir = 135
        else:
            # straight up or down
            if int(self.next_position[1]) > int(self.pos[1]):
                # up
                self.target_dir = 0
            else:
                #down
                self.target_dir = 180
    def get_current_grid_pos (self):
        """Return the [x, y] grid cell this enemy currently occupies."""
        return [ int(self.pos[0]), int(self.pos[1]) ]
    def get_path_distance_left (self):
        """Return the flow-field distance from the current cell to the exit."""
        gridpos = self.get_current_grid_pos ()
        return self.route.route[(gridpos[0],gridpos[1])].distance
    def update (self, tick):
        """Advance one tick: die when health is depleted, follow the flow
        field, rotate toward the direction of travel, then move."""
        if self.health<0:
            # I`m dead
            # do other cleanup jobs and spawn whatever necessary particles
            game.particles.append (particle_explosion(self.pos))
            self.alive = False
            game.score += 200
            game.credits += self.credits
            credits_label.text = "Credits: " + str(game.credits)
            game.enemies.remove (self)
            return 0
        # check still on path
        if abs(self.pos[0]-self.next_position[0]) < 0.1 and abs(self.pos[1]-self.next_position[1]) < 0.1:
            # get next path position
            if self.route.route[(int(self.pos[0]),int(self.pos[1]))].next:
                self.previous_position = self.next_position
                self.next_position = copy.copy(self.route.route[(int(self.pos[0]),int(self.pos[1]))].next.pos)
                self.next_position[0] += 0.5
                self.next_position[1] += 0.5
                if abs(self.pos[0]-self.next_position[0]) > 0.6 and abs(self.pos[1]-self.next_position[1]) > 0.6:
                    self.diag = True
                else:
                    self.diag = False
                self.set_target_dir() # set new target direction according to new next position
            else:
                # might be end of path - check
                if self.route.route[(int(self.pos[0]),int(self.pos[1]))].pos == self.route.end:
                    # end of path
                    # job done - update all
                    self.alive = False
                    game.enemies.remove (self)
                    game.lives-=1
                else:
                    #else not end of path: route was rebuilt under us - backtrack
                    self.next_position = self.previous_position
        # rotate if necessary to face direction of travel
        if self.dir <> self.target_dir:
            if self.dir < self.target_dir:
                self.dir += 9 # must be a divisor of 45 for rotations to work properly ie. 3, 5, 9, 15, 45
            else:
                self.dir -= 9 # must be a divisor of 45 for rotations to work properly
        # move toward next_position; 0.7 factor approximates 1/sqrt(2) so
        # diagonal travel speed matches orthogonal travel speed
        if self.diag:
            speed = self.speed * 0.7
            if self.pos[0] > self.next_position[0]+0.1:
                self.pos[0] -= tick * speed
            elif self.pos[0] < self.next_position[0]-0.1:
                self.pos[0] += tick * speed
            if self.pos[1] > self.next_position[1]+0.1:
                self.pos[1] -= tick *speed
            elif self.pos[1] < self.next_position[1]-0.1:
                self.pos[1] += tick *speed
        else:
            if self.pos[0] > self.next_position[0]+0.1:
                self.pos[0] -= tick * self.speed
            elif self.pos[0] < self.next_position[0]-0.1:
                self.pos[0] += tick * self.speed
            if self.pos[1] > self.next_position[1]+0.1:
                self.pos[1] -= tick * self.speed
            elif self.pos[1] < self.next_position[1]-0.1:
                self.pos[1] += tick * self.speed
class Bullet_1:
    """Homing cannon projectile.  Positions/velocities are in map-grid units;
    ``speed`` is cells per second.  Re-aims at the target every frame while
    the target lives; once the target dies it flies on in a straight line
    until it leaves the map."""
    def __init__ (self, pos, target, damage):
        self.pos=pos
        # NOTE(review): oldpos/reallyoldpos are maintained every frame but not
        # read in this class - presumably for trail effects; confirm usage.
        self.oldpos=copy.copy(pos)
        self.reallyoldpos=copy.copy(pos)
        self.target=target
        self.damage=damage
        self.speed = 8
        # initial velocity: normalized direction to target, scaled by speed
        dx = target.pos[0]-pos[0]
        dy = target.pos[1]-pos[1]
        length = math.sqrt (dx**2 + dy**2)
        if dx==0:
            self.direction=[0,self.speed*dy]
        elif dy==0:
            self.direction=[self.speed*dx,0]
        else:
            self.direction = [ self.speed*(dx/length), self.speed*(dy/length) ]
    def draw (self):
        bullet_1_sprite.blit (game.map.llx + (game.map.cellsize * self.pos[0]) , game.map.lly + (game.map.cellsize * self.pos[1]) )
    def update (self, tick):
        """Advance one tick: home in on a living target (applying damage and
        spawning sparks within 0.5 cells), or coast until off-map."""
        if self.target.alive:
            dx = self.target.pos[0]-self.pos[0]
            dy = self.target.pos[1]-self.pos[1]
            if abs(dx) < 0.5 and abs(dy) < 0.5:
                # HIT TARGET
                # Spawn hit particle
                for x in xrange(6):
                    game.particles.append (particle_small_hit (copy.copy(self.pos)))
                game.projectiles.remove (self)
                self.target.health -= self.damage
            else:
                # re-aim: recompute the velocity toward the target's new position
                speed = copy.copy (self.speed) * tick
                self.reallyoldpos=copy.copy(self.oldpos)
                self.oldpos=copy.copy(self.pos)
                if dx==0:
                    self.direction=[0,speed*dy]
                elif dy==0:
                    self.direction=[speed*dx,0]
                else:
                    length = math.sqrt (dx**2 + dy**2)
                    self.direction = [ speed*(dx/length), speed*(dy/length) ]
                self.pos[0] += self.direction[0]
                self.pos[1] += self.direction[1]
        else:
            # target gone - just remove projectile once out of range / off map
            if self.pos[0]<0 or self.pos[0]>game.map.width or self.pos[1]<0 or self.pos[1]>game.map.height:
                game.projectiles.remove (self)
                return 1
            self.reallyoldpos=copy.copy(self.oldpos)
            self.oldpos=copy.copy(self.pos)
            self.pos[0] += self.direction[0]
            self.pos[1] += self.direction[1]
class Cannon_1 (pyglet.window.Window):
def __init__ (self, position=(0,0)):
self.label = "CANNON - LVL 1"
self.highlight = False
self.pos = position
self.center = (position[0]+1, position[1]+1)
# screen coords
self.llx = 0
self.lly = 0
self.urx = 0
self.ury = 0
self.cx = 0
self.cy = 0
self.direction = 0
self.active = False
self.target = None
self.cost = 5
self.range_sq = 3
self.range = self.range_sq * self.range_sq
self.upgrade_cost = 40
self.upgrade_time = 2
self.damage_per_shot = 3
self.sell_price = 3
self.time_between_shots = 1.2
self.time_since_last_shot = 3
self.target_mode = 2 # 1=nearest end of path 2=nearest 3=strongest 4=strongest 5=fastest 6=slowest 0 = no AI
self.upgradeable = True
self.info_headings = "Range:\nDamage:\nRate:\n\nUpgrade for:\nSell for:"
self.info = str(self.range_sq) + "\n" + str(self.damage_per_shot) + "\n" + str(self.time_between_shots) + "\n\n" + str(self.upgrade_cost) + "\n" + str(self.sell_price)
self.mouse_over = False
def deploy (self):
# make map squares impassable
game.map.cell[mymouse.dx][mymouse.dy].passable=False
game.map.cell[mymouse.dx+1][mymouse.dy].passable=False
game.map.cell[mymouse.dx+1][mymouse.dy+1].passable=False
game.map.cell[mymouse.dx][mymouse.dy+1].passable=False
# put pointer to tower in map cells
game.map.cell[mymouse.dx][mymouse.dy].tower=self
game.map.cell[mymouse.dx+1][mymouse.dy].tower=self
game.map.cell[mymouse.dx+1][mymouse.dy+1].tower=self
game.map.cell[mymouse.dx][mymouse.dy+1].tower=self
self.pos = (mymouse.dx, mymouse.dy)
self.center = (self.pos[0]+1, self.pos[1]+1)
# get screen coord area
self.llx = game.map.llx + self.pos[0] * game.map.cellsize
self.lly = game.map.lly + self.pos[1] * game.map.cellsize
self.urx = game.map.llx + (self.pos[0]+2) * game.map.cellsize
self.ury = game.map.lly + (self.pos[1]+2) * game.map.cellsize
self.cx = game.map.llx + self.center[0] * game.map.cellsize
self.cy = game.map.lly + self.center[1] * game.map.cellsize
if game.map.recalc_routes(): # recalc routes
game.towers.append ( self )
window.push_handlers(self)
return True
else:
# make routes passable again
game.map.cell[mymouse.dx][mymouse.dy].passable=True
game.map.cell[mymouse.dx+1][mymouse.dy].passable=True
game.map.cell[mymouse.dx+1][mymouse.dy+1].passable=True
game.map.cell[mymouse.dx][mymouse.dy+1].passable=True
# remove pointers to tower in cells
game.map.cell[mymouse.dx][mymouse.dy].tower=None
game.map.cell[mymouse.dx+1][mymouse.dy].tower=None
game.map.cell[mymouse.dx+1][mymouse.dy+1].tower=None
game.map.cell[mymouse.dx][mymouse.dy+1].tower=None
game.map.recalc_routes() # recalc original routes
return False # tell calling function that deploy failed
def sell (self):
window.remove_handlers(self)
self.mouse_over = False # BUGFIX: range still drawn when S key used to sell and mouse over spot where tower was
game.towers.remove(self)
game.selected = None
if game.state & game.states['pregame']:
# take no time
game.towers.append ( aSell (self.pos[0],self.pos[1], 0.01) )
# and free
game.credits += self.cost
else:
game.towers.append ( aSell (self.pos[0],self.pos[1], game.map.sell_time) )
game.credits += self.sell_price
game.map.sell_time *= game.map.sell_time_multiplier
def upgrade (self):
# take upgrade time from selected tower
game.credits-=self.upgrade_cost
pos=copy.copy(self.pos)
upgrade_to = self.get_upgrade()
if game.state & game.states['pregame']:
self.upgrade_time = 0.01
game.towers.append ( anUpgrade (pos[0], pos[1], self.upgrade_time, upgrade_to) )
game.towers.remove( self )
global selected
selected=None
    def get_upgrade(self):
        """Return the tower this one upgrades into, placed at the same spot."""
        return Cannon_1 (copy.copy(self.pos))
def update (self,dt):
self.time_since_last_shot+=dt
if self.time_since_last_shot > self.time_between_shots:
# find a new target
nearby_enemies=[]
for enemy in game.enemies:
# get squared distance
sd = (self.center [0]-enemy.pos[0])**2 + (self.center [1]-enemy.pos[1])**2
if sd < self.range:
nearby_enemies.append ((enemy, sd, enemy.get_path_distance_left() )) # create list of enemy, dist, and path length remaining
if nearby_enemies:
if self.target_mode == 1: # nearest to end of path
enemy_path_left=99999
for near_enemy in nearby_enemies:
if near_enemy[2] < enemy_path_left:
enemy_path_left=near_enemy[2]
self.target = near_enemy[0]
elif self.target_mode == 2: # nearest to tower
closest=99999
for near_enemy in nearby_enemies:
if near_enemy[1] < closest:
closest = near_enemy[1]
self.target = near_enemy[0]
elif self.target_mode == 3: # strongest
strongest=-1
for near_enemy in nearby_enemies:
if near_enemy[0].health > strongest:
strongest = near_enemy[0].health
self.target = near_enemy[0]
elif self.target_mode == 4: # weakest
strongest=99999
for near_enemy in nearby_enemies:
if near_enemy[0].health < strongest:
strongest = near_enemy[0].health
self.target = near_enemy[0]
elif self.target_mode == 5: # fastest
fastest=0
for near_enemy in nearby_enemies:
if near_enemy[0].speed > fastest:
fastest = near_enemy[0].speed
self.target = near_enemy[0]
elif self.target_mode == 6: # slowest
fastest=99999
for near_enemy in nearby_enemies:
if near_enemy[0].speed < fastest:
fastest = near_enemy[0].speed
self.target = near_enemy[0]
else:
self.target = None
return 0
if self.target and self.target.alive:
# if target also still alive
# check target is still in range
sd = (self.center[0]-self.target.pos[0])**2 + (self.center[1]-self.target.pos[1])**2
if sd < self.range:
# have target, can we fire?
if self.time_since_last_shot > self.time_between_shots:
# yes we can
#print "BANG!"
game.projectiles.append (Bullet_1 ( [self.pos[0]+1, self.pos[1]+1], self.target, self.damage_per_shot))
self.time_since_last_shot=0
# turn turret to face target
# get angle in degrees from turret to target
dx = self.center[0] - self.target.pos[0]
dy = self.center[1] - self.target.pos[1]
self.direction = math.atan2 (dy, -dx) * 57.2957795 + 90
else:
# old target left range
self.target = None
else:
self.target = None
def draw (self, alpha=1):
# base
#cannon_1_base_sprite.blit (game.map.llx + (game.map.cellsize * self.pos[0]) , game.map.lly + (game.map.cellsize * self.pos[1]) )
cannon_1_base_sprite.blit (self.llx, self.lly)
# turret
glPushMatrix()
#glTranslatef (game.map.llx + (game.map.cellsize * self.center[0]),game.map.llx + (game.map.cellsize * self.center[1]),0)
glTranslatef (self.cx, self.cy, 0)
glRotatef (-self.direction, 0, 0, 1)
cannon_1_turret_sprite.blit (0,0)
glPopMatrix()
    def draw_highlight (self):
        """Blit the scaled range indicator centred on the tower.

        NOTE(review): width is derived from self.range_sq, while targeting
        in update() compares squared distances against self.range -- the
        naming looks swapped; confirm which attribute holds the linear
        radius.
        """
        width = self.range_sq * game.map.cellsize * 2
        offset = width * 0.5
        #range_sprite.blit ( -offset + game.map.llx + (game.map.cellsize * self.center[0]), -offset + game.map.lly + (game.map.cellsize * self.center[1]), width=width, height=width)
        range_sprite.blit ( -offset + self.cx, -offset + self.cy, width=width, height=width)
    def draw_selection (self):
        """Blit the selection marker over the tower's footprint."""
        selected_tex.blit (self.llx, self.lly)
def on_mouse_motion(self, x, y, dx, dy):
if (x>self.llx and x<self.urx and y>self.lly and y<self.ury):
self.mouse_over=True
game.highlighted=self
else:
self.mouse_over=False
    def on_mouse_release(self, x, y, buttons, modifiers):
        """Select this tower when the left button is released while hovering it."""
        if self.mouse_over and buttons & mouse.LEFT:
            game.selected=self
class particle_small_hit:
    """A single short-lived spark spawned at a hit location.

    The spark drifts in a random direction and fades from white toward
    transparent red over its (random) lifetime, then removes itself from
    game.particles.
    """
    def __init__ (self, pos):
        # Lifetime in seconds, uniform in [0, 0.8).
        self.duration = random.random()*0.8
        self.time_alive = 0
        # Drift per update tick, uniform in [-0.1, 0.1) on each axis.
        self.xdir = 0.1-(random.random()*0.2)
        self.ydir = 0.1-(random.random()*0.2)
        self.pos=pos
        # Precomputed so the fade factor goes 0 -> 1 over the lifetime.
        self.alpha_scale = 1.0 / self.duration
    def draw (self):
        """Blit the spark, tinted and faded according to its age."""
        fade = self.time_alive * self.alpha_scale
        glColor4f (1, 1-fade, 1-fade, 1-fade)
        spark_sprite.blit (game.map.llx + (game.map.cellsize * self.pos[0]),
                           game.map.lly + (game.map.cellsize * self.pos[1]))
        glColor4f (1, 1, 1, 1)  # restore the default colour
    def update (self, tick):
        """Age the spark by `tick` seconds; drift while alive, else self-destruct."""
        self.time_alive += tick
        if self.time_alive < self.duration:
            self.pos[0] += self.xdir
            self.pos[1] += self.ydir
        else:
            game.particles.remove (self)
class particle_explosion:
    """A burst of spark particles spawned at a death/explosion position.

    Each of the 264 particles gets a random direction; all start at `pos`
    (grid coordinates), decelerate as they fly and fade out together over
    `duration` seconds, after which the burst removes itself.
    """
    def __init__ (self, pos):
        # Parenthesised print works identically under Python 2 and 3.
        print ("Explosion created...")
        self.duration = 1.5
        self.time_alive = 0
        self.particle_count = 264
        self.dir=[]
        self.curpos=[]
        # BUGFIX: these were computed from self.curpos[0]/[1] while curpos was
        # still the empty list, which always raised IndexError.  The spawn
        # point is the grid position passed in.
        self.screenx = game.map.llx + (game.map.cellsize * pos[0])
        self.screeny = game.map.lly + (game.map.cellsize * pos[1])
        for i in range (0, self.particle_count):
            # random direction in [-0.5, 0.5) per axis
            self.dir.append ([0.5-(random.random()*1.0), 0.5-(random.random()*1.0)])
            # each particle needs its own mutable copy of the start position
            self.curpos.append (copy.copy(pos))
        # Precomputed so the fade factor goes 0 -> 1 over the lifetime.
        self.alpha_scale = 1.0 / self.duration
        #self.sprite = pyglet.sprite.Sprite(spark_image, x, y, batch=batch)
    def draw (self):
        """Blit every particle, tinted and faded according to the burst's age."""
        x = self.time_alive * self.alpha_scale
        glColor4f (1, 1-x, 1-x, 1-x)
        for i in range (0, self.particle_count):
            spark_sprite.blit (game.map.llx + (game.map.cellsize * self.curpos[i][0]),
                               game.map.lly + (game.map.cellsize * self.curpos[i][1]))
        glColor4f (1, 1, 1, 1)  # restore the default colour
    def update (self, tick):
        """Age the burst; move and decelerate particles, or self-destruct."""
        self.time_alive += tick
        if self.time_alive < self.duration:
            for i in range (0, self.particle_count):
                self.curpos[i][0] += self.dir[i][0]
                self.curpos[i][1] += self.dir[i][1]
                self.dir[i][0] *= 0.96  # slows particles due to air density
                self.dir[i][1] *= 0.96
        else:
            game.particles.remove (self)
class aGame:
    """Top-level game state: the map, entity lists, score/credits/lives and
    the paused/pregame/ingame/menu state bitmask."""
    def __init__ (self, cellwidth=24):
        self.cw=cellwidth
        # State is a bitmask so several states can be active at once
        # (e.g. ingame | pregame while waiting for the start button).
        self.states={'paused':1, 'pregame':2, 'ingame':4, 'menu':8}
        self.state=self.states['ingame'] | self.states['pregame']
        self.map = None
        self.towers=[]
        self.enemies=[]
        self.projectiles=[]
        self.particles=[]
        self.score = 0
        self.bonus = 0
        self.credits = 100
        self.lives = 20
        self.create_level()
        self.selected = None      # tower currently selected by the player
        self.highlighted = None   # tower currently under the mouse
        self.deploying = None     # tower type being placed, if any
    def create_level (self):
        """Build the map and precompute all enemy routes."""
        self.map = aMap(cellsize=self.cw, llx=self.cw * 2, lly=self.cw * 4)
        for route in self.map.routes:
            route.recalc()
@window.event
def on_key_release (symbol, modifiers):
    """Keyboard shortcuts: S sells the selected tower, 1 picks the cannon."""
    if game.state & game.states['ingame']:
        if game.selected and symbol==key.S:
            game.selected.sell()
        if symbol==key._1:
            # behave exactly as if the palette button were clicked
            cannon_tower_button.on_click(cannon_tower_button)
@window.event
def on_mouse_release (x,y,buttons,modifiers):
    """Record button releases on the mouse wrapper; consumers poll and
    clear these flags."""
    if buttons & mouse.RIGHT:
        mymouse.rmb_clicked=True
    if buttons & mouse.LEFT:
        mymouse.lmb_clicked=True
@window.event
def on_mouse_motion (x,y,dx,dy):
    """Track the pointer on the mouse wrapper and echo it in the debug label."""
    mymouse.update_pos (x,y)
    debug_label.text = str(x) + "," + str(y)
@window.event
def on_draw():
    """Pyglet draw handler: render map, entities, labels, GUI and cursor."""
    window.clear()
    #background_image.blit(0,0)
    if game.state & game.states['ingame']:
        # draw in height order, far to near
        game.map.draw()
        # draw appropriate ingame stuff
        for enemy in game.enemies:
            enemy.draw()
        for tower in game.towers:
            tower.draw()
        if game.selected:
            # redraw the selected tower on top with range ring and marker
            game.selected.draw_highlight()
            game.selected.draw()
            game.selected.draw_selection()
        if game.highlighted and game.highlighted.mouse_over:
            # avoid double-drawing the highlight for the selected tower
            if game.selected and game.selected is game.highlighted:
                pass
            else:
                game.highlighted.draw_highlight()
        for proj in game.projectiles:
            proj.draw()
        for particle in game.particles:
            particle.draw()
        # draw labels
        debug_label.draw()
        #log_label.draw()
        credits_label.text = "Credits: " + str(game.credits)
        credits_label.draw()
    # draw gui
    gui.draw()
    # draw mouse (cursor / deployment ghost) last so it sits on top
    mymouse.draw()
    # for x in range(1,15): #14 wide
    #     for y in range(1,13): #12 high
    #         base_tex.blit (x*48,y*48)
def update(dt):
    """Per-frame tick (scheduled at ~60 Hz): advance GUI, world and mouse."""
    # update gui
    gui.update(dt)
    if game.state & game.states['ingame']:
        # while ingame...
        if game.state & game.states['paused']:
            # change dt = 0, so that anything dependant on delta time doesnt change :)
            dt = 0
        if not game.state & game.states['pregame']:
            # make sure swarms dont kick off until pregame is unset
            game.map.swarm_update (dt)
        for enemy in game.enemies:
            enemy.update(dt)
        for tower in game.towers:
            tower.update (dt)
        for proj in game.projectiles:
            proj.update (dt)
        for particle in game.particles:
            particle.update (dt)
    mymouse.update()
# global instances of classes declared above
# create game singleton
game=aGame(24)
# HUD label showing the player's credits (top-left area).
credits_label = pyglet.text.Label ('Credits: '+str(game.credits),
                          font_name='Arial',
                          font_size=14,
                          width=window.width/2,
                          height=200,
                          multiline=True,
                          x=100, y=window.height-57,
                          anchor_x='left', anchor_y='top')
# Log label -- created but currently not drawn (see on_draw).
log_label = pyglet.text.Label('Log:\n',
                          font_name='Arial',
                          font_size=14,
                          width=window.width/2,
                          height=200,
                          multiline=True,
                          x=window.width/2, y=window.height-27,
                          anchor_x='left', anchor_y='top')
# Small label updated with the mouse position for debugging.
debug_label = pyglet.text.Label('blah',
                          font_name='Arial',
                          font_size=8,
                          x=20, y=window.height-20,
                          anchor_x='left', anchor_y='center')
def cannon_button_on_click (button):
    """Tower-palette handler: select the clicked button and start deploying
    a new cannon (game.deploying drives the mouse draw icon)."""
    # unselect all buttons in same frame
    for widget in button.parent.widgets:
        widget.selected=False
    # select button clicked
    button.selected=True
    # update game selected - which in turn drives the mouse draw icon
    game.deploying = Cannon_1 ()
def start_button_on_click (button):
    """Start the game: leave the pregame state and launch the first swarm."""
    # disable start button
    button.enabled=False
    # flip pregame state
    game.state = game.state ^ game.states['pregame']
    # start swarms
    game.map.active_swarms.append (game.map.swarms.pop(0))
def upgrade_button_on_click (button):
    # TODO: upgrading via this button is not implemented.
    # NOTE(review): this handler is never attached to upgrade_button below.
    pass
def sell_button_on_click (button):
    """Sell the currently selected tower, if any."""
    if game.selected:
        game.selected.sell()
# mouse wrapper and GUI wiring
mymouse=amouse()
gui=GUI()
start_button = Button (gui, pos=(650,40), rmb_clears=False, background=button_start_pic, background_mouseover=button_start_mouseover_pic)
start_button.on_click = start_button_on_click
selected_frame = Frame ( pos=(game.cw*3+(game.map.width*game.cw), game.cw*15 + 32) , width = 300, height = 300, background_colour=(0,0,0,0))
upgrade_button = Button (selected_frame, pos=(0,0), background=upgrade_pic)
sell_button = Button (selected_frame, pos=(141, 0), background=sell_pic)
sell_button.on_click = sell_button_on_click
tower_frame = Frame ( pos=(60+(game.map.width*game.map.cellsize),600), width=250, height=150, background_colour=(0,0,0,0) )
cannon_tower_button = Button (tower_frame, pos=(10,10), background=button_cannon_tower_pic, background_selected=button_cannon_tower_selected_pic, background_mouseover=button_cannon_tower_mouseover_pic)
cannon_tower_button.on_click = cannon_button_on_click
#pyglet.clock.schedule(update)
# drive update() at a fixed 60 Hz
pyglet.clock.schedule_interval(update,1/60.)
pyglet.app.run()
| 2.34375 | 2 |
setup.py | ClashTheBunny/bluedesk | 6 | 12770642 | <reponame>ClashTheBunny/bluedesk
#!/usr/bin/env python3
from os import path

# setuptools understands install_requires; plain distutils silently ignores
# it, so prefer setuptools when it is available and fall back otherwise.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Use README.md as the long description when present (e.g. building from a
# source checkout); fall back to an empty string otherwise.
current_dir = path.abspath(path.dirname(__file__))
description_file = path.join(current_dir, 'README.md')
if path.exists(description_file):
    with open(description_file, encoding='utf-8') as f:
        long_description = f.read()
else:
    long_description = ""

setup(name='Bluedesk',
      version='1.9',
      scripts=[ 'bin/bluedesk' ],
      description='CLI tool to control lower energy actuator systems (office desks) through bluetooth.',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author='<NAME>',
      author_email='<EMAIL>',
      packages=[
          'bluedesk',
          'bluedesk.desks',
          'bluedesk.commands'
      ],
      install_requires=[
          "blessings==1.7",
          "bluepy==1.3.0",
          "inquirer==2.6.3",
          "python-editor==1.0.4",
          "readchar==2.0.1",
          "six==1.12.0",
      ]
)
granite/analysis/example.py | codacy-badger/granite | 0 | 12770643 | <reponame>codacy-badger/granite
from markdowngenerator import MarkdownGenerator
def main():
    """Generate ``example.md`` demonstrating the MarkdownGenerator API:
    headers, bolded text and a table built from a list of dicts."""
    with MarkdownGenerator(
        # By setting enable_write as False, content of the file is written
        # into buffer at first, instead of writing directly into the file
        # This enables for example the generation of table of contents
        filename="example.md", enable_write=False
    ) as doc:
        doc.addHeader(1, "Hello there!")
        doc.writeTextLine(f'{doc.addBoldedText("This is just a test.")}')
        doc.addHeader(2, "Second level header.")
        # Each dict is one table row; keys become the column headers.
        table = [
            {"Column1": "col1row1 data", "Column2": "col2row1 data"},
            {"Column1": "col1row2 data", "Column2": "col2row2 data"},
        ]
        doc.addTable(dictionary_list=table)
        doc.writeTextLine("Ending the document....")
if __name__ == "__main__":
    main()
tests/internal/instance_type/test_instance_type_p4d_auto.py | frolovv/aws.ec2.compare | 0 | 12770644 | <gh_stars>0
# Testing module instance_type.p4d
import pytest
import ec2_compare.internal.instance_type.p4d
def test_get_internal_data_instance_type_p4d_get_instances_list():
    # The generated p4d module must expose at least one instance size.
    assert len(ec2_compare.internal.instance_type.p4d.get_instances_list()) > 0
def test_get_internal_data_instance_type_p4d_get():
    # The module-level `get` mapping must be populated as well.
    assert len(ec2_compare.internal.instance_type.p4d.get) > 0
| 2.015625 | 2 |
scofield/account/urls.py | howiworkdaily/scofield-project | 4 | 12770645 | <reponame>howiworkdaily/scofield-project
from django.conf.urls.defaults import *
from account.forms import *
# Account URLconf (legacy Django string-view syntax).  The acct_* names are
# the reverse()/{% url %} handles used elsewhere in the project.
urlpatterns = patterns('',
    url(r'^email/$', 'account.views.email', name="acct_email"),
    url(r'^signup/$', 'account.views.signup', name="acct_signup"),
    url(r'^login/$', 'account.views.login', name="acct_login"),
    url(r'^password_change/$', 'account.views.password_change', name="acct_passwd"),
    url(r'^password_reset/$', 'account.views.password_reset', name="acct_passwd_reset"),
    # logout uses the stock auth view with a project-specific template
    url(r'^logout/$', 'django.contrib.auth.views.logout', {"template_name": "account/logout.html"}, name="acct_logout"),
    # (\w+) captures the confirmation key emailed to the user
    url(r'^confirm_email/(\w+)/$', 'emailconfirmation.views.confirm_email', name="acct_confirm_email"),
)
| 2.046875 | 2 |
test/e2e/live_cluster/test_show.py | jfwm2/aerospike-admin | 0 | 12770646 | <gh_stars>0
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import unittest
import lib.live_cluster.live_cluster_root_controller as controller
import lib.utils.util as util
from test.e2e import util as test_util
sys.path.insert(1, os.getcwd())
from lib.view.sheet import set_style_json
set_style_json()
def print_header(actual_header):
    """Debug helper: echo each header column quoted with a trailing comma,
    ready to paste into an expected-header list literal in these tests."""
    for column in actual_header:
        print(f'"{column}",')
class TestShowConfig(unittest.TestCase):
real_stdout = None
output_list = list()
service_config = ""
network_config = ""
test_namespace_config = ""
bar_namespace_config = ""
xdr_config = ""
    @classmethod
    def setUpClass(cls):
        """Run `show config` (plus xdr) once against the live cluster and
        cache each titled output section on the class for all tests."""
        cls.real_stdout = sys.stdout
        TestShowConfig.rc = controller.LiveClusterRootController(
            user="admin", password="<PASSWORD>"
        )
        actual_out = util.capture_stdout(TestShowConfig.rc.execute, ["show", "config"])
        actual_out += util.capture_stdout(
            TestShowConfig.rc.execute, ["show", "config", "xdr"]
        )
        TestShowConfig.output_list = test_util.get_separate_output(actual_out)
        TestShowConfig.is_bar_present = False
        # Route each output section to the class attribute its tests read.
        for item in TestShowConfig.output_list:
            title = item["title"]
            if "Service Configuration" in title:
                TestShowConfig.service_config = item
            elif "Network Configuration" in title:
                TestShowConfig.network_config = item
            elif "test Namespace Configuration" in title:
                TestShowConfig.test_namespace_config = item
            elif "bar Namespace Configuration" in title:
                TestShowConfig.bar_namespace_config = item
                TestShowConfig.is_bar_present = True
            elif "XDR Configuration" in title:
                TestShowConfig.xdr_config = item
    @classmethod
    def tearDownClass(cls):
        """Drop the controller and restore stdout captured in setUpClass."""
        cls.rc = None
        sys.stdout = cls.real_stdout
def test_network(self):
"""
This test will assert network output on heading, header, parameters.
TODO: test for values as well
"""
exp_heading = "Network Configuration"
exp_header = [
"Node",
"fabric.channel-bulk-fds",
"fabric.channel-bulk-recv-threads",
"fabric.channel-ctrl-fds",
"fabric.channel-ctrl-recv-threads",
"fabric.channel-meta-fds",
"fabric.channel-meta-recv-threads",
"fabric.channel-rw-fds",
"fabric.channel-rw-recv-pools",
"fabric.channel-rw-recv-threads",
"fabric.keepalive-enabled",
"fabric.keepalive-intvl",
"fabric.keepalive-probes",
"fabric.keepalive-time",
"fabric.latency-max-ms",
"fabric.port",
"fabric.recv-rearm-threshold",
"fabric.send-threads",
"fabric.tls-name",
"fabric.tls-port",
"heartbeat.connect-timeout-ms",
"heartbeat.interval",
"heartbeat.mode",
"heartbeat.mtu",
"heartbeat.multicast-group",
"heartbeat.port",
"heartbeat.protocol",
"heartbeat.timeout",
"info.port",
"service.access-port",
"service.address",
"service.alternate-access-port",
"service.port",
"service.tls-access-port",
"service.tls-alternate-access-port",
"service.tls-name",
"service.tls-port",
]
(
actual_heading,
actual_description,
actual_header,
actual_data,
num_records,
) = test_util.parse_output(TestShowConfig.network_config)
self.assertTrue(exp_heading in actual_heading)
self.assertListEqual(exp_header, actual_header)
def test_service(self):
"""
Asserts service config output with heading, header & parameters.
TODO: test for values as well
"""
exp_heading = "Service Configuration"
exp_header = [
"Node",
"paxos-single-replica-limit",
"pidfile",
"proto-fd-max",
"advertise-ipv6",
"auto-pin",
"batch-index-threads",
"batch-max-buffers-per-queue",
"batch-max-requests",
"batch-max-unused-buffers",
"batch-without-digests",
"cluster-name",
"disable-udf-execution",
"enable-benchmarks-fabric",
"enable-health-check",
"enable-hist-info",
"feature-key-file",
"info-threads",
"keep-caps-ssd-health",
"log-local-time",
"log-millis",
"microsecond-histograms",
"migrate-fill-delay",
"migrate-max-num-incoming",
"migrate-threads",
"min-cluster-size",
"node-id",
"node-id-interface",
"proto-fd-idle-ms",
"proto-slow-netio-sleep-ms",
"query-batch-size",
"query-buf-size",
"query-bufpool-size",
"query-in-transaction-thread",
"query-long-q-max-size",
"query-microbenchmark",
"query-pre-reserve-partitions",
"query-priority",
"query-priority-sleep-us",
"query-rec-count-bound",
"query-req-in-query-thread",
"query-req-max-inflight",
"query-short-q-max-size",
"query-threads",
"query-threshold",
"query-untracked-time-ms",
"query-worker-threads",
"run-as-daemon",
"scan-max-done",
"scan-threads-limit",
"service-threads",
"sindex-builder-threads",
"sindex-gc-max-rate",
"sindex-gc-period",
"stay-quiesced",
"ticker-interval",
"transaction-max-ms",
"transaction-retry-ms",
"vault-ca",
"vault-path",
"vault-token-file",
"vault-url",
"work-directory",
"debug-allocations",
"indent-allocations",
"service.port",
"service.address",
"service.access-port",
"service.alternate-access-port",
"service.tls-port",
"service.tls-access-port",
"service.tls-alternate-access-port",
"service.tls-name",
"heartbeat.mode",
"heartbeat.multicast-group",
"heartbeat.port",
"heartbeat.interval",
"heartbeat.timeout",
"heartbeat.mtu",
"heartbeat.protocol",
"fabric.port",
"fabric.tls-port",
"fabric.tls-name",
"fabric.channel-bulk-fds",
"fabric.channel-bulk-recv-threads",
"fabric.channel-ctrl-fds",
"fabric.channel-ctrl-recv-threads",
"fabric.channel-meta-fds",
"fabric.channel-meta-recv-threads",
"fabric.channel-rw-fds",
"fabric.channel-rw-recv-pools",
"fabric.channel-rw-recv-threads",
"fabric.keepalive-enabled",
"fabric.keepalive-intvl",
"fabric.keepalive-probes",
"fabric.keepalive-time",
"fabric.latency-max-ms",
"fabric.recv-rearm-threshold",
"fabric.send-threads",
"info.port",
"enable-ldap",
"enable-security",
"ldap-login-threads",
"privilege-refresh-period",
"ldap.disable-tls",
"ldap.polling-period",
"ldap.query-base-dn",
"ldap.query-user-dn",
"ldap.query-user-password-file",
"ldap.role-query-base-dn",
"ldap.role-query-search-ou",
"ldap.server",
"ldap.session-ttl",
"ldap.tls-ca-file",
"ldap.token-hash-method",
"ldap.user-dn-pattern",
"ldap.user-query-pattern",
"report-authentication-sinks",
"report-data-op-sinks",
"report-sys-admin-sinks",
"report-user-admin-sinks",
"report-violation-sinks",
"syslog-local",
]
(
actual_heading,
actual_description,
actual_header,
actual_data,
num_records,
) = test_util.parse_output(TestShowConfig.service_config)
self.assertTrue(exp_heading in actual_heading)
self.assertTrue(actual_header, exp_header)
def test_test_namespace(self):
"""
Asserts namespace config output with heading, header & parameters.
TODO: test for values as well
"""
exp_heading = "test Namespace Configuration"
exp_header_test = [
"Node",
"allow-ttl-without-nsup",
"background-scan-max-rps",
"conflict-resolution-policy",
"data-in-index",
"default-ttl",
"disable-cold-start-eviction",
"disable-write-dup-res",
"disallow-null-setname",
"enable-benchmarks-batch-sub",
"enable-benchmarks-ops-sub",
"enable-benchmarks-read",
"enable-benchmarks-udf",
"enable-benchmarks-udf-sub",
"enable-benchmarks-write",
"enable-hist-proxy",
"evict-hist-buckets",
"evict-tenths-pct",
"geo2dsphere-within.earth-radius-meters",
"geo2dsphere-within.level-mod",
"geo2dsphere-within.max-cells",
"geo2dsphere-within.max-level",
"geo2dsphere-within.min-level",
"geo2dsphere-within.strict",
"high-water-disk-pct",
"high-water-memory-pct",
"ignore-migrate-fill-delay",
"index-stage-size",
"index-type",
"memory-size",
"migrate-order",
"migrate-retransmit-ms",
"migrate-sleep",
"nsid",
"nsup-hist-period",
"nsup-period",
"nsup-threads",
"partition-tree-sprigs",
"prefer-uniform-balance",
"rack-id",
"read-consistency-level-override",
"reject-non-xdr-writes",
"reject-xdr-writes",
"replication-factor",
"single-bin",
"single-scan-threads",
"stop-writes-pct",
"strong-consistency",
"strong-consistency-allow-expunge",
"tomb-raider-eligible-age",
"tomb-raider-period",
"transaction-pending-limit",
"truncate-threads",
"write-commit-level-override",
"xdr-bin-tombstone-ttl",
"xdr-tomb-raider-period",
"xdr-tomb-raider-threads",
"storage-engine",
"sindex.num-partitions",
]
(
actual_heading,
actual_description,
actual_header,
actual_data,
num_records,
) = test_util.parse_output(TestShowConfig.test_namespace_config)
self.assertTrue(exp_heading in actual_heading)
self.assertTrue(test_util.check_for_subset(actual_header, exp_header_test))
def test_bar_namespace(self):
"""
Asserts namespace config output with heading, header & parameters.
TODO: test for values as well
"""
if not TestShowConfig.is_bar_present:
return
exp_heading = "bar Namespace Configuration"
exp_header_bar = [
"Node",
"allow-ttl-without-nsup",
"background-scan-max-rps",
"conflict-resolution-policy",
"conflict-resolve-writes",
"data-in-index",
"default-ttl",
"disable-cold-start-eviction",
"disable-write-dup-res",
"disallow-null-setname",
"enable-benchmarks-batch-sub",
"enable-benchmarks-ops-sub",
"enable-benchmarks-read",
"enable-benchmarks-udf",
"enable-benchmarks-udf-sub",
"enable-benchmarks-write",
"enable-hist-proxy",
"evict-hist-buckets",
"evict-tenths-pct",
"geo2dsphere-within.earth-radius-meters",
"geo2dsphere-within.level-mod",
"geo2dsphere-within.max-cells",
"geo2dsphere-within.max-level",
"geo2dsphere-within.min-level",
"geo2dsphere-within.strict",
"high-water-disk-pct",
"high-water-memory-pct",
"ignore-migrate-fill-delay",
"index-stage-size",
"index-type",
"memory-size",
"migrate-order",
"migrate-retransmit-ms",
"migrate-sleep",
"nsid",
"nsup-hist-period",
"nsup-period",
"nsup-threads",
"partition-tree-sprigs",
"prefer-uniform-balance",
"rack-id",
"read-consistency-level-override",
"reject-non-xdr-writes",
"reject-xdr-writes",
"replication-factor",
"sindex.num-partitions",
"single-bin",
"single-scan-threads",
"stop-writes-pct",
"storage-engine",
"strong-consistency",
"strong-consistency-allow-expunge",
"tomb-raider-eligible-age",
"tomb-raider-period",
"transaction-pending-limit",
"truncate-threads",
"write-commit-level-override",
"xdr-bin-tombstone-ttl",
"xdr-tomb-raider-period",
"xdr-tomb-raider-threads",
]
(
actual_heading,
actual_description,
actual_header,
actual_data,
num_records,
) = test_util.parse_output(TestShowConfig.bar_namespace_config)
self.assertTrue(exp_heading in actual_heading)
self.assertListEqual(exp_header_bar, actual_header)
# Needs updating after XDR config parsing has been fixed.
# Tracked here: https://aerospike.atlassian.net/browse/TOOLS-1521
@unittest.skip("Will enable only when xdr is configuired")
def test_xdr(self):
"""
Asserts XDR config output with heading, header & parameters.
TODO: test for values as well
"""
exp_heading = "~XDR Configuration"
exp_header = "NODE"
exp_params = [
"enable-xdr",
"forward",
"xdr-batch-num-retry",
"xdr-batch-retry-sleep",
"xdr-check-data-before-delete",
"xdr-compression-threshold",
"xdr-digestlog-size",
"xdr-forward-with-gencheck",
"xdr-hotkey-maxskip",
"xdr-info-timeout",
"xdr-local-port",
"xdr-max-recs-inflight",
"xdr-namedpipe-path",
"xdr-nw-timeout",
"xdr-read-mode",
"xdr-read-threads",
"xdr-ship-delay",
"xdr-shipping-enabled",
"xdr-timeout",
"xdr-write-batch-size",
]
(
actual_heading,
actual_description,
actual_header,
actual_data,
) = test_util.parse_output(TestShowConfig.xdr_config)
self.assertTrue(exp_heading in actual_heading)
self.assertTrue(exp_header in actual_header)
self.assertTrue(set(exp_params).issubset(set(actual_data)))
class TestShowLatenciesDefault(unittest.TestCase):
    """End-to-end checks of `show latencies -v` output against a live cluster."""
    output_list = list()

    @classmethod
    def setUpClass(cls):
        # Run the command once and cache its parsed sections for all tests.
        TestShowLatenciesDefault.rc = controller.LiveClusterRootController(
            user="admin", password="<PASSWORD>"
        )
        actual_out = util.capture_stdout(
            TestShowLatenciesDefault.rc.execute, ["show", "latencies", "-v"]
        )
        TestShowLatenciesDefault.output_list = test_util.get_separate_output(actual_out)

    @classmethod
    def tearDownClass(cls):
        cls.rc = None

    def test_latencies(self):
        """
        Asserts <b> read latencies <b> output with heading, header & no of node processed(based on row count).
        """
        exp_heading = "Latency"
        exp_header = [
            "Namespace",
            "Histogram",
            "Node",
            "ops/sec",
            ">1ms",
            ">8ms",
            ">64ms",
        ]
        # Allowed values per column: (namespaces), (histogram names).
        exp_data = [
            ("bar", "test"),
            (
                "read",
                "read-dup-res",
                "read-local",
                "read-repl-ping",
                "read-response",
                "read-restart",
                "read-start",
                "write",
                "write-dup-res",
                "write-master",
                "write-repl-write",
                "write-response",
                "write-restart",
                "write-start",
            ),
        ]
        exp_data_types = [str, str, str, float, float, float, float]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            actual_no_of_rows,
        ) = test_util.parse_output(
            TestShowLatenciesDefault.output_list[0], horizontal=True, header_len=1
        )
        self.assertTrue(exp_heading in actual_heading)
        self.assertEqual(exp_header, actual_header)
        self.assertTrue(
            test_util.check_for_types(actual_data, exp_data_types),
            "%s returned the wrong data types" % exp_heading,
        )
        # Every row's namespace/histogram must come from the allowed sets.
        for data in actual_data:
            self.assertTrue(test_util.check_for_subset(data, exp_data))
class TestShowLatenciesWithArguments(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Create one live-cluster controller shared by every test here."""
        TestShowLatenciesWithArguments.rc = controller.LiveClusterRootController(
            user="admin", password="<PASSWORD>"
        )
def test_latencies_e_1_b_17(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 1 -b 17 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">2ms",
">4ms",
">8ms",
">16ms",
">32ms",
">64ms",
">128ms",
">256ms",
">512ms",
">1024ms",
">2048ms",
">4096ms",
">8192ms",
">16384ms",
">32768ms",
">65536ms",
]
exp_data_types = [
str,
str,
str,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "1", "-b", "17"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_e_1_b_18(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 1 -b 18 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">2ms",
">4ms",
">8ms",
">16ms",
">32ms",
">64ms",
">128ms",
">256ms",
">512ms",
">1024ms",
">2048ms",
">4096ms",
">8192ms",
">16384ms",
">32768ms",
">65536ms",
]
exp_data_types = [
str,
str,
str,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "1", "-b", "18"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_e_0_b_17(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 0 -b 17 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">2ms",
">4ms",
">8ms",
">16ms",
">32ms",
">64ms",
">128ms",
">256ms",
">512ms",
">1024ms",
">2048ms",
">4096ms",
">8192ms",
">16384ms",
">32768ms",
">65536ms",
]
exp_data_types = [
str,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
float,
]
exp_no_of_rows = len(TestShowLatenciesWithArguments.rc.cluster._live_nodes)
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "0", "-b", "17"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
self.assertEqual(exp_no_of_rows, int(actual_no_of_rows.strip()))
def test_latencies_e_17_b_1(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 17 -b 1 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = ["Namespace", "Histogram", "Node", "ops/sec", ">1ms"]
exp_data_types = [str, str, str, float, float]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "17", "-b", "1"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_e_100_b_200(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 100 -b 200 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = ["Namespace", "Histogram", "Node", "ops/sec", ">1ms"]
exp_data_types = [
str,
str,
str,
float,
float,
]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "100", "-b", "200"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_e_16_b_2(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 16 -b 2 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = ["Namespace", "Histogram", "Node", "ops/sec", ">1ms", ">65536ms"]
exp_data_types = [str, str, str, float, float, float]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "16", "-b", "2"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_e_4_b_7(self):
"""
Asserts <b> show latencies <b> tables with arguments -e 4 -b 7 display the correct header
and that each row of data has the corresponding data type.
"""
# exp_heading = "~read Latency"
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">16ms",
">256ms",
">4096ms",
">65536ms",
]
exp_data_types = [str, str, str, float, float, float, float, float, float]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-e", "4", "-b", "7"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
actual_no_of_rows,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertListEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_group_by_machine_name(self):
"""
Asserts <b> show latencies <b> with a -m argument which groups tables by machine name
"""
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">8ms",
">64ms",
]
exp_data_types = [str, str, str, float, float, float, float]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute, ["show", "latencies", "-m"]
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
_,
) = test_util.parse_output(output)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
def test_latencies_group_by_machine_name_e_2_8(self):
"""
Asserts <b> show latencies <b> with a -m argument which groups tables by machine name
"""
exp_header = [
"Namespace",
"Histogram",
"Node",
"ops/sec",
">1ms",
">4ms",
">16ms",
">64ms",
">256ms",
">1024ms",
">4096ms",
">16384ms",
]
exp_data_types = [
str,
str,
str,
float,
float,
float,
float,
float,
float,
float,
float,
float,
]
actual_out = util.capture_stdout(
TestShowLatenciesWithArguments.rc.execute,
["show", "latencies", "-m", "-e", "2", "-b", "8"],
)
output_list = test_util.get_separate_output(actual_out)
for output in output_list:
(
actual_heading,
actual_description,
actual_header,
actual_data,
_,
) = test_util.parse_output(output, horizontal=True, header_len=1)
self.assertEqual(exp_header, actual_header)
self.assertTrue(
test_util.check_for_types(actual_data, exp_data_types),
"returned the wrong data types",
)
class TestShowDistribution(unittest.TestCase):
    """e2e checks for ``show distribution`` (per-namespace TTL histograms)."""

    # Shared fixtures, populated once in setUpClass from a single command run.
    output_list = list()
    test_ttl_distri = ""
    bar_ttl_distri = ""

    @classmethod
    def setUpClass(cls):
        """Run ``show distribution`` once and index its tables by namespace."""
        rc = controller.LiveClusterRootController(user="admin", password="<PASSWORD>")
        actual_out = util.capture_stdout(rc.execute, ["show", "distribution"])
        # use regex in get_separate_output(~.+Distribution.*~.+)
        # if you are changing below Distribution keyword
        TestShowDistribution.output_list = test_util.get_separate_output(actual_out)
        # The "bar" namespace is optional in the cluster under test.
        TestShowDistribution.is_bar_present = False
        for item in TestShowDistribution.output_list:
            title = item["title"]
            if "test - TTL Distribution in Seconds" in title:
                TestShowDistribution.test_ttl_distri = item
            elif "bar - TTL Distribution in Seconds" in title:
                TestShowDistribution.bar_ttl_distri = item
                TestShowDistribution.is_bar_present = True
            # NOTE(review): `item` is used as a dict above (item["title"]), so
            # this membership test inspects the dict *keys* and looks like it
            # can never match — confirm whether `"~~~~" in title` was intended.
            elif "~~~~" in item:
                TestShowDistribution.test_namespace_config = item

    @classmethod
    def tearDownClass(cls):
        # Drop any controller reference held on the class.
        cls.rc = None

    def test_test_ttl(self):
        """
        Asserts TTL Distribution in Seconds for test namespace with heading, header & parameters.
        TODO: test for values as well
        """
        exp_heading = "test - TTL Distribution in Seconds"
        exp_description = """Percentage of records having ttl less than or equal to value measured in Seconds"""
        exp_header = [
            "Node",
            "10%",
            "20%",
            "30%",
            "40%",
            "50%",
            "60%",
            "70%",
            "80%",
            "90%",
            "100%",
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(
            TestShowDistribution.test_ttl_distri, horizontal=True, merge_header=False
        )
        self.assertTrue(exp_heading in actual_heading)
        self.assertEqual(exp_description, actual_description)
        self.assertListEqual(exp_header, actual_header)

    def test_bar_ttl(self):
        """
        Asserts TTL Distribution in Seconds for bar namespace with heading, header & parameters.
        TODO: test for values as well
        """
        # Skip silently when the optional "bar" namespace is not configured.
        if not TestShowDistribution.is_bar_present:
            return
        exp_heading = "bar - TTL Distribution in Seconds"
        exp_description = """Percentage of records having ttl less than or equal to value measured in Seconds"""
        exp_header = [
            "Node",
            "10%",
            "20%",
            "30%",
            "40%",
            "50%",
            "60%",
            "70%",
            "80%",
            "90%",
            "100%",
        ]
        exp_types = [str, int, int, int, int, int, int, int, int, int, int]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(
            TestShowDistribution.bar_ttl_distri, horizontal=True, merge_header=False
        )
        self.assertTrue(exp_heading in actual_heading)
        self.assertEqual(exp_description, actual_description)
        self.assertListEqual(exp_header, actual_header)
        self.assertTrue(test_util.check_for_types(actual_data, exp_types))
class TestShowStatistics(unittest.TestCase):
    """e2e checks for ``show statistics`` (bin / service / namespace / XDR tables)."""

    # Shared fixtures, populated once in setUpClass from two command runs.
    output_list = list()
    test_bin_stats = ""
    bar_bin_stats = ""
    service_stats = ""
    bar_namespace_stats = ""
    test_namespace_stats = ""
    xdr_stats = ""

    @classmethod
    def setUpClass(cls):
        """Run ``show statistics`` (plus the xdr variant) once and index the tables."""
        rc = controller.LiveClusterRootController(user="admin", password="<PASSWORD>")
        actual_out = util.capture_stdout(rc.execute, ["show", "statistics"])
        actual_out += util.capture_stdout(rc.execute, ["show", "statistics", "xdr"])
        TestShowStatistics.output_list = test_util.get_separate_output(actual_out)
        # The "bar" namespace is optional in the cluster under test.
        TestShowStatistics.is_bar_present = False
        for item in TestShowStatistics.output_list:
            title = item["title"]
            if "test Bin Statistics" in title:
                TestShowStatistics.test_bin_stats = item
            elif "bar Bin Statistics" in title:
                TestShowStatistics.bar_bin_stats = item
                TestShowStatistics.is_bar_present = True
            elif "Service Statistics" in title:
                TestShowStatistics.service_stats = item
            elif "bar Namespace Statistics" in title:
                TestShowStatistics.bar_namespace_stats = item
                TestShowStatistics.is_bar_present = True
            elif "test Namespace Statistics" in title:
                TestShowStatistics.test_namespace_stats = item
            elif "XDR Statistics" in title:
                TestShowStatistics.xdr_stats = item
            # TODO: Add missing tests
            # else:
            #     raise Exception('A statistics table is unaccounted for in test setUp', item)

    @classmethod
    def tearDownClass(cls):
        # Drop any controller reference held on the class.
        cls.rc = None

    def test_test_bin(self):
        """
        This test will assert <b> test Bin Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        exp_heading = "test Bin Statistics"
        # Tuples list the pre-/post-rename forms of each stat key.
        exp_header = [
            ("Node"),
            ("bin-names-quota", "bin_names_quota"),
            ("num-bin-names", "bin_names"),
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.test_bin_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertTrue(test_util.check_for_subset(actual_header, exp_header))

    def test_bar_bin(self):
        """
        This test will assert <b> bar Bin Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        # Skip silently when the optional "bar" namespace is not configured.
        if not TestShowStatistics.is_bar_present:
            return
        exp_heading = "bar Bin Statistics"
        exp_header = [
            ("Node"),
            ("bin-names-quota", "bin_names_quota"),
            ("num-bin-names", "bin_names"),
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.bar_bin_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertTrue(test_util.check_for_subset(actual_header, exp_header))

    def test_service(self):
        """
        This test will assert <b> Service Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        exp_heading = "Service Statistics"
        # TODO: Add possibly missing params. This is only verified as a subset
        exp_header = [
            "Node",
            "client_connections",
            "cluster_integrity",
            "cluster_key",
            "cluster_size",
            "heartbeat_received_foreign",
            "heartbeat_received_self",
            "info_queue",
            "objects",
            "paxos_principal",
            "proxy_in_progress",
            "query_long_running",
            "query_short_running",
            "reaped_fds",
            "sindex_gc_garbage_cleaned",
            "sindex_gc_garbage_found",
            "sindex_gc_list_creation_time",
            "sindex_gc_list_deletion_time",
            "sindex_gc_objects_validated",
            "sindex_ucgarbage_found",
            "uptime",
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.service_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertTrue(test_util.check_for_subset(actual_header, exp_header))

    def test_bar_namespace(self):
        """
        This test will assert <b> bar Namespace Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        # Skip silently when the optional "bar" namespace is not configured.
        if not TestShowStatistics.is_bar_present:
            return
        exp_heading = "bar Namespace Statistics"
        # TODO: Add possibly missing params. This is only verified as a subset
        exp_header = [
            "Node",
            "reject-non-xdr-writes",
            "reject-xdr-writes",
            "available_bin_names",
            "conflict-resolution-policy",
            "current_time",
            "memory_used_data_bytes",
            "default-ttl",
            "disallow-null-setname",
            "evict-tenths-pct",
            "evicted_objects",
            "expired_objects",
            "memory_free_pct",
            "high-water-disk-pct",
            "high-water-memory-pct",
            "hwm_breached",
            "memory_used_index_bytes",
            "master_objects",
            "memory-size",
            "non_expirable_objects",
            "nsup_cycle_duration",
            "objects",
            "prole_objects",
            "read-consistency-level-override",
            "replication-factor",
            "memory_used_sindex_bytes",
            "single-bin",
            "stop_writes",
            "stop-writes-pct",
            "memory_used_bytes",
            "write-commit-level-override",
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.bar_namespace_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertTrue(test_util.check_for_subset(actual_header, exp_header))

    def test_test_namespace(self):
        """
        This test will assert <b> test Namespace Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        exp_heading = "test Namespace Statistics"
        # TODO: Add possibly missing params. This is only verified as a subset
        exp_header = [
            "Node",
            "allow-ttl-without-nsup",
            "appeals_records_exonerated",
            "appeals_rx_active",
            "appeals_tx_active",
            "appeals_tx_remaining",
            "available_bin_names",
            "background-scan-max-rps",
            "batch_sub_proxy_complete",
            "batch_sub_proxy_error",
            "batch_sub_proxy_timeout",
            "batch_sub_read_error",
            "batch_sub_read_filtered_out",
            "batch_sub_read_not_found",
            "batch_sub_read_success",
            "batch_sub_read_timeout",
            "batch_sub_tsvc_error",
            "batch_sub_tsvc_timeout",
            "cache_read_pct",
            "client_delete_error",
            "client_delete_filtered_out",
            "client_delete_not_found",
            "client_delete_success",
            "client_delete_timeout",
            "client_lang_delete_success",
            "client_lang_error",
            "client_lang_read_success",
            "client_lang_write_success",
            "client_proxy_complete",
            "client_proxy_error",
            "client_proxy_timeout",
            "client_read_error",
            "client_read_filtered_out",
            "client_read_not_found",
            "client_read_success",
            "client_read_timeout",
            "client_tsvc_error",
            "client_tsvc_timeout",
            "client_udf_complete",
            "client_udf_error",
            "client_udf_filtered_out",
            "client_udf_timeout",
            "client_write_error",
            "client_write_filtered_out",
            "client_write_success",
            "client_write_timeout",
            "clock_skew_stop_writes",
            "conflict-resolution-policy",
            "current_time",
            "data-in-index",
            "dead_partitions",
            "default-ttl",
            "deleted_last_bin",
            "device_available_pct",
            "device_compression_ratio",
            "device_free_pct",
            "device_total_bytes",
            "device_used_bytes",
            "disable-cold-start-eviction",
            "disable-write-dup-res",
            "disallow-null-setname",
            "effective_is_quiesced",
            "effective_prefer_uniform_balance",
            "effective_replication_factor",
            "enable-benchmarks-batch-sub",
            "enable-benchmarks-ops-sub",
            "enable-benchmarks-read",
            "enable-benchmarks-udf",
            "enable-benchmarks-udf-sub",
            "enable-benchmarks-write",
            "enable-hist-proxy",
            "evict-hist-buckets",
            "evict-tenths-pct",
            "evict_ttl",
            "evict_void_time",
            "evicted_objects",
            "expired_objects",
            "fail_generation",
            "fail_key_busy",
            "fail_record_too_big",
            "fail_xdr_forbidden",
            "from_proxy_batch_sub_read_error",
            "from_proxy_batch_sub_read_filtered_out",
            "from_proxy_batch_sub_read_not_found",
            "from_proxy_batch_sub_read_success",
            "from_proxy_batch_sub_read_timeout",
            "from_proxy_batch_sub_tsvc_error",
            "from_proxy_batch_sub_tsvc_timeout",
            "from_proxy_delete_error",
            "from_proxy_delete_filtered_out",
            "from_proxy_delete_not_found",
            "from_proxy_delete_success",
            "from_proxy_delete_timeout",
            "from_proxy_lang_delete_success",
            "from_proxy_lang_error",
            "from_proxy_lang_read_success",
            "from_proxy_lang_write_success",
            "from_proxy_read_error",
            "from_proxy_read_filtered_out",
            "from_proxy_read_not_found",
            "from_proxy_read_success",
            "from_proxy_read_timeout",
            "from_proxy_tsvc_error",
            "from_proxy_tsvc_timeout",
            "from_proxy_udf_complete",
            "from_proxy_udf_error",
            "from_proxy_udf_filtered_out",
            "from_proxy_udf_timeout",
            "from_proxy_write_error",
            "from_proxy_write_filtered_out",
            "from_proxy_write_success",
            "from_proxy_write_timeout",
            "geo2dsphere-within.earth-radius-meters",
            "geo2dsphere-within.level-mod",
            "geo2dsphere-within.max-cells",
            "geo2dsphere-within.max-level",
            "geo2dsphere-within.min-level",
            "geo2dsphere-within.strict",
            "geo_region_query_cells",
            "geo_region_query_falsepos",
            "geo_region_query_points",
            "geo_region_query_reqs",
            "high-water-disk-pct",
            "high-water-memory-pct",
            "hwm_breached",
            "ignore-migrate-fill-delay",
            "index-stage-size",
            "index-type",
            "master_objects",
            "master_tombstones",
            "memory-size",
            "memory_free_pct",
            "memory_used_bytes",
            "memory_used_data_bytes",
            "memory_used_index_bytes",
            "memory_used_sindex_bytes",
            "migrate-order",
            "migrate-retransmit-ms",
            "migrate-sleep",
            "migrate_record_receives",
            "migrate_record_retransmits",
            "migrate_records_skipped",
            "migrate_records_transmitted",
            "migrate_rx_instances",
            "migrate_rx_partitions_active",
            "migrate_rx_partitions_initial",
            "migrate_rx_partitions_remaining",
            "migrate_signals_active",
            "migrate_signals_remaining",
            "migrate_tx_instances",
            "migrate_tx_partitions_active",
            "migrate_tx_partitions_imbalance",
            "migrate_tx_partitions_initial",
            "migrate_tx_partitions_lead_remaining",
            "migrate_tx_partitions_remaining",
            "nodes_quiesced",
            "non_expirable_objects",
            "non_replica_objects",
            "non_replica_tombstones",
            "ns_cluster_size",
            "nsup-hist-period",
            "nsup-period",
            "nsup-threads",
            "nsup_cycle_duration",
            "objects",
            "ops_sub_tsvc_error",
            "ops_sub_tsvc_timeout",
            "ops_sub_write_error",
            "ops_sub_write_filtered_out",
            "ops_sub_write_success",
            "ops_sub_write_timeout",
            "partition-tree-sprigs",
            "pending_quiesce",
            "prefer-uniform-balance",
            "prole_objects",
            "prole_tombstones",
            "query_agg",
            "query_agg_abort",
            "query_agg_avg_rec_count",
            "query_agg_error",
            "query_agg_success",
            "query_fail",
            "query_long_queue_full",
            "query_long_reqs",
            "query_lookup_abort",
            "query_lookup_avg_rec_count",
            "query_lookup_error",
            "query_lookup_success",
            "query_lookups",
            "query_ops_bg_failure",
            "query_ops_bg_success",
            "query_proto_compression_ratio",
            "query_proto_uncompressed_pct",
            "query_reqs",
            "query_short_queue_full",
            "query_short_reqs",
            "query_udf_bg_failure",
            "query_udf_bg_success",
            "rack-id",
            "re_repl_error",
            "re_repl_success",
            "re_repl_timeout",
            "read-consistency-level-override",
            "record_proto_compression_ratio",
            "record_proto_uncompressed_pct",
            "reject-non-xdr-writes",
            "reject-xdr-writes",
            "replication-factor",
            "retransmit_all_batch_sub_dup_res",
            "retransmit_all_delete_dup_res",
            "retransmit_all_delete_repl_write",
            "retransmit_all_read_dup_res",
            "retransmit_all_udf_dup_res",
            "retransmit_all_udf_repl_write",
            "retransmit_all_write_dup_res",
            "retransmit_all_write_repl_write",
            "retransmit_ops_sub_dup_res",
            "retransmit_ops_sub_repl_write",
            "retransmit_udf_sub_dup_res",
            "retransmit_udf_sub_repl_write",
            "scan_aggr_abort",
            "scan_aggr_complete",
            "scan_aggr_error",
            "scan_basic_abort",
            "scan_basic_complete",
            "scan_basic_error",
            "scan_ops_bg_abort",
            "scan_ops_bg_complete",
            "scan_ops_bg_error",
            "scan_proto_compression_ratio",
            "scan_proto_uncompressed_pct",
            "scan_udf_bg_abort",
            "scan_udf_bg_complete",
            "scan_udf_bg_error",
            "sindex.num-partitions",
            "single-bin",
            "single-scan-threads",
            "smd_evict_void_time",
            "stop-writes-pct",
            "stop_writes",
            "storage-engine",
            "strong-consistency",
            "strong-consistency-allow-expunge",
            "tomb-raider-eligible-age",
            "tomb-raider-period",
            "tombstones",
            "transaction-pending-limit",
            "truncate-threads",
            "truncate_lut",
            "truncated_records",
            "udf_sub_lang_delete_success",
            "udf_sub_lang_error",
            "udf_sub_lang_read_success",
            "udf_sub_lang_write_success",
            "udf_sub_tsvc_error",
            "udf_sub_tsvc_timeout",
            "udf_sub_udf_complete",
            "udf_sub_udf_error",
            "udf_sub_udf_filtered_out",
            "udf_sub_udf_timeout",
            "unavailable_partitions",
            "write-commit-level-override",
            "xdr-bin-tombstone-ttl",
            "xdr-tomb-raider-period",
            "xdr-tomb-raider-threads",
            "xdr_client_delete_error",
            "xdr_client_delete_not_found",
            "xdr_client_delete_success",
            "xdr_client_delete_timeout",
            "xdr_client_write_error",
            "xdr_client_write_success",
            "xdr_client_write_timeout",
            "xdr_from_proxy_delete_error",
            "xdr_from_proxy_delete_not_found",
            "xdr_from_proxy_delete_success",
            "xdr_from_proxy_delete_timeout",
            "xdr_from_proxy_write_error",
            "xdr_from_proxy_write_success",
            "xdr_from_proxy_write_timeout",
            "xdr_tombstones",
            "xmem_id",
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.test_namespace_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertTrue(test_util.check_for_subset(actual_header, exp_header))

    # @unittest.skip("Will enable only when xdr is configuired")
    def test_xdr(self):
        """
        This test will assert <b> xdr Statistics </b> output for heading, header and parameters.
        TODO: test for values as well
        """
        exp_heading = "XDR Statistics"
        # 5.0+
        exp_header = [
            "Node",
            "abandoned",
            "compression_ratio",
            "filtered_out",
            "hot_keys",
            "in_progress",
            "in_queue",
            "lag",
            "lap_us",
            "latency_ms",
            "nodes",
            "not_found",
            "recoveries",
            "recoveries_pending",
            "retry_conn_reset",
            "retry_dest",
            "retry_no_node",
            "success",
            "throughput",
            "uncompressed_pct",
        ]
        (
            actual_heading,
            actual_description,
            actual_header,
            actual_data,
            num_records,
        ) = test_util.parse_output(TestShowStatistics.xdr_stats)
        self.assertTrue(exp_heading in actual_heading)
        self.assertListEqual(exp_header, actual_header)
        # self.assertTrue(test_util.check_for_subset(actual_data, exp_header))
def capture_separate_and_parse_output(rc, commands):
    """Run *commands* through controller *rc*, split the captured stdout into
    its component tables, and return the parsed form of the first table."""
    stdout = util.capture_stdout(rc.execute, commands)
    tables = test_util.get_separate_output(stdout)
    return test_util.parse_output(tables[0])
def get_data(exp_first, data):
    """Return the tail of the first row in *data* whose first element equals
    *exp_first*, or None when no row matches.

    Unlike the previous implementation, this does not mutate *data*: the old
    version ``pop(0)``'d the first cell of every row it examined, corrupting
    the rows for any later assertions, and its ``len(data)`` guard inside the
    loop was meaningless (an empty *row* would still raise IndexError).
    """
    for row in data:
        # Skip empty rows instead of raising; compare without mutating.
        if row and row[0] == exp_first:
            # Return a fresh list so callers cannot alias the source row.
            return list(row[1:])
    return None
class TestShowUsers(unittest.TestCase):
    """e2e checks for ``show users`` and the ``manage acl`` user commands."""

    @classmethod
    def setUpClass(cls):
        """Connect to the cluster once and enter privileged (enable) mode."""
        cls.rc = controller.LiveClusterRootController(user="admin", password="<PASSWORD>")
        util.capture_stdout(cls.rc.execute, ["enable"])

    @classmethod
    def tearDownClass(cls):
        # Drop the controller reference held on the class.
        cls.rc = None

    # NOTE(review): setUp is declared as a classmethod, which is unusual for
    # unittest — it still runs before each test, but receives the class rather
    # than the instance. Confirm this is intentional.
    @classmethod
    def setUp(cls):
        # Added since tests were failing. I assume because the server response
        # comes before the request is commited to SMD or security layer.
        time.sleep(0.25)
        util.capture_stdout_and_stderr(
            cls.rc.execute, ["manage", "acl", "delete", "user", "foo"]
        )

    def test_show_users(self):
        """``show users`` renders the expected title and column header."""
        exp_title = "Users"
        exp_header = [
            "User",
            "Roles",
            "Connections",
            "Read Quota",
            "Read Single Record TPS",
            "Read Scan/Query Limited RPS",
            "Read Scan/Query Limitless",
            "Write Quota",
            "Write Single Record TPS",
            "Write Scan/Query Limited RPS",
            "Write Scan/Query Limitless",
        ]
        actual_title, _, actual_header, _, _ = capture_separate_and_parse_output(
            self.rc, ["show", "users"]
        )
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)

    def test_create_user_with_no_roles(self):
        """Creating a user without roles adds one row showing default values."""
        exp_user = "foo"
        # Expected row minus the user column: no roles, no connections, zeroed quotas.
        exp_data = [
            "--",
            "--",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ]
        exp_title = "Users"
        exp_header = [
            "User",
            "Roles",
            "Connections",
            "Read Quota",
            "Read Single Record TPS",
            "Read Scan/Query Limited RPS",
            "Read Scan/Query Limitless",
            "Write Quota",
            "Write Single Record TPS",
            "Write Scan/Query Limited RPS",
            "Write Scan/Query Limitless",
        ]
        # Capture the row count before creation so the delta can be asserted.
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "users"]
        )
        exp_num_rows = num_records + 1
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "create", "user", exp_user, "password", "<PASSWORD>"],
        )
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "users"])
        actual_roles = get_data(exp_user, actual_data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertListEqual(exp_data, actual_roles)

    def test_create_user_with_roles(self):
        """Creating a user with roles adds one row listing those roles."""
        exp_user = "foo"
        exp_roles = ["sys-admin", "user-admin"]
        exp_data = [
            ", ".join(exp_roles),
            "--",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
            "0",
        ]
        exp_title = "Users"
        exp_header = [
            "User",
            "Roles",
            "Connections",
            "Read Quota",
            "Read Single Record TPS",
            "Read Scan/Query Limited RPS",
            "Read Scan/Query Limitless",
            "Write Quota",
            "Write Single Record TPS",
            "Write Scan/Query Limited RPS",
            "Write Scan/Query Limitless",
        ]
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "users"]
        )
        exp_num_rows = num_records + 1
        # Sleeps give the server time to commit the ACL change before reading.
        time.sleep(0.5)
        util.capture_stdout(
            self.rc.execute,
            [
                "manage",
                "acl",
                "create",
                "user",
                exp_user,
                "password",
                "<PASSWORD>",
                "roles",
                *exp_roles,
            ],
        )
        time.sleep(2)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "users"])
        actual_data = get_data(exp_user, actual_data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertListEqual(exp_data, actual_data)

    def test_delete_a_user(self):
        """Deleting a user removes its row and shrinks the table by one."""
        exp_user = "foo"
        exp_roles = ["sys-admin", "user-admin"]
        exp_title = "Users"
        exp_header = [
            "User",
            "Roles",
            "Connections",
            "Read Quota",
            "Read Single Record TPS",
            "Read Scan/Query Limited RPS",
            "Read Scan/Query Limitless",
            "Write Quota",
            "Write Single Record TPS",
            "Write Scan/Query Limited RPS",
            "Write Scan/Query Limitless",
        ]
        # NOTE(review): this first capture is overwritten below before being
        # used — it appears to be dead code.
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "users"]
        )
        util.capture_stdout(
            self.rc.execute,
            [
                "manage",
                "acl",
                "create",
                "user",
                exp_user,
                "password",
                "<PASSWORD>",
                "roles",
                *exp_roles,
            ],
        )
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "users"]
        )
        exp_num_rows = num_records - 1
        util.capture_stdout(
            self.rc.execute, ["manage", "acl", "delete", "user", exp_user]
        )
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "users"])
        for data in actual_data:
            self.assertNotIn(exp_user, data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)

    def test_revoke_user_role(self):
        """Revoking a role leaves only the remaining roles on the user's row."""
        exp_user = "foo"
        exp_roles = ["sys-admin", "user-admin"]
        exp_title = "Users"
        exp_header = [
            "User",
            "Roles",
            "Connections",
            "Read Quota",
            "Read Single Record TPS",
            "Read Scan/Query Limited RPS",
            "Read Scan/Query Limitless",
            "Write Quota",
            "Write Single Record TPS",
            "Write Scan/Query Limited RPS",
            "Write Scan/Query Limitless",
        ]
        util.capture_stdout(
            self.rc.execute,
            [
                "manage",
                "acl",
                "create",
                "user",
                exp_user,
                "password",
                "<PASSWORD>",
                "roles",
                *exp_roles,
                "to-remove",
            ],
        )
        time.sleep(0.25)
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "revoke", "user", exp_user, "roles", "to-remove"],
        )
        time.sleep(0.25)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            _,
        ) = capture_separate_and_parse_output(self.rc, ["show", "users"])
        actual_roles = get_data(exp_user, actual_data)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertEqual(", ".join(exp_roles), actual_roles[0])
class TestShowRoles(unittest.TestCase):
    """e2e checks for ``show roles`` and the ``manage acl`` role commands."""

    @classmethod
    def setUpClass(cls):
        """Connect once, enable privileged mode, and pre-create a 'temp' role."""
        cls.rc = controller.LiveClusterRootController(user="admin", password="<PASSWORD>")
        util.capture_stdout(cls.rc.execute, ["enable"])
        util.capture_stdout_and_stderr(
            cls.rc.execute,
            [
                "manage",
                "acl",
                "create",
                "role",
                "temp",
                "priv",
                "sys-admin",
                "allow",
                "1.1.1.1",
            ],
        )

    @classmethod
    def tearDownClass(cls):
        """Best-effort removal of every role the tests may have created."""
        util.capture_stdout_and_stderr(
            cls.rc.execute, ["manage", "acl", "delete", "role", "foo"]
        )
        util.capture_stdout_and_stderr(
            cls.rc.execute, ["manage", "acl", "delete", "role", "temp"]
        )
        cls.rc = None

    # NOTE(review): setUp is declared as a classmethod, which is unusual for
    # unittest — confirm this is intentional.
    @classmethod
    def setUp(cls):
        # Added since tests were failing. I assume because the server response
        # comes before the request is commited to SMD or security layer.
        time.sleep(0.25)
        util.capture_stdout_and_stderr(
            cls.rc.execute, ["manage", "acl", "delete", "role", "foo"]
        )
        time.sleep(0.25)

    def test_show_roles(self):
        """``show roles`` renders the expected title and column header."""
        exp_title = "Roles"
        exp_header = [
            "Role",
            "Privileges",
            "Allowlist",
            "Quotas Read",
            "Quotas Write",
        ]
        actual_title, _, actual_header, _, _ = capture_separate_and_parse_output(
            self.rc, ["show", "roles"]
        )
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)

    def test_create_role_with_privileges(self):
        """Creating a role with a privilege adds one row with that privilege."""
        exp_role = "foo"
        exp_privilege = "sys-admin"
        exp_allowlist = ["--"]
        exp_quotas = ["--", "--"]
        exp_data = [exp_privilege, ", ".join(exp_allowlist), *exp_quotas]
        exp_title = "Roles"
        exp_header = ["Role", "Privileges", "Allowlist", "Quotas Read", "Quotas Write"]
        # Capture the row count before creation so the delta can be asserted.
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "roles"]
        )
        exp_num_rows = num_records + 1
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "create", "role", exp_role, "priv", exp_privilege],
        )
        time.sleep(0.5)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        actual_data = get_data(exp_role, actual_data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertListEqual(exp_data, actual_data)

    def test_create_role_with_allowlist(self):
        """Creating a role with an allowlist adds one row listing the addresses."""
        exp_role = "foo"
        exp_privileges = "write"
        exp_allowlist = ["1.1.1.1", "2.2.2.2"]
        exp_quotas = ["--", "--"]
        exp_data = [exp_privileges, ", ".join(exp_allowlist), *exp_quotas]
        exp_title = "Roles"
        exp_header = [
            "Role",
            "Privileges",
            "Allowlist",
            "Quotas Read",
            "Quotas Write",
        ]
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "roles"]
        )
        exp_num_rows = num_records + 1
        util.capture_stdout(
            self.rc.execute,
            [
                "manage",
                "acl",
                "create",
                "role",
                exp_role,
                "priv",
                "write",
                "allow",
                *exp_allowlist,
            ],
        )
        time.sleep(0.25)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        actual_data = get_data(exp_role, actual_data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertListEqual(exp_data, actual_data)

    def test_delete_a_role(self):
        """Deleting a role removes its row and shrinks the table by one."""
        exp_role = "foo"
        exp_privilege = "sys-admin"
        exp_title = "Roles"
        exp_header = [
            "Role",
            "Privileges",
            "Allowlist",
            "Quotas Read",
            "Quotas Write",
        ]
        # NOTE(review): this first capture is overwritten below before being
        # used — it appears to be dead code.
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "roles"]
        )
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "create", "role", exp_role, "priv", exp_privilege],
        )
        time.sleep(0.25)
        _, _, _, _, num_records = capture_separate_and_parse_output(
            self.rc, ["show", "roles"]
        )
        exp_num_rows = num_records - 1
        util.capture_stdout(
            self.rc.execute, ["manage", "acl", "delete", "role", exp_role]
        )
        time.sleep(0.25)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            actual_num_records,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        for data in actual_data:
            self.assertNotIn(exp_role, data)
        self.assertEqual(exp_num_rows, actual_num_records)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)

    def test_revoke_role(self):
        """Granting then revoking a privilege leaves only the original one."""
        exp_role = "foo"
        exp_privilege = "read"
        exp_title = "Roles"
        exp_header = [
            "Role",
            "Privileges",
            "Allowlist",
            "Quotas Read",
            "Quotas Write",
        ]
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "create", "role", exp_role, "priv", exp_privilege],
        )
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "grant", "role", exp_role, "priv", "write"],
        )
        time.sleep(0.5)
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "revoke", "role", exp_role, "priv", "write"],
        )
        time.sleep(0.5)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            _,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        actual_privileges = get_data(exp_role, actual_data)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertEqual(exp_privilege, actual_privileges[0])

    def test_add_quotas(self):
        """Setting rate limits replaces the '--' quota columns with the values."""
        exp_role = "foo"
        exp_privilege = "read"
        exp_title = "Roles"
        exp_header = ["Role", "Privileges", "Allowlist", "Quotas Read", "Quotas Write"]
        util.capture_stdout(
            self.rc.execute,
            ["manage", "acl", "create", "role", exp_role, "priv", exp_privilege],
        )
        time.sleep(1)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            _,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        actual_data = get_data(exp_role, actual_data)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        # Quotas default to unset ("--") before rate-limit is applied.
        self.assertEqual("--", actual_data[2])
        self.assertEqual("--", actual_data[3])
        util.capture_stdout(
            self.rc.execute,
            [
                "manage",
                "acl",
                "rate-limit",
                "role",
                exp_role,
                "read",
                "1000",
                "write",
                "2000",
            ],
        )
        time.sleep(1)
        (
            actual_title,
            _,
            actual_header,
            actual_data,
            _,
        ) = capture_separate_and_parse_output(self.rc, ["show", "roles"])
        actual_data = get_data(exp_role, actual_data)
        self.assertIn(exp_title, actual_title)
        self.assertListEqual(exp_header, actual_header)
        self.assertEqual("1000", actual_data[2])
        self.assertEqual("2000", actual_data[3])
class TestShowUdfs(unittest.TestCase):
    """e2e checks for ``show udfs`` and the ``manage udfs`` add/remove flow."""

    # Module name exercised by the add/remove tests, plus its local source path.
    exp_module = "test__.lua"
    path = "test/e2e/test.lua"

    @classmethod
    def setUpClass(cls):
        """Connect once, enable privileged mode, and register a filler UDF."""
        cls.rc = controller.LiveClusterRootController(user="admin", password="<PASSWORD>")
        util.capture_stdout(cls.rc.execute, ["enable"])
        add_filler = ["manage", "udfs", "add", "filler_.lua", "path", cls.path]
        util.capture_stdout_and_stderr(cls.rc.execute, add_filler)

    @classmethod
    def tearDownClass(cls):
        """Best-effort removal of every UDF the tests may have registered."""
        for module in (cls.exp_module, "filler_.lua"):
            util.capture_stdout_and_stderr(
                cls.rc.execute, ["manage", "udfs", "remove", module]
            )
        cls.rc = None

    @classmethod
    def setUp(cls):
        """Ensure the module under test is absent before each test runs."""
        remove_cmd = ["manage", "udfs", "remove", cls.exp_module]
        util.capture_stdout_and_stderr(cls.rc.execute, remove_cmd)

    def test_show_udfs(self):
        """``show udfs`` renders the expected title and column header."""
        title, _, header, _, _ = capture_separate_and_parse_output(
            self.rc, ["show", "udfs"]
        )
        self.assertIn("UDF Modules", title)
        self.assertListEqual(["Filename", "Hash", "Type"], header)

    def test_add_udf(self):
        """Adding a UDF grows the table by one row with the expected hash/type."""
        expected_row = ["61e9c132a6a4c1a14852dc1641a35b420664c4a1", "LUA"]
        _, _, _, _, rows_before = capture_separate_and_parse_output(
            self.rc, ["show", "udfs"]
        )
        util.capture_stdout(
            self.rc.execute,
            ["manage", "udfs", "add", self.exp_module, "path", self.path],
        )
        time.sleep(1)
        (
            title,
            _,
            header,
            data,
            rows_after,
        ) = capture_separate_and_parse_output(self.rc, ["show", "udfs"])
        self.assertIn("UDF Modules", title)
        self.assertListEqual(["Filename", "Hash", "Type"], header)
        self.assertEqual(rows_before + 1, rows_after)
        self.assertListEqual(expected_row, get_data(self.exp_module, data))

    def test_remove_udf(self):
        """Removing a UDF shrinks the table by one row."""
        util.capture_stdout(
            self.rc.execute,
            ["manage", "udfs", "add", self.exp_module, "path", self.path],
        )
        time.sleep(0.50)
        _, _, _, _, rows_before = capture_separate_and_parse_output(
            self.rc, ["show", "udfs"]
        )
        util.capture_stdout(
            self.rc.execute, ["manage", "udfs", "remove", self.exp_module]
        )
        time.sleep(0.50)
        (
            title,
            _,
            header,
            _,
            rows_after,
        ) = capture_separate_and_parse_output(self.rc, ["show", "udfs"])
        self.assertIn("UDF Modules", title)
        self.assertListEqual(["Filename", "Hash", "Type"], header)
        self.assertEqual(rows_before - 1, rows_after)
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| 1.695313 | 2 |
hpat/tests/test_utils.py | AlexanderKalistratov/hpat | 1 | 12770647 | import hpat
def count_array_REPs():
    """Return how many arrays were assigned REP (replicated) distribution by
    HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.array_dists.values()
    # sum() over a generator of booleans counts the True entries without
    # materializing an intermediate list.
    return sum(v == Distribution.REP for v in vals)
def count_parfor_REPs():
    """Return how many parfors were assigned REP (replicated) distribution by
    HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.parfor_dists.values()
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(v == Distribution.REP for v in vals)
def count_parfor_OneDs():
    """Return how many parfors were assigned OneD (1D block) distribution by
    HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.parfor_dists.values()
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(v == Distribution.OneD for v in vals)
def count_array_OneDs():
    """Return how many arrays were assigned OneD (1D block) distribution by
    HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.array_dists.values()
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(v == Distribution.OneD for v in vals)
def count_parfor_OneD_Vars():
    """Return how many parfors were assigned OneD_Var (1D variable-length)
    distribution by HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.parfor_dists.values()
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(v == Distribution.OneD_Var for v in vals)
def count_array_OneD_Vars():
    """Return how many arrays were assigned OneD_Var (1D variable-length)
    distribution by HPAT's last distributed-analysis pass."""
    from hpat.distributed import Distribution
    vals = hpat.distributed.dist_analysis.array_dists.values()
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(v == Distribution.OneD_Var for v in vals)
def dist_IR_contains(*args):
    """Return how many of the given substrings occur in the textual dump of
    HPAT's distributed-pass IR (hpat.distributed.fir_text).

    Note: despite the name, this returns a count, not a bool (callers can
    still use it in a boolean context).
    """
    # Generator expression avoids building a throwaway list just to sum it.
    return sum(s in hpat.distributed.fir_text for s in args)
@hpat.jit
def get_rank():
    # JIT-compiled wrapper returning this process's rank from HPAT's
    # distributed API.
    return hpat.distributed_api.get_rank()
@hpat.jit
def get_start_end(n):
    # JIT-compiled helper: compute this rank's (start, end) slice of n items
    # as distributed by HPAT across all processes.
    rank = hpat.distributed_api.get_rank()
    n_pes = hpat.distributed_api.get_size()
    start = hpat.distributed_api.get_start(n, n_pes, rank)
    end = hpat.distributed_api.get_end(n, n_pes, rank)
    return start, end
| 2.46875 | 2 |
align.py | clamsproject/app-fastpunct | 0 | 12770648 | <reponame>clamsproject/app-fastpunct
"""align.py
Aligning two sequences. The elements of the sequences are strings or
string-like elements; it is not clear to me what interface for the class is
expected in the latter case.
Originally written by <NAME>.
"""
def align(a, b, d=-5, s=lambda x,y: x==y, key=lambda x: x, gap=None):
    """Find the globally optimal alignment between the two sequences a and b
    using gap penalty d and similarity function s, and return the aligned
    sequences. The similarity function is applied to the result of key(x)
    and key(y) for each x in a and y in b, and should return an integer;
    key defaults to the identity function. Gap positions in the output are
    represented by `gap` (coerced to "-" when both inputs are plain strings).
    This implementation uses the Needleman-Wunsch algorithm."""
    # Matrix dimensions: one extra row/column for the empty-prefix base case.
    m = len(a) + 1
    n = len(b) + 1
    # Rather than keeping a separate traceback matrix, we'll store (score, fun)
    # tuples in the alignment matrix, where fun is one of the following three
    # traceback functions.
    trace = [m-1, n-1] # decoding starts in the lower right-hand corner
    # Each traceback step mutates `trace` in place and returns the pair of
    # (possibly gapped) elements emitted at that step.
    def diag(): trace[0] -= 1; trace[1] -= 1; return a[trace[0]], b[trace[1]]
    def up(): trace[0] -= 1; return a[trace[0]], gap
    def left(): trace[1] -= 1; return gap, b[trace[1]]
    # Initialize the alignment matrix.
    f = [[None]*n for _ in range(m)]
    f[0][0] = (0, lambda: None)
    for i in range(1, m): f[i][0] = (d*i, up)
    for j in range(1, n): f[0][j] = (d*j, left)
    # Compute the optimal alignment.
    for i in range(1, m):
        for j in range(1, n):
            # On equal scores max() keeps its first argument, so ties prefer
            # diag (match/mismatch) over up, and up over left.
            f[i][j] = max((f[i-1][j-1][0] + s(key(a[i-1]), key(b[j-1])), diag),
                          (f[i-1][j][0] + d, up), # a[i] -> gap
                          (f[i][j-1][0] + d, left), # b[j] -> gap
                          key=lambda x: x[0])
    # Decoding is now just a matter of running the stored traceback functions
    # until we get back to the upper left-hand corner.
    aligned_a = []; aligned_b = []
    while trace != [0, 0]:
        next_a, next_b = f[trace[0]][trace[1]][1]()
        aligned_a.append(next_a)
        aligned_b.append(next_b)
    # Traceback emitted the pairs back-to-front; restore input order.
    aligned_a.reverse(); aligned_b.reverse()
    if isinstance(a, str) and isinstance(b, str):
        # Be nice and coerce the results back to strings.
        def default_gap(x): return x if x is not None else "-"
        return ("".join(map(default_gap, aligned_a)),
                "".join(map(default_gap, aligned_b)))
    return aligned_a, aligned_b
def levenshtein_distance(a, b):
    """Compute the Levenshtein edit distance between the sequences a and b.

    Classic dynamic program: dist[r][c] is the minimum number of
    insertions, deletions and substitutions needed to turn a[:r] into b[:c].
    """
    rows = len(a) + 1
    cols = len(b) + 1
    dist = [[0] * cols for _ in range(rows)]
    # Base cases: transforming a prefix to/from the empty sequence costs one
    # edit per element.
    for r in range(rows):
        dist[r][0] = r  # deletion
    for c in range(cols):
        dist[0][c] = c  # insertion
    for r in range(1, rows):
        for c in range(1, cols):
            if a[r - 1] == b[c - 1]:
                # Matching elements are free; carry the diagonal cost over.
                dist[r][c] = dist[r - 1][c - 1]
            else:
                deletion = dist[r - 1][c] + 1
                insertion = dist[r][c - 1] + 1
                substitution = dist[r - 1][c - 1] + 1
                dist[r][c] = min(deletion, insertion, substitution)
    return dist[rows - 1][cols - 1]
class Word(object):
    """Minimal wrapper around a string, used below to exercise align() with
    non-string sequence elements."""
    def __init__(self, string):
        # The wrapped text; callers below access .text directly.
        self.text = string
    def x__str__(self):
        # NOTE(review): the leading "x" disables this dunder (and the two
        # below), apparently on purpose -- str()/indexing/len() fall back to
        # object defaults. Confirm intent before renaming these to real
        # __str__/__getitem__/__len__.
        return self.text
    def x__getitem__(self, i):
        return self.text[i]
    def x__len__(self):
        return len(self.text)
if __name__ == '__main__':
    # Demo 1: align pairs of token lists and print the results padded into
    # 20-character columns so gaps line up visually.
    s0 = ['door', 'knob']
    s1 = ['door', 'knobs', 'are', 'out']
    s2 = ['doors', 'knobs', 'are', 'in']
    s3 = ['door', 's', 'knobs', 'were', 'in']
    s4 = ['door\'s', 'knobs', 'are', 'in']
    for x, y in [(s0, s1), (s1, s2), (s2, s3), (s2, s4), (s3, s4)]:
        print("\nAligning")
        print(' ', x)
        print(' ', y)
        result = align(x, y)
        print(' ==>')
        print(' ', ''.join(["%-20s" % e for e in result[0]]))
        print(' ', ''.join(["%-20s" % e for e in result[1]]))
    # Demo 2: align sequences of Word-wrapped tokens. Word defines no __eq__
    # (its dunders are x-prefixed, i.e. disabled), so the default similarity
    # compares elements by object identity here.
    s1 = "hello this is <NAME> with the newshour on pbs we have news about the tomato it has been observed recently that they dont taste good anymore".split()
    s2 = 'Hello, this is <NAME>, with the newshour on BBC: "We have news about the tomato it has been observed recently that they don\'t taste good anymore.'.split()
    s1 = [Word(w) for w in s1]
    s2 = [Word(w) for w in s2]
    for x in align(s1, s2):
        print('>>>', ' '.join([str(w) for w in x]))
    first, second = align(s1, s2)
    #print(' ', ''.join(["%-10s" % str(e) for e in first]))
    #print(' ', ''.join(["%-10s" % e for e in second]))
    # Print the aligned Word pairs; gaps appear as None (no .text access on
    # None would raise, so only matched positions reach this zip cleanly --
    # NOTE(review): a gap here would raise AttributeError; confirm inputs.
    for x in zip(first, second):
        print(str(x[0].text), str(x[1].text))
| 3.234375 | 3 |
kolga/libs/service.py | Hi-Fi/kolga | 7 | 12770649 | from pathlib import Path
from typing import Dict, List, Mapping, Optional, Set
from kolga.utils.general import get_environment_vars_by_prefix, get_project_secret_var
from kolga.utils.models import HelmValues
class Service:
    """A piece of software deployable with Helm.

    A service stores the configuration needed to deploy it to Kubernetes,
    along with metadata about the service (artifact name, dependency links)
    so that it can be shared with other services if need be.
    """

    def __init__(
        self,
        name: str,
        track: str,
        values: Optional[HelmValues] = None,
        artifact_name: Optional[str] = None,
        values_files: Optional[List[Path]] = None,
        chart: str = "",
        chart_path: Optional[Path] = None,
        chart_version: Optional[str] = None,
        depends_on: Optional[Set["Service"]] = None,
    ) -> None:
        self.name = name
        self.track = track
        self.values = values or {}
        self.artifact_name = artifact_name
        self.values_files: List[Path] = values_files or []
        self.chart = chart
        self.chart_path = chart_path
        self.chart_version = chart_version
        self.depends_on: Set["Service"] = depends_on or set()
        self._prerequisite_of: Set["Service"] = set()
        self._validate_chart()
        # Per-service overrides pulled from the process environment.
        self.service_specific_values = self._get_service_variables()

    def _validate_chart(self) -> None:
        """Raise ValueError unless a chart name or a chart path is set."""
        if not self.chart and not self.chart_path:
            # BUG FIX: the original message referred to "chart_name", which is
            # not an attribute of this class; the real alternatives are
            # ``chart`` and ``chart_path``.
            raise ValueError("Either chart or chart_path must be defined")

    def _get_service_variables(self) -> Dict[str, str]:
        """Collect ``K8S_SERVICE_<NAME>_*`` environment variables."""
        return get_environment_vars_by_prefix(f"K8S_SERVICE_{self.name.upper()}_")

    def add_dependency(self, service: "Service") -> None:
        """Record that this service depends on ``service`` and link back."""
        self.depends_on.add(service)
        service.add_prerequisite(self)

    def add_prerequisite(self, service: "Service") -> None:
        """Record that this service is a prerequisite of ``service``.

        Also registers the reverse dependency on ``service``; the ``not in``
        guard terminates the mutual recursion with ``add_dependency``.
        """
        self._prerequisite_of.add(service)
        if self not in service.depends_on:
            service.add_dependency(self)

    def setup_prerequisites(self) -> None:
        """Hook for subclasses to prepare their prerequisites; no-op here."""
        pass

    def get_artifacts(self) -> Mapping[str, str]:
        """Return artifacts exposed to dependent services; none by default."""
        return {}

    def get_service_secret_artifact_name(self, service: "Service") -> str:
        """Return the project secret variable name under which ``service``
        sees this service's artifact.

        Raises:
            ValueError: If this service has no ``artifact_name`` set.
        """
        if not self.artifact_name:
            raise ValueError(f"No artifact name set for the service {self.name}")
        return get_project_secret_var(
            project_name=service.name, value=self.artifact_name
        )
| 2.46875 | 2 |
meta_info_tools/test_meta_html.py | fawzi/meta-tools | 0 | 12770650 | <gh_stars>0
import unittest
from .meta_html import *
from .test_meta_schema import metaMetaSchema
import io
import json
import tempfile, shutil
class TestMetaHtml(unittest.TestCase):
    """Tests the documentation (HTML site) generation."""

    def test_md2html(self):
        """Tests markdown-to-HTML conversion (and meta-info auto-linking)."""
        self.maxDiff = None
        test1 = [
            "The shape of the multidimensional array used to store the data corresponding to this meta info, either ",
            "meta_dimension_fixed or meta_dimension_symbolic.\n",
            "Example: [{ ``meta_dimension_symbolic'': ``number_of_atoms''}, {``meta_dimension_fixed'': 3 }].\n",
            "If no meta_dimension are given the data is a scalar.",
        ]
        schema = metaMetaSchema()
        r1 = md2html(
            text="".join(test1), basePath="..", schema=schema, raiseException=True
        )
        self.assertEqual(
            r1,
            '<p>The shape of the multidimensional array used to store the data corresponding to this meta info, either <a href="../section/meta_dimension/value/meta_dimension_fixed.html">meta_dimension_fixed</a> or <a href="../section/meta_dimension/value/meta_dimension_symbolic.html">meta_dimension_symbolic</a>.\nExample: [{ <code><a href="../section/meta_dimension/value/meta_dimension_symbolic.html">meta_dimension_symbolic</a>\'\':</code>number_of_atoms\'\'}, {``<a href="../section/meta_dimension/value/meta_dimension_fixed.html">meta_dimension_fixed</a>\'\': 3 }].\nIf no <a href="../section/meta_dimension/index.html">meta_dimension</a> are given the data is a scalar.</p>',
        )

    def test_site_writer(self):
        """Tests SiteWriter (currently just that it runs without error)."""
        schema = metaMetaSchema()
        # IDIOM: TemporaryDirectory replaces the original mkdtemp +
        # try/finally + shutil.rmtree dance and guarantees cleanup on
        # failure as well.
        with tempfile.TemporaryDirectory(suffix="testSiteWriter") as tempDir:
            w = SiteWriter(schema, tempDir)
            w.writeAll()
            w.cleanupUnknown()
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| 2.875 | 3 |