content stringlengths 5 1.05M |
|---|
from gym.envs.registration import register

# Gym environment registrations for the snake package: the plain
# observation variant and the tiled observation variant.
_SNAKE_ENV_SPECS = (
    ('snake-v0', 'snake_gym.envs:SnakeEnv'),
    ('snake-tiled-v0', 'snake_gym.envs:SnakeEnvTiled'),
)
for _env_id, _entry_point in _SNAKE_ENV_SPECS:
    register(id=_env_id, entry_point=_entry_point)
# Build the palette, splice "yellow" in before the last entry, and show it.
colors = ["red", "white", "blue"]
colors[2:2] = ["yellow"]
print(colors)
|
#!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/OpTestCronus.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
import os
import datetime
import time
import subprocess
import traceback
import socket
from .Exceptions import ParameterCheck, UnexpectedCase
from .OpTestSystem import OpSystemState
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
# Names of the Cronus/eCMD environment variables that OpTestCronus.setup()
# captures from the sourced shell environment and persists into os.environ.
match_list = [
    "CRONUS_HOME", "OPTKROOT", "ECMD_DLL_FILE", "ECMD_EXE", "ECMD_ARCH",
    "ECMD_TARGET", "ECMD_PRODUCT", "LD_LIBRARY_PATH", "ECMD_PATH",
    "ECMD_PLUGIN", "ECMD_RELEASE", "ECMDPERLBIN", "PERL5LIB", "PYTHONPATH",
    "PATH", "LABCPU", "LABTS", "SBE_TOOLS_PATH",
]
class OpTestCronus():
    '''
    OpTestCronus Class for Cronus Setup and Environment Persistance
    See testcases/testCronus.py for Cronus Install and Setup
    '''

    def __init__(self, conf=None):
        """Record the framework configuration and reset all state flags."""
        self.conf = conf
        self.env_ready = False  # too early to know if system supports cronus
        self.cronus_ready = False  # flag to indicate setup complete
        self.cv_SYSTEM = None  # flag to show if we have a system yet
        self.capable = False  # cronus capability of the system, set in setup()
        self.current_target = None  # name of the active ecmd target
        self.cronus_env = None  # path of the captured cronus environment file

    def dump_env(self):
        """Log every tracked Cronus/eCMD environment variable.

        Fix: use os.environ.get() rather than indexing — setup() itself
        logs that the captured count "may not match" match_list, so some
        names can legitimately be absent (indexing raised KeyError).
        """
        for xs in sorted(match_list):
            log.debug("os.environ[{}]={}".format(xs, os.environ.get(xs)))

    def setup(self):
        """Build the Cronus environment and select an ecmd target.

        Steps: verify the system is cronus capable, source the Cronus
        profile and capture its environment into os.environ, create and
        select an ecmd target for the BMC, then run setupsp and (with the
        system powered ON) crodetcnfg.

        Raises:
            UnexpectedCase: when environment capture fails, a subprocess
                fails, or the system is powered OFF.
        """
        self.cv_SYSTEM = self.conf.system()  # we hope its not still too early
        # test no op_system
        self.capable = self.cv_SYSTEM.cronus_capable()
        if not self.cv_SYSTEM.cronus_capable():
            log.debug("System is NOT cronus_capable={}".format(
                self.cv_SYSTEM.cronus_capable()))
            # safeguards
            self.env_ready = False
            self.cronus_ready = False
            return
        # rc=139 is a segfault (-11)
        log.debug("gethostbyname starts '{}'".format(self.conf.args.bmc_ip))
        just_ip = socket.gethostbyname(self.conf.args.bmc_ip)
        log.debug("gethostbyname ends '{}'".format(just_ip))
        proposed_target = just_ip + "_optest_target"
        ecmdtargetsetup_string = ("ecmdtargetsetup -n \"{}\" "
                                  "-env hw -sc \"k0:eth:{}\" "
                                  "-bmc \"k0:eth:{}\" "
                                  "-bmcid \"k0:{}\" "
                                  "-bmcpw \"k0:{}\""
                                  .format(proposed_target,
                                          just_ip,
                                          just_ip,
                                          self.conf.args.bmc_username,
                                          self.conf.args.bmc_password))
        try:
            op_cronus_login = "/etc/profile.d/openpower.sh"
            self.cronus_env = os.path.join(self.conf.logdir, "cronus.env")
            if not os.path.isfile(op_cronus_login):
                log.warning("NO Cronus installed, check the system")
                return
        except Exception as e:
            # NOTE(review): execution falls through after logging, so the
            # following steps may run with a partial setup — confirm intent.
            log.warning("Cronus setup problem check the installation,"
                        " Exception={}".format(e))
        try:
            # Source the profile, run ecmdsetup, and capture the resulting
            # shell environment so it can be replayed into os.environ.
            source_string = ("source {} && "
                             "ecmdsetup auto cro {} {} && "
                             "printenv >{}"
                             .format(op_cronus_login,
                                     self.conf.args.cronus_product,
                                     self.conf.args.cronus_code_level,
                                     self.cronus_env))
            command = "source"
            stdout_value = self.conf.util.cronus_subcommand(
                command=source_string, minutes=2)
            log.debug("source stdout='{}'".format(stdout_value))
            if not os.path.isfile(self.cronus_env):
                log.error("NO Cronus environment "
                          "data captured, this is a problem")
                raise UnexpectedCase(message="NO Cronus environment "
                                     "data captured, this is a problem")
            # Keep only the KEY=VALUE pairs whose names are in match_list.
            ecmd_dict = {}
            with open(self.cronus_env) as f:
                for line in f:
                    new_line = line.split("=")
                    for xs in match_list:
                        if xs == new_line[0]:
                            if len(new_line) >= 2:
                                ecmd_dict[new_line[0]] = new_line[1].rstrip()
            log.debug("ECMD's len(match_list)={} len(ecmd_dict)={}, "
                      "these may not match"
                      .format(len(match_list), len(ecmd_dict)))
            # Persist the captured variables into our own environment.
            for k, v in sorted(ecmd_dict.items()):
                log.debug("ecmd_dict[{}]={}".format(k, ecmd_dict[k]))
                os.environ[k] = ecmd_dict[k]
            self.env_ready = True
            log.debug(
                "cronus setup setting self.env_ready={}".format(self.env_ready))
        except subprocess.CalledProcessError as e:
            tb = traceback.format_exc()
            raise UnexpectedCase(message="Cronus environment issue rc={} "
                                 "output={} traceback={}"
                                 .format(e.returncode, e.output, tb))
        except Exception as e:
            tb = traceback.format_exc()
            raise UnexpectedCase(message="Cronus environment issue "
                                 "Exception={} traceback={}"
                                 .format(e, tb))
        try:
            # Create the ecmd target for the BMC and make it current.
            command = "ecmdtargetsetup"
            stdout_value = self.conf.util.cronus_subcommand(
                command=ecmdtargetsetup_string, minutes=2)
            log.debug("ecmdtargetsetup stdout='{}'".format(stdout_value))
            target_string = "target {}".format(proposed_target)
            command = "target"
            stdout_value = self.conf.util.cronus_subcommand(
                command=target_string, minutes=2)
            log.debug("target stdout='{}'".format(stdout_value))
            self.current_target = proposed_target
            log.debug("ECMD_TARGET={}".format(self.current_target))
            # need to manually update the environment to persist
            os.environ['ECMD_TARGET'] = self.current_target
            command = "setupsp"
            stdout_value = self.conf.util.cronus_subcommand(
                command=command, minutes=2)
            log.debug("target stdout='{}'".format(stdout_value))
            if self.cv_SYSTEM.get_state() not in [OpSystemState.OFF]:
                # crodetcnfg needs the system powered ON.
                command = "crodetcnfg"
                crodetcnfg_string = ("crodetcnfg {}"
                                     .format(self.conf.args.cronus_system_type))
                stdout_value = self.conf.util.cronus_subcommand(
                    command=crodetcnfg_string, minutes=2)
                log.debug("crodetcnfg stdout='{}'".format(stdout_value))
                self.cronus_ready = True
                log.debug("cronus_ready={}".format(self.cronus_ready))
            else:
                log.warning("Cronus problem setting up, we need the "
                            "System powered ON and it is OFF")
                raise UnexpectedCase(state=self.cv_SYSTEM.get_state(),
                                     message=("Cronus setup problem, we need"
                                              " the System powered ON and it is OFF"))
        except subprocess.CalledProcessError as e:
            tb = traceback.format_exc()
            raise UnexpectedCase(message="Cronus setup issue rc={} output={}"
                                 " traceback={}"
                                 .format(e.returncode, e.output, tb))
        except Exception as e:
            tb = traceback.format_exc()
            raise UnexpectedCase(message="Cronus setup issue Exception={}"
                                 " traceback={}".format(e, tb))
|
import os
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.utils.fileutils import copy_remote_file, calculate_file_checksum
class TransferAndCheckRemoteBclFile(IGFBaseProcess):
    '''
    A class for transferring files from remote server and checking the file
    checksum value
    '''

    def param_defaults(self):
        """Extend the base defaults with this runnable's parameters.

        Fixes: the checksum default key was misspelled 'chacksum_type'
        (so param_required('checksum_type') never saw its 'md5' default)
        and 'seqrun_server' was listed twice.
        """
        params_dict = super(TransferAndCheckRemoteBclFile, self).param_defaults()
        params_dict.update({
            'seqrun_server': None,
            'checksum_type': 'md5',
            'seqrun_local_dir': None,
            'seqrun_source': None,
            'seqrun_user': None,
        })
        return params_dict

    def run(self):
        """Copy one seqrun file from the remote server and verify its md5.

        Skips the transfer when a local copy already matches the expected
        checksum; otherwise replaces it, re-copies, and validates.  On any
        failure the error is posted to Slack / MS Teams and re-raised.
        """
        try:
            seqrun_igf_id = self.param_required('seqrun_igf_id')
            seqrun_source = self.param_required('seqrun_source')
            seqrun_server = self.param_required('seqrun_server')
            seqrun_user = self.param_required('seqrun_user')
            seqrun_local_dir = self.param_required('seqrun_local_dir')
            checksum_type = self.param_required('checksum_type')
            seqrun_file_name = self.param_required('seqrun_file_name')
            file_md5_value = self.param_required('file_md5')
            transfer_remote_file = True  # transfer file from remote server
            source_file_path = \
                os.path.join(
                    seqrun_source,
                    seqrun_igf_id,
                    seqrun_file_name)  # get new seqrun path
            dir_name = os.path.dirname(seqrun_file_name)  # returns dir name or empty strings
            destination_dir = \
                os.path.join(
                    seqrun_local_dir,
                    seqrun_igf_id,
                    dir_name)  # get file copy path
            destination_path = \
                os.path.join(
                    destination_dir,
                    os.path.basename(seqrun_file_name))  # get destination path
            if os.path.exists(destination_path) and \
               os.path.isfile(destination_path):
                existing_checksum = \
                    calculate_file_checksum(
                        destination_path,
                        hasher=checksum_type)  # calculate checksum of existing file
                if existing_checksum == file_md5_value:
                    transfer_remote_file = False  # skip file transfer if its up to date
                else:
                    os.remove(destination_path)  # remove existing file
            if transfer_remote_file:
                if seqrun_user is None and seqrun_server is None:
                    raise ValueError('seqrun: {0}, missing required value for seqrun_user or seqrun_server'.
                                     format(seqrun_igf_id))
                source_address = '{0}@{1}'.format(seqrun_user, seqrun_server)  # get host username and address
                copy_remote_file(
                    source_path=source_file_path,
                    destination_path=destination_path,
                    source_address=source_address,
                    check_file=False)  # copy remote file
                if not os.path.exists(destination_path):
                    raise IOError('failed to copy file {0} for seqrun {1}'.
                                  format(seqrun_file_name, seqrun_igf_id))  # check destination file after copy
                new_checksum = \
                    calculate_file_checksum(
                        destination_path,
                        hasher=checksum_type)  # calculate checksum of the transferred file
                if new_checksum != file_md5_value:
                    raise ValueError('seqrun:{3}, checksum not matching for file {0}, expected: {1}, got {2}'.
                                     format(seqrun_file_name,
                                            file_md5_value,
                                            new_checksum,
                                            seqrun_igf_id))  # raise error if checksum doesn't match
            self.param('dataflow_params', {'seqrun_file_name': seqrun_file_name})
        except Exception as e:
            message = \
                'seqrun: {2}, Error in {0}: {1}'.\
                format(
                    self.__class__.__name__,
                    e,
                    seqrun_igf_id)
            self.warning(message)
            self.post_message_to_slack(message, reaction='fail')  # post msg to slack for failed jobs
            self.post_message_to_ms_team(
                message=message,
                reaction='fail')
            raise
from base import AlertApp, parse_list, parse_tod, between
class GenericAlert(AlertApp):
    """Configurable alert app: notifies Telegram targets (optionally with a
    camera snapshot) when the watched entity enters a configured state."""

    def initialize(self):
        # Telegram chat targets to notify; missing/falsy config -> empty list.
        self.telegram_list = self.args.get("telegram") or []
        # States that should trigger the alert.
        self.states = parse_list(self.args.get("state"))
        # Message sent on activation; done_message sent on deactivation.
        self.message = self.args.get("message")
        self.done_message = self.args.get("done_message")
        # Optional camera entity and snapshot filename template.
        self.camera = self.args.get("camera")
        self.camera_output = self.args.get("camera_output")
        # Optional time-of-day window restricting when the alert may fire.
        self.tod = parse_tod(self.args.get("tod"), tzinfo=self.AD.tz)
        super().initialize()

    def should_trigger(self, old, new):
        """Return True when `new` is an alert state and (if a tod window is
        configured) the current time lies inside it."""
        if self.tod:
            now = self.datetime(aware=True)
            # TODO(dcramer): pretty sure i should just be using the builtin and didnt need to write this code
            # if not self.now_is_between("sunset - 00:45:00", "sunrise + 00:45:00"):
            if not between(now, self.tod["after"], self.tod["before"]):
                self.log("not correct time of day")
                return False
        return new in self.states

    def on_activate(self, *args, **kwargs):
        # Entering an alert state: send the configured message, if any.
        if self.message:
            self.send_notifications(self.message)
        else:
            self.log("Not notifying")

    def on_deactivate(self, *args, **kwargs):
        # Leaving the alert state: send the "all clear" message, if any.
        if self.done_message:
            self.send_notifications(self.done_message)
        else:
            self.log("Not notifying")

    def send_notifications(self, message):
        """Send `message` to every Telegram target, attaching a fresh camera
        snapshot when a camera is configured."""
        # NOTE(review): assumes camera_output is always set when camera is —
        # .format on None would raise; confirm against app configuration.
        if self.camera:
            self.call_service(
                "camera/snapshot",
                entity_id=self.camera,
                filename=self.camera_output.format(alert_id=self.alert_id),
            )
        for target in self.telegram_list:
            self.log(f"Notifying telegram {target}")
            if self.camera:
                self.call_service(
                    "telegram_bot/send_photo",
                    target=target,
                    file=self.camera_output.format(alert_id=self.alert_id),
                )
            self.call_service(
                "telegram_bot/send_message", target=target, message=message
            )
|
def astronauts():
    """Return the NASA Open Notify endpoint listing astronauts in space."""
    endpoint = "http://api.open-notify.org/astros.json"
    return endpoint
def locations():
    """Return the NASA Open Notify endpoint for the current ISS location."""
    endpoint = "http://api.open-notify.org/iss-now.json"
    return endpoint
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestMeshGraph(unittest.TestCase):
    """Tests for the retworkx mesh (complete) graph generators."""

    @staticmethod
    def _expected_out_edges(node):
        """Out-edge list of `node` in a 20-node complete digraph,
        highest-numbered target first, with None payloads."""
        return [(node, other, None)
                for other in range(19, -1, -1) if other != node]

    def test_directed_mesh_graph(self):
        graph = retworkx.generators.directed_mesh_graph(20)
        self.assertEqual(len(graph), 20)
        self.assertEqual(len(graph.edges()), 380)
        for node in range(20):
            self.assertEqual(graph.out_edges(node),
                             self._expected_out_edges(node))

    def test_directed_mesh_graph_weights(self):
        graph = retworkx.generators.directed_mesh_graph(
            weights=list(range(20)))
        self.assertEqual(len(graph), 20)
        self.assertEqual(list(range(20)), graph.nodes())
        self.assertEqual(len(graph.edges()), 380)
        for node in range(20):
            self.assertEqual(graph.out_edges(node),
                             self._expected_out_edges(node))

    def test_mesh_directed_no_weights_or_num(self):
        with self.assertRaises(IndexError):
            retworkx.generators.directed_mesh_graph()

    def test_mesh_graph(self):
        graph = retworkx.generators.mesh_graph(20)
        self.assertEqual(len(graph), 20)
        self.assertEqual(len(graph.edges()), 190)

    def test_mesh_graph_weights(self):
        graph = retworkx.generators.mesh_graph(weights=list(range(20)))
        self.assertEqual(len(graph), 20)
        self.assertEqual(list(range(20)), graph.nodes())
        self.assertEqual(len(graph.edges()), 190)

    def test_mesh_no_weights_or_num(self):
        with self.assertRaises(IndexError):
            retworkx.generators.mesh_graph()
|
import sys
if './' not in sys.path: sys.path.append('./')
from abc import ABC
from scipy.misc import derivative
import numpy as np
from types import FunctionType, MethodType
class NumericalPartialDerivative_txyz(ABC):
    """
    Numerical partial derivative, we call it '4' because we compute a function or method of 4 inputs like
    ``A=f(t,x,y,z)``. And we will evaluate dA/dt, dA/dx, dA/dy, dA/dz at `(t, x, y, z)`. Note that `(x,y,z)`
    must be of the same shape; no matter the dimensions (we do not do mesh grid to them). And t must be 1-d.
    """

    def __init__(self, func, t, x, y, z, dtdxdydz=1e-6, n=1, order=3):
        """Validate and store the function, evaluation point and scheme options.

        :param func: callable of 4 args ``f(t, x, y, z)`` (or a bound method
            of 5 args including ``self``).
        :param t: scalar time value.
        :param x, y, z: space coordinates; must all share one shape.
        :param dtdxdydz: step size; a positive number applied to all four
            axes, or a 4-sequence ``(dt, dx, dy, dz)``.
        :param n: derivative order forwarded to scipy's ``derivative``.
        :param order: odd number of stencil points for the scheme.
        """
        self.___PRIVATE_check_func___(func)
        self.___PRIVATE_check_txyz___(t, x, y, z)
        self.___PRIVATE_check_dtdxdydz___(dtdxdydz)
        self.___PRIVATE_check_n___(n)
        self.___PRIVATE_check_order___(order)

    def ___PRIVATE_check_func___(self, func):
        """Ensure `func` is a 4-argument function or 5-argument bound method."""
        assert callable(func), " <PartialDerivative> : func is not callable."
        if isinstance(func, FunctionType):
            # noinspection PyUnresolvedReferences
            assert func.__code__.co_argcount == 4, " <PartialDerivative> : need a func of 4 args."
        elif isinstance(func, MethodType):
            # noinspection PyUnresolvedReferences
            assert func.__code__.co_argcount == 5, \
                " <PartialDerivative> : need a method of 5 args (5 including self)."
        else:
            raise NotImplementedError(func.__class__.__name__)
        self._func_ = func

    def ___PRIVATE_check_txyz___(self, t, x, y, z):
        """We ask x, y, z, must be of the same shape; t must be a number."""
        assert np.shape(x) == np.shape(y) == np.shape(z), " <PartialDerivative> : xyz of different shapes."
        self._x_, self._y_, self._z_ = x, y, z
        assert isinstance(t, (int, float)), f"t need to be a number, now t={t} is a {t.__class__}."
        self._t_ = t

    def ___PRIVATE_check_dtdxdydz___(self, dtdxdydz):
        """Accept one shared positive step or a 4-tuple of per-axis steps."""
        if isinstance(dtdxdydz, (int, float)):
            self._dt_ = self._dx_ = self._dy_ = self._dz_ = dtdxdydz
        else:
            assert np.shape(dtdxdydz) == (4,), " <PartialDerivative> : dtdxdydz shape wrong."
            self._dt_, self._dx_, self._dy_, self._dz_ = dtdxdydz
        assert all([isinstance(d, (int, float)) and d > 0 for d in (self._dt_, self._dx_, self._dy_, self._dz_)]), \
            f"dt, dx, dy, dz must be positive number."

    def ___PRIVATE_check_n___(self, n):
        """Derivative order: a positive integer."""
        assert n % 1 == 0 and n >= 1, " <PartialDerivative> : n = {} is wrong.".format(n)
        self._n_ = n

    def ___PRIVATE_check_order___(self, order):
        """Stencil size: a positive odd integer (scipy requirement)."""
        assert order % 2 == 1 and order > 0, " <PartialDerivative> : order needs to be odd positive."
        self._order_ = order

    def ___PRIVATE_evaluate_func_for_t___(self, t):
        return self._func_(t, self._x_, self._y_, self._z_)

    def ___PRIVATE_evaluate_func_for_x___(self, x):
        return self._func_(self._t_, x, self._y_, self._z_)

    def ___PRIVATE_evaluate_func_for_y___(self, y):
        return self._func_(self._t_, self._x_, y, self._z_)

    def ___PRIVATE_evaluate_func_for_z___(self, z):
        return self._func_(self._t_, self._x_, self._y_, z)

    def scipy_partial(self, d_):
        """We compute the partial derivative, i.e. ``df/d_``, at points ``*txyz``."""
        if d_ == 't':
            # noinspection PyTypeChecker
            return derivative(self.___PRIVATE_evaluate_func_for_t___, self._t_, dx=self._dt_,
                              n=self._n_, order=self._order_)
        elif d_ == 'x':
            # noinspection PyTypeChecker
            return derivative(self.___PRIVATE_evaluate_func_for_x___, self._x_, dx=self._dx_,
                              n=self._n_, order=self._order_)
        elif d_ == 'y':
            # noinspection PyTypeChecker
            return derivative(self.___PRIVATE_evaluate_func_for_y___, self._y_, dx=self._dy_,
                              n=self._n_, order=self._order_)
        elif d_ == 'z':
            # noinspection PyTypeChecker
            return derivative(self.___PRIVATE_evaluate_func_for_z___, self._z_, dx=self._dz_,
                              n=self._n_, order=self._order_)
        else:
            raise Exception(" <PartialDerivative> : dt, dx or dy or dz? give me 't', 'x', 'y' or 'z'.")

    @property
    def scipy_total(self):
        """Use scipy to compute the total derivative."""
        pt = self.scipy_partial('t')
        px = self.scipy_partial('x')
        py = self.scipy_partial('y')
        pz = self.scipy_partial('z')
        return pt, px, py, pz

    def ___PRIVATE_check_partial___(self, d_, p_func, tolerance):
        """Shared checker: does analytical `p_func` match our numerical
        partial along axis `d_` within `tolerance` (absolute, then relative)?

        Consistency fix: the relative error is now always divided by the
        numerical value, as check_partial_t/x/y already did (check_partial_z
        previously divided by the analytical value instead).
        """
        self_p = self.scipy_partial(d_)
        func_p = p_func(self._t_, self._x_, self._y_, self._z_)
        absolute_error = np.max(np.abs(func_p - self_p))
        if absolute_error < tolerance:
            return True
        relative_error = np.max(np.abs((func_p - self_p) / self_p))
        if relative_error < tolerance:
            return True
        else:
            return False

    def check_partial_t(self, px_func, tolerance=1e-5):
        """give a analytical function `px_func`, we check if it is the partial-t derivative of the self.func"""
        return self.___PRIVATE_check_partial___('t', px_func, tolerance)

    def check_partial_x(self, px_func, tolerance=1e-5):
        """give a analytical function `px_func`, we check if it is the partial-x derivative of the self.func"""
        return self.___PRIVATE_check_partial___('x', px_func, tolerance)

    def check_partial_y(self, py_func, tolerance=1e-5):
        """give a analytical function `py_func`, we check if it is the partial-y derivative of the self.func"""
        return self.___PRIVATE_check_partial___('y', py_func, tolerance)

    def check_partial_z(self, pz_func, tolerance=1e-5):
        """give a analytical function `pz_func`, we check if it is the partial-z derivative of the self.func"""
        return self.___PRIVATE_check_partial___('z', pz_func, tolerance)

    def check_total(self, pt_func, px_func, py_func, pz_func, tolerance=1e-5):
        """give four analytical functions, we check if it is the partial-t, -x, -y, -z derivatives of the self.func"""
        return (self.check_partial_t(pt_func, tolerance=tolerance),
                self.check_partial_x(px_func, tolerance=tolerance),
                self.check_partial_y(py_func, tolerance=tolerance),
                self.check_partial_z(pz_func, tolerance=tolerance))
if __name__ == '__main__':
    # mpiexec -n 6 python screws\numerical\time_plus_3d_space\partial_derivative.py
    # Self-test: a separable sine product whose partials are known analytically.
    def func(t, x, y, z): return np.sin(np.pi*x) * np.sin(np.pi*y) * np.sin(np.pi*z) * t
    def Pt(t, x, y, z): return np.sin(np.pi*x) * np.sin(np.pi*y) * np.sin(np.pi*z) + 0*t
    def Px(t, x, y, z): return np.pi*np.cos(np.pi*x) * np.sin(np.pi*y) * np.sin(np.pi*z) * t
    def Py(t, x, y, z): return np.pi*np.sin(np.pi*x) * np.cos(np.pi*y) * np.sin(np.pi*z) * t
    def Pz(t, x, y, z): return np.pi*np.sin(np.pi*x) * np.sin(np.pi*y) * np.cos(np.pi*z) * t
    t = 5
    # Random evaluation points, all sharing the required common shape.
    x = np.random.rand(11, 12, 13)
    y = np.random.rand(11, 12, 13)
    z = np.random.rand(11, 12, 13)
    NP = NumericalPartialDerivative_txyz(func, t, x, y, z)
    # All four analytical partials must agree with the numerical ones.
    assert all(NP.check_total(Pt, Px, Py, Pz))
|
import matplotlib.pyplot as plt
from matplotlib import rcParams
def plotlib_costobj(df=None, savefig=True, savefilepathandname=None,
                    xname='theta',
                    title='Minimal Total Cost vs. Load Constraint',
                    xlabel='Load Reduction (%) Lower Bound Constraint',
                    ylabel='Minimal Total Cost ($)',
                    secondaryxticklabels=None,
                    showplot=True,
                    backend='agg'):
    """Plot minimal total cost against the load-reduction constraint.

    Feasible rows are drawn as a connected line, infeasible rows as
    detached markers.  Optionally adds a secondary x-axis beneath the host
    axis and saves the figure to disk.

    :param df: data frame with columns `xname`, 'objective' and a boolean
        'feasible' column.  # assumes these columns exist -- TODO confirm
    :param savefig: save the figure (forced off when no path is given).
    :param savefilepathandname: output path for the saved figure.
    :param secondaryxticklabels: values for an optional secondary x-axis.
    :param backend: matplotlib backend to switch to; falsy leaves it alone.
    :returns: the matplotlib Figure.
    """
    # `not not backend` is plain truthiness: only switch when one was given.
    if not not backend:
        plt.switch_backend(backend)
    if not savefilepathandname:
        savefig = False
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['MS Reference Sans Serif']
    rcParams['font.weight'] = 'normal'
    rcParams['font.size'] = 12
    # Split rows by feasibility so each group gets its own style.
    df_feasib = df.loc[df['feasible'] == True, :]
    df_infeas = df.loc[df['feasible'] == False, :]
    fig, ax = plt.subplots()
    for ii, data in enumerate([df_feasib, df_infeas]):
        if ii == 0:
            # Feasible: connected blue line with markers.
            ax.plot(data[xname],
                    data['objective'],
                    color=[31/255, 119/255, 180/255],
                    linestyle='-',
                    linewidth=3,
                    marker='o',
                    label='feasible solutions',
                    alpha=1.)
        else:
            # Infeasible: detached orange markers only.
            ax.plot(data[xname],
                    data['objective'],
                    color=[249/255, 120/255, 80/255],
                    linestyle='None',
                    marker='o',
                    label='infeasible solutions',
                    alpha=0.7)
    ax.grid(color=[200/255, 200/255, 200/255],
            alpha=0.4, linestyle='-', linewidth=1)
    ax.set_xticks(df[xname])
    ax.set_xticklabels(df[xname])
    plt.yticks(rotation=45)
    # Hide the right and top spines
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.set_ylabel(ylabel, rotation=90)
    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.legend(frameon=False, loc='upper left', ncol=1)
    if secondaryxticklabels is not None:
        # Twin axis sharing the y-axis, placed below the host x-axis.
        ax2 = ax.twiny()
        axTicks = ax.get_xticks()
        ax2Ticks = axTicks

        def tick_function(X):
            # Format the secondary labels to one decimal place.
            return ["%.1f" % z for z in X]
        ax2.set_xticks(ax2Ticks)
        ax2.set_xbound(ax.get_xbound())
        ax2.set_xticklabels(tick_function(secondaryxticklabels))
        ax2.tick_params(axis='x', which='major', labelsize=9)
        # Add some extra space for the second axis at the bottom
        plt.subplots_adjust(bottom=0.05)
        # ax.xaxis.labelpad = 30
        # Move twinned axis ticks and label from top to bottom
        ax2.xaxis.set_ticks_position("bottom")
        ax2.xaxis.set_label_position("bottom")
        # Offset the twin axis below the host
        ax2.spines["bottom"].set_position(("axes", -0.2))
        # Turn on the frame for the twin axis, but then hide all
        # but the bottom spine
        ax2.set_frame_on(True)
        ax2.patch.set_visible(False)
        for k, sp in ax2.spines.items():
            sp.set_visible(False)
        ax2.spines["bottom"].set_visible(True)
        plt.setp(ax2.xaxis.get_majorticklabels(), rotation=45)
    if showplot:
        plt.show()
    else:
        plt.draw()
    if savefig:
        fig.savefig(savefilepathandname, dpi=300,
                    bbox_inches='tight', transparent=True)
    return fig
def plotlib_loadreductionobj(df=None, savefig=True, savefilepathandname=None,
                             xname='totalcostupperbound',
                             title='Max Load Reduction vs. Total Cost Constraint',
                             xlabel='Total Cost ($) Upper Bound Constraint',
                             ylabel='Maximal Load Reduction (%)',
                             secondaryxticklabels=None,
                             showplot=True,
                             backend='agg'):
    """Plot maximal load reduction against the total-cost constraint.

    Mirror image of plotlib_costobj with cost on the x-axis: feasible rows
    are a connected line, infeasible rows detached markers; x tick labels
    are thousands-separated and rotated.

    :param df: data frame with columns `xname`, 'objective' and a boolean
        'feasible' column.  # assumes these columns exist -- TODO confirm
    :param savefig: save the figure (forced off when no path is given).
    :param savefilepathandname: output path for the saved figure.
    :param secondaryxticklabels: values for an optional secondary x-axis.
    :param backend: matplotlib backend to switch to; falsy leaves it alone.
    :returns: the matplotlib Figure.
    """
    # `not not backend` is plain truthiness: only switch when one was given.
    if not not backend:
        plt.switch_backend(backend)
    if not savefilepathandname:
        savefig = False
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['MS Reference Sans Serif']
    rcParams['font.weight'] = 'normal'
    rcParams['font.size'] = 12
    # Split rows by feasibility so each group gets its own style.
    df_feasib = df.loc[df['feasible'] == True, :]
    df_infeas = df.loc[df['feasible'] == False, :]
    fig, ax = plt.subplots()
    for ii, data in enumerate([df_feasib, df_infeas]):
        if ii == 0:
            # Feasible: connected blue line with markers.
            ax.plot(data[xname],
                    data['objective'],
                    color=[31/255, 119/255, 180/255],
                    linestyle='-',
                    linewidth=3,
                    marker='o',
                    label='feasible solutions',
                    alpha=1.)
        else:
            # Infeasible: detached orange markers only.
            ax.plot(data[xname],
                    data['objective'],
                    color=[249/255, 120/255, 80/255],
                    linestyle='None',
                    marker='o',
                    label='infeasible solutions',
                    alpha=0.7)
    ax.grid(color=[200/255, 200/255, 200/255],
            alpha=0.4, linestyle='-', linewidth=1)
    ax.set_xticks(df[xname])
    # Thousands separators for the (dollar) x tick labels.
    ax.set_xticklabels(["{:,}".format(x) for x in df[xname]])
    plt.yticks(rotation=45)
    plt.xticks(rotation=60)
    # Hide the right and top spines
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.set_ylabel(ylabel, rotation=90)
    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.legend(frameon=False, loc='upper left', ncol=1)
    if secondaryxticklabels is not None:
        # Twin axis sharing the y-axis, placed below the host x-axis.
        ax2 = ax.twiny()
        axTicks = ax.get_xticks()
        ax2Ticks = axTicks

        def tick_function(X):
            # Format the secondary labels to one decimal place.
            return ["%.1f" % z for z in X]
        ax2.set_xticks(ax2Ticks)
        ax2.set_xbound(ax.get_xbound())
        ax2.set_xticklabels(tick_function(secondaryxticklabels))
        ax2.tick_params(axis='x', which='major', labelsize=9)
        # Add some extra space for the second axis at the bottom
        plt.subplots_adjust(bottom=0.05)
        # ax.xaxis.labelpad = 30
        # Move twinned axis ticks and label from top to bottom
        ax2.xaxis.set_ticks_position("bottom")
        ax2.xaxis.set_label_position("bottom")
        # Offset the twin axis below the host
        ax2.spines["bottom"].set_position(("axes", -0.2))
        # Turn on the frame for the twin axis, but then hide all
        # but the bottom spine
        ax2.set_frame_on(True)
        ax2.patch.set_visible(False)
        for k, sp in ax2.spines.items():
            sp.set_visible(False)
        ax2.spines["bottom"].set_visible(True)
        plt.setp(ax2.xaxis.get_majorticklabels(), rotation=45)
    if showplot:
        plt.show()
    else:
        plt.draw()
    if savefig:
        fig.savefig(savefilepathandname, dpi=300,
                    bbox_inches='tight', transparent=True)
    return fig
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import json, os, sys
from Registry import Registry
from qiling.os.windows.const import *
from qiling.exception import *
from qiling.const import *
# Registry Manager reads data from two places
# 1. config.json
# if you want to modify the registry key/value, you can modify config.json
# If there is a registry entry in config.json that needs to be read,
# Registry Manager will read from config.json first.
# 2. windows hive files
# Registry Manager will only write registry changes to config.json
# and will not modify the hive file.
class RegistryManager:
def __init__(self, ql, hive=None):
self.ql = ql
self.log_registry_dir = self.ql.rootfs
if self.log_registry_dir == None:
self.log_registry_dir = "qlog"
self.registry_diff = self.ql.targetname + "_diff.json"
self.regdiff = os.path.join(self.log_registry_dir, "registry", self.registry_diff)
# hive dir
if hive:
self.hive = hive
else:
self.hive = os.path.join(ql.rootfs, "Windows", "registry")
ql.log.debug("Windows Registry PATH: %s" % self.hive)
if not os.path.exists(self.hive) and not self.ql.code:
raise QlErrorFileNotFound(f"Error: Registry files not found in '{self.hive}'!")
if not os.path.exists(self.regdiff):
self.registry_config = {}
try:
os.makedirs(os.path.join(self.log_registry_dir, "registry"), 0o755)
except Exception:
pass
else:
# read config
# use registry config first
self.f_config = open(self.regdiff, "rb")
data = self.f_config.read()
if data == b"":
self.registry_config = {}
self.f_config.close()
else:
try:
self.registry_config = json.loads(data)
except json.decoder.JSONDecodeError:
raise QlErrorJsonDecode("Windows Registry JSON decode error")
finally:
self.f_config.close()
# hkey local system
self.hklm = {}
try:
self.hklm['SECURITY'] = Registry.Registry(os.path.join(self.hive, 'SECURITY'))
self.hklm['SAM'] = Registry.Registry(os.path.join(self.hive, 'SAM'))
self.hklm['SOFTWARE'] = Registry.Registry(os.path.join(self.hive, 'SOFTWARE'))
self.hklm['SYSTEM'] = Registry.Registry(os.path.join(self.hive, 'SYSTEM'))
self.hklm['HARDWARE'] = Registry.Registry(os.path.join(self.hive, 'HARDWARE'))
# hkey current user
self.hkcu = Registry.Registry(os.path.join(self.hive, 'NTUSER.DAT'))
except FileNotFoundError:
if not ql.code:
QlErrorFileNotFound("WARNING: Registry files not found!")
except Exception:
if not ql.code:
QlErrorFileNotFound("WARNING: Registry files format error")
self.accessed = {}
def exists(self, key):
if key in self.regdiff:
return True
keys = key.split("\\")
self.access(key)
try:
if keys[0] == "HKEY_LOCAL_MACHINE":
reg = self.hklm[keys[1]]
sub = "\\".join(keys[2:])
data = reg.open(sub)
elif keys[0] == "HKEY_CURRENT_USER":
reg = self.hkcu
sub = "\\".join(keys[1:])
data = reg.open(sub)
else:
raise QlErrorNotImplemented("Windows Registry %s not implemented" % (keys[0]))
except Exception:
return False
return True
def read(self, key, subkey, reg_type):
# of the key, the subkey is the value checked
# read reg conf first
if key in self.regdiff and subkey in self.regdiff[key]:
if self.regdiff[key][subkey].type in REG_TYPES:
return REG_TYPES[self.regdiff[key][subkey].type], self.regdiff[key][subkey].value
else:
raise QlErrorNotImplemented(
"Windows Registry Type %s not implemented" % self.regdiff[key][subkey].type)
# read hive
reg = None
data = None
keys = key.split('\\')
try:
if keys[0] == "HKEY_LOCAL_MACHINE":
reg = self.hklm[keys[1]]
sub = "\\".join(keys[2:])
data = reg.open(sub)
elif keys[0] == "HKEY_CURRENT_USER":
reg = self.hkcu
sub = "\\".join(keys[1:])
data = reg.open(sub)
else:
raise QlErrorNotImplemented("Windows Registry %s not implemented" % (keys[0]))
for value in data.values():
if value.name() == subkey and (reg_type == Registry.RegNone or
value.value_type() == reg_type):
self.access(key, value_name=subkey, value=value.value(), type=value.value_type())
return value.value_type(), value.value()
except Registry.RegistryKeyNotFoundException:
pass
self.access(key, value_name=subkey, value=None, type=None)
return None, None
def access(self, key, value_name=None, value=None, type=None):
if value_name is None:
if key not in self.accessed:
self.accessed[key] = []
else:
self.accessed[key].append({
"value_name": value_name,
"value": value,
"type": type,
"position": self.ql.os.utils.syscalls_counter
})
# we don't have to increase the counter since we are technically inside a hook
def create(self, key):
self.registry_config[key] = dict()
def write(self, key, subkey, reg_type, data):
if key not in self.registry_config:
self.create(key)
# write registry changes to config.json
self.registry_config[key][subkey] = {
"type": REG_TYPES[reg_type],
"value": data
}
def delete(self, key, subkey):
del self.registry_config[key][subkey]
@staticmethod
def _encode_binary_value(data):
# bytes(hex(data), 'ascii')
# TODO
pass
def write_reg_value_into_mem(self, reg_value, reg_type, address):
length = 0
# string
if reg_type == Registry.RegSZ or reg_type == Registry.RegExpandSZ:
self.ql.mem.write(address, bytes(reg_value, "utf-16le") + b"\x00")
length = len(reg_value)
elif reg_type == Registry.RegBin:
# you can set REG_BINARY like '\x00\x01\x02' in config.json
if type(reg_value) == str:
self.ql.mem.write(address, bytes(reg_value))
length = len(reg_value)
else:
raise QlErrorNotImplemented("Windows Registry Type not implemented")
elif reg_type == Registry.RegDWord:
data = self.ql.pack32(reg_value)
self.ql.mem.write(address, data)
length = len(data)
elif reg_type == Registry.RegQWord:
data = self.ql.pack64(reg_value)
self.ql.mem.write(address, data)
length = len(data)
else:
raise QlErrorNotImplemented(
"Windows Registry Type write to memory %s not implemented" % (REG_TYPES[reg_type]))
return length
def save(self):
    """Persist the accumulated registry diff to self.regdiff as JSON."""
    # Nothing to do when no registry changes were recorded.
    if not self.registry_config:
        return
    serialized = json.dumps(self.registry_config).encode("utf-8")
    with open(self.regdiff, "wb") as out:
        out.write(serialized)
|
# ASCII-art banner variants. Only string constants live here; where/how they
# are printed is not visible in this file - presumably a CLI startup banner.
logo = """
 ,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba,
a8" "" "" `Y8 a8P_____88 I8[ "" "" `Y8 88P' "Y8
8b ,adPPPPP88 8PP""""""" `"Y8ba, ,adPPPPP88 88
"8a, ,aa 88, ,88 "8b, ,aa aa ]8I 88, ,88 88
 `"Ybbd8"' `"8bbdP"Y8 `"Ybbd8"' `"YbbdP"' `"8bbdP"Y8 88
88 88
"" 88
88
 ,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba,
a8" "" 88 88P' "8a 88P' "8a a8P_____88 88P' "Y8
8b 88 88 d8 88 88 8PP""""""" 88
"8a, ,aa 88 88b, ,a8" 88 88 "8b, ,aa 88
 `"Ybbd8"' 88 88`YbbdP"' 88 88 `"Ybbd8"' 88
88
88
"""
# Variant 9: keyboard drawing.
logo9 = '''
 ___________
 \ /
 )_______(
 |"""""""|_.-._,.---------.,_.-._
 | | | | | | ''-.
 | |_| |_ _| |_..-'
 |_______| '-' `'---------'` '-'
 )"""""""(
 /_________\\
 .-------------.
 /_______________\\
 '''
# Variant 10: plain block letters.
logo10 = '''
 _ _ _
 | | | | | |
 ___ __ _| | ___ _ _| | __ _| |_ ___ _ __
 / __/ _` | |/ __| | | | |/ _` | __/ _ \| '__|
| (_| (_| | | (__| |_| | | (_| | || (_) | |
 \___\__,_|_|\___|\__,_|_|\__,_|\__\___/|_|
 '''
# Variant 11: playing-card drawing.
logo11 = """
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
 | \/ K| _/ |
 `------' |__/
"""
# Variant 12: box-drawing characters (may render garbled in non-UTF-8 terminals).
logo12 = '''
โโโโฌ โฌโโโโโโโโโ โโฌโโฌ โฌโโโ โโโโฌ โฌโโฌโโโ โโโโฌโโ
 โ โฆโ โโโค โโโโโโ โ โโโคโโค โโโโ โโโโโโดโโโค โโฌโ
โโโโโโโโโโโโโโโ โด โด โดโโโ โโโโโโโด โดโโโโโโโดโโ
 '''
# Variant 14: slanted letters.
logo14 = '''
 __ ___ __
 / / / (_)___ _/ /_ ___ _____
 / /_/ / / __ `/ __ \/ _ \/ ___/
 / __ / / /_/ / / / / __/ /
/_/ ///_/\__, /_/ /_/\___/_/
 / / /____/_ _____ _____
 / / / __ \ | /| / / _ \/ ___/
 / /___/ /_/ / |/ |/ / __/ /
/_____/\____/|__/|__/\___/_/
 '''
# Short "vs." separator art.
vs = """
 _ __
| | / /____
| | / / ___/
| |/ (__ )
|___/____(_)
"""
"""
This script makes a few modifications to the Methane data from 2012-2018. Eventually it will also import new 2019
data from the spreadsheet. Created on May 29th, 2019
"""
# Import libraries
from fileLoading import loadExcel
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Output directory for the generated text file used by the harmonic fit.
root = r'C:\Users\ARL\Desktop\J_Summit\analyses\HarmonicFit\textFiles'

# Load the methane spreadsheet and drop every row containing a NaN.
methaneData = loadExcel(r"C:\Users\ARL\Desktop\Python Code\Data\Methane.xlsx")
methaneData = methaneData.dropna(axis=0, how='any')

# Remove extreme outliers outside the plausible mixing-ratio range.
too_high = methaneData[methaneData['MR'] > 2100].index
too_low = methaneData[methaneData['MR'] < 1730].index
methaneData = methaneData.drop(too_high).drop(too_low)

print(methaneData.max())
print('-' * 10)
print(methaneData.min())

# Write "decimal-year mixing-ratio" pairs, one observation per line.
with open(root + r"\methaneARL_nofit.txt", 'w+') as f:
    for index, value in methaneData.iterrows():
        f.write("%f " % value.DecYear)
        f.write("%f\n" % value.MR)
|
import pytest
from PIL import Image
from fairypptx import constants
from fairypptx import Slide, Shape, Shapes, Slides
def test_to_image():
    """The current slide renders to a PIL image."""
    rendered = Slide().to_image()
    assert isinstance(rendered, Image.Image)
def test_leaf_shapes():
    """leaf_shapes sees through grouping and yields the inner shapes."""
    slide = Slides().add(layout=constants.ppLayoutBlank)
    slide.select()
    first = slide.shapes.add(1)
    first.text = "S1"
    second = slide.shapes.add(1)
    second.text = "S2"
    # Group the two shapes; leaf_shapes should still report the leaves.
    Shapes([first, second]).group()
    leaves = slide.leaf_shapes
    assert {shape.text for shape in leaves} == {"S1", "S2"}
if __name__ == "__main__":
    # Run this module's tests directly, without capturing stdout.
    pytest.main([__file__, "--capture=no"])
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import scipy.special as scipy
import torch
from torch import Tensor
from .faddeeva_erf import FaddeevaErfi
from ..global_config import mnn_config
def chebyshev_val(x: Tensor, c: Tensor) -> Tensor:
    """Evaluate a Chebyshev series at points x via Clenshaw recurrence.

    Args:
        x: points at which to evaluate the series (any shape).
        c: 1-D tensor of coefficients; c[n] multiplies the degree-n term.

    Returns:
        Tensor with the same shape as x holding the series values.
    """
    degree = c.size()[0]
    # Degree-0 series: a constant everywhere. The original c[-2] indexing
    # raised IndexError for a single-coefficient series.
    if degree == 1:
        return c[0] + torch.zeros_like(x)
    x2 = 2 * x
    c0 = c[-2]
    c1 = c[-1]
    # Clenshaw backward recurrence over the remaining coefficients.
    for i in range(3, degree + 1):
        temp = c0
        c0 = c[-i] - c1
        c1 = temp + c1 * x2
    return c0 + c1 * x
def chebyshev_val_neg(x: Tensor, c: Tensor, num_sub: int = 50, wrap: float = 4., alpha: int = 1) -> Tensor:
    """Piecewise Chebyshev evaluation on a wrapped coordinate.

    x is mapped to (0, 1] via wrap / (wrap + |x|**alpha); that interval is
    split into num_sub equal pieces, each evaluated with its own coefficient
    row c[i, :].
    """
    step = 1 / num_sub
    mapped = wrap / (wrap + torch.abs(x).pow(alpha))
    result = torch.zeros_like(mapped)
    for piece in range(num_sub):
        inside = (mapped > step * piece) & (mapped <= step * (piece + 1))
        result[inside] = chebyshev_val(mapped[inside], c[piece, :])
    return result
def chebyshev_val_no_transform(x: Tensor, c: Tensor, x_min: float = 0.,
                               x_max: float = 1., num_sub: int = 50) -> Tensor:
    """Piecewise Chebyshev evaluation over [x_min, x_max].

    The interval is split into num_sub equal sub-intervals; points falling
    in sub-interval i are evaluated with coefficient row c[i, :].
    """
    step = (x_max - x_min) / num_sub
    result = torch.zeros_like(x)
    for piece in range(num_sub):
        lo = x_min + step * piece
        inside = torch.gt(x, lo) & torch.le(x, lo + step)
        result[inside] = chebyshev_val(x[inside], c[piece, :])
    return result
class DawsonIntegrate(torch.nn.Module):
    """First-order Dawson-type function g(x) and its indefinite integral.

    Negative arguments use piecewise Chebyshev fits (cheb_g_neg /
    cheb_G_neg) near zero and an asymptotic series far from zero; positive
    arguments are recovered from the negative side via exp(x^2)/erfi terms.
    Small CUDA inputs are routed through SciPy on the CPU (see
    'cpu_or_gpu' threshold in mnn_config).
    """
    __constants__ = ['asym_neg_inf', 'asym_pos_inf', 'taylor', 'int_asym_neg_inf', 'div', 'deg',
                     'cheb_xmin_for_G', 'euler_gamma', 'boundary']
    div: int                # number of Chebyshev sub-intervals
    deg: int                # Chebyshev degree (stored; not used directly here)
    cheb_xmin_for_G: float  # negative bound of the Chebyshev-fit region
    euler_gamma: float      # Euler-Mascheroni constant
    boundary: float         # forwarded to FaddeevaErfi

    def __init__(self, div: int = 4, deg: int = 8, cheb_xmin_for_G: float = -6.0, boundary: float = 9.,
                 cheb_G_neg: Tensor = mnn_config.get_value('double_d1_cheb_G_neg'),
                 cheb_g_neg: Tensor = mnn_config.get_value('double_d1_cheb_lg_neg')) -> None:
        super(DawsonIntegrate, self).__init__()
        self.div = div
        self.deg = deg
        self.euler_gamma = np.euler_gamma
        self.cheb_xmin_for_G = cheb_xmin_for_G
        # Buffers so the coefficient tables follow .to()/.cuda() moves.
        self.register_buffer('cheb_G_neg', cheb_G_neg)
        self.register_buffer('cheb_g_neg', cheb_g_neg)
        self.boundary = boundary
        self.erfi = FaddeevaErfi(boundary=boundary)

    def _gpu_dawson(self, x: Tensor) -> Tensor:
        """GPU path: asymptotic tails, Chebyshev middle, reflection for x > 0."""
        y = torch.zeros_like(x)
        # |x| beyond the Chebyshev region (cheb_xmin_for_G is negative).
        region1 = torch.bitwise_or(torch.lt(x, self.cheb_xmin_for_G), torch.gt(x, - self.cheb_xmin_for_G))
        y[region1] = self.asym_neg_inf(- torch.abs(x[region1]))
        region1.bitwise_not_()  # in-place flip: now the near-zero region
        y[region1] = chebyshev_val_neg(- torch.abs(x[region1]), self.cheb_g_neg, num_sub=self.div)
        # Positive arguments: sqrt(pi) * exp(x^2) minus the negative-side value.
        region1 = torch.gt(x, 0.)
        y[region1] = math.sqrt(math.pi) * torch.exp(x[region1].pow(2)) - y[region1]
        return y

    def func_dawson(self, x: Tensor) -> Tensor:
        """Dispatch to the GPU kernel or to SciPy's erfcx depending on size/device."""
        if x.is_cuda:
            if x.numel() > mnn_config.get_value('cpu_or_gpu'):
                return self._gpu_dawson(x)
            else:
                # Small batch: CPU round-trip through SciPy is cheaper.
                device = x.device
                return torch.from_numpy(scipy.erfcx(- x.cpu().numpy()) * math.sqrt(math.pi) / 2).to(device=device)
        else:
            return torch.from_numpy(scipy.erfcx(- x.numpy()) * math.sqrt(math.pi) / 2)

    @staticmethod
    def asym_neg_inf(x: Tensor) -> Tensor:
        """Asymptotic series for x << -1: successive terms via a(n+1)/a(n) ratios."""
        y = -0.5 / x
        x2 = torch.pow(x, 2)
        output = y.clone().detach()
        for n in range(5):
            y = - y * 0.5 * (2 * n + 1) / x2
            output.add_(y)
        return output

    def integrate_asym_neg_inf(self, x: Tensor) -> Tensor:
        """
        Compute asymptotic expansion of the indefinite integral of g(x) for x<<-1
        Use recurrence relation so it only contains multiplication and addition.
        a(n+1)/a(n) = -(2n+1)/(2x^2)
        """
        temp = -0.25 * self.euler_gamma - 0.5 * torch.log(-2 * x)
        temp = temp - 1 / 8 * torch.pow(x, -2) + 3 / 32 * torch.pow(x, -4) - 5 / 32 * torch.pow(x, -6)
        return temp

    def forward(self, x: Tensor) -> Tensor:
        """Indefinite integral of g: negative side by region, erfi correction for x > 0."""
        pos_idx = torch.gt(x, 0.)
        y = torch.zeros_like(x)
        # erfi term for the positive correction (device/size dependent path).
        if x.is_cuda:
            if x.numel() < mnn_config.get_value('cpu_or_gpu'):
                device = x.device
                temp = torch.from_numpy(math.pi / 2 * scipy.erfi(x[pos_idx].cpu().numpy())).to(device=device)
            else:
                temp = math.pi / 2 * self.erfi(x[pos_idx])
        else:
            temp = torch.from_numpy(math.pi / 2 * scipy.erfi(x[pos_idx].numpy()))
        # Asymptotic region: |x| beyond the Chebyshev fit bound.
        idx = torch.bitwise_or(torch.lt(x, self.cheb_xmin_for_G), torch.gt(x, - self.cheb_xmin_for_G))
        y[idx] = self.integrate_asym_neg_inf(-torch.abs(x[idx]))
        idx.bitwise_not_()  # near-zero region handled by the Chebyshev table
        y[idx] = chebyshev_val_no_transform(-torch.abs_(x[idx]), self.cheb_G_neg, x_min=self.cheb_xmin_for_G,
                                            x_max=0., num_sub=self.div)
        y[pos_idx] += temp
        return y
class DoubleDawsonIntegrate(torch.nn.Module):
    """Second-order Dawson-type function and its indefinite integral.

    Evaluation is split by region: asymptotic series for x < -10 (and the
    pos-inf forms for large positive x), piecewise Chebyshev fits in the
    middle, with closed-form exp(x^2)/erfi corrections for x > 0. Relies
    on a first-order DawsonIntegrate instance for g(x) and erfi.
    """
    __constants__ = ['div', 'div_pos', 'cheb_xmas_for_H', 'boundary']
    div: int                # Chebyshev sub-intervals on the negative side
    div_pos: int            # Chebyshev sub-intervals on the positive side
    cheb_xmas_for_H: float  # upper end of the positive Chebyshev region
    boundary: float         # forwarded to the inner DawsonIntegrate

    def __init__(self, div: int = 4, div_pos: int = 6, cheb_xmas_for_H: float = 4.5, boundary: float = 9.,
                 asym_neg_inf: Tensor = mnn_config.get_value('double_d2_asym_neg_inf'),
                 cheb_neg: Tensor = mnn_config.get_value('double_d2_cheb_neg'),
                 cheb_H_neg: Tensor = mnn_config.get_value('double_d2_cheb_H_neg'),
                 cheb_H_pos: Tensor = mnn_config.get_value('double_d2_cheb_H_pos')):
        super(DoubleDawsonIntegrate, self).__init__()
        self.div = div
        self.div_pos = div_pos
        self.cheb_xmas_for_H = cheb_xmas_for_H
        # Buffers so the coefficient tables follow .to()/.cuda() moves.
        self.register_buffer('asym_neg_inf', asym_neg_inf)
        self.register_buffer('cheb_neg', cheb_neg)
        self.register_buffer('cheb_H_neg', cheb_H_neg)
        self.register_buffer('cheb_H_pos', cheb_H_pos)
        self.dawson1 = DawsonIntegrate(boundary=boundary)

    @torch.jit.export
    def func_dawson_2nd(self, x: Tensor) -> Tensor:
        """Second-order Dawson function: tails by series, middle by Chebyshev,
        plus an exp(x^2)-based correction for the positive middle region."""
        y = torch.zeros_like(x)
        idx1 = torch.lt(x, -10.)
        idx2 = torch.gt(x, 10.)
        y[idx1] = self.func_asym_neg_inf(x[idx1])
        y[idx2] = self.func_asym_pos_inf(x[idx2])
        # Middle region |x| <= 10: Chebyshev fit of the negative branch.
        idx1 = torch.bitwise_not(torch.bitwise_or(idx1, idx2))
        y[idx1] = chebyshev_val_neg(-x[idx1].abs_(), self.cheb_neg, num_sub=self.div)
        # Positive middle region gets the reflection correction below.
        idx1 = torch.bitwise_and(idx1, x > 0)
        if x.is_cuda:
            if x[idx1].numel() < mnn_config.get_value('cpu_or_gpu'):
                # Few elements: CPU round-trip through SciPy's erfi.
                device = x.device
                temp = torch.from_numpy(scipy.erfi(x[idx1].cpu().numpy())).to(device=device)
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * temp) - y[idx1]
            else:
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * self.dawson1.erfi(x[idx1])) - \
                          y[idx1]
        else:
            y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                      (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * torch.from_numpy(
                          scipy.erfi(x[idx1].numpy()))) - y[idx1]
        return y

    def func_asym_neg_inf(self, x: Tensor, num: int = 7) -> Tensor:
        """Asymptotic series for x << -1: sum of odd negative powers weighted
        by the precomputed asym_neg_inf coefficients."""
        y = torch.zeros_like(x)
        for i in range(num):
            y.add_(torch.pow(x, -3 - 2 * i) * self.asym_neg_inf[i])
        return y

    def func_asym_pos_inf(self, x: Tensor) -> Tensor:
        """Asymptotic form for x >> 1 built from exp(x^2), erfc and erfi."""
        y = math.pow(math.sqrt(math.pi) / 2, 3) * torch.exp(torch.pow(x, 2))
        if x.is_cuda:
            if x.numel() > mnn_config.get_value('cpu_or_gpu'):
                y.mul_(torch.pow(torch.erfc(-x), 2) * self.dawson1.erfi(x))
            else:
                device = y.device
                y.mul_(torch.pow(torch.erfc(-x), 2) * torch.from_numpy(scipy.erfi(x.cpu().numpy())).to(device=device))
        else:
            y.mul_(torch.pow(torch.erfc(-x), 2) * torch.from_numpy(scipy.erfi(x.numpy())))
        return y

    def func_int_asym_neg_inf(self, x: Tensor, num: int = 7) -> Tensor:
        """Term-wise integral of func_asym_neg_inf (each power divided by its
        new exponent)."""
        y = torch.zeros_like(x)
        for i in range(num):
            y.add_(torch.pow(x, -2 - 2 * i) * self.asym_neg_inf[i] / (-2 - 2 * i))
        return y

    def func_int_asym_pos_inf(self, x: Tensor) -> Tensor:
        """Closed-form integral approximation for large positive x via erfi/erfc."""
        if x.is_cuda:
            if x.numel() > mnn_config.get_value('cpu_or_gpu'):
                e1 = self.dawson1.erfi(x)
            else:
                device = x.device
                e1 = torch.from_numpy(scipy.erfi(x.cpu().numpy())).to(device=device)
        else:
            e1 = torch.from_numpy(scipy.erfi(x.numpy()))
        return math.pi ** 2 / 32 * (e1 - 1) * e1 * torch.pow(torch.erfc(-x), 2)

    def forward(self, x: Tensor) -> Tensor:
        """Indefinite integral H(x), assembled from four disjoint regions."""
        idx1 = torch.lt(x, -10)
        idx2 = torch.gt(x, self.cheb_xmas_for_H)
        idx3 = torch.bitwise_and(torch.bitwise_not(idx1), x <= 0)
        idx4 = torch.bitwise_and(torch.bitwise_not(idx2), x > 0)
        y = torch.zeros_like(x)
        y[idx1] = self.func_int_asym_neg_inf(x[idx1])
        y[idx2] = self.func_int_asym_pos_inf(x[idx2])
        y[idx3] = chebyshev_val_neg(x[idx3], self.cheb_H_neg, num_sub=self.div)
        # Positive Chebyshev region carries an explicit exp(2x^2) factor.
        y[idx4] = torch.exp(2 * torch.pow(x[idx4], 2)) * chebyshev_val_no_transform(x[idx4], self.cheb_H_pos,
                                                                                   x_max=self.cheb_xmas_for_H,
                                                                                   num_sub=self.div_pos)
        return y
|
import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o_cmd, h2o, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
    # Python 2 H2O regression test: parse the kddcup_1999 dataset and run a
    # single-tree Random Forest against it.

    def tearDown(self):
        # Fail the test if H2O logged any errors during it.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # One H2O cloud for the whole test case; RF on kddcup needs a big heap.
        h2o.init(java_heap_GB=12)

    @classmethod
    def tearDownClass(cls):
        ### time.sleep(3600)
        h2o.tear_down_cloud()

    def test_rf_kddcup_1999_fvec(self):
        # h2b.browseTheCloud()
        importFolderPath = 'standard'
        csvFilename = 'kddcup_1999.data.gz'
        csvPathname = importFolderPath + "/" + csvFilename
        # Import and parse the gzipped CSV from the shared dataset bucket.
        parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
                                       timeoutSecs=300)
        print "Parse result['destination_key']:", parseResult['destination_key']
        inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
        # Single trial; bump range() to repeat the RF build.
        for trials in range(1):
            print "\n" + csvFilename, "Trial #", trials
            start = time.time()
            kwargs = {
                'importance': 0,
                'response': 'classifier',
                'ntrees': 1,
                'sample_rate': 0.67,
                'nbins': 1024,
                'max_depth': 100,
                'seed': 784834182943470027,  # fixed seed for reproducibility
            }
            start = time.time()
            RFview = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=800, retryDelaySecs=10.0, **kwargs)
            print "RF end on ", csvFilename, 'took', time.time() - start, 'seconds'
            ### h2b.browseJsonHistoryAsUrlLastMatch("RFView")
if __name__ == '__main__':
    # h2o's unittest wrapper: runs the test case with H2O-specific setup.
    h2o.unit_main()
# histogram of response classes (42nd field)
# 30 buffer_overflow.
# 8 ftp_write.
# 53 guess_passwd.
# 12 imap.
# 12481 ipsweep.
# 21 land.
# 9 loadmodule.
# 7 multihop.
# 1072017 neptune.
# 2316 nmap.
# 972781 normal.
# 3 perl.
# 4 phf.
# 264 pod.
# 10413 portsweep.
# 10 rootkit.
# 15892 satan.
# 2807886 smurf.
# 2 spy.
# 979 teardrop.
# 1020 warezclient.
# 20 warezmaster.
|
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
"""Everything related to the 'pylint-config' command.
Everything in this module is private.
"""
from pylint.config._pylint_config.main import _handle_pylint_config_commands # noqa
from pylint.config._pylint_config.setup import _register_generate_config_options # noqa
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import uuid
class FakeTrunk(object):
    """Fake one or more trunks."""

    @staticmethod
    def create_one_trunk(attrs=None):
        """Create a fake trunk.

        :param Dictionary attrs:
            A dictionary with all attributes
        :return:
            A Dictionary with id, name, admin_state_up,
            port_id, sub_ports, status and project_id
        """
        attrs = attrs or {}
        # Set default attributes.
        trunk_attrs = {
            'id': 'trunk-id-' + uuid.uuid4().hex,
            'name': 'trunk-name-' + uuid.uuid4().hex,
            'port_id': 'port-' + uuid.uuid4().hex,
            'admin_state_up': True,
            'project_id': 'project-id-' + uuid.uuid4().hex,
            'status': 'ACTIVE',
            'sub_ports': [{'port_id': 'subport-' + uuid.uuid4().hex,
                           'segmentation_type': 'vlan',
                           'segmentation_id': 100}],
        }
        # Overwrite default attributes.
        trunk_attrs.update(attrs)
        # Deep copy so callers can mutate nested sub_ports safely.
        return copy.deepcopy(trunk_attrs)

    @staticmethod
    def create_trunks(attrs=None, count=2):
        """Create multiple fake trunks.

        :param Dictionary attrs:
            A dictionary with all attributes
        :param int count:
            The number of trunks to fake
        :return:
            A list of dictionaries faking the trunks
        """
        return [FakeTrunk.create_one_trunk(attrs) for _ in range(count)]

    @staticmethod
    def get_trunks(trunks=None, count=2):
        """Get an iterable MagicMock object with a list of faked trunks.

        If trunks list is provided, then initialize the Mock object with the
        list. Otherwise create one.

        :param List trunks:
            A list of FakeResource objects faking trunks
        :param int count:
            The number of trunks to fake
        :return:
            An iterable Mock object with side_effect set to a list of faked
            trunks
        """
        if trunks is None:
            # Bug fix: count was previously passed positionally into the
            # attrs parameter of create_trunks, which both ignored the
            # requested count and crashed in trunk_attrs.update(count).
            trunks = FakeTrunk.create_trunks(count=count)
        return mock.MagicMock(side_effect=trunks)
|
# -*- coding: utf-8 -*-
"""
Description:
Author: SijinHuang
Date: 2021-12-06 21:19:11
LastEditors: SijinHuang
LastEditTime: 2021-12-13 21:40:15
"""
import json
import numpy as np
import insightface
from common.dao import fetch_img
# Load the ONNX face-recognition model once at import time so repeated
# handler invocations in a warm runtime reuse it. ctx_id=0: first device.
recognition_model = insightface.model_zoo.get_model('./.insightface/models/buffalo_m/w600k_r50.onnx')
recognition_model.prepare(ctx_id=0)
def recog_faces(img, det_res):
    """Attach a normalized embedding to every face detection in det_res.

    Each detection dict must carry 'bbox', 'kps' and 'det_score'; the same
    dicts are returned with an added 'embedding' list.
    """
    enriched = []
    for det_obj in det_res:
        face = insightface.app.common.Face(
            bbox=np.array(det_obj['bbox']),
            kps=np.array(det_obj['kps']),
            det_score=det_obj['det_score'],
        )
        # Populates face.normed_embedding in place.
        recognition_model.get(img, face)
        det_obj['embedding'] = face.normed_embedding.tolist()
        enriched.append(det_obj)
    return enriched
def handler(event, context):
    """FaaS entry point: fetch the image, run recognition, return JSON."""
    img = fetch_img(event['img_url'])
    detections = json.loads(event['det_res'])
    return json.dumps(recog_faces(img, detections), ensure_ascii=False)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-05 05:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the diverged 0036/0037 branches of the
    # `sass` app's migration history; it introduces no schema changes itself.

    dependencies = [
        ('sass', '0037_auto_20190226_0923'),
        ('sass', '0036_auto_20190226_0630'),
    ]

    operations = [
    ]
|
#!/usr/bin/python
import sys
import re
# Matches lines containing only whitespace.
empty_line_pattern = re.compile(r'\s*$')
# begin(model(<name>)) / end(model(<name>)) block delimiters.
begin_model_pattern = re.compile(r'begin\(model\((\w+)\)\)')
end_model_pattern = re.compile(r'end\(model\((\w+)\)\)')
# Leading predicate name of a fact line, e.g. "pred(".
start_models_predicate_pattern = re.compile(r'(\w+)\(')
def is_empty_line(line: str) -> bool:
    """Return True when *line* is blank (whitespace only)."""
    # bool() makes the return value match the annotation instead of leaking
    # a Match object / None; truthiness for callers is unchanged.
    return bool(empty_line_pattern.match(line))
# remember: sys.argv[0] is the name of the script
with open(sys.argv[1]) as f:
    # Model key currently in scope; None until the first begin(model(...)).
    key = None
    for line in f:
        # Strip only a trailing newline; the original line[:-1] ate a real
        # character when the final line had no newline.
        line = line.rstrip('\n')
        if not is_empty_line(line):
            m = begin_model_pattern.match(line)
            if m:
                key = m.group(1)
            elif end_model_pattern.match(line):
                print("\n")
            else:
                m = start_models_predicate_pattern.match(line)
                if m and key is not None:
                    # Insert the model key as first argument of the predicate.
                    # re.escape guards against metacharacters; the plain-text
                    # replacement fixes the former '\(' template, which is an
                    # invalid escape in re.sub replacements (py3.7+).
                    pred = re.escape(m.group(1))
                    keys_line = re.sub(pred + r'\(', m.group(1) + '(' + key + ',', line)
                    print(keys_line)
                else:
                    # TODO: vul aan (fill in) - other lines are ignored for now
                    pass
|
import aioredis
from .settings import get_broker_settings
async def connect_redis() -> aioredis.Redis:
    """Open a Redis connection built from the configured broker settings."""
    settings = get_broker_settings()
    url = f"redis://{settings.host}:{settings.port}/{settings.db}"
    return await aioredis.create_redis(url)
|
import gym
import numpy as np
np.random.seed(2)
import copy
from mushroom_rl.utils.spaces import Box, Discrete
from sklearn.ensemble import ExtraTreesRegressor
from ARLO.environment import BaseEnvironment, BaseObservationWrapper
from ARLO.block import DataGenerationRandomUniformPolicy, ModelGenerationMushroomOfflineFQI
from ARLO.metric import SomeSpecificMetric, DiscountedReward
from ARLO.hyperparameter import Integer, Categorical
from ARLO.block import FeatureEngineeringFSCMI
if __name__ == '__main__':
    # Where block logs/checkpoints are written.
    dir_chkpath = '/path/for/saving/output'
    # FQI hyperparameters: extra-trees action regressor, 60 iterations.
    my_params = {'approximator': Categorical(hp_name='approximator', obj_name='approximator_fqi',
                                             current_actual_value=ExtraTreesRegressor),
                 'n_iterations': Integer(hp_name='n_iterations', current_actual_value=60, obj_name='fqi_n_iterations'),
                 'n_estimators': Integer(hp_name='n_estimators', current_actual_value=100, obj_name='fqi_xgb_n_estimators'),
                 'criterion': Categorical(hp_name='criterion', current_actual_value='squared_error', obj_name='criterion'),
                 'min_samples_split': Integer(hp_name='min_samples_split', current_actual_value=10,
                                              obj_name='min_samples_split'),
                 'n_jobs': Integer(obj_name='n_jobs', hp_name='n_jobs', current_actual_value=16)
                 }
    # Offline FQI model-generation block used by the feature-selection loop below.
    model_gen = ModelGenerationMushroomOfflineFQI(eval_metric=SomeSpecificMetric('fill_in_metric_model_gen'),
                                                  obj_name='model_gen_fqi',
                                                  regressor_type='action_regressor',
                                                  algo_params=my_params, log_mode='file', checkpoint_log_path=dir_chkpath)
    model_gen.pipeline_type = 'offline'
"""
The code below is taken from https://github.com/AndreaTirinzoni/iw-transfer-rl
Cyclostationary Dam Control
Info
----
- State space: 2D Box (storage,day)
- Action space: 1D Box (release decision)
- Parameters: capacity, demand, flooding threshold, inflow mean per day, inflow std, demand weight, flooding weight
References
----------
- Simone Parisi, Matteo Pirotta, Nicola Smacchia,
Luca Bascetta, Marcello Restelli,
Policy gradient approaches for multi-objective sequential decision making
2014 International Joint Conference on Neural Networks (IJCNN)
- A. Castelletti, S. Galelli, M. Restelli, R. Soncini-Sessa
Tree-based reinforcement learning for optimal water reservoir operation
Water Resources Research 46.9 (2010)
- Andrea Tirinzoni, Andrea Sessa, Matteo Pirotta, Marcello Restelli.
Importance Weighted Transfer of Samples in Reinforcement Learning.
International Conference on Machine Learning. 2018.
"""
class Dam(gym.Env):
    """Cyclostationary dam-control environment.

    State is (storage, day); the discrete action indexes a fixed pool of
    release amounts. The reward trades off flooding cost (weight ALPHA)
    against unmet water demand (weight BETA). With experiment=True the
    observation becomes a 31-vector: storage plus 30 lagged inflows.
    """
    # metadata = {
    #     'render.modes': ['human', 'rgb_array'],
    #     'video.frames_per_second': 30
    # }

    def set_local_prng(self, new_seeder):
        # Reseed the environment-local RNG (kept separate from the global one).
        self.local_prng = np.random.default_rng(new_seeder)
        self.seeder = new_seeder

    def __init__(self, inflow_profile = 1, alpha = 0.5, beta = 0.5, penalty_on = False, experiment=False):
        self.local_prng = np.random.default_rng(2)
        self.experiment = experiment
        self.horizon = 360  # one episode = one year of daily decisions
        self.gamma = 0.999
        self.state_dim = 2
        self.action_dim = 1
        self.DEMAND = 10.0  # Water demand -> At least DEMAND/day must be supplied or a cost is incurred
        self.FLOODING = 300.0  # Flooding threshold -> No more than FLOODING can be stored or a cost is incurred
        self.MIN_STORAGE = 50.0  # Minimum storage capacity -> At most max{S - MIN_STORAGE, 0} must be released
        self.MAX_STORAGE = 500.0  # Maximum storage capacity -> At least max{S - MAX_STORAGE, 0} must be released
        # Random inflow (e.g. rain) mean for each day (360-dimensional vector)
        self.INFLOW_MEAN = self._get_inflow_profile(inflow_profile)
        self.INFLOW_STD = 2.0  # Random inflow std
        assert alpha + beta == 1.0  # Check correctness
        self.ALPHA = alpha  # Weight for the flooding cost
        self.BETA = beta  # Weight for the demand cost
        self.penalty_on = penalty_on  # Whether to penalize illegal actions or not
        # Gym attributes
        self.viewer = None
        self.action_space = Discrete(8)  # indices into the release pool in step()
        if(self.experiment):
            self.observation_space = Box(low=np.zeros(31),
                                         high=np.inf*np.ones(31))
        else:
            self.observation_space = Box(low=np.array([0,1]),
                                         high=np.array([np.inf,360]))

    def _get_inflow_profile(self,n):
        # Dispatch to one of the seven seasonal inflow-mean profiles.
        assert n >= 1 and n <= 7
        if n == 1:
            return self._get_inflow_1()
        elif n == 2:
            return self._get_inflow_2()
        elif n == 3:
            return self._get_inflow_3()
        elif n == 4:
            return self._get_inflow_4()
        elif n == 5:
            return self._get_inflow_5()
        elif n == 6:
            return self._get_inflow_6()
        elif n == 7:
            return self._get_inflow_7()

    # Each _get_inflow_* builds a 360-day piecewise-sinusoidal mean inflow,
    # differing in amplitude, phase and offset of the three 120-day segments.
    def _get_inflow_1(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359) + 0.5
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359) / 2 + 0.5
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359) + 0.5
        return y * 8 + 4

    def _get_inflow_2(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359) / 2 + 0.25
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359 + np.pi) * 3 + 0.25
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359 + np.pi) / 4 + 0.25
        return y * 8 + 4

    def _get_inflow_3(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359) * 3 + 0.25
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359) / 4 + 0.25
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359) / 2 + 0.25
        return y * 8 + 4

    def _get_inflow_4(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359) + 0.5
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359) / 2.5 + 0.5
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359) + 0.5
        return y * 7 + 4

    def _get_inflow_5(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359 - np.pi / 12) / 2 + 0.5
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359 - np.pi / 12) / 2 + 0.5
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359 - np.pi / 12) / 2 + 0.5
        return y * 8 + 5

    def _get_inflow_6(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359 + np.pi / 8) / 3 + 0.5
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359 + np.pi / 8) / 3 + 0.5
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359 + np.pi / 8) / 3 + 0.5
        return y * 8 + 4

    def _get_inflow_7(self):
        y = np.zeros(360)
        x = np.arange(360)
        y[0:120] = np.sin(x[0:120] * 3 * np.pi / 359) + 0.5
        y[120:240] = np.sin(x[120:240] * 3 * np.pi / 359) / 3 + 0.5
        y[240:] = np.sin(x[240:] * 3 * np.pi / 359) * 2 + 0.5
        return y * 8 + 5

    def step(self, action):
        # Map the discrete action index to an actual release amount.
        action = action[0]
        actions_pool = [0, 3, 5, 7, 10, 15, 20, 30]
        action = actions_pool[action]
        action = float(action)
        # Get current state
        state = self.get_state()
        storage = state[0]
        day = state[1]
        # Bound the action
        actionLB = max(storage - self.MAX_STORAGE, 0.0)
        actionUB = max(storage - self.MIN_STORAGE, 0.0)
        # Penalty proportional to the violation
        bounded_action = min(max(action, actionLB), actionUB)
        penalty = -abs(bounded_action - action) * self.penalty_on
        # Transition dynamics
        action = bounded_action
        inflow = self.INFLOW_MEAN[int(day-1)] + self.local_prng.normal() * self.INFLOW_STD
        nextstorage = max(storage + inflow - action, 0.0)
        # Cost due to the excess level wrt the flooding threshold
        reward_flooding = -max(storage - self.FLOODING, 0.0) / 4
        # Deficit in the water supply wrt the water demand
        reward_demand = -max(self.DEMAND - action, 0.0) ** 2
        # The final reward is a weighted average of the two costs
        reward = self.ALPHA * reward_flooding + self.BETA * reward_demand + penalty
        # Get next day
        nextday = day + 1 if day < 360 else 1
        self.state = [nextstorage, nextday]
        if(self.experiment):
            # NOTE(review): inflow is re-sampled here as a full 360-day
            # vector (one scalar noise draw broadcast over INFLOW_MEAN) to
            # build the 30-day lagged-inflow observation - confirm intended.
            inflow = self.INFLOW_MEAN + self.local_prng.normal() * self.INFLOW_STD
            if(day >= 31):
                lagged_inflows = inflow[int(day-31):int(day-1)].tolist()
            else:
                # Wrap around the year boundary for the first 30 days.
                lagged_inflows = inflow[360-int(31-day):].tolist() + inflow[:int(day-1)].tolist()
            next_state = np.array([[nextstorage] + lagged_inflows])
        else:
            next_state = self.get_state()
        return next_state, reward, False, {}

    def reset(self, state=None):
        if state is None:
            # Random storage level and one of three canonical start days.
            init_days = np.array([1, 120, 240])
            self.state = [self.local_prng.uniform(self.MIN_STORAGE, self.MAX_STORAGE),
                          init_days[self.local_prng.integers(low=0,high=3)]]
        else:
            self.state = np.array(state)
        if(self.experiment):
            # Experiment mode: return a random 31-vector as initial observation.
            current_state = []
            for i in range(31):
                current_state.append(self.local_prng.uniform(self.MIN_STORAGE, self.MAX_STORAGE))
            current_state = np.array(current_state)
        else:
            current_state = self.get_state()
        return current_state

    def get_state(self):
        # Snapshot of [storage, day] as an array.
        return np.array(self.state)
# Create Env Class for ARLO
class myDam(BaseEnvironment):
    """ARLO wrapper around Dam exposing its spaces, horizon and gamma.

    current_feats selects which observation components step() returns.
    """
    def __init__(self, obj_name, experiment, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process'):
        super().__init__(obj_name, seeder, log_mode, checkpoint_log_path, verbosity, n_jobs, job_type)
        self.dam_env = Dam(inflow_profile = 1, alpha = 0.3, beta = 0.7, experiment=experiment)
        self.observation_space = self.dam_env.observation_space
        self.action_space = self.dam_env.action_space
        self.horizon = self.dam_env.horizon
        self.gamma =self.dam_env.gamma
        # Start with every observation component selected.
        self.current_feats = np.arange(len(self.observation_space.low))

    def set_local_prng(self, new_seeder):
        self.dam_env.set_local_prng(new_seeder)

    def seed(self, seeder):
        self.set_local_prng(new_seeder=seeder)

    def step(self, action):
        out = self.dam_env.step(action=action)
        # NOTE(review): the [0][0] unwrap matches the 2-D observation of
        # experiment mode; with experiment=False it would index into a
        # 1-D state - confirm this wrapper is only used with experiment=True.
        new_state = out[0][0][self.current_feats]
        return new_state, out[1], out[2], out[3]

    def reset(self, state=None):
        return self.dam_env.reset()

    def render(self, mode='human'):
        raise NotImplementedError
# Generate an offline dataset with a random-uniform policy on the dam env.
my_dam = myDam(obj_name='my_dam', experiment=True)
data_gen = DataGenerationRandomUniformPolicy(eval_metric=SomeSpecificMetric('data_gen'), obj_name='data_gen',
                                             algo_params={'n_samples': Integer(obj_name='n_samples_data_gen',
                                                                               hp_name='n_samples',
                                                                               current_actual_value=10800)})
data_gen.pipeline_type = 'offline'
data_gen.pre_learn_check(env=my_dam)
out_data_gen = data_gen.learn(env=my_dam)
# CMI-based feature selection over the generated dataset.
feat_block = FeatureEngineeringFSCMI(eval_metric=SomeSpecificMetric('fillin_metric'), obj_name='feat_block',
                                     n_jobs=1, job_type='process', log_mode='file', checkpoint_log_path=dir_chkpath)
feat_block.pipeline_type = 'offline'
possible_ks = [1, 2, 3, 4, 5, 10, 20, 50]
feats_to_use = []
for tmp_k in possible_ks:
    # NOTE(review): tmp_k is only logged; it is never passed to
    # feat_block.learn, so every iteration runs the same selection - confirm.
    out_feat_block = feat_block.learn(train_data=out_data_gen.train_data)
    feat_block.logger.info(msg='current k: '+str(tmp_k))
    feat_block.logger.info(msg='selected features: '+str(feat_block.ordered_features))
    feats_to_use.append(feat_block.ordered_features)
# Work on deep copies so the original dataset is never mutated.
my_data = out_data_gen.train_data
tmp_data = copy.deepcopy(my_data)
out = copy.deepcopy(tmp_data.parse_data())
# For each candidate feature ranking, train FQI on growing feature prefixes
# and evaluate each resulting policy on a matching restricted environment.
for i in range(len(feats_to_use)):
    current_set_of_feats = feats_to_use[i]
    for j in range(len(current_set_of_feats)):
        #each time add an extra feature
        current_selected_features = current_set_of_feats[:j+1]
        # Restrict the dataset (and its observation space) to the prefix.
        tmp_data.dataset = my_data.arrays_as_data(out[0][:,current_selected_features], out[1], out[2],
                                                  out[3][:,current_selected_features], out[4], out[5])
        new_low = copy.deepcopy(my_data.observation_space.low)[np.array(current_selected_features)]
        new_high = copy.deepcopy(my_data.observation_space.high)[np.array(current_selected_features)]
        tmp_data.observation_space = Box(low=new_low, high=new_high)
        model_gen.pre_learn_check(train_data=tmp_data)
        model_gen.full_block_instantiation(tmp_data.info)
        res = model_gen.learn(train_data=tmp_data)

        # Wrapper narrowing the live environment to the same feature prefix.
        # Note: it closes over current_selected_features from this loop.
        class wrapped_env(BaseObservationWrapper):
            def __init__(self, env, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                         job_type='process'):
                super().__init__(env=env, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                                 checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs,
                                 job_type=job_type)
                old_observation_space = env.observation_space
                #this block can only work on Box observation spaces:
                new_low = copy.deepcopy(old_observation_space.low)[np.array(current_selected_features)]
                new_high = copy.deepcopy(old_observation_space.high)[np.array(current_selected_features)]
                #set the new observation space:
                self.observation_space = Box(low=new_low, high=new_high)
                self.env.dam_env.observation_space = self.observation_space

            def set_local_prng(self, new_seeder):
                self.env.dam_env.set_local_prng(new_seeder)

            def observation(self, observation):
                new_obs = observation[np.array(current_selected_features)]
                return new_obs

        tmp_env = wrapped_env(obj_name='wrapped', env=copy.deepcopy(my_dam))
        tmp_env.current_feats = np.array(current_selected_features)
        # Evaluate the learned policy by discounted return over 10 episodes.
        metr = DiscountedReward(obj_name='metric', n_episodes=10, batch=True)
        tmp_eval = metr.evaluate(block_res=res, block=model_gen, env=tmp_env)
        print(metr.eval_mean, np.sqrt(metr.eval_var))
import os
from multiprocessing.dummy import Pool as ThreadPool
import socket
import itertools
import urllib.request
import array
import http.client
class ImageDownloader(object):
    """Download images for multiple search terms using a thread pool.

    Files are saved under ``root_dir/<search_term>/`` as
    ``<index>_<search_term>.jpg``.
    """

    def __init__(self, root_dir='./data', workers=1):
        # Bug fix: the worker count was hard-coded to 1, silently ignoring
        # the ``workers`` argument.
        self.workers = workers
        self.root_dir = root_dir

    def download_images(self, urls_dict):
        """Download every URL in ``urls_dict`` (search term -> list of URLs)."""
        if not os.path.exists(self.root_dir):
            os.makedirs(self.root_dir)
        thread_pool = ThreadPool(self.workers)
        for search_term in urls_dict:
            urls = urls_dict[search_term]
            save_dir = os.path.join(self.root_dir, search_term.replace(' ', '_'))
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
            # Each worker receives (url, save_dir, index).
            thread_pool.map(self.download, zip(urls, itertools.repeat(save_dir), range(len(urls))))

    def download(self, args):
        """Fetch a single image; ``args`` is ``(url, save_dir, index)``.

        Network failures are reported and skipped rather than raised, so one
        bad URL does not abort the batch.
        """
        url, save_dir, count = args
        image_file_name = str(count) + '_' + os.path.basename(save_dir) + '.jpg'
        image_path = os.path.join(save_dir, image_file_name)
        try:
            response = urllib.request.urlopen(url, timeout=15)
        except urllib.error.HTTPError as err:
            print(f'HTTP Error: {err.reason}\n url: {url}')
            return
        except urllib.error.URLError as err:
            print(f'Urlopen Error: {err.reason}\n url: {url}')
            return
        except socket.timeout as err:
            print(f'Socket Timeout: Response took more than 15 seconds \n url: {url}')
            return
        except http.client.RemoteDisconnected as err:
            print(f'Remote end closed connection without response \n url: {url}')
            return
        # Close the response even if the disk write fails (was leaked before).
        with response:
            with open(image_path, 'wb') as image_file:
                image_file.write(response.read())
|
from argparse import ArgumentError
import sys
import typing
from advent2021.core import run
class BingoCard:
    """Represents a 5 x 5 Bingo Card.

    Marked cells are replaced with ``None`` inside the board grid.
    """

    ROW_COUNT = 5
    COL_COUNT = 5

    def __init__(self, board: typing.List[typing.List[int]]):
        # Sanity check. :)
        # Bug fix: argparse.ArgumentError requires (argument, message), so
        # the old one-argument calls raised TypeError instead of reporting
        # the invalid board; ValueError is the right exception here.
        if len(board) != BingoCard.ROW_COUNT:
            raise ValueError("Invalid board. There must be {} rows.".format(BingoCard.ROW_COUNT))
        for row in board:
            if len(row) != BingoCard.COL_COUNT:
                raise ValueError("Invalid board. There must be {} columns for all rows.".format(BingoCard.COL_COUNT))
        self.board = board

    def mark(self, number_called):
        """Mark the board with the number called."""
        for row_index in range(BingoCard.ROW_COUNT):
            for col_index in range(BingoCard.COL_COUNT):
                if self.board[row_index][col_index] == number_called:
                    self.board[row_index][col_index] = None  # Marked!
                    break  # at most one hit per row; continue with next row

    def sum(self):
        """Gets the sum of all unmarked numbers that remained on the board"""
        running_sum = 0
        for row_index in range(BingoCard.ROW_COUNT):
            for col_index in range(BingoCard.COL_COUNT):
                number = self.board[row_index][col_index]
                if number is None:
                    continue
                running_sum += number
        return running_sum

    def is_winner(self):
        """Determines whether the board has won or not"""
        # First, check to see if we've gotten a horizontal win (i.e., one row has all numbers marked).
        for row in self.board:
            total_marked = len([True for col in row if col is None])
            if total_marked == BingoCard.COL_COUNT:
                return True
        # To check if we've gotten a vertical win (i.e., one column has all numbers marked), we'll
        # need to do some "fancy" traversals.
        for col_index in range(BingoCard.COL_COUNT):
            total_marked = 0
            for row_index in range(BingoCard.ROW_COUNT):
                if self.board[row_index][col_index] is None:
                    total_marked += 1
            if total_marked == BingoCard.ROW_COUNT:
                return True
        # Didn't win (yet) :(
        return False

    def __str__(self):
        # Print out the board; marked cells render as "--".
        board_str = ""
        for row in self.board:  # type: typing.List
            board_str += " ".join(["{0:02d}".format(col) if col is not None else "--" for col in row]) + "\n"
        return board_str.strip()
def run_part1(file: typing.TextIO) -> int:
    """Score the first bingo card to win against the called numbers."""
    # First line: the numbers called out for the game.
    numbers = __parse_bingo_numbers(file)
    # Remaining lines: the 5 x 5 bingo cards.
    cards = __parse_bingo_cards(file)
    winner, winning_number = None, 0
    for called in numbers:
        # Mark every card with the called number; remember any winner.
        for card in cards:  # type: BingoCard
            card.mark(called)
            if card.is_winner():
                winner, winning_number = card, called
        if winner is not None:
            break
    # Score = sum of unmarked numbers on the winning card * winning number.
    print("Winning Number: ", winning_number, file=sys.stderr)
    print("Winning Board:", file=sys.stderr)
    print(winner, file=sys.stderr)
    return winner.sum() * winning_number
def run_part2(file: typing.TextIO) -> int:
    """Score the LAST bingo card to win against the called numbers."""
    # First line: the numbers called out for the game.
    numbers = __parse_bingo_numbers(file)
    # Remaining lines: the 5 x 5 bingo cards.
    cards = __parse_bingo_cards(file)
    last_winner, last_number = None, 0
    for called in numbers:
        # Iterate over a snapshot, since winners are removed as we go.
        for card in list(cards):  # type: BingoCard
            card.mark(called)
            if card.is_winner():
                last_winner, last_number = card, called
                # This card won; no need to keep processing it.
                cards.remove(card)
    # Score = sum of unmarked numbers on the last winner * its number.
    print("(Last) Winning Number: ", last_number, file=sys.stderr)
    print("(Last) Winning Board:", file=sys.stderr)
    print(last_winner, file=sys.stderr)
    return last_winner.sum() * last_number
def __parse_bingo_numbers(file: typing.TextIO) -> typing.List[int]:
    """Parse the first line: the comma-separated called numbers."""
    first_line = file.readline()
    return list(map(int, first_line.strip().split(",")))
def __parse_bingo_cards(file: typing.TextIO) -> typing.List[BingoCard]:
    """Read the remaining lines of `file` into a list of BingoCard objects."""
    cards = []
    rows = []
    for raw_line in file:
        stripped = raw_line.strip()
        if not stripped:
            # Blank separator: flush the rows collected so far, but only
            # when they form a complete board (guards the leading blank).
            if len(rows) == 5:
                cards.append(BingoCard(rows))
                rows = []
        else:
            # A row of numbers; split on single spaces and drop empties
            # (columns are right-aligned with extra padding).
            rows.append([int(token) for token in stripped.split(" ") if token != ""])
    # The file does not end with a blank line — flush the final board.
    cards.append(BingoCard(rows))
    return cards
run(__package__, run_part1, run_part2)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 9.12 from Kane 1985.
Answer does not match text.
"""
from __future__ import division
from sympy import Dummy
from sympy import expand, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dynamicsymbols
from util import msprint, subs, partial_velocities
from util import generalized_active_forces, potential_energy
# py2/py3 compatibility fix: reduce() is only a builtin on Python 2.
from functools import reduce

# generalized coordinates and speeds
q1, q2, q3, q4, q5, q6 = q = dynamicsymbols('q1:7')
u1, u2, u3, u4, u5, u6 = u = dynamicsymbols('u1:7')
# L' is the natural length of the springs
a, k, L_prime = symbols('a k L\'', real=True, positive=True)
# reference frames
X = ReferenceFrame('X')
C = X.orientnew('C', 'body', [q4, q5, q6], 'xyz')
# define points
pO = Point('O') # point O is fixed in X
pC_star = pO.locatenew('C*', a*(q1*X.x + q2*X.y + q3*X.z))
# define points of the cube connected to springs
pC1 = pC_star.locatenew('C1', a*(C.x + C.y - C.z))
pC2 = pC_star.locatenew('C2', a*(C.y + C.z - C.x))
pC3 = pC_star.locatenew('C3', a*(C.z + C.x - C.y))
# define fixed spring points
pk1 = pO.locatenew('k1', L_prime * X.x + a*(X.x + X.y - X.z))
pk2 = pO.locatenew('k2', L_prime * X.y + a*(X.y + X.z - X.x))
pk3 = pO.locatenew('k3', L_prime * X.z + a*(X.z + X.x - X.y))
pC_star.set_vel(X, pC_star.pos_from(pO).dt(X))
pC1.v2pt_theory(pC_star, X, C)
pC2.v2pt_theory(pC_star, X, C)
pC3.v2pt_theory(pC_star, X, C)
# kinematic differential equations
kde_map = dict(zip(map(lambda x: x.diff(), q), u))
# forces: each spring pulls its cube corner toward its anchor with
# magnitude k * (stretch beyond natural length L').
x1 = pC1.pos_from(pk1)
x2 = pC2.pos_from(pk2)
x3 = pC3.pos_from(pk3)
forces = [(pC1, -k*(x1.magnitude() - L_prime)*x1.normalize()),
          (pC2, -k*(x2.magnitude() - L_prime)*x2.normalize()),
          (pC3, -k*(x3.magnitude() - L_prime)*x3.normalize())]
# py3 compatibility fix: zip() returns an iterator on py3, so it must be
# materialized before indexing (list(zip(...)) works unchanged on py2).
partials = partial_velocities(list(zip(*forces))[0], u, X, kde_map)
Fr, _ = generalized_active_forces(partials, forces)
print('generalized active forces')
for i, fr in enumerate(Fr, 1):
    print('\nF{0} = {1}'.format(i, msprint(fr)))
# use a dummy symbol since series() does not work with dynamicsymbols
_q = Dummy('q')
series_exp = (lambda x, qi, n_:
        x.subs(qi, _q).series(_q, n=n_).removeO().subs(_q, qi))
# remove all terms order 3 or higher in qi
Fr_series = [reduce(lambda x, y: series_exp(x, y, 3), q, fr)
             for fr in Fr]
print('\nseries expansion of generalized active forces')
for i, fr in enumerate(Fr_series, 1):
    print('\nF{0} = {1}'.format(i, msprint(fr)))
V = potential_energy(Fr_series, q, u, kde_map)
print('\nV = {0}'.format(msprint(V)))
print('Setting C = 0, ฮฑ1, ฮฑ2, ฮฑ3, ฮฑ4, ฮฑ5, ฮฑ6 = 0')
# NOTE(review): the 'ฮฑn' names look like mojibake for Greek alpha; kept
# as-is because they must match the symbols potential_energy generates —
# confirm against util.potential_energy.
V = V.subs(dict(zip(symbols('C ฮฑ1 ฮฑ2 ฮฑ3 ฮฑ4 ฮฑ5 ฮฑ6'), [0] * 7)))
print('V = {0}'.format(msprint(V)))
V_expected = k*a**2/2*((q1 - q5 - q6)**2 + (q2 - q6 - q4)**2 +
                       (q3 - q4 - q5)**2)
assert expand(V - V_expected) == 0
|
#
# @lc app=leetcode id=1507 lang=python3
#
# [1507] Reformat Date
#
# @lc code=start
from datetime import datetime
from re import sub
class Solution:
    def reformatDate(self, date: str) -> str:
        """Convert '<Day><suffix> <Mon> <Year>' (e.g. '20th Oct 2052') to 'YYYY-MM-DD'."""
        day, month, year = date.split()
        # Drop the two-letter ordinal suffix (st/nd/rd/th) from the day.
        cleaned = "{} {} {}".format(day[:-2], month, year)
        return datetime.strptime(cleaned, "%d %b %Y").strftime("%Y-%m-%d")
# @lc code=end
|
from abc import ABCMeta, abstractmethod
import numpy
from PIL import Image
from skimage.color import rgb2lab
import typing
import six
@six.add_metaclass(ABCMeta)
class BaseDataProcess(object):
    """Abstract base for dataset preprocessing steps.

    Subclasses implement __call__(data, test); `test` flags evaluation
    mode so steps can behave deterministically there.
    """
    @abstractmethod
    def __call__(self, data, test):
        pass
class RandomScaleImageProcess(BaseDataProcess):
    """Rescales an image by a random factor in [min_scale, max_scale]."""

    def __init__(self, min_scale, max_scale):
        # type: (float, float) -> None
        self._min_scale = min_scale
        self._max_scale = max_scale

    def __call__(self, image, test):
        # type: (Image.Image, any) -> any
        original_size = image.size
        # Deterministic midpoint in test mode, uniform draw otherwise.
        factor = 0.5 if test else numpy.random.rand(1)
        scale = factor * (self._max_scale - self._min_scale) + self._min_scale
        target_size = (int(original_size[0] * scale), int(original_size[1] * scale))
        if target_size != original_size:
            image = image.resize(target_size, resample=Image.BICUBIC)
        return image
class LabImageArrayProcess(BaseDataProcess):
    """Converts a PIL RGB image into a channel-first Lab float array."""

    def __init__(self, normalize=True, dtype=numpy.float32):
        self._normalize = normalize
        self._dtype = dtype

    def __call__(self, image, test):
        # type: (Image.Image, any) -> any
        # Keep only RGB channels (drops alpha) and scale to [0, 1].
        rgb = numpy.asarray(image, dtype=self._dtype)[:, :, :3] / 255
        lab = rgb2lab(rgb).astype(self._dtype).transpose(2, 0, 1)
        if self._normalize:
            # Scale by 50 and shift the L channel to roughly center it.
            lab /= 50
            lab[0] -= 1
        return lab
class ChainProcess(BaseDataProcess):
    """Applies a sequence of processing steps in order."""

    def __init__(self, process):
        # type: (typing.Iterable[BaseDataProcess]) -> None
        self._process = process

    def __call__(self, data, test):
        for step in self._process:
            data = step(data, test)
        return data
|
from ._GMatNonLinearElastic import *
|
class Solution:
    # Greedy, working backwards from target: halve when even, otherwise
    # increment; once target <= startValue only decrements remain.
    # Time: O(log target). Space: O(1).
    def brokenCalc(self, startValue: int, target: int) -> int:
        ops = 0
        while target > startValue:
            if target % 2 == 0:
                target //= 2
            else:
                target += 1
            ops += 1
        return ops + (startValue - target)
id = 1
class Patient(object):
    """A simple patient record: id, name and age."""

    def __init__(self, id, name='Alex', age=85):
        # Store the constructor arguments on the instance.
        self.id = id
        self.name = name
        self.age = age

    def printDetails(self):
        """Print the patient's name and age to stdout."""
        print('Name:' + self.name)
        print('Age:' + str(self.age))
# Create a patient with the default name/age and show it.
p1 = Patient(id)
p1.printDetails()
id += 1
# Second patient with explicit name and age.
p2 = Patient(id,'Bob',67)
p2.printDetails()
import os
from unittest.mock import patch
from urllib.parse import urlsplit, urlunsplit
from alembic import command
from alembic.config import Config
from pytest import fixture
from sqlalchemy import create_engine, text
# Async fixtures are only defined when asyncpg and SQLAlchemy's asyncio
# extension are importable (see the `if asyncio_support:` section below).
try:
    import asyncpg  # noqa
    from sqlalchemy.ext.asyncio import create_async_engine
    asyncio_support = True
except ImportError:
    asyncio_support = False
@fixture(scope="session")
def db_url():
    """Default db url used by depending fixtures.

    Host is `postgres` when the CI environment variable is set, otherwise
    `localhost`; user and database are both `postgres`.
    """
    in_ci = "CI" in os.environ
    return "postgresql://postgres@{}/postgres".format("postgres" if in_ci else "localhost")
@fixture(scope="session")
def sqla_connection(db_url):
    """Session-wide SQLAlchemy connection, closed when the session ends."""
    db_engine = create_engine(db_url)
    db_conn = db_engine.connect()
    yield db_conn
    db_conn.close()
@fixture(scope="session")
def alembic_ini_path():  # pragma: no cover
    """Path for alembic.ini file, defaults to `./alembic.ini`.

    Override this fixture to point db_migration at another config file.
    """
    return "./alembic.ini"
@fixture(scope="session")
def db_migration(db_url, sqla_connection, alembic_ini_path):
    """Run alembic upgrade at test session setup and downgrade at tear down.

    Override fixture `alembic_ini_path` to change path of `alembic.ini` file.
    """
    config = Config(file_=alembic_ini_path)
    config.set_main_option("sqlalchemy.url", db_url)
    # Start from a pristine schema before applying migrations.
    sqla_connection.execute(text("DROP SCHEMA public CASCADE; CREATE SCHEMA public;"))
    command.upgrade(config, "head")
    yield
    command.downgrade(config, "base")
@fixture
def sqla_modules():
    """Placeholder fixture: tests must override it to import their entities."""
    message = (
        "sqla_modules fixture is not defined. Define a sqla_modules fixture which "
        "imports all modules with sqla entities deriving from fastapi_sqla.Base ."
    )
    raise Exception(message)
@fixture(autouse=True)
def sqla_reflection(sqla_modules, sqla_connection, db_url):
    # Bind and reflect fastapi_sqla's declarative Base against the shared
    # test connection; depending on sqla_modules guarantees every entity
    # module is imported before reflection runs.
    import fastapi_sqla
    fastapi_sqla.Base.metadata.bind = sqla_connection
    fastapi_sqla.Base.prepare(sqla_connection)
@fixture(autouse=True)
def patch_sessionmaker(db_url, sqla_connection, sqla_transaction):
    """So that all DB operations are never written to db for real."""
    # Force the app to use the shared, transaction-wrapped connection.
    with patch("fastapi_sqla.engine_from_config") as patched:
        patched.return_value = sqla_connection
        yield patched
@fixture
def sqla_transaction(sqla_connection):
    """Per-test transaction, always rolled back at teardown."""
    tx = sqla_connection.begin()
    yield tx
    tx.rollback()
@fixture
def session(sqla_transaction, sqla_connection):
    """Sqla session to use when creating db fixtures.

    Bound to the shared connection whose transaction is rolled back, so no
    record is really written; the application still sees anything committed
    through this session.
    """
    import fastapi_sqla
    db_session = fastapi_sqla._Session(bind=sqla_connection)
    yield db_session
    db_session.close()
def format_async_async_sqlalchemy_url(url):
    """Return `url` with its scheme suffixed by '+asyncpg' (async driver)."""
    parts = urlsplit(url)
    return urlunsplit(
        (f"{parts.scheme}+asyncpg", parts.netloc, parts.path, parts.query, parts.fragment)
    )
@fixture(scope="session")
def async_sqlalchemy_url(db_url):
    """Default async db url.

    Same as `db_url`, but with the `postgresql+asyncpg://` scheme.
    """
    return format_async_async_sqlalchemy_url(db_url)
# Async counterparts of the fixtures above; only defined when asyncpg and
# SQLAlchemy's asyncio extension imported successfully (see probe above).
if asyncio_support:
    @fixture
    async def async_engine(async_sqlalchemy_url):
        # Fresh async engine per test, bound to the +asyncpg URL.
        return create_async_engine(async_sqlalchemy_url)
    @fixture
    async def async_sqla_connection(async_engine, event_loop):
        # Connection wrapped in a transaction that is rolled back at exit.
        async with async_engine.begin() as connection:
            yield connection
            await connection.rollback()
    @fixture(autouse=True)
    async def patch_async_sessionmaker(async_sqlalchemy_url, async_sqla_connection):
        """So that all async DB operations are never written to db for real."""
        with patch(
            "fastapi_sqla.asyncio_support.create_async_engine"
        ) as create_async_engine:
            create_async_engine.return_value = async_sqla_connection
            yield create_async_engine
    @fixture
    async def async_session(async_sqla_connection):
        # Async session bound to the rolled-back connection, closed at exit.
        from fastapi_sqla.asyncio_support import _AsyncSession
        session = _AsyncSession(bind=async_sqla_connection)
        yield session
        await session.close()
|
import asyncio
import datetime
from pyrogram.errors import FloodWait, UserNotParticipant
from pyrogram.types import Message
from pytgcalls import StreamType
from pytgcalls.exceptions import NoActiveGroupCall
from pytgcalls.types.input_stream import AudioPiped, AudioVideoPiped
from .calls import Call
from database.lang_utils import get_message as gm
class TelegramPlayer(Call):
    """Streams Telegram audio/video attachments into a group voice chat.

    Media is downloaded locally first; when something is already playing
    in the chat the track is queued instead of played immediately.
    """

    async def _local_audio_play(
        self,
        mess: Message,
        user_id: int,
        chat_id: int,
        title: str,
        duration: str,
        source_file: str,
        link: str,
    ):
        """Join the group call and stream `source_file` as audio.

        Edits `mess` with a "now streaming" status on success; retries
        after starting a missing call, waiting out a flood, or joining
        the chat.
        """
        mention = await self.bot.get_user_mention(user_id)
        call = self.call
        self.init_telegram_player(
            chat_id, user_id, title, duration, source_file, link, "local_music"
        )
        audio_quality, _ = self.get_quality(chat_id)
        try:
            await call.join_group_call(
                chat_id,
                AudioPiped(source_file, audio_quality),
                stream_type=StreamType().local_stream,
            )
            return await mess.edit(
                f"""
{gm(chat_id, 'now_streaming')}
๐ {gm(chat_id, 'yt_title')}: [{title}]({link})
โฑ๏ธ {gm(chat_id, 'duration')}: {duration}
โจ {gm(chat_id, 'req_by')}: {mention}
๐ฅ {gm(chat_id, 'stream_type_title')}: {gm(chat_id, 'stream_type_local_audio')}""",
                disable_web_page_preview=True,
            )
        except NoActiveGroupCall:
            # No voice chat yet: start one, then retry.
            await self.start_call(chat_id)
            await self._local_audio_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )
        except FloodWait as e:
            # Bug fix: format() belongs on the translated template, not on
            # the message key (which contains no placeholder).
            await mess.edit(gm(chat_id, "error_flood").format(e.x))
            await asyncio.sleep(e.x)
            await self._local_audio_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )
        except UserNotParticipant:
            # The assistant is not in the chat yet: join and retry.
            await self.join_chat(chat_id)
            await self._local_audio_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )

    async def _local_video_play(
        self,
        mess: Message,
        user_id: int,
        chat_id: int,
        title: str,
        duration: str,
        source_file: str,
        link: str,
    ):
        """Join the group call and stream `source_file` as audio + video."""
        call = self.call
        mention = await self.bot.get_user_mention(user_id)
        self.init_telegram_player(
            chat_id, user_id, title, duration, source_file, link, "video_file"
        )
        audio_quality, video_quality = self.get_quality(chat_id)
        try:
            await call.join_group_call(
                chat_id,
                AudioVideoPiped(source_file, audio_quality, video_quality),
                stream_type=StreamType().local_stream,
            )
            return await mess.edit(
                f"""
{gm(chat_id, 'now_streaming')}
๐ {gm(chat_id, 'yt_title')}: [{title}]({link})
โฑ๏ธ {gm(chat_id, 'duration')}: {duration}
โจ {gm(chat_id, 'req_by')}: {mention}
๐ฅ {gm(chat_id, 'stream_type_title')}: {gm(chat_id, 'stream_type_local_video')}""",
                disable_web_page_preview=True,
            )
        except NoActiveGroupCall:
            await self.start_call(chat_id)
            await self._local_video_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )
        except FloodWait as e:
            # Bug fixes: format the translated template (not the key) and
            # actually wait out the flood before retrying, like the audio
            # handler does.
            await mess.edit(gm(chat_id, "error_flood").format(e.x))
            await asyncio.sleep(e.x)
            await self._local_video_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )
        except UserNotParticipant:
            await self.join_chat(chat_id)
            await self._local_video_play(
                mess, user_id, chat_id, title, duration, source_file, link
            )

    async def local_music(self, user_id: int, replied: Message):
        """Queue or immediately play an audio/voice attachment from `replied`."""
        chat_id = replied.chat.id
        playlist = self.playlist.playlist
        if replied.audio or replied.voice:
            bom = await replied.reply(gm(chat_id, "process"))
            download = await replied.download()
            link = replied.link
            if replied.audio:
                duration = replied.audio.duration
                # Prefer the track title, then the file name, then a generic label.
                if replied.audio.title:
                    title = replied.audio.title[:36]
                elif replied.audio.file_name:
                    title = replied.audio.file_name[:36]
                else:
                    title = "Music"
            else:
                title = "Voice Note"
                duration = replied.voice.duration
            duration = str(datetime.timedelta(seconds=duration))
            if playlist and chat_id in playlist and len(playlist[chat_id]) >= 1:
                # Something is already playing: enqueue instead.
                objects = {
                    "user_id": user_id,
                    "title": title,
                    "duration": duration,
                    "source_file": download,
                    "link": link,
                    "stream_type": "local_music",
                }
                mess = await bom.edit(gm(chat_id, "track_queued"))
                self.playlist.insert_one(chat_id, objects)
                await asyncio.sleep(5)
                return await mess.delete()
            return await self._local_audio_play(
                bom, user_id, chat_id, title, duration, download, link
            )

    async def local_video(self, user_id: int, replied: Message):
        """Queue or immediately stream a video/document attachment from `replied`."""
        chat_id = replied.chat.id
        playlist = self.playlist.playlist
        if replied.video or replied.document:
            bom = await replied.reply(gm(chat_id, "process"))
            source_file = await replied.download()
            link = replied.link
            if replied.video:
                title = replied.video.file_name[:36]
                duration = replied.video.duration
            else:
                title = replied.document.file_name[:36]
                # Documents carry no duration metadata.
                duration = None
            # Bug fix: the original assigned the string "Not Found" and then
            # passed it to timedelta(seconds=...), raising TypeError for
            # every document. Only format a real (non-zero) duration.
            duration = (
                str(datetime.timedelta(seconds=duration)) if duration else "Not Found"
            )
            if playlist and chat_id in playlist and len(playlist[chat_id]) >= 1:
                # Something is already playing: enqueue instead.
                objects = {
                    "user_id": user_id,
                    "title": title,
                    "duration": duration,
                    "source_file": source_file,
                    "link": link,
                    "stream_type": "local_video",
                }
                mess = await bom.edit(gm(chat_id, "track_queued"))
                self.playlist.insert_one(chat_id, objects)
                await asyncio.sleep(5)
                # Bug fix: delete() is a coroutine and must be awaited
                # (local_music already did so).
                return await mess.delete()
            return await self._local_video_play(
                bom, user_id, chat_id, title, duration, source_file, link
            )
|
"""
Module that provides a class that filters profanities
Source: https://stackoverflow.com/a/3533322
"""
__author__ = "leoluk"
__version__ = '0.0.1'
import random
import re
class ProfanitiesFilter(object):
    def __init__(self, filterlist, ignore_case=True, replacements="$@%-?!",
                 complete=True, inside_words=False):
        """
        Inits the profanity filter.

        filterlist -- a list of regular expressions that
        matches words that are forbidden
        ignore_case -- ignore capitalization
        replacements -- string with characters to replace the forbidden word
        complete -- completely remove the word or keep the first and last char?
        inside_words -- search inside other words?
        """
        self.badwords = filterlist
        self.ignore_case = ignore_case
        self.replacements = replacements
        self.complete = complete
        self.inside_words = inside_words

    def _make_clean_word(self, length):
        """
        Generates a random replacement string of a given length
        using the chars in self.replacements.
        """
        return ''.join([random.choice(self.replacements) for i in
                        range(length)])

    def __replacer(self, match):
        """Replace one regex match according to the `complete` setting."""
        value = match.group()
        if self.complete:
            return self._make_clean_word(len(value))
        # Bug fix: for a 1-char match the original returned
        # value[0] + value[-1], duplicating the character ('a' -> 'aa').
        # Fall back to full replacement for words too short to keep both
        # a distinct first and last character.
        if len(value) < 2:
            return self._make_clean_word(len(value))
        return value[0] + self._make_clean_word(len(value) - 2) + value[-1]

    def clean(self, text):
        """Cleans a string from profanity."""
        # Match inside words or only whole words, per configuration.
        regexp_insidewords = {
            True: r'(%s)',
            False: r'\b(%s)\b',
        }
        regexp = (regexp_insidewords[self.inside_words] %
                  '|'.join(self.badwords))
        r = re.compile(regexp, re.IGNORECASE if self.ignore_case else 0)
        return r.sub(self.__replacer, text)
"""925. Long Pressed Name
https://leetcode.com/problems/long-pressed-name/
Your friend is typing his name into a keyboard.
Sometimes, when typing a character c, the key might get long pressed,
and the character will be typed 1 or more times.
You examine the typed characters of the keyboard. Return True if it is possible
that it was your friends name, with some characters (possibly none) being long pressed.
Example 1:
Input: name = "alex", typed = "aaleex"
Output: true
Explanation: 'a' and 'e' in 'alex' were long pressed.
Example 2:
Input: name = "saeed", typed = "ssaaedd"
Output: false
Explanation: 'e' must have been pressed twice, but it wasn't in the typed output.
Example 3:
Input: name = "leelee", typed = "lleeelee"
Output: true
Example 4:
Input: name = "laiden", typed = "laiden"
Output: true
Explanation: It's not necessary to long press any character.
Constraints:
1 <= name.length <= 1000
1 <= typed.length <= 1000
The characters of name and typed are lowercase letters.
"""
class Solution:
    def is_long_pressed_name(self, name: str, typed: str) -> bool:
        """Return True if `typed` could be `name` with some keys long-pressed.

        Compares consecutive runs of equal characters: runs must appear in
        the same order with the same character, and each typed run must be
        at least as long as the matching name run.
        """
        i = j = 0
        len_name, len_typed = len(name), len(typed)
        while i < len_name and j < len_typed:
            if name[i] != typed[j]:
                return False
            ch = name[i]
            # Measure the current run in both strings.
            run_name = 0
            while i < len_name and name[i] == ch:
                i += 1
                run_name += 1
            run_typed = 0
            while j < len_typed and typed[j] == ch:
                j += 1
                run_typed += 1
            if run_typed < run_name:
                return False
        # Both strings must be fully consumed.
        return i == len_name and j == len_typed
if __name__ == '__main__':
    # Smoke tests; the expected result is noted in each trailing comment.
    sol = Solution()
    print(sol.is_long_pressed_name("alex", "aaleex"))  # t
    print(sol.is_long_pressed_name("saeed", "ssaaedd"))  # f
    print(sol.is_long_pressed_name("leelee", "lleeelee"))  # t
    print(sol.is_long_pressed_name("laiden", "laiden"))  # t
    print(sol.is_long_pressed_name("pyplrz", "ppyypllr"))  # f
|
from Queue import CodaListaCollegata
from Queue import CodaArrayList
from Queue import CodaArrayList_deque
from time import time
#global functions
def enqueueTest(q, n=50000):
    """Enqueue n integers into q and print the elapsed wall-clock time."""
    t0 = time()
    for item in range(n):
        q.enqueue(item)
    print("Required time:", time() - t0, "seconds.")
def dequeueTest(q, n=50000):
    """Dequeue n elements from q and print the elapsed wall-clock time."""
    t0 = time()
    for _ in range(n):
        q.dequeue()
    print("Required time:", time() - t0, "seconds.")
# Run only when this module is executed directly (i.e. NOT imported
# from another module).
if __name__ == "__main__":
    # Enqueue benchmark: same element count for each queue implementation.
    print("\tEnqueueing elements")
    print("CodaListaCollegata")
    ql=CodaListaCollegata()
    enqueueTest(ql)
    print("CodaArrayList")
    qal=CodaArrayList()
    enqueueTest(qal)
    print("CodaArrayList_deque")
    qald=CodaArrayList_deque()
    enqueueTest(qald)
    # Dequeue benchmark: drains the queues filled above.
    print("\tDequeueing elements")
    print("CodaListaCollegata")
    dequeueTest(ql)
    print("CodaArrayList")
    dequeueTest(qal)
    print("CodaArrayList_deque")
    dequeueTest(qald)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from flask import abort
from flask_login import current_user
from .models import User
def role_required(role):
    """Decorator factory: abort with 404 unless the current user is
    authenticated and has at least `role`."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # 404 (not 403) so protected URLs look like missing pages.
            allowed = current_user.is_authenticated and current_user.role >= role
            if not allowed:
                abort(404)
            return func(*args, **kwargs)
        return wrapper
    return decorator
# Ready-made decorators for the two privileged roles.
company_required = role_required(User.ROLE_COMPANY)
admin_required = role_required(User.ROLE_ADMIN)
|
# Expected parsed structure for a test fixture; presumably the golden
# output of a parser for a device "show" command covering an SR-TE policy
# (status, candidate paths, binding SID, stats, event history) — confirm
# against the matching parser test.
expected_output = {
    "r5-s": {
        "name": "r5-s",
        "color": 102,
        "end_point": "5.5.5.5",
        "owners": "CLI",
        "status": {
            "admin": "up",
            "operational": {
                "state": "up",
                "time_for_state": "20:10:54",
                "since": "08-25 10:31:14.279"
            }
        },
        "candidate_paths": {
            "preference": {
                1: {
                    "path_type": {
                        "explicit": {
                            "segment_list": {
                                "to-R5-s": {
                                    "status": "active",
                                    "weight": 1,
                                    "metric_type": "TE",
                                    "hops": {
                                        1: {
                                            "sid": 16052,
                                            "sid_type": "Prefix-SID",
                                            "local_address": "5.5.5.5"
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "type": "CLI"
                }
            }
        },
        "attributes": {
            "binding_sid": {
                17: {
                    "allocation_mode": "dynamic",
                    "state": "programmed"
                }
            },
            "auto_route": "Include all (Strict)"
        },
        "tunnel_id": "65537",
        "interface_handle": "0x81",
        "stats": {
            "packets": 32,
            "bytes": 2680
        },
        "event_history": {
            1: {
                "timestamp": "08-20 14:31:31.046",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED CP: 1"
                }
            },
            2: {
                "timestamp": "08-20 14:31:31.090",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED CP: 1"
                }
            },
            3: {
                "timestamp": "08-20 14:31:43.795",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED CP: 1"
                }
            },
            4: {
                "timestamp": "08-24 11:22:13.235",
                "client": "CLI",
                "event_type": "Policy ADMIN DOWN",
                "context": {
                    "shutdown": "r5-s"
                }
            },
            5: {
                "timestamp": "08-24 11:22:15.369",
                "client": "CLI",
                "event_type": "Policy state DOWN",
                "context": {
                    "no shutdown": "r5-s"
                }
            },
            6: {
                "timestamp": "09-09 20:15:58.969",
                "client": "CLI AGENT",
                "event_type": "Policy created",
                "context": {
                    "Name": "maxsid"
                }
            },
            7: {
                "timestamp": "09-09 20:16:09.573",
                "client": "CLI AGENT",
                "event_type": "Set colour",
                "context": {
                    "Colour": "100"
                }
            },
            8: {
                "timestamp": "09-09 20:16:09.573",
                "client": "CLI AGENT",
                "event_type": "Set end point",
                "context": {
                    "End-point": "10.169.196.241"
                }
            },
            9: {
                "timestamp": "09-09 20:16:23.728",
                "client": "CLI AGENT",
                "event_type": "Set explicit path",
                "context": {
                    "Path option": "maxsid"
                }
            },
            10: {
                "timestamp": "09-09 20:19:30.195",
                "client": "FH Resolution",
                "event_type": "Policy state UP",
                "context": {
                    "Status": "PATH RESOLVED"
                }
            },
            11: {
                "timestamp": "09-09 20:19:30.202",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED"
                }
            },
            12: {
                "timestamp": "09-09 20:56:19.877",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED"
                }
            },
            13: {
                "timestamp": "09-09 20:57:51.007",
                "client": "CLI AGENT",
                "event_type": "Set binding SID",
                "context": {
                    "BSID": "Binding SID set"
                }
            },
            14: {
                "timestamp": "09-09 21:15:51.840",
                "client": "CLI AGENT",
                "event_type": "Set explicit path",
                "context": {
                    "Path option": "test1"
                }
            },
            15: {
                "timestamp": "09-09 21:19:04.452",
                "client": "CLI AGENT",
                "event_type": "Set explicit path",
                "context": {
                    "Path option": "test1"
                }
            },
            16: {
                "timestamp": "09-09 21:19:04.454",
                "client": "FH Resolution",
                "event_type": "Policy state UP",
                "context": {
                    "Status": "PATH RESOLVED"
                }
            },
            17: {
                "timestamp": "09-09 21:19:04.458",
                "client": "FH Resolution",
                "event_type": "REOPT triggered",
                "context": {
                    "Status": "REOPTIMIZED"
                }
            },
            18: {
                "timestamp": "09-09 21:20:20.811",
                "client": "CLI AGENT",
                "event_type": "Remove path option",
                "context": {
                    "Path option": "300"
                }
            },
            19: {
                "timestamp": "09-09 21:20:20.812",
                "client": "FH Resolution",
                "event_type": "Policy state UP",
                "context": {
                    "Status": "PATH RESOLVED"
                }
            }
        }
    }
}
from collections import deque, namedtuple
ExploreItem = namedtuple('ExploreItem', ['coordinates', 'distance'])


def find_path(walls, start_coordinates, end_coordinates):
    '''Finds the length of the shortest path from start_coordinates to end_coordinates.

    Breadth-first search over the grid; returns None when no path exists.

    >>> t = True; f = False
    >>> find_path([[f, f, f], [f, t, t], [f, f, f]], (2, 2), (0, 2))
    6
    >>> find_path([[f, f, f], [t, t, t], [f, f, f]], (2, 2), (0, 0))
    '''
    rows, cols = len(walls), len(walls[0])
    frontier = deque([ExploreItem(start_coordinates, 0)])
    visited = set()
    while frontier:
        item = frontier.popleft()
        if item.coordinates in visited:
            continue
        if item.coordinates == end_coordinates:
            return item.distance
        row, col = item.coordinates
        for neighbor in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)):
            nr, nc = neighbor
            if (0 <= nr < rows and 0 <= nc < cols
                    and not walls[nr][nc]
                    and neighbor not in visited):
                frontier.append(ExploreItem(neighbor, item.distance + 1))
        visited.add(item.coordinates)
|
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
import math
def main(num):
    """Factor num into primes.

    Returns (largest_prime_factor, all_prime_factors); the first element
    equals num's largest prime factor because factors are divided out in
    increasing order until only a prime remains.
    """
    factors = []
    # Every integer > 1 has a prime factorization, so this terminates.
    while True:
        smallest = spf(num)
        factors.append(smallest)
        if smallest == num:
            # num itself is prime (or 1): done.
            return num, factors
        num //= smallest


# smallest prime factor
def spf(num):
    """Return the smallest prime factor of num (num itself when prime)."""
    # Trial division up to sqrt(num): any composite has a factor there.
    limit = int(math.sqrt(num)) + 1
    for candidate in range(2, limit):
        if num % candidate == 0:
            return candidate
    return num
if __name__ == '__main__':
    # Prints (largest_prime_factor, all_prime_factors) for the puzzle input.
    ans = main(600851475143)
    print(str(ans))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-17 20:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import mirrors.fields
class Migration(migrations.Migration):
    """Initial auto-generated schema for the mirrors app.

    Creates CheckLocation, Mirror, MirrorLog, MirrorProtocol, MirrorRsync
    and MirrorUrl, then wires MirrorLog.url once MirrorUrl exists.
    Auto-generated by Django — do not hand-edit field definitions.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CheckLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(max_length=255)),
                ('source_ip', models.GenericIPAddressField(unique=True, unpack_ipv4=True, verbose_name='source IP')),
                ('country', django_countries.fields.CountryField(max_length=2)),
                ('created', models.DateTimeField(editable=False)),
            ],
            options={
                'ordering': ('hostname', 'source_ip'),
            },
        ),
        migrations.CreateModel(
            name='Mirror',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('tier', models.SmallIntegerField(choices=[(0, 'Tier 0'), (1, 'Tier 1'), (2, 'Tier 2'), (-1, 'Untiered')], default=2)),
                ('admin_email', models.EmailField(blank=True, max_length=255)),
                ('alternate_email', models.EmailField(blank=True, max_length=255)),
                ('public', models.BooleanField(default=True)),
                ('active', models.BooleanField(default=True)),
                ('isos', models.BooleanField(default=True, verbose_name='ISOs')),
                ('rsync_user', models.CharField(blank=True, default='', max_length=50)),
                ('rsync_password', models.CharField(blank=True, default='', max_length=50)),
                ('bug', models.PositiveIntegerField(blank=True, null=True, verbose_name='Flyspray bug')),
                ('notes', models.TextField(blank=True)),
                ('created', models.DateTimeField(editable=False)),
                ('last_modified', models.DateTimeField(editable=False)),
                ('upstream', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='mirrors.Mirror')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='MirrorLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('check_time', models.DateTimeField(db_index=True)),
                ('last_sync', models.DateTimeField(null=True)),
                ('duration', models.FloatField(null=True)),
                ('is_success', models.BooleanField(default=True)),
                ('error', models.TextField(blank=True, default='')),
                ('location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='mirrors.CheckLocation')),
            ],
            options={
                'get_latest_by': 'check_time',
                'verbose_name': 'mirror check log',
            },
        ),
        migrations.CreateModel(
            name='MirrorProtocol',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('protocol', models.CharField(max_length=10, unique=True)),
                ('is_download', models.BooleanField(default=True, help_text='Is protocol useful for end-users, e.g. HTTP')),
                ('default', models.BooleanField(default=True, help_text='Included by default when building mirror list?')),
                ('created', models.DateTimeField(editable=False)),
            ],
            options={
                'ordering': ('protocol',),
            },
        ),
        migrations.CreateModel(
            name='MirrorRsync',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', mirrors.fields.IPNetworkField(max_length=44, verbose_name='IP')),
                ('created', models.DateTimeField(editable=False)),
                ('mirror', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsync_ips', to='mirrors.Mirror')),
            ],
            options={
                'ordering': ('ip',),
                'verbose_name': 'mirror rsync IP',
            },
        ),
        migrations.CreateModel(
            name='MirrorUrl',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=255, unique=True, verbose_name='URL')),
                ('country', django_countries.fields.CountryField(blank=True, db_index=True, max_length=2)),
                ('has_ipv4', models.BooleanField(default=True, editable=False, verbose_name='IPv4 capable')),
                ('has_ipv6', models.BooleanField(default=False, editable=False, verbose_name='IPv6 capable')),
                ('created', models.DateTimeField(editable=False)),
                ('active', models.BooleanField(default=True)),
                ('mirror', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='urls', to='mirrors.Mirror')),
                ('protocol', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='urls', to='mirrors.MirrorProtocol')),
                ('bandwidth', models.FloatField(blank=True, null=True, verbose_name='bandwidth (mbits)')),
            ],
            options={
                'verbose_name': 'mirror URL',
            },
        ),
        # Added separately so MirrorUrl exists before the FK is created.
        migrations.AddField(
            model_name='mirrorlog',
            name='url',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='mirrors.MirrorUrl'),
        ),
    ]
|
import re
from django.db import models
from .settings import *
from django.conf import settings
from django.template import Template
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
try:
from urllib.parse import urlencode
except ImportError:
# py2
from urllib import urlencode
try:
from urllib.parse import urlsplit, urlunsplit, parse_qsl
except ImportError:
from urlparse import urlsplit, urlunsplit, parse_qsl
import requests
# use CMSPLUGIN_SOUNDCLOUD_COLORS to override COLORS
COLORS = getattr(settings, 'CMSPLUGIN_SOUNDCLOUD_COLORS',
        CMSPLUGIN_SOUNDCLOUD_COLORS)
# use CMSPLUGIN_SOUNDCLOUD_HEIGHTS to override HEIGHTS
HEIGHTS = getattr(settings, 'CMSPLUGIN_SOUNDCLOUD_HEIGHTS',
        CMSPLUGIN_SOUNDCLOUD_HEIGHTS)
# SoundCloud oEmbed endpoint; %s receives the track/page URL.
# NOTE(review): plain http — the endpoint presumably redirects to https; confirm.
OEMBED_URL_FORMAT = 'http://soundcloud.com/oembed?url=%s&format=json'
def get_sound_properties(url):
    """Fetch and return the oEmbed metadata dict for *url* from SoundCloud."""
    response = requests.get(OEMBED_URL_FORMAT % url)
    return response.json()
class SoundCloud(CMSPlugin):
    """CMS plugin that embeds a SoundCloud player widget.

    The metadata fields (author, title, thumbnail, iframe ``src``) are
    refreshed from the SoundCloud oEmbed endpoint on every save.
    """
    url = models.CharField(_('Sound Cloud URL'), max_length=255,
        help_text=_('(i. e. https://soundcloud.com/band/song)'))
    # The fields below are filled automatically from the oEmbed response.
    author_name = models.CharField(max_length=255, editable=False)
    author_url = models.CharField(max_length=255, editable=False)
    title = models.CharField(max_length=255, editable=False)
    description = models.CharField(max_length=255, editable=False)
    thumbnail_url = models.CharField(max_length=255, editable=False)
    color = models.CharField(_('Color'), max_length=6, choices=COLORS,
        default=COLORS[0][0],
        help_text=_('Main color of the widget.'))
    auto_play = models.BooleanField(_('Play automatically'))
    show_artwork = models.BooleanField(_('Show artwork'))
    hide_related = models.BooleanField(_('Hide related'))
    visual = models.BooleanField(_('Visual mode'))
    # Fixed help_text typo: "widhte" -> "widget".
    height = models.IntegerField(_('Height'), choices=HEIGHTS,
        default=HEIGHTS[0][0],
        help_text=_('Height of widget in visual mode.'))
    src = models.TextField(editable=False)

    def __unicode__(self):
        return self.title

    # Python 3 uses __str__; alias it so the admin shows the title there too.
    __str__ = __unicode__

    def save(self, *args, **kwargs):
        """Refresh oEmbed metadata, rebuild the widget iframe src, then save."""
        properties = get_sound_properties(self.url)
        self.author_name = properties['author_name']
        self.author_url = properties['author_url']
        self.title = properties['title']
        self.description = properties['description']
        self.thumbnail_url = properties['thumbnail_url']
        if not self.visual:
            # The classic (non-visual) widget has a fixed height of 166px.
            self.height = 166
        # Extract the iframe URL from the oEmbed HTML and override its query
        # string with the options configured on this plugin instance.
        url = urlsplit(re.findall(r'src="([^"]*)"', properties['html'])[0])
        qs = dict(parse_qsl(url.query))
        qs['color'] = self.color
        qs['auto_play'] = self.auto_play and 'true' or 'false'
        qs['show_artwork'] = self.show_artwork and 'true' or 'false'
        qs['hide_related'] = self.hide_related and 'true' or 'false'
        qs['visual'] = self.visual and 'true' or 'false'
        self.src = urlunsplit((url.scheme, url.netloc, url.path, urlencode(qs), url.fragment))
        super(SoundCloud, self).save(*args, **kwargs)
|
"""
Module initialization that adjusts the path for beets.
"""
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
""" Copyright 2017 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class headers(object):
    """Akamai CPS API request headers, keyed by change-management category
    and operation ("info" / "update")."""

    def __init__(self):
        # Several categories share the identical acknowledgement headers;
        # copies are made below so each entry stays an independent dict.
        ack_update = {
            "Accept": "application/vnd.akamai.cps.change-id.v1+json",
            "Content-Type": "application/vnd.akamai.cps.acknowledgement.v1+json",
        }
        warnings_info = {
            "Accept": "application/vnd.akamai.cps.warnings.v1+json",
        }
        self.data = {
            "category": {
                "change-management-info": {
                    "info": {
                        "Accept": "application/vnd.akamai.cps.change-management-info.v3+json"
                    },
                    "update": {
                        "Accept": "application/vnd.akamai.cps.change-id.v1+json",
                        "Content-Type": "application/vnd.akamai.cps.acknowledgement-with-hash.v1+json",
                    },
                    # NOTE(review): "deloyment-info" looks like a typo for
                    # "deployment-info" — kept byte-identical because callers
                    # may already rely on this exact key.
                    "deloyment-info": {
                        "Accept": "application/vnd.akamai.cps.deployment.v1+json"
                    },
                },
                "lets-encrypt-challenges": {
                    "info": {
                        "Accept": "application/vnd.akamai.cps.dv-challenges.v1+json"
                    },
                    "update": dict(ack_update),
                },
                "post-verification-warnings": {
                    "info": dict(warnings_info),
                    "update": dict(ack_update),
                },
                "pre-verification-warnings": {
                    "info": dict(warnings_info),
                    "update": dict(ack_update),
                },
                "third-party-csr": {
                    "info": {
                        "Accept": "application/vnd.akamai.cps.csr.v1+json"
                    },
                    "update": {
                        "Accept": "application/vnd.akamai.cps.change-id.v1+json",
                        "Content-Type": "application/vnd.akamai.cps.certificate-and-trust-chain.v1+json",
                    },
                },
            }
        }
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
representation of coordinates as latitude/longitude
distance calculations: https://gist.github.com/rochacbruno/2883505
more calculations: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
def norm_lat(lat):
    """Clamp a latitude to the valid range [-90.0, 90.0]."""
    if lat < -90.0:
        return -90.0
    if lat > 90.0:
        return 90.0
    return lat
def norm_lon(lon):
    """Wrap a longitude into the half-open range [-180.0, 180.0)."""
    if lon < -180:
        lon += 360
    elif lon >= 180:
        lon -= 360
    return lon
class LatLon:
    """A latitude/longitude coordinate pair.

    Equality compares on a 1e-4-degree grid (~11 m), so nearly identical
    positions compare equal.  The hash is computed on the same grid — this
    fixes the original inconsistency where two objects could be equal yet
    hash differently, breaking use in sets/dicts.
    """

    def __init__(self, lat, lon):
        # Latitude is clamped, longitude wrapped, via the module helpers.
        self.lat = norm_lat(lat)
        self.lon = norm_lon(lon)

    def __hash__(self):
        # Hash on the same 1e-4-degree grid that __eq__ compares on, so that
        # equal objects always hash equal (required dict/set invariant).
        return hash((int(self.lat * 10000), int(self.lon * 10000)))

    def __eq__(self, other):
        return (int(self.lat * 10000) == int(other.lat * 10000)
                and int(self.lon * 10000) == int(other.lon * 10000))

    def __repr__(self):
        return "LatLon({:8.5f}, {:8.5f})".format(self.lat, self.lon)

    def __str__(self):
        return "({:8.5f}/{:8.5f})".format(self.lat, self.lon)

    def distance_to(self, other):
        """Great-circle distance to *other* in km (haversine formula)."""
        radius = 6371  # mean Earth radius, km
        d_lat = math.radians(other.lat - self.lat)
        d_lon = math.radians(other.lon - self.lon)
        a = (math.pow(math.sin(d_lat / 2) , 2)
             + math.cos(math.radians(self.lat))
             * math.cos(math.radians(other.lat))
             * math.pow(math.sin(d_lon / 2), 2))
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        return radius * c

    def bearing_to(self, other):
        """Initial bearing from self to *other*, degrees in [0, 360)."""
        lat1 = math.radians(self.lat)
        lat2 = math.radians(other.lat)
        d_lon = math.radians(other.lon - self.lon)
        y = math.sin(d_lon) * math.cos(lat2)
        x = (math.cos(lat1) * math.sin(lat2)
             - math.sin(lat1) * math.cos(lat2) * math.cos(d_lon))
        bearing = math.degrees(math.atan2(y, x))
        return (bearing + 360.0) % 360.0

    def bearing_from(self, other):
        """Back-bearing: the bearing_to result rotated by 180 degrees."""
        return (self.bearing_to(other) + 180.0) % 360.0

    def neighbours(self):
        """The up-to-8 surrounding 1-degree-lat by 2-degree-lon grid cells.

        Returns 9 cells when self itself does not lie exactly on a grid
        corner (it then is not removed by the set difference).
        """
        lat = int(self.lat)
        lon = int(self.lon / 2) * 2  # grid is 2 degrees wide in longitude
        neighbours = set()
        for d_lon in range(-1, 2):
            for d_lat in range(-1, 2):
                neighbours.add(LatLon(lat + d_lat, lon + d_lon * 2))
        return neighbours - set([self])
|
import os
import tensorflow as tf
from config.config_Faster_RCNN import cfg
import random
import numpy as np
import skimage.io
from utils.faster_rcnn.anchors import all_anchor_conner, anchor_labels_process
import cv2
import xml.etree.ElementTree as ET
class preprocess:
    """TFRecord dataset builder/reader for Faster R-CNN training.

    Reads Pascal-VOC-style XML annotations that sit next to the images in
    cfg.image_dir, shards them into .tfrecords files under cfg.data_dir,
    and exposes a tf.data pipeline yielding the preprocessed image,
    ground-truth boxes and RPN anchor labels / regression targets.

    Fix: a stray non-comment Korean text line in the original preprocess()
    method (a broken comment) caused a SyntaxError; it is now a comment.
    """

    def __init__(self, is_train=True):
        # self.input_shape = cfg.image_size
        # self.PIXEL_MEANS = np.array([[[122.7717, 115.9465, 102.9801]]])
        # Per-channel mean pixel values subtracted during preprocessing.
        self.PIXEL_MEANS = [122.7717, 115.9465, 102.9801]
        self.is_train = is_train
        self.num_classes = cfg.num_classes
        self.data_dir = cfg.data_dir
        os.makedirs(self.data_dir, exist_ok=True)
        file_pattern = self.data_dir + "/*" + '.tfrecords'
        self.tfrecord_files = tf.gfile.Glob(file_pattern)
        self.classes = []
        # Build the TFRecord shards on first run (data_dir starts empty).
        if len(self.tfrecord_files) == 0:
            self.make_tfrecord(self.data_dir, cfg.tfrecord_num)
            self.tfrecord_files = tf.gfile.Glob(file_pattern)

    def read_annotations(self):
        """Scan cfg.image_dir and collect image paths and their VOC boxes.

        Returns (images, bboxes) where bboxes[i] is a list of
        [xmin, ymin, xmax, ymax, label, height, width] rows for images[i].
        Side effects: populates self.classes and writes cfg.class_file.
        """
        images = []
        bboxes = []
        files = os.listdir(cfg.image_dir)
        for file_idx, file in enumerate(files):
            if file_idx % 1000 == 0:
                print(file_idx)
            if file.find('xml') != -1:
                continue
            xml_file = file.replace('png', 'xml').replace('jpg', 'xml')
            try:
                tree = ET.parse(os.path.join(cfg.image_dir, xml_file))
            except:
                print(xml_file)
                continue
            objs = tree.findall('object')
            # Skip images that have no annotated objects.
            if len(objs) == 0 or objs[0].find('name') is None:
                continue
            size = tree.findall('size')[0]
            height = float(size.find('height').text)
            width = float(size.find('width').text)
            boxes = []
            for obj in objs:
                bbox = obj.find('bndbox')
                xmin = float(bbox.find('xmin').text)
                ymin = float(bbox.find('ymin').text)
                xmax = float(bbox.find('xmax').text)
                ymax = float(bbox.find('ymax').text)
                label = obj.find('name').text
                if xmin < 0 or ymin < 0:
                    print(file)
                boxes.append([xmin, ymin, xmax, ymax, label, height, width])
                self.classes.append(label)
            bboxes.append(boxes)
            images.append(os.path.join(cfg.image_dir, file))
        self.classes = list(set(self.classes))
        self.classes.sort()
        if not os.path.exists(cfg.class_file):
            open(cfg.class_file, 'w', encoding='utf-8').writelines("\n".join(self.classes))
        return images, bboxes

    def make_tfrecord(self, tfrecord_path, num_tfrecords):
        """Serialize the annotated dataset into num_tfrecords shard files."""
        images, bboxes = self.read_annotations()
        images_num = int(len(images) / num_tfrecords)
        for index_records in range(num_tfrecords):
            output_file = os.path.join(tfrecord_path, str(index_records) + '_' + '.tfrecords')
            with tf.python_io.TFRecordWriter(output_file) as record_writer:
                for index in range(index_records * images_num, (index_records + 1) * images_num):
                    with tf.gfile.FastGFile(images[index], 'rb') as file:
                        image = file.read()
                        xmin, xmax, ymin, ymax, label, height, width = [], [], [], [], [], [], []
                        for box in bboxes[index]:
                            xmin.append(box[0])
                            ymin.append(box[1])
                            xmax.append(box[2])
                            ymax.append(box[3])
                            label.append(self.classes.index(box[4]))
                            height.append(box[5])
                            width.append(box[6])
                        example = tf.train.Example(features=tf.train.Features(
                            feature={
                                'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                                'image/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=xmin)),
                                'image/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=xmax)),
                                'image/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=ymin)),
                                'image/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=ymax)),
                                'image/label': tf.train.Feature(float_list=tf.train.FloatList(value=label)),
                                'image/height': tf.train.Feature(float_list=tf.train.FloatList(value=height)),
                                'image/width': tf.train.Feature(float_list=tf.train.FloatList(value=width)),
                            }
                        ))
                        record_writer.write(example.SerializeToString())
                        if index % 1000 == 0:
                            print('Processed {} of {} images'.format(index + 1, len(images)))

    def parser(self, serialized_example):
        """Decode one serialized example into training tensors.

        Returns (image, bbox, label, scaled_height, scaled_width,
        positive_labels, positive_negative_labels, gt_positive_labels_bbox,
        gt_positive_negative_labels, anchors).
        """
        features = tf.parse_single_example(
            serialized_example,
            features={
                'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
                'image/xmin': tf.VarLenFeature(dtype=tf.float32),
                'image/xmax': tf.VarLenFeature(dtype=tf.float32),
                'image/ymin': tf.VarLenFeature(dtype=tf.float32),
                'image/ymax': tf.VarLenFeature(dtype=tf.float32),
                'image/label': tf.VarLenFeature(dtype=tf.float32),
                'image/height': tf.VarLenFeature(dtype=tf.float32),
                'image/width': tf.VarLenFeature(dtype=tf.float32)
            }
        )
        image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
        image = tf.image.convert_image_dtype(image, tf.uint8)
        xmin = tf.expand_dims(features['image/xmin'].values, axis=0)
        ymin = tf.expand_dims(features['image/ymin'].values, axis=0)
        xmax = tf.expand_dims(features['image/xmax'].values, axis=0)
        ymax = tf.expand_dims(features['image/ymax'].values, axis=0)
        label = features['image/label'].values
        # Stack the corner vectors into an (N, 4) box tensor.
        bbox = tf.concat(axis=0, values=[xmin, ymin, xmax, ymax])
        bbox = tf.transpose(bbox, [1, 0])
        height = features['image/height'].values[0]
        height = tf.cast(height, tf.float32)
        width = features['image/width'].values[0]
        width = tf.cast(width, tf.float32)
        image, bbox, scale = self.preprocess(image, bbox, height, width)
        anchors = tf.py_func(all_anchor_conner, [tf.math.ceil(width * scale), tf.math.ceil(height * scale), cfg.anchor_scales, cfg.anchor_ratios, cfg.feat_stride],
                             tf.float32)
        # labels: 1 for positive anchors, 0 for negative ones, -1 for the rest.
        # labels_bbox: for every anchor, the index of the GT object that
        # overlaps it the most.
        labels, labels_bbox = tf.py_func(anchor_labels_process,
                                         [bbox, anchors, cfg.anchor_batch, cfg.overlaps_max,
                                          cfg.overlaps_min, tf.math.ceil(width * scale), tf.math.ceil(height * scale)]
                                         , [tf.float32, tf.int32])
        positive_labels, positive_negative_labels, gt_positive_labels_bbox, gt_positive_negative_labels = self.generate_anchors_labels(labels, labels_bbox, bbox, anchors)
        return image, bbox, label, tf.math.ceil(height * scale), tf.math.ceil(width * scale), positive_labels, positive_negative_labels, gt_positive_labels_bbox, gt_positive_negative_labels, anchors

    def preprocess(self, image, bbox, height, width):
        """Resize + normalize the image and scale its boxes accordingly.

        Returns (image, bbox, scale), scale being the factor applied to
        both image dimensions.
        """
        short_side = tf.minimum(height, width)
        long_side = tf.maximum(height, width)
        scale_min = cfg.min_image_size / short_side
        scale_max = cfg.max_image_size / long_side
        # If scaling the short side up to min_image_size would push the long
        # side past max_image_size, fall back to the long-side scale instead.
        # scale = cfg.min_image_size / short_side
        scale = tf.cond(scale_min * long_side > cfg.max_image_size, lambda : scale_max, lambda : scale_min)
        image = tf.image.resize_images(image, [tf.math.ceil(height * scale), tf.math.ceil(width * scale)],
                                       method=tf.image.ResizeMethod.BILINEAR)
        # Mean-subtract and scale pixel values down.
        image = tf.subtract(image, self.PIXEL_MEANS)
        image = tf.divide(image, 128)
        # image -= self.PIXEL_MEANS
        # image /= 128
        xmin, ymin, xmax, ymax = tf.split(value=bbox, num_or_size_splits=4, axis=1)
        xmin = xmin * scale
        xmax = xmax * scale
        ymin = ymin * scale
        ymax = ymax * scale
        bbox = tf.concat([xmin, ymin, xmax, ymax], 1)
        if self.is_train:
            def _flip_left_right_boxes(boxes):
                # Mirror the x coordinates around the (scaled) image width.
                # xmin, ymin, xmax, ymax, label = tf.split(value=boxes, num_or_size_splits=5, axis=1)
                xmin, ymin, xmax, ymax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
                flipped_xmin = tf.subtract(width * scale, xmax)
                flipped_xmax = tf.subtract(width * scale, xmin)
                flipped_boxes = tf.concat([flipped_xmin, ymin, flipped_xmax, ymax], 1)
                return flipped_boxes
            # 50% random horizontal flip applied to image and boxes together.
            flip_left_right = tf.greater(tf.random_uniform([], dtype=tf.float32, minval=0, maxval=1), 0.5)
            image = tf.cond(flip_left_right, lambda: tf.image.flip_left_right(image), lambda: image)
            bbox = tf.cond(flip_left_right, lambda: _flip_left_right_boxes(bbox), lambda: bbox)
        # image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
        # bbox = tf.clip_by_value(bbox, clip_value_min=0, clip_value_max=width - 1)
        return image, bbox, scale

    def generate_anchors_labels(self, labels, labels_bbox, bbox, anchors):
        """Derive the RPN training targets from the raw anchor labelling.

        Returns (positive_labels, positive_negative_labels,
        gt_positive_labels_bbox, gt_positive_negative_labels).
        """
        # Indices of the anchors that participate in training (positive and
        # negative), and of the positive anchors alone.
        positive_negative_labels = tf.reshape(tf.where(tf.not_equal(labels, -1)), [-1])
        positive_labels = tf.reshape(tf.where(tf.equal(labels, 1)), [-1])
        # labels_gt_order
        positive_labels_bbox = tf.gather(labels_bbox, positive_labels)
        positive_labels_acnhors = tf.gather(anchors, positive_labels)
        # Convert the ground-truth corner boxes to center-x/center-y/w/h.
        gt_x1 = bbox[:, 0]
        gt_y1 = bbox[:, 1]
        gt_x2 = bbox[:, 2]
        gt_y2 = bbox[:, 3]
        gt_x = tf.expand_dims(tf.cast((gt_x1 + gt_x2) / 2.0, dtype=tf.float32), -1)
        gt_y = tf.expand_dims(tf.cast((gt_y1 + gt_y2) / 2.0, dtype=tf.float32), -1)
        gt_w = tf.expand_dims(tf.cast((gt_x2 - gt_x1 + 1.0), dtype=tf.float32), -1)
        gt_h = tf.expand_dims(tf.cast((gt_y2 - gt_y1 + 1.0), dtype=tf.float32), -1)
        gt_bbox = tf.concat([gt_x, gt_y, gt_w, gt_h], axis=1)
        # Convert the positive anchors to center-x/center-y/w/h as well.
        positive_labels_acnhors_x1 = positive_labels_acnhors[:, 0]
        positive_labels_acnhors_y1 = positive_labels_acnhors[:, 1]
        positive_labels_acnhors_x2 = positive_labels_acnhors[:, 2]
        positive_labels_acnhors_y2 = positive_labels_acnhors[:, 3]
        positive_labels_acnhors_x = tf.cast((positive_labels_acnhors_x2 + positive_labels_acnhors_x1) / 2.0, dtype=tf.float32)
        positive_labels_acnhors_y = tf.cast((positive_labels_acnhors_y2 + positive_labels_acnhors_y1) / 2.0, dtype=tf.float32)
        positive_labels_acnhors_w = tf.cast((positive_labels_acnhors_x2 - positive_labels_acnhors_x1), dtype=tf.float32)
        positive_labels_acnhors_h = tf.cast((positive_labels_acnhors_y2 - positive_labels_acnhors_y1), dtype=tf.float32)
        # Ground-truth (x, y, w, h) box assigned to each positive anchor;
        # used for the RPN bbox regression loss.  (The paper weights
        # positives 1 / negatives 0 before summing the bbox differences —
        # gathering only the positives and summing is equivalent.)
        positive_labels_bbox = tf.gather(gt_bbox, positive_labels_bbox)
        # Regression targets (tx, ty, tw, th) the RPN must predict for each
        # positive anchor.
        gt_positive_labels_x = tf.cast((positive_labels_bbox[:, 0] - positive_labels_acnhors_x) / positive_labels_acnhors_w, dtype=tf.float32)
        gt_positive_labels_y = tf.cast((positive_labels_bbox[:, 1] - positive_labels_acnhors_y) / positive_labels_acnhors_h, dtype=tf.float32)
        gt_positive_labels_w = tf.cast(tf.log(positive_labels_bbox[:, 2] / positive_labels_acnhors_w), dtype=tf.float32)
        gt_positive_labels_h = tf.cast(tf.log(positive_labels_bbox[:, 3] / positive_labels_acnhors_h), dtype=tf.float32)
        gt_positive_labels_bbox = tf.stack(
            [gt_positive_labels_x, gt_positive_labels_y, gt_positive_labels_w, gt_positive_labels_h], axis=1)
        gt_positive_negative_labels = tf.gather(labels, positive_negative_labels)
        gt_positive_negative_labels = tf.to_int32(gt_positive_negative_labels)
        return positive_labels, positive_negative_labels, gt_positive_labels_bbox, gt_positive_negative_labels

    def build_dataset(self, batch_size):
        """Return a repeating, batched tf.data pipeline over the TFRecords."""
        dataset = tf.data.TFRecordDataset(filenames=self.tfrecord_files)
        dataset = dataset.map(self.parser, num_parallel_calls=10)
        # dataset = dataset.repeat().shuffle(1000).batch(batch_size).prefetch(batch_size)
        # NOTE(review): shuffle(1) keeps a one-element buffer, i.e. effectively
        # no shuffling — presumably left from debugging; confirm before training.
        dataset = dataset.repeat().shuffle(1).batch(batch_size).prefetch(batch_size)
        return dataset
if __name__ == '__main__':
    # Smoke test / visual check: build the input pipeline, pull batches and
    # draw ground-truth boxes (green) and positive anchors (red) with OpenCV.
    dataset = preprocess(True)
    out_dir = 'F:\\aaa_'
    os.makedirs(out_dir, exist_ok=True)
    count = 0
    # dataset.read_annotations()
    train_data = dataset.build_dataset(cfg.batch_size)
    iterator = train_data.make_one_shot_iterator()
    tf_image, tf_bbox, tf_label, tf_height, tf_width , \
    tf_positive_labels, tf_positive_negative_labels, tf_gt_positive_labels_bbox, tf_gt_positive_negative_labels, tf_anchors = iterator.get_next()
    sess = tf.Session()
    # Loops forever; each iteration blocks in cv2.waitKey until a key press.
    while True:
        image, bbox, label, height, width, positive_labels, positive_negative_labels, gt_positive_labels_bbox, gt_positive_negative_labels, anchors = \
        sess.run([tf_image, tf_bbox, tf_label, tf_height, tf_width , tf_positive_labels,
                  tf_positive_negative_labels, tf_gt_positive_labels_bbox, tf_gt_positive_negative_labels, tf_anchors])
        image = image.astype(np.uint8)
        # Only the first sample of the batch is visualized.
        image = image[0]
        bbox = bbox[0]
        label = label[0]
        # NOTE(review): unlike parser(), this call omits cfg.feat_stride and
        # relies on all_anchor_conner's default — confirm the two agree.
        anchors = all_anchor_conner(width, height, cfg.anchor_scales, cfg.anchor_ratios)
        for box_idx, box in enumerate(bbox):
            # print('box : ', box.astype(np.uint8))
            cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0))
        print('image : ', image.shape)
        print('label : ', label.shape)
        print('anchors : ', anchors.shape)
        print('len(boxes) : ', len(bbox))
        print('positive_negative_label : ', positive_negative_labels.shape)
        print('positive_label : ', positive_labels.shape)
        print('gt_positive_label : ', gt_positive_labels_bbox.shape)
        print('gt_positive_negative_labels : ', gt_positive_negative_labels.shape)
        print('positive_labels : ', positive_labels)
        print('positive_negative_labels : ', positive_negative_labels)
        print('gt_positive_negative_labels : ', gt_positive_negative_labels)
        print('gt_positive_labels_bbox : ', gt_positive_labels_bbox)
        # Draw the anchors that were labelled positive for the first sample.
        positive_labels_acnhors = anchors[positive_labels,]
        positive_labels_acnhors = positive_labels_acnhors[0]
        for positive_labels_acnhors_idx, positive_anchor in enumerate(positive_labels_acnhors):
            # print((int(positive_labels_acnhors[0]), int(positive_labels_acnhors[1])), (int(positive_labels_acnhors[2]), int(positive_labels_acnhors[3])))
            # print(gt_obj[gener_acs_gt_idx])
            cv2.rectangle(image, (int(positive_anchor[0]), int(positive_anchor[1])), (int(positive_anchor[2]), int(positive_anchor[3])), (0, 0, 255))
        cv2.imshow('i', image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
# labels, anchor_obj = anchor_labels_process(boxes, acs, cfg.anchor_batch, cfg.overlaps_max,
# cfg.overlaps_min, w, h)
#
# print('width, height : ', w, h)
# print('labels : ', labels.shape)
# print('anchor_obj : ', anchor_obj.shape)
#
#
# # anchor_obj ๋ ๊ทธ๋ฆผ์์ ํ์ํ ์ ์๋ ๋ชจ๋ anchor ์
# # print(anchor_obj[labels==1].shape)
# # ์ค์ label์ 1์ object๊ฐ ์๋ค, 0์ ๋ฐฐ๊ฒฝ
# # cls loss๋ 2๊ฐ๋ฅผ ๋ค ๋งํ์ผ ํ๊ณ
# # bbox loss๋ positive anchor์ ๋ํด์๋ง ๊ณ์ฐ
#
# # gener_acs_gts = acs[labels!=-1, ]
# # gt_obj = anchor_obj[labels!=-1,]
# gener_acs_gts = acs[labels==1, ]
#
# # bbox๋ ๋ฌด์กฐ๊ฑด positive anchor ์๋ํด์๋ง!!!
# gt_obj = anchor_obj[labels==1,]
#
# print('gener_acs_gts : ', gener_acs_gts.shape)
# print('gt_obj : ', gt_obj.shape)
#
# for gener_acs_gt_idx, gener_acs_gt in enumerate(gener_acs_gts):
# # print(gener_acs_gt)
# # print(gt_obj[gener_acs_gt_idx])
# cv2.rectangle(i, (int(gener_acs_gt[0]), int(gener_acs_gt[1])), (int(gener_acs_gt[2]), int(gener_acs_gt[3])), (0, 0, 255))
#
#
# gt_x1 = boxes[:, 0]
# gt_y1 = boxes[:, 1]
# gt_x2 = boxes[:, 2]
# gt_y2 = boxes[:, 3]
#
# re_gt_0 = (gt_x1 + gt_x2) / 2.0
# re_gt_1 = (gt_y1 + gt_y2) / 2.0
# re_gt_2 = (gt_x2 - gt_x1) + 1.0
# re_gt_3 = (gt_y2 - gt_y1) + 1.0
#
#
# re_gt_0 = np.reshape(re_gt_0, [-1, 1])
# re_gt_1 = np.reshape(re_gt_1, [-1, 1])
# re_gt_2 = np.reshape(re_gt_2, [-1, 1])
# re_gt_3 = np.reshape(re_gt_3, [-1, 1])
#
# re_gt = np.concatenate([re_gt_0, re_gt_1, re_gt_2, re_gt_3], axis=1)
#
# print('re_gt.shape : ', re_gt.shape)
# print('re_gt : ', re_gt)
# print('gt_obj : ', gt_obj)
# # positive anchor์ ๋ํ gt ์ขํ
# gt = re_gt[gt_obj]
# print('gt : ', gt)
# print('gt : ', gt.shape)
#
# anchor_x1 = gener_acs_gts[:, 0]
# anchor_y1 = gener_acs_gts[:, 1]
# anchor_x2 = gener_acs_gts[:, 2]
# anchor_y2 = gener_acs_gts[:, 3]
# re_anchor_0 = (anchor_x2 + anchor_x1) / 2.0
# re_anchor_1 = (anchor_y2 + anchor_y1) / 2.0
# re_anchor_2 = (anchor_x2 - anchor_x1)
# re_anchor_3 = (anchor_y2 - anchor_y1)
#
# bbox_gt_0 = (gt[:, 0] - re_anchor_0) / re_anchor_2
# bbox_gt_1 = (gt[:, 1] - re_anchor_1) / re_anchor_3
# bbox_gt_2 = np.log(gt[:, 2] / re_anchor_2)
# bbox_gt_3 = np.log(gt[:, 3] / re_anchor_3)
# re_bbox_gt = np.stack(
# [bbox_gt_0, bbox_gt_1, bbox_gt_2, bbox_gt_3], axis=1)
#
# print(re_bbox_gt)
#
#
# cv2.imshow('i', i)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# # cv2.imwrite(os.path.join(out_dir, '{}.jpg'.format(count)), i)
# # count+=1
# # if count == 100:
# # break |
import argparse

# Command-line interface: renders `text` as large "banner" letters drawn with
# a 5x5 glyph font, with configurable mark/space/newline strings and gaps.
parser = argparse.ArgumentParser()
parser.add_argument('text', help='Message to encode')
parser.add_argument('-d', '--dict', dest='dict', default=None, help='Language file to load')
parser.add_argument('-m', '--mark', dest='mark', default='*', help='Mark character/string')
parser.add_argument('-s', '--space', dest='space', default=' ', help='Space character/string')
parser.add_argument('-n', '--newline', dest='newline', default='\n', help='Newline character/string')
parser.add_argument('-g', '--gap', dest='gap', type=int, default=1, help='Inter-character gap')
parser.add_argument('-G', '--space-gap', dest='space_gap', type=int, default=3, help='Gap for spaces')
parser.add_argument('--no-compress', dest='no_compress', action='store_true', help='Don\'t compress common spaces in letters')
parser.add_argument('--debug', dest='debug', action='store_true', help='Print out a bunch of useless debugging information')
args = parser.parse_args()
# Load the glyph dictionary: either from a user-supplied language file
# (which must define a variable `d`), or fall back to the built-in 5x5 font.
if args.dict:
    # execfile() only exists on Python 2 — use exec(open(...).read()) so the
    # script runs on Python 3 (where this file's print(..., end='') syntax
    # already requires it).  Pls set variable "d" in the loaded file.
    # NOTE: this executes arbitrary code from the file — only load trusted
    # language files.
    with open(args.dict) as dict_file:
        exec(dict_file.read())
else:
    d = {
        '0': ' ** \n* * \n* * \n* * \n ** ',
        '1': ' * \n ** \n* * \n * \n*****',
        '2': ' *** \n* *\n ** \n * \n*****',
        '3': '*** \n * \n*** \n * \n*** ',
        '4': ' * \n ** \n* * \n**** \n * ',
        '5': '*** \n* \n** \n * \n** ',
        '6': '*** \n* \n*** \n* * \n*** ',
        '7': '*****\n * \n * \n * \n* ',
        '8': ' *** \n* *\n *** \n* *\n *** ',
        '9': '*** \n* * \n*** \n * \n*** ',
        'A': ' * \n * * \n*****\n* *\n* *',
        'B': '*** \n* * \n*** \n* * \n*** ',
        'C': '*****\n* \n* \n* \n*****',
        'D': '*** \n* * \n* *\n* * \n*** ',
        'E': '**** \n* \n*** \n* \n**** ',
        'F': '**** \n* \n*** \n* \n* ',
        'G': '*****\n* \n* ***\n* *\n*****',
        'H': '* *\n* *\n*****\n* *\n* *',
        'I': '*****\n * \n * \n * \n*****',
        'J': '*****\n * \n * \n* * \n * ',
        'K': '* * \n* * \n** \n* * \n* * ',
        'L': '* \n* \n* \n* \n**** ',
        'M': '* *\n** **\n* * *\n* *\n* *',
        'N': '* *\n** *\n* * *\n* **\n* *',
        'O': '*****\n* *\n* *\n* *\n*****',
        'P': '**** \n* *\n**** \n* \n* ',
        'Q': '**** \n* * \n* ** \n**** \n *',
        'R': '**** \n* *\n**** \n* * \n* *',
        'S': '**** \n* \n**** \n * \n**** ',
        'T': '*****\n * \n * \n * \n * ',
        'U': '* *\n* *\n* *\n* *\n*****',
        'V': '* *\n* *\n * * \n * * \n * ',
        'W': '* *\n* *\n* * *\n** **\n* *',
        'X': '* *\n * * \n * \n * * \n* *',
        'Y': '* *\n * * \n * \n * \n * ',
        'Z': '*****\n * \n * \n * \n*****',
    }
# Split each glyph into rows and sanity-check the font's dimensions.
lines = {k: v.split('\n') for k, v in d.items()}
rsz = None  # expected row count, taken from the first glyph seen
for k, v in lines.items():
    if rsz is None:
        # NOTE(review): the first glyph only establishes rsz and is then
        # skipped — it is never column-checked or compressed below. Looks
        # unintentional; confirm.
        rsz = len(v)
        continue
    if len(v) != rsz:
        print('Warning: inconsistent row size on glyph', k)
    csz = None     # expected column count for this glyph
    maxlen = None  # widest row, ignoring trailing spaces
    for idx, row in enumerate(v):
        if csz is None:
            csz = len(row)
            maxlen = len(row.rstrip())
            continue
        if len(row) != csz:
            print('Warning: inconsistent column size on glyph', k, 'row', idx)
        maxlen = max(maxlen, len(row.rstrip()))
    if not args.no_compress:
        # Trim every row to the widest marked column so inter-letter spacing
        # is controlled solely by --gap.
        if args.debug:
            print('Sym', k, 'maxlen', maxlen)
        for idx, row in enumerate(v):
            v[idx] = row[:maxlen]
# Render the banner one glyph-row at a time across all message characters.
for idx in range(rsz):
    for ch in args.text:
        if ch == ' ':
            # Word gap: a wider run of the space string.
            print(args.space * args.space_gap, end='')
        else:
            # Characters missing from the font print only the inter-character gap.
            if ch in lines and idx < len(lines[ch]):
                for sym in lines[ch][idx]:
                    if sym == '*':
                        print(args.mark, end='')
                    else:
                        print(args.space, end='')
            print(args.space * args.gap, end='')
    print(args.newline, end='')
|
from typing import Any
from malib.algorithm.common.trainer import Trainer
from malib.algorithm.ppo.loss import PPOLoss
from malib.algorithm.ppo.policy import PPO
class PPOTrainer(Trainer):
    """Trainer that optimizes a PPO policy and periodically syncs its target."""

    def __init__(self, tid):
        super(PPOTrainer, self).__init__(tid)
        self._loss = PPOLoss()
        self.cnt = 0

    def optimize(self, batch):
        """Run one optimization step on *batch* and return the loss stats."""
        assert isinstance(self._policy, PPO), type(self._policy)
        # Sync the target network once every `update_interval` calls.
        interval = self._training_config.get("update_interval", 5)
        self.cnt = (self.cnt + 1) % interval
        if not self.cnt:
            self.policy.update_target()
        self.loss.zero_grad()
        stats = self.loss(batch)
        self.loss.step()
        return stats

    def preprocess(self, **kwargs) -> Any:
        """No-op hook required by the Trainer interface."""
        pass
|
#!/usr/bin/python
"""Deprecated script stub: prints a pointer to the mlogfilter replacement."""
# Using the print() function form so this stub runs under both Python 2 and 3
# (the original `print "..."` statement is a SyntaxError on Python 3).
print("deprecated since version 1.1.0 of mtools. Use 'mlogfilter <logfile> <logfile> ...' instead.")
import pytest
from rollo.constraints import Constraints
from deap import base, creator
from collections import OrderedDict
# Mapping of each optimization output variable to the solver that produces it;
# insertion order defines the index positions used by Constraints.
test_output_dict = OrderedDict(
    {
        "packing_fraction": "openmc",
        "keff": "openmc",
        "num_batches": "openmc",
        "max_temp": "moltres",
    }
)
# Constraint spec: parallel operator/value lists per constrained output.
test_input_constraints = {
    "keff": {"operator": [">=", "<="], "constrained_val": [1, 1.2]},
    "max_temp": {"operator": ["<"], "constrained_val": [500]},
}
# Shared DEAP toolbox passed to every Constraints instance under test.
toolbox = base.Toolbox()
def test_output_dict_numbered():
    """output_dict_numbered should map each output name to its position."""
    c = Constraints(test_output_dict, test_input_constraints, toolbox)
    result = c.output_dict_numbered(test_output_dict)
    expected = {
        name: index
        for index, name in enumerate(
            ["packing_fraction", "keff", "num_batches", "max_temp"]
        )
    }
    assert result == expected
def test_constraints_list():
    """constraints_list should flatten operator/value pairs per output."""
    c = Constraints(test_output_dict, test_input_constraints, toolbox)
    result = c.constraints_list(test_input_constraints)
    expected = [
        ["keff", {"op": ">=", "val": 1}],
        ["keff", {"op": "<=", "val": 1.2}],
        ["max_temp", {"op": "<", "val": 500}],
    ]
    assert result == expected
def test_apply_constraints():
    """apply_constraints should keep only individuals satisfying every constraint."""
    creator.create(
        "obj",
        base.Fitness,
        weights=(
            -1.0,
            1.0,
        ),
    )
    creator.create("Ind", list, fitness=creator.obj)
    # Outputs are (packing_fraction, keff, num_batches, max_temp).
    outputs = [
        (0.1, 1.1, 1, 400),   # satisfies all constraints
        (0.1, 1.1, 1, 600),   # max_temp too high
        (0.1, 1.4, 1, 400),   # keff above the upper bound
        (0.1, 0.9, 1, 400),   # keff below the lower bound
        (0.1, 1.15, 2, 450),  # satisfies all constraints
    ]
    pop = []
    for seed, output in enumerate(outputs, start=1):
        ind = creator.Ind([seed])
        ind.output = tuple(output)
        pop.append(ind)
    c = Constraints(test_output_dict, test_input_constraints, toolbox)
    new_pop = c.apply_constraints(pop)
    expected_pop = [pop[0], pop[4]]
    for ind in new_pop:
        assert ind in expected_pop
    # NOTE(review): only two individuals satisfy the constraints yet the
    # returned population keeps the original size — presumably refilled with
    # copies of the survivors; confirm against rollo.constraints.
    assert len(new_pop) == len(pop)
|
"""Do some stuff with polynomials.
Going to try and write up a nice docstring to get rid of the error.
We'll see... It worked!
"""
import sys
# 6.00 Problem Set 2
#
# Successive Approximation
def call_poly():
    """Prompt for a polynomial and an x value, print the evaluation, and
    return to the menu."""
    print("Enter your polynomial coefficient list. (Format: 0 3 51 ...)")
    coefficients = [float(token) for token in input(">>").split()]
    print(coefficients)
    print("Enter your x value.")
    x_value = float(input(">>"))
    print(evaluate_poly(coefficients, x_value))
    start()
def call_deriv():
    """Prompt for a polynomial, print its derivative, and return to the menu."""
    print("Enter your polynomial coefficient list. (Format: 0 3 51 ...)")
    coefficients = [float(token) for token in input(">>").split()]
    print(compute_deriv(coefficients))
    start()
def call_compute_root():
    """Prompt for a polynomial, an initial guess and an epsilon, print the
    root found by compute_root, and return to the menu."""
    print("Enter your polynomial coefficient list. (Format: 0 3 51 ...)")
    coefficients = [float(token) for token in input(">>").split()]
    print(coefficients)
    print("Enter your initial guess at a root for the previous polynomial.")
    initial_guess = input(">>")
    print("Enter an epsilon or error amount value.")
    error_bound = input(">>")
    print(compute_root(coefficients, initial_guess, error_bound))
    start()
def evaluate_poly(poly, x):
    """Evaluate a polynomial at *x* and return the value as a float.

    poly: tuple/list of numbers, poly[i] is the coefficient of x**i
    x: number (coerced to float)
    returns: float — e.g. (0.0, 0.0, 5.0, 9.3, 7.0) at x=-13 gives 180339.9
    """
    x = float(x)
    # Accumulate coefficient * x**power over all terms; the 0.0 start value
    # guarantees a float result even for an empty polynomial.
    return sum((coeff * x ** power for power, coeff in enumerate(poly)), 0.0)
def compute_deriv(poly):
    """Compute and return the derivative of a polynomial.

    Fixes the original contract mismatch: the docstring promised a tuple and
    ``(0.0,)`` for a zero derivative, but the code returned a list and an
    empty list for constant polynomials.

    poly: tuple of numbers, length > 0; poly[i] is the coefficient of x**i
    returns: tuple of numbers; (0.0,) when the derivative is zero

    Example: (-13.39, 0.0, 17.5, 3.0, 1.0) -> (0.0, 35.0, 9.0, 4.0)
    """
    # Term-by-term power rule: the x**i term contributes i * poly[i] to the
    # coefficient of x**(i-1); the constant term (power 0) is dropped.
    derivative = tuple(coeff * power for power, coeff in enumerate(poly) if power > 0)
    return derivative if derivative else (0.0,)
def compute_root(poly, guess, epsilon):
    """Find a root of *poly* using Newton's method.

    poly: tuple of numbers, length > 1, containing at least one real root;
          the derivative at the guess must not evaluate to 0
    guess: float (or string coercible to float)
    epsilon: float > 0 (or string coercible to float) — error tolerance
    returns: [root, iterations], or the string "failed..." after 100 steps
    """
    deriv = compute_deriv(poly)
    guess = float(guess)
    epsilon = float(epsilon)
    # Accept the initial guess immediately if it is already close enough.
    if abs(evaluate_poly(poly, guess)) < epsilon:
        print("We've found a root!")
        return [guess, 1]
    print("Inital Guess is Incorrect...Calculating")
    current = guess
    iterations = 1
    # Newton iteration: x <- x - f(x)/f'(x) until |f(x)| <= epsilon.
    while abs(evaluate_poly(poly, current)) > epsilon:
        current = current - (evaluate_poly(poly, current)) / \
            (evaluate_poly(deriv, current))
        print("%f as a guess yields %f." % (current, evaluate_poly(poly,
                                                                   current)))
        iterations += 1
        if iterations > 100:  # don't let the loop run forever
            return "failed..."
    return [current, iterations]
def start():
    """Show the worksheet menu, dispatch the chosen function, then repeat.

    Calls itself again after the chosen function returns; any input other
    than 1-3 exits the program.
    """
    print("Enter 1 to call the polynomial function")
    print("Enter 2 to call the derivative function")
    print("Enter 3 to call the root function")
    print("Enter 4 to exit")
    # Dispatch table instead of an if/elif chain.
    handlers = {"1": call_poly, "2": call_deriv, "3": call_compute_root}
    choice = input(">>")
    handler = handlers.get(choice)
    if handler is None:
        sys.exit("exiting...")
    handler()
    start()
# end
|
from clickatell import Transport
class Rest(Transport):
    """
    Provides access to the Clickatell REST API.
    """

    def __init__(self, apiKey):
        """
        Construct a new API instance with the auth key of the API.

        :param str apiKey: The auth key
        """
        self.apiKey = apiKey
        Transport.__init__(self)

    def request(self, action, data=None, headers=None, method='GET'):
        """
        Append the REST headers to every request.

        Note: the ``headers`` argument is accepted for interface
        compatibility but is always replaced by the REST
        authorization/JSON headers below (unchanged from the original
        behaviour).

        :param str action: API endpoint to call
        :param dict data: request payload (a fresh dict when omitted)
        :param dict headers: ignored; overwritten with REST headers
        :param str method: HTTP verb
        """
        # Mutable default arguments ({}) are shared between calls;
        # use None sentinels instead.
        data = {} if data is None else data
        headers = {
            "Authorization": self.apiKey,
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
        return Transport.request(self, action, data, headers, method)

    def sendMessage(self, to, message, extra=None):
        """
        Send a message to one or more recipients.

        If the 'to' parameter is a single entry, we will parse it into a list.
        We will merge default values into the request data and the extra
        parameters provided by the user.

        :param to: recipient number or list of numbers
        :param str message: message content
        :param dict extra: extra request parameters merged into the payload
        """
        extra = {} if extra is None else extra
        to = to if isinstance(to, list) else [to]
        to = [str(num) for num in to]
        data = {'to': to, 'content': message}
        data = self.merge(data, extra)
        content = self.parseResponse(self.request('messages', data, {}, 'POST'))
        return content
from compare import *
# Build the language resources from the English pattern/entity definition files.
my_language = language("patterns/patterns_en.txt", "entities/entities_en.txt")
# Read one line of user input and compare it against the greeting patterns
# tagged "bot", using the loaded entities.
print(compare(input("> "), [["Hello", "Hi", "Hey"], "bot"], entities = my_language.entities))
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from array import array
import socket
import os.path
from base import *
class Board(object):
    """4x4x4 connect-four ("Yonmoku 3D") board with console and socket play.

    Cell encoding: ``self.board`` holds 16 bytes, one per (x, y) column;
    each byte packs four 2-bit cells stacked bottom-to-top
    (blank = 0b00; BLACK/WHITE are 2-bit constants imported from ``base``).
    ``self.history`` stores one byte per BLACK/WHITE move pair: the high
    nibble is BLACK's ``(x << 2) + y``, the low nibble is WHITE's.
    """
    def __init__(self, history_file=None):
        # history_file: optional path that dump_history() appends finished
        # games to (three consecutive zero bytes terminate a game record).
        self.board = array("B", (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0))
        self.history = array("B")
        self.history_file = history_file
        # Networking state, used only by game_online().
        self.sock = None
        self.conn = None
        self.addr = None
        self.nextColor = BLACK
        self.winner = None
    def get_height(self, x,y):
        # return the bottom blank in (x,y)
        for i in xrange(4):
            if not (self.board[y*4+x] & (0b10 << 2*i)):
                # index i (from the bottom) is first blank position
                return i
        raise ValueError("This tower is full!")
    def remove_last_put(self, color):
        # Undo the most recent move of the given color; paired with put()
        # to evaluate hypothetical moves (see get_lizhis()).
        if color == WHITE:
            # WHITE lives in the low nibble: clear it, keep BLACK's move.
            x = (self.history[-1] & 0b00001100) >> 2
            y = (self.history[-1] & 0b00000011)
            self.history[-1] &= 0b11110000
        else:
            # BLACK lives in the high nibble: drop the whole history byte.
            x = (self.history[-1] & 0b11000000) >> 6
            y = (self.history[-1] & 0b00110000) >> 4
            self.history.pop(-1)
        try:
            # The piece to remove sits just below the first blank level.
            i = self.get_height(x,y) - 1
        except ValueError:
            # Column full: the piece to remove is at the top level (index 3).
            i = 3
        assert i >= 0
        self.board[y*4+x] &= ~(color << 2*i) #bit clear
        return
    def put(self, x,y,color):
        # color = BLACK(0b10) or WHITE(0b11)
        try:
            i = self.get_height(x,y)
        except ValueError:
            raise ValueError("This tower is full!")
        # put black or white one
        self.board[y*4+x] |= (color << 2*i)
        # suppose BLACK and WHITE put alternately (BLACK must be first!)
        # each char(1byte) in self.history is filled with every two moves
        if color == WHITE:# (took the second move)
            self.history[-1] += ((x<<2)+y)
        else: #color == BLACK(took the first move)
            self.history.append( ((x<<2)+y) << 4)
        return
    def get(self, x,y,z):
        # z is index from the bottom
        # Returns BLANK, BLACK or WHITE for the cell at (x, y, z).
        if not self.board[y*4+x] & (0b11 << 2*z):
            return BLANK
        elif (self.board[y*4+x] & (0b11 << 2*z))>>2*z == BLACK:
            return BLACK
        elif (self.board[y*4+x] & (0b11 << 2*z))>>2*z == WHITE:
            return WHITE
    def user_put(self, pos,color):
        # eg. pos = "A1" -> y=0,x=0
        # Translate a user coordinate (row letter + column digit) and drop
        # a piece of the given color there.
        if pos[0] == "A":
            y = 0
        elif pos[0] == "B":
            y = 1
        elif pos[0] == "C":
            y = 2
        elif pos[0] == "D":
            y = 3
        else:
            raise ValueError("Invalid board position!")
        if pos[1] == "1":
            x = 0
        elif pos[1] == "2":
            x = 1
        elif pos[1] == "3":
            x = 2
        elif pos[1] == "4":
            x = 3
        else:
            raise ValueError("Invalid board position!")
        return self.put(x,y,color)
    def user_get(self, pos):
        # eg. pos = "A1" -> y=0,x=0
        if pos[0] == "A":
            y = 0
        elif pos[0] == "B":
            y = 1
        elif pos[0] == "C":
            y = 2
        elif pos[0] == "D":
            y = 3
        else:
            raise ValueError("Invalid board position!")
        if pos[1] == "1":
            x = 0
        elif pos[1] == "2":
            x = 1
        elif pos[1] == "3":
            x = 2
        elif pos[1] == "4":
            x = 3
        else:
            raise ValueError("Invalid board position!")
        # NOTE(review): get() takes (x, y, z) but only (x, y) is passed here,
        # so this call raises TypeError -- looks like a latent bug; confirm
        # whether a z argument was meant to be accepted by user_get.
        return self.get(x,y)
    def dump_history(self):
        # Append this game's move history to the history file, if configured.
        if not self.history_file: return
        # NOTE(review): os.path.getsize raises OSError when the file does not
        # exist yet -- confirm the file is pre-created by the caller.
        if os.path.getsize(self.history_file) > 1000000000:
            # if history file is over 1GB, drop the current data
            return
        else:
            # three bytes of zero in a row means the end of the game
            self.history.append(0)
            self.history.append(0)
            self.history.append(0)
            f = open(self.history_file, "ab")
            self.history.tofile(f)
            f.close()
        return
    def is_finished(self):
        # call this method every time the board is changed in order not to miss the finish
        # Returns the winning color, or False when no line of four exists yet.
        r = [self._is_finished_x(),
             self._is_finished_y(),
             self._is_finished_z(),
             self._is_finished_xy(),
             self._is_finished_yz(),
             self._is_finished_zx(),
             self._is_finished_diag()]
        if BLACK in r:
            self.dump_history()
            return BLACK
        elif WHITE in r:
            self.dump_history()
            return WHITE
        else:
            return False
    def _is_finished_x(self):
        # Lines parallel to the x axis.
        for y in xrange(4):
            for z in xrange(4):
                if is_same_non0([self.get(x,y,z) for x in xrange(4)]):
                    return self.get(0,y,z) # winner's color
        return False
    def _is_finished_y(self):
        # Lines parallel to the y axis.
        for z in xrange(4):
            for x in xrange(4):
                if is_same_non0([self.get(x,y,z) for y in xrange(4)]):
                    return self.get(x,0,z)
        return False
    def _is_finished_z(self):
        # Vertical lines (one full column).
        for x in xrange(4):
            for y in xrange(4):
                if is_same_non0([self.get(x,y,z) for z in xrange(4)]):
                    return self.get(x,y,0)
        return False
    def _is_finished_xy(self):
        # Diagonals within each horizontal layer z.
        for z in xrange(4):
            if is_same_non0([self.get(x,y,z) for x,y in [(0,0), (1,1), (2,2), (3,3)]]):
                return self.get(0,0,z)
            if is_same_non0([self.get(x,y,z) for x,y in [(0,3), (1,2), (2,1), (3,0)]]):
                return self.get(0,3,z)
        return False
    def _is_finished_yz(self):
        # Diagonals within each vertical slice of constant x.
        for x in xrange(4):
            if is_same_non0([self.get(x,y,z) for y,z in [(0,0), (1,1), (2,2), (3,3)]]):
                return self.get(x,0,0)
            if is_same_non0([self.get(x,y,z) for y,z in [(0,3), (1,2), (2,1), (3,0)]]):
                return self.get(x,0,3)
        return False
    def _is_finished_zx(self):
        # Diagonals within each vertical slice of constant y.
        for y in xrange(4):
            if is_same_non0([self.get(x,y,z) for z,x in [(0,0), (1,1), (2,2), (3,3)]]):
                return self.get(0,y,0)
            if is_same_non0([self.get(x,y,z) for z,x in [(0,3), (1,2), (2,1), (3,0)]]):
                return self.get(0,y,3)
        return False
    def _is_finished_diag(self):
        # The four space diagonals of the cube.
        # NOTE(review): the last three branches return self.get(0,0,0), which
        # is not a cell on those diagonals -- the winner's color should
        # presumably be read from a cell of the matched line; confirm.
        if is_same_non0([self.get(x,y,z) for x,y,z in [(0,0,0), (1,1,1), (2,2,2), (3,3,3)]]):
            return self.get(0,0,0)
        if is_same_non0([self.get(x,y,z) for x,y,z in [(0,0,3), (1,1,2), (2,2,1), (3,3,0)]]):
            return self.get(0,0,0)
        if is_same_non0([self.get(x,y,z) for x,y,z in [(0,3,0), (1,2,1), (2,1,2), (3,0,3)]]):
            return self.get(0,0,0)
        if is_same_non0([self.get(x,y,z) for x,y,z in [(3,0,0), (2,1,1), (1,2,2), (0,3,3)]]):
            return self.get(0,0,0)
        return False
    def is_lizhi(self):
        # Yield (color, pos) for every "lizhi" (reach) line: presumably a
        # line with three same-colored stones and one remaining blank,
        # as detected by has0_and_same() from base.
        r = [self._is_lizhi_x(),
             self._is_lizhi_y(),
             self._is_lizhi_z(),
             self._is_lizhi_xy(),
             self._is_lizhi_yz(),
             self._is_lizhi_zx(),
             self._is_lizhi_diag()]
        for lizhis in r:
            for color,pos in lizhis:
                yield color,pos
    def _is_lizhi_x(self):
        # Reach lines parallel to the x axis.
        for y in xrange(4):
            for z in xrange(4):
                color,x = has0_and_same([self.get(x,y,z) for x in xrange(4)])
                if color:
                    yield (color, (x,y,z))
    def _is_lizhi_y(self):
        # Reach lines parallel to the y axis.
        for z in xrange(4):
            for x in xrange(4):
                color,y = has0_and_same([self.get(x,y,z) for y in xrange(4)])
                if color:
                    yield (color, (x,y,z))
    def _is_lizhi_z(self):
        # Vertical reach lines.
        for x in xrange(4):
            for y in xrange(4):
                color,z = has0_and_same([self.get(x,y,z) for z in xrange(4)])
                if color:
                    yield (color, (x,y,z))
    def _is_lizhi_xy(self):
        # Reach lines on the two diagonals of each horizontal layer.
        for z in xrange(4):
            color,i = has0_and_same([self.get(x,y,z) for x,y in ((0,0), (1,1), (2,2), (3,3))])
            if color:
                yield (color, ((0,0,z), (1,1,z), (2,2,z), (3,3,z))[i])
            color,i = has0_and_same([self.get(x,y,z) for x,y in ((0,3), (1,2), (2,1), (3,0))])
            if color:
                yield (color, ((0,3,z), (1,2,z), (2,1,z), (3,0,z))[i])
    def _is_lizhi_yz(self):
        # Reach lines on the two diagonals of each constant-x slice.
        for x in xrange(4):
            color,i = has0_and_same([self.get(x,y,z) for y,z in ((0,0), (1,1), (2,2), (3,3))])
            if color:
                yield (color, ((x,0,0), (x,1,1), (x,2,2), (x,3,3))[i])
            color,i = has0_and_same([self.get(x,y,z) for y,z in ((0,3), (1,2), (2,1), (3,0))])
            if color:
                yield (color, ((x,0,3), (x,1,2), (x,2,1), (x,3,0))[i])
    def _is_lizhi_zx(self):
        # Reach lines on the two diagonals of each constant-y slice.
        for y in xrange(4):
            color,i = has0_and_same([self.get(x,y,z) for z,x in ((0,0), (1,1), (2,2), (3,3))])
            if color:
                yield (color, ((0,y,0), (1,y,1), (2,y,2), (3,y,3))[i])
            color,i = has0_and_same([self.get(x,y,z) for z,x in ((0,3), (1,2), (2,1), (3,0))])
            if color:
                yield (color, ((3,y,0), (2,y,1), (1,y,2), (0,y,3))[i])
    def _is_lizhi_diag(self):
        # Reach lines on the four space diagonals of the cube.
        color,i = has0_and_same([self.get(x,y,z) for x,y,z in ((0,0,0), (1,1,1), (2,2,2), (3,3,3))])
        if color:
            yield (color, ((0,0,0), (1,1,1), (2,2,2), (3,3,3))[i])
        color,i = has0_and_same([self.get(x,y,z) for x,y,z in ((0,0,3), (1,1,2), (2,2,1), (3,3,0))])
        if color:
            yield (color, ((0,0,3), (1,1,2), (2,2,1), (3,3,0))[i])
        color,i = has0_and_same([self.get(x,y,z) for x,y,z in ((0,3,0), (1,2,1), (2,1,2), (3,0,3))])
        if color:
            yield (color, ((0,3,0), (1,2,1), (2,1,2), (3,0,3))[i])
        color,i = has0_and_same([self.get(x,y,z) for x,y,z in ((3,0,0), (2,1,1), (1,2,2), (0,3,3))])
        if color:
            yield (color, ((3,0,0), (2,1,1), (1,2,2), (0,3,3))[i])
    def get_number_of_lizhis(self, ):
        # Total count of reach lines currently on the board.
        return sum((sum(1 for _ in self._is_lizhi_x()),
                    sum(1 for _ in self._is_lizhi_y()),
                    sum(1 for _ in self._is_lizhi_z()),
                    sum(1 for _ in self._is_lizhi_xy()),
                    sum(1 for _ in self._is_lizhi_yz()),
                    sum(1 for _ in self._is_lizhi_zx()),
                    sum(1 for _ in self._is_lizhi_diag())))
    def get_lizhis(self, x,y,z, color):
        # return number of lizhis if mycolor is put on (x,y,z)
        # NOTE(review): the z parameter is unused -- put() derives the height
        # from the column itself; confirm callers pass the top blank level.
        numBefore = self.get_number_of_lizhis()
        self.put(x,y,color)
        numAfter = self.get_number_of_lizhis()
        self.remove_last_put(color)
        return numAfter - numBefore
    def get_clearlines(self, x,y,z, opponent_color):
        # Count the lines through (x,y,z) that contain no opponent stone.
        # Starts from the three axis-parallel lines and adds any diagonals
        # that pass through this cell.
        count = 3
        #x
        if opponent_color in [self.get(x_,y,z) for x_ in xrange(4)]:
            count -= 1
        #y
        if opponent_color in [self.get(x,y_,z) for y_ in xrange(4)]:
            count -= 1
        #z
        if opponent_color in [self.get(x,y,z_) for z_ in xrange(4)]:
            count -= 1
        #xy
        if (x,y) in ((0,0),(1,1),(2,2),(3,3)):
            count += 1
            if opponent_color in [self.get(x_,y_,z) for x_,y_ in ((0,0),(1,1),(2,2),(3,3))]:
                count -= 1
        elif (x,y) in ((0,3), (1,2), (2,1), (3,0)):
            count += 1
            if opponent_color in [self.get(x_,y_,z) for x_,y_ in ((0,3), (1,2), (2,1), (3,0))]:
                count -= 1
        #yz
        if (y,z) in ((0,0),(1,1),(2,2),(3,3)):
            count += 1
            if opponent_color in [self.get(x,y_,z_) for y_,z_ in ((0,0),(1,1),(2,2),(3,3))]:
                count -= 1
        elif (y,z) in ((0,3), (1,2), (2,1), (3,0)):
            count += 1
            if opponent_color in [self.get(x,y_,z_) for y_,z_ in ((0,3), (1,2), (2,1), (3,0))]:
                count -= 1
        #zx
        if (z,x) in ((0,0),(1,1),(2,2),(3,3)):
            count += 1
            if opponent_color in [self.get(x_,y,z_) for z_,x_ in ((0,0),(1,1),(2,2),(3,3))]:
                count -= 1
        elif (z,x) in ((0,3), (1,2), (2,1), (3,0)):
            count += 1
            if opponent_color in [self.get(x_,y,z_) for z_,x_ in ((0,3), (1,2), (2,1), (3,0))]:
                count -= 1
        #diag
        if (x,y,z) in ((0,0,0), (1,1,1), (2,2,2), (3,3,3)):
            count += 1
            if opponent_color in [self.get(x_,y_,z_) for x_,y_,z_ in ((0,0,0), (1,1,1), (2,2,2), (3,3,3))]:
                count -= 1
        elif (x,y,z) in ((0,0,3), (1,1,2), (2,2,1), (3,3,0)):
            count += 1
            if opponent_color in [self.get(x_,y_,z_) for x_,y_,z_ in ((0,0,3), (1,1,2), (2,2,1), (3,3,0))]:
                count -= 1
        elif (x,y,z) in ((0,3,0), (1,2,1), (2,1,2), (3,0,3)):
            count += 1
            if opponent_color in [self.get(x_,y_,z_) for x_,y_,z_ in ((0,3,0), (1,2,1), (2,1,2), (3,0,3))]:
                count -= 1
        elif (x,y,z) in ((3,0,0), (2,1,1), (1,2,2), (0,3,3)):
            count += 1
            if opponent_color in [self.get(x_,y_,z_) for x_,y_,z_ in ((3,0,0), (2,1,1), (1,2,2), (0,3,3))]:
                count -= 1
        return count
    def get_scene_dict(self):
        """Return the board as (row label, layer rows) pairs for rendering.

        Each value lists the four z levels (top first), each level holding
        the four x cells rendered as "|" (blank), "B" (black) or "W" (white).
        """
        return [("A",[[["|","","B","W"][self.get(x,0,z)] for x in xrange(4)] for z in (3,2,1,0)]),
                ("B",[[["|","","B","W"][self.get(x,1,z)] for x in xrange(4)] for z in (3,2,1,0)]),
                ("C",[[["|","","B","W"][self.get(x,2,z)] for x in xrange(4)] for z in (3,2,1,0)]),
                ("D",[[["|","","B","W"][self.get(x,3,z)] for x in xrange(4)] for z in (3,2,1,0)]),
                ]
    def get_scene_list(self):
        # Nested [x][y][z] list of rendered cells ("|", "B" or "W").
        return [
            [
                [["|","","B","W"][self.get(x,y,z)] for z in xrange(4)
                 ] for y in xrange(4)
            ] for x in xrange(4)]
    def show(self):
        # Render the whole board (all four rows A-D, top level first)
        # through output(), so it reaches the console and, when connected,
        # the remote player as well.
        self.output(""" BOARD:
 1 2 3 4
 %c %c %c %c
 %c %c %c %c
 %c %c %c %c
A: %c %c %c %c
 %c %c %c %c
 %c %c %c %c
 %c %c %c %c
B: %c %c %c %c
 %c %c %c %c
 %c %c %c %c
 %c %c %c %c
C: %c %c %c %c
 %c %c %c %c
 %c %c %c %c
 %c %c %c %c
D: %c %c %c %c
""" % tuple([["|","","B","W"][self.get(x,y,z)] for y in xrange(4) for z in (3,2,1,0) for x in xrange(4)]))
        return
    def output(self, mes, rt=True):
        # Print locally and, when a remote connection exists, mirror the
        # message to it (rt appends a newline for the remote side).
        print mes
        if self.sock and self.conn:
            if rt:
                self.conn.send(mes+"\n")
            else:
                self.conn.send(mes)
        return
    def get_input(self, mes, is_online):
        # Prompt for input; reads from the remote socket in online mode,
        # from the local console otherwise.
        if is_online and self.sock and self.conn:
            self.output(mes, rt=False)
            data = self.conn.recv(1024)
        else:
            data = raw_input(mes)
        return data
    def game_online(self):
        # Host one networked game: listen on (HOST, PORT) from base, accept
        # a single client, then run the normal game loop.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((HOST, PORT))
        self.sock.listen(1)
        self.conn, self.addr = self.sock.accept()
        print 'Connected by', self.addr
        mode = self.get_input("mode:", is_online=True)
        if mode == "machine":
            # NOTE(review): machine_mode is only ever set here, never read
            # or initialized in __init__ -- confirm it is used elsewhere.
            self.machine_mode = True
        print "Starting game...."
        self.game()
        self.conn.close()
        return
    def game(self):
        # Main game loop: BLACK plays from the local console, WHITE from
        # the remote connection when one exists. Returns when someone wins.
        self.output("")
        self.output("<< Yonmoku3D >>")
        self.show()
        while True:
            self.output("[ BLACK's turn ]")
            while True:
                pos = self.get_input("Where to put? >", is_online=False)
                try:
                    self.user_put(pos,BLACK)
                except ValueError,message:
                    # Bad coordinate or full column: re-prompt.
                    self.output(message)
                    continue
                else:
                    break
            if self.is_finished():
                self.show()
                self.output("BLACK win!!")
                return
            self.show()
            self.output("[WHITE's turn ]")
            while True:
                pos = self.get_input("Where to put? >", is_online=True)
                try:
                    self.user_put(pos,WHITE)
                except ValueError, message:
                    self.output(message)
                    continue
                else:
                    break
            if self.is_finished():
                self.show()
                self.output("WHITE win!!")
                return
            self.show()
if __name__ == "__main__":
    # Play an offline game on the local console (both players at one keyboard).
    b = Board()
    b.game()
|
import os
from tensorflow import keras
def train_model(x_train, y_train, x_val, y_val, model, params, logger):
    """
    Trains a TensorFlow model and reloads the best checkpointed weights.

    Parameters
    ----------
    x_train : Training data
    y_train : Training labels
    x_val : Validation data
    y_val : Validation labels
    model : TensorFlow model
    params : Dictionary of parameters ('epochs', 'model_file', 'model_folder')
    logger : Logger (currently unused here)

    Returns
    -------
    The model with the best weights observed during training loaded.
    """
    epochs = params['epochs']
    model_file = params['model_file']
    model_folder = params['model_folder']

    # Make sure the checkpoint directory exists before training starts.
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)

    # Persist only the best epoch's weights (minimum monitored value).
    checkpoint = keras.callbacks.ModelCheckpoint(
        model_file,
        save_weights_only=True,
        save_best_only=True,
        mode='min')

    model.fit(
        x=x_train,
        y=y_train,
        batch_size=1,
        epochs=epochs,
        validation_data=(x_val, y_val),
        callbacks=[checkpoint],
    )

    # Restore the best weights saved by the checkpoint callback.
    model.load_weights(model_file)
    return model
|
# Reproduce results from Fig 11 of
# M. Bocquet and P. Sakov (2012): "Combining inflation-free and
# iterative ensemble Kalman filters for strongly nonlinear systems"
from mods.Lorenz63.sak12 import setup
# The only diff to sak12 is R: boc12 uses 1 and 8, sak12 uses 2 (and 8)
from common import *
# Override the observation-noise covariance with the identity (R = 1*I),
# matching the Bocquet & Sakov (2012) configuration referenced above.
setup.h.noise.C = CovMat(eye(3))
# Name this setup after its path relative to the mods/ directory.
setup.name = os.path.relpath(__file__,'mods/')
####################
# Suggested tuning
####################
#from mods.Lorenz63.boc12 import setup ##################### Expected RMSE_a:
#cfgs += iEnKS('-N', N=3,infl=0.95) # 0.20
#
# With dkObs=5:
#cfgs += iEnKS('-N', N=3) # 0.15
#cfgs += iEnKS('-N', N=3,xN=1.4) # 0.14
#
# With R=8*eye(3):
#cfgs += iEnKS('-N', N=3) # 0.70
|
import os.path
import torch.utils.data
from .data_loader import get_transform_dataset
from ..transforms import augment_collate
class AddaDataLoader(object):
    """Paired loader that yields aligned (source, target) batches, e.g. for
    ADDA-style domain adaptation training.

    Wraps the two datasets in independent torch DataLoaders and restarts
    whichever iterator is exhausted, so iteration can continue past the
    end of the shorter dataset.
    """
    def __init__(self, net_transform, dataset, rootdir, downscale, crop_size=None, resize=None,
                 batch_size=1, shuffle=False, num_workers=2, half_crop=None, src_data_flag=None, small=False):
        # dataset: a pair of dataset names; rootdir/<name> is each data root.
        self.dataset = dataset
        self.downscale = downscale
        self.resize = resize
        self.crop_size = crop_size
        self.half_crop = half_crop
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.num_workers = num_workers
        assert len(self.dataset) == 2, 'Requires two datasets: source, target'
        sourcedir = os.path.join(rootdir, self.dataset[0])
        targetdir = os.path.join(rootdir, self.dataset[1])
        self.source = get_transform_dataset(self.dataset[0], sourcedir, net_transform, downscale, resize, src_data_flag=src_data_flag, small=small)
        self.target = get_transform_dataset(self.dataset[1], targetdir, net_transform, downscale, resize, small=small)
        print('Source length:', len(self.source), 'Target length:', len(self.target))
        self.n = max(len(self.source), len(self.target)) # make sure you see all images
        # num counts batches served; next() uses it to decide when to restart
        # each underlying iterator.
        self.num = 0
        self.set_loader_src()
        self.set_loader_tgt()
    def __iter__(self):
        return self
    def __next__(self):
        # Python 3 protocol; delegates to the Python 2 style next().
        return self.next()
    def next(self):
        # Restart an iterator whenever num is a multiple of its length
        # (including num == 0 on the very first call).
        # NOTE(review): relies on the DataLoader iterator defining __len__;
        # confirm this holds for the installed torch version.
        if self.num % len(self.iters_src) == 0:
            print('restarting source dataset')
            self.set_loader_src()
        if self.num % len(self.iters_tgt) == 0:
            print('restarting target dataset')
            self.set_loader_tgt()
        img_src, label_src = next(self.iters_src)
        img_tgt, label_tgt = next(self.iters_tgt)
        self.num += 1
        return img_src, img_tgt, label_src, label_tgt
    def __len__(self):
        # NOTE(review): returns the min of the two lengths while self.n holds
        # the max -- the two notions of "epoch length" disagree; confirm
        # which one callers depend on.
        return min(len(self.source), len(self.target))
    def set_loader_src(self):
        # (Re)build the source DataLoader and its iterator.
        batch_size = self.batch_size
        shuffle = self.shuffle
        num_workers = self.num_workers
        if self.crop_size is not None or self.resize is not None:
            # Batch-level augmentation (resize/crop/flip) done in collate.
            collate_fn = lambda batch: augment_collate(batch, resize=self.resize, crop=self.crop_size,
                                                       halfcrop=self.half_crop, flip=True)
        else:
            collate_fn = torch.utils.data.dataloader.default_collate
        self.loader_src = torch.utils.data.DataLoader(self.source,
                                                      batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
                                                      collate_fn=collate_fn, pin_memory=True)
        self.iters_src = iter(self.loader_src)
    def set_loader_tgt(self):
        # (Re)build the target DataLoader and its iterator.
        batch_size = self.batch_size
        shuffle = self.shuffle
        num_workers = self.num_workers
        if self.crop_size is not None or self.resize is not None:
            collate_fn = lambda batch: augment_collate(batch, resize=self.resize, crop=self.crop_size,
                                                       halfcrop=self.half_crop, flip=True)
        else:
            collate_fn = torch.utils.data.dataloader.default_collate
        self.loader_tgt = torch.utils.data.DataLoader(self.target,
                                                      batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
                                                      collate_fn=collate_fn, pin_memory=True)
        self.iters_tgt = iter(self.loader_tgt)
|
import cv2
import numpy as np
import matplotlib.pylab as plt
# src = cv2.imread("bottle.jpg")
# image = cv2.imread('bottle.jpg',cv2.IMREAD_COLOR)
# image = cv2.bilateralFilter(image,9,75,75)
# original = np.copy(image)
# if image is None:
# print ('Can not read/find the image.')
# exit(-1)
# hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# H,S,V = hsv_image[:,:,0], hsv_image[:,:,1], hsv_image[:,:,2]
# V = V * 2
# hsv_image = cv2.merge([H,S,V])
# image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# image = cv2.medianBlur(image,5)
# plt.figure(), plt.imshow(image)
# plt.show()
# Dx = cv2.Sobel(image,cv2.CV_8UC1,1,0)
# Dy = cv2.Sobel(image,cv2.CV_8UC1,0,1)
# M = cv2.addWeighted(Dx, 1, Dy,1,0)
# plt.subplot(1,3,1), plt.imshow(Dx, 'gray'), plt.title('Dx')
# plt.subplot(1,3,2), plt.imshow(Dy, 'gray'), plt.title('Dy')
# plt.subplot(1,3,3), plt.imshow(M, 'gray'), plt.title('Magnitude')
# plt.show()
# ret, binary = cv2.threshold(M,10,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# plt.figure(), plt.imshow(binary, 'gray')
# plt.show()
# binary = binary.astype(np.uint8)
# binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20)))
# edges = cv2.Canny(binary, 50, 100)
# plt.figure(), plt.imshow(edges, 'gray')
# plt.show()
# lines = cv2.HoughLinesP(edges,1,3.14/180,50,20,10)[0]
# plt.show()
# output = np.zeros_like(M, dtype=np.uint8)
# for line in lines:
# cv2.line(output,(line[0],line[1]), (line[2], line[3]), (100,200,50), thickness=2)
# plt.figure(), plt.imshow(output, 'gray')
# points = np.array([np.transpose(np.where(output != 0))], dtype=np.float32)
# rect = cv2.boundingRect(points)
# cv2.rectangle(original,(rect[1],rect[0]), (rect[1]+rect[3], rect[0]+rect[2]),(255,255,255),thickness=2)
# original = cv2.cvtColor(original,cv2.COLOR_BGR2RGB)
# plt.figure(), plt.imshow(original,'gray')
from skimage.transform import rescale  # NOTE(review): imported but never used
# Load the photo as grayscale and normalize its size.
image = cv2.imread('bottle.jpg',0)
image = cv2.resize(image,(512,512))
# image = cv2.bilateralFilter(image,5,35,35)
# hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# H,S,V = hsv_image[:,:,0], hsv_image[:,:,1], hsv_image[:,:,2]
# V = V * 2
# Color copy used as the canvas for drawing contours and hulls.
cimg = cv2.imread('bottle.jpg',1)
cimg = cv2.resize(cimg,(512,512))
# hsv_image = cv2.merge([H,S,V])
# image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# NOTE(review): img aliases the same ndarray as image, so the masking below
# also mutates image.
img = image
# Suppress pixels whose blue channel is below 180, then smooth.
img[cimg[:,:,0]<180]= 0
img = cv2.medianBlur(image,11)
# img = cv2.imread('bottle.jpg',0)
# img = cv2.medianBlur(img,5)
# cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
plt.imshow(img,cmap="gray",vmin=0,vmax=255),plt.show()
# circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,3,35,
# param1=20,param2=100,minRadius=5,maxRadius=55)
# circles = np.uint16(np.around(circles))
# for i in circles[0,:]:
# # draw the outer circle
# cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# # draw the center of the circle
# cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
# print(len(circles[0,:]))
# cv2.imshow('detected circles',cimg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# th, bw = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Binarize: every remaining nonzero pixel becomes foreground.
img[img>0] = 255
bw = img
plt.imshow(img,cmap="gray"),plt.show()
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# morph = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
# dist = cv2.distanceTransform(morph, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
# borderSize = 75
# distborder = cv2.copyMakeBorder(dist, borderSize, borderSize, borderSize, borderSize,
# cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
# gap = 10
# kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*(borderSize-gap)+1, 2*(borderSize-gap)+1))
# kernel2 = cv2.copyMakeBorder(kernel2, gap, gap, gap, gap,
# cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
# distTempl = cv2.distanceTransform(kernel2, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
# nxcor = cv2.matchTemplate(distborder, distTempl, cv2.TM_CCOEFF_NORMED)
# mn, mx, _, _ = cv2.minMaxLoc(nxcor)
# th, peaks = cv2.threshold(nxcor, mx*0.5, 255, cv2.THRESH_BINARY)
# NOTE(review): the 3-tuple return is the OpenCV 3.x API; OpenCV 4.x returns
# only (contours, hierarchy) -- confirm the installed version.
im2, contours, hierarchy = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(cimg, contours, -1, (0,255,0), 3)
# hull = cv2.convexHull(contours[3])
# for each contour
for cnt in contours:
    # get convex hull
    hull = cv2.convexHull(cnt)
    # draw it in red color
    cv2.drawContours(cimg, [hull], -1, (0, 0, 255), 3)
# peaks8u = cv2.convertScaleAbs(peaks) # to use as mask
# for i in range(len(contours)):
# x, y, w, h = cv2.boundingRect(contours[i])
# _, mx, _, mxloc = cv2.minMaxLoc(dist[y:y+h, x:x+w], peaks8u[y:y+h, x:x+w])
# cv2.circle(cimg, (int(mxloc[0]+x), int(mxloc[1]+y)), int(mx), (255, 0, 0), 2)
# cv2.rectangle(cimg, (x, y), (x+w, y+h), (0, 255, 255), 2)
# cv2.drawContours(cimg, contours, i, (0, 0, 255), 2)
# Show the annotated result until a key is pressed.
cv2.imshow('circles', cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -*- extra stuff goes here -*-
from zope.i18nmessageid import MessageFactory
# Message factory bound to the "collective.geo.file" translation domain.
fileMessageFactory = MessageFactory('collective.geo.file')
def initialize(context):
    """Initializer called when used as a Zope 2 product."""
|
import os, sys, re, subprocess
from typing import List
def get_pycore_dir() -> str:
    """Return the name of the running CORE session directory under /tmp.

    CORE creates a directory named ``pycore.<pid>`` (e.g. ``pycore.32777``)
    for a running session; the first match is returned.

    Exits with a nonzero status when no such directory exists (the original
    used ``quit()``, which exits 0 even on this error path, and shadowed the
    builtin ``dir`` as its loop variable).
    """
    for entry in os.listdir("/tmp"):
        ## Example: pycore.32777
        if entry.startswith("pycore."):
            return entry
    print("pycore directory not found: Have you started running CORE?")
    sys.exit(1)
def print_available_nodes(nodes: List[str]) -> None:
    """Print the node names on one line, comma-separated.

    Fixes the original output, which had no space after the label and left
    a dangling ", " after the final node.

    :param nodes: node names to display
    """
    print("Available nodes: " + ", ".join(nodes))
if __name__ == "__main__":
    pycore_dir: str = get_pycore_dir()
    if len(sys.argv) < 3:
        print("Wrong number of arguments")
        sys.exit(1)
    # vcmd executes the remaining argv inside the named CORE node.
    node_dir = "/tmp/{}/{}".format(pycore_dir, sys.argv[1])
    cmd = ["vcmd", "-c", node_dir, "--"]
    try:
        # stdout=PIPE is required to capture output: the original passed only
        # stderr=STDOUT, so res.stdout was None and .decode() raised an
        # AttributeError that the bare "except Exception: pass" swallowed,
        # silently printing nothing.
        res = subprocess.run(cmd + sys.argv[2:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        print(res.stdout.decode("utf-8"))
    except OSError as e:
        # Surface launch failures (e.g. vcmd missing) instead of hiding them.
        print("Failed to run command: {}".format(e))
|
import logging
import random
import re
import warnings
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from decimal import ROUND_HALF_UP
from functools import lru_cache, partial
from io import BytesIO
from itertools import islice
from math import ceil, sqrt
from pathlib import Path
from subprocess import PIPE, run
from typing import Any, Callable, Dict, Iterable, List, Mapping, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tqdm.auto import tqdm
from lhotse.augmentation import AudioTransform, Resample, Speed, Tempo, Volume
from lhotse.serialization import Serializable
from lhotse.utils import (Decibels, NonPositiveEnergyError, Pathlike, Seconds, SetContainingAnything, SmartOpen,
asdict_nonull, compute_num_samples, exactly_one_not_null, fastcopy, ifnone,
index_by_id_and_check, perturb_num_samples, split_sequence)
# Channel selector: either a single channel index or a list of indices.
Channels = Union[int, List[int]]
# TODO: document the dataclasses like this:
# https://stackoverflow.com/a/3051356/5285891
@dataclass
class AudioSource:
    """
    AudioSource represents audio data that can be retrieved from somewhere.
    Supported sources of audio are currently:
    - 'file' (formats supported by soundfile, possibly multi-channel)
    - 'command' [unix pipe] (must be WAVE, possibly multi-channel)
    - 'url' (any URL type that is supported by "smart_open" library, e.g. http/https/s3/gcp/azure/etc.)
    """
    # One of 'file', 'command' or 'url' (validated in load_audio()).
    type: str
    # Channel indices contained in this source.
    channels: List[int]
    # Path, shell command, or URL, depending on ``type``.
    source: str
    def load_audio(
            self,
            offset: Seconds = 0.0,
            duration: Optional[Seconds] = None,
            force_opus_sampling_rate: Optional[int] = None,
    ) -> np.ndarray:
        """
        Load the AudioSource (from files, commands, or URLs) with soundfile,
        accounting for many audio formats and multi-channel inputs.
        Returns numpy array with shapes: (n_samples,) for single-channel,
        (n_channels, n_samples) for multi-channel.
        Note: The elements in the returned array are in the range [-1.0, 1.0]
        and are of dtype `np.float32`.
        :param offset: start reading at this many seconds into the recording.
        :param duration: read at most this many seconds; None reads to the end.
        :param force_opus_sampling_rate: This parameter is only used when we detect an OPUS file.
            It will tell ffmpeg to resample OPUS to this sampling rate.
        """
        assert self.type in ('file', 'command', 'url')
        # TODO: refactor when another source type is added
        source = self.source
        if self.type == 'command':
            if offset != 0.0 or duration is not None:
                # TODO(pzelasko): How should we support chunking for commands?
                #   We risk being very inefficient when reading many chunks from the same file
                #   without some caching scheme, because we'll be re-running commands.
                warnings.warn('You requested a subset of a recording that is read from disk via a bash command. '
                              'Expect large I/O overhead if you are going to read many chunks like these, '
                              'since every time we will read the whole file rather than its subset.')
            # Run the shell command and treat its stdout as the audio payload.
            source = BytesIO(run(self.source, shell=True, stdout=PIPE).stdout)
            samples, sampling_rate = read_audio(source, offset=offset, duration=duration)
        elif self.type == 'url':
            if offset != 0.0 or duration is not None:
                # TODO(pzelasko): How should we support chunking for URLs?
                #   We risk being very inefficient when reading many chunks from the same file
                #   without some caching scheme, because we'll be re-running commands.
                warnings.warn('You requested a subset of a recording that is read from URL. '
                              'Expect large I/O overhead if you are going to read many chunks like these, '
                              'since every time we will download the whole file rather than its subset.')
            # Download the full object, then decode it from memory.
            with SmartOpen.open(self.source, 'rb') as f:
                source = BytesIO(f.read())
            samples, sampling_rate = read_audio(source, offset=offset, duration=duration)
        else:  # self.type == 'file'
            samples, sampling_rate = read_audio(
                source,
                offset=offset,
                duration=duration,
                force_opus_sampling_rate=force_opus_sampling_rate,
            )
        # explicit sanity check for duration as soundfile does not complain here
        if duration is not None:
            num_samples = samples.shape[0] if len(samples.shape) == 1 else samples.shape[1]
            available_duration = num_samples / sampling_rate
            if available_duration < duration - 1e-3:  # set the allowance as 1ms to avoid float error
                raise ValueError(
                    f'Requested more audio ({duration}s) than available ({available_duration}s)'
                )
        return samples.astype(np.float32)
    def with_path_prefix(self, path: Pathlike) -> 'AudioSource':
        # Only file-based sources have a path to prefix; others are returned as-is.
        if self.type != 'file':
            return self
        return fastcopy(self, source=str(Path(path) / self.source))
    def to_dict(self) -> dict:
        # Serialize to a plain dict, omitting None-valued fields.
        return asdict_nonull(self)
    @staticmethod
    def from_dict(data) -> 'AudioSource':
        # Inverse of to_dict(): rebuild an AudioSource from its dict form.
        return AudioSource(**data)
@dataclass
class Recording:
"""
The :class:`~lhotse.audio.Recording` manifest describes the recordings in a given corpus.
It contains information about the recording, such as its path(s), duration, the number of samples, etc.
It allows to represent multiple channels coming from one or more files.
This manifest does not specify any segmentation information or supervision such as the transcript or the speaker
-- we use :class:`~lhotse.supervision.SupervisionSegment` for that.
Note that :class:`~lhotse.audio.Recording` can represent both a single utterance (e.g., in LibriSpeech)
and a 1-hour session with multiple channels and speakers (e.g., in AMI).
In the latter case, it is partitioned into data suitable for model training using :class:`~lhotse.cut.Cut`.
.. hint::
Lhotse reads audio recordings using `pysoundfile`_ and `audioread`_, similarly to librosa,
to support multiple audio formats. For OPUS files we require ffmpeg to be installed.
.. hint::
Since we support importing Kaldi data dirs, if ``wav.scp`` contains unix pipes,
:class:`~lhotse.audio.Recording` will also handle them correctly.
Examples
A :class:`~lhotse.audio.Recording` can be simply created from a local audio file::
>>> from lhotse import RecordingSet, Recording, AudioSource
>>> recording = Recording.from_file('meeting.wav')
>>> recording
Recording(
id='meeting',
sources=[AudioSource(type='file', channels=[0], source='meeting.wav')],
sampling_rate=16000,
num_samples=57600000,
duration=3600.0,
transforms=None
)
This manifest can be easily converted to a Python dict and serialized to JSON/JSONL/YAML/etc::
>>> recording.to_dict()
{'id': 'meeting',
'sources': [{'type': 'file',
'channels': [0],
'source': 'meeting.wav'}],
'sampling_rate': 16000,
'num_samples': 57600000,
'duration': 3600.0}
Recordings can be also created programatically, e.g. when they refer to URLs stored in S3 or somewhere else::
>>> s3_audio_files = ['s3://my-bucket/123-5678.flac', ...]
>>> recs = RecordingSet.from_recordings(
... Recording(
... id=url.split('/')[-1].replace('.flac', ''),
... sources=[AudioSource(type='url', source=url, channels=[0])],
... sampling_rate=16000,
... num_samples=get_num_samples(url),
... duration=get_duration(url)
... )
... for url in s3_audio_files
... )
It allows reading a subset of the audio samples as a numpy array::
>>> samples = recording.load_audio()
>>> assert samples.shape == (1, 16000)
>>> samples2 = recording.load_audio(offset=0.5)
>>> assert samples2.shape == (1, 8000)
"""
id: str
sources: List[AudioSource]
sampling_rate: int
num_samples: int
duration: Seconds
transforms: Optional[List[Dict]] = None
@staticmethod
def from_file(
path: Pathlike,
recording_id: Optional[str] = None,
relative_path_depth: Optional[int] = None,
force_opus_sampling_rate: Optional[int] = None,
) -> 'Recording':
"""
Read an audio file's header and create the corresponding ``Recording``.
Suitable to use when each physical file represents a separate recording session.
.. caution::
If a recording session consists of multiple files (e.g. one per channel),
it is advisable to create the ``Recording`` object manually, with each
file represented as a separate ``AudioSource`` object.
:param path: Path to an audio file supported by libsoundfile (pysoundfile).
:param recording_id: recording id, when not specified ream the filename's stem ("x.wav" -> "x").
:param relative_path_depth: optional int specifying how many last parts of the file path
should be retained in the ``AudioSource``. By default writes the path as is.
:param force_opus_sampling_rate: when specified, this value will be used as the sampling rate
instead of the one we read from the manifest. This is useful for OPUS files that always
have 48kHz rate and need to be resampled to the real one -- we will perform that operation
"under-the-hood". For non-OPUS files this input is undefined.
:return: a new ``Recording`` instance pointing to the audio file.
"""
path = Path(path)
if path.suffix.lower() == '.opus':
# We handle OPUS as a special case because we might need to force a certain sampling rate.
info = opus_info(path, force_opus_sampling_rate=force_opus_sampling_rate)
elif path.suffix.lower() == '.sph':
# We handle SPHERE as another special case because some old codecs (i.e. "shorten" codec)
# can't be handled by neither pysoundfile nor pyaudioread.
info = sph_info(path)
else:
try:
# Try to parse the file using pysoundfile first.
import soundfile as sf
info = sf.info(str(path))
except:
# Try to parse the file using audioread as a fallback.
info = audioread_info(str(path))
# If both fail, then Python 3 will display both exception messages.
return Recording(
id=recording_id if recording_id is not None else path.stem,
sampling_rate=info.samplerate,
num_samples=info.frames,
duration=info.duration,
sources=[
AudioSource(
type='file',
channels=list(range(info.channels)),
source=(
'/'.join(path.parts[-relative_path_depth:])
if relative_path_depth is not None and relative_path_depth > 0
else str(path)
)
)
]
)
    def to_dict(self) -> dict:
        """Convert this ``Recording`` to a dict, omitting fields whose value is ``None``."""
        return asdict_nonull(self)
@property
def num_channels(self):
return sum(len(source.channels) for source in self.sources)
@property
def channel_ids(self):
return sorted(cid for source in self.sources for cid in source.channels)
    def load_audio(
            self,
            channels: Optional[Channels] = None,
            offset: Seconds = 0.0,
            duration: Optional[Seconds] = None,
    ) -> np.ndarray:
        """
        Read the audio samples from the underlying audio source (path, URL, unix pipe/command).

        :param channels: int or iterable of ints, a subset of channel IDs to read (reads all by default).
        :param offset: seconds, where to start reading the audio (at offset 0 by default).
            Note that it is only efficient for local filesystem files, i.e. URLs and commands will read
            all the samples first and discard the unneeded ones afterwards.
        :param duration: seconds, indicates the total audio time to read (starting from ``offset``).
        :return: a numpy array of audio samples with shape ``(num_channels, num_samples)``.
        """
        # Normalize ``channels`` to a set; "None" means "accept any channel".
        if channels is None:
            channels = SetContainingAnything()
        else:
            channels = frozenset([channels] if isinstance(channels, int) else channels)
            recording_channels = frozenset(self.channel_ids)
            assert channels.issubset(recording_channels), "Requested to load audio from a channel " \
                                                          "that does not exist in the recording: " \
                                                          f"(recording channels: {recording_channels} -- " \
                                                          f"requested channels: {channels})"
        transforms = [AudioTransform.from_dict(params) for params in self.transforms or []]
        # Do a "backward pass" over data augmentation transforms to get the
        # offset and duration for loading a piece of the original audio.
        offset_aug, duration_aug = offset, duration
        for tfn in reversed(transforms):
            offset_aug, duration_aug = tfn.reverse_timestamps(
                offset=offset_aug,
                duration=duration_aug,
                sampling_rate=self.sampling_rate
            )
        samples_per_source = []
        for source in self.sources:
            # Case: source not requested
            if not channels.intersection(source.channels):
                continue
            samples = source.load_audio(
                offset=offset_aug,
                duration=duration_aug,
                force_opus_sampling_rate=self.sampling_rate,
            )
            # Case: two-channel audio file but only one channel requested
            # it might not be optimal to load all channels, but IDK if there's anything we can do about it
            channels_to_remove = [
                idx for idx, cid in enumerate(source.channels)
                if cid not in channels
            ]
            if channels_to_remove:
                samples = np.delete(samples, channels_to_remove, axis=0)
            samples_per_source.append(samples)
        # shape: (n_channels, n_samples)
        audio = np.vstack(samples_per_source)
        # We'll apply the transforms now (if any).
        for tfn in transforms:
            audio = tfn(audio, self.sampling_rate)
        # Transformation chains can introduce small mismatches in the number of samples:
        # we'll fix them here, or raise an error if they exceeded a tolerance threshold.
        audio = assert_and_maybe_fix_num_samples(
            audio,
            offset=offset,
            duration=duration,
            recording=self
        )
        return audio
def _expected_num_samples(self, offset: Seconds, duration: Optional[Seconds]) -> int:
if offset == 0 and duration is None:
return self.num_samples
duration = duration if duration is not None else self.duration - offset
return compute_num_samples(duration, sampling_rate=self.sampling_rate)
    def with_path_prefix(self, path: Pathlike) -> 'Recording':
        """Return a copy of this ``Recording`` with ``path`` applied as a prefix to each of its sources."""
        return fastcopy(self, sources=[s.with_path_prefix(path) for s in self.sources])
def perturb_speed(self, factor: float, affix_id: bool = True) -> 'Recording':
"""
Return a new ``Recording`` that will lazily perturb the speed while loading audio.
The ``num_samples`` and ``duration`` fields are updated to reflect the
shrinking/extending effect of speed.
:param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``Recording.id`` field
by affixing it with "_sp{factor}".
:return: a modified copy of the current ``Recording``.
"""
transforms = self.transforms.copy() if self.transforms is not None else []
transforms.append(Speed(factor=factor).to_dict())
new_num_samples = perturb_num_samples(self.num_samples, factor)
new_duration = new_num_samples / self.sampling_rate
return fastcopy(
self,
id=f'{self.id}_sp{factor}' if affix_id else self.id,
num_samples=new_num_samples,
duration=new_duration,
transforms=transforms
)
def perturb_tempo(self, factor: float, affix_id: bool = True) -> 'Recording':
"""
Return a new ``Recording`` that will lazily perturb the tempo while loading audio.
Compared to speed perturbation, tempo preserves pitch.
The ``num_samples`` and ``duration`` fields are updated to reflect the
shrinking/extending effect of tempo.
:param factor: The tempo will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``Recording.id`` field
by affixing it with "_tp{factor}".
:return: a modified copy of the current ``Recording``.
"""
transforms = self.transforms.copy() if self.transforms is not None else []
transforms.append(Tempo(factor=factor).to_dict())
new_num_samples = perturb_num_samples(self.num_samples, factor)
new_duration = new_num_samples / self.sampling_rate
return fastcopy(
self,
id=f'{self.id}_tp{factor}' if affix_id else self.id,
num_samples=new_num_samples,
duration=new_duration,
transforms=transforms
)
    def perturb_volume(self, factor: float, affix_id: bool = True) -> 'Recording':
        """
        Return a new ``Recording`` that will lazily perturb the volume while loading audio.

        :param factor: The volume scale to be applied (e.g. factor=1.1 means 1.1x louder).
        :param affix_id: When true, we will modify the ``Recording.id`` field
            by affixing it with "_vp{factor}".
        :return: a modified copy of the current ``Recording``.
        """
        transforms = self.transforms.copy() if self.transforms is not None else []
        transforms.append(Volume(factor=factor).to_dict())
        # Volume does not change num_samples/duration, so only id and transforms are updated.
        return fastcopy(
            self,
            id=f'{self.id}_vp{factor}' if affix_id else self.id,
            transforms=transforms
        )
def resample(self, sampling_rate: int) -> 'Recording':
"""
Return a new ``Recording`` that will be lazily resampled while loading audio.
:param sampling_rate: The new sampling rate.
:return: A resampled ``Recording``.
"""
transforms = self.transforms.copy() if self.transforms is not None else []
transforms.append(
Resample(source_sampling_rate=self.sampling_rate, target_sampling_rate=sampling_rate).to_dict()
)
new_num_samples = compute_num_samples(self.duration, sampling_rate, rounding=ROUND_HALF_UP)
# Duration might need an adjustment when doing a non-trivial resampling
# (e.g. 16000 -> 22050), where the resulting number of samples cannot
# correspond to old duration exactly.
new_duration = new_num_samples / sampling_rate
return fastcopy(
self,
duration=new_duration,
num_samples=new_num_samples,
sampling_rate=sampling_rate,
transforms=transforms
)
@staticmethod
def from_dict(data: dict) -> 'Recording':
raw_sources = data.pop('sources')
return Recording(sources=[AudioSource.from_dict(s) for s in raw_sources], **data)
class RecordingSet(Serializable, Sequence[Recording]):
    """
    :class:`~lhotse.audio.RecordingSet` represents a collection of recordings, indexed by recording IDs.
    It does not contain any annotation such as the transcript or the speaker identity --
    just the information needed to retrieve a recording such as its path, URL, number of channels,
    and some recording metadata (duration, number of samples).

    It also supports (de)serialization to/from YAML/JSON/etc. and takes care of mapping between
    rich Python classes and YAML/JSON/etc. primitives during conversion.

    When coming from Kaldi, think of it as ``wav.scp`` on steroids: :class:`~lhotse.audio.RecordingSet`
    also has the information from *reco2dur* and *reco2num_samples*,
    is able to represent multi-channel recordings and read a specified subset of channels,
    and support reading audio files directly, via a unix pipe, or downloading them on-the-fly from a URL
    (HTTPS/S3/Azure/GCP/etc.).

    Examples:

    :class:`~lhotse.audio.RecordingSet` can be created from an iterable of :class:`~lhotse.audio.Recording` objects::

        >>> from lhotse import RecordingSet
        >>> audio_paths = ['123-5678.wav', ...]
        >>> recs = RecordingSet.from_recordings(Recording.from_file(p) for p in audio_paths)

    As well as from a directory, which will be scanned recursively for files with parallel processing::

        >>> recs2 = RecordingSet.from_dir('/data/audio', pattern='*.flac', num_jobs=4)

    It behaves similarly to a ``dict``::

        >>> '123-5678' in recs
        True
        >>> recording = recs['123-5678']
        >>> for recording in recs:
        >>>     pass
        >>> len(recs)
        127

    It also provides some utilities for I/O::

        >>> recs.to_file('recordings.jsonl')
        >>> recs.to_file('recordings.json.gz')  # auto-compression
        >>> recs2 = RecordingSet.from_file('recordings.jsonl')

    Manipulation::

        >>> longer_than_5s = recs.filter(lambda r: r.duration > 5)
        >>> first_100 = recs.subset(first=100)
        >>> split_into_4 = recs.split(num_splits=4)
        >>> shuffled = recs.shuffle()

    And lazy data augmentation/transformation, that requires to adjust some information
    in the manifest (e.g., ``num_samples`` or ``duration``).
    Note that in the following examples, the audio is untouched -- the operations are stored in the manifest,
    and executed upon reading the audio::

        >>> recs_sp = recs.perturb_speed(factor=1.1)
        >>> recs_vp = recs.perturb_volume(factor=2.)
        >>> recs_24k = recs.resample(24000)
    """

    def __init__(self, recordings: Optional[Mapping[str, Recording]] = None) -> None:
        # When no mapping is provided, start with an empty dict.
        self.recordings = ifnone(recordings, {})

    def __eq__(self, other: 'RecordingSet') -> bool:
        # Two sets are equal iff their id -> Recording mappings are equal.
        return self.recordings == other.recordings

    @property
    def is_lazy(self) -> bool:
        """
        Indicates whether this manifest was opened in lazy (read-on-the-fly) mode or not.
        """
        from lhotse.serialization import LazyJsonlIterator
        return isinstance(self.recordings, LazyJsonlIterator)

    @property
    def ids(self) -> Iterable[str]:
        """The IDs of all recordings in this set."""
        return self.recordings.keys()

    @staticmethod
    def from_recordings(recordings: Iterable[Recording]) -> 'RecordingSet':
        """Create a ``RecordingSet`` from an iterable of ``Recording`` (checks for duplicate IDs)."""
        return RecordingSet(recordings=index_by_id_and_check(recordings))

    @staticmethod
    def from_dir(
            path: Pathlike,
            pattern: str,
            num_jobs: int = 1,
            force_opus_sampling_rate: Optional[int] = None,
    ):
        """
        Recursively scan a directory ``path`` for audio files that match the given ``pattern`` and create
        a :class:`.RecordingSet` manifest for them.
        Suitable to use when each physical file represents a separate recording session.

        .. caution::
            If a recording session consists of multiple files (e.g. one per channel),
            it is advisable to create each :class:`.Recording` object manually, with each
            file represented as a separate :class:`.AudioSource` object, and then
            a :class:`RecordingSet` that contains all the recordings.

        :param path: Path to a directory of audio of files (possibly with sub-directories).
        :param pattern: A bash-like pattern specifying allowed filenames, e.g. ``*.wav`` or ``session1-*.flac``.
        :param num_jobs: The number of parallel workers for reading audio files to get their metadata.
        :param force_opus_sampling_rate: when specified, this value will be used as the sampling rate
            instead of the one we read from the manifest. This is useful for OPUS files that always
            have 48kHz rate and need to be resampled to the real one -- we will perform that operation
            "under-the-hood". For non-OPUS files this input does nothing.
        :return: a new ``RecordingSet`` manifest with one ``Recording`` per matched file.
        """
        msg = f'Scanning audio files ({pattern})'
        fn = Recording.from_file
        if force_opus_sampling_rate is not None:
            fn = partial(Recording.from_file, force_opus_sampling_rate=force_opus_sampling_rate)
        if num_jobs == 1:
            # Avoid spawning process for one job.
            return RecordingSet.from_recordings(
                tqdm(
                    map(fn, Path(path).rglob(pattern)),
                    desc=msg
                )
            )
        with ProcessPoolExecutor(num_jobs) as ex:
            return RecordingSet.from_recordings(
                tqdm(
                    ex.map(fn, Path(path).rglob(pattern)),
                    desc=msg
                )
            )

    @staticmethod
    def from_dicts(data: Iterable[dict]) -> 'RecordingSet':
        """Create a ``RecordingSet`` from an iterable of serialized ``Recording`` dicts."""
        return RecordingSet.from_recordings(Recording.from_dict(raw_rec) for raw_rec in data)

    def to_dicts(self) -> Iterable[dict]:
        """Yield each recording serialized to a dict."""
        return (r.to_dict() for r in self)

    def filter(self, predicate: Callable[[Recording], bool]) -> 'RecordingSet':
        """
        Return a new RecordingSet with the Recordings that satisfy the `predicate`.

        :param predicate: a function that takes a recording as an argument and returns bool.
        :return: a filtered RecordingSet.
        """
        return RecordingSet.from_recordings(rec for rec in self if predicate(rec))

    def shuffle(self, rng: Optional[random.Random] = None) -> 'RecordingSet':
        """
        Shuffle the recording IDs in the current :class:`.RecordingSet` and return a shuffled copy of self.

        :param rng: an optional instance of ``random.Random`` for precise control of randomness.
        :return: a shuffled copy of self.
        """
        if rng is None:
            rng = random
        ids = list(self.ids)
        rng.shuffle(ids)
        return RecordingSet(recordings={rid: self[rid] for rid in ids})

    def split(self, num_splits: int, shuffle: bool = False, drop_last: bool = False) -> List['RecordingSet']:
        """
        Split the :class:`~lhotse.RecordingSet` into ``num_splits`` pieces of equal size.

        :param num_splits: Requested number of splits.
        :param shuffle: Optionally shuffle the recordings order first.
        :param drop_last: determines how to handle splitting when ``len(seq)`` is not divisible
            by ``num_splits``. When ``False`` (default), the splits might have unequal lengths.
            When ``True``, it may discard the last element in some splits to ensure they are
            equally long.
        :return: A list of :class:`~lhotse.RecordingSet` pieces.
        """
        return [
            RecordingSet.from_recordings(subset) for subset in
            split_sequence(self, num_splits=num_splits, shuffle=shuffle, drop_last=drop_last)
        ]

    def subset(self, first: Optional[int] = None, last: Optional[int] = None) -> 'RecordingSet':
        """
        Return a new ``RecordingSet`` according to the selected subset criterion.
        Only a single argument to ``subset`` is supported at this time.

        :param first: int, the number of first recordings to keep.
        :param last: int, the number of last recordings to keep.
        :return: a new ``RecordingSet`` with the subset results.
        """
        assert exactly_one_not_null(first, last), "subset() can handle only one non-None arg."
        if first is not None:
            assert first > 0
            if first > len(self):
                logging.warning(f'RecordingSet has only {len(self)} items but first {first} required; '
                                f'not doing anything.')
                return self
            return RecordingSet.from_recordings(islice(self, first))
        if last is not None:
            assert last > 0
            if last > len(self):
                logging.warning(f'RecordingSet has only {len(self)} items but last {last} required; '
                                f'not doing anything.')
                return self
            return RecordingSet.from_recordings(islice(self, len(self) - last, len(self)))

    def load_audio(
            self,
            recording_id: str,
            channels: Optional[Channels] = None,
            offset_seconds: float = 0.0,
            duration_seconds: Optional[float] = None,
    ) -> np.ndarray:
        """Load audio for ``recording_id`` -- see :meth:`Recording.load_audio` for details."""
        return self.recordings[recording_id].load_audio(
            channels=channels,
            offset=offset_seconds,
            duration=duration_seconds
        )

    def with_path_prefix(self, path: Pathlike) -> 'RecordingSet':
        """Return a copy where ``path`` is applied as a prefix to every recording's sources."""
        return RecordingSet.from_recordings(r.with_path_prefix(path) for r in self)

    def num_channels(self, recording_id: str) -> int:
        """Number of channels of the recording with ``recording_id``."""
        return self.recordings[recording_id].num_channels

    def sampling_rate(self, recording_id: str) -> int:
        """Sampling rate of the recording with ``recording_id``."""
        return self.recordings[recording_id].sampling_rate

    def num_samples(self, recording_id: str) -> int:
        """Number of samples of the recording with ``recording_id``."""
        return self.recordings[recording_id].num_samples

    def duration(self, recording_id: str) -> Seconds:
        """Duration (seconds) of the recording with ``recording_id``."""
        return self.recordings[recording_id].duration

    def perturb_speed(self, factor: float, affix_id: bool = True) -> 'RecordingSet':
        """
        Return a new ``RecordingSet`` that will lazily perturb the speed while loading audio.
        The ``num_samples`` and ``duration`` fields are updated to reflect the
        shrinking/extending effect of speed.

        :param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
        :param affix_id: When true, we will modify the ``Recording.id`` field
            by affixing it with "_sp{factor}".
        :return: a ``RecordingSet`` containing the perturbed ``Recording`` objects.
        """
        return RecordingSet.from_recordings(r.perturb_speed(factor=factor, affix_id=affix_id) for r in self)

    def perturb_tempo(self, factor: float, affix_id: bool = True) -> 'RecordingSet':
        """
        Return a new ``RecordingSet`` that will lazily perturb the tempo while loading audio.
        The ``num_samples`` and ``duration`` fields are updated to reflect the
        shrinking/extending effect of tempo.

        :param factor: The tempo will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
        :param affix_id: When true, we will modify the ``Recording.id`` field
            by affixing it with "_tp{factor}".
        :return: a ``RecordingSet`` containing the perturbed ``Recording`` objects.
        """
        return RecordingSet.from_recordings(r.perturb_tempo(factor=factor, affix_id=affix_id) for r in self)

    def perturb_volume(self, factor: float, affix_id: bool = True) -> 'RecordingSet':
        """
        Return a new ``RecordingSet`` that will lazily perturb the volume while loading audio.

        :param factor: The volume scale to be applied (e.g. factor=1.1 means 1.1x louder).
        :param affix_id: When true, we will modify the ``Recording.id`` field
            by affixing it with "_vp{factor}".
        :return: a ``RecordingSet`` containing the perturbed ``Recording`` objects.
        """
        return RecordingSet.from_recordings(r.perturb_volume(factor=factor, affix_id=affix_id) for r in self)

    def resample(self, sampling_rate: int) -> 'RecordingSet':
        """
        Apply resampling to all recordings in the ``RecordingSet`` and return a new ``RecordingSet``.

        :param sampling_rate: The new sampling rate.
        :return: a new ``RecordingSet`` with lazily resampled ``Recording`` objects.
        """
        return RecordingSet.from_recordings(r.resample(sampling_rate) for r in self)

    def __repr__(self) -> str:
        return f'RecordingSet(len={len(self)})'

    def __contains__(self, item: Union[str, Recording]) -> bool:
        # Accept either a recording ID or a Recording object.
        if isinstance(item, str):
            return item in self.recordings
        else:
            return item.id in self.recordings

    def __getitem__(self, recording_id_or_index: Union[int, str]) -> Recording:
        if isinstance(recording_id_or_index, str):
            return self.recordings[recording_id_or_index]
        # ~100x faster than list(dict.values())[index] for 100k elements
        return next(val for idx, val in enumerate(self.recordings.values()) if idx == recording_id_or_index)

    def __iter__(self) -> Iterable[Recording]:
        return iter(self.recordings.values())

    def __len__(self) -> int:
        return len(self.recordings)

    def __add__(self, other: 'RecordingSet') -> 'RecordingSet':
        # On duplicate IDs, entries from ``other`` win (dict-merge semantics).
        return RecordingSet(recordings={**self.recordings, **other.recordings})
class AudioMixer:
    """
    Utility class to mix multiple waveforms into a single one.
    It should be instantiated separately for each mixing session (i.e. each ``MixedCut``
    will create a separate ``AudioMixer`` to mix its tracks).

    It is initialized with a numpy array of audio samples (typically float32 in [-1, 1] range)
    that represents the "reference" signal for the mix.
    Other signals can be mixed to it with different time offsets and SNRs using the
    ``add_to_mix`` method.
    The time offset is relative to the start of the reference signal
    (only positive values are supported).
    The SNR is relative to the energy of the signal used to initialize the ``AudioMixer``.
    """

    def __init__(self, base_audio: np.ndarray, sampling_rate: int):
        """
        :param base_audio: A numpy array with the audio samples for the base signal
            (all the other signals will be mixed to it).
        :param sampling_rate: Sampling rate of the audio.
        """
        # Each track is kept as a separate array; the mono mix is computed on demand.
        self.tracks = [base_audio]
        self.sampling_rate = sampling_rate
        # Energy of the base signal; SNRs passed to ``add_to_mix`` are relative to it.
        self.reference_energy = audio_energy(base_audio)
        # Padding arrays created later use the base signal's dtype.
        self.dtype = self.tracks[0].dtype

    @property
    def unmixed_audio(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (num_tracks, num_samples), where each track is
        zero padded and scaled adequately to the offsets and SNR used in ``add_to_mix`` call.
        """
        return np.vstack(self.tracks)

    @property
    def mixed_audio(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (1, num_samples) - a mono mix of the tracks
        supplied with ``add_to_mix`` calls.
        """
        return np.sum(self.unmixed_audio, axis=0, keepdims=True)

    def add_to_mix(
            self,
            audio: np.ndarray,
            snr: Optional[Decibels] = None,
            offset: Seconds = 0.0,
    ):
        """
        Add audio (only support mono-channel) of a new track into the mix.

        :param audio: An array of audio samples to be mixed in.
        :param snr: Signal-to-noise ratio, assuming `audio` represents noise (positive SNR - lower `audio` energy,
            negative SNR - higher `audio` energy)
        :param offset: How many seconds to shift `audio` in time. For mixing, the signal will be padded before
            the start with low energy values.
        :return:
        """
        assert audio.shape[0] == 1  # TODO: support multi-channels
        assert offset >= 0.0, "Negative offset in mixing is not supported."
        reference_audio = self.tracks[0]
        num_samples_offset = round(offset * self.sampling_rate)
        current_num_samples = reference_audio.shape[1]
        audio_to_add = audio
        # When there is an offset, we need to pad before the start of the audio we're adding.
        if offset > 0:
            audio_to_add = np.hstack([
                np.zeros((1, num_samples_offset), self.dtype),
                audio_to_add
            ])
        incoming_num_samples = audio_to_add.shape[1]
        mix_num_samples = max(current_num_samples, incoming_num_samples)
        # When the existing samples are less than what we anticipate after the mix,
        # we need to pad after the end of the existing audio mixed so far.
        # Since we're keeping every track as a separate entry in the ``self.tracks`` list,
        # we need to pad each of them so that their shape matches when performing the final mix.
        if current_num_samples < mix_num_samples:
            for idx in range(len(self.tracks)):
                padded_audio = np.hstack([
                    self.tracks[idx],
                    np.zeros((1, mix_num_samples - current_num_samples), self.dtype)
                ])
                self.tracks[idx] = padded_audio
        # When the audio we're mixing in is shorter than the anticipated mix length,
        # we need to pad after its end.
        # Note: we're doing that non-efficiently, as we potentially re-allocate numpy arrays twice,
        # during this padding and the offset padding before. If that's a bottleneck, we'll optimize.
        if incoming_num_samples < mix_num_samples:
            audio_to_add = np.hstack([
                audio_to_add,
                np.zeros((1, mix_num_samples - incoming_num_samples), self.dtype)
            ])
        # When SNR is requested, find what gain is needed to satisfy the SNR
        gain = 1.0
        if snr is not None:
            added_audio_energy = audio_energy(audio)
            if added_audio_energy <= 0.0:
                raise NonPositiveEnergyError(
                    f"To perform mix, energy must be non-zero and non-negative (got {added_audio_energy}). "
                )
            target_energy = self.reference_energy * (10.0 ** (-snr / 10))
            # When mixing time-domain signals, we are working with root-power (field) quantities,
            # whereas the energy ratio applies to power quantities. To compute the gain correctly,
            # we need to take a square root of the energy ratio.
            gain = sqrt(target_energy / added_audio_energy)
        self.tracks.append(gain * audio_to_add)
def audio_energy(audio: np.ndarray) -> float:
    """Return the mean squared sample value (average power) of ``audio``."""
    squared = np.square(audio)
    return float(squared.mean())
# ``Any`` is used here because file-like objects don't share a single common base type.
FileObject = Any  # Alias for file-like objects
def read_audio(
        path_or_fd: Union[Pathlike, FileObject],
        offset: Seconds = 0.0,
        duration: Optional[Seconds] = None,
        force_opus_sampling_rate: Optional[int] = None,
) -> Tuple[np.ndarray, int]:
    """
    Read audio samples from a path or a file-like object.

    OPUS and SPHERE paths are dispatched to dedicated readers; everything else is
    first attempted with pysoundfile, falling back to audioread on failure.

    :param path_or_fd: a path or an open file-like object with audio data.
    :param offset: seconds, where to start reading the audio.
    :param duration: seconds, how much audio to read (reads until the end when ``None``).
    :param force_opus_sampling_rate: affects OPUS files only -- see :func:`read_opus`.
    :return: a tuple of (audio samples, sampling rate).
    """
    if isinstance(path_or_fd, (str, Path)) and str(path_or_fd).lower().endswith('.opus'):
        return read_opus(
            path_or_fd,
            offset=offset,
            duration=duration,
            force_opus_sampling_rate=force_opus_sampling_rate,
        )
    elif isinstance(path_or_fd, (str, Path)) and str(path_or_fd).lower().endswith('.sph'):
        return read_sph(
            path_or_fd,
            offset=offset,
            duration=duration
        )
    try:
        import soundfile as sf
        with sf.SoundFile(path_or_fd) as sf_desc:
            sampling_rate = sf_desc.samplerate
            if offset > 0:
                # Seek to the start of the target read
                sf_desc.seek(compute_num_samples(offset, sampling_rate))
            if duration is not None:
                frame_duration = compute_num_samples(duration, sampling_rate)
            else:
                frame_duration = -1  # read all remaining frames
            # Load the target number of frames, and transpose to match librosa form
            return sf_desc.read(frames=frame_duration, dtype=np.float32, always_2d=False).T, sampling_rate
    # Note: previously a bare ``except:`` — that would also swallow
    # KeyboardInterrupt/SystemExit; we only want to catch read/parse failures.
    except Exception:
        # pysoundfile failed (e.g. unsupported format) -- fall back to audioread.
        return _audioread_load(path_or_fd, offset=offset, duration=duration)
class LibsndfileCompatibleAudioInfo(NamedTuple):
    """
    A minimal audio-file description compatible with the subset of ``pysoundfile.info()``
    that we need to create a ``Recording`` manifest, used by the audioread/OPUS/SPHERE readers.
    """
    # Number of audio channels.
    channels: int
    # Total number of frames (samples per channel).
    frames: int
    # Sampling rate in Hz.
    samplerate: int
    # Total duration in seconds.
    duration: float
def audioread_info(path: Pathlike) -> LibsndfileCompatibleAudioInfo:
    """
    Return an audio info data structure that's a compatible subset of ``pysoundfile.info()``
    that we need to create a ``Recording`` manifest.
    """
    import audioread
    # Fully decoding the file and counting samples is the only method that
    # seems reliable across audioread's backends.
    with audioread.audio_open(path, backends=_available_audioread_backends()) as input_file:
        samples = _audioread_load(input_file)[0]
        if samples.ndim == 1:
            total_frames = samples.shape[0]
        else:
            total_frames = samples.shape[1]
        return LibsndfileCompatibleAudioInfo(
            channels=input_file.channels,
            frames=total_frames,
            samplerate=input_file.samplerate,
            duration=total_frames / input_file.samplerate
        )
@lru_cache(maxsize=1)
def _available_audioread_backends():
    """
    Reduces the overhead of ``audioread.audio_open()`` when called repeatedly
    by caching the results of scanning for FFMPEG etc.
    """
    import audioread
    found = audioread.available_backends()
    logging.info(f'Using audioread. Available backends: {found}')
    return found
def _audioread_load(
        path_or_file: Union[Pathlike, FileObject],
        offset: Seconds = 0.0,
        duration: Optional[Seconds] = None,
        dtype=np.float32
):
    """Load an audio buffer using audioread.

    This loads one block at a time, and then concatenates the results.

    This function is based on librosa:
    https://github.com/librosa/librosa/blob/main/librosa/core/audio.py#L180

    :param path_or_file: a path (opened via audioread) or an already-open audioread file object.
    :param offset: seconds, where to start reading.
    :param duration: seconds, how much audio to read (reads until the end when ``None``).
    :param dtype: output sample dtype (default: float32).
    :return: a tuple of (samples, native sampling rate); samples are 1-D for mono
        and ``(num_channels, num_samples)`` otherwise.
    """
    import audioread

    @contextmanager
    def file_handle():
        # Open the file ourselves when given a path; otherwise use the provided object as-is.
        if isinstance(path_or_file, (str, Path)):
            yield audioread.audio_open(path_or_file, backends=_available_audioread_backends())
        else:
            yield path_or_file

    y = []
    with file_handle() as input_file:
        sr_native = input_file.samplerate
        n_channels = input_file.channels
        # Positions below count individual interleaved samples,
        # hence the multiplication by the channel count.
        s_start = int(np.round(sr_native * offset)) * n_channels
        if duration is None:
            s_end = np.inf
        else:
            s_end = s_start + (int(np.round(sr_native * duration)) * n_channels)
        n = 0
        for frame in input_file:
            frame = _buf_to_float(frame, dtype=dtype)
            n_prev = n
            n = n + len(frame)
            if n < s_start:
                # offset is after the current frame
                # keep reading
                continue
            if s_end < n_prev:
                # we're off the end. stop reading
                break
            if s_end < n:
                # the end is in this frame. crop.
                frame = frame[: s_end - n_prev]
            if n_prev <= s_start <= n:
                # beginning is in this frame
                frame = frame[(s_start - n_prev):]
            # tack on the current frame
            y.append(frame)
    if y:
        y = np.concatenate(y)
        if n_channels > 1:
            # De-interleave into shape (num_channels, num_samples).
            y = y.reshape((-1, n_channels)).T
    else:
        y = np.empty(0, dtype=dtype)
    return y, sr_native
def _buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
This function is based on librosa:
https://github.com/librosa/librosa/blob/main/librosa/util/utils.py#L1312
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in ``x``
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1.0 / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = "<i{:d}".format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
# This constant defines by how much our estimation can be mismatched with
# the actual number of samples after applying audio augmentation.
# Chains of augmentation effects (such as resampling, speed perturb) can cause
# difficult-to-predict roundings and return a few samples more/less than we estimate.
# The default tolerance is a quarter of a millisecond
# (the actual number of samples is computed based on the sampling rate).
AUGMENTATION_DURATION_TOLERANCE: Seconds = 0.00025
def assert_and_maybe_fix_num_samples(
        audio: np.ndarray,
        offset: Seconds,
        duration: Optional[Seconds],
        recording: Recording
) -> np.ndarray:
    """
    Check that ``audio`` has the number of samples we expect for this read;
    pad/trim within ``AUGMENTATION_DURATION_TOLERANCE``, or raise otherwise.

    :param audio: loaded samples of shape (num_channels, num_samples).
    :param offset: seconds, where the read started.
    :param duration: seconds, requested read length (``None`` = until the end).
    :param recording: the ``Recording`` the audio was loaded from.
    :return: the (possibly adjusted) audio array.
    :raises ValueError: when the mismatch exceeds the tolerance.
    """
    # When resampling in high sampling rates (48k -> 44.1k)
    # it is difficult to estimate how sox will perform rounding;
    # we will just add/remove one sample to be consistent with
    # what we have estimated.
    # This effect is exacerbated by chaining multiple augmentations together.
    if duration is None:
        effective_duration = recording.duration - offset
    else:
        effective_duration = duration
    expected_num_samples = compute_num_samples(
        duration=effective_duration,
        sampling_rate=recording.sampling_rate
    )
    diff = expected_num_samples - audio.shape[1]
    if diff == 0:
        # Normal condition: the estimate matched exactly.
        return audio
    allowed_diff = int(ceil(AUGMENTATION_DURATION_TOLERANCE * recording.sampling_rate))
    if abs(diff) > allowed_diff:
        raise ValueError("The number of declared samples in the recording diverged from the one obtained "
                         f"when loading audio (offset={offset}, duration={duration}). "
                         f"This could be internal Lhotse's error or a faulty transform implementation. "
                         "Please report this issue in Lhotse and show the "
                         f"following: diff={diff}, audio.shape={audio.shape}, recording={recording}")
    if diff > 0:
        # Too few samples: replicate the trailing ``diff`` samples.
        # Slicing with ``-diff:`` keeps the 2-D shape for np.append.
        return np.append(audio, audio[:, -diff:], axis=1)
    # Too many samples: trim the excess from the end (diff is negative here).
    return audio[:, :diff]
def opus_info(
        path: Pathlike,
        force_opus_sampling_rate: Optional[int] = None
) -> LibsndfileCompatibleAudioInfo:
    """
    Decode an OPUS file (via ffmpeg) to determine its channel count,
    frame count, sampling rate, and duration.
    """
    samples, sr = read_opus(path, force_opus_sampling_rate=force_opus_sampling_rate)
    num_channels, num_frames = samples.shape
    return LibsndfileCompatibleAudioInfo(
        channels=num_channels,
        frames=num_frames,
        samplerate=sr,
        duration=num_frames / sr
    )
def read_opus(
    path: Pathlike,
    offset: Seconds = 0.0,
    duration: Optional[Seconds] = None,
    force_opus_sampling_rate: Optional[int] = None
) -> Tuple[np.ndarray, int]:
    """
    Reads OPUS files using ffmpeg in a subprocess.
    Unlike audioread, correctly supports offsets and durations for reading short chunks.
    Optionally, we can force ffmpeg to resample to the true sampling rate (if we know it up-front).

    :param path: the OPUS file to decode.
    :param offset: start decoding this many seconds into the file.
    :param duration: decode at most this many seconds (``None`` reads to the end).
    :param force_opus_sampling_rate: if set, ask ffmpeg to resample to this rate.
    :return: a tuple of audio samples (channels x samples, float32) and the sampling rate.
    """
    # Build the command as an argument list and run with shell=False: the previous
    # f-string + shell=True version broke on paths containing spaces or quotes
    # and was vulnerable to shell injection.
    cmd = ['ffmpeg']
    sampling_rate = 48000
    # Note: we have to add offset and duration options (-ss and -t) BEFORE specifying the input
    # (-i), otherwise ffmpeg will decode everything and trim afterwards...
    if offset > 0:
        cmd += ['-ss', str(offset)]
    if duration is not None:
        cmd += ['-t', str(duration)]
    # Add the input specifier after offset and duration.
    cmd += ['-i', str(path)]
    # Optionally resample the output.
    if force_opus_sampling_rate is not None:
        cmd += ['-ar', str(force_opus_sampling_rate)]
        sampling_rate = force_opus_sampling_rate
    # Read audio samples directly as float32.
    cmd += ['-f', 'f32le', 'pipe:1']
    # Actual audio reading; stderr is captured to detect the channel layout below.
    proc = run(cmd, stdout=PIPE, stderr=PIPE)
    raw_audio = proc.stdout
    audio = np.frombuffer(raw_audio, dtype=np.float32)
    # Determine if the recording is mono or stereo and decode accordingly.
    channel_string = parse_channel_from_ffmpeg_output(proc.stderr)
    if channel_string == 'stereo':
        # ffmpeg emits interleaved samples (L R L R ...); de-interleave into two rows.
        new_audio = np.empty((2, audio.shape[0] // 2), dtype=np.float32)
        new_audio[0, :] = audio[::2]
        new_audio[1, :] = audio[1::2]
        audio = new_audio
    elif channel_string == 'mono':
        audio = audio.reshape(1, -1)
    else:
        raise NotImplementedError(f'Unknown channel description from ffmpeg: {channel_string}')
    return audio, sampling_rate
def parse_channel_from_ffmpeg_output(ffmpeg_stderr: bytes) -> str:
    """
    Extract the channel layout ('mono' or 'stereo') of the first audio stream
    from ffmpeg's stderr output.

    ffmpeg prints lines such as:
        "Stream #0:0: Audio: pcm_f32le, 16000 Hz, mono, flt, 512 kb/s"
    and sometimes "Stream #0:0(eng): ...", which the regexp below covers.
    """
    stream_pattern = re.compile(r"^\s*Stream #0:0.*: Audio: pcm_f32le.+(mono|stereo).+\s*$")
    for raw_line in ffmpeg_stderr.splitlines():
        try:
            decoded = raw_line.decode()
        except UnicodeDecodeError:
            # Some files embed metadata (e.g. a description) in encodings other
            # than ASCII/UTF-8, such as latin-1, which Python cannot auto-detect.
            # Those lines carry no channel information, so they are skipped.
            continue
        found = stream_pattern.match(decoded)
        if found is not None:
            return found.group(1)
    raise ValueError(
        f"Could not determine the number of channels for OPUS file from the following ffmpeg output "
        f"(shown as bytestring due to avoid possible encoding issues):\n{str(ffmpeg_stderr)}"
    )
def sph_info(path: Pathlike) -> LibsndfileCompatibleAudioInfo:
    """Decode a SPHERE file and report its metadata in a libsndfile-like form."""
    audio, rate = read_sph(path)
    num_channels, num_frames = audio.shape
    return LibsndfileCompatibleAudioInfo(
        channels=num_channels,
        frames=num_frames,
        samplerate=rate,
        duration=num_frames / rate
    )
def read_sph(
    sph_path: Pathlike,
    offset: Seconds = 0.0,
    duration: Optional[Seconds] = None
) -> Tuple[np.ndarray, int]:
    """
    Reads SPH files using sph2pipe in a subprocess.
    Unlike audioread, correctly supports offsets and durations for reading short chunks.

    :param sph_path: the SPHERE file to decode.
    :param offset: start decoding this many seconds into the file.
    :param duration: decode at most this many seconds (``None`` reads to the end).
    :return: a tuple of audio samples (channels x samples, float32) and the sampling rate.
    """
    sph_path = Path(sph_path)
    # sph2pipe's -t option takes a "start:end" range in seconds;
    # an empty end means "until the end of the file".
    time_range = f'{offset}:'
    if duration is not None:
        time_range += f'{round(offset + duration, 5)}'
    # Pass the command as an argument list with shell=False: the previous
    # f-string + shell=True version broke on paths containing spaces or quotes
    # and was vulnerable to shell injection.
    cmd = ['sph2pipe', '-f', 'wav', '-p', '-t', time_range, str(sph_path)]
    # Actual audio reading; check=True surfaces sph2pipe failures immediately.
    proc = BytesIO(run(cmd, check=True, stdout=PIPE, stderr=PIPE).stdout)
    import soundfile as sf
    with sf.SoundFile(proc) as sf_desc:
        audio, sampling_rate = sf_desc.read(dtype=np.float32), sf_desc.samplerate
        # Always return a (channels, samples) array.
        audio = audio.reshape(1, -1) if sf_desc.channels == 1 else audio.T
    return audio, sampling_rate
|
#!/usr/bin/env python
import argparse
from pathlib import Path
import tensorflow as tf
from gym import wrappers
from yarll.environment.registration import make
class ModelRunner(object):
    """
    Run an already learned model.
    Currently only supports one variation of an environment.
    """

    def __init__(self, env, model_directory: str, save_directory: str, **usercfg) -> None:
        """
        :param env: Gym environment to run the model on.
        :param model_directory: directory the saved TensorFlow model is loaded from.
        :param save_directory: directory where results of running the model are saved.
        :param usercfg: configuration overrides (e.g. n_iter, repeat_n_actions).
        """
        super().__init__()
        self.env = env
        self.model_directory = Path(model_directory)
        self.save_directory = Path(save_directory)
        self.config = dict(
            episode_max_length=self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps'),
            repeat_n_actions=1
        )
        self.config.update(usercfg)
        self.session = tf.Session()
        # NOTE(review): TF1 saver APIs expect string paths, not pathlib.Path
        # objects, hence the explicit str() conversions.
        self.saver = tf.train.import_meta_graph(str(self.model_directory / "model.meta"))
        self.saver.restore(self.session, str(self.model_directory / "model"))
        # Tensors exported by the training script via tf.add_to_collection.
        self.action = tf.get_collection("action")[0]
        self.states = tf.get_collection("states")[0]

    def choose_action(self, state):
        """Choose an action."""
        return self.session.run([self.action], feed_dict={self.states: [state]})[0]

    def get_trajectory(self, render: bool = False):
        """
        Run agent-environment loop for one whole episode (trajectory)
        Return dictionary of results
        """
        state = self.env.reset()
        # Initialize so the outer loop is safe even when episode_max_length is 0.
        done = False
        for _ in range(self.config["episode_max_length"]):
            action = self.choose_action(state)
            for _ in range(self.config["repeat_n_actions"]):
                # Bug fix: keep the new observation so the next action is chosen
                # from the current state instead of the initial one.
                state, _, done, _ = self.env.step(action)
                if done:  # Don't continue if episode has already ended
                    break
            if done:
                break
            if render:
                self.env.render()
        return

    def run(self):
        """Replay the model for the configured number of episodes (n_iter)."""
        for _ in range(self.config["n_iter"]):
            self.get_trajectory()
# Command-line interface; defined at module level so main() can access it.
parser = argparse.ArgumentParser()
# Positional arguments: which environment to run and where models/results live.
parser.add_argument("environment", metavar="env", type=str, help="Gym environment to execute the model on.")
parser.add_argument("model_directory", type=str, help="Directory from where model files are loaded.")
parser.add_argument("save_directory", type=str, help="Directory where results of running the model are saved")
parser.add_argument("--iterations", default=100, type=int, help="Number of iterations to run the algorithm.")
def main():
    """Parse CLI arguments, build the environment and replay the saved model."""
    args = parser.parse_args()
    environment = make(args.environment)
    runner = ModelRunner(environment, args.model_directory, args.save_directory, n_iter=args.iterations)
    try:
        # Record run statistics (no videos) into the save directory.
        runner.env = wrappers.Monitor(runner.env, args.save_directory, video_callable=False, force=True)
        runner.run()
    except KeyboardInterrupt:
        # Allow a clean manual stop with Ctrl+C.
        pass

if __name__ == "__main__":
    main()
|
import librosa
import math
import numpy as np
class AudioFeatureExtractor(object):
    """
    Extract spectral features (MFCC or log-magnitude STFT) from an audio file.

    Window size and stride are given in seconds and converted to sample counts
    using the configured sample rate. Features are optionally normalized to
    zero mean and unit variance over the whole utterance.
    """

    def __init__(self,
                 sample_rate=16e3,
                 window_size=0.02,
                 window_stride=0.01,
                 window='hamming',
                 feat_type='mfcc',
                 normalize_audio=True
                 ):
        """
        :param sample_rate: expected sampling rate of input files (Hz).
        :param window_size: analysis window length in seconds.
        :param window_stride: hop between windows in seconds.
        :param window: window function name passed to librosa.
        :param feat_type: 'mfcc', 'stft' or 'fbank' (fbank not implemented yet).
        :param normalize_audio: normalize features to zero mean / unit variance.
        :raises ValueError: for an unknown feat_type.
        """
        self.sample_rate = sample_rate
        self.window_size = window_size
        self.window_stride = window_stride
        self.window = window
        self.normalize_audio = normalize_audio
        self.feat_type = feat_type
        # Convert second-based window parameters to sample counts.
        self.win_length = math.ceil(self.sample_rate * self.window_size)
        self.hop_length = math.ceil(self.sample_rate * self.window_stride)
        if feat_type == 'mfcc':
            self.feat_dim = 40
        elif feat_type == 'stft':
            # One-sided spectrum size: floor(n_fft / 2) + 1.
            self.feat_dim = int(math.floor((sample_rate * window_size) / 2) + 1)
        elif feat_type == 'fbank':
            # Accepted for forward compatibility; __call__ raises until implemented.
            self.feat_dim = None
        else:
            # Previously unknown types were silently ignored, leaving feat_dim
            # unset and causing a confusing AttributeError later.
            raise ValueError("unsupported feat_type: %r" % (feat_type,))

    def __call__(self, audio_path):
        """Load `audio_path` and return a (time, feat_dim) feature matrix."""
        # sr=None preserves the file's native rate. The previous call relied on
        # librosa's default, which resamples everything to 22050 Hz and made the
        # sample-rate check below fail for any other configured rate.
        sample, sr = librosa.load(audio_path, sr=None)
        # Validate with an exception instead of `assert` (asserts vanish under -O).
        if sr != self.sample_rate:
            raise ValueError(
                "sample rate mismatch (%d vs %d)" % (self.sample_rate, sr))
        if self.feat_type == 'mfcc':
            spect = librosa.feature.mfcc(
                y=sample,
                sr=sr,
                n_mfcc=40,
                n_fft=self.win_length,
                hop_length=self.hop_length,
                window=self.window)
        elif self.feat_type == 'stft':
            d = librosa.stft(
                y=sample,
                n_fft=self.win_length,
                hop_length=self.hop_length,
                window=self.window)
            # Log-compressed magnitude spectrogram.
            spect, _ = librosa.magphase(d)
            spect = np.log1p(spect)
        else:
            # 'fbank' (and anything else) previously fell through with `spect`
            # unbound, raising a NameError below; fail explicitly instead.
            raise NotImplementedError(
                "feature type %r is not implemented" % (self.feat_type,))
        if self.normalize_audio:
            mean = spect.mean()
            std = spect.std()
            spect -= mean
            spect /= std
        # Transpose so time is the leading axis.
        return spect.T
|
import threading
import os
# Module-level holder for the single background parsing thread.
# NOTE(review): `global` at module scope is a no-op; the assignment alone suffices.
global _collector_thread
_collector_thread = None
def find(folder, fileName):
print "Search for : " + fileName + " in " + folder
global _collector_thread
if _collector_thread != None:
_collector_thread.stop()
print "GOT HERE"
_collector_thread = ParsingThread([], folder, 30)
_collector_thread.start()
return ""
class ParsingThread(threading.Thread):
def __init__(self, collector, folder, timeout_seconds):
self.collector = collector
self.timeout = timeout_seconds
self.folder = folder
threading.Thread.__init__(self)
def get_javascript_files(self, dir_name, *args):
self.fileList = []
for file in os.listdir(dir_name):
dirfile = os.path.join(dir_name, file)
if os.path.isfile(dirfile):
fileName, fileExtension = os.path.splitext(dirfile)
print fileName
if fileExtension == ".js" and ".min." not in fileName:
self.fileList.append(dirfile)
elif os.path.isdir(dirfile):
self.fileList += self.get_javascript_files(dirfile, *args)
return self.fileList
def run(self):
print "RUN THREAD"
jsfiles = self.get_javascript_files(self.folder)
for file_name in jsfiles:
file_name
def stop(self):
if self.isAlive():
self._Thread__stop()
|
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
# Root URL configuration: admin site plus each app's routes (order matters —
# Django resolves top to bottom). Uploaded media is served from MEDIA_ROOT;
# static() only adds these patterns when DEBUG is enabled.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('agenda/', include('apps.agenda.urls')),
    path('verhuur/', include('apps.rent.urls')),
    path('', include('apps.home.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# *-* encoding utf-8
# Author: kalizar78
from __future__ import print_function
import tensorflow as tf
import numpy as np
#####################################################################
# LSTM Specific initializations using numpy
#####################################################################
# I forgot where I got these initializations from
def _np_ortho_weights(n, m):
    """Return an [n, m] float32 block cut from a random orthogonal matrix.

    An orthogonal basis of size max(n, m) is obtained from the SVD of a
    random Gaussian matrix, then truncated to the requested shape.
    """
    size = max(n, m)
    basis = np.linalg.svd(np.random.randn(size, size))[0]
    return basis.astype(np.float32)[:n, :m]
def _stacked_ortho_weights(n, m, copies):
    """Concatenate `copies` independent [n, m] orthogonal blocks along axis 1."""
    blocks = [_np_ortho_weights(n, m) for _ in range(copies)]
    return np.hstack(blocks)
def _bias_init(celldim, nactivations):
    """Build the LSTM bias vector of shape [nactivations * celldim] (float32).

    All entries are zero except the slice b[celldim:2*celldim] (the forget
    gate in this cell's [i, f, o, g] layout), which is set to 1.0 so the cell
    initially tends to remember its state.

    :param celldim: size of each gate's activation block.
    :param nactivations: number of stacked gate activations (must be > 2).
    :raises ValueError: if nactivations is too small to contain the slice.
    """
    # An `assert` would be stripped under `python -O`; validate explicitly.
    if nactivations <= 2:
        raise ValueError("nactivations must be > 2, got %d" % nactivations)
    b = np.zeros(nactivations * celldim, dtype=np.float32)
    b[celldim:2 * celldim] = 1.0
    return b
#####################################################################
# LSTM Class
# ambiguous dimension will be the number of steps
# batch_size much be specified
#####################################################################
class LSTMCell(object) :
    """
    Basic TF1 LSTM cell: the constructor only declares the weight variables;
    use `inference` (whole sequence via tf.scan) or `inference_step` (single
    step) to build the computation graph.

    Gate order inside the stacked weights/preactivations is [i, f, o, g]
    (see `_step`), matching the forget-gate bias slice set by `_bias_init`.
    """
    def __init__ (self, indim, celldim, batch_size, name, reuse = False) :
        """
        Basic LSTM cell - only declares the variables
        use inference or inference_step to setup inference graph
        Args:
           indim    : input dimension
           celldim  : cell and hidden state dimension
           batch_size : batch size
           name : name prefix for variable scope.
           reuse : whether to reuse variables in the enclosing variable scope.
        """
        # i, f, o, g are computed from one fused matmul, hence the factor of 4.
        self.num_activations = 4
        self.name = name
        self.indim = indim
        self.celldim = celldim
        self.batch_size = batch_size
        with tf.variable_scope(name, reuse = reuse) as scope:
            # non recurrent (input-to-hidden) parameters, Xavier-initialized
            self.W = tf.get_variable('W', shape = [indim, self.num_activations * celldim], dtype = tf.float32,
                                     initializer = tf.contrib.layers.xavier_initializer())
            # recurrent (hidden-to-hidden) parameters: one orthogonal block per gate
            self.U = tf.get_variable('U', initializer = _stacked_ortho_weights(celldim,celldim,self.num_activations))
            # bias: zeros except the forget-gate slice, which starts at 1.0
            self.b = tf.get_variable('b', initializer = _bias_init(celldim, self.num_activations))
            # hidden state and cell memories:
            #self.hstate = tf.get_variable('hstate', initializer = np.zeros((batch_size, celldim), dtype = np.float32))
            #self.cell = tf.get_variable('cell', initializer = np.zeros((batch_size, celldim), dtype = np.float32))
    def get_states(self, scope_name, reuse = False, init = None) :
        """
        Gets cell, states.
        scope_name : name of scope
        reuse : reuse of variable
        init : initializer of shape [batch_size, celldim]
               (if None, a zero state is returned)
        Returns:
          [cell_0, hstate_0]
        """
        if(init is not None) :
            assert(init.shape == (self.batch_size, self.celldim))
            initializer = init
        else :
            initializer = np.zeros((self.batch_size, self.celldim), dtype = np.float32)
        with tf.variable_scope(scope_name, reuse = reuse) as scope:
            c = tf.get_variable('cell', initializer = initializer)
            h = tf.get_variable('hstate', initializer = initializer)
        return [c,h]
    def inference(self, x_t, ch) :
        """ Sets up inference graph over a whole sequence.
        x_t   : [batch, nsteps, indim] tensor
        ch[0] : initial cell
        ch[1] : initial state
        Returns (cell_t, hstate_t), each [nsteps, batch, celldim].
        """
        xt = tf.transpose(x_t, [1,0,2]) # [nsteps, batch, indim]
        # unroll since tf doesn't support np.dot(tensor3, tensor2)
        xt_unroll = tf.reshape(xt, [-1, self.indim])
        # Input contributions for all steps at once: [nsteps, batch, 4*celldim]
        Wxt = tf.reshape(tf.matmul(xt_unroll, self.W) + self.b, [-1, self.batch_size, self.num_activations * self.celldim])
        # Apply dropout to non-recurrent connections here (future)
        # Use tf.scan to apply recurrent connections
        cell_t, hstate_t = tf.scan(self._step,
                                   Wxt, # these get sliced
                                   initializer = [ch[0], ch[1]]) # accumulator initial values (don't get set by scan
        return cell_t, hstate_t
    def inference_step(self, xt, cht) :
        """
        inference step, convenience for stepping once so we don't have to enter tf.scan
        x_t : [batch, indim] tensor
        cht : [seed cell ct, seed state ht]
              ct of shape [batch, celldim]
              ht of shape [batch, celldim]
        """
        # Wxt of shape [batch, num_activations * celldim]
        Wxt = tf.reshape(tf.matmul(xt, self.W) + self.b,
                         [self.batch_size, self.num_activations * self.celldim])
        # Apply dropout to non-recurrent connections here (future)
        # step unit
        c_tp1, h_tp1 = self._step(cht, Wxt)
        return c_tp1, h_tp1
    def _step(self, ch, Wxt) :
        """ LSTM stepping function
        Wxt will be a slice of Wxt above in inference, so a tensor2 of shape [batch, num_activations*celldim]
        ch[0] = previous cell
        ch[1] = previous hstate
        """
        c_tm1 = ch[0]
        h_tm1 = ch[1]
        # Pre-activation
        preact = tf.add(Wxt, tf.matmul(h_tm1, self.U)) # add in recurrent contributions
        # Slice the fused preactivation into the four gates: [i, f, o, g].
        it = tf.nn.sigmoid(tf.slice(preact, [0, 0], [self.batch_size, self.celldim]))
        ft = tf.nn.sigmoid(tf.slice(preact, [0, self.celldim], [self.batch_size, self.celldim]))
        ot = tf.nn.sigmoid(tf.slice(preact, [0, 2*self.celldim], [self.batch_size, self.celldim]))
        gt = tf.nn.tanh  (tf.slice(preact, [0, 3*self.celldim], [self.batch_size, self.celldim]))
        # New cell: gated candidate plus gated previous memory.
        ct = tf.add(tf.multiply(it, gt), tf.multiply(ft, c_tm1))
        # New hidden state: output gate applied to squashed cell.
        ht = tf.multiply(ot, tf.tanh(ct))
        return [ct, ht]
|
# origin https://primes.utm.edu/lists/small/millions/
def reformat_primes(raw):
    """Convert blank-line-separated blocks of space-separated primes into
    newline-prefixed, comma-terminated rows (one row per original block).

    split(" ") deliberately keeps embedded single newlines inside tokens,
    matching the layout of the downloaded list.
    """
    pieces = []
    for block in raw.split("\n\n"):
        tokens = [t for t in block.split(" ") if t != ""]
        pieces.append("\n" + "".join(t + "," for t in tokens))
    return "".join(pieces)

def main():
    # Rewrite primes2.txt in place; `with` guarantees the handles are closed
    # and the output is flushed even on error (the original never closed the
    # write handle and shadowed the builtin `file`).
    with open("primes2.txt", "r") as fh:
        content = fh.read()
    with open("primes2.txt", "w") as fh:
        fh.write(reformat_primes(content))

if __name__ == "__main__":
    main()
from discord.ext import commands
import re
import discord
import random
import typing
import emoji
import unicodedata
import textwrap
import contextlib
import io
import asyncio
import async_tio
import itertools
import os
import base64
import secrets
import utils
from difflib import SequenceMatcher
from discord.ext.commands.cooldowns import BucketType
from jishaku.codeblocks import codeblock_converter
import functools
class Info(commands.Cog):
    "Gives you Information about data you are allowed to access"

    def __init__(self, bot):
        # Bot instance; provides the user/member caches and helpers used below.
        self.bot = bot

    @commands.command(
        help="gives you info about a guild",
        aliases=[
            "server_info",
            "guild_fetch",
            "guild_info",
            "fetch_guild",
            "guildinfo",
        ],
    )
    async def serverinfo(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
        # Fall back to the invoking guild (None when invoked from a DM).
        guild = guild or ctx.guild
        if guild is None:
            await ctx.send("Guild wanted has not been found")
        if guild:
            # Embed construction is delegated to the shared utils helper.
            await utils.guildinfo(ctx, guild)

    @commands.command(
        aliases=["user_info", "user-info", "ui", "whois"],
        brief="a command that gives information on users",
        help="this can work with mentions, ids, usernames, and even full names.",
    )
    async def userinfo(self, ctx, *, user: utils.BetterUserconverter = None):
        user = user or ctx.author
        user_type = "Bot" if user.bot else "User" if isinstance(user, discord.User) else "Member"
        statuses = []
        # Public profile badges rendered via the utils converter (likely to emoji).
        badges = [utils.profile_converter("badges", f) for f in user.public_flags.all()] if user.public_flags else []
        if user.bot:
            badges.append(utils.profile_converter("badges", "bot"))
        if user.system:
            badges.append(utils.profile_converter("badges", "system"))
        if isinstance(user, discord.Member):
            # Member objects carry guild-specific data: nick, join date, roles, presence.
            nickname = user.nick
            joined_guild = f"{discord.utils.format_dt(user.joined_at, style = 'd')}\n{discord.utils.format_dt(user.joined_at, style = 'T')}"
            highest_role = user.top_role
            for name, status in (
                ("Status", user.status),
                ("Desktop", user.desktop_status),
                ("Mobile", user.mobile_status),
                ("Web", user.web_status),
            ):
                statuses.append((name, utils.profile_converter(name.lower(), status)))
        else:
            # Plain User: no guild data. Try to locate a Member in any shared
            # guild so presence information can still be displayed.
            nickname = "None Found"
            joined_guild = "N/A"
            highest_role = "None Found"
            member = discord.utils.find(lambda member: member.id == user.id, self.bot.get_all_members())
            if member:
                for name, status in (
                    ("Status", member.status),
                    ("Desktop", member.desktop_status),
                    ("Mobile", member.mobile_status),
                    ("Web", member.web_status),
                ):
                    statuses.append((name, utils.profile_converter(name.lower(), status)))
        embed = discord.Embed(title=f"{user}", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
        embed.add_field(
            name="User Info: ",
            value=f"**Username**: {user.name} \n**Discriminator**: {user.discriminator} \n**ID**: {user.id}",
            inline=False,
        )
        join_badges: str = "\u0020".join(badges) if badges else "N/A"
        join_statuses = (
            " \n| ".join(f"**{name}**: {value}" for name, value in statuses) if statuses else "**Status**: \nUnknown"
        )
        embed.add_field(
            name="User Info 2:",
            value=f"Type: {user_type} \nBadges: {join_badges} \n**Joined Discord**: {discord.utils.format_dt(user.created_at, style = 'd')}\n{discord.utils.format_dt(user.created_at, style = 'T')}\n {join_statuses}",
            inline=False,
        )
        embed.add_field(
            name="Guild Info:",
            value=f"**Joined Guild**: {joined_guild} \n**Nickname**: {nickname} \n**Highest Role:** {highest_role}",
            inline=False,
        )
        embed.set_image(url=user.display_avatar.url)
        # Paginate the guilds the bot shares with the user.
        guilds_list = utils.grab_mutualguilds(ctx, user)
        pag = commands.Paginator(prefix="", suffix="")
        for g in guilds_list:
            pag.add_line(f"{g}")
        pages = pag.pages or ["None"]
        # Ensure a DM channel exists so the view can deliver results privately.
        if ctx.author.dm_channel is None:
            await ctx.author.create_dm()
        menu = utils.MutualGuildsEmbed(pages, ctx=ctx, disable_after=True)
        view = utils.UserInfoSuper(ctx, menu, ctx.author.dm_channel)
        await ctx.send(
            "Pick a way for Mutual Guilds to be sent to you or not if you really don't the mutualguilds",
            embed=embed,
            view=view,
        )

    @commands.command(brief="uploads your emojis into a Senarc Bin link")
    async def look_at(self, ctx):
        # Only usable in guilds: DMs have no emoji list to dump.
        if isinstance(ctx.message.channel, discord.TextChannel):
            message_emojis = ""
            for x in ctx.guild.emojis:
                message_emojis = message_emojis + " " + str(x) + "\n"
            # Upload the emoji listing to a paste service and return the link.
            paste = await utils.post(self.bot, message_emojis)
            await ctx.send(paste)
        if isinstance(ctx.channel, discord.DMChannel):
            await ctx.send("We can't use that in DMS as it takes emoji regex and puts it into a paste.")

    @commands.command(help="gives the id of the current guild or DM if you are in one.")
    async def guild_get(self, ctx):
        if isinstance(ctx.channel, discord.TextChannel):
            await ctx.send(content=ctx.guild.id)
        if isinstance(ctx.channel, discord.DMChannel):
            # In DMs, report the DM channel id instead.
            await ctx.send(ctx.channel.id)

    @commands.command(brief="a command to tell you the channel id", aliases=["GetChannelId"])
    async def this(self, ctx):
        await ctx.send(ctx.channel.id)

    @commands.command(brief="Gives you mention info don't abuse(doesn't mention tho)")
    async def mention(self, ctx, *, user: utils.BetterUserconverter = None):
        user = user or ctx.author
        # allowed_mentions.none() guarantees no actual ping is delivered.
        await ctx.send(
            f"Discord Mention: {user.mention} \nRaw Mention: {discord.utils.escape_mentions(user.mention)}",
            allowed_mentions=discord.AllowedMentions.none(),
        )

    @commands.cooldown(1, 30, BucketType.user)
    @commands.command(help="fetch invite details")
    async def fetch_invite(self, ctx, *invites: typing.Union[discord.Invite, str]):
        if invites:
            menu = utils.InviteInfoEmbed(invites, ctx=ctx, delete_after=True)
            await menu.send()
        if not invites:
            await ctx.send("Please get actual invites to attempt grab")
            # Don't punish the user with the 30s cooldown for an empty call.
            ctx.command.reset_cooldown(ctx)
        if len(invites) > 50:
            # Excessive lookups risk API ratelimits; notify the bot owner.
            await ctx.send(
                "Reporting using more than 50 invites in this command. This is to prevent ratelimits with the api."
            )
            jdjg = await self.bot.try_user(168422909482762240)
            await self.bot.get_channel(855217084710912050).send(
                f"{jdjg.mention}.\n{ctx.author} causes a ratelimit issue with {len(invites)} invites"
            )

    @commands.command(brief="gives info about a file")
    async def file(self, ctx):
        if not ctx.message.attachments:
            await ctx.send(ctx.message.attachments)
            await ctx.send("no file submitted")
        if ctx.message.attachments:
            embed = discord.Embed(title="Attachment info", color=random.randint(0, 16777215))
            for a in ctx.message.attachments:
                embed.add_field(name=f"ID: {a.id}", value=f"[{a.filename}]({a.url})")
            embed.set_footer(text="Check on the url/urls to get a direct download to the url.")
            await ctx.send(embed=embed, content="\nThat's good")

    @commands.command(
        brief="a command to get the avatar of a user",
        help="using the userinfo technology it now powers avatar grabbing.",
        aliases=["pfp", "av"],
    )
    async def avatar(self, ctx, *, user: utils.BetterUserconverter = None):
        user = user or ctx.author
        embed = discord.Embed(color=random.randint(0, 16777215))
        embed.set_author(name=f"{user.name}'s avatar:", icon_url=user.display_avatar.url)
        embed.set_image(url=user.display_avatar.url)
        embed.set_footer(text=f"Requested by {ctx.author}")
        await ctx.send(embed=embed)

    @commands.command(brief="this is a way to get the nearest channel.")
    async def find_channel(self, ctx, *, args=None):
        if args is None:
            await ctx.send("Please specify a channel")
        if args:
            if isinstance(ctx.channel, discord.TextChannel):
                # Exact name match within the current guild only.
                channel = discord.utils.get(ctx.guild.channels, name=args)
                if channel:
                    await ctx.send(channel.mention)
                if channel is None:
                    await ctx.send("Unforantely we haven't found anything")
            if isinstance(ctx.channel, discord.DMChannel):
                await ctx.send("You can't use it in a DM.")

    @commands.command(brief="a command to get the closest user.")
    async def closest_user(self, ctx, *, args=None):
        if args is None:
            return await ctx.send("please specify a user")
        if args and not self.bot.users:
            return await ctx.send("There are no users cached :(")
        if args:
            # Try exact matches first; fall back to the best fuzzy match
            # (highest SequenceMatcher ratio) over the cached users.
            userNearest = discord.utils.get(self.bot.users, name=args)
            user_nick = discord.utils.get(self.bot.users, display_name=args)
            if userNearest is None:
                userNearest = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.name, args).ratio())[-1]
            if user_nick is None:
                user_nick = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.display_name, args).ratio())[
                    -1
                ]
        if isinstance(ctx.channel, discord.TextChannel):
            # Nicknames only exist per-guild, so only check them in a guild.
            member_list = [x for x in ctx.guild.members if x.nick]
            nearest_server_nick = sorted(member_list, key=lambda x: SequenceMatcher(None, x.nick, args).ratio())[-1]
        if isinstance(ctx.channel, discord.DMChannel):
            nearest_server_nick = "You unfortunately don't get the last value(a nickname) as it's a DM."
        await ctx.send(f"Username : {userNearest} \nDisplay name : {user_nick} \nNickname: {nearest_server_nick}")

    @commands.command(help="gives info on default emoji and custom emojis", name="emoji")
    async def emoji_info(self, ctx, *emojis: typing.Union[utils.EmojiConverter, str]):
        if emojis:
            menu = utils.EmojiInfoEmbed(emojis, ctx=ctx, delete_after=True)
            await menu.send()
        if not emojis:
            await ctx.send("Looks like there was no emojis.")

    @commands.command(brief="gives info on emoji_id and emoji image.")
    async def emoji_id(
        self,
        ctx,
        *,
        emoji: typing.Optional[typing.Union[discord.PartialEmoji, discord.Message, utils.EmojiBasic]] = None,
    ):
        if isinstance(emoji, discord.Message):
            # A message was passed: re-run the emoji converters on its content.
            emoji_message = emoji.content
            emoji = None
            with contextlib.suppress(commands.CommandError, commands.BadArgument):
                emoji = await utils.EmojiBasic.convert(
                    ctx, emoji_message
                ) or await commands.PartialEmojiConverter().convert(ctx, emoji_message)
        if emoji:
            embed = discord.Embed(description=f" Emoji ID: {emoji.id}", color=random.randint(0, 16777215))
            embed.set_image(url=emoji.url)
            await ctx.send(embed=embed)
        else:
            await ctx.send("Not a valid emoji id.")

    @commands.command()
    async def fetch_content(self, ctx, *, args=None):
        if args is None:
            await ctx.send("please send actual text")
        if args:
            # Escape mentions/markdown, then backslash-escape mentions, unicode
            # emoji and custom-emoji patterns so the raw source text is shown.
            args = discord.utils.escape_mentions(args)
            args = discord.utils.escape_markdown(args, as_needed=False, ignore_links=False)
            for x in ctx.message.mentions:
                args = args.replace(x.mention, f"\{x.mention}")
            emojis = emoji.emoji_lis(args)
            emojis_return = [d["emoji"] for d in emojis]
            for x in emojis_return:
                args = args.replace(x, f"\{x}")
            for x in re.findall(r":\w*:\d*", args):
                args = args.replace(x, f"\{x}")
            await ctx.send(f"{args}", allowed_mentions=discord.AllowedMentions.none())

    @commands.command(brief="gives info about a role.", aliases=["roleinfo"])
    async def role_info(self, ctx, *, role: typing.Optional[discord.Role] = None):
        if role:
            # Embed construction is delegated to the shared utils helper.
            await utils.roleinfo(ctx, role)
        if not role:
            await ctx.send(f"The role you wanted was not found.")
class DevTools(commands.Cog):
"Helpful commands for developers in general"
def __init__(self, bot):
self.bot = bot
async def rtfm_lookup(self, url=None, *, args=None):
if not args:
return url
else:
res = await self.bot.session.get(
"https://repi.openrobot.xyz/search_docs",
params={"query": args, "documentation": url},
headers={"Authorization": os.environ["frostiweeb_api"]},
)
results = await res.json()
if not results:
return f"Could not find anything with {args}."
else:
return results
async def rtfm_send(self, ctx, results):
if isinstance(results, str):
await ctx.send(results, allowed_mentions=discord.AllowedMentions.none())
else:
embed = discord.Embed(color=random.randint(0, 16777215))
results = dict(itertools.islice(results.items(), 10))
embed.description = "\n".join(f"[`{result}`]({results.get(result)})" for result in results)
reference = utils.reference(ctx.message)
await ctx.send(embed=embed, reference=reference)
@commands.command(
aliases=["rtd", "rtfs", "rtdm"],
invoke_without_command=True,
brief="a rtfm command that allows you to lookup at any library we support looking up(using selects)",
)
async def rtfm(self, ctx, *, args=None):
rtfm_dictionary = await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY")
view = utils.RtfmChoice(ctx, rtfm_dictionary, timeout=15.0)
await ctx.send(content="Please Pick a library you want to parse", view=view)
await view.wait()
await ctx.trigger_typing()
results = await self.rtfm_lookup(url=view.value, args=args)
await self.rtfm_send(ctx, results)
def charinfo_converter(self, string):
digit = f"{ord(string):x}"
name = unicodedata.name(string, "The unicode was not found")
return f"`\\U{digit:>08}`: {name} - {string} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{digit}>"
@commands.command(brief="Gives you data about charinfo (based on R.danny's command)")
async def charinfo(self, ctx, *, args=None):
if not args:
return await ctx.send("That doesn't help out all :(")
values = "\n".join(map(self.charinfo_converter, set(args)))
content = textwrap.wrap(values, width=2000)
menu = utils.charinfoMenu(content, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to view the rtfm DB")
async def rtfm_view(self, ctx):
rtfm_dictionary = dict(await self.bot.db.fetch("SELECT * FROM RTFM_DICTIONARY"))
pag = commands.Paginator(prefix="", suffix="")
for g in rtfm_dictionary:
pag.add_line(f"{g} : {rtfm_dictionary.get(g)}")
menu = utils.RtfmEmbed(pag.pages, ctx=ctx, delete_after=True)
await menu.send()
@commands.command(brief="a command to autoformat your python code to pep8")
async def pep8(self, ctx):
modal = utils.CodeBlockView(ctx, timeout=180.0)
message = await ctx.send(
"Please Submit the Code Block\nDo you want to use black's line formatter at 120 (i.e. black - l120 .), or just use the default? (i.e black .):",
view=modal,
)
await modal.wait()
if not modal.value:
return await ctx.reply("You need to give it code to work with it.", mention_author=False)
code = codeblock_converter(argument=f"{modal.value}")
if modal.value2 is None or modal.value2 is False:
await message.edit("Default it is.", view=None)
if modal.value is True:
await message.edit("Speacil Formatting at 120 lines it is.")
code_conversion = functools.partial(utils.formatter, code.content, bool(modal.value))
try:
code = await self.bot.loop.run_in_executor(None, code_conversion)
except Exception as e:
return await message.edit(f"Error Ocurred with {e}")
embed = discord.Embed(
title="Reformatted with Black",
description=f"code returned: \n```python\n{code}```",
color=random.randint(0, 16777215),
)
embed.set_footer(text="Make sure you use python code, otherwise it will not work properly.")
await message.edit(embed=embed)
@commands.command(brief="grabs your pfp's image")
async def pfp_grab(self, ctx):
if_animated = ctx.author.display_avatar.is_animated()
save_type = ".gif" if if_animated else ".png"
icon_file = await ctx.author.display_avatar.read()
buffer = io.BytesIO(icon_file)
buffer.seek(0)
# print(len(buffer.getvalue()))
file = discord.File(buffer, filename=f"pfp{save_type}")
try:
await ctx.send(content="here's your avatar:", file=file)
except:
await ctx.send("it looks like it couldn't send the pfp due to the file size.")
@commands.command(brief="Gives info on pypi packages")
async def pypi(self, ctx, *, args=None):
# https://pypi.org/simple/
if args:
pypi_response = await self.bot.session.get(f"https://pypi.org/pypi/{args}/json")
if pypi_response.ok:
pypi_response = await pypi_response.json()
pypi_data = pypi_response["info"]
embed = discord.Embed(
title=f"{pypi_data.get('name') or 'None provided'} {pypi_data.get('version') or 'None provided'}",
url=f"{pypi_data.get('release_url') or 'None provided'}",
description=f"{pypi_data.get('summary') or 'None provided'}",
color=random.randint(0, 16777215),
)
embed.set_thumbnail(url="https://i.imgur.com/oP0e7jK.png")
embed.add_field(
name="**Author Info**",
value=f"**Author Name:** {pypi_data.get('author') or 'None provided'}\n**Author Email:** {pypi_data.get('author_email') or 'None provided'}",
inline=False,
)
embed.add_field(
name="**Package Info**",
value=f"**Download URL**: {pypi_data.get('download_url') or 'None provided'}\n**Documentation URL:** {pypi_data.get('docs_url') or 'None provided'}\n**Home Page:** {pypi_data.get('home_page') or 'None provided'}\n**Keywords:** {pypi_data.get('keywords') or 'None provided'}\n**License:** {pypi_data.get('license') or 'None provided'}",
inline=False,
)
await ctx.send(embed=embed)
else:
await ctx.send(
f"Could not find package **{args}** on pypi.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
@commands.command(brief="make a quick bot invite with 0 perms")
async def invite_bot(self, ctx, *, user: typing.Optional[discord.User] = None):
user = user or ctx.author
if not user.bot:
return await ctx.send("That's not a legit bot")
invite = discord.utils.oauth_url(client_id=user.id, scopes=("bot",))
slash_invite = discord.utils.oauth_url(client_id=user.id)
view = discord.ui.View()
view.add_item(
discord.ui.Button(label=f"{user.name}'s Normal Invite", url=invite, style=discord.ButtonStyle.link)
)
view.add_item(
discord.ui.Button(
label=f"{user.name}'s Invite With Slash Commands", url=slash_invite, style=discord.ButtonStyle.link
)
)
await ctx.send(f"Invite with slash commands and the bot scope or only with a bot scope:", view=view)
@commands.command(brief="gets you a guild's icon", aliases=["guild_icon"])
async def server_icon(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if not guild:
return await ctx.send("no guild to get the icon of.")
await ctx.send(f"{guild.icon.url if guild.icon else 'No Url for This Guild, I am sorry dude :('}")
@commands.command(brief="some old fooz command..")
async def fooz(self, ctx, *, args=None):
if not args:
await ctx.send("success")
if args:
await ctx.send("didn't use it properly :(")
@commands.command(brief="puts the message time as a timestamp")
async def message_time(self, ctx):
embed = discord.Embed(title="Message Time", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)
embed.set_footer(text=f"{ctx.message.id}")
await ctx.send(content=f"Only here cause JDJG Bot has it and why not have it here now.", embed=embed)
@commands.command(brief="converts info about colors for you.", invoke_without_command=True)
async def color(self, ctx, *, color: utils.ColorConverter = None):
if not color:
return await ctx.send("you need to give me a color to use.")
await ctx.send(f"Hexadecimal: {color} \nValue : {color.value} \nRGB: {color.to_rgb()}")
@commands.command(brief="a command that tells a user creation time.")
async def created_at(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
creation_info = f"{discord.utils.format_dt(user.created_at, style = 'd')}\n{discord.utils.format_dt(user.created_at, style = 'T')}"
await ctx.send(
f"\nName : {user}\nMention : {user.mention} was created:\n{creation_info}\nRaw Version: ```{creation_info}```",
allowed_mentions=discord.AllowedMentions.none(),
)
@commands.command(brief="a command that makes a fake user id based on the current time.")
async def fake_user_id(self, ctx):
await ctx.send(f"User id: {utils.generate_snowflake()}")
    @commands.command(brief="gives information on snowflakes")
    async def snowflake_info(self, ctx, *, snowflake: typing.Optional[utils.ObjectPlus] = None):
        """Decode a Discord snowflake into creation time, worker, process and increment."""
        if not snowflake:
            await ctx.send(
                "you either returned nothing or an invalid snowflake now going to the current time for information."
            )
        # change objectplus convert back to the before(discord.Object), same thing with utls.ObjectPlus, if edpy adds my pull request into the master.
        # Fallback object built from the current time, used when no valid snowflake was given.
        generated_time = await utils.ObjectPlusConverter().convert(ctx, argument=f"{int(utils.generate_snowflake())}")
        snowflake = snowflake or generated_time
        embed = discord.Embed(title="โ๏ธ SnowFlake Info:", color=5793266)
        embed.add_field(
            name="Created At:",
            value=f"{discord.utils.format_dt(snowflake.created_at, style = 'd')}\n{discord.utils.format_dt(snowflake.created_at, style = 'T')}",
        )
        embed.add_field(name="Worker ID:", value=f"{snowflake.worker_id}")
        embed.add_field(name="Process ID:", value=f"{snowflake.process_id}")
        embed.add_field(name="Increment:", value=f"{snowflake.increment_id}")
        embed.set_footer(text=f"Snowflake ID: {snowflake.id}")
        await ctx.send(embed=embed)
@commands.command(brief="Generates a fake token from the current time")
async def fake_token(self, ctx):
object = discord.Object(utils.generate_snowflake())
first_encoded = base64.b64encode(f"{object.id}".encode())
first_bit = first_encoded.decode()
timestamp = int(object.created_at.timestamp() - 129384000)
d = timestamp.to_bytes(4, "big")
second_bit_encoded = base64.standard_b64encode(d)
second_bit = second_bit_encoded.decode().rstrip("==")
last_bit = secrets.token_urlsafe(20)
embed = discord.Embed(
title=f"Newly Generated Fake Token",
description=f"ID: ``{object.id}``\nCreated at : \n{discord.utils.format_dt(object.created_at, style = 'd')}\n{discord.utils.format_dt(object.created_at, style = 'T')}",
)
embed.add_field(name="Generated Token:", value=f"``{first_bit}.{second_bit}.{last_bit}``")
embed.set_thumbnail(url=ctx.author.display_avatar.url)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.send("We generated a fake token :clap::", embed=embed)
    @commands.cooldown(1, 60, BucketType.user)
    @commands.command(brief="makes a request to add a bot to the test guild")
    async def addbot(self, ctx, *, user: typing.Optional[discord.User] = None):
        """Collect a reason via a view, then forward a bot-addition request to the maintainers."""
        user = user or ctx.author
        if not user.bot:
            # Refund the cooldown on invalid usage so the user can retry immediately.
            ctx.command.reset_cooldown(ctx)
            return await ctx.send("Please Use A **Bot** ID, not a **User** ID.")
        modal = utils.AddBotView(ctx, timeout=180.0)
        message = await ctx.send("Please Tell us the reason you want to add your bot to the Test Guild:", view=modal)
        await modal.wait()
        if modal.value is None:
            ctx.command.reset_cooldown(ctx)
            return await message.edit("Provide a reason why you want your bot added to your guild")
        # The requester must already be a member of the hard-coded test guild.
        guild = self.bot.get_guild(438848185008390158)
        member = await self.bot.try_member(guild, ctx.author.id)
        if member is None:
            view = discord.ui.View()
            view.add_item(
                discord.ui.Button(
                    label=f"Test Guild Invite",
                    url="https://discord.gg/hKn8qgCDzK",
                    style=discord.ButtonStyle.link,
                    row=1,
                )
            )
            return await message.edit(
                "Make sure to join the guild linked soon... then rerun the command. If you are in the guild contact the owner(the owner is listed in the owner command)",
                view=view,
            )
        embed = discord.Embed(
            title="Bot Request",
            colour=discord.Colour.blurple(),
            description=f"reason: \n{modal.value}\n\n[Invite URL]({discord.utils.oauth_url(client_id = user.id, scopes=('bot',))})",
            timestamp=ctx.message.created_at,
        )
        embed.add_field(name="Author", value=f"{ctx.author} (ID: {ctx.author.id})", inline=False)
        embed.add_field(name="Bot", value=f"{user} (ID: {user.id})", inline=False)
        embed.set_footer(text=ctx.author.id)
        embed.set_author(name=user.id, icon_url=user.display_avatar.with_format("png"))
        # Ping both maintainers in the request channel with the full details.
        jdjg = self.bot.get_user(168422909482762240)
        benitz = self.bot.get_user(529499034495483926)
        await self.bot.get_channel(816807453215424573).send(content=f"{jdjg.mention} {benitz.mention}", embed=embed)
        await ctx.reply(
            f"It appears adding your bot worked. \nIf you leave your bot will be kicked, unless you have an alt there, a friend, etc. \n(It will be kicked to prevent raiding and taking up guild space if you leave). \nYour bot will be checked out. {jdjg} will then determine if your bot is good to add to the guild. Make sure to open your Dms to JDJG, so he can dm you about the bot being added. \nIf you don't add him, your bot will be denied."
        )
    @commands.command(
        brief="a command that takes a url and sees if it's an image (requires embed permissions at the moment)."
    )
    async def image_check(self, ctx):
        """Count how many of the invoking message's embeds resolved to images."""
        await ctx.send(
            "Please wait for discord to edit your message, if it does error about not a valid image, please send a screenshot of your usage and the bot's message."
        )
        # Give Discord time to unfurl the URLs in the message into embeds.
        await asyncio.sleep(5)
        images = list(filter(lambda e: e.type == "image", ctx.message.embeds))
        if not images or not ctx.message.embeds:
            return await ctx.send(
                "you need to pass a url with an image, if you did, then please run again. This is a discord issue, and I do not want to wait for discord to change its message."
            )
        await ctx.send(f"You have {len(images)} / {len(ctx.message.embeds)} links that are valid images.")
@commands.command(brief="Gives info on npm packages")
async def npm(self, ctx, *, args=None):
if args:
npm_response = await self.bot.session.get(f"https://registry.npmjs.com/{args}")
if npm_response.ok:
npm_response = await npm_response.json()
data = utils.get_required_npm(npm_response)
await ctx.send(embed=utils.npm_create_embed(data))
else:
await ctx.send(
f"Could not find package **{args}** on npm.", allowed_mentions=discord.AllowedMentions.none()
)
else:
await ctx.send("Please look for a library to get the info of.")
    @commands.cooldown(1, 30, BucketType.user)
    @commands.command(
        brief="runs some code in a sandbox(based on Soos's Run command)", aliases=["eval", "run", "sandbox"]
    )
    async def console(self, ctx, *, code: codeblock_converter = None):
        """Execute a code block on tio.run and report its output and exit status."""
        if not code:
            return await ctx.send("You need to give me some code to use, otherwise I can not determine what it is.")
        if not code.language:
            return await ctx.send("You Must provide a language to use")
        if not code.content:
            return await ctx.send("No code provided")
        # tio.run client bound to the bot's shared HTTP session.
        tio = await async_tio.Tio(session=self.bot.session)
        output = await tio.execute(f"{code.content}", language=f"{code.language}")
        # Long output goes to a paste service instead of the embed body.
        text_returned = (
            f"```{code.language}\n{output}```"
            if len(f"{output}") < 200
            else await utils.post(self.bot, code=f"{output}")
        )
        embed = discord.Embed(
            title=f"Your code exited with code {output.exit_status}", description=f"{text_returned}", color=242424
        )
        embed.set_author(name=f"{ctx.author}", icon_url=ctx.author.display_avatar.url)
        embed.set_footer(text="Powered by Tio.run")
        await ctx.send(content="I executed your code in a sandbox", embed=embed)
async def setup(bot):
    """Extension entry point: register this file's cogs on the bot."""
    for cog_cls in (Info, DevTools):
        await bot.add_cog(cog_cls(bot))
|
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'nonassocINleft+-left*/rightUMINUSrightUPLUSright^DEL EQUATION FLOAT IMPORT IN RAWSTR RESET SI SOLVE STRING UNIVARIATE_FN VARIABLESstart : statement\n | command \';\'\n | command \n |\n command : DEL STRING\n | VARIABLES\n | IMPORT RAWSTR \n | RESET\n | SOLVE to_solve\n | EQUATION RAWSTRto_solve : STRING\n | to_solve \',\' STRING\n statement : STRING "=" expression\n | STRING "=" expression \';\'\n statement : expression\n | expression \';\'\n expression : expression \'+\' expression\n | expression \'-\' expression\n | expression \'*\' expression\n | expression \'/\' expression\n | expression \'^\' expression\n | expression IN expression \n | expression IN SIexpression : \'-\' expression %prec UMINUSexpression : \'+\' expression %prec UPLUSexpression : \'(\' expression \')\'expression : FLOATexpression : STRINGexpression : UNIVARIATE_FN \'(\' expression \')\' '
_lr_action_items = {'$end':([0,1,2,3,4,5,7,9,15,17,19,26,27,28,29,30,31,32,33,36,37,38,39,40,41,42,43,45,47,48,49,],[-4,0,-1,-3,-28,-15,-6,-8,-27,-2,-16,-5,-7,-9,-11,-10,-25,-28,-24,-13,-17,-18,-19,-20,-21,-22,-23,-26,-14,-12,-29,]),'STRING':([0,6,10,12,13,14,18,20,21,22,23,24,25,35,44,],[4,26,29,32,32,32,32,32,32,32,32,32,32,32,48,]),'DEL':([0,],[6,]),'VARIABLES':([0,],[7,]),'IMPORT':([0,],[8,]),'RESET':([0,],[9,]),'SOLVE':([0,],[10,]),'EQUATION':([0,],[11,]),'-':([0,4,5,12,13,14,15,18,20,21,22,23,24,25,31,32,33,34,35,36,37,38,39,40,41,42,43,45,46,49,],[13,-28,21,13,13,13,-27,13,13,13,13,13,13,13,-25,-28,-24,21,13,21,-17,-18,-19,-20,-21,21,-23,-26,21,-29,]),'+':([0,4,5,12,13,14,15,18,20,21,22,23,24,25,31,32,33,34,35,36,37,38,39,40,41,42,43,45,46,49,],[12,-28,20,12,12,12,-27,12,12,12,12,12,12,12,-25,-28,-24,20,12,20,-17,-18,-19,-20,-21,20,-23,-26,20,-29,]),'(':([0,12,13,14,16,18,20,21,22,23,24,25,35,],[14,14,14,14,35,14,14,14,14,14,14,14,14,]),'FLOAT':([0,12,13,14,18,20,21,22,23,24,25,35,],[15,15,15,15,15,15,15,15,15,15,15,15,]),'UNIVARIATE_FN':([0,12,13,14,18,20,21,22,23,24,25,35,],[16,16,16,16,16,16,16,16,16,16,16,16,]),';':([3,4,5,7,9,15,26,27,28,29,30,31,32,33,36,37,38,39,40,41,42,43,45,48,49,],[17,-28,19,-6,-8,-27,-5,-7,-9,-11,-10,-25,-28,-24,47,-17,-18,-19,-20,-21,-22,-23,-26,-12,-29,]),'=':([4,],[18,]),'*':([4,5,15,31,32,33,34,36,37,38,39,40,41,42,43,45,46,49,],[-28,22,-27,-25,-28,-24,22,22,22,22,-19,-20,-21,22,-23,-26,22,-29,]),'/':([4,5,15,31,32,33,34,36,37,38,39,40,41,42,43,45,46,49,],[-28,23,-27,-25,-28,-24,23,23,23,23,-19,-20,-21,23,-23,-26,23,-29,]),'^':([4,5,15,31,32,33,34,36,37,38,39,40,41,42,43,45,46,49,],[-28,24,-27,24,-28,24,24,24,24,24,24,24,24,24,-23,-26,24,-29,]),'IN':([4,5,15,31,32,33,34,36,37,38,39,40,41,42,43,45,46,49,],[-28,25,-27,-25,-28,-24,25,25,-17,-18,-19,-20,-21,25,-23,-26,25,-29,]),'RAWSTR':([8,11,],[27,30,]),')':([15,31,32,33,34,37,38,39,40,41,42,43,45,46,49,],[-27,-25,-28,-24,45,-17,-18,-19,-20,-21,-22,-23,-26,49,-29,]),'SI':([25,],
[43,]),',':([28,29,48,],[44,-11,-12,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'start':([0,],[1,]),'statement':([0,],[2,]),'command':([0,],[3,]),'expression':([0,12,13,14,18,20,21,22,23,24,25,35,],[5,31,33,34,36,37,38,39,40,41,42,46,]),'to_solve':([10,],[28,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> start","S'",1,None,None,None),
('start -> statement','start',1,'p_start_statement','parsing.py',86),
('start -> command ;','start',2,'p_start_statement','parsing.py',87),
('start -> command','start',1,'p_start_statement','parsing.py',88),
('start -> <empty>','start',0,'p_start_statement','parsing.py',89),
('command -> DEL STRING','command',2,'p_start_command','parsing.py',95),
('command -> VARIABLES','command',1,'p_start_command','parsing.py',96),
('command -> IMPORT RAWSTR','command',2,'p_start_command','parsing.py',97),
('command -> RESET','command',1,'p_start_command','parsing.py',98),
('command -> SOLVE to_solve','command',2,'p_start_command','parsing.py',99),
('command -> EQUATION RAWSTR','command',2,'p_start_command','parsing.py',100),
('to_solve -> STRING','to_solve',1,'p_to_solve','parsing.py',128),
('to_solve -> to_solve , STRING','to_solve',3,'p_to_solve','parsing.py',129),
('statement -> STRING = expression','statement',3,'p_statement_assign','parsing.py',140),
('statement -> STRING = expression ;','statement',4,'p_statement_assign','parsing.py',141),
('statement -> expression','statement',1,'p_statement_expr','parsing.py',150),
('statement -> expression ;','statement',2,'p_statement_expr','parsing.py',151),
('expression -> expression + expression','expression',3,'p_expression_binop','parsing.py',159),
('expression -> expression - expression','expression',3,'p_expression_binop','parsing.py',160),
('expression -> expression * expression','expression',3,'p_expression_binop','parsing.py',161),
('expression -> expression / expression','expression',3,'p_expression_binop','parsing.py',162),
('expression -> expression ^ expression','expression',3,'p_expression_binop','parsing.py',163),
('expression -> expression IN expression','expression',3,'p_expression_binop','parsing.py',164),
('expression -> expression IN SI','expression',3,'p_expression_binop','parsing.py',165),
('expression -> - expression','expression',2,'p_expression_uminus','parsing.py',199),
('expression -> + expression','expression',2,'p_expression_uplus','parsing.py',204),
('expression -> ( expression )','expression',3,'p_expression_group','parsing.py',209),
('expression -> FLOAT','expression',1,'p_expression_number','parsing.py',214),
('expression -> STRING','expression',1,'p_expression_name','parsing.py',226),
('expression -> UNIVARIATE_FN ( expression )','expression',4,'p_expression_func','parsing.py',244),
]
|
import numpy as np
from tqdm import tqdm
def angle_to_state(angle):
    """Map an angle in radians to one of 30 discrete state indices."""
    fraction = ((angle + np.pi) / (2 * np.pi)) % 1  # normalized position in [0, 1)
    return int(fraction * 30)
def vel(theta, theta_0=0, theta_dead=np.pi / 12):
    """Velocity factor: near zero close to theta_0, approaching 1 far from it."""
    deviation = theta - theta_0
    return 1 - np.exp(-deviation ** 2 / theta_dead)


def rew(theta, theta_0=0, theta_dead=np.pi / 12):
    """Reward: the velocity factor scaled by cos(theta)."""
    return np.cos(theta) * vel(theta, theta_0, theta_dead)
# --- Baseline: random policy on the open-sea task ---
random_ys = []
for episode in tqdm(range(100), desc='Running: Random agent on open sea task'): # run for 100 episodes
    angle = 0 # always start with angle 0
    y = 0
    for i in range(200):
        a = np.random.choice(range(2)) # Sample a random action
        out = [-0.1, 0.1][a] # Get the change in angle as a result of the selected angle
        y += rew(angle + out)
        # Update the angle
        angle += out
    random_ys.append(y)
# --- TD agent with an average-reward baseline (rho) ---
Q = np.zeros((30, 2)) # Initialization of the Q-values with zeros
# There are 30 angle states and 2 actions
rho = 0 # Initialize the average reward to 0
td_ys = []
for episode in tqdm(range(500), desc='Running: Train agent on open sea task'): # run for 500 episodes
    angle = 0 # always start with angle 0
    y = 0
    for i in range(200):
        state = angle_to_state(angle)
        p = np.exp(Q[state]) / np.sum(np.exp(Q[state])) # Action selection using softmax
        a = np.random.choice(range(2), p=p) # Sample the action from the softmax distribution
        out = [-0.1, 0.1][a] # Get the change in angle as a result of the selected angle
        new_state = angle_to_state(angle + out)
        y += rew(angle + out)
        # Calculate the prediction error
        delta = rew(angle + out) - rho + Q[new_state].max() - Q[state, a]
        # Update the average reward
        rho += 0.1 * (rew(angle + out) - rho)
        # Update the Q-value
        Q[state, a] += 0.1 * delta
        # Update the angle
        angle += out
    td_ys.append(y)
# --- Report mean/std of episodic return over the last 100 episodes ---
random_mean = np.mean(random_ys[-100:])
random_std = np.std(random_ys[-100:])
td_mean = np.mean(td_ys[-100:])
td_stq = np.std(td_ys[-100:])
print('Results from last 100 episodes')
print('| ===== agent ===== | ===== mean ===== | ===== std ===== |')
print(f'{"| Random":<20}| {random_mean:<17.2f}| {random_std:<16.2f}|')
print(f'{"| Trained":<20}| {td_mean:<17.2f}| {td_stq:<16.2f}|')
|
import matplotlib.pyplot as plt
import networkx as nx
def graph(tmp_graph):
    """Plots networkx graph."""
    # Collect node labels in index order so the drawing can be decoded.
    labels = [
        tmp_graph.nodes[node_idx]['label']
        for node_idx in range(len(tmp_graph.nodes))]
    nx.draw_networkx(tmp_graph)
    # Print an index -> label mapping alongside the plot.
    print(dict(enumerate(labels)))
def similarity_matrix(matrix):
    """Plots similarity matrix."""
    # NOTE(review): fig/ax are unused afterwards; kept so a fresh figure exists
    # before the pyplot state-machine calls below draw onto it — confirm intent.
    fig, ax = plt.subplots()
    plt.imshow(matrix,)
    plt.xlabel('Macromolecules', fontsize=18)
    plt.ylabel('Macromolecules', fontsize=18)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    # Limit both axes to ~6 tick marks for readability.
    plt.locator_params(axis='both', nbins=6)
    cbar = plt.colorbar()
    cbar.ax.tick_params(labelsize=16)
    cbar.set_label('Similarity', size=18)
    plt.show()
|
import turtle as t
import random
te = t. Turtle()
te. shape("turtle")  # chaser turtle (red), starts at the top
te. color("red")
te. speed(0)
te. up()
te. goto(0, 200)
ts = t. Turtle()  # food (green circle), starts at the bottom
ts. shape("circle")
ts. color("green")
ts. speed(0)
ts. up()
ts. goto(0, -200)
def turn_right():
    # Point the player (default turtle) east: heading 0 degrees.
    t. seth(0)
def turn_up():
    # Point the player north: heading 90 degrees.
    t. seth(90)
def turn_left():
    # Point the player west: heading 180 degrees.
    t. seth(180)
def turn_down():
    # Point the player south: heading 270 degrees.
    t. seth(270)
def play():
t. fd(10)
ang = te.towards(t. pos())
te. seth(ang)
te. fd(9)
if t. distance(ts) < 12:
star_x =
|
from output.models.nist_data.atomic.positive_integer.schema_instance.nistschema_sv_iv_atomic_positive_integer_min_exclusive_5_xsd.nistschema_sv_iv_atomic_positive_integer_min_exclusive_5 import NistschemaSvIvAtomicPositiveIntegerMinExclusive5
__all__ = [
"NistschemaSvIvAtomicPositiveIntegerMinExclusive5",
]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 15:13:36 2021
@author: RWint
"""
import pandas as pd
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def preproc_exprs_matrix(exprs_matrix, exprs_percentile):
    """Accepts expression table and filters the genes in the top and bottom X percentile of expression.

    Parameters
    ----------
    exprs_matrix : str
        Path to a CSV or TSV file whose first two columns are sequence IDs and
        expression values (the delimiter is auto-detected).
    exprs_percentile : float
        Fraction (e.g. 0.1) defining the bottom and top expression cut-offs.

    Returns
    -------
    pandas.DataFrame
        Rows above the upper cut-off labelled 'High' and below the lower
        cut-off labelled 'Low' in a new 'Exprs_Level' column (High rows first).
    """
    # Quick delimiter detection: read a few rows as TSV; a single resulting
    # column means the file is actually comma-separated.
    sample = pd.read_table(exprs_matrix, sep='\t', nrows=3)
    if sample.shape[1] > 1:
        df_exprs = pd.read_table(exprs_matrix, sep='\t')
    else:
        df_exprs = pd.read_table(exprs_matrix, sep=',')
    del sample
    # Checkpoint: table must have at least the ID and expression columns.
    print(df_exprs.shape, '\n\n')
    assert df_exprs.shape[1] > 1, 'SizeError: Expression table must have at least 2 columns.'
    # Retain only the first two columns (ID, expression) to reduce memory.
    cols = df_exprs.columns
    df_exprs = df_exprs[cols[:2]]
    # Ensure correct dtypes: IDs as strings, expression as float.
    df_exprs[cols[0]] = df_exprs[cols[0]].astype(str)
    df_exprs[cols[1]] = df_exprs[cols[1]].astype(float)
    # BUG FIX: sort_values returns a new frame; the result was previously discarded.
    df_exprs = df_exprs.sort_values(by=cols[1], ascending=True)
    # Remove zero-valued expression or NaN rows.
    df_exprs = df_exprs[df_exprs[cols[1]] != 0.0].dropna()
    # Determine the quantile cut-offs for the bottom/top slices.
    lower = df_exprs[cols[1]].quantile(exprs_percentile)
    upper = df_exprs[cols[1]].quantile(1 - exprs_percentile)
    # .copy() so the label-column assignment below does not trigger
    # SettingWithCopyWarning on a view of df_exprs.
    bottom = df_exprs[df_exprs[cols[1]] < lower].copy()
    top = df_exprs[df_exprs[cols[1]] > upper].copy()
    top['Exprs_Level'] = 'High'
    bottom['Exprs_Level'] = 'Low'
    # Combine the top and bottom slices (High rows first).
    trunc_exprs = pd.concat([top, bottom], axis=0)
    return trunc_exprs
if __name__ == '__main__':
    # BUG FIX: previously called preproc_exprs_matrix(exprs_matrix, sep) with
    # two undefined names (NameError); take the arguments from the CLI instead.
    # Usage: python this_script.py <expression_table> <percentile>
    import sys
    preproc_exprs_matrix(sys.argv[1], float(sys.argv[2]))
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os

# Metadata files live next to this setup script.
this_dir = os.path.dirname(__file__)
readme_filename = os.path.join(this_dir, 'README.md')
requirements_filename = os.path.join(this_dir, 'requirements.txt')
PACKAGE_NAME = 'facebook_business'
PACKAGE_VERSION = '3.2.7'
PACKAGE_AUTHOR = 'Facebook'
PACKAGE_AUTHOR_EMAIL = ''
PACKAGE_URL = 'https://github.com/facebook/facebook-python-business-sdk'
PACKAGE_DOWNLOAD_URL = \
    'https://github.com/facebook/facebook-python-business-sdk/tarball/' + PACKAGE_VERSION
PACKAGES = [
    'facebook_business',
    'facebook_business.test',
    'facebook_business.utils',
    'facebook_business.adobjects',
    'facebook_business.adobjects.helpers',
]
PACKAGE_DATA = {
    'facebook_business': ['*.crt'],
    'facebook_business.test': ['*.jpg']
}
PACKAGE_LICENSE = 'LICENSE.txt'
PACKAGE_DESCRIPTION = 'Facebook Business SDK'
with open(readme_filename) as f:
    PACKAGE_LONG_DESCRIPTION = f.read()
with open(requirements_filename) as f:
    # BUG FIX: line[:-1] chopped the final character of a requirement when the
    # file lacks a trailing newline; strip() is newline-safe, and skipping
    # blank lines avoids empty '' entries in install_requires.
    PACKAGE_INSTALL_REQUIRES = [line.strip() for line in f if line.strip()]
setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    author=PACKAGE_AUTHOR,
    author_email=PACKAGE_AUTHOR_EMAIL,
    url=PACKAGE_URL,
    download_url=PACKAGE_DOWNLOAD_URL,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    license=PACKAGE_LICENSE,
    description=PACKAGE_DESCRIPTION,
    long_description=PACKAGE_LONG_DESCRIPTION,
    install_requires=PACKAGE_INSTALL_REQUIRES,
)
|
# -*- coding: utf-8 -*-
from typing import Dict
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__,
os.path.join('netlist_info',
'clk_invamp_diff_reset_logic.yaml'))
# noinspection PyPep8Naming
class bag_analog_ec__clk_invamp_diff_reset_logic(Module):
    """Module for library bag_analog_ec cell clk_invamp_diff_reset_logic.

    Wires up four flops (a reset chain clocked on clkp/clkn) and two inverter
    pairs that buffer the synchronized reset nets into rstn/rstp.
    """

    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Return the design parameters accepted by design()."""
        return dict(
            flop_info='flop static master library/cell info.',
            inv_info='inverter static master library/cell info.',
        )

    def design(self, flop_info, inv_info):
        """Swap every flop/inverter instance to the given static masters and rewire them.

        flop_info and inv_info are (library, cell) pairs indexed as [0]/[1] below.
        """
        # Flop chain: (instance, clock net, input net, output net).
        for inst_name, clk, in_name, out_name in [('XFFB0', 'clkn', 'rstp0', 'rstn0'),
                                                  ('XFFB1', 'clkn', 'rstn0', 'noconn'),
                                                  ('XFFT0', 'clkp', 'rst', 'rstd'),
                                                  ('XFFT1', 'clkp', 'rstd', 'rstp0'), ]:
            self.replace_instance_master(inst_name, flop_info[0], flop_info[1], static=True)
            self.reconnect_instance_terminal(inst_name, 'VDD', 'VDD')
            self.reconnect_instance_terminal(inst_name, 'VSS', 'VSS')
            self.reconnect_instance_terminal(inst_name, 'CLK', clk)
            self.reconnect_instance_terminal(inst_name, 'I', in_name)
            self.reconnect_instance_terminal(inst_name, 'O', out_name)
        # Inverter buffers: (instance, input net, output net).
        for inst_name, in_name, out_name in [('XINVB0', 'rstn0', 'rstnb'),
                                             ('XINVB1', 'rstnb', 'rstn'),
                                             ('XINVT0', 'rstp0', 'rstpb'),
                                             ('XINVT1', 'rstpb', 'rstp'), ]:
            self.replace_instance_master(inst_name, inv_info[0], inv_info[1], static=True)
            self.reconnect_instance_terminal(inst_name, 'VDD', 'VDD')
            self.reconnect_instance_terminal(inst_name, 'VSS', 'VSS')
            self.reconnect_instance_terminal(inst_name, 'I', in_name)
            self.reconnect_instance_terminal(inst_name, 'O', out_name)
|
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import sys
import urllib
import soaplib
from lxml import etree
def create_relates_to_header(relatesTo, attrs=None):
    '''Creates a 'relatesTo' header for async callbacks.

    relatesTo becomes the element text; attrs (optional dict) become
    XML attributes on the element.
    '''
    # BUG FIX: the default was a mutable dict ({}), shared across all calls.
    if attrs is None:
        attrs = {}
    # NOTE(review): `namespaces` is not imported in this module — presumably
    # soaplib's namespace table; confirm the file-level import.
    relatesToElement = etree.Element(
        '{%s}RelatesTo' % namespaces.ns_wsa)
    for k, v in attrs.items():
        relatesToElement.set(k, v)
    relatesToElement.text = relatesTo
    return relatesToElement
def create_callback_info_headers(message_id, reply_to):
    '''Creates MessageId and ReplyTo headers for initiating an
    async function.

    Returns (message_id_element, reply_to_element) with the caller's
    strings set as the element texts.
    '''
    # BUG FIX: the Element locals previously reused the parameter names, so
    # `.text` was assigned an Element object instead of the caller's strings.
    message_id_el = etree.Element('{%s}MessageID' % namespaces.ns_wsa)
    message_id_el.text = message_id
    reply_to_el = etree.Element('{%s}ReplyTo' % namespaces.ns_wsa)
    address = etree.SubElement(reply_to_el, '{%s}Address' % namespaces.ns_wsa)
    address.text = reply_to
    return message_id_el, reply_to_el
def get_callback_info(request):
    '''
    Retrieves the messageId and replyToAddress from the message header.
    This is used for async calls.
    '''
    message_id = None
    reply_to_address = None
    header = request.soap_req_header
    if not header:
        return message_id, reply_to_address
    for child in header.getchildren():
        tag = child.tag.lower()
        if tag.endswith("messageid"):
            message_id = child.text
        if "replyto" in tag:
            # The address lives in a child element of the ReplyTo node.
            for reply_child in child.getchildren():
                if reply_child.tag.lower().endswith("address"):
                    reply_to_address = reply_child.text
    return message_id, reply_to_address
def get_relates_to_info(request):
    '''Retrieves the relatesTo header. This is used for callbacks'''
    header = request.soap_req_header
    if not header:
        return None
    for child in header.getchildren():
        if 'relatesto' in child.tag.lower():
            return child.text
    return None
def split_url(url):
    '''Splits a url into (uri_scheme, host[:port], path)'''
    # NOTE(review): urllib.splittype/splithost are Python 2 APIs (moved to
    # urllib.parse internals in Python 3) — this module targets Python 2.
    scheme, remainder = urllib.splittype(url)
    host, path = urllib.splithost(remainder)
    return scheme.lower(), host, path
def reconstruct_url(environ):
    '''
    Rebuilds the calling url from values found in the
    environment.
    This algorithm was found via PEP 333, the wsgi spec and
    contributed by Ian Bicking.
    '''
    url = environ['wsgi.url_scheme'] + '://'
    # Prefer the client-supplied Host header; fall back to the server name.
    if environ.get('HTTP_HOST'):
        url += environ['HTTP_HOST']
    else:
        url += environ['SERVER_NAME']
        # Append the port only when it is not the scheme's default.
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url += ':' + environ['SERVER_PORT']
        else:
            if environ['SERVER_PORT'] != '80':
                url += ':' + environ['SERVER_PORT']
    # NOTE(review): urllib.quote is the Python 2 spelling (urllib.parse.quote in 3).
    if (urllib.quote(environ.get('SCRIPT_NAME', '')) == '/' and
        urllib.quote(environ.get('PATH_INFO', ''))[0:1] == '/'):
        #skip this if it is only a slash
        pass
    elif urllib.quote(environ.get('SCRIPT_NAME', ''))[0:2] == '//':
        # Collapse an accidental double leading slash in SCRIPT_NAME.
        url += urllib.quote(environ.get('SCRIPT_NAME', ''))[1:]
    else:
        url += urllib.quote(environ.get('SCRIPT_NAME', ''))
    url += urllib.quote(environ.get('PATH_INFO', ''))
    if environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']
    return url
def check_pyversion(*minversion):
    """Return True when the running interpreter is at least *minversion*."""
    current = sys.version_info[:3]
    return current >= minversion
|
# Erica Brophy
# BMI_503
# 09/28/19
# Calculator Project 1
import sys
class MyCalculator:
    """Four-function infix calculator.

    NOTE(review): the input() prompt and tokenization below run when the class
    body executes (i.e. at import time), not when an instance is created.
    """
    # define functions to parse equation based on * and / > + and -
    import re
    calc_input = input("Enter your equation that you would like the calculator to solve: \n")
    # Tokenize into alternating number / operator tokens.
    calc_list = re.findall("[a-zA-Z]|[\d\.]+|[^a-zA-Z0-9]|[\t]+", calc_input)
    #create new lists for numbers and operators
    @staticmethod
    def num_split(calc_list):
        """Return the even-indexed tokens converted to floats (the operands)."""
        num = []
        for i in calc_list[0:len(calc_list):2]:
            element = float(i)
            num.append(element)
        return num
    #create new lists for operators
    @staticmethod
    def op_split(calc_list):
        """Return the odd-indexed tokens (the operator symbols)."""
        operator = []
        for n in calc_list[1:len(calc_list)-1:2]:
            operator.append(n)
        return operator
    #terminates the program if user types end
    @staticmethod
    def terminate():
        """Report termination when the first token is E/e.

        NOTE(review): `calc_list[0] == "E" or "e"` is always truthy because the
        literal "e" is truthy on its own; presumably this was meant to be
        `calc_list[0] in ("E", "e")` — confirm before relying on it.
        """
        calc_list = MyCalculator.calc_list
        if calc_list[0] == "E" or "e":
            print("The program has been terminated")
        else:
            return True
    # looks for the index of the operator
    @staticmethod
    def get_op_index(op_list, f):
        """Return every index in op_list where operator f occurs."""
        return [i for (i, v) in enumerate(op_list) if v == f]
    # multiplication function
    @staticmethod
    def multi(num_split, op_split):
        """Collapse every '*' in-place: each operand pair becomes its product."""
        num_multi = []
        num = num_split
        op_new = op_split
        op = MyCalculator.get_op_index(op_new, "*")
        for i in op:
            num_multi.append(num[i]*num[i+1])
        # Write each product over the left operand, then delete the consumed
        # right operands and operators (right-to-left to keep indices valid).
        for n in op:
            num[n] = num_multi[op.index(n)]
        for index in sorted(op, reverse=True):
            del num[index + 1]
        for index in sorted(op, reverse=True):
            del op_new[index]
        return op_new, num
    #divide function
    @staticmethod
    def divide(num_split, op_split):
        """Collapse every '/' using the same scheme as multi()."""
        num_divide = []
        num = num_split
        op_new = op_split
        op = MyCalculator.get_op_index(op_new, "/")
        for i in op:
            num_divide.append(num[i] / num[i + 1])
        for n in op:
            num[n] = num_divide[op.index(n)]
        for d in sorted(op, reverse=True):
            del num[d + 1]
        for d in sorted(op, reverse=True):
            del op_new[d]
        return op_new, num
    #add function
    @staticmethod
    def add(num_split, op_split):
        """Collapse every '+' using the same scheme as multi()."""
        num_add = []
        num = num_split
        op_new = op_split
        op = MyCalculator.get_op_index(op_new, "+")
        for i in op:
            num_add.append(num[i] + num[i + 1])
        for n in op:
            num[n] = num_add[op.index(n)]
        for d in sorted(op, reverse=True):
            del num[d + 1]
        for d in sorted(op, reverse=True):
            del op_new[d]
        return op_new, num
    #subtraction function
    @staticmethod
    def subtract(num_split, op_split):
        """Collapse every '-' using the same scheme as multi()."""
        num_subtract = []
        num = num_split
        op_new = op_split
        op = MyCalculator.get_op_index(op_new, "-")
        for i in op:
            num_subtract.append(num[i] - num[i + 1])
        for n in op:
            num[n] = num_subtract[op.index(n)]
        for d in sorted(op, reverse=True):
            del num[d + 1]
        for d in sorted(op, reverse=True):
            del op_new[d]
        return op_new, num
    #order of operations in an equation to execute one number
    @staticmethod
    def evaluate():
        """Apply the *, /, +, - passes in order and return the final value.

        NOTE(review): collapsing all '*' before all '/' (and '+' before '-')
        is not strict left-to-right evaluation; mixed chains such as 8/2*2 can
        deviate from conventional results — confirm this is acceptable.
        """
        num_split = MyCalculator.num_split(MyCalculator.calc_list)
        op_split = MyCalculator.op_split(MyCalculator.calc_list)
        op_split, num_split = MyCalculator.multi(num_split, op_split)
        op_split, num_split = MyCalculator.divide(num_split, op_split)
        op_split, num_split = MyCalculator.add(num_split, op_split)
        op_split, num_split = MyCalculator.subtract(num_split, op_split)
        return num_split[0]
    #check for errors
    @staticmethod
    def error_checks():
        """Validate the token list, exiting with a message on malformed input."""
        calc_list = MyCalculator.calc_list
        try:
            num = float(calc_list[0])
        except ValueError:
            print("Error. You entered an operator first.")
            sys.exit()
        # NOTE(review): popping while iterating can skip consecutive spaces.
        for n in calc_list:
            if n == " ":
                calc_list.pop(calc_list.index(n))
        for n in calc_list[0:len(calc_list)-1:2]:
            try:
                num = float(n)
            except ValueError:
                print("Error. You have entered two operators in a row.")
                sys.exit()
        # NOTE(review): the expression below is a no-op comparison — it can
        # never raise ValueError, so unknown operators are not rejected here.
        for n in calc_list[1:len(calc_list):2]:
            try:
                n == "+" or "-" or "*" or "/"
            except ValueError:
                sys.exit()
# Script entry: validate the tokenized expression, then print the result.
MyCalculator.error_checks()
print(f"Your answer is {MyCalculator.evaluate()}.")
|
# HTML e-mail is assembled by concatenating the fragments below, in order:
# header -> exterior background -> logo table -> interior background ->
# quote table -> promo table -> CTA button -> app-links table (which also
# closes the layout tables opened earlier) -> footer.

# Document head: doctype, charset and title.
email_header = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>Traveling Strategy</title>
</head>
"""
# Opens <body> and the outer layout tables (left open on purpose; closed
# by email_table_five).
email_exterior_bg = """
<body leftmargin="0" marginwidth="0" topmargin="0" marginheight="0" offset="0" style="margin: 0pt auto; padding-left: 90px; padding-right: 90px; background:#F4F7FA;">
<table id="main" width="100%" height="100%" cellpadding="0" cellspacing="0" border="0" bgcolor="#F4F7FA">
<tbody>
<tr>
<td valign="top">
<table class="innermain" cellpadding="0" width="580" cellspacing="0" border="0" bgcolor="#F4F7FA" align="center" style="margin:0 auto; table-layout: fixed;">
<tbody>
<tr>
<td colspan="6">
"""
# Logo banner: animated gif plus wordmark, both linking to the site.
email_table_one = """
<table class="logo" width="100%" cellpadding="0" cellspacing="0" border="0">
<tbody>
<tr>
<td colspan="2" height="30"></td>
</tr>
<tr>
<td valign="top" align="center">
<a href="https://www.travelingstrategy.com" style="display:inline-block; cursor:pointer; text-align:center;">
<img src="https://media0.giphy.com/media/dYUslDahf6Uw71gH3t/giphy.gif" height="150" width="150" border="0" alt="TravelingStrategy"/>
</a>
</td>
</tr>
<tr>
<td align="center" valign="bottom" colspan="2">
<a href="https://www.travelingstrategy.com" style="display:inline-block; cursor:pointer; text-align:center;">
<img alt="TravelingStrategy" width="300" src="https://d15k2d11r6t6rl.cloudfront.net/public/users/BeeFree/beefree-tsux5brk51t/editor_images/toppng.png"/>
</a>
</td>
</tr>
<tr>
<td colspan="2" height="30"></td>
</tr>
</tbody>
</table>
"""
# Opens the white content card (also left open; closed by email_table_five).
email_interior_bg = """
<table width="100%" cellpadding="0" cellspacing="0" border="0" bgcolor="#ffffff" style="border-radius: 4px; box-shadow: 0 2px 8px rgba(0,0,0,0.05);">
<tbody>
<tr>
<td height="40"></td>
</tr>
<tr style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI','Roboto','Oxygen','Ubuntu','Cantarell','Fira Sans','Droid Sans','Helvetica Neue',sans-serif; color:#4E5C6E; font-size:14px; line-height:20px; margin-top:20px;">
<td class="content" colspan="2" valign="top" align="center" style="padding-left:90px; padding-right:90px;">
"""
# Inspirational-quote section with divider rules above and below.
email_table_two = """
<table width="100%" cellpadding="0" cellspacing="0" border="0" bgcolor="#ffffff">
<tbody>
<tr>
<td height="24"></td>
</tr>
<tr>
<td height="2" bgcolor="#DAE1E9"></td>
</tr>
<tr>
<td height="24"></td>
</tr>
<tr>
<td height="30"></td>
</tr>
<tr>
<td align="center"><span style="color:#48545d;font-size:38px;font-weight:bold;line-height:38px;">LIFE IS AN EXPERIMENT IN WHICH YOU FAIL OR SUCCEED. EXPLORE MORE, EXPECT LEAST.</span></td>
</tr>
<tr>
<td height="30"></td>
</tr>
<tr>
<td height="24"></td>
</tr>
<tr>
<td height="2" bgcolor="#DAE1E9"></td>
</tr>
<tr>
<td height="24"></td>
</tr>
<tr>
<td align="center"><span style="color:#a2a2a2;font-size:10px;line-height:24px;">Here is one of the events on your interest list.</span></td>
</tr>
<tr>
<td height="24"></td>
</tr>
"""
# Closes the quote table, then the promo section (gif + tagline).
email_table_three = """
<tr>
<td height="24"></td>
</tr>
<tr>
<td height="5" bgcolor="#DAE1E9"></td>
</tr>
</tbody>
</table>
<table id="promo" width="100%" cellpadding="0" cellspacing="0" border="0">
<tbody>
<tr>
<td colspan="2"></td>
</tr>
<tr>
<td colspan="2" height="20"></td>
</tr>
<tr>
<td height="24"></td>
</tr>
<tr>
<td valign="top" width="50%" align="right">
<img style="display:inline-block;margin-right:10px;" src="https://d15k2d11r6t6rl.cloudfront.net/public/users/BeeFree/beefree-tsux5brk51t/editor_images/giphy2_1.gif" height="180" width="200" border="0" alt="giphy"/>
</td>
<td valign="top">
<p style="color:#a2a2a2; font-size:25px; line-height:25px; font-style:italic; font-weight:bold; display:inline-block; margin-left:5px; margin-top:30px;">GO DISCOVER NEW EVENTS FOR YOUR
<span style="color:#ff6e61; font-size:25px; line-height:25px; font-style:italic; font-weight:bold; display:inline-block;">NEXT DESTINATION</span>
</p>
</td>
</tr>
<tr>
<td colspan="2" height="20"></td>
</tr>
<tr>
<td height="24"></td>
</tr>
</tbody>
</table>
"""
# Call-to-action button linking to the website.
email_table_four = """
<table width="100%" cellpadding="0" cellspacing="0" border="0" bgcolor="#ffffff">
<tbody>
<tr>
<td align="center"><span><a href="https://www.travelingstrategy.com" style="display:block; padding:15px 25px; background-color:#ff6e61; color:#ffffff; border-radius:3px;text-decoration:none;">Visit our website</a></span></td>
</tr>
</tbody>
</table>
"""
# App-store badge section; also closes the interior card and the outer
# layout tables opened in email_interior_bg / email_exterior_bg.
email_table_five = """
<table id="promo" width="100%" cellpadding="0" cellspacing="0" border="0" style="margin-top:20px;">
<tbody>
<tr>
<td colspan="2" height="20"></td>
</tr>
<tr>
<td height="60"></td>
</tr>
<tr>
<td colspan="2" align="center"><span style="font-size:14px; font-weight:500; margin-bottom:10px; color:#7E8A98; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI','Roboto','Oxygen','Ubuntu','Cantarell','Fira Sans','Droid Sans','Helvetica Neue',sans-serif;">Stay on the lookout for our new TravelingStrategy App</span></td>
</tr>
<tr>
<td colspan="2" height="20"></td>
</tr>
<tr>
<td valign="top" width="50%" align="right">
<a href="#" style="display:inline-block;margin-right:10px;">
<img src="https://i.imgur.com/vtJsCmN.png" height="40" border="0" alt="TravelingStrategy iOS mobile app"/>
</a>
</td>
<td valign="top">
<a href="#" style="display:inline-block;margin-left:5px;" >
<img src="https://i.imgur.com/maleUl2.png" height="40" border="0" alt="TravelingStrategy Android mobile app"/>
</a>
</td>
</tr>
<tr>
<td colspan="2" height="20"></td>
</tr>
</tbody>
</table>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tbody>
<tr>
<td height="50"></td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
"""
# Footer: small logo plus a copyright line; closes </body></html>.
# BUG FIX: the original contained mojibake ("ย " and "ยฉ"), i.e. the UTF-8
# bytes of a non-breaking space and the © sign mis-decoded through a
# Thai codepage.  Replaced with the HTML entities &nbsp; and &copy;.
email_footer = """
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tbody>
<tr>
<td height="10">&nbsp;</td>
</tr>
<tr>
<td valign="top" align="center">
<a href="https://www.travelingstrategy.com" style="display:inline-block; cursor:pointer; text-align:center;"><img src="https://travelingstrategy.com/static/media/logo.541e4d6e.png" height="54" width="54" border="0" alt="TravelingStrategy"/></a>
</td>
</tr>
<tr>
<td valign="top" align="center"><span style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI','Roboto','Oxygen','Ubuntu','Cantarell','Fira Sans','Droid Sans','Helvetica Neue',sans-serif; color:#9EB0C9; font-size:10px;">&copy;<a href="https://www.travelingstrategy.com/" target="_blank" style="color:#9EB0C9 !important; text-decoration:none;">TravelingStrategy</a>2020</span></td>
</tr>
<tr>
<td height="50"></td>
</tr>
</tbody>
</table>
</body>
</html>
"""
# Standard Library
import logging
# Third-Party
import pydf
from rest_framework_json_api.filters import OrderingFilter
from rest_framework_json_api.django_filters import DjangoFilterBackend
from django_fsm import TransitionNotAllowed
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# Django
from django.core.files.base import ContentFile
from django.db.models import Sum, Q, Avg
from django.template.loader import render_to_string
from django.utils.text import slugify
# Local
from .filtersets import EntryFilterset
from .filtersets import SessionFilterset
from .models import Contest
from .models import Entry
from .models import Session
from .renderers import PDFRenderer
from .renderers import XLSXRenderer
from .responders import PDFResponse
from .responders import XLSXResponse
from .serializers import ContestSerializer
from .serializers import EntrySerializer
from .serializers import SessionSerializer
# from .filtersets import AssignmentFilterset
from .models import Assignment
from .serializers import AssignmentSerializer
from .serializers import RepertorySerializer
# Module-level logger named after this module (standard Django pattern).
log = logging.getLogger(__name__)
# NOTE(review): these two imports sit below executable code; they belong in
# the grouped import block at the top of the file.
from rest_framework.negotiation import BaseContentNegotiation
from .models import Repertory
class IgnoreClientContentNegotiation(BaseContentNegotiation):
    """Content negotiation that ignores what the client asked for.

    Both hooks simply return the first configured parser/renderer, which
    lets a view force a specific output format (e.g. XLSX downloads).
    """

    def select_parser(self, request, parsers):
        """Always use the first parser in `.parser_classes`."""
        first_parser = parsers[0]
        return first_parser

    def select_renderer(self, request, renderers, format_suffix):
        """Always use the first renderer in `.renderer_classes`."""
        first_renderer = renderers[0]
        return (first_renderer, first_renderer.media_type)
class AssignmentViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Assignment objects.

    Row-level access control comes from dry-rest-permissions
    (DRYPermissions); filtering uses the JSON:API DjangoFilterBackend.
    """
    # NOTE(review): bare select_related() (no args) follows every non-null
    # FK; the commented names suggest specific relations were intended.
    queryset = Assignment.objects.select_related(
        # 'user',
        # 'convention',
    ).prefetch_related(
    ).order_by('id')
    serializer_class = AssignmentSerializer
    # filterset_class = AssignmentFilterset
    filter_backends = [
        DjangoFilterBackend,
    ]
    permission_classes = [
        DRYPermissions,
    ]
    # JSON:API resource type exposed to clients.
    resource_name = "assignment"

    # Disabled activate/deactivate transition endpoints, kept for reference;
    # see EntryViewSet for the live version of this pattern.
    # @action(methods=['post'], detail=True)
    # def activate(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.activate(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
    # @action(methods=['post'], detail=True)
    # def deactivate(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.deactivate(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
class ContestViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Contest objects (scoped by session)."""
    queryset = Contest.objects.select_related(
        'session',
        # 'award',
    ).prefetch_related(
    ).order_by('id')
    serializer_class = ContestSerializer
    # No filterset defined yet; DjangoFilterBackend is a no-op until one is.
    filterset_class = None
    filter_backends = [
        DjangoFilterBackend,
    ]
    permission_classes = [
        DRYPermissions,
    ]
    # JSON:API resource type exposed to clients.
    resource_name = "contest"

    # Disabled include/exclude transition endpoints, kept for reference;
    # see EntryViewSet for the live version of this pattern.
    # @action(methods=['post'], detail=True)
    # def include(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.include(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
    # @action(methods=['post'], detail=True)
    # def exclude(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.exclude(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
class EntryViewSet(viewsets.ModelViewSet):
    """CRUD plus workflow transitions for Entry objects.

    The POST endpoints build/invite/withdraw/submit/approve all share the
    same shape: run the django-fsm transition, return 400 when its
    conditions are not met, otherwise save and return the serialized entry.
    """
    queryset = Entry.objects.select_related(
        'session',
        # 'group',
    ).prefetch_related(
        'statelogs',
        'owners',
    ).order_by('id')
    serializer_class = EntrySerializer
    filterset_class = EntryFilterset
    filter_backends = [
        DjangoFilterBackend,
    ]
    permission_classes = [
        DRYPermissions,
    ]
    # JSON:API resource type exposed to clients.
    resource_name = "entry"

    def _transition(self, name):
        """Run the named django-fsm transition on the current entry.

        Returns a 400 response when the transition is not allowed,
        otherwise saves the object and returns its serialized form.
        Factored out of five previously copy-pasted action bodies.
        """
        object = self.get_object()
        try:
            getattr(object, name)(by=self.request.user)
        except TransitionNotAllowed:
            return Response(
                {'status': 'Transition conditions not met.'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        object.save()
        serializer = self.get_serializer(object)
        return Response(serializer.data)

    @action(methods=['post'], detail=True)
    def build(self, request, pk=None, **kwargs):
        return self._transition('build')

    @action(methods=['post'], detail=True)
    def invite(self, request, pk=None, **kwargs):
        return self._transition('invite')

    @action(methods=['post'], detail=True)
    def withdraw(self, request, pk=None, **kwargs):
        return self._transition('withdraw')

    @action(methods=['post'], detail=True)
    def submit(self, request, pk=None, **kwargs):
        return self._transition('submit')

    @action(methods=['post'], detail=True)
    def approve(self, request, pk=None, **kwargs):
        return self._transition('approve')
class RepertoryViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Repertory objects."""
    queryset = Repertory.objects.select_related(
        # 'group',
        # 'chart',
    ).prefetch_related(
        'statelogs',
    ).order_by('id')
    serializer_class = RepertorySerializer
    filter_backends = [
        DjangoFilterBackend,
        # RepertoryFilterBackend,
    ]
    permission_classes = [
        DRYPermissions,
    ]
    # JSON:API resource type exposed to clients.
    resource_name = "repertory"

    # Disabled activate/deactivate transition endpoints, kept for reference;
    # see EntryViewSet for the live version of this pattern.
    # @action(methods=['post'], detail=True)
    # def activate(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.activate(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
    # @action(methods=['post'], detail=True)
    # def deactivate(self, request, pk=None, **kwargs):
    #     object = self.get_object()
    #     try:
    #         object.deactivate(by=self.request.user)
    #     except TransitionNotAllowed:
    #         return Response(
    #             {'status': 'Transition conditions not met.'},
    #             status=status.HTTP_400_BAD_REQUEST,
    #         )
    #     object.save()
    #     serializer = self.get_serializer(object)
    #     return Response(serializer.data)
class SessionViewSet(viewsets.ModelViewSet):
    """CRUD plus workflow transitions and XLSX report downloads for Sessions.

    The POST endpoints build/open/close/verify/package/finish all share the
    same shape: run the django-fsm transition, return 400 when its
    conditions are not met, otherwise save and return the serialized
    session.  The GET endpoints stream XLSX reports, bypassing client
    content negotiation.
    """
    queryset = Session.objects.select_related(
        # 'convention',
        'target',
    ).prefetch_related(
        'owners',
        'contests',
        'entries',
    ).order_by('id')
    serializer_class = SessionSerializer
    filterset_class = SessionFilterset
    filter_backends = [
        DjangoFilterBackend,
    ]
    permission_classes = [
        DRYPermissions,
    ]
    # JSON:API resource type exposed to clients.
    resource_name = "session"

    def _transition(self, name):
        """Run the named django-fsm transition on the current session.

        Returns a 400 response when the transition is not allowed,
        otherwise saves the object and returns its serialized form.
        Factored out of six previously copy-pasted action bodies.
        """
        object = self.get_object()
        try:
            getattr(object, name)(by=self.request.user)
        except TransitionNotAllowed:
            return Response(
                {'status': 'Transition conditions not met.'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        object.save()
        serializer = self.get_serializer(object)
        return Response(serializer.data)

    @action(methods=['post'], detail=True)
    def build(self, request, pk=None, **kwargs):
        return self._transition('build')

    @action(methods=['post'], detail=True)
    def open(self, request, pk=None, **kwargs):
        return self._transition('open')

    @action(methods=['post'], detail=True)
    def close(self, request, pk=None, **kwargs):
        return self._transition('close')

    @action(methods=['post'], detail=True)
    def verify(self, request, pk=None, **kwargs):
        return self._transition('verify')

    @action(methods=['post'], detail=True)
    def package(self, request, pk=None, **kwargs):
        return self._transition('package')

    @action(methods=['post'], detail=True)
    def finish(self, request, pk=None, **kwargs):
        return self._transition('finish')

    @action(
        methods=['get'],
        detail=True,
        renderer_classes=[XLSXRenderer],
        permission_classes=[DRYPermissions],
        content_negotiation_class=IgnoreClientContentNegotiation,
    )
    def legacy(self, request, pk=None):
        """Download the legacy XLSX report (cached on the model if present)."""
        session = Session.objects.get(pk=pk)
        if session.legacy_report:
            xlsx = session.legacy_report.file
        else:
            xlsx = session.get_legacy_report()
        # BUG FIX: the original used '{0} {1} Session Legacy Report' but
        # passed only one argument (session.convention is commented out),
        # so .format() raised IndexError at runtime.
        file_name = '{0} Session Legacy Report'.format(
            session.get_kind_display(),
        )
        return XLSXResponse(
            xlsx,
            file_name=file_name,
            status=status.HTTP_200_OK
        )

    @action(
        methods=['get'],
        detail=True,
        renderer_classes=[XLSXRenderer],
        permission_classes=[DRYPermissions],
        content_negotiation_class=IgnoreClientContentNegotiation,
    )
    def drcj(self, request, pk=None):
        """Download the DRCJ XLSX report (cached on the model if present)."""
        session = Session.objects.get(pk=pk)
        if session.drcj_report:
            xlsx = session.drcj_report.file
        else:
            xlsx = session.get_drcj_report()
        file_name = '{0} {1} Session DRCJ Report'.format(
            session.convention,
            session.get_kind_display(),
        )
        return XLSXResponse(
            xlsx,
            file_name=file_name,
            status=status.HTTP_200_OK
        )
|
from ontobio import OntologyFactory
from ontobio.tsv_expander import expand_tsv
import csv
# Fixture the expansion reads, and the file it writes (re-read by the test).
INPUT = "tests/resources/data_table.tsv"
OUTPUT = "tests/resources/data_table-expanded.tsv"
def test_expand():
    """expand_tsv adds a term_label column: real GO IDs get their label,
    unknown IDs get an empty label."""
    factory = OntologyFactory()
    ontobj = factory.create("tests/resources/goslim_pombe.json")
    # BUG FIX: the output handle was opened inline and never closed, so
    # buffered rows might not be flushed before the file is re-read below
    # (and both handles leaked).  Use context managers for both files.
    with open(OUTPUT, "w") as outfile:
        expand_tsv(INPUT, ontology=ontobj, outfile=outfile, cols=["term"])
    n = 0
    with open(OUTPUT, "r") as infile:
        reader = csv.DictReader(infile, delimiter='\t')
        for row in reader:
            if row['term'] == 'GO:0002181':
                assert row['term_label'] == 'cytoplasmic translation'
                n += 1
            if row['term'] == 'FAKE:123':
                assert row['term_label'] == ''
                n += 1
    # Exactly the two sentinel rows must have been seen.
    assert n == 2
|
# Copyright 2017 Brocade Communications Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import struct
import pcapng.util as util
from pcapng.util import to_bytes, str_to_bytes
def test_block32_pad_len():
    """block32_ceil_num_bytes rounds a byte count up to a multiple of 4."""
    expected_by_input = {0: 0, 1: 4, 2: 4, 3: 4, 4: 4,
                         5: 8, 6: 8, 7: 8, 8: 8}
    for raw_len, padded_len in expected_by_input.items():
        assert padded_len == util.block32_ceil_num_bytes(raw_len)
def test_pad_to_len():
    """pad_bytes pads to the requested length and rejects oversize input."""
    with pytest.raises(AssertionError):
        util.pad_bytes([1, 2, 3, 4], 3)
    with pytest.raises(AssertionError):
        util.pad_bytes('superlong', 3)
    assert to_bytes('superlong' + chr(0) * 23) == util.pad_bytes('superlong', 32)
    # Default pad byte is 0; an explicit pad value (9 here) must be honoured.
    prefixes = ([], [1], [1, 2], [1, 2, 3], [1, 2, 3, 4])
    for prefix in prefixes:
        fill = 4 - len(prefix)
        assert to_bytes(prefix + [0] * fill) == util.pad_bytes(list(prefix), 4)
        assert to_bytes(prefix + [9] * fill) == util.pad_bytes(list(prefix), 4, 9)
def test_pad_to_block32():
    """block32_pad_bytes zero-pads to a 4-byte boundary; assert_block32_length
    accepts only already-aligned lengths."""
    for length in range(9):
        data = list(range(1, length + 1))
        pad_count = (4 - length % 4) % 4
        expected = to_bytes(data + [0] * pad_count)
        assert expected == util.block32_pad_bytes(list(data))
    for aligned in ([], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6, 7, 8]):
        util.assert_block32_length(aligned)
    for unaligned in ([1], [1, 2], [1, 2, 3]):
        with pytest.raises(AssertionError):
            util.assert_block32_length(unaligned)
def test_block32_bytes_pack():
    """Round-trip length-value packing: pack a payload, unpack it from a
    stream that has trailing bytes, and check both the payload and the
    remainder survive intact."""
    def assert_block32_bytes_packing( data_bytes ):
        # Surround the packed block with extra bytes to prove the rolling
        # unpack consumes exactly one block and returns the rest untouched.
        orig = to_bytes( data_bytes )
        extra_bytes = to_bytes('dummy-start') + orig + to_bytes('dummy-end')
        unpacked, remaining = util.block32_lv_bytes_unpack_rolling(
            util.block32_lv_bytes_pack(orig) + extra_bytes )
        assert unpacked == orig
        assert remaining == extra_bytes
    assert_block32_bytes_packing( '' )
    assert_block32_bytes_packing( 'a' )
    assert_block32_bytes_packing( 'go' )
    assert_block32_bytes_packing( 'ray' )
    assert_block32_bytes_packing( 'Doh!' )
    assert_block32_bytes_packing( 'How do you like me now?' )
    # Also sweep payload lengths 0..22 so every pad width is exercised.
    for i in range(23):
        assert_block32_bytes_packing( range(i) )
def test_block32_labelled_bytes_pack():
    """Round-trip type-length-value packing: like the LV test above, but a
    numeric label travels with the payload and must also round-trip."""
    # Any deterministic-ish integer works as a label; the frozen test clock
    # (see test_time) makes this reproducible enough for the assertion.
    block_label = util.curr_time_utc_secs()
    def assert_block32_labelled_bytes_packing( data_bytes ):
        orig = to_bytes( data_bytes )
        extra_bytes = to_bytes('dummy-start') + orig + to_bytes('dummy-end')
        label, unpacked, remaining = util.block32_tlv_bytes_unpack_rolling(
            util.block32_tlv_bytes_pack(block_label, orig) + extra_bytes )
        assert label == block_label
        assert unpacked == orig
        assert remaining == extra_bytes
    assert_block32_labelled_bytes_packing( '' )
    assert_block32_labelled_bytes_packing( 'a' )
    assert_block32_labelled_bytes_packing( 'go' )
    assert_block32_labelled_bytes_packing( 'ray' )
    assert_block32_labelled_bytes_packing( 'Doh!' )
    assert_block32_labelled_bytes_packing( 'How do you like me now?' )
    # Sweep payload lengths 0..12 so every pad width is exercised.
    for i in range(13):
        assert_block32_labelled_bytes_packing( range(i) )
#-----------------------------------------------------------------------------
def test_types():
    """Each type-assertion helper accepts its own type and rejects the rest."""
    samples = {
        'str': ['', 'a', 'abc'],
        'list': [[], [1], [1, 2, 3]],
        'dict': [{}, {'a': 1}, {'a': 1, 'b': 2}],
    }
    checkers = {
        'str': util.assert_type_str,
        'list': util.assert_type_list,
        'dict': util.assert_type_dict,
    }
    for kind, checker in checkers.items():
        for good_value in samples[kind]:
            checker(good_value)
        # None and one representative of each *other* type must be rejected.
        with pytest.raises(AssertionError):
            checker(None)
        for other_kind, values in samples.items():
            if other_kind != kind:
                with pytest.raises(AssertionError):
                    checker(values[1])
def test_uint8():
    """assert_uint8 accepts 0..255 and rejects values just outside."""
    for value in range(256):
        util.assert_uint8(value)
    for out_of_range in (-1, 256):
        with pytest.raises(AssertionError):
            util.assert_uint8(out_of_range)
def test_int8():
    """assert_int8 accepts the signed-byte range and rejects just outside.

    NOTE(review): range(-128, 127) stops at 126, so 127 itself is never
    checked on the accepting side — presumably an off-by-one in the test.
    """
    for value in range(-128, 127):
        util.assert_int8(value)
    for out_of_range in (-129, 128):
        with pytest.raises(AssertionError):
            util.assert_int8(out_of_range)
def test_bytearray():
    """Only a real bytearray satisfies assert_type_bytearray."""
    util.assert_type_bytearray(bytearray([1, 2, 255]))
    for not_a_bytearray in (list([1, 2, 255]), 'abc'):
        with pytest.raises(AssertionError):
            util.assert_type_bytearray(not_a_bytearray)
def test_to_bytes():
    """to_bytes accepts either a str or a list of int byte values.

    NOTE(review): the first two unconditional asserts compare against str
    literals, which only pass under Python 2 (where str is bytes); under
    Python 3 they would fail before reaching the version-guarded checks.
    Confirm which interpreter this suite targets.
    """
    assert 'abc' == to_bytes( 'abc' )
    assert 'abc' == to_bytes( [97,98,99] )
    if util.is_python2():
        assert str( 'abc' ) == to_bytes( 'abc' )
    if util.is_python3():
        assert bytes( [97,98,99] ) == to_bytes( [97,98,99] )
def test_str_to_bytes():
    """str_to_bytes('abc') yields the byte values of 'a', 'b', 'c'."""
    expected = to_bytes([97, 98, 99])
    assert str_to_bytes('abc') == expected
def test_fibonacci_list():
    """fibonacci_list(n) returns the first n Fibonacci numbers, starting at 0."""
    reference = [0, 1, 1, 2, 3, 5, 8, 13, 21]
    for n in range(len(reference) + 1):
        assert util.fibonacci_list(n) == reference[:n]
    #todo need test fibo_list_signed
def test_assert_rel_equal():
    """assert_rel_equal passes while the requested precision (in significant
    digits) is loose enough: 1000 vs 1001 agree to roughly 3 digits."""
    util.assert_rel_equal( 1000, 1001, digits=1 )
    util.assert_rel_equal( 1000, 1001, digits=2 )
    util.assert_rel_equal( 1000, 1001, digits=2.5 )
    with pytest.raises(AssertionError):
        util.assert_rel_equal( 1000, 1001, digits=3.5 )
        # NOTE(review): these two calls sit inside the raises-block, so they
        # never execute (the digits=3.5 call raises first).  They would also
        # raise if moved out, being stricter still — confirm original intent.
        util.assert_rel_equal( 1000, 1001, digits=4 )
        util.assert_rel_equal( 1000, 1001, digits=5 )
def test_dict_merge():
    """dict_merge combines two dicts; dict_merge_all folds a list of them."""
    merged_pair = util.dict_merge({'a': 1}, {'b': 2})
    assert merged_pair == {'a': 1, 'b': 2}
    merged_all = util.dict_merge_all([{'a': 1}, {'b': 2}, {'c': 3}])
    assert merged_all == {'a': 1, 'b': 2, 'c': 3}
def test_even_odd():
    """is_even/is_odd agree with integer parity on small samples."""
    for even_value in (2, 4, 6):
        assert util.is_even(even_value)
    for odd_value in (3, 5, 7):
        assert util.is_odd(odd_value)
def test_str_to_intvec():
    """str_to_intvec chops a digit string into ints of width 2 (default) or N."""
    assert [12, 34, 56] == util.str_to_intvec('123456')
    assert [42, 0, 1] == util.str_to_intvec('420001')
    # Explicit chunk width of 3.
    assert [123, 456] == util.str_to_intvec('123456', 3)
def test_uint64_split32():
    """Splitting a uint64 into 32-bit halves and rejoining is lossless."""
    # Fibonacci values up to 2**50 give a cheap spread of magnitudes.
    for value in util.fibonacci_range(pow(2, 50)):
        high32, low32 = util.uint64_split32(value)
        assert util.uint64_join32(high32, low32) == value
def test_xxx():
    """Grab-bag sanity checks: struct packing plus assorted util helpers.

    NOTE(review): the comparisons of struct.pack() output against str
    literals only hold under Python 2 (where str is bytes); under Python 3
    struct.pack returns bytes and these asserts would fail.  Confirm the
    target interpreter.
    """
    xx1 = struct.pack( '!hhl', 1, 2, 3 ) # h='short', l='long'
    xx2 = struct.unpack( '!hhl', xx1 ) # ! => network byte order (big-endian)
    assert xx1 == '\x00\x01\x00\x02\x00\x00\x00\x03'
    assert xx2 == ( 1, 2, 3 )
    # q=8-byte, l=4-byte, h=2-byte big-endian encodings of 5.
    assert '\x00\x00\x00\x00\x00\x00\x00\x05' == struct.pack( '!q', 5 )
    assert '\x00\x00\x00\x05' == struct.pack( '!l', 5 )
    assert '\x00\x05' == struct.pack( '!h', 5 )
    assert 3 == len( [ 1, 2, 3] )
    # split_float separates integer part and (six-digit) fractional part.
    assert (3, 140000) == util.split_float(3.14)
    assert (3, 141593) == util.split_float(3.141592654)
    assert 'abc' == util.chrList_to_str(['a', 'b', 'c'])
def test_time():
    """Freeze util's test clock and check every current-time accessor agrees
    (seconds, millis, micros, timetuple, and hex-string forms)."""
    time_tst = 123.456789
    util.test_time_utc_set( 123.456789 )
    util.assert_rel_equal(time_tst, util.curr_time_utc(), digits=9)
    # Tolerance of 1 absorbs rounding in the unit conversions.
    assert abs(123456789 - util.curr_time_utc_micros()) <= 1
    assert abs(123456 - util.curr_time_utc_millis()) <= 1
    assert abs(123 - util.curr_time_utc_secs()) <= 1
    (secs,usecs) = util.curr_utc_timetuple()
    assert 123 == secs
    assert abs(456789 - usecs) <= 1
    util.test_time_utc_unset()
    # Second freeze: whole-second value rendered as a hex string.
    util.test_time_utc_set(123456)
    assert '0x0001e240' == util.curr_time_utc_secs_hexstr()
    util.test_time_utc_unset()
def test_quot():
    """util.quot(x, 3) is integer division: 0,1,2,...,8 // 3."""
    # BUG FIX: under Python 3 `map` returns an iterator, so comparing a
    # list against the bare map object was always False and this assert
    # always failed.  Materializing with list() works on Python 2 as well.
    assert [0, 0, 0, 1, 1, 1, 2, 2, 2] == list(map(lambda x: util.quot(x, 3), range(9)))
def test_mod():
    """util.mod(x, 3) is the remainder: 0,1,2,...,8 % 3."""
    # BUG FIX: same as test_quot — a list compared against a bare Python 3
    # map iterator is always False; materialize with list().
    assert [0, 1, 2, 0, 1, 2, 0, 1, 2] == list(map(lambda x: util.mod(x, 3), range(9)))
def test_take():
    """take(n, iterable) returns the first n items, for generators and lists."""
    def gen_ints(limit):
        for value in range(limit):
            yield value
    expected = [0, 1, 2, 3, 4]
    assert expected == util.take(5, gen_ints(99))
    assert expected == util.take(5, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
|
import util
class test_views:
    """ Test accumulate of all kind of views"""
    def init(self):
        # Yield (setup command, axis) pairs for random arrays of up to 4
        # dims, covering positive and negative axis indices.
        for cmd, shape in util.gen_random_arrays("R", 4, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for i in range(len(shape)):
                yield (cmd, i)
            for i in range(len(shape)):
                # NOTE(review): -0 == 0, so axis 0 is yielded twice here.
                yield (cmd, -i)
    @util.add_bh107_cmd
    def test_accumulate(self, arg):
        # Append the add-accumulate expression along the chosen axis.
        (cmd, axis) = arg
        cmd += "res = M.add.accumulate(a, axis=%d)" % axis
        return cmd
class test_sum:
    """Test cumulative reductions: cumsum() and cumprod().

    NOTE(review): the original docstring claimed sum/prod/any/all, but the
    commands generated below only exercise cumsum and cumprod; the class
    name is similarly stale.
    """
    def init(self):
        # Yield (setup command, op name, axis) for arrays of up to 3 dims.
        for cmd, shape in util.gen_random_arrays("R", 3, dtype="np.float32"):
            cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
            for op in ["cumsum", "cumprod"]:
                for axis in range(len(shape)):
                    yield (cmd, op, axis)
    def test_func(self, arg):
        # Free-function form: M.cumsum(a, axis=...).
        (cmd, op, axis) = arg
        cmd += "res = M.%s(a, axis=%d)" % (op, axis)
        return cmd
    def test_method(self, arg):
        # Method form: a.cumsum(axis=...); must match the free function.
        (cmd, op, axis) = arg
        cmd += "res = a.%s(axis=%d)" % (op, axis)
        return cmd
class test_primitives:
    """Accumulate with add/multiply over float64 and bool vectors."""
    def init(self):
        for op in ["add", "multiply"]:
            yield (op, "np.float64")
            yield (op, "np.bool")
    @util.add_bh107_cmd
    def test_vector(self, arg):
        # Build a 10-element random vector of the dtype, then accumulate.
        (op, dtype) = arg
        cmd = "R = bh.random.RandomState(42); a = R.random_of_dtype(shape=10, dtype=%s, bohrium=BH); " % dtype
        cmd += "res = M.%s.accumulate(a)" % op
        return cmd
class test_overwrite:
    """Accumulate with an explicit out= argument overwriting another array."""
    def init(self):
        # Single parameterless case.
        yield None
    @util.add_bh107_cmd
    def test_vector(self, _):
        # `b` receives the accumulation of `a`; copy it into `res` and drop
        # the temporary so only `res` is compared.
        cmd = """\
a = M.arange(10)
b = a.copy()
M.add.accumulate(a, out=b)
res = b.copy()
del b
"""
        return cmd
|
#!/usr/bin/env python
def main():
    """Project Euler 57: among the first 1000 continued-fraction expansions
    of sqrt(2), count those whose numerator has more digits than the
    denominator, and print the count.

    Successive convergents num/den follow:
        den' = den + num
        num' = num + 2*den   (expressed below as num += 2*(den' - num),
                              since den' - num == old den)
    """
    res = 0
    num = 3   # numerator of the first expansion 3/2
    den = 2   # denominator
    it = 1
    while it < 1000:
        it += 1
        den += num
        num += 2 * (den - num)
        # BUG FIX: the original tracked digit counts with power-of-ten
        # ceilings using a strict '>', which would miscount if a value ever
        # landed exactly on a power of ten.  Comparing the decimal lengths
        # directly is both simpler and correct for all values.
        if len(str(num)) > len(str(den)):
            res += 1
    print("If you can trust me, the number you are looking for is " + str(res))
main()
|
from shexer.core.class_shexer import ClassShexer
def get_class_shexer(class_instances_target_dict, class_profile_dict):
    """Build a ClassShexer whose per-class counts are the sizes of the
    instance collections in `class_instances_target_dict`."""
    class_count_dicts = {
        class_key: len(instances)
        for class_key, instances in class_instances_target_dict.items()
    }
    return ClassShexer(
        class_counts_dict=class_count_dicts,
        class_profile_dict=class_profile_dict,
        class_profile_json_file=None
    )
# Re-export the two pipeline entry points at package level.
from .dataPipeline import DataPipeline
from .moleculePipeline import MoleculePipeline
# Explicit public API for `from <package> import *`.
__all__ = ['DataPipeline', 'MoleculePipeline']
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# Test mempool limiting together/eviction with the wallet
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
    """Fill a deliberately tiny (-maxmempool=5) mempool close to capacity,
    then repeatedly push one more large transaction and check that eviction
    keeps usage under the limit without shrinking the pool."""
    def __init__(self):
        # Pre-generated OP_RETURN outputs used to make transactions large.
        # NOTE(review): BitcoinTestFramework.__init__ is not chained here
        # (no super().__init__()); confirm the framework tolerates that.
        self.txouts = gen_return_txouts()
    def setup_network(self):
        # Single node with a 5 MB mempool, a high relay-fee floor, and tight
        # ancestor/descendant package limits so the pool is easy to fill.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir,
                                     ["-maxmempool=5",
                                      "-spendzeroconfchange=0",
                                      "-minlimitertxfee=2",
                                      "-limitdescendantcount=25",
                                      "-limitancestorcount=25",
                                      "-limitancestorsize=101",
                                      "-limitdescendantsize=101",
                                      "-debug"]))
        self.is_network_split = False
        self.sync_all()
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2)
    def run_test(self):
        txids = []
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 91)
        # create a lot of txns up to but not exceeding the maxmempool
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Two batches of 33 big transactions at increasing fee rates.
        for i in range (2):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[33*i:33*i+33], 33, (i+1)*base_fee)
        print(str(self.nodes[0].getmempoolinfo()))
        num_txns_in_mempool = self.nodes[0].getmempoolinfo()["size"]
        # create another txn that will exceed the maxmempool which should evict some random transaction.
        all_txns = self.nodes[0].getrawmempool()
        tries = 0
        while tries < 10:
            i = 2
            new_txn = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[33*i:33*i+33], 1, (i+1)*base_fee + Decimal(0.00001*tries)) # Adding tries to the fee changes the transaction (we are reusing the prev UTXOs)
            print("newtxns " + str(new_txn[0]))
            # Usage must stay strictly below the configured ceiling.
            assert(self.nodes[0].getmempoolinfo()["usage"] < self.nodes[0].getmempoolinfo()["maxmempool"])
            # make sure the mempool count did not change
            waitFor(10, lambda: num_txns_in_mempool == self.nodes[0].getmempoolinfo()["size"])
            # make sure new tx is in the mempool, but since the mempool has a random eviction policy,
            # this tx could be the one that was evicted. So retry 10 times to make failures it VERY unlikely
            # we have a spurious failure due to ejecting the tx we just added.
            if new_txn[0] in self.nodes[0].getrawmempool():
                break
            tries+=1
        if tries >= 10:
            assert False, "Newly created tx is repeatedly NOT being put into the mempool"
# Standard entry point when this file is executed directly.
if __name__ == '__main__':
    MempoolLimitTest().main()
# Hook used by the test harness to run this test programmatically with
# extra debug logging enabled.
def Test():
    t = MempoolLimitTest()
    # t.drop_to_pdb = True
    bitcoinConf = {
        "debug": ["blk", "mempool", "net", "req"],  # debug log categories
        "logtimemicros": 1  # microsecond-resolution log timestamps
    }
    # NOTE(review): standardFlags comes from the test_framework.util wildcard
    # import -- confirm.
    flags = standardFlags()
    t.main(flags, bitcoinConf, None)
|
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/761/A
# The counts form an interval that does not necessarily start at 1, which
# made my first attempt wrong; still a tedious but easy problem.
# The pair "0 0" is impossible (at least one step must exist) -- this cost
# me a second failed submission.
def f(l):
    """Return True when the two counts are feasible: they differ by at most
    one and at least one of them is positive."""
    a, b = l
    close_enough = b in (a - 1, a, a + 1)
    return close_enough and a + b > 0
# Read the two space-separated integers and report whether the pair is valid.
l = list(map(int,input().split()))
print('YES' if f(l) else 'NO')
|
"""Example using Bio.SeqIO to load a FASTA file as a dictionary.
An example function (get_accession_num) is defined to demonstrate
a non-trivial naming scheme where the dictionary key is based on
the record identifier.
The first version uses Bio.SeqIO.parse() and loads the entire
FASTA file into memory as a Python dictionary of SeqRecord
objects. This is *not* suitable for large files.
The second version uses Bio.SeqIO.index() which is suitable
for FASTA files with millions of records.
See also Bio.SeqIO.index_db() and the examples in the main tutorial.
"""
from __future__ import print_function
from Bio.Alphabet import generic_dna
from Bio import SeqIO
def get_accession_num(seq_record):
    """Return the accession number embedded in a record identifier.

    Expects NCBI-style pipe-delimited identifiers such as
    ``gi|2765658|emb|Z78533.1|`` where the fourth field carries the
    accession with an optional trailing ``.<version>`` suffix.
    """
    accession_atoms = seq_record.id.split('|')
    gb_name = accession_atoms[3]
    # Strip the ".<version>" suffix robustly: the previous slice [:-2] only
    # handled single-digit versions (e.g. ".1") and would truncate real
    # characters for versions >= 10 or for ids without a version at all.
    return gb_name.rsplit('.', 1)[0]
# In Memory
# =========
# This next bit of code uses Bio.SeqIO.parse() to load a FASTA file,
# and then turns it into an in-memory python dictionary.
# This is *not* suitable for FASTA files with millions of entries.
# NOTE(review): the generic_dna argument relies on Bio.Alphabet, which was
# removed in Biopython 1.78 -- confirm the pinned Biopython version.
rec_iterator = SeqIO.parse("ls_orchid.fasta", "fasta", generic_dna)
orchid_dict = SeqIO.to_dict(rec_iterator, get_accession_num)
for id_num in orchid_dict:
    print('id number: %s' % id_num)
    print('description: %s' % orchid_dict[id_num].description)
    print('sequence: %s' % orchid_dict[id_num].seq)
# Indexed
# =======
# This next version uses the Bio.SeqIO.index() function which will index
# the FASTA file without loading all the records into memory at once.
# This is suitable for FASTA files with millions of entries.
# NOTE(review): unlike the in-memory version above, no key_function is given
# here, so the dictionary keys are the full record ids, not accession numbers.
orchid_dict = SeqIO.index("ls_orchid.fasta", "fasta", generic_dna)
for id_num in orchid_dict:
    print('id number: %s' % id_num)
    print('description: %s' % orchid_dict[id_num].description)
    print('sequence: %s' % orchid_dict[id_num].seq)
|
import os
import numpy
import sysconfig
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from distutils.core import Extension, setup # pylint: disable=import-error,no-name-in-module
def get_ext_filename_without_platform_suffix(filename):
    """Strip the platform-specific tag from an extension-module filename.

    E.g. ``foo.cpython-310-x86_64-linux-gnu.so`` becomes ``foo.so``.
    Returns the name unchanged when no platform suffix is present.
    """
    name, ext = os.path.splitext(filename)
    ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
    # EXT_SUFFIX can be None on some interpreters/platforms; without this
    # guard the .replace() call below would raise AttributeError.
    if not ext_suffix or ext_suffix == ext:
        return filename
    # Reduce e.g. '.cpython-310-x86_64-linux-gnu.so' to the bare platform
    # tag '.cpython-310-x86_64-linux-gnu' and cut it out of the name.
    ext_suffix = ext_suffix.replace(ext, '')
    idx = name.find(ext_suffix)
    if idx == -1:
        return filename
    else:
        return name[:idx] + ext
class BuildExtWithoutPlatformSuffix(build_ext):
    """build_ext variant that drops the platform tag from output filenames,
    producing e.g. ``transformations.so`` instead of
    ``transformations.cpython-310-x86_64-linux-gnu.so``."""
    def get_ext_filename(self, ext_name):
        # Delegate to the normal naming, then strip the platform suffix.
        filename = super().get_ext_filename(ext_name)
        return get_ext_filename_without_platform_suffix(filename)
# Build the Cython extension; invoked via `python setup.py build_ext`.
setup(
    name='Cython transformations wrapper',
    # Strip the platform tag so the output is a plain transformations.so.
    cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
    ext_modules=cythonize(
        Extension(
            "transformations",
            sources=["transformations.pyx"],
            language="c++",
            # -Wno-nullability-completeness silences clang header noise.
            extra_compile_args=["-std=c++14", "-Wno-nullability-completeness"],
            # NumPy headers, needed when the .pyx uses numpy's C API.
            include_dirs=[numpy.get_include()],
        )
    ))
|
"""Asymetrically course searching algorithm using Pre-trained MS MARCO models."""
import time
import numpy as np
import torch
from sentence_transformers import SentenceTransformer, util
from sqrl import create_app
from sqrl.models import Campus, Course
def get_courses() -> list[Course]:
    """Load every course from the database and return them as a list."""
    print('Loading courses...')
    t0 = time.time()
    flask_app = create_app()
    with flask_app.app_context():
        all_courses = list(Course.objects.all())
    print(
        f'Loaded {len(all_courses)} courses in {time.time() - t0} seconds.')
    return all_courses
def load_model(model_name: str = 'msmarco-distilbert-base-v4',
               ) -> SentenceTransformer:
    """Load the pretrained sentence encoder, moving it to GPU if available."""
    print(f'Loading model "{model_name}""...')
    t0 = time.time()
    encoder = SentenceTransformer(model_name)
    if torch.cuda.is_available():
        # Prefer the GPU whenever one is present.
        encoder = encoder.to(torch.device('cuda'))
        print('Using GPU')
    print(f'Loaded model in {time.time() - t0} seconds.')
    return encoder
# def vectorise_courses(courses: list[Course], model: SentenceTransformer) \
# -> tuple[int, np.ndarray]:
# print('Vectorising courses...')
# start_time = time.time()
# documents = []
# for course in courses:
# br_str = f'Breath Requirements: {course.breadth_categories}'
# year_str = {
# 0: 'zeroth',
# 100: 'first',
# 200: 'second',
# 300: 'third',
# 400: 'fourth'
# }[course.level]
# level_str = f'level {course.level}'
# year_str = f'{year_str} year'
# documents.extend([course.code, course.title, course.description, br_str, level_str, year_str])
# n_tags = len(documents) // len(courses)
# embeddings = model.encode(documents, show_progress_bar=True, convert_to_numpy=True)
# print(f'Vectorised {len(documents)} courses in {time.time() - start_time} seconds.')
# return n_tags, embeddings.reshape((len(courses), n_tags, -1))
def vectorise_courses(courses: list[Course],
                      model: SentenceTransformer) -> np.ndarray:
    """Encode each course as a dense vector from a natural-language summary
    of its code, title, level, campus and description."""
    CAMPUS_NAMES = {
        Campus.ST_GEORGE: 'St George',
        Campus.SCARBOROUGH: 'Scarborough',
        Campus.MISSISSAUGA: 'Mississauga',
    }
    YEAR_WORDS = {0: 'zeroth', 100: 'first', 200: 'second',
                  300: 'third', 400: 'fourth'}
    print('Vectorising courses...')
    t0 = time.time()
    # One lower-cased sentence per course, fed to the encoder in a batch.
    documents = [
        (f'{course.code}, {course.title}, is a {YEAR_WORDS[course.level]} year course (level '
         f'{course.level} course) at the {CAMPUS_NAMES[course.campus]} campus. In {course.code} '
         f'students will learn: {course.description}.').lower()
        for course in courses
    ]
    embeddings = model.encode(
        documents, show_progress_bar=True, convert_to_numpy=True)
    print(
        f'Vectorised {len(documents)} courses in {time.time() - t0} seconds.')
    return embeddings
def search_courses(query: str, top_k: int = 10) -> list[Course]:
    """Print and return the top ``top_k`` courses matching ``query``.

    Relies on the module-level ``model``, ``courses`` and
    ``course_embeddings`` globals initialised at the bottom of this file.
    The previous version documented a list return but returned None, and
    carried a large block of dead commented-out code; both are fixed here.
    """
    print('Searching courses...')
    start_time = time.time()
    query_vector = np.array(model.encode([query]))
    # Cosine similarity between the query and every course embedding.
    scores = util.pytorch_cos_sim(query_vector, course_embeddings)[0]
    top_results = torch.topk(scores, k=top_k)
    print('\n\n======================\n\n')
    print('Query:', query)
    print(f'Found {top_k} results in {time.time() - start_time} seconds:\n')
    matches = []
    for score, index in zip(top_results[0], top_results[1]):
        print(courses[index], f'(score: {score:.4f})')
        matches.append(courses[index])
    return matches
# Module-level pipeline: load the courses, load the encoder, pre-compute the
# embeddings. These globals are read by search_courses().
courses = get_courses()
model = load_model()
course_embeddings = vectorise_courses(courses, model)
|
# encoding: utf-8
"""Module to compute salary-based recommendations using 1 - CDF (CDF =
Cumulative Distribution Function).
Design doc : https://docs.google.com/document/d/1WjyjxYOzC0_98ULXVr4nsmGwcDsN2qm-sXuTb__1nkE
The recommendation can be summed up in several steps:
Step 1 : Check for each available salaries, how many offers we would have
access to if we would decrease our salary by X% (X = 1, 2, .., 10)
Step 2 : Compute a mapping : salary -> how many offers with -1%, -2%, -X% ...
Then we got two approaches:
Approach 1 (compute_recommendation_cdf):
We select the best recommendation for each salary i.e. the one that produce the best raise
in job offers for a percent decrease in salary.
Returns : [(from_salary=18000, salary_decrease='1percent_salary_decrease'),
(from_salary=19000, salary_decrease='5percent_salary_decrease'),
(from_salary=20000, salary_decrease='1percent_salary_decrease'), ..].
NOTE: '1percent_salary_decrease' means that decreasing the salary of 1 percent is
the best option.
Approach 2 (compute_recommendation_score):
We compute a score for each salary decrease and select the one that maximizes the latter.
The score is : sqrt(delta(O))/delta(S), with 'delta(O)' the variation of number of offers
and 'delta(S)' the respective variation of salary.
Returns : [(from_salary=18000, gained_offers=0.33), (from_salary=19000, gained_offers=0.25),
(from_salary=20000, gained_offers=0.26), ..].
NOTE: (from_salary=18000, gained_offers=0.33) means that from salary 18000, the best
recommendation will increase job offers of +33%.
"""
import collections
import numpy as np
import pandas as pd
# Largest salary decrease (in percent) explored by the CDF approach.
_MAX_SALARY_DECREASE_CDF = 10
# (from_salary, gained_offers): best relative gain in offers from this salary.
_RecoScore = collections.namedtuple('RecoScore', ['from_salary', 'gained_offers'])
# (from_salary, salary_decrease): best percent-decrease label from this salary.
_RecoCDF = collections.namedtuple('RecoCDF', ['from_salary', 'salary_decrease'])
def compute_recommendation_cdf(table_offers):
    """Approach 1: compute the salary recommendation based on the CDF, it was designed to be
    called in a groupby.
    Args:
        table_offers: pandas Dataframe containing every offers of
            a single job group.
    Returns:
        a list of namedtuple containing each recommendations in a way that is easy
        to look-up for value. e.g.:
            [(from_salary=18000, salary_decrease='1percent_salary_decrease'),
            (from_salary=19000, salary_decrease='5percent_salary_decrease'), ...]
    """
    # Step 1.
    num_offers_per_salary = _compute_job_offers_salary(table_offers)
    # Step 2.
    all_percent_decrease = _compute_percent_decrease(
        num_offers_per_salary,
        max_salary_decrease=_MAX_SALARY_DECREASE_CDF)
    # Step 3a.
    comparing_reco = _compute_comparing_reco(all_percent_decrease)
    # Compute the best recommendation we can get for each salary.
    # Only strictly positive gains are kept; idxmax picks, for each salary
    # (row), the decrease column with the largest relative gain in offers.
    top1reco = comparing_reco[comparing_reco > 0].idxmax(axis=1)
    top1reco.fillna('no better alternative', inplace=True)
    # Store recommendations in a list of tuple.
    top1reco.sort_index(inplace=True, ascending=True)
    # The shift comparison collapses runs of identical recommendations,
    # keeping only the salaries where the advice actually changes.
    reco_as_namedtuple = top1reco.loc[top1reco.shift(1) != top1reco].reset_index().apply(
        _RecoCDF._make, axis=1)
    return reco_as_namedtuple.tolist()
def compute_recommendation_score(table_offers):
    """Approach 2: compute the salary recommendation based on the score sqrt(delta(O))/delta(S),
    it was designed to be called in a groupby.
    Args:
        table_offers: pandas Dataframe containing every offers of a single job group.
    Returns:
        a list of namedtuple containing each recommendations in a way that is easy
        to look-up for value. e.g.: [(from_salary=18000, gained_offers=0.25),
        (from_salary=19000, gained_offers=0.15), (from_salary=20000, gained_offers=0.05), ..]
    """
    num_offers_with_higher_salary = _compute_job_offers_salary(table_offers)
    # Turn the salary index into a regular column; rows are positional.
    cumul_offers = num_offers_with_higher_salary.reset_index()
    def _scoring(idx):
        # Positional row index -> gained offers at the best-scoring decrease.
        return _apply_score(cumul_offers, idx)
    cumul_offers['gained_offers'] = pd.DataFrame(cumul_offers.reset_index()['index'].map(_scoring))
    reco_as_namedtuple = cumul_offers[['annual_minimum_salary', 'gained_offers']].apply(
        _RecoScore._make, axis=1)
    return reco_as_namedtuple.tolist()
def _compute_job_offers_salary(table_offers):
"""Compute a pandas Series containing the amount of jof offers available
for every salary (cumulative count). It relies on the hypothesis that you
have access to every offers that propose a salary equal or above
your actual salary.
(Step 1)
Args:
table_offers: pandas Dataframe containing every offers of a single job group.
Returns:
Pandas Series containing the amount of job offers (value) by salary (index).
"""
initial_salary = table_offers.copy()
initial_salary.sort_values('annual_minimum_salary', inplace=True)
initial_salary.reset_index(drop=True, inplace=True)
# Cumulative counts.
initial_salary.index.rename('num_offers_with_lower_salary', inplace=True)
initial_salary.reset_index(inplace=True)
initial_salary.set_index('annual_minimum_salary', inplace=True)
initial_salary['num_offers_with_higher_salary'] = (
len(initial_salary) - initial_salary.num_offers_with_lower_salary)
# Necessary for identical salaries.
initial_salary = initial_salary.groupby(initial_salary.index).max()
return initial_salary.num_offers_with_higher_salary
def _compute_percent_decrease(num_offers_per_salary, max_salary_decrease):
    """Compute dataframe with all percent decreases until max_salary_decrease.
    (Step 2)
    Args:
        num_offers_per_salary: Pandas Series containing the amount of job offers (value)
            by salary (index)
        max_salary_decrease (int.): maximal amount of percent you agree to decrease
            your salary from.
    Returns:
        Pandas dataframe containing the amount of job offers available for each salary decrease
        from initial (columns) and for each salary (index).
    """
    # Fractions 0.01, 0.02, ..., max_salary_decrease / 100.
    percent_decreases = ((i + 1) / 100 for i in range(max_salary_decrease))
    all_percent_decreases = pd.DataFrame(num_offers_per_salary)
    for percent_decrease in percent_decreases:
        salary_offers = num_offers_per_salary.copy()
        # Dividing the salary index by (1 - p) re-expresses each count as
        # "offers reachable from salary s after accepting a p% decrease",
        # to get the amount of offers if we would decrease the salary by
        # percent_decrease%.
        salary_offers.index /= (1 - percent_decrease)
        salary_offers.index.name = '%dpercent_decrease' % round(percent_decrease * 100)
        salary_offers.name = '%dpercent_salary_decrease' % round(percent_decrease * 100)
        percent_decrease_df = pd.DataFrame(salary_offers)
        # Outer-join on the salary index so each decrease becomes one column.
        all_percent_decreases = pd.merge(
            all_percent_decreases,
            percent_decrease_df,
            right_index=True,
            left_index=True,
            how='outer')
    # Salaries are not aligned, so we backfill all NaN to get value for
    # every salaries.
    all_percent_decreases.fillna(method='backfill', inplace=True)
    # + fillna(0) when no offers are available (tail).
    all_percent_decreases.fillna(0, inplace=True)
    return all_percent_decreases
def _compute_comparing_reco(all_percent_decreases):
"""Calculate percentage of change in offers between salary variation.
(Step 3a)
Args:
all_percent_decreases: Pandas dataframe containing the amount of job offers available
for each salary decreases from initial (columns) and for each salary (index).
Returns:
a Pandas DataFrame containing the percentage of change in offers
between each salary variation.
"""
comparing_reco = all_percent_decreases.transpose().apply(
lambda x: x.pct_change(1), axis=0).transpose()
comparing_reco.replace([np.inf, -np.inf], np.nan, inplace=True)
return comparing_reco
def _apply_score(num_offers_with_higher_salary, idx):
    """ Calculate a score for each salary of table_offers, maximize it and return the amount of
    gained offers for the optimal decrease of salary.
    Args:
        num_offers_with_higher_salary: Pandas Series containing the amount of job offers (value)
            by salary (index).
        idx: the index of the salary on which to compute the score.
    Returns:
        Gained offers (Float.)
    """
    # Cumulative count.
    # NOTE(review): compute_recommendation_score already passes a
    # reset_index() frame, so this second reset_index() only adds an unused
    # 'index' column -- confirm it is intentional.
    cumul_offers = num_offers_with_higher_salary.reset_index()
    # No lower salary exists to compare against.
    if idx == 0:
        return 0
    # Relative variations of salary / offer counts for every row before idx,
    # measured against the row at idx.
    delta_salaries = _compute_delta_from_index(cumul_offers.annual_minimum_salary, idx)
    delta_offers = _compute_delta_from_index(cumul_offers.num_offers_with_higher_salary, idx)
    # Compute score.
    scores = _compute_score(delta_offers, delta_salaries)
    # Best score = max(score).
    idx_max_score = scores.idxmax()
    # Compute results.
    gained_offers = delta_offers.iloc[idx_max_score]
    return gained_offers
def _compute_score(delta_offers, delta_salaries):
return np.sqrt(delta_offers) / delta_salaries
def _compute_delta_from_index(serie, index):
""" Compute the variations on a specific serie and from a specific index"""
return serie.iloc[:index] / serie.iloc[index] - 1
|
"""
PRIVATE MODULE: do not import (from) it directly.
This module contains the ``BaseMatcher`` class.
"""
from typing import Any, Optional
from jacked._compatibility_impl import get_naked_class
from jacked._injectable import Injectable
from jacked._container import Container
class BaseMatcher:
    """
    This is the base class for all matchers. A matcher tries to match some type
    hint with the type it can handle. For example, if a ``list`` is hinted, the
    ``ListMatcher`` should be able to match it and will handle the injection of
    a ``list``.
    """
    def can_match(self, hint: object) -> bool:
        """
        Determine whether this matcher can match the given ``hint``.
        :param hint: the type hint that is to be matched.
        :return: ``True`` if this matcher can handle ``hint``.
        """
        # ``Any`` is only matched by the matcher whose type is ``Any`` itself.
        if hint is Any:
            return self._matching_type() is Any
        try:
            return issubclass(get_naked_class(hint), self._matching_type())
        except TypeError:
            # ``hint`` was not a class-like object: no match.
            return False
    def match(
            self,
            hint: object,
            injectable: Injectable,
            container: Container) -> Optional[object]:
        """
        See if there is a match between ``hint`` and the ``injectable``. If
        there is a match, return an object that corresponds to ``hint``.
        Otherwise, return ``None``.
        :param hint: the type hint that is to be matched.
        :param injectable: the ``Injectable`` that may be a match for ``hint``.
        :param container: the instance that contains all injectables.
        :return: an object that corresponds to ``hint`` or ``None``.
        """
        raise NotImplementedError
    def priority(self) -> int:
        """
        Determine the priority of this matcher; whether ``can_match`` of this
        matcher should be invoked before or after that of other matchers. A
        higher integer corresponds to a higher priority and thus an earlier
        invocation of ``can_match``.
        :return: the priority of this matcher as a number (0 is lowest).
        """
        return 100  # Default.
    def _matching_type(self) -> type:
        """
        Return the type this matcher can handle.
        :return: the type that can be handled by this matcher.
        """
        raise NotImplementedError
|
import os
from deriva.core import stob
from deriva.core.utils.mime_utils import guess_content_type
from deriva.transfer.download import DerivaDownloadError, DerivaDownloadConfigurationError
from deriva.transfer.download.processors.base_processor import BaseProcessor, LOCAL_PATH_KEY, SOURCE_URL_KEY
from bdbag import bdbag_ro as ro
class BaseTransformProcessor(BaseProcessor):
    """
    Base class for TransformProcessor classes.

    Resolves the processor's input/output paths, optionally records
    research-object (RO) provenance for the produced file, and maintains the
    shared ``outputs`` mapping of relative path -> metadata.
    """
    def __init__(self, envars=None, **kwargs):
        super(BaseTransformProcessor, self).__init__(envars, **kwargs)
        self.base_path = kwargs["base_path"]
        self.input_paths = self.parameters.get("input_paths", [])
        if not self.input_paths:
            self.input_paths = [self.parameters["input_path"]]  # for backward compatibility
        self.sub_path = self.parameters.get("output_path", "")
        # Whether the output is being assembled into a BDBag layout.
        self.is_bag = kwargs.get("bag", False)
        # Metadata recorded by an upstream processor for our (first) input.
        self.transformed_output = self.outputs.get(self.input_path, dict())
        self.url = self.transformed_output.get(SOURCE_URL_KEY)
        # RO file provenance defaults to on only when producing a bag.
        self.ro_file_provenance = stob(self.parameters.get("ro_file_provenance", False if not self.is_bag else True))
        self.ro_manifest = self.kwargs.get("ro_manifest")
        self.ro_author_name = self.kwargs.get("ro_author_name")
        self.ro_author_orcid = self.kwargs.get("ro_author_orcid")
        # Consumed inputs are deleted after a successful transform by default.
        self.delete_input = stob(self.parameters.get("delete_input", True))
        self.input_relpaths = []
        self.input_abspaths = []
        self.output_relpath = None
        self.output_abspath = None
    @property
    def input_path(self):  # for backward compatibility
        return self.input_paths[0]
    @property
    def input_relpath(self):  # for backward compatibility
        return self.input_relpaths[0]
    @property
    def input_abspath(self):  # for backward compatibility
        return self.input_abspaths[0]
    def _create_input_output_paths(self):
        # Populate relative/absolute path pairs for every input and for the
        # single output location.
        for input_path in self.input_paths:
            relpath, abspath = self.create_paths(self.base_path, input_path, is_bag=self.is_bag, envars=self.envars)
            self.input_relpaths.append(relpath)
            self.input_abspaths.append(abspath)
        self.output_relpath, self.output_abspath = self.create_paths(
            self.base_path, self.sub_path, is_bag=self.is_bag, envars=self.envars)
    def _delete_input(self):
        # Remove consumed input files from disk and from the outputs map.
        for input_abspath in self.input_abspaths:
            if os.path.isfile(input_abspath):
                os.remove(input_abspath)
        for input_relpath in self.input_relpaths:
            del self.outputs[input_relpath]
    def process(self):
        """Record RO provenance (if enabled), drop consumed inputs, and
        register the transformed output; returns the updated outputs map."""
        if self.ro_manifest and self.ro_file_provenance:
            ro.add_file_metadata(self.ro_manifest,
                                 source_url=self.url,
                                 local_path=self.output_relpath,
                                 media_type=guess_content_type(self.output_abspath),
                                 retrieved_on=ro.make_retrieved_on(),
                                 retrieved_by=ro.make_retrieved_by(self.ro_author_name, orcid=self.ro_author_orcid),
                                 bundled_as=ro.make_bundled_as())
        if self.delete_input:
            self._delete_input()
        self.outputs.update({self.output_relpath: {LOCAL_PATH_KEY: self.output_abspath, SOURCE_URL_KEY: self.url}})
        return self.outputs
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: agent/api/proto/v1/protoconf_service.proto
# NOTE(review): this module is machine-generated from the .proto file named
# above; regenerate it with protoc rather than editing by hand.
import sys
# Py2/py3 shim: latin1-encode string literals under Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
# File-level descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
    name='agent/api/proto/v1/protoconf_service.proto',
    package='v1',
    syntax='proto3',
    serialized_options=_b('\n\032com.protoconf.agent.api.v1'),
    serialized_pb=_b('\n*agent/api/proto/v1/protoconf_service.proto\x12\x02v1\x1a\x19google/protobuf/any.proto\")\n\x19\x43onfigSubscriptionRequest\x12\x0c\n\x04path\x18\x01 \x01(\t\"3\n\x0c\x43onfigUpdate\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any2[\n\x10ProtoconfService\x12G\n\x12SubscribeForConfig\x12\x1d.v1.ConfigSubscriptionRequest\x1a\x10.v1.ConfigUpdate0\x01\x42\x1c\n\x1a\x63om.protoconf.agent.api.v1b\x06proto3')
    ,
    dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
# Descriptor for the ConfigSubscriptionRequest message (single string field).
_CONFIGSUBSCRIPTIONREQUEST = _descriptor.Descriptor(
    name='ConfigSubscriptionRequest',
    full_name='v1.ConfigSubscriptionRequest',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name='path', full_name='v1.ConfigSubscriptionRequest.path', index=0,
            number=1, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=_b("").decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    serialized_options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=77,
    serialized_end=118,
)
# Descriptor for the ConfigUpdate message (single google.protobuf.Any field).
_CONFIGUPDATE = _descriptor.Descriptor(
    name='ConfigUpdate',
    full_name='v1.ConfigUpdate',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name='value', full_name='v1.ConfigUpdate.value', index=0,
            number=1, type=11, cpp_type=10, label=1,
            has_default_value=False, default_value=None,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    serialized_options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=120,
    serialized_end=171,
)
# Wire up cross-references and register the descriptors.
_CONFIGUPDATE.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['ConfigSubscriptionRequest'] = _CONFIGSUBSCRIPTIONREQUEST
DESCRIPTOR.message_types_by_name['ConfigUpdate'] = _CONFIGUPDATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes built from the descriptors above.
ConfigSubscriptionRequest = _reflection.GeneratedProtocolMessageType('ConfigSubscriptionRequest', (_message.Message,), dict(
    DESCRIPTOR = _CONFIGSUBSCRIPTIONREQUEST,
    __module__ = 'agent.api.proto.v1.protoconf_service_pb2'
    # @@protoc_insertion_point(class_scope:v1.ConfigSubscriptionRequest)
    ))
_sym_db.RegisterMessage(ConfigSubscriptionRequest)
ConfigUpdate = _reflection.GeneratedProtocolMessageType('ConfigUpdate', (_message.Message,), dict(
    DESCRIPTOR = _CONFIGUPDATE,
    __module__ = 'agent.api.proto.v1.protoconf_service_pb2'
    # @@protoc_insertion_point(class_scope:v1.ConfigUpdate)
    ))
_sym_db.RegisterMessage(ConfigUpdate)
DESCRIPTOR._options = None
# Service descriptor: one server-streaming RPC, SubscribeForConfig.
_PROTOCONFSERVICE = _descriptor.ServiceDescriptor(
    name='ProtoconfService',
    full_name='v1.ProtoconfService',
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    serialized_start=173,
    serialized_end=264,
    methods=[
        _descriptor.MethodDescriptor(
            name='SubscribeForConfig',
            full_name='v1.ProtoconfService.SubscribeForConfig',
            index=0,
            containing_service=None,
            input_type=_CONFIGSUBSCRIPTIONREQUEST,
            output_type=_CONFIGUPDATE,
            serialized_options=None,
        ),
    ])
_sym_db.RegisterServiceDescriptor(_PROTOCONFSERVICE)
DESCRIPTOR.services_by_name['ProtoconfService'] = _PROTOCONFSERVICE
# @@protoc_insertion_point(module_scope)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to protection service."""
from karbor.db import base
from karbor.services.protection import rpcapi as protection_rpcapi
class API(base.Base):
    """API for interacting with the protection manager.

    Thin RPC facade: every method forwards its arguments to the protection
    manager service through ``protection_rpcapi``.
    """
    def __init__(self, db_driver=None):
        self.protection_rpcapi = protection_rpcapi.ProtectionAPI()
        super(API, self).__init__(db_driver)
    def restore(self, context, restore, restore_auth):
        """Restore data from a checkpoint."""
        return self.protection_rpcapi.restore(context, restore, restore_auth)
    def verification(self, context, verification):
        """Run a verification operation."""
        return self.protection_rpcapi.verification(context, verification)
    def protect(self, context, plan, checkpoint_properties):
        """Create a checkpoint for a protection plan."""
        return self.protection_rpcapi.protect(context, plan,
                                              checkpoint_properties)
    def copy(self, context, plan):
        """Copy the checkpoints of a protection plan."""
        return self.protection_rpcapi.copy(context, plan)
    def delete(self, context, provider_id, checkpoint_id):
        """Delete a checkpoint from a provider."""
        return self.protection_rpcapi.delete(
            context,
            provider_id,
            checkpoint_id
        )
    def reset_state(self, context, provider_id, checkpoint_id, state):
        """Reset the state of a checkpoint."""
        return self.protection_rpcapi.reset_state(
            context,
            provider_id,
            checkpoint_id,
            state
        )
    def show_checkpoint(self, context, provider_id, checkpoint_id):
        """Show a single checkpoint."""
        return self.protection_rpcapi.show_checkpoint(
            context,
            provider_id,
            checkpoint_id
        )
    def list_checkpoints(self, context, provider_id, marker, limit,
                         sort_keys, sort_dirs, filters, offset, all_tenants):
        """List checkpoints of a provider.

        NOTE(review): ``offset`` is accepted but not forwarded over RPC --
        confirm whether the manager supports offset-based paging.
        """
        return self.protection_rpcapi.list_checkpoints(
            context,
            provider_id,
            marker,
            limit,
            sort_keys,
            sort_dirs,
            filters,
            all_tenants
        )
    def list_protectable_types(self, context):
        """List all protectable resource types."""
        return self.protection_rpcapi.list_protectable_types(context)
    def show_protectable_type(self, context, protectable_type):
        """Show details of one protectable resource type."""
        return self.protection_rpcapi.show_protectable_type(
            context,
            protectable_type
        )
    def list_protectable_instances(self, context, protectable_type,
                                   marker, limit, sort_keys,
                                   sort_dirs, filters, offset, parameters):
        """List instances of a protectable type.

        NOTE(review): ``offset`` is accepted but not forwarded over RPC.
        """
        return self.protection_rpcapi.list_protectable_instances(
            context,
            protectable_type,
            marker,
            limit,
            sort_keys,
            sort_dirs,
            filters,
            parameters
        )
    def list_protectable_dependents(self, context,
                                    protectable_id,
                                    protectable_type,
                                    protectable_name):
        """List resources that depend on the given protectable instance."""
        return self.protection_rpcapi.list_protectable_dependents(
            context,
            protectable_id,
            protectable_type,
            protectable_name
        )
    def show_protectable_instance(self, context,
                                  protectable_type,
                                  protectable_id,
                                  parameters=None):
        """Show details of one protectable instance."""
        return self.protection_rpcapi.show_protectable_instance(
            context,
            protectable_type,
            protectable_id,
            parameters=parameters
        )
    def show_provider(self, context, provider_id):
        """Show a protection provider."""
        return self.protection_rpcapi.show_provider(context, provider_id)
    def list_providers(self, context, marker, limit,
                       sort_keys, sort_dirs, filters, offset):
        """List protection providers.

        NOTE(review): ``offset`` is accepted but not forwarded over RPC.
        """
        return self.protection_rpcapi.list_providers(
            context,
            marker,
            limit,
            sort_keys,
            sort_dirs,
            filters
        )
|
'''
Finite state gridworld environment with multiple agents.
'''
import gym
import numpy as np
class GridWorld(gym.Env):
    '''
    Finite 2D grid with each agent occupying a point in the grid.

    Each agent chooses one of 5 actions (stay / +x / +y / -x / -y); with
    probability 0.2 an agent's action is replaced by a uniformly random one.
    Positions are clipped to the grid bounds. The reward is always 0 and the
    episode never terminates.
    '''
    def __init__(self, grid_size, start_positions):
        # grid_size: (width, height); start_positions: one (x, y) per agent.
        # np.int was removed in NumPy 1.24; the builtin int is the exact
        # equivalent (np.int was merely an alias for it), so all dtype=np.int
        # uses below have been replaced with dtype=int.
        self.grid_size = np.array(grid_size, dtype=int)
        self.start_positions = start_positions
        self.n = len(self.start_positions)
        obs_spaces = [gym.spaces.Tuple([gym.spaces.Discrete(self.grid_size[0]),
                                        gym.spaces.Discrete(self.grid_size[1])])
                      for _ in range(self.n)]
        self.observation_space = gym.spaces.Tuple(obs_spaces)
        self.action_space = gym.spaces.Tuple([gym.spaces.Discrete(5)
                                              for _ in range(self.n)])
        self.reset()
    def reset(self):
        """Reset all agents to their start positions and return the observation."""
        self.positions = np.array(self.start_positions, dtype=int)
        return self._get_obs()
    def step(self, actions):
        """Advance one step; returns (obs, 0, False, {})."""
        actions = list(actions)
        for a in range(len(actions)):
            # 20% chance the intended action is replaced by a random one.
            if np.random.rand() < 0.2:
                actions[a] = self.action_space.spaces[a].sample()
        action_vec = np.array([self._int_to_vec(action) for action in actions], dtype=int)
        self.positions = self.positions + action_vec
        # Keep every agent inside the grid.
        self.positions = np.clip(self.positions, 0, self.grid_size.reshape(1, -1)-1)
        return self._get_obs(), 0, False, {}
    def render(self):
        """Print the tuple-of-tuples observation."""
        print(self._get_obs())
    def get_sim_state(self):
        """Return an immutable snapshot of the simulator state."""
        return self._get_obs()
    def set_sim_state(self, state):
        """Restore agent positions from a previously captured state."""
        self.positions = np.array(state, dtype=int)
        return self._get_obs()
    def _get_obs(self):
        # Immutable (hashable) view of positions: ((x0, y0), (x1, y1), ...).
        return tuple(map(tuple, self.positions))
    def _int_to_vec(self, action):
        """Map a discrete action id to a 2D displacement vector."""
        if action == 0:
            return [0, 0]
        elif action == 1:
            return [1, 0]
        elif action == 2:
            return [0, 1]
        elif action == 3:
            return [-1, 0]
        else:
            return [0, -1]
|
#!/usr/bin/env python
'''
Originally this was used as the main driver for the trading logic. It's sole
purpose in life was to determine if you should buy or sell. Currently that
logic has been removed while I rework the algorithm.
'''
import getMtGoxRequest as req
import time
import dbConnection as db
from decimal import Decimal
from blessings import Terminal # for colors
def main():
    """Poll MtGox every 10 seconds and log balance changes as buys/sells."""
    t = Terminal() # for colors
    memory = {}
    tmp = driver()
    data = tmp.getPrice()
    # Remember the last seen balances so a change can be classified below.
    memory['my_usd'] = data['my_usd']
    memory['my_btc'] = data['my_btc']
    while True:
        if memory['my_usd'] == data['my_usd'] and memory['my_btc'] == data['my_btc']:
            print '{t.green}No Change{t.normal}:'.format(t=t),' My USD:', t.green(str(data['my_usd'])), ' My BTC:', t.green(str(data['my_btc']))
            pass
        else:
            # NOTE(review): a *drop* in USD is recorded as 'Sell' and a rise
            # as 'Buy' -- this looks inverted relative to the usual meaning
            # (spending USD is a buy); confirm intent before relying on it.
            if memory['my_usd'] > data['my_usd']:
                print t.green('Made Sale!')
                db.db.transhistory.insert({'previous':str(memory['my_usd']),
                                           'current':str(data['my_usd']),
                                           'action':'Sell'
                                           })
            else:
                print t.blue('Made Buy!')
                db.db.transhistory.insert({'previous':str(memory['my_usd']),
                                           'current':str(data['my_usd']),
                                           'action':'Buy'
                                           })
            memory['my_usd'] = data['my_usd']
            memory['my_btc'] = data['my_btc']
        print t.bold_blue('Current Price:'), t.bold_blue(str(data['buy']))
        time.sleep(10)
        # Best-effort refresh: on failure wait 30s and retry once, otherwise
        # keep the stale data for the next iteration.
        try:
            data = tmp.getPrice()
        except:
            time.sleep(30)
            try:
                data = tmp.getPrice()
            except:
                pass
class driver:
    # Thin wrapper around the MtGox HTTP API plus (stubbed-out) trading logic.
    # NOTE(review): Python 2 code targeting the defunct MtGox v0/v1 endpoints.

    def __init__(self):
        # Fetch account info once; wallet balances are cached for the whole
        # process lifetime (value_int is MtGox's fixed-point integer encoding).
        self.info = req.get_res('0/info.php', {})
        self.USD = self.info['Wallets']['USD']['Balance']['value_int']
        self.BTC = self.info['Wallets']['BTC']['Balance']['value_int']
        pass

    def getPrice(self):
        # Fetch the public ticker, convert MtGox fixed-point ints to Decimal
        # (USD fields scale by 1e-5, BTC fields by 1e-8), log a price row to
        # Mongo, and return the converted values.
        data = req.get_res('1/BTCUSD/public/ticker', {})
        buyPrice = data['return']['buy']['value_int']
        sellPrice = data['return']['sell']['value_int']
        volume = data['return']['vol']['value_int']
        # NOTE(review): Decimal(int * .00001) goes through binary float, which
        # defeats Decimal's exactness; Decimal(value) / 100000 would be exact.
        price = {
            'buy':Decimal(int(buyPrice) * .00001).quantize(Decimal('1.00000')),
            'sell':Decimal(int(sellPrice) * .00001).quantize(Decimal('1.00000')),
            'volume':Decimal(int(volume) * .00000001).quantize(Decimal('1.00000')),
            'my_btc':Decimal(int(self.BTC) * .00000001).quantize(Decimal('1.00000000')),
            'my_usd':Decimal(int(self.USD) * .00001).quantize(Decimal('1.00000'))
        }
        db.db.priceindex.insert({'buy':str(price['buy']),
                                 'sell':str(price['sell']),
                                 'volume':str(price['volume']),
                                 'exchange':'mtgox',
                                 'holdings':
                                     {'btc':str(price['my_btc']),
                                      'usd':str(price['my_usd'])
                                      }
                                 })
        return price

    def logic(self):
        # Dispatch to buy/sell logic based on the most recent transaction.
        # NOTE(review): several defects here — `.sort(-1)` is not a valid
        # pymongo sort spec; `find()` returns a cursor, so `last['action']`
        # will not index a document; and `t` is undefined in this scope
        # (the Terminal instance lives in main()). Needs rework; confirm.
        last = db.db.transhistory.find().sort(-1).limit(1)
        if last['action'] == 'Buy':
            print 'Do selling logic'
        elif last['action'] == 'Sell':
            print 'Do buying logic'
        else:
            print t.red('No Transactions exist')

    def logic_buy(self, sell_price):
        # Martingale-style ladder: buy bigger tranches the further the price
        # drops below the last sell price; exit if it rises 5% above it.
        # NOTE(review): the elif ordering is wrong — a 20% or 30% drop also
        # satisfies the first (10%) test, so the double-down branches are
        # unreachable; the strongest condition should be checked first.
        cur = self.getPrice()
        if cur['buy'] < (sell_price - (sell_price * .1)):
            '''
            When the price drops 10% spend 10% of your cash
            '''
            print 'Buy with 10% of available cash'
        elif cur['buy'] < (sell_price - (sell_price * .2)):
            '''
            If sell price was 100 and we have gone down 20% from sell (80) buy
            2x the first buy of 10% or ($10) in this case. So we are buying $20
            worth of bitcoin in this round on this algo if you started with $100
            you are now 22% of available cash in ($22 of $80)
            '''
            print 'Double down from original sell price'
        elif cur['buy'] < (sell_price - (sell_price * .3)):
            '''
            If the price drops 30% we are doubling down on the last buy. In this case
            we are going to buy $40 worth of btc. This is similar to betting in blackjack
            at this point we are 85% of available cash ($40 of $70)
            '''
            print 'Double down again'
        elif cur['buy'] > (sell_price * 1.05): #increase of 5% of
            print 'Sell the house'

    def logic_sell(self, buy_prices):
        # Placeholder: sell-side logic not yet implemented.
        pass
# Script entry point: start the polling/trading loop when run directly.
if __name__ == '__main__':
    main()
|
from backend.views import style, test
def setup_routes(app):
    """Register the application's HTTP routes on *app*'s router."""
    router = app.router
    # GET routes:
    router.add_get('/test', test)
    # POST routes:
    router.add_post('/style', style)
|
class BackupNotFoundError(Exception):
    """Raised when a requested backup cannot be located."""

    def __init__(self):
        message = "backup not found"
        super().__init__(message)
class BackupsFailureInGroupError(Exception):
    """Raised when one or more backups in a group failed.

    Attributes:
        completed_backups: dict of completed backups, {dom_name: completed_backup}.
        exceptions: dict of exceptions, {dom_name: exception}.
    """

    def __init__(self, completed_backups, exceptions):
        failed_domains = ", ".join(sorted(exceptions))
        super().__init__(f"backups failed for domains: {failed_domains}")
        self.completed_backups = completed_backups
        self.exceptions = exceptions
class DiskNotFoundError(Exception):
    """Disk not found in a domain."""

    def __init__(self, disk):
        super().__init__(f"disk {disk} not found")
class DomainNotFoundError(Exception):
    """Raised when the named libvirt domain does not exist."""

    def __init__(self, domain):
        super().__init__(f"domain {domain} not found")
class DomainRunningError(Exception):
    """Domain is running when a task would need it to be shutdown."""

    def __init__(self, domain):
        super().__init__(
            f"DomainRunningError: domain {domain} need to be shutdown to "
            "perform the task"
        )
class SnapshotNotStarted(Exception):
    """Raised when a snapshot operation has not been started."""

    def __init__(self):
        message = "snapshot not started"
        super().__init__(message)
class DiskNotSnapshot(Exception):
    """Raised when a disk is not part of the snapshot."""

    def __init__(self, disk):
        super().__init__(f"disk {disk} not snapshot")
|
from aiogram.dispatcher.filters.state import StatesGroup, State
class States(StatesGroup):
    # FSM step: presumably waiting for the user to enter a balance amount —
    # TODO confirm against the handler that sets this state.
    get_amount_balance_targ = State()
class Mailing(StatesGroup):
    # FSM step: waiting for the broadcast (mailing) text from the admin —
    # NOTE(review): inferred from the name; verify against the handler.
    mailing_text_targ = State()
class SendMessage(StatesGroup):
    # FSM step: waiting for the message text to send —
    # NOTE(review): inferred from the name; verify against the handler.
    send_message_targ = State()
class Phone(StatesGroup):
    # FSM step: waiting for the user's phone number —
    # NOTE(review): inferred from the name; verify against the handler.
    get_phone_targ = State()
class AttackChat(StatesGroup):
    # Two-step FSM (declaration order defines the step order in aiogram):
    # first the chat link, then the text —
    # NOTE(review): semantics inferred from names; verify against handlers.
    get_link_chat_targ = State()
    get_text_targ = State()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.