| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
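Each record below describes one source file using the columns above. A minimal sketch of how such rows might be filtered, assuming the split has been exported to a local Parquet shard (the file name, and the choice of pandas over the `datasets` library, are illustrative placeholders, not part of this page):

import pandas as pd

# Hypothetical local shard name; substitute the real export of this split.
df = pd.read_parquet("train-00000-of-NNNNN.parquet")

# Keep small, mostly-alphanumeric Python files released under an MIT license.
subset = df[
    (df["ext"] == "py")
    & (df["size"] < 100_000)
    & (df["alphanum_fraction"] > 0.25)
    & (df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses))
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())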

hexsha: 2b3e31697255a8a75c6e99f73eedb9d930e36533 | size: 522 | ext: py | lang: Python
max_stars_repo_path: task003.py | max_stars_repo_name: lessunc/python-guanabara | max_stars_repo_head_hexsha: 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | max_stars_repo_licenses: ["MIT"] | max_stars_count: 32 | max_stars_repo_stars_event_min_datetime: 2018-12-09T00:44:20.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-11T19:28:53.000Z
max_issues_repo_path: task003.py | max_issues_repo_name: lessunc/python-guanabara | max_issues_repo_head_hexsha: 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: task003.py | max_forks_repo_name: lessunc/python-guanabara | max_forks_repo_head_hexsha: 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | max_forks_repo_licenses: ["MIT"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2019-01-21T08:04:29.000Z | max_forks_repo_forks_event_max_datetime: 2020-06-01T14:27:15.000Z
content:
#coding: utf-8
#-------------------------------------------------------------------
# A program that receives two values and returns their sum.
#-------------------------------------------------------------------
# Summing in Python - Exercise #003
#-------------------------------------------------------------------
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
soma = n1 + n2
print('--' * 22)
print('A soma entre o valor {} e {} é {}.'.format(n1, n2, soma))
print('--' * 22)
avg_line_length: 32.625 | max_line_length: 68 | alphanum_fraction: 0.377395

hexsha: 13f68dd0fe0df8ec273db94cd2007f608eeaec88 | size: 18,859 | ext: py | lang: Python
max_stars_repo_path: SMPyBandits/main.py | max_stars_repo_name: balbok0/SMPyBandits | max_stars_repo_head_hexsha: c8ff765687989e0c20ab42c2e2e1d8440923225b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 309 | max_stars_repo_stars_event_min_datetime: 2018-03-03T22:07:59.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-26T08:15:58.000Z
max_issues_repo_path: SMPyBandits/main.py | max_issues_repo_name: balbok0/SMPyBandits | max_issues_repo_head_hexsha: c8ff765687989e0c20ab42c2e2e1d8440923225b | max_issues_repo_licenses: ["MIT"] | max_issues_count: 125 | max_issues_repo_issues_event_min_datetime: 2018-02-27T22:54:03.000Z | max_issues_repo_issues_event_max_datetime: 2021-11-05T10:50:15.000Z
max_forks_repo_path: SMPyBandits/main.py | max_forks_repo_name: balbok0/SMPyBandits | max_forks_repo_head_hexsha: c8ff765687989e0c20ab42c2e2e1d8440923225b | max_forks_repo_licenses: ["MIT"] | max_forks_count: 60 | max_forks_repo_forks_event_min_datetime: 2018-04-30T20:54:24.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-21T22:41:46.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to load the config, run the simulations, and plot them.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "Lilian Besson"
__version__ = "0.9"
# Generic imports
import sys
from os import mkdir, getenv
import os.path
import importlib
# Backup evaluation object
import pickle
# Local imports
configuration_module = None
try:
from save_configuration_for_reproducibility import save_configuration_for_reproducibility
from Environment import Evaluator, notify, start_tracemalloc, display_top_tracemalloc
# Import a configuration file
for arg in sys.argv:
if "configuration" in arg:
filename = arg.replace('.py', '')
dirname, module_name = os.path.dirname(filename), os.path.basename(filename)
sys.path.insert(0, dirname)
print("Reading argument from command line, importing the configuration module from arg = {} (module = {} in directory {})...".format(arg, module_name, dirname))
configuration_module = importlib.import_module(module_name)
if configuration_module is None:
import configuration as configuration_module
except ImportError:
from SMPyBandits.save_configuration_for_reproducibility import save_configuration_for_reproducibility
from SMPyBandits.Environment import Evaluator, notify, start_tracemalloc, display_top_tracemalloc
for arg in sys.argv:
if "configuration" in arg:
filename = arg.replace('.py', '')
dirname, module_name = os.path.dirname(filename), os.path.basename(filename)
sys.path.insert(0, dirname)
print("Reading argument from command line, importing the configuration module from arg = {} (module = {} in directory {})...".format(arg, module_name, dirname))
configuration_module = importlib.import_module('.{}'.format(module_name), package='SMPyBandits')
if configuration_module is None:
import SMPyBandits.configuration as configuration_module
# Get the configuration dictionary
configuration = configuration_module.configuration
# For instance, call SLEEP=12h to delay the simulation for 12 hours
if getenv('SLEEP', 'False') != 'False':
from subprocess import call
SLEEP = str(getenv('SLEEP'))
print("\nSleeping for", SLEEP, "seconds before starting the simulation...") # DEBUG
call(["sleep", SLEEP]) # more general
print("Done Sleeping for", SLEEP, "seconds... Now I can start the simulation...")
USE_PICKLE = False #: Should we save the Evaluator object to a .pickle file at the end of the simulation?
USE_HD5 = True #: Should we save the data to a .hdf5 file at the end of the simulation?
# Parameters for the plots (where to save them) and what to draw
PLOT_DIR = getenv('PLOT_DIR', 'plots') #: Directory for the plots
semilogx = False #: Plot in semilogx by default?
semilogy = False #: Plot in semilogy by default?
loglog = False #: Plot in loglog by default?
meanReward = True #: Plot mean regret ?
normalizedRegret = True #: Plot instantaneous regret?
plotSTD = True #: Plot regret with a STD?
plotSTD = False #: Plot regret with a STD?
plotMaxMin = True #: Plot +- max - min (amplitude) for regret.
plotMaxMin = False #: Plot +- max - min (amplitude) for regret.
saveallfigs = False #: Save all the figures ?
saveallfigs = True # XXX don't keep it like this when experimenting
# Parameters for the Evaluator object
finalRanksOnAverage = True #: Use an average instead of the last value for the final ranking of the tested policies
averageOn = 1e-2 #: Average the final rank on the 1% last time steps
#: Whether to do the plots or not
do_plots = True
if getenv('NOPLOTS', 'False') == 'True' and __name__ == '__main__':
print("====> TURNING NOPLOTS MODE ON <=====")
do_plots = False
#: Whether to show plots, one by one, or not at all and just save them
interactive = True # XXX don't keep it like this
interactive = False
#: Debug the memory consumption? Using :func:`Environment.memory_consumption.display_top_tracemalloc`.
debug_memory = False
if getenv('DEBUG', 'False') == 'True' and __name__ == '__main__':
print("====> TURNING DEBUG MODE ON <=====")
saveallfigs, interactive = False, True
if getenv('DEBUGMEMORY', 'False') == 'True' and __name__ == '__main__':
print("====> TURNING DEBUGMEMORY MODE ON <=====")
debug_memory = True
if getenv('SAVEALL', 'False') == 'True' and __name__ == '__main__':
print("====> SAVING FIGURES <=====")
saveallfigs = True
import matplotlib as mpl
FIGSIZE = (19.80, 10.80) #: Figure size, in inches!
# FIGSIZE = (16, 9) #: Figure size, in inches!
mpl.rcParams['figure.figsize'] = FIGSIZE
if getenv('XKCD', 'False') == 'True' and interactive and not saveallfigs:
import matplotlib.pyplot as plt
plt.xkcd() # XXX turn on XKCD-like style ?! cf. http://matplotlib.org/xkcd/ for more details
# FIXED try to switch to a non interactive backend when running without DEBUG=True
# https://matplotlib.org/api/matplotlib_configuration_api.html?highlight=matplotlib%20use#matplotlib.use
if not interactive:
import matplotlib
print("Warning: Non interactive simulations, switching from '{}' backend to 'agg'...".format(matplotlib.get_backend())) # DEBUG
matplotlib.use("agg", warn=True, force=True)
if interactive:
import seaborn as sns
sns.set(context="talk", style="whitegrid", palette="hls", font="sans-serif", font_scale=0.65)
import matplotlib as mpl
# Configure size for axes and x and y labels
mpl.rcParams['axes.labelsize'] = "x-small"
mpl.rcParams['xtick.labelsize'] = "xx-small"
mpl.rcParams['ytick.labelsize'] = "xx-small"
mpl.rcParams['figure.titlesize'] = "x-small"
if __name__ == '__main__':
# Update configuration
configuration['showplot'] = interactive
if os.path.isdir(PLOT_DIR):
print("{}/ is already a directory here...".format(PLOT_DIR))
elif os.path.isfile(PLOT_DIR):
raise ValueError("[ERROR] {} is a file, cannot use it as a directory !".format(PLOT_DIR))
else:
mkdir(PLOT_DIR)
evaluation = Evaluator(configuration, finalRanksOnAverage=finalRanksOnAverage, averageOn=averageOn)
# Start the evaluation and then print final ranking and plot, for each environment
N = len(evaluation.envs)
for envId, env in enumerate(evaluation.envs):
# # Plot histogram for rewards for that env
# if do_plots and interactive:
# env.plotHistogram(evaluation.horizon * evaluation.repetitions)
# (almost) unique hash from the configuration
hashvalue = abs(hash((tuple(configuration.keys()), tuple([(len(k) if isinstance(k, (dict, tuple, list)) else k) for k in configuration.values()]))))
if debug_memory: start_tracemalloc() # DEBUG
# --- Also plotting the history of means
if interactive:
evaluation.plotHistoryOfMeans(envId) # XXX To plot without saving
# Evaluate just that env
evaluation.startOneEnv(envId, env)
# Display the final regrets and rankings for that env
evaluation.printLastRegrets(envId)
evaluation.printFinalRanking(envId, moreAccurate=True)
evaluation.printRunningTimes(envId)
evaluation.printMemoryConsumption(envId)
evaluation.printNumberOfCPDetections(envId)
if debug_memory: display_top_tracemalloc() # DEBUG
# Sub folder with a useful name
subfolder = "SP__K{}_T{}_N{}__{}_algos".format(env.nbArms, configuration['horizon'], configuration['repetitions'], len(configuration['policies']))
plot_dir = os.path.join(PLOT_DIR, subfolder)
# Get the name of the output file
imagename = "main____env{}-{}_{}".format(envId + 1, N, hashvalue)
mainfig = os.path.join(plot_dir, imagename)
savefig = mainfig
picklename = mainfig + '.pickle'
h5pyname = mainfig + '.hdf5'
if saveallfigs:
# Create the sub folder
if os.path.isdir(plot_dir):
print("{} is already a directory here...".format(plot_dir))
elif os.path.isfile(plot_dir):
raise ValueError("[ERROR] {} is a file, cannot use it as a directory !".format(plot_dir))
else:
mkdir(plot_dir)
# --- DONE Copy (save) the current full configuration file to this folder as configuration__hashvalue.py
# --- DONE Save just the configuration to a minimalist python file
# TODO do the same on other main_*.py scripts
save_configuration_for_reproducibility(
configuration=configuration,
configuration_module=configuration_module,
plot_dir=plot_dir,
hashvalue="env{}-{}_{}".format(envId + 1, N, hashvalue),
main_name="main.py",
)
# --- Save it to a pickle file
if USE_PICKLE:
with open(picklename, 'wb') as picklefile:
print("Saving the Evaluator 'evaluation' object to", picklename, "...")
pickle.dump(evaluation, picklefile, pickle.HIGHEST_PROTOCOL)
# --- Save it to a HD5 file
if USE_HD5:
evaluation.saveondisk(h5pyname)
if not do_plots:
continue # XXX don't use break, it exits the loop over the different environments
# --- Also plotting the history of means
if saveallfigs:
savefig = mainfig.replace('main', 'main_HistoryOfMeans')
print(" - Plotting the history of means, and saving the plot to {} ...".format(savefig))
evaluation.plotHistoryOfMeans(envId, savefig=savefig) # XXX To save the figure
else:
evaluation.plotHistoryOfMeans(envId) # XXX To plot without saving
# --- Also plotting the boxplot of last regrets
if saveallfigs:
savefig = mainfig.replace('main', 'main_BoxPlotRegret')
evaluation.plotLastRegrets(envId, boxplot=True, savefig=savefig)
else:
evaluation.plotLastRegrets(envId, boxplot=True) # XXX To plot without saving
# --- Also plotting the running times
if saveallfigs:
savefig = mainfig.replace('main', 'main_RunningTimes')
print(" - Plotting the running times, and saving the plot to {} ...".format(savefig))
evaluation.plotRunningTimes(envId, savefig=savefig) # XXX To save the figure
else:
evaluation.plotRunningTimes(envId) # XXX To plot without saving
# --- Also plotting the memory consumption
if saveallfigs:
savefig = mainfig.replace('main', 'main_MemoryConsumption')
print(" - Plotting the memory consumption, and saving the plot to {} ...".format(savefig))
evaluation.plotMemoryConsumption(envId, savefig=savefig) # XXX To save the figure
else:
evaluation.plotMemoryConsumption(envId) # XXX To plot without saving
# --- Also plotting the number of detected change-points
if saveallfigs:
savefig = mainfig.replace('main', 'main_NumberOfCPDetections')
print(" - Plotting the number of change-point detections, and saving the plot to {} ...".format(savefig))
evaluation.plotNumberOfCPDetections(envId, savefig=savefig) # XXX To save the figure
else:
evaluation.plotNumberOfCPDetections(envId) # XXX To plot without saving
if meanReward:
if saveallfigs:
savefig = mainfig.replace('main', 'main_MeanRewards')
print(" - Plotting the mean rewards, and saving the plot to {} ...".format(savefig))
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, meanReward=True) # XXX To save the figure
else:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, meanReward=True) # XXX To plot without saving
# --- Also plotting the regret
if saveallfigs:
print(" - Plotting the cumulative rewards, and saving the plot to {} ...".format(savefig))
savefig = mainfig
evaluation.plotRegrets(envId, savefig=savefig, moreAccurate=True) # XXX To save the figure
savefig = mainfig.replace('main', 'main_LessAccurate')
evaluation.plotRegrets(envId, savefig=savefig, moreAccurate=False) # XXX To save the figure
savefig = mainfig.replace('main', 'main_BestArmPulls')
print(" - Plotting the probability of picking the best arm, and saving the plot to {} ...".format(savefig))
# --- Also plotting the probability of picking the best arm
evaluation.plotBestArmPulls(envId, savefig=savefig) # XXX To save the figure
# if configuration['horizon'] >= 1000:
# savefig = mainfig.replace('main', 'main_semilogx')
# evaluation.plotRegrets(envId, savefig=savefig, semilogx=True) # XXX To save the figure
savefig = mainfig.replace('main', 'main_semilogy')
evaluation.plotRegrets(envId, savefig=savefig, semilogy=True) # XXX To save the figure
if configuration['horizon'] >= 1000:
savefig = mainfig.replace('main', 'main_loglog')
evaluation.plotRegrets(envId, savefig=savefig, loglog=True) # XXX To save the figure
if configuration['repetitions'] > 1:
if plotSTD:
savefig = savefig.replace('main', 'main_STD')
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, plotSTD=True) # XXX To save the figure
if plotMaxMin:
savefig = savefig.replace('main', 'main_MaxMin')
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, plotMaxMin=True) # XXX To save the figure
else:
evaluation.plotRegrets(envId, moreAccurate=True) # XXX To plot without saving
evaluation.plotRegrets(envId, moreAccurate=False) # XXX To plot without saving
# --- Also plotting the probability of picking the best arm
evaluation.plotBestArmPulls(envId) # XXX To plot without saving
# if configuration['horizon'] >= 1000:
# evaluation.plotRegrets(envId, semilogx=True) # XXX To plot without saving
evaluation.plotRegrets(envId, semilogy=True) # XXX To plot without saving
if configuration['horizon'] >= 1000:
evaluation.plotRegrets(envId, loglog=True)
if configuration['repetitions'] > 1:
if plotSTD:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, plotSTD=True) # XXX To plot without saving
if plotMaxMin:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, plotMaxMin=True) # XXX To plot without saving
if normalizedRegret:
if saveallfigs:
savefig = mainfig.replace('main', 'main_Normalized')
print(" - Plotting the normalized regret, and saving the plot to {} ...".format(savefig))
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True) # XXX To save the figure
if configuration['repetitions'] > 1:
if plotSTD:
savefig = savefig.replace('main', 'main_STD')
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True, plotSTD=True) # XXX To save the figure
if plotMaxMin:
savefig = savefig.replace('main', 'main_MaxMin')
evaluation.plotRegrets(envId, savefig=savefig, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True, plotMaxMin=True) # XXX To save the figure
else:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True) # XXX To plot without saving
if configuration['repetitions'] > 1:
if plotSTD:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True, plotSTD=True) # XXX To plot without saving
if plotMaxMin:
evaluation.plotRegrets(envId, semilogx=semilogx, semilogy=semilogy, loglog=loglog, normalizedRegret=True, plotMaxMin=True) # XXX To plot without saving
# --- Also plotting the histograms of regrets
if saveallfigs:
savefig = mainfig.replace('main', 'main_HistogramsRegret')
evaluation.plotLastRegrets(envId, subplots=False, savefig=savefig)
print(" - Plotting the histograms of regrets, and saving the plot to {} ...".format(savefig))
# for sharex, sharey in product([True, False], repeat=2): # XXX 3 out of 4 were UGLY!
for sharex, sharey in [(True, False)]:
savefig = mainfig.replace('main', 'main_HistogramsRegret{}{}'.format(
"_shareX" if sharex else "",
"_shareY" if sharey else "",
))
print(" and saving the plot to {} ...".format(savefig))
evaluation.plotLastRegrets(envId, savefig=savefig, sharex=sharex, sharey=sharey) # XXX To save the figure
print(" - Plotting the histograms of regrets for each algorithm separately, and saving the plots ...")
savefig = mainfig.replace('main', 'main_HistogramsRegret')
print(" and saving the plot to {} ...".format(savefig))
evaluation.plotLastRegrets(envId, all_on_separate_figures=True, savefig=savefig) # XXX To save the figure
else:
evaluation.plotLastRegrets(envId, subplots=False) # XXX To plot without saving
# for sharex, sharey in product([True, False], repeat=2): # XXX 3 out of 4 were UGLY!
for sharex, sharey in [(True, False)]:
evaluation.plotLastRegrets(envId, sharex=sharex, sharey=sharey) # XXX To plot without saving
# evaluation.plotLastRegrets(envId, all_on_separate_figures=True) # XXX To plot without saving
if saveallfigs:
print("\n\n==> To see the figures, do :\neog", os.path.join(plot_dir, "main*{}.png".format(hashvalue))) # DEBUG
# Done
print("Done for simulations main.py ...")
notify("Done for simulations main.py ...")
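# A minimal sketch of the `configuration` dict that this script expects the
# imported configuration module to expose. Only the keys read directly in
# main.py are listed; the Evaluator class consumes additional fields that are
# not visible here, so this is an assumption-laden illustration, not the full
# schema:
#
# configuration = {
#     "horizon": 1000,       # used for the sub-folder name and the loglog plots
#     "repetitions": 4,      # values > 1 enable the STD / max-min plot variants
#     "policies": [...],     # list of policies; its length appears in the folder name
#     # "showplot" is overwritten by main.py from the `interactive` flag
# }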
avg_line_length: 53.424929 | max_line_length: 189 | alphanum_fraction: 0.651678

hexsha: c8f41bcb84c03c892e260b0958c2977b8768ccad | size: 4,193 | ext: py | lang: Python
max_stars_repo_path: paradrop/daemon/paradrop/lib/misc/procmon.py | max_stars_repo_name: lhartung/paradrop-test | max_stars_repo_head_hexsha: 22a491bf3198bf61bcabaedfaecde5b9be97e76f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2018-03-22T13:04:19.000Z | max_stars_repo_stars_event_max_datetime: 2018-03-22T13:04:19.000Z
max_issues_repo_path: paradrop/daemon/paradrop/lib/misc/procmon.py | max_issues_repo_name: VegetableChook/Paradrop | max_issues_repo_head_hexsha: a38e1773877d5b136c3b626edd8c033a12b43e56 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 7 | max_issues_repo_issues_event_min_datetime: 2021-03-18T20:54:50.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:27:40.000Z
max_forks_repo_path: paradrop/daemon/paradrop/lib/misc/procmon.py | max_forks_repo_name: lhartung/paradrop-test | max_forks_repo_head_hexsha: 22a491bf3198bf61bcabaedfaecde5b9be97e76f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
The ProcessMonitor class ensures that a service is running and that its pid
file is consistent.
This addresses an issue we have had with Docker on Ubuntu Snappy, where its
pid file sometimes persists and prevents the service from starting.
"""
import glob
import os
import subprocess
import time
import psutil
class ProcessMonitor(object):
# Specify allowed corrective actions, which we can change when running
# locally to disable rebooting, for example.
#
# TODO: Implement a more general module for checking system health and
# applying corrective action.
allowedActions = set(["restart", "reboot"])
def __init__(self, service, cmdstring=None, pidfile=None, action="restart"):
"""
service: service name (used to restart it).
cmdstring: string to look for in running command name (e.g. "docker")
pidfile: None or path to look for pid file(s).
Bash-style globbing is supported, e.g. "/var/snap/docker/*/run/docker.pid".
action: "restart" the service or "reboot" the machine
"""
self.service = service
self.action = action
if cmdstring is not None:
self.cmdstring = cmdstring
else:
self.cmdstring = service
if pidfile is not None:
self.pidfiles = [ pidfile ]
else:
self.pidfiles = [
"/var/snap/{service}/current/run/{service}.pid".format(service=service)]
def check(self):
"""
Check that the service is running and consistent with pid file(s).
Returns True or False.
"""
# Set of pids (strings) where the command string matches what we are
# looking for.
detected_pids = set()
# Set of pids (strings) that are both running processes and found in
# pid files.
consistent_pids = set()
# Search for running processes that match our command string.
for proc in psutil.process_iter():
try:
if self.cmdstring in proc.name():
detected_pids.add(str(proc.pid))
# We could also get psutil.ZombieProcess or
# psutil.AccessDenied. We want those to be logged.
except psutil.NoSuchProcess:
pass
# Search for pid file(s) and check consistency.
for pidfile in self.pidfiles:
for path in glob.iglob(pidfile):
with open(path, 'r') as source:
pid = source.read().strip()
if pid in detected_pids:
consistent_pids.add(pid)
else:
# Delete the stale pid file.
os.remove(path)
return len(consistent_pids) > 0
def restart(self):
"""
Restart the service.
"""
if self.action == "restart":
cmd = ["snappy", "service", self.service, "restart"]
else:
cmd = ["shutdown", "-r", "now"]
if self.action in ProcessMonitor.allowedActions:
print("Running \"{}\" to fix {}".format(" ".join(cmd), self.service))
return subprocess.call(cmd)
else:
print("Warning: would run \"{}\" to fix {}, but not allowed.".format(
" ".join(cmd), self.service))
def ensureReady(self, delay=5, tries=3):
"""
Loop through checking and restarting the service until it is ready or
the maximum number of tries has been reached.
delay: time delay (seconds) between retries.
tries: maximum number of restart-wait-check cycles.
"""
ready = self.check()
if ready:
return True
for t in range(tries):
time.sleep(delay)
ready = self.check()
if ready:
return True
else:
self.restart()
time.sleep(delay)
return self.check()
dockerMonitor = ProcessMonitor("docker", action="reboot")
containerdMonitor = ProcessMonitor("docker-containerd",
pidfile="/var/snap/docker/current/run/docker/libcontainerd/docker-containerd.pid",
action="reboot")
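# A minimal usage sketch for the monitors defined above (hedged illustration,
# not part of the original module; it assumes a host where the "docker" snap
# service is actually installed):
#
# if __name__ == "__main__":
#     if dockerMonitor.ensureReady(delay=5, tries=3):
#         print("docker is running and its pid file is consistent")
#     else:
#         print("docker could not be brought up; corrective action was attempted")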
avg_line_length: 32.253846 | max_line_length: 90 | alphanum_fraction: 0.584307

hexsha: 1957f5e26bdf58278258a8b7f2912801d0fefd20 | size: 2,204 | ext: py | lang: Python
max_stars_repo_path: pytests/tuqquery/tuq_queryworkbench.py | max_stars_repo_name: pavithra-mahamani/testrunner | max_stars_repo_head_hexsha: d204491caa23f1fbe90505646534ed7810d96289 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pytests/tuqquery/tuq_queryworkbench.py | max_issues_repo_name: pavithra-mahamani/testrunner | max_issues_repo_head_hexsha: d204491caa23f1fbe90505646534ed7810d96289 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pytests/tuqquery/tuq_queryworkbench.py | max_forks_repo_name: pavithra-mahamani/testrunner | max_forks_repo_head_hexsha: d204491caa23f1fbe90505646534ed7810d96289 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-07-24T07:15:59.000Z | max_forks_repo_forks_event_max_datetime: 2020-07-24T07:15:59.000Z
content:
import time
from TestInput import TestInputSingleton
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection
class QueryWorkbenchTests(BaseTestCase):
n1ql_port = 8093
_input = TestInputSingleton.input
num_items = _input.param("items", 100)
_value_size = _input.param("value_size", 256)
gen_create = BlobGenerator('loadOne', 'loadOne', _value_size, end=num_items)
#bucket and ram quota
buckets_ram = {
"CUSTOMER": 100,
"DISTRICT": 100,
"HISTORY": 100,
"ITEM": 100,
"NEW_ORDER": 100,
"ORDERS": 100,
"ORDER_LINE": 100}
#"default:": 100}
def setUp(self):
super(QueryWorkbenchTests, self).setUp()
server = self.master
if self.input.tuq_client and "client" in self.input.tuq_client:
server = self.tuq_client
self.rest = RestConnection(server)
#self.rest.delete_bucket("default")
time.sleep(20)
# drop and recreate buckets
for i, bucket_name in enumerate(self.buckets_ram.keys()):
self.rest.create_bucket(bucket=bucket_name,
ramQuotaMB=int(self.buckets_ram[bucket_name]),
replicaNumber=0,
proxyPort=11218+i)
self.log.info(self.servers[0])
#bucket = self.src_cluster.get_bucket_by_name(bucket_name)
time.sleep(20)
#self.rest.create_bucket(bucket="default",
#ramQuotaMB=int(self.buckets_ram["default"]),
#replicaNumber=0,
#proxyPort=11218)
self._load_all_buckets(self, self.servers[0], self.gen_create, "create", 0)
#time.sleep(20)
def tearDown(self):
super(QueryWorkbenchTests, self).tearDown()
def test_describe(self):
for bucket_name in self.rest.get_buckets():
query = "infer %s" % bucket_name
self.log.info(query)
result = self.rest.query_tool(query, self.n1ql_port)
self.log.info(result)
avg_line_length: 38.666667 | max_line_length: 83 | alphanum_fraction: 0.598457

hexsha: fb7b33920348c9d6df96f724df004fb8546536df | size: 3,490 | ext: py | lang: Python
max_stars_repo_path: Algorithms/problem28/main28.py | max_stars_repo_name: lrussell21/ICPC_Template_Code | max_stars_repo_head_hexsha: 0aa5f202c17e2fd8101821685c9ce459a15e2f96 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Algorithms/problem28/main28.py | max_issues_repo_name: lrussell21/ICPC_Template_Code | max_issues_repo_head_hexsha: 0aa5f202c17e2fd8101821685c9ce459a15e2f96 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Algorithms/problem28/main28.py | max_forks_repo_name: lrussell21/ICPC_Template_Code | max_forks_repo_head_hexsha: 0aa5f202c17e2fd8101821685c9ce459a15e2f96 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import os
from collections import defaultdict
# Used Tarjan's algorithm mentioned in class.
# Got it from here https://www.geeksforgeeks.org/tarjan-algorithm-find-strongly-connected-components/
class Graph:
def __init__(self,vertices):
#No. of vertices
self.V= vertices
# default dictionary to store graph
self.graph = defaultdict(list)
self.Time = 0
self.outputnums = []
self.scc = 0
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def tarjansUtil(self,u, low, disc, stackMember, st):
# Initialize discovery time and low value
disc[u] = self.Time
low[u] = self.Time
self.Time += 1
stackMember[u] = True
st.append(u)
# Go through all vertices adjacent to this
for v in self.graph[u]:
# If v is not visited yet, then recur for it
if disc[v] == -1 :
self.tarjansUtil(v, low, disc, stackMember, st)
# Check if the subtree rooted with v has a connection to
# one of the ancestors of u
# Case 1 (per above discussion on Disc and Low value)
low[u] = min(low[u], low[v])
elif stackMember[v] == True:
'''Update low value of 'u' only if 'v' is still in stack
(i.e. it's a back edge, not cross edge).
Case 2 (per above discussion on Disc and Low value) '''
low[u] = min(low[u], disc[v])
outputNums = []
# head node found, pop the stack and print an SCC
w = -1 #To store stack extracted vertices
if low[u] == disc[u]:
while w != u:
w = st.pop()
self.outputnums.append(w)
stackMember[w] = False
self.scc += 1
def tarjans(self):
# Mark all the vertices as not visited
# and Initialize parent and visited,
# and ap(articulation point) arrays
disc = [-1] * (self.V + 1)
low = [-1] * (self.V + 1)
stackMember = [False] * (self.V + 1)
st =[]
# Call the recursive helper function
# to find articulation points
# in DFS tree rooted with vertex 'i'
for i in range(self.V):
if disc[i] == -1:
self.tarjansUtil(i, low, disc, stackMember, st)
def main():
filename = "/input.txt"
dir_path = os.path.dirname(__file__)
f = open(str(dir_path) + filename)
numInput = f.readlines()
nodes = 0
listOfArrays = []
firstLineSkip = True
for x in numInput:
if firstLineSkip:
nodes = x.strip().split()
nodes = int(nodes[0])
print(nodes)
firstLineSkip = False
continue
listOfArrays.append(list(map(int,x.strip().split())))
print(listOfArrays)
g = Graph(nodes)
for edge in listOfArrays:
g.addEdge(edge[0], edge[1])
g.tarjans()
print(g.scc - 1)
# File Output
filename = "/output.txt"
dir_path = os.path.dirname(__file__)
filewrite = open(str(dir_path) + filename, 'w')
filewrite.write(str(g.scc - 1))
if __name__== "__main__":
main()
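# A small self-contained check of the Graph class above (hedged illustration,
# not part of the original file): the cycle 0 -> 1 -> 2 -> 0 plus the edge
# 2 -> 3 yields two strongly connected components, {0, 1, 2} and {3}.
#
# g_demo = Graph(4)
# for u, v in [(0, 1), (1, 2), (2, 0), (2, 3)]:
#     g_demo.addEdge(u, v)
# g_demo.tarjans()
# assert g_demo.scc == 2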
avg_line_length: 28.842975 | max_line_length: 101 | alphanum_fraction: 0.513467

hexsha: cc44b83a7d2676115a908689ca484abe77d6af67 | size: 687 | ext: py | lang: Python
max_stars_repo_path: 15/15_xjf.py | max_stars_repo_name: kbcao/leetcode | max_stars_repo_head_hexsha: b7d90d3141546b353dd80a99864f4bc0578a7c63 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 15/15_xjf.py | max_issues_repo_name: kbcao/leetcode | max_issues_repo_head_hexsha: b7d90d3141546b353dd80a99864f4bc0578a7c63 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 15/15_xjf.py | max_forks_repo_name: kbcao/leetcode | max_forks_repo_head_hexsha: b7d90d3141546b353dd80a99864f4bc0578a7c63 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from typing import List

class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
dic, res = {}, []
for n in nums:
dic.setdefault(n, 0)
dic[n] += 1
nums = list(dic.keys())
nums.sort()
for i in range(len(nums)):
n1 = nums[i]
if n1 > 0: break
if dic[n1] > (2 if n1 == 0 else 1) and n1 * -2 in dic:
res.append([n1, n1, n1 * -2])
for j in range(i + 1, len(nums)):
n2, n3 = nums[j], -n1 - nums[j]
if n3 < n2: break
if n3 in dic and (dic[n3] > 1 or n2 != n3):
res.append([n1, n2, n3])
return res
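# A quick illustrative check against the usual example input for this problem
# (hedged; not part of the original file):
#
# print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
# # -> [[-1, -1, 2], [-1, 0, 1]]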
avg_line_length: 36.157895 | max_line_length: 66 | alphanum_fraction: 0.413392

hexsha: 51d3892b97ab7b5bf7a0cd844028e5a069ddbadf | size: 56,772 | ext: py | lang: Python
max_stars_repo_path: rampwf/externals/tabulate.py | max_stars_repo_name: gregoire-colin/ramp_workflow | max_stars_repo_head_hexsha: 12512a3192bcc515c2da956a6a6704849cdadeee | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: rampwf/externals/tabulate.py | max_issues_repo_name: gregoire-colin/ramp_workflow | max_issues_repo_head_hexsha: 12512a3192bcc515c2da956a6a6704849cdadeee | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-01-18T09:47:03.000Z | max_issues_repo_issues_event_max_datetime: 2020-01-20T15:33:11.000Z
max_forks_repo_path: rampwf/externals/tabulate.py | max_forks_repo_name: lucyleeow/ramp-workflow | max_forks_repo_head_hexsha: 0bd6f7bbea65255fd964ddefcb21fe628ab1abbc | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple, Iterable
from platform import python_version_tuple
from signal import signal, SIGPIPE, SIG_DFL
import re
import math
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.3"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT="g"
_DEFAULT_MISSINGVAL=""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = { "left": "<.", "right": ">.", "center": "=.", "decimal": ">." }
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=''):
alignment = { "left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">' }
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
if isinstance(val, (_text_type, _binary_type)) and not val.strip():
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs,"||",header="'''"),
datarow=partial(_moin_row_with_attrs,"||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
# The table formats for which multiline cells will be folded into subsequent
# table rows. The key is the original format specified at the API. The value is
# the format that will be used to represent the original format.
multiline_formats = {
"plain": "plain",
"simple": "simple",
"grid": "grid",
"fancy_grid": "fancy_grid",
"pipe": "pipe",
"orgtbl": "orgtbl",
"jira": "jira",
"presto": "presto",
"psql": "psql",
"rst": "rst",
}
# TODO: Add multiline support for the remaining table formats:
# - mediawiki: Replace \n with <br>
# - moinmoin: TBD
# - youtrack: TBD
# - html: Replace \n with <br>
# - latex*: Use "makecell" package: In header, replace X\nY with
# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
# - tsv: TBD
# - textile: Replace \n with <br/> (must be well-formed XML)
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
>>> _isnumber("123e45678")
False
>>> _isnumber("inf")
True
"""
if not _isconvertible(float, string):
return False
elif isinstance(string, (_text_type, _binary_type)) and (
math.isinf(float(string)) or math.isnan(float(string))):
return string.lower() in ['inf', '-inf', 'nan']
return True
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or\
(isinstance(string, _binary_type) or isinstance(string, _text_type))\
and\
_isconvertible(inttype, string)
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return type(string) is _bool_type or\
(isinstance(string, (_binary_type, _text_type))\
and\
string in ("True", "False"))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False, is_multiline=False):
"""[string] -> [padded_string]"""
strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, _bool_type: 1, int: 2, float: 3, _binary_type: 4, _text_type: 5 }
invtypes = { 5: _text_type, 4: _binary_type, 3: float, 2: int, 1: _bool_type, 0: _none_type }
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse) for s in strings ]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False, width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h)) for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v,row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
is_headers2bool_broken = False
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
is_headers2bool_broken = True
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
vals = tabular_data.values # values matrix doesn't need to be transposed
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a plain iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default", disable_numparse=False):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
    for all types of data, pass `showindex="never"` or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
│ strings │ numbers │
    ╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
    ╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
"presto" is like tables produce by the Presto CLI:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "presto"))
strings | numbers
-----------+-----------
spam | 41.9999
eggs | 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
</thead>
<tbody>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
    such as backslash and underscore, so LaTeX commands may be embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
spam$_9$ & 41.9999 \\\\
\\emph{eggs} & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
This can lead to weird results for particular strings such as
specific git SHAs e.g. "42992e1" will be parsed into the number
429920 and aligned as such.
To completely disable number parsing (and alignment), use
    `disable_numparse=True`. For more fine-grained control, a list of column
    indices can be passed to disable number parsing only on those columns,
e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
first and third columns.
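    An illustrative example of both behaviours:
    >>> print(tabulate([["42992e1"]], tablefmt="plain"))
    429920
    >>> print(tabulate([["42992e1"]], tablefmt="plain", disable_numparse=True))
    42992e1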
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): #old version
float_formats = len(cols) * [floatfmt] # just duplicate the string to use in each column
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend( (len(cols)-len(float_formats)) * [_DEFAULT_FLOATFMT] )
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend( (len(cols)-len(missing_vals)) * [_DEFAULT_MISSINGVAL] )
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl) for cl in c)) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
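    Illustrative examples:
    >>> _expand_numparse([0, 2], 4)
    [False, True, False, True]
    >>> _expand_numparse(False, 3)
    [True, True, True]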
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
colwidths = [w - 2*pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' '*w]*(nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, pad)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
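    Example (illustrative):
      tabulate -1 -f grid data.txt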
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:f:",
["help", "header", "output", "sep=", "float=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt), file=file)
if __name__ == "__main__":
signal(SIGPIPE, SIG_DFL)
_main()
| 37.423863
| 197
| 0.554005
|
5ceb7f1247afc7244865cebe4f94ecf65cbf1186
| 1,050
|
py
|
Python
|
code/findMotifs_std.py
|
NIEHS/P-MACD
|
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
|
[
"MIT"
] | 2
|
2021-06-02T20:34:27.000Z
|
2021-09-06T22:36:10.000Z
|
code/findMotifs_std.py
|
NIEHS/P-MACD
|
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
|
[
"MIT"
] | null | null | null |
code/findMotifs_std.py
|
NIEHS/P-MACD
|
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
|
[
"MIT"
] | 2
|
2021-04-03T00:31:13.000Z
|
2022-01-31T15:40:29.000Z
|
# source("/home/klimczakl/projects/yeast/context/findMotif.py")
motifs2Find = ("A", "T", "G", "C", "Cg", "cG", "tC[at]", "[at]Ga", "tCa", "tGa", "tCt", "aGa", "tC", "Ga", "tC[atc]", "[atg]Ga", "cC", "Gg", "[at][ag]C", "G[ct][at]", "Cc", "gG", "[at]A", "T[at]")
findTitles = ("A", "T", "G", "C", "Cg", "cG", "tCw", "wGa", "tCa", "tGa", "tCt", "aGa", "tC", "Ga", "tCh", "dGa", "cC", "Gg", "wrC", "Gyw", "Cc", "gG", "wA", "Tw")
# for countMotifs.R
motifs2Count = ("a", "t", "g", "c", "cg", "tc[at]", "[at]ga", "tca", "tga", "tct", "aga", "tc", "ga", "tc[atc]", "[atg]ga", "cc", "gg", "[at][ag]c", "g[ct][at]", "cc", "gg", "[at]a", "t[at]")
countTitles = ("a", "t", "g", "c", "cg", "tcw", "wga", "tca", "tga", "tct", "aga", "tc", "ga", "tch", "dga", "cc", "gg", "wrc", "gyw", "cc", "gg", "wa", "tw")
#countTitles = map(lambda x: x+"_counts", countTitles)
countTitles = tuple([x+"_counts" for x in countTitles])
apobecTitles = ("tC_mutation", "tC_mutation_to_G", "tC_mutation_to_T", "APOBEC_mutation", "APOBEC_mutation_to_G", "APOBEC_mutation_to_T")
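# Illustrative sketch: counting matches of the motif patterns above in a DNA string.
# The sequence below is a hypothetical example value; matching is case-insensitive here.
if __name__ == "__main__":
    import re
    example_sequence = "TTCATGACGTTCA"
    for title, motif in zip(countTitles, motifs2Count):
        print(title, len(re.findall(motif, example_sequence, re.IGNORECASE)))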
| 70
| 196
| 0.510476
|
ba0fd526145ad558ee6ebc5b0dd7389c50904a77
| 1,048
|
py
|
Python
|
src/features/utils.py
|
jejjohnson/2019_rbig_rs
|
00df5c623d55895e0b43a4130bb6c601fae84890
|
[
"MIT"
] | 2
|
2020-05-15T17:31:39.000Z
|
2021-03-16T08:49:33.000Z
|
src/features/utils.py
|
jejjohnson/rbig_eo
|
00df5c623d55895e0b43a4130bb6c601fae84890
|
[
"MIT"
] | null | null | null |
src/features/utils.py
|
jejjohnson/rbig_eo
|
00df5c623d55895e0b43a4130bb6c601fae84890
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Optional
from sklearn.utils import check_random_state
import numpy as np
import pandas as pd
def move_variables(df: pd.DataFrame, variable: str) -> pd.DataFrame:
# cond1 = df['variable1'] == variable
cond = df["variable2"] == variable
    # swap the variable columns (and their matching entropy columns) so that
    # `variable` always ends up in the `variable1` column
    df.loc[cond, ["variable2", "variable1", "rbig_H_x", "rbig_H_y"]] = df.loc[
        cond, ["variable1", "variable2", "rbig_H_y", "rbig_H_x"]
    ].values
return df
def subset_data(
    X: np.ndarray, subsample: Optional[int] = None, random_state: int = 123,
) -> np.ndarray:
    idx = subset_indices(X, subsample, random_state)
    # subset_indices returns None when no subsampling is required
    if idx is None:
        return X
    return X[idx, :]
def subset_indices(
    X: np.ndarray, subsample: Optional[int] = None, random_state: int = 123,
) -> Optional[np.ndarray]:
if subsample is not None and subsample < X.shape[0]:
rng = check_random_state(random_state)
indices = np.arange(X.shape[0])
subset_indices = rng.permutation(indices)[:subsample]
return subset_indices
else:
return None
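# Illustrative usage sketch (hypothetical toy data):
if __name__ == "__main__":
    X = np.arange(20).reshape(10, 2)
    print(subset_indices(X, subsample=5, random_state=0))     # five unique row indices
    print(subset_data(X, subsample=5, random_state=0).shape)  # (5, 2)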
| 29.942857
| 76
| 0.664122
|
6d9bb4eb303b83893aa650e1904ae6dc87060c4a
| 1,215
|
py
|
Python
|
mmaction/core/bbox/transforms.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 1,870
|
2020-07-11T09:33:46.000Z
|
2022-03-31T13:21:36.000Z
|
mmaction/core/bbox/transforms.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 1,285
|
2020-07-11T11:18:57.000Z
|
2022-03-31T08:41:17.000Z
|
mmaction/core/bbox/transforms.py
|
rlleshi/mmaction2
|
6993693f178b1a59e5eb07f1a3db484d5e5de61a
|
[
"Apache-2.0"
] | 557
|
2020-07-11T09:51:57.000Z
|
2022-03-31T13:21:35.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def bbox2result(bboxes, labels, num_classes, thr=0.01):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 4)
labels (Tensor): shape (n, #num_classes)
num_classes (int): class number, including background class
thr (float): The score threshold used when converting predictions to
detection results
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
# We only handle multilabel now
assert labels.shape[-1] > 1
scores = labels # rename for clarification
thr = (thr, ) * num_classes if isinstance(thr, float) else thr
assert scores.shape[1] == num_classes
assert len(thr) == num_classes
result = []
for i in range(num_classes - 1):
where = scores[:, i + 1] > thr[i + 1]
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result
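# Illustrative sketch (assumes PyTorch is available; toy scores for one background
# class plus two foreground classes, values are hypothetical):
if __name__ == "__main__":
    import torch
    boxes = torch.tensor([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.6, 0.6]])
    scores = torch.tensor([[0.9, 0.05, 0.05], [0.1, 0.7, 0.2]])
    print([r.shape for r in bbox2result(boxes, scores, num_classes=3, thr=0.01)])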
| 31.973684
| 76
| 0.609053
|
442f248390deb58225429480c3870eec42baf24a
| 1,956
|
py
|
Python
|
kms/api-client/iam_add_member.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | 1
|
2020-06-04T16:50:49.000Z
|
2020-06-04T16:50:49.000Z
|
kms/api-client/iam_add_member.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | null | null | null |
kms/api-client/iam_add_member.py
|
apecr/python-docs-samples
|
26b581bb6ce148e13a9c7f2cd801f138b8aa8412
|
[
"Apache-2.0"
] | 1
|
2020-05-29T20:33:18.000Z
|
2020-05-29T20:33:18.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START kms_iam_add_member]
def iam_add_member(project_id, location_id, key_ring_id, key_id, member):
"""
Add an IAM member to a resource.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key to use (e.g. 'my-key').
member (string): Member to add (e.g. 'user:foo@example.com')
Returns:
Policy: Updated Cloud IAM policy.
"""
# Import the client library.
from google.cloud import kms
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the resource name.
resource_name = client.crypto_key_path(project_id, location_id, key_ring_id, key_id)
# The resource name could also be a key ring.
# resource_name = client.key_ring_path(project_id, location_id, key_ring_id);
# Get the current policy.
policy = client.get_iam_policy(resource_name)
# Add the member to the policy.
policy.bindings.add(
role='roles/cloudkms.cryptoKeyEncrypterDecrypter',
members=[member])
# Save the updated IAM policy.
updated_policy = client.set_iam_policy(resource_name, policy)
print('Added {} to {}'.format(member, resource_name))
return updated_policy
# [END kms_iam_add_member]
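# Illustrative call, using the hypothetical example values from the docstring:
# iam_add_member('my-project', 'us-east1', 'my-key-ring', 'my-key', 'user:foo@example.com')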
| 34.315789
| 88
| 0.699898
|
c11ab33609bc91c53b27be3ea3a51967a0ded354
| 3,703
|
py
|
Python
|
gaphor/plugins/diagramexport/gaphorconvert.py
|
bertob/gaphor
|
a1d6f8dd8c878f299980bba6c055436148573274
|
[
"Apache-2.0"
] | null | null | null |
gaphor/plugins/diagramexport/gaphorconvert.py
|
bertob/gaphor
|
a1d6f8dd8c878f299980bba6c055436148573274
|
[
"Apache-2.0"
] | null | null | null |
gaphor/plugins/diagramexport/gaphorconvert.py
|
bertob/gaphor
|
a1d6f8dd8c878f299980bba6c055436148573274
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import optparse
import os
import re
import sys
from gaphor.application import Session
from gaphor.core.modeling import Diagram
from gaphor.storage import storage
def pkg2dir(package):
"""Return directory path from package class."""
name = []
while package:
name.insert(0, package.name)
package = package.package
return "/".join(name)
def parse_options(argv):
usage = "usage: %prog [options] file1 file2..."
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true", help="verbose output"
)
parser.add_option(
"-u",
"--use-underscores",
dest="underscores",
action="store_true",
help="use underscores instead of spaces for output filenames",
)
parser.add_option(
"-d", "--dir", dest="dir", metavar="directory", help="output to directory"
)
parser.add_option(
"-f",
"--format",
dest="format",
metavar="format",
help="output file format, default pdf",
default="pdf",
choices=["pdf", "svg", "png"],
)
parser.add_option(
"-r",
"--regex",
dest="regex",
metavar="regex",
help="process diagrams which name matches given regular expression;"
" name includes package name; regular expressions are case insensitive",
)
options, args = parser.parse_args(argv)
if not args:
parser.print_help()
return options, args
def main(argv=sys.argv[1:]):
options, args = parse_options(argv)
def message(msg):
if options.verbose:
print(msg, file=sys.stderr)
session = Session(
services=[
"event_manager",
"component_registry",
"element_factory",
"element_dispatcher",
"modeling_language",
"diagram_export",
]
)
factory = session.get_service("element_factory")
modeling_language = session.get_service("modeling_language")
diagram_export = session.get_service("diagram_export")
name_re = None
if options.regex:
name_re = re.compile(options.regex, re.I)
# we should have some gaphor files to be processed at this point
for model in args:
message(f"loading model {model}")
storage.load(model, factory, modeling_language)
message("ready for rendering")
for diagram in factory.select(Diagram):
odir = pkg2dir(diagram.package)
# just diagram name
dname = diagram.name
# full diagram name including package path
pname = f"{odir}/{dname}"
if options.underscores:
odir = odir.replace(" ", "_")
dname = dname.replace(" ", "_")
if name_re and not name_re.search(pname):
message(f"skipping {pname}")
continue
if options.dir:
odir = f"{options.dir}/{odir}"
outfilename = f"{odir}/{dname}.{options.format}"
if not os.path.exists(odir):
message(f"creating dir {odir}")
os.makedirs(odir)
message(f"rendering: {pname} -> {outfilename}...")
if options.format == "pdf":
diagram_export.save_pdf(outfilename, diagram)
elif options.format == "svg":
diagram_export.save_svg(outfilename, diagram)
elif options.format == "png":
diagram_export.save_png(outfilename, diagram)
else:
raise RuntimeError(f"Unknown file format: {options.format}")
| 27.634328
| 85
| 0.57872
|
568e6944a5e1535e5d348279515c04c37c3f89bc
| 10,224
|
py
|
Python
|
blowdrycss/unitparser.py
|
acnagy/test-blowdrycss
|
bd9603dc87dc304b811213e3e6c3c97afa7f5966
|
[
"MIT"
] | null | null | null |
blowdrycss/unitparser.py
|
acnagy/test-blowdrycss
|
bd9603dc87dc304b811213e3e6c3c97afa7f5966
|
[
"MIT"
] | null | null | null |
blowdrycss/unitparser.py
|
acnagy/test-blowdrycss
|
bd9603dc87dc304b811213e3e6c3c97afa7f5966
|
[
"MIT"
] | null | null | null |
# python 2
from __future__ import absolute_import
# builtins
from string import digits
# custom
import blowdrycss_settings as settings
__author__ = 'chad nelson'
__project__ = 'blowdrycss'
class UnitParser(object):
"""
**Used in these cases:**
- No units are provided and default units need to be added to make it valid css.
- The user wants their pixel (px) based units to be converted to em or root em (rem)
so that their page scales / zooms properly.
**Assumption:** The value provided already has negative signs and decimal points. There are no dashes or
underscores present in the value e.g. -1.25 can be processed, but n1_25 cannot be processed.
**Contains a ``default_property_units_dict``** which maps property names to their default units.
**Note:** Shorthand properties are not supported.
**Why do I want to use em (named after the sound for the letter 'M') or root em (rem)?:**
*Because your webpage will scale with browser and device size.*
|
.. http://snook.ca/archives/html_and_css/font-size-with-rem
https://css-tricks.com/rems-ems/
**What does (em) actually stand for?:**
**Source:** W3C -- http://www.w3.org/WAI/GL/css2em.htm
The foremost tool for writing scalable style sheets is the "em" unit, and it therefore goes on top of
the list of guidelines that we will compile throughout this chapter: use ems to make scalable style sheets.
Named after the letter "M", the em unit has a long-standing tradition in typography where it has been used
to measure horizontal widths.
...
In CSS, the em unit is a general unit for measuring lengths, for example page margins and padding
around elements. You can use it both horizontally and vertically, and this shocks traditional
typographers who always have used em exclusively for horizontal measurements. By extending the em unit
to also work vertically, it has become a very powerful unit - so powerful that you seldom have to
use other length units.
**Source:** Wikipedia -- https://en.wikipedia.org/wiki/Em_%28typography%29
An em is a unit in the field of typography, equal to the currently specified point size. For example,
one em in a 16-point typeface is 16 points. Therefore, this unit is the same for all typefaces at a
given point size.
"""
def __init__(self, property_name=''):
self.property_name = property_name
self.allowed = set(digits + '-.px')
# Reference: http://www.w3.org/TR/CSS21/propidx.html
# Extracted all properties containing Values of <angle>, <percentage>, <length>, <time>, <frequency>
# IDEA: Build webscraper that auto-extracts these. May not be deterministic enough. Would need to build a
# Page based on the standard that includes all property name/value combos.
self.default_property_units_dict = { # Number of possible values:
'background-position': '%', # single or double
# 'border': 'px', # single Shorthand Property unit addition Not implemented
'border-top': 'px', # single
'border-right': 'px', # single
'border-bottom': 'px', # single
'border-left': 'px', # single
'border-spacing': 'px', # single
'border-width': 'px', # single
'border-top-width': 'px', # single
'border-right-width': 'px', # single
'border-bottom-width': 'px', # single
'border-left-width': 'px', # single
'border-radius': 'px', # single
'border-top-left-radius': 'px', # single
'border-top-right-radius': 'px', # single
'border-bottom-right-radius': 'px', # single
'border-bottom-left-radius': 'px',
'elevation': 'deg', # single
# 'font': 'px', # single Shorthand Property unit addition Not implemented
'font-size': 'px', # single
'height': 'px', # single
'max-height': 'px', # single
'min-height': 'px', # single
'letter-spacing': 'px', # single
'word-spacing': 'px', # single
'line-height': 'px', # single
'top': 'px', # single
'right': 'px', # single
'bottom': 'px', # single
'left': 'px', # single
'margin': 'px', # single, double, quadruple
'margin-top': 'px', # single
'margin-right': 'px', # single
'margin-bottom': 'px', # single
'margin-left': 'px', # single
# 'outline': 'px', # single Shorthand Property unit addition Not implemented
'outline-width': 'px', # single
'padding': 'px', # single, double, quadruple
'padding-top': 'px', # single
'padding-right': 'px', # single
'padding-bottom': 'px', # single
'padding-left': 'px', # single
'pause': 'ms', # single, double
'pause-after': 'ms', # single
'pause-before': 'ms', # single
'pitch': 'Hz', # single
'text-indent': 'px', # single
'text-shadow': 'px', # single, double, triple
'vertical-align': '%', # single
'volume': '%', # single
'width': 'px', # single
'max-width': 'px', # single
'min-width': 'px', # single
}
def default_units(self):
""" Returns the default units "if any" for the assigned ``self.property_name``.
:return: (*str*) -- Returns default units for the assigned ``self.property_name`` if they exist. Otherwise,
return an empty string ``''``.
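        Illustrative examples:
        >>> UnitParser(property_name='padding').default_units()
        'px'
        >>> UnitParser(property_name='font-weight').default_units()
        ''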
"""
if self.property_name in self.default_property_units_dict:
return self.default_property_units_dict[self.property_name]
else:
return ''
def add_units(self, property_value=''):
""" If the property_name requires units, then apply the default units defined in default_property_units_dict.
**Rules:**
- If use_em is False apply the default units for the property name by looking it up in
default_property_units_dict.
- Unit that have default units of ``px`` are converted to ``em`` if use_em is True.
- If ``property_value`` has multiple property values, then split it apart.
- If the value already has units, then pass it through unchanged.
        - The value provided may already contain negative signs and decimal points.
- Mixed units are allowed, but **not recommended**.
        - Values shall only contain digits, minus signs, decimal points, and the letters ``p`` and ``x``, e.g. -1.25 can be processed, but n1_25 cannot be processed.
:type property_value: str
:param property_value: A string containing one or more space delimited alphanumeric characters.
:return: (str) -- Returns the property value with the default or converted units added.
>>> # Convert 'px' to 'em'
        >>> settings.use_em = True
        >>> unit_parser = UnitParser(property_name='padding')
>>> unit_parser.add_units('1 2 1 2')
0.0625em 0.125em 0.0625em 0.125em
>>> # Use default units
        >>> settings.use_em = False
>>> unit_parser.add_units('1 2 1 2')
1px 2px 1px 2px
>>> # Values already have units or are not parsable pass through
>>> # True produces the same output.
        >>> settings.use_em = False
>>> unit_parser.add_units('55zp')
55zp
>>> unit_parser.add_units('17rem')
17rem
>>> # Unitless ``property_name``
>>> # causes ``property_value`` to pass through.
>>> unit_parser.property_name = 'font-weight'
>>> unit_parser.add_units('200')
200
>>> # Mixed units cases - Not a Recommended Practice,
>>> # but represent valid CSS. Be careful.
        >>> settings.use_em = False
>>> unit_parser.add_units('5em 6 5em 6')
5em 6px 5em 6px
        >>> settings.use_em = True
>>> unit_parser.add_units('1em 100 4cm 9rem')
1em 6.25em 4cm 9rem
"""
new_value = []
try:
default_units = self.default_property_units_dict[self.property_name] # See if property_name has units.
for val in property_value.split(): # single, double and quadruple
if set(val) <= self.allowed:
val = val.replace('px', '') # Handle 'px' units case.
if settings.use_em and default_units == 'px': # Convert units if required.
new_value.append(settings.px_to_em(pixels=val))
else:
new_value.append(val + default_units) # Use default units.
else:
new_value.append(val) # Pass through and ignore value.
property_value = ' '.join(new_value) # Put the new values back together.
except KeyError:
pass # Property is unitless.
return property_value
| 46.899083
| 119
| 0.529245
|
b175bf3a54036468fe2e1be5e6519961a9d42b20
| 23,213
|
py
|
Python
|
tests/fnet/test_modeling_fnet.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 2
|
2021-11-25T13:27:29.000Z
|
2022-02-25T19:21:19.000Z
|
tests/fnet/test_modeling_fnet.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 1
|
2022-03-26T12:10:11.000Z
|
2022-03-26T12:10:11.000Z
|
tests/fnet/test_modeling_fnet.py
|
techthiyanes/transformers
|
705d65368fb28246534ef636fe62c008f4fb2682
|
[
"Apache-2.0"
] | 1
|
2022-01-12T14:45:41.000Z
|
2022-01-12T14:45:41.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch FNet model. """
import unittest
from typing import Dict, List, Tuple
from transformers import FNetConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tokenizers, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetModel,
FNetTokenizerFast,
)
from transformers.models.fnet.modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetBasicFourierTransform,
is_scipy_available,
)
# Override ConfigTester
class FNetConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
if self.has_text_modality:
self.parent.assertTrue(hasattr(config, "vocab_size"))
self.parent.assertTrue(hasattr(config, "hidden_size"))
self.parent.assertTrue(hasattr(config, "num_hidden_layers"))
class FNetModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
def get_config(self):
return FNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
tpu_short_seq_length=self.seq_length,
)
@require_torch
def create_and_check_fourier_transform(self, config):
hidden_states = floats_tensor([self.batch_size, self.seq_length, config.hidden_size])
transform = FNetBasicFourierTransform(config)
fftn_output = transform(hidden_states)
config.use_tpu_fourier_optimizations = True
if is_scipy_available():
transform = FNetBasicFourierTransform(config)
dft_output = transform(hidden_states)
config.max_position_embeddings = 4097
transform = FNetBasicFourierTransform(config)
fft_output = transform(hidden_states)
if is_scipy_available():
self.parent.assertTrue(torch.allclose(fftn_output[0][0], dft_output[0][0], atol=1e-4))
self.parent.assertTrue(torch.allclose(fft_output[0][0], dft_output[0][0], atol=1e-4))
self.parent.assertTrue(torch.allclose(fftn_output[0][0], fft_output[0][0], atol=1e-4))
def create_and_check_model(self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels):
model = FNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_next_sentence_prediction(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = FNetForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids}
return config, inputs_dict
@require_torch
class FNetModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FNetModel,
FNetForPreTraining,
FNetForMaskedLM,
FNetForNextSentencePrediction,
FNetForMultipleChoice,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
)
if is_torch_available()
else ()
)
# Skip Tests
    test_pruning = False
    test_head_masking = False
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
    # Overridden tests
def test_attention_outputs(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
# tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
# dict_inputs = self._prepare_for_class(inputs_dict, model_class)
# check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
def setUp(self):
self.model_tester = FNetModelTester(self)
self.config_tester = FNetConfigTester(self, config_class=FNetConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class FNetModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_for_masked_lm(self):
"""
For comparison:
1. Modify the pre-training model `__call__` to skip computing metrics and return masked_lm_output like so:
```
...
sequence_output, pooled_output = EncoderModel(
self.config, random_seed=self.random_seed, name="encoder")(
input_ids, input_mask, type_ids, deterministic=deterministic)
masked_lm_output = nn.Dense(
self.config.d_emb,
kernel_init=default_kernel_init,
name="predictions_dense")(
sequence_output)
masked_lm_output = nn.gelu(masked_lm_output)
masked_lm_output = nn.LayerNorm(
epsilon=LAYER_NORM_EPSILON, name="predictions_layer_norm")(
masked_lm_output)
masked_lm_logits = layers.OutputProjection(
kernel=self._get_embedding_table(), name="predictions_output")(
masked_lm_output)
next_sentence_logits = layers.OutputProjection(
n_out=2, kernel_init=default_kernel_init, name="classification")(
pooled_output)
return masked_lm_logits
...
```
2. Run the following:
>>> import jax.numpy as jnp
>>> import sentencepiece as spm
>>> from flax.training import checkpoints
>>> from f_net.models import PreTrainingModel
>>> from f_net.configs.pretraining import get_config, ModelArchitecture
>>> pretrained_params = checkpoints.restore_checkpoint('./f_net/f_net_checkpoint', None) # Location of original checkpoint
>>> pretrained_config = get_config()
>>> pretrained_config.model_arch = ModelArchitecture.F_NET
>>> vocab_filepath = "./f_net/c4_bpe_sentencepiece.model" # Location of the sentence piece model
>>> tokenizer = spm.SentencePieceProcessor()
>>> tokenizer.Load(vocab_filepath)
>>> with pretrained_config.unlocked():
>>> pretrained_config.vocab_size = tokenizer.GetPieceSize()
>>> tokens = jnp.array([[0, 1, 2, 3, 4, 5]])
>>> type_ids = jnp.zeros_like(tokens, dtype="i4")
>>> attention_mask = jnp.ones_like(tokens) # Dummy. This gets deleted inside the model.
>>> flax_pretraining_model = PreTrainingModel(pretrained_config)
>>> pretrained_model_params = freeze(pretrained_params['target'])
>>> flax_model_outputs = flax_pretraining_model.apply({"params": pretrained_model_params}, tokens, attention_mask, type_ids, None, None, None, None, deterministic=True)
>>> masked_lm_logits[:, :3, :3]
"""
model = FNetForMaskedLM.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
vocab_size = 32000
expected_shape = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-1.7819, -7.7384, -7.5002], [-3.4746, -8.5943, -7.7762], [-3.2052, -9.0771, -8.3468]]],
device=torch_device,
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
@require_tokenizers
def test_inference_long_sentence(self):
model = FNetForMaskedLM.from_pretrained("google/fnet-base")
model.to(torch_device)
tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
inputs = tokenizer(
"the man worked as a [MASK].",
"this is his [MASK].",
return_tensors="pt",
padding="max_length",
max_length=512,
)
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
logits = model(**inputs).logits
predictions_mask_1 = tokenizer.decode(logits[0, 6].topk(5).indices)
predictions_mask_2 = tokenizer.decode(logits[0, 12].topk(5).indices)
self.assertEqual(predictions_mask_1.split(" "), ["man", "child", "teacher", "woman", "model"])
self.assertEqual(predictions_mask_2.split(" "), ["work", "wife", "job", "story", "name"])
@slow
def test_inference_for_next_sentence_prediction(self):
model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
expected_shape = torch.Size((1, 2))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[-0.2234, -0.0226]], device=torch_device)
self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
@slow
def test_inference_model(self):
model = FNetModel.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, model.config.hidden_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[4.1541, -0.1051, -0.1667], [-0.9144, 0.2939, -0.0086], [-0.8472, -0.7281, 0.0256]]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 41.525939
| 315
| 0.669711
|
1a128d5e56766edad28c97b713d77d3b3ef8da02
| 67
|
py
|
Python
|
mapping/enable/primitives/api.py
|
nmichaud/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | 1
|
2019-04-22T16:36:06.000Z
|
2019-04-22T16:36:06.000Z
|
mapping/enable/primitives/api.py
|
pombreda/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | null | null | null |
mapping/enable/primitives/api.py
|
pombreda/enable-mapping
|
421aae6c3c700406df0f2438cec190daf5074084
|
[
"BSD-3-Clause"
] | 2
|
2015-04-14T10:06:03.000Z
|
2020-10-03T03:56:47.000Z
|
from geo_circle import GeoCircle
from geo_marker import GeoMarker
| 16.75
| 32
| 0.865672
|
a267239e4373e2ae20914c31d23d992c10ead5af
| 365
|
py
|
Python
|
Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/pickler.py
|
Elzei/show-off
|
fd6c46480160d795a7c1c833a798f3d49eddf144
|
[
"MIT"
] | null | null | null |
Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/pickler.py
|
Elzei/show-off
|
fd6c46480160d795a7c1c833a798f3d49eddf144
|
[
"MIT"
] | null | null | null |
Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/pickler.py
|
Elzei/show-off
|
fd6c46480160d795a7c1c833a798f3d49eddf144
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import pickle
class MyContainer(object):
def __init__(self, data):
self._data = data
def get_data(self):
return self._data
d1 = MyContainer([2, 5, 4, 3, [ 12, 3, 5 ], 32, { 'a': 12, 'b': 43}])
with open('/tmp/pickle_data.dat', "wb") as f:
p = pickle.Pickler(f, 2)
p.dump(d1)
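# Illustrative counterpart (not in the original script): read the object back from the
# same file; Python 2 print syntax is kept to match the script above.
with open('/tmp/pickle_data.dat', "rb") as f:
    d2 = pickle.Unpickler(f).load()
    print d2.get_data()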
| 16.590909
| 69
| 0.561644
|
0104993a0b4d2816985e9cb2c62962b04fe0f7c6
| 1,251
|
py
|
Python
|
userbot/plugins/fleaveme_IQ.py
|
noornoor600/telethon-iraq
|
f958af26e8686f432760ae9b0fce90b94d1d731a
|
[
"Apache-2.0"
] | 2
|
2022-02-27T11:39:58.000Z
|
2022-02-27T11:40:00.000Z
|
userbot/plugins/fleaveme_IQ.py
|
ForSimo/Telethon
|
70b6169d367321af55e74589482699b0e90e3c0f
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/fleaveme_IQ.py
|
ForSimo/Telethon
|
70b6169d367321af55e74589482699b0e90e3c0f
|
[
"Apache-2.0"
] | null | null | null |
#redit: @KLANR
"""Emoji
Available Commands:
.fleave"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 17)
input_str = event.pattern_match.group(1)
if input_str == "fleave":
await event.edit(input_str)
animation_chars = [
"⬛⬛⬛\n⬛⬛⬛\n⬛⬛⬛",
"⬛⬛⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️⬛\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄⬛\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛⬛",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬛↘️",
"⬛⬆️↗️\n⬛🔄➡️\n⬛⬇️↘️",
"⬛⬆️↗️\n⬛🔄➡️\n↙️⬇️↘️",
"⬛⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
"↖️⬆️↗️\n⬅️🔄➡️\n↙️⬇️↘️",
"**Chat Message Exported To** `./Inpu/`",
"**Chat Message Exported To** `./Inpu/homework/`",
"**Chat Message Exported To** `./Inpu/homework/groupchat.txt`",
"__Legend is leaving this chat.....! Gaand Marao Bc..__",
"__Legend is leaving this chat.....! Gaand Marao Bc..__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
            await event.edit(animation_chars[i % len(animation_chars)])
| 21.20339
| 75
| 0.453237
|
50840b1c592e2ed25c9ef59a755b548812ce851a
| 18,790
|
py
|
Python
|
lib/googlecloudsdk/calliope/exceptions.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/calliope/exceptions.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/calliope/exceptions.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions that can be thrown by calliope tools.
The exceptions in this file, and those that extend them, can be thrown by
the Run() function in calliope tools without worrying about stack traces
littering the screen in CLI mode. In interpreter mode, they are not caught
from within calliope.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
from functools import wraps
import os
import sys
from googlecloudsdk.api_lib.util import exceptions as api_exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_attr_os
import six
def NewErrorFromCurrentException(error, *args):
"""Creates a new error based on the current exception being handled.
If no exception is being handled, a new error with the given args
is created. If there is a current exception, the original exception is
first logged (to file only). A new error is then created with the
same args as the current one.
Args:
error: The new error to create.
*args: The standard args taken by the constructor of Exception for the new
exception that is created. If None, the args from the exception
currently being handled will be used.
Returns:
The generated error exception.
"""
(_, current_exception, _) = sys.exc_info()
# Log original exception details and traceback to the log file if we are
# currently handling an exception.
if current_exception:
file_logger = log.file_only_logger
file_logger.error('Handling the source of a tool exception, '
'original details follow.')
file_logger.exception(current_exception)
if args:
return error(*args)
elif current_exception:
return error(*current_exception.args)
return error('An unknown error has occurred')
# TODO(b/32328530): Remove ToolException when the last ref is gone
class ToolException(core_exceptions.Error):
"""ToolException is for Run methods to throw for non-code-bug errors.
Attributes:
command_name: The dotted group and command name for the command that threw
this exception. This value is set by calliope.
"""
@staticmethod
def FromCurrent(*args):
return NewErrorFromCurrentException(ToolException, *args)
class ExitCodeNoError(core_exceptions.Error):
"""A special exception for exit codes without error messages.
If this exception is raised, it's identical in behavior to returning from
the command code, except the overall exit code will be different.
"""
class FailedSubCommand(core_exceptions.Error):
"""Exception capturing a subcommand which did sys.exit(code)."""
def __init__(self, cmd, code):
super(FailedSubCommand, self).__init__(
'Failed command: [{0}] with exit code [{1}]'.format(
' '.join(cmd), code),
exit_code=code)
def RaiseErrorInsteadOf(error, *error_types):
"""A decorator that re-raises as an error.
If any of the error_types are raised in the decorated function, this decorator
will re-raise as an error.
Args:
error: Exception, The new exception to raise.
*error_types: [Exception], A list of exception types that this decorator
will watch for.
Returns:
The decorated function.
"""
def Wrap(func):
"""Wrapper function for the decorator."""
@wraps(func)
def TryFunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except error_types:
core_exceptions.reraise(NewErrorFromCurrentException(error))
return TryFunc
return Wrap
# TODO(b/32328530): Remove RaiseToolExceptionInsteadOf when the last ref is gone
def RaiseToolExceptionInsteadOf(*error_types):
"""A decorator that re-raises as ToolException."""
return RaiseErrorInsteadOf(ToolException, *error_types)
def _TruncateToLineWidth(string, align, width, fill=''):
"""Truncate string to line width, right aligning at align.
Examples (assuming a screen width of 10):
>>> _TruncateToLineWidth('foo', 0)
'foo'
>>> # Align to the beginning. Should truncate the end.
... _TruncateToLineWidth('0123456789abcdef', 0)
'0123456789'
>>> _TruncateToLineWidth('0123456789abcdef', 0, fill='...')
'0123456...'
>>> # Align to the end. Should truncate the beginning.
... _TruncateToLineWidth('0123456789abcdef', 16)
'6789abcdef'
>>> _TruncateToLineWidth('0123456789abcdef', 16, fill='...')
'...9abcdef'
>>> # Align to the middle (note: the index is toward the end of the string,
... # because this function right-aligns to the given index).
  ... # Should truncate the beginning and end.
... _TruncateToLineWidth('0123456789abcdef', 12)
'23456789ab'
>>> _TruncateToLineWidth('0123456789abcdef', 12, fill='...')
'...5678...'
Args:
string: string to truncate
align: index to right-align to
width: maximum length for the resulting string
fill: if given, indicate truncation with this string. Must be shorter than
terminal width / 2.
Returns:
str, the truncated string
Raises:
ValueError, if provided fill is too long for the terminal.
"""
if len(fill) >= width // 2:
# Either the caller provided a fill that's way too long, or the user has a
# terminal that's way too narrow. In either case, we aren't going to be able
# to make this look nice, but we don't want to throw an error because that
# will mask the original error.
log.warning('Screen not wide enough to display correct error message.')
return string
if len(string) <= width:
return string
if align > width:
string = fill + string[align-width+len(fill):]
if len(string) <= width:
return string
string = string[:width-len(fill)] + fill
return string
_MARKER = '^ invalid character'
def _NonAsciiIndex(s):
"""Returns the index of the first non-ascii char in s, -1 if all ascii."""
if isinstance(s, six.text_type):
for i, c in enumerate(s):
try:
c.encode('ascii')
except (AttributeError, UnicodeError):
return i
else:
for i, b in enumerate(s):
try:
b.decode('ascii')
except (AttributeError, UnicodeError):
return i
return -1
# pylint: disable=g-doc-bad-indent
def _FormatNonAsciiMarkerString(args):
r"""Format a string that will mark the first non-ASCII character it contains.
Example:
>>> args = ['command.py', '--foo=\xce\x94']
>>> _FormatNonAsciiMarkerString(args) == (
... 'command.py --foo=\u0394\n'
... ' ^ invalid character'
... )
True
Args:
args: The arg list for the command executed
Returns:
unicode, a properly formatted string with two lines, the second of which
indicates the non-ASCII character in the first.
Raises:
ValueError: if the given string is all ASCII characters
"""
# pos is the position of the first non-ASCII character in ' '.join(args)
pos = 0
for arg in args:
first_non_ascii_index = _NonAsciiIndex(arg)
if first_non_ascii_index >= 0:
pos += first_non_ascii_index
break
# this arg was all ASCII; add 1 for the ' ' between args
pos += len(arg) + 1
else:
raise ValueError(
'The command line is composed entirely of ASCII characters.')
# Make a string that, when printed in parallel, will point to the non-ASCII
# character
marker_string = ' ' * pos + _MARKER
# Make sure that this will still print out nicely on an odd-sized screen
align = len(marker_string)
args_string = ' '.join(
[console_attr.SafeText(arg) for arg in args])
width, _ = console_attr_os.GetTermSize()
fill = '...'
if width < len(_MARKER) + len(fill):
# It's hopeless to try to wrap this and make it look nice. Preserve it in
# full for logs and so on.
return '\n'.join((args_string, marker_string))
# If len(args_string) < width < len(marker_string) (ex:)
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
# width = len('----------------')
#
# then the truncation can give a result like the following:
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
#
# (This occurs when args_string is short enough to not be truncated, but
# marker_string is long enough to be truncated.)
#
# ljust args_string to make it as long as marker_string before passing to
# _TruncateToLineWidth, which will yield compatible truncations. rstrip at the
# end to get rid of the new trailing spaces.
formatted_args_string = _TruncateToLineWidth(args_string.ljust(align), align,
width, fill=fill).rstrip()
formatted_marker_string = _TruncateToLineWidth(marker_string, align, width)
return '\n'.join((formatted_args_string, formatted_marker_string))
class InvalidCharacterInArgException(ToolException):
"""InvalidCharacterInArgException is for non-ASCII CLI arguments."""
def __init__(self, args, invalid_arg):
self.invalid_arg = invalid_arg
cmd = os.path.basename(args[0])
if cmd.endswith('.py'):
cmd = cmd[:-3]
args = [cmd] + args[1:]
super(InvalidCharacterInArgException, self).__init__(
'Failed to read command line argument [{0}] because it does '
'not appear to be valid 7-bit ASCII.\n\n'
'{1}'.format(
console_attr.SafeText(self.invalid_arg),
_FormatNonAsciiMarkerString(args)))
class BadArgumentException(ToolException):
"""For arguments that are wrong for reason hard to summarize."""
def __init__(self, argument_name, message):
super(BadArgumentException, self).__init__(
'Invalid value for [{0}]: {1}'.format(argument_name, message))
self.argument_name = argument_name
# TODO(b/35938745): Eventually use api_exceptions.HttpException exclusively.
class HttpException(api_exceptions.HttpException):
"""HttpException is raised whenever the Http response status code != 200.
See api_lib.util.exceptions.HttpException for full documentation.
"""
class InvalidArgumentException(ToolException):
"""InvalidArgumentException is for malformed arguments."""
def __init__(self, parameter_name, message):
super(InvalidArgumentException, self).__init__(
'Invalid value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class ConflictingArgumentsException(ToolException):
"""ConflictingArgumentsException arguments that are mutually exclusive."""
def __init__(self, *parameter_names):
super(ConflictingArgumentsException, self).__init__(
'arguments not allowed simultaneously: ' + ', '.join(parameter_names))
self.parameter_names = parameter_names
class UnknownArgumentException(ToolException):
"""UnknownArgumentException is for arguments with unexpected values."""
def __init__(self, parameter_name, message):
super(UnknownArgumentException, self).__init__(
'Unknown value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class RequiredArgumentException(ToolException):
"""An exception for when a usually optional argument is required in this case.
"""
def __init__(self, parameter_name, message):
super(RequiredArgumentException, self).__init__(
'Missing required argument [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class OneOfArgumentsRequiredException(ToolException):
"""An exception for when one of usually optional arguments is required.
"""
def __init__(self, parameters, message):
super(OneOfArgumentsRequiredException, self).__init__(
'One of arguments [{0}] is required: {1}'.format(
', '.join(parameters), message))
self.parameters = parameters
class MinimumArgumentException(ToolException):
"""An exception for when one of several arguments is required."""
def __init__(self, parameter_names, message=None):
if message:
message = ': {}'.format(message)
else:
message = ''
super(MinimumArgumentException, self).__init__(
'One of [{0}] must be supplied{1}.'.format(
', '.join(['{0}'.format(p) for p in parameter_names]),
message)
)
class BadFileException(ToolException):
"""BadFileException is for problems reading or writing a file."""
# pylint: disable=g-import-not-at-top, Delay the import of this because
# importing store is relatively expensive.
def _GetTokenRefreshError(exc):
from googlecloudsdk.core.credentials import store
return store.TokenRefreshError(exc)
# In general, lower level libraries should be catching exceptions and re-raising
# exceptions that extend core.Error so nice error messages come out. There are
# some error classes that want to be handled as recoverable errors, but cannot
# import the core_exceptions module (and therefore the Error class) for various
# reasons (e.g. circular dependencies). To work around this, we keep a list of
# known "friendly" error types, which we handle in the same way as core.Error.
# Additionally, we provide an alternate exception class to convert the errors
# to which may add additional information. We use strings here so that we don't
# have to import all these libraries all the time, just to be able to handle the
# errors when they come up. Only add errors here if there is no other way to
# handle them.
_KNOWN_ERRORS = {
'apitools.base.py.exceptions.HttpError': HttpException,
'googlecloudsdk.calliope.parser_errors.ArgumentError': lambda x: None,
'googlecloudsdk.core.util.files.Error': lambda x: None,
'httplib.ResponseNotReady': core_exceptions.NetworkIssueError,
# Same error but different location on PY3.
'http.client.ResponseNotReady': core_exceptions.NetworkIssueError,
'oauth2client.client.AccessTokenRefreshError': _GetTokenRefreshError,
'ssl.SSLError': core_exceptions.NetworkIssueError,
'socket.error': core_exceptions.NetworkIssueError,
}
def _GetExceptionName(cls):
"""Returns the exception name used as index into _KNOWN_ERRORS from type."""
return cls.__module__ + '.' + cls.__name__
_SOCKET_ERRNO_NAMES = {
'EADDRINUSE', 'EADDRNOTAVAIL', 'EAFNOSUPPORT', 'EBADMSG', 'ECOMM',
'ECONNABORTED', 'ECONNREFUSED', 'ECONNRESET', 'EDESTADDRREQ', 'EHOSTDOWN',
'EHOSTUNREACH', 'EISCONN', 'EMSGSIZE', 'EMULTIHOP', 'ENETDOWN', 'ENETRESET',
'ENETUNREACH', 'ENOBUFS', 'ENOPROTOOPT', 'ENOTCONN', 'ENOTSOCK', 'ENOTUNIQ',
'EOPNOTSUPP', 'EPFNOSUPPORT', 'EPROTO', 'EPROTONOSUPPORT', 'EPROTOTYPE',
'EREMCHG', 'EREMOTEIO', 'ESHUTDOWN', 'ESOCKTNOSUPPORT', 'ETIMEDOUT',
'ETOOMANYREFS',
}
def _IsSocketError(exc):
"""Returns True if exc is a socket error exception."""
# I've a feeling we're not in python 2 anymore. PEP 3151 eliminated module
# specific exceptions in favor of builtin exceptions like OSError. Good
# for some things, bad for others. For instance, this brittle errno check
# for "network" errors. We use names because errnos are system dependent.
return errno.errorcode[exc.errno] in _SOCKET_ERRNO_NAMES
def ConvertKnownError(exc):
"""Convert the given exception into an alternate type if it is known.
Searches backwards through Exception type hierarchy until it finds a match.
Args:
exc: Exception, the exception to convert.
Returns:
(exception, bool), exception is None if this is not a known type, otherwise
a new exception that should be logged. The boolean is True if the error
should be printed, or False to just exit without printing.
"""
if isinstance(exc, ExitCodeNoError):
return exc, False
elif isinstance(exc, core_exceptions.Error):
return exc, True
known_err = None
classes = [type(exc)]
processed = set([]) # To avoid circular dependencies
while classes:
cls = classes.pop(0)
processed.add(cls)
name = _GetExceptionName(cls)
if name == 'builtins.OSError' and _IsSocketError(exc):
known_err = core_exceptions.NetworkIssueError
else:
known_err = _KNOWN_ERRORS.get(name)
if known_err:
break
bases = [bc for bc in cls.__bases__
if bc not in processed and issubclass(bc, Exception)]
classes.extend([base for base in bases if base is not Exception])
if not known_err:
# This is not a known error type
return None, True
# If there is no known exception just return the original exception.
new_exc = known_err(exc)
return (new_exc, True) if new_exc else (exc, True)
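# For illustration (not part of this module): with the table above, a socket error
# converts to core_exceptions.NetworkIssueError and print_error is True, while an
# ExitCodeNoError instance is returned unchanged with print_error False, so the
# command exits quietly without a message.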
def HandleError(exc, command_path, known_error_handler=None):
"""Handles an error that occurs during command execution.
It calls ConvertKnownError to convert exceptions to known types before
processing. If it is a known type, it is printed nicely as as error. If not,
it is raised as a crash.
Args:
exc: Exception, The original exception that occurred.
command_path: str, The name of the command that failed (for error
reporting).
known_error_handler: f(): A function to report the current exception as a
known error.
"""
known_exc, print_error = ConvertKnownError(exc)
if known_exc:
_LogKnownError(known_exc, command_path, print_error)
# Uncaught errors will be handled in gcloud_main.
if known_error_handler:
known_error_handler()
if properties.VALUES.core.print_handled_tracebacks.GetBool():
core_exceptions.reraise(exc)
_Exit(known_exc)
else:
# Make sure any uncaught exceptions still make it into the log file.
log.debug(console_attr.SafeText(exc), exc_info=sys.exc_info())
core_exceptions.reraise(exc)
def _LogKnownError(known_exc, command_path, print_error):
msg = '({0}) {1}'.format(
console_attr.SafeText(command_path),
console_attr.SafeText(known_exc))
log.debug(msg, exc_info=sys.exc_info())
if print_error:
log.error(msg)
def _Exit(exc):
"""This method exists so we can mock this out during testing to not exit."""
# exit_code won't be defined in the KNOWN_ERRORs classes
sys.exit(getattr(exc, 'exit_code', 1))
| 34.796296
| 80
| 0.714476
|
e9ce0845f3b552a47981f84f1c94393045958930
| 8,804
|
py
|
Python
|
tests/tensortrade/orders/test_broker.py
|
Kukunin/tensortrade
|
c5b5c40232a334d9b38359eab0c0ce0e4c9e43ed
|
[
"Apache-2.0"
] | 6
|
2020-03-05T14:49:01.000Z
|
2022-02-28T01:55:50.000Z
|
tests/tensortrade/orders/test_broker.py
|
Machine-Learning-Labs/tensortrade
|
3fe7793a6c1d3d7bfe772166578f624f3f572eca
|
[
"Apache-2.0"
] | null | null | null |
tests/tensortrade/orders/test_broker.py
|
Machine-Learning-Labs/tensortrade
|
3fe7793a6c1d3d7bfe772166578f624f3f572eca
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import unittest.mock as mock
from tensortrade.orders import Broker, OrderStatus, Order, OrderSpec
from tensortrade.orders.criteria import Stop
from tensortrade.wallets import Wallet, Portfolio
from tensortrade.trades import TradeSide, TradeType
from tensortrade.instruments import USD, BTC, Quantity
@mock.patch('tensortrade.exchanges.Exchange')
def test_init(mock_exchange_class):
exchange = mock_exchange_class.return_value
broker = Broker(exchange)
assert broker
assert broker.exchanges == [exchange]
assert broker.unexecuted == []
assert broker.executed == {}
assert broker.trades == {}
exchanges = [
mock_exchange_class.return_value,
mock_exchange_class.return_value,
mock_exchange_class.return_value
]
broker = Broker(exchanges)
assert broker
assert broker.exchanges == exchanges
assert broker.unexecuted == []
assert broker.executed == {}
assert broker.trades == {}
@mock.patch('tensortrade.orders.Order')
@mock.patch('tensortrade.exchanges.Exchange')
def test_submit(mock_order_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
broker = Broker(exchange)
order = mock_order_class.return_value
assert broker.unexecuted == []
broker.submit(order)
assert order in broker.unexecuted
@mock.patch('tensortrade.orders.Order')
@mock.patch('tensortrade.exchanges.Exchange')
def test_cancel_unexecuted_order(mock_order_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
broker = Broker(exchange)
order = mock_order_class.return_value
order.cancel = mock.Mock(return_value=None)
order.status = OrderStatus.PENDING
broker.submit(order)
assert order in broker.unexecuted
broker.cancel(order)
assert order not in broker.unexecuted
order.cancel.assert_called_once_with(exchange)
@mock.patch('tensortrade.orders.Order')
@mock.patch('tensortrade.exchanges.Exchange')
def test_cancel_executed_order(mock_order_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
broker = Broker(exchange)
order = mock_order_class.return_value
order.cancel = mock.Mock(return_value=None)
broker.submit(order)
assert order in broker.unexecuted
order.status = OrderStatus.OPEN
with pytest.raises(Warning):
broker.cancel(order)
order.status = OrderStatus.PARTIALLY_FILLED
with pytest.raises(Warning):
broker.cancel(order)
order.status = OrderStatus.FILLED
with pytest.raises(Warning):
broker.cancel(order)
order.status = OrderStatus.CANCELLED
with pytest.raises(Warning):
broker.cancel(order)
@mock.patch('tensortrade.orders.Order')
@mock.patch('tensortrade.exchanges.Exchange')
def test_update_on_single_exchange_with_single_order(mock_order_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
broker = Broker(exchange)
order = mock_order_class.return_value
order.id = "fake_id"
order.is_executable_on = mock.Mock(side_effect=[False, True])
order.attach = mock.Mock(return_value=None)
broker.submit(order)
# Test order does not execute on first update
broker.update()
assert order in broker.unexecuted
assert order.id not in broker.executed
# Test order does execute on second update
broker.update()
assert order not in broker.unexecuted
assert order.id in broker.executed
order.attach.assert_called_once_with(broker)
@mock.patch('tensortrade.exchanges.Exchange')
def test_update_on_single_exchange_with_multiple_orders(mock_exchange_class):
exchange = mock_exchange_class.return_value
exchange.id = "fake_exchange_id"
wallets = [Wallet(exchange, 10000 * USD), Wallet(exchange, 0 * BTC)]
portfolio = Portfolio(USD, wallets)
broker = Broker(exchange)
# Submit order 1
o1 = Order(side=TradeSide.BUY,
trade_type=TradeType.MARKET,
pair=USD / BTC,
quantity=5200.00 * USD,
portfolio=portfolio,
price=7000.00)
o1.is_executable_on = mock.MagicMock(side_effect=[False, True])
broker.submit(o1)
# Submit order 2
o2 = Order(side=TradeSide.BUY,
trade_type=TradeType.MARKET,
pair=USD / BTC,
quantity=230.00 * USD,
portfolio=portfolio,
price=7300.00)
o2.is_executable_on = mock.MagicMock(side_effect=[True, False])
broker.submit(o2)
# No updates have been made yet
assert o1 in broker.unexecuted and o1 not in broker.executed
assert o2 in broker.unexecuted and o2 not in broker.executed
# First update
broker.update()
assert o1 in broker.unexecuted and o1.id not in broker.executed
assert o2 not in broker.unexecuted and o2.id in broker.executed
# Second update
broker.update()
assert o1 not in broker.unexecuted and o1.id in broker.executed
assert o2 not in broker.unexecuted and o2.id in broker.executed
@mock.patch('tensortrade.exchanges.Exchange')
@mock.patch('tensortrade.trades.Trade')
def test_on_fill(mock_trade_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
exchange.id = "fake_exchange_id"
broker = Broker(exchange)
wallets = [Wallet(exchange, 10000 * USD), Wallet(exchange, 0 * BTC)]
portfolio = Portfolio(USD, wallets)
order = Order(side=TradeSide.BUY,
trade_type=TradeType.MARKET,
pair=USD / BTC,
quantity=5200.00 * USD,
portfolio=portfolio,
price=7000.00)
order.attach(broker)
order.execute(exchange)
broker._executed[order.id] = order
trade = mock_trade_class.return_value
trade.size = 5197.00
trade.commission = 3.00 * USD
trade.order_id = order.id
assert order.status == OrderStatus.OPEN
order.fill(exchange, trade)
assert order.status == OrderStatus.FILLED
assert order.remaining_size == 0
assert trade in broker.trades[order.id]
@mock.patch('tensortrade.exchanges.Exchange')
@mock.patch('tensortrade.trades.Trade')
def test_on_fill_with_complex_order(mock_trade_class,
mock_exchange_class):
exchange = mock_exchange_class.return_value
exchange.id = "fake_exchange_id"
broker = Broker(exchange)
wallets = [Wallet(exchange, 10000 * USD), Wallet(exchange, 0 * BTC)]
portfolio = Portfolio(USD, wallets)
side = TradeSide.BUY
order = Order(side=TradeSide.BUY,
trade_type=TradeType.MARKET,
pair=USD / BTC,
quantity=5200.00 * USD,
portfolio=portfolio,
price=7000.00)
risk_criteria = Stop("down", 0.03) ^ Stop("up", 0.02)
risk_management = OrderSpec(side=TradeSide.SELL if side == TradeSide.BUY else TradeSide.BUY,
trade_type=TradeType.MARKET,
pair=USD / BTC,
criteria=risk_criteria)
order += risk_management
order.attach(broker)
order.execute(exchange)
broker._executed[order.id] = order
# Execute fake trade
price = 7000.00
scale = order.price / price
commission = 3.00 * USD
base_size = scale * order.size - commission.size
trade = mock_trade_class.return_value
trade.order_id = order.id
trade.size = base_size
trade.price = price
trade.commission = commission
base_wallet = portfolio.get_wallet(exchange.id, USD)
quote_wallet = portfolio.get_wallet(exchange.id, BTC)
base_size = trade.size + trade.commission.size
quote_size = (order.price / trade.price) * (trade.size / trade.price)
base_wallet -= Quantity(USD, size=base_size, path_id=order.path_id)
quote_wallet += Quantity(BTC, size=quote_size, path_id=order.path_id)
assert trade.order_id in broker.executed.keys()
assert trade not in broker.trades
assert broker.unexecuted == []
order.fill(exchange, trade)
assert order.remaining_size == 0
assert trade in broker.trades[order.id]
assert broker.unexecuted != []
@mock.patch('tensortrade.exchanges.Exchange')
def test_reset(mock_exchange_class):
exchange = mock_exchange_class.return_value
exchange.id = "fake_exchange_id"
broker = Broker(exchange)
broker._unexecuted = [78, 98, 100]
broker._executed = {'a': 1, 'b': 2}
broker._trades = {'a': 2, 'b': 3}
broker.reset()
assert broker.unexecuted == []
assert broker.executed == {}
assert broker.trades == {}
| 29.249169
| 96
| 0.677079
|
94292bbe36047d5efe647010767d8fd6a0f692e4
| 1,403
|
py
|
Python
|
setup.py
|
metaist/attrbox
|
68c595f58e641f6e8ab1d5eb0bd163819823dd25
|
[
"MIT"
] | null | null | null |
setup.py
|
metaist/attrbox
|
68c595f58e641f6e8ab1d5eb0bd163819823dd25
|
[
"MIT"
] | null | null | null |
setup.py
|
metaist/attrbox
|
68c595f58e641f6e8ab1d5eb0bd163819823dd25
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
"""Install library package."""
# native
from pathlib import Path
import site
import sys
# lib
from setuptools import setup, find_namespace_packages
# pkg
pkg = {}
here = Path(__file__).parent.resolve()
exec( # pylint: disable=exec-used
(here / "src" / "attrbox" / "__about__.py").open(encoding="utf-8").read(), pkg
)
# See: https://github.com/pypa/pip/issues/7953
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
# See: https://github.com/pypa/pipenv/issues/1911
# See: https://caremad.io/posts/2013/07/setup-vs-requirement/
setup(
python_requires=">=3.8",
name="attrbox",
version=pkg["__version__"],
description=pkg["__doc__"].split("\n")[0],
long_description=(here / "README.md").read_text(encoding="utf-8"),
long_description_content_type="text/markdown",
license=pkg["__license__"],
author=pkg["__author__"],
author_email=pkg["__email__"],
url=pkg["__url__"],
download_url=pkg["__url__"],
package_dir={"": "src"},
packages=find_namespace_packages(where="src"),
install_requires=["setuptools"],
keywords=["attr", "attributes", "dict", "list"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
],
)
| 28.06
| 82
| 0.655738
|
69602341b98c1a1c9600bbf47271386f31dbffb5
| 2,065
|
py
|
Python
|
autorest/python/emsapi/models/adi_ems_web_api_v2_dto_ems_profile_ems_profile.py
|
ge-flight-analytics/ems-api-wrappers
|
5e787e0cbc72e7a3b06fa83ff6ba07968231f89c
|
[
"MIT"
] | 2
|
2017-02-20T18:32:02.000Z
|
2018-08-01T11:45:29.000Z
|
autorest/python/emsapi/models/adi_ems_web_api_v2_dto_ems_profile_ems_profile.py
|
ge-flight-analytics/ems-api-wrappers
|
5e787e0cbc72e7a3b06fa83ff6ba07968231f89c
|
[
"MIT"
] | 10
|
2017-02-20T16:17:04.000Z
|
2019-04-02T16:52:49.000Z
|
autorest/python/emsapi/models/adi_ems_web_api_v2_dto_ems_profile_ems_profile.py
|
ge-flight-analytics/ems-api-wrappers
|
5e787e0cbc72e7a3b06fa83ff6ba07968231f89c
|
[
"MIT"
] | 2
|
2017-02-18T23:22:20.000Z
|
2017-02-20T19:35:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoEmsProfileEmsProfile(Model):
"""Represents an APM (Automated Parameter Measurement) profile in an EMS
system.
:param profile_id: The local identifier for a profile
:type profile_id: int
:param profile_guid: The unique identifier of a profile in the system
:type profile_guid: str
:param profile_name: The name of the profile
:type profile_name: str
:param library: Flag for if a profile is a library profile
:type library: bool
:param current_version: The version of the profile
:type current_version: int
:param path: Path to the profile's location
:type path: str
"""
_validation = {
'profile_id': {'required': True},
'profile_guid': {'required': True},
'profile_name': {'required': True},
'library': {'required': True},
'current_version': {'required': True},
'path': {'required': True},
}
_attribute_map = {
'profile_id': {'key': 'profileId', 'type': 'int'},
'profile_guid': {'key': 'profileGuid', 'type': 'str'},
'profile_name': {'key': 'profileName', 'type': 'str'},
'library': {'key': 'library', 'type': 'bool'},
'current_version': {'key': 'currentVersion', 'type': 'int'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, profile_id, profile_guid, profile_name, library, current_version, path):
super(AdiEmsWebApiV2DtoEmsProfileEmsProfile, self).__init__()
self.profile_id = profile_id
self.profile_guid = profile_guid
self.profile_name = profile_name
self.library = library
self.current_version = current_version
self.path = path
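if __name__ == '__main__':
    # Illustrative usage only; every field below is required by the validation map
    # above, and the values are made-up placeholders.
    example = AdiEmsWebApiV2DtoEmsProfileEmsProfile(
        profile_id=1,
        profile_guid='00000000-0000-0000-0000-000000000000',
        profile_name='Example profile',
        library=False,
        current_version=3,
        path='profiles/example')
    print(example.profile_name)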
| 37.545455
| 95
| 0.599031
|
37920055d0056c7586e982605ed94ee9ce144795
| 242
|
py
|
Python
|
03/get_entropy.py
|
MarBry111/KiBIF
|
06cc6d969323045b38a52831a695d13bb5ebc2ab
|
[
"MIT"
] | null | null | null |
03/get_entropy.py
|
MarBry111/KiBIF
|
06cc6d969323045b38a52831a695d13bb5ebc2ab
|
[
"MIT"
] | null | null | null |
03/get_entropy.py
|
MarBry111/KiBIF
|
06cc6d969323045b38a52831a695d13bb5ebc2ab
|
[
"MIT"
] | null | null | null |
import struct
import time
import os
t = 0.5
T = 100
for ii in range(int(T*60/t)):
time.sleep(t)
#os.system('cat /proc/sys/kernel/random/entropy_avail')
os.system('cat /proc/sys/kernel/random/entropy_avail >> ~/Desktop/entropy.txt')
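# Alternative sketch (not part of the original script): read the same value in pure
# Python instead of shelling out to `cat`; the output path mirrors the one used above.
def read_entropy_avail(out_path=os.path.expanduser('~/Desktop/entropy.txt')):
    with open('/proc/sys/kernel/random/entropy_avail') as src, open(out_path, 'a') as dst:
        value = src.read()
        dst.write(value)
    return value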
| 16.133333
| 80
| 0.698347
|
dbe01842bebe419900324ef00b06f3f69ed5c950
| 4,771
|
py
|
Python
|
rlmolecule/alphazero/alphazero.py
|
dmdu/rlmolecule
|
5c9187775ef99ea6a06992788116754b1b308a8c
|
[
"BSD-3-Clause"
] | null | null | null |
rlmolecule/alphazero/alphazero.py
|
dmdu/rlmolecule
|
5c9187775ef99ea6a06992788116754b1b308a8c
|
[
"BSD-3-Clause"
] | null | null | null |
rlmolecule/alphazero/alphazero.py
|
dmdu/rlmolecule
|
5c9187775ef99ea6a06992788116754b1b308a8c
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import math
import numpy as np
from rlmolecule.alphazero.alphazero_problem import AlphaZeroProblem
from rlmolecule.alphazero.alphazero_vertex import AlphaZeroVertex
from rlmolecule.mcts.mcts import MCTS
from rlmolecule.mcts.mcts_vertex import MCTSVertex
from rlmolecule.tree_search.graph_search_state import GraphSearchState
from rlmolecule.tree_search.reward import Reward
logger = logging.getLogger(__name__)
class AlphaZero(MCTS):
"""
This class defines the interface for implementing AlphaZero-based games within this framework.
Such a game overrides the abstract methods below with application-specific implementations.
AlphaZeroGame interacts with AlphaZeroVertex. See AlphaZeroVertex for more details.
"""
def __init__(self,
problem: AlphaZeroProblem,
min_reward: float = 0.0,
pb_c_base: float = 1.0,
pb_c_init: float = 1.25,
dirichlet_noise: bool = True,
dirichlet_alpha: float = 1.0,
dirichlet_x: float = 0.25,
**kwargs) -> None:
"""
Constructor.
:param min_reward: Minimum reward to return for invalid actions
:param pb_c_base:
:param pb_c_init:
:param dirichlet_noise: whether to add dirichlet noise
:param dirichlet_alpha: dirichlet 'shape' parameter. Larger values spread out probability over more moves.
:param dirichlet_x: percentage to favor dirichlet noise vs. prior estimation. Smaller means less noise
"""
super().__init__(problem, vertex_class=AlphaZeroVertex, **kwargs)
self._min_reward: float = min_reward
self._pb_c_base: float = pb_c_base
self._pb_c_init: float = pb_c_init
self._dirichlet_noise: bool = dirichlet_noise
self._dirichlet_alpha: float = dirichlet_alpha
self._dirichlet_x: float = dirichlet_x
@property
def problem(self) -> AlphaZeroProblem:
# noinspection PyTypeChecker
return self._problem
def _accumulate_path_data(self, vertex: MCTSVertex, path: []):
children = vertex.children
visit_sum = sum(child.visit_count for child in children)
child_visits = [(child, child.visit_count / visit_sum) for child in children]
path.append((vertex, child_visits))
def _evaluate(
self,
search_path: [AlphaZeroVertex],
) -> Reward:
"""
Expansion step of AlphaZero, overrides MCTS evaluate step.
Estimates the value of a leaf vertex.
"""
assert len(search_path) > 0, 'Invalid attempt to evaluate an empty search path.'
leaf = search_path[-1]
self._expand(leaf)
children = leaf.children
if len(children) == 0:
return self.problem.reward_wrapper(leaf)
# get value estimate and child priors
value, child_priors = self.problem.get_value_and_policy(leaf)
# Store prior values for child vertices predicted from the policy network, and add dirichlet noise as
# specified in the game configuration.
prior_array: np.ndarray = np.array([child_priors[child] for child in children])
if self._dirichlet_noise:
random_state = np.random.RandomState()
noise = random_state.dirichlet(np.ones_like(prior_array) * self._dirichlet_alpha)
prior_array = prior_array * (1 - self._dirichlet_x) + (noise * self._dirichlet_x)
child_priors = prior_array.tolist()
normalization_factor = sum(child_priors)
leaf.child_priors = {child: prior / normalization_factor for child, prior in zip(children, child_priors)}
return self.problem.reward_class(scaled_reward=value)
def run(self, *args, **kwargs) -> ([], float):
path, reward = MCTS.run(self, *args, **kwargs)
self.problem.store_search_statistics(path, reward)
return path, reward
def _ucb_score(self, parent: AlphaZeroVertex, child: AlphaZeroVertex) -> float:
"""
A modified upper confidence bound score for the vertices value, incorporating the prior prediction.
:param child: Vertex for which the UCB score is desired
:return: UCB score for the given child
"""
child_priors = parent.child_priors
if child_priors is None:
return math.inf
pb_c = np.log((parent.visit_count + self._pb_c_base + 1) / self._pb_c_base) + self._pb_c_init
pb_c *= np.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child_priors[child]
return prior_score + child.value
def _make_new_vertex(self, state: GraphSearchState) -> AlphaZeroVertex:
return AlphaZeroVertex(state)
| 40.777778
| 114
| 0.673653
|
1c317c6f990469410000fc123e1ab7a1df626cbd
| 314
|
py
|
Python
|
LC/70.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | 2
|
2018-02-24T17:20:02.000Z
|
2018-02-24T17:25:43.000Z
|
LC/70.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
LC/70.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
class Solution(object):
dict={1:1, 2:2}
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
if n in self.dict:
return self.dict[n]
else:
            self.dict[n] = self.climbStairs(n-2) + self.climbStairs(n-1)
return self.dict[n]
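# Quick check (illustrative): the memoised recurrence f(n) = f(n-1) + f(n-2) with
# f(1) = 1 and f(2) = 2 gives 8 ways for 5 stairs.
assert Solution().climbStairs(5) == 8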
| 26.166667
| 74
| 0.43949
|
4527fbaf17dcc4265c1148afd93472cbb2c8112c
| 7,325
|
py
|
Python
|
mmseg/core/evaluation/class_names.py
|
weiyx16/mmsegmentation
|
6d35d76195f173fbc6b119a7d7815e67d78024c6
|
[
"Apache-2.0"
] | 18
|
2022-03-28T12:36:21.000Z
|
2022-03-31T10:47:07.000Z
|
mmseg/core/evaluation/class_names.py
|
weiyx16/mmsegmentation
|
6d35d76195f173fbc6b119a7d7815e67d78024c6
|
[
"Apache-2.0"
] | 13
|
2022-02-15T20:05:18.000Z
|
2022-02-15T20:05:21.000Z
|
mmseg/core/evaluation/class_names.py
|
weiyx16/mmsegmentation
|
6d35d76195f173fbc6b119a7d7815e67d78024c6
|
[
"Apache-2.0"
] | 4
|
2022-03-28T14:19:41.000Z
|
2022-03-30T08:06:55.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
def cityscapes_classes():
"""Cityscapes class names for external use."""
return [
'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
def ade_classes():
"""ADE20K class names for external use."""
return [
'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
'clock', 'flag'
]
def voc_classes():
"""Pascal VOC class names for external use."""
return [
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor'
]
def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
[255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
[0, 0, 230], [119, 11, 32]]
def ade_palette():
"""ADE20K palette for external use."""
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
def voc_palette():
"""Pascal VOC palette for external use."""
return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
def get_palette(dataset):
"""Get class palette (RGB) of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_palette()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
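if __name__ == '__main__':
    # Illustrative only: 'pascal_voc' resolves through dataset_aliases to the VOC
    # names and palette defined above, both of which have 21 entries.
    assert len(get_classes('pascal_voc')) == len(get_palette('pascal_voc')) == 21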
| 47.564935
| 79
| 0.484642
|
5de8398b704d04047899f2ed2ed5eff0cf9f95aa
| 1,371
|
py
|
Python
|
src/models/activity_paper.py
|
mirgee/thesis_project
|
296f292a84fe4756374d87c81e657ac991766a60
|
[
"MIT"
] | null | null | null |
src/models/activity_paper.py
|
mirgee/thesis_project
|
296f292a84fe4756374d87c81e657ac991766a60
|
[
"MIT"
] | 2
|
2020-03-24T17:03:19.000Z
|
2020-03-31T03:19:19.000Z
|
src/models/activity_paper.py
|
mirgee/thesis_project
|
296f292a84fe4756374d87c81e657ac991766a60
|
[
"MIT"
] | null | null | null |
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.models import Sequential
from keras import optimizers
from keras import initializers
# From 'Classification of Recurrence Plots’ Distance Matrices with a Convolutional Neural Network for
# Activity Recognition' paper
# image_height, image_width and num_channels are placeholder keyword arguments here;
# adjust them to the size of the recurrence-plot images being classified.
def activity_model(image_height=224, image_width=224, num_channels=3):
model = Sequential()
ki = initializers.RandomNormal()
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(image_height,image_width,num_channels),
kernel_initializer=ki))
model.add(Conv2D(16, (3, 3), activation='relu', kernel_initializer=ki))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer=ki))
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer=ki))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer=ki))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer=optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.0, amsgrad=False),
loss='binary_crossentropy',
metrics=['accuracy'])
return model
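if __name__ == '__main__':
    # Illustrative only: build the network with the placeholder input size and print
    # its layer summary.
    activity_model().summary()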
| 45.7
| 119
| 0.706054
|
32e0425be109268a570067cd95b0b8a86ca6bf4d
| 199
|
py
|
Python
|
controller/test_op.py
|
airlovelq/flask-frame
|
86a97522a6eff4e34f0cd5c131ebf68f7c78390a
|
[
"Apache-2.0"
] | null | null | null |
controller/test_op.py
|
airlovelq/flask-frame
|
86a97522a6eff4e34f0cd5c131ebf68f7c78390a
|
[
"Apache-2.0"
] | null | null | null |
controller/test_op.py
|
airlovelq/flask-frame
|
86a97522a6eff4e34f0cd5c131ebf68f7c78390a
|
[
"Apache-2.0"
] | null | null | null |
from .base_op import BaseOp
class TestOp(BaseOp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def test(self, msg):
        # msg is assumed to carry the id of the user being looked up.
        return self._database.get_user_by_id(msg)
| 24.875
| 53
| 0.673367
|
f18c0483ef053224feda41a53b5a7770de9e095f
| 2,373
|
py
|
Python
|
examples/fetch.py
|
adbenitez/fbchat
|
8052b818de5de9682f6d64405579ad640cd657ee
|
[
"BSD-3-Clause"
] | 1
|
2020-08-06T00:51:25.000Z
|
2020-08-06T00:51:25.000Z
|
examples/fetch.py
|
adbenitez/fbchat
|
8052b818de5de9682f6d64405579ad640cd657ee
|
[
"BSD-3-Clause"
] | null | null | null |
examples/fetch.py
|
adbenitez/fbchat
|
8052b818de5de9682f6d64405579ad640cd657ee
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import islice
from fbchat import Client
from fbchat.models import *
client = Client("<email>", "<password>")
# Fetches a list of all users you're currently chatting with, as `User` objects
users = client.fetchAllUsers()
print("users' IDs: {}".format([user.uid for user in users]))
print("users' names: {}".format([user.name for user in users]))
# If we have a user id, we can use `fetchUserInfo` to fetch a `User` object
user = client.fetchUserInfo("<user id>")["<user id>"]
# We can also query multiple users together, which returns a list of `User` objects
users = client.fetchUserInfo("<1st user id>", "<2nd user id>", "<3rd user id>")
print("user's name: {}".format(user.name))
print("users' names: {}".format([users[k].name for k in users]))
# `searchForUsers` searches for the user and gives us a list of the results,
# and then we just take the first one, aka. the most likely one:
user = client.searchForUsers("<name of user>")[0]
print("user ID: {}".format(user.uid))
print("user's name: {}".format(user.name))
print("user's photo: {}".format(user.photo))
print("Is user client's friend: {}".format(user.is_friend))
# Fetches a list of the 20 top threads you're currently chatting with
threads = client.fetchThreadList()
# Fetches the next 10 threads
threads += client.fetchThreadList(offset=20, limit=10)
print("Threads: {}".format(threads))
# Gets the last 10 messages sent to the thread
messages = client.fetchThreadMessages(thread_id="<thread id>", limit=10)
# Since the messages come in reversed order, reverse them
messages.reverse()
# Prints the content of all the messages
for message in messages:
print(message.text)
# If we have a thread id, we can use `fetchThreadInfo` to fetch a `Thread` object
thread = client.fetchThreadInfo("<thread id>")["<thread id>"]
print("thread's name: {}".format(thread.name))
print("thread's type: {}".format(thread.type))
# `searchForThreads` works like `searchForUsers`, but gives us a list of threads instead
thread = client.searchForThreads("<name of thread>")[0]
print("thread's name: {}".format(thread.name))
print("thread's type: {}".format(thread.type))
# Here should be an example of `getUnread`
# Print the image url for the last 20 images from the thread.
images = client.fetchThreadImages("<thread id>")
for image in islice(images, 20):
print(image.large_preview_url)
| 33.9
| 97
| 0.719343
|
277594fdad58a976f3b0de7bf2e8d6864cfe83ac
| 2,850
|
py
|
Python
|
SMS-Back-End/microservicio1/APIDB/aprovisionadorDBCrudo.py
|
mresti/StudentsManagementSystem
|
a1d67af517379b249630cac70a55bdfd9f77c54a
|
[
"Apache-2.0"
] | null | null | null |
SMS-Back-End/microservicio1/APIDB/aprovisionadorDBCrudo.py
|
mresti/StudentsManagementSystem
|
a1d67af517379b249630cac70a55bdfd9f77c54a
|
[
"Apache-2.0"
] | null | null | null |
SMS-Back-End/microservicio1/APIDB/aprovisionadorDBCrudo.py
|
mresti/StudentsManagementSystem
|
a1d67af517379b249630cac70a55bdfd9f77c54a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File that fills the database with content, for testing or any other purpose.
import MySQLdb
db = MySQLdb.connect(host="localhost", user="root", passwd="root", db="smm");
cursor = db.cursor()
MAX=5
# Populate the Alumno table with content
'''
Table schema:
CREATE TABLE Alumno(
nombre CHAR(20),
dni CHAR(9),
direccion CHAR(100),
localidad CHAR(50),
provincia CHAR(50),
fecha_nacimiento DATE,
telefono CHAR(50),
PRIMARY KEY (dni)
);
'''
for i in range(0,MAX):
nombre='\'Alumno'+str(i)+'\''
dni=str(i)
direccion='\'Direccion'+str(i)+'\''
localidad='\'Localidad'+str(i)+'\''
provincia='\'Provincia'+str(i)+'\''
fecha_nac='\'1988-10-'+str(i+1)+'\''
telefono='\''+str(i)+str(i)+str(i)+str(i)+'\''
query="INSERT INTO Alumno VALUES("+nombre+","+dni+","+direccion+","+localidad+","+provincia+","+fecha_nac+","+telefono+");"
print query
salida = cursor.execute(query);
# Execute the action
db.commit()
###### Populate the Profesor table with content
'''
Table schema:
CREATE TABLE Profesor(
nombre CHAR(20),
dni CHAR(9),
direccion CHAR(100),
localidad CHAR(50),
provincia CHAR(50),
fecha_nacimiento CHAR(50),
telefonoA CHAR(50),
telefonoB CHAR(50),
PRIMARY KEY (dni)
);
'''
for i in range(0, MAX):
nombre='\'Profesor'+str(i)+'\''
dni=str(i)
direccion='\'Direccion'+str(i)+'\''
localidad='\'Localidad'+str(i)+'\''
provincia='\'Provincia'+str(i)+'\''
fecha_nac='\'1988-10-'+str(i+1)+'\''
telefonoA='\''+str(i)+str(i)+str(i)+str(i)+'\''
telefonoB='\''+str(i)+str(i)+str(i)+str(i)+'\''
query="INSERT INTO Profesor VALUES("+nombre+","+dni+","+direccion+","+localidad+","+provincia+","+fecha_nac+","+telefonoA+","+telefonoB+");"
print query
salida = cursor.execute(query);
# Execute the action
db.commit()
###### Populate the Asignatura table with content
'''
Table schema:
CREATE TABLE Asignatura(
id CHAR(10),
nombre CHAR(20),
PRIMARY KEY (id)
);
'''
for i in range(0, MAX):
id='\''+str(i)+'\''
nombre='\'Asignatura'+str(i)+'\''
query="INSERT INTO Asignatura VALUES("+id+","+nombre+");"
print query
salida = cursor.execute(query);
# Execute the action
db.commit()
###### Populate the Curso table with content
'''
Table schema:
CREATE TABLE Curso(
curso INT(1),
grupo CHAR(1),
nivel CHAR(20),
PRIMARY KEY (curso)
);
'''
for i in range(0, MAX):
    curso='\''+str(i+1)+'\'' # year 1: 1
    grupo='\''+str(i)+'\'' # group B: B
    nivel='\'Nivel'+str(i)+'\'' # level ESO: ESO
query="INSERT INTO Curso VALUES("+curso+","+grupo+","+nivel+");"
print query
salida = cursor.execute(query);
# Execute the action
db.commit()
# Close the connection
cursor.close()
db.close()
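# Note (illustrative, not from the original script): MySQLdb can also bind values as
# query parameters instead of concatenating SQL strings, e.g.
#   cursor.execute("INSERT INTO Asignatura VALUES(%s, %s)", (str(i), 'Asignatura'+str(i)))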
| 24.358974
| 144
| 0.614035
|
b2039665fd176be713ad304e6c5ecfb6d43592a4
| 92
|
py
|
Python
|
.history/myblog/admin_20200416025918.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
.history/myblog/admin_20200416025918.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
.history/myblog/admin_20200416025918.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
    pass
admin.site.register(Post, PostAdmin)
| 23
| 33
| 0.836957
|
0887893cdb5abb36440f8755024552eea744deff
| 172
|
py
|
Python
|
app/__main__.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import app
from . import api
from . import graphql
from . import login
from . import webhooks
if __name__ == '__main__':
app.api.run()
| 15.636364
| 26
| 0.656977
|
92ae209efc825a19636a0334eb49d982509a2f72
| 28,081
|
py
|
Python
|
Guidelet/GuideletLib/Guidelet.py
|
basharbme/SlicerIGT
|
00ff9bf070d538d5c713bfc375f544ee4e8033bc
|
[
"BSD-3-Clause"
] | 1
|
2019-07-10T02:43:48.000Z
|
2019-07-10T02:43:48.000Z
|
Guidelet/GuideletLib/Guidelet.py
|
rprueckl/SlicerIGT
|
00ff9bf070d538d5c713bfc375f544ee4e8033bc
|
[
"BSD-3-Clause"
] | null | null | null |
Guidelet/GuideletLib/Guidelet.py
|
rprueckl/SlicerIGT
|
00ff9bf070d538d5c713bfc375f544ee4e8033bc
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from __main__ import vtk, qt, ctk, slicer
import logging
import time
# fix unicode error by aliasing str as unicode in Python 3
if slicer.app.majorVersion >= 5 or (slicer.app.majorVersion >= 4 and slicer.app.minorVersion >= 11):
unicode = str
from .UltraSound import UltraSound
class Guidelet(object):
@staticmethod
def showToolbars(show):
# Show/hide all existing toolbars
for toolbar in slicer.util.mainWindow().findChildren('QToolBar'):
toolbar.setVisible(show)
# Prevent sequence browser toolbar showing up automatically
# when a sequence is loaded.
# (put in try block because Sequence Browser module is not always installed)
try:
slicer.modules.sequencebrowser.autoShowToolBar = show
except:
pass
@staticmethod
def showPythonConsole(show):
slicer.util.mainWindow().pythonConsole().parent().setVisible(show)
@staticmethod
def showMenuBar(show):
for menubar in slicer.util.mainWindow().findChildren('QMenuBar'):
menubar.setVisible(show)
@staticmethod
def onGenericCommandResponseReceived(commandId, responseNode):
if responseNode:
logging.debug("Response from PLUS: {0}".format(responseNode.GetText(0)))
else:
logging.debug("Timeout. Command Id: {0}".format(commandId))
# Guidelet layout name definitions
VIEW_ULTRASOUND = unicode("Ultrasound")
VIEW_ULTRASOUND_3D = unicode("Ultrasound + 3D")
VIEW_3D_ULTRASOUND = unicode("3D + Ultrasound")
VIEW_ULTRASOUND_CAM_3D = unicode("Ultrasound + Webcam + 3D")
VIEW_ULTRASOUND_DUAL_3D = unicode("Ultrasound + Dual 3D")
VIEW_3D = unicode("3D")
VIEW_DUAL_3D = unicode("Dual 3D")
VIEW_TRIPLE_3D = unicode("Triple 3D")
VIEW_TRIPLE_3D_PARALLEL = unicode("Triple 3D Parallel")
VIEW_QUAD_3D = unicode("Quad 3D")
def __init__(self, parent, logic, configurationName='Default', sliceletDockWidgetPosition = qt.Qt.LeftDockWidgetArea):
logging.debug('Guidelet.__init__')
self.sliceletDockWidgetPosition = sliceletDockWidgetPosition
self.parent = parent
self.logic = logic
self.configurationName = configurationName
self.parameterNodeObserver = None
self.parameterNode = self.logic.getParameterNode()
self.layoutManager = slicer.app.layoutManager()
self.layoutNameToIdMap = {}
self.layoutNameToSelectCallbackMap = {}
self.defaultLayoutName = self.VIEW_ULTRASOUND
self.logic.updateParameterNodeFromSettings(self.parameterNode, self.configurationName)
self.setAndObserveParameterNode(self.parameterNode)
self.ultrasound = self.getUltrasoundClass()
self.fitUltrasoundImageToViewOnConnect = True
self.setupConnectorNode()
self.sliceletDockWidget = qt.QDockWidget(self.parent)
self.mainWindow=slicer.util.mainWindow()
self.sliceletDockWidget.setParent(self.mainWindow)
self.mainWindow.addDockWidget(self.sliceletDockWidgetPosition, self.sliceletDockWidget)
self.sliceletPanel = qt.QFrame(self.sliceletDockWidget)
self.sliceletPanelLayout = qt.QVBoxLayout(self.sliceletPanel)
self.sliceletDockWidget.setWidget(self.sliceletPanel)
self.topPanelLayout = qt.QGridLayout(self.sliceletPanel)
self.sliceletPanelLayout.addLayout(self.topPanelLayout)
self.setupTopPanel()
self.setupFeaturePanelList()
self.setupAdvancedPanel()
self.setupAdditionalPanel()
self.addConnectorObservers()
self.setupConnections()
self.sliceletDockWidget.setStyleSheet(self.loadStyleSheet())
def showModulePanel(self, show):
modulePanelDockWidget = slicer.util.mainWindow().findChildren('QDockWidget','PanelDockWidget')[0]
modulePanelDockWidget.setVisible(show)
if show:
mainWindow=slicer.util.mainWindow()
if self.sliceletDockWidgetPosition == qt.Qt.LeftDockWidgetArea:
mainWindow.tabifyDockWidget(self.sliceletDockWidget, modulePanelDockWidget)
self.sliceletDockWidget.setFeatures(qt.QDockWidget.DockWidgetClosable+qt.QDockWidget.DockWidgetMovable+qt.QDockWidget.DockWidgetFloatable)
else:
if self.sliceletDockWidgetPosition == qt.Qt.LeftDockWidgetArea:
# Prevent accidental closing or undocking of the slicelet's left panel
self.sliceletDockWidget.setFeatures(0)
def setupTopPanel(self):
"""
Reimplement this function and put widgets in self.topPanelLayout (QGridLayout)
"""
pass
def loadStyleSheet(self):
moduleDir = os.path.dirname(__file__)
style = self.parameterNode.GetParameter('StyleSheet')
styleFile = os.path.join(moduleDir, 'Resources', 'StyleSheets', style)
f = qt.QFile(styleFile)
if not f.exists():
logging.debug("Unable to load stylesheet, file not found")
return ""
else:
f.open(qt.QFile.ReadOnly | qt.QFile.Text)
ts = qt.QTextStream(f)
stylesheet = ts.readAll()
return stylesheet
def setupFeaturePanelList(self):
featurePanelList = self.createFeaturePanels()
self.collapsibleButtonGroup = qt.QButtonGroup()
for panel in featurePanelList:
self.collapsibleButtonGroup.addButton(panel)
def getUltrasoundClass(self):
return UltraSound(self)
def preCleanup(self):
self.sliceletDockWidget.setWidget(None)
self.sliceletPanel = None
self.mainWindow.removeDockWidget(self.sliceletDockWidget)
self.sliceletDockWidget = None
self.ultrasound.preCleanup()
self.disconnect()
def createFeaturePanels(self):
self.ultrasoundCollapsibleButton, self.ultrasoundLayout, self.procedureLayout = self.ultrasound.setupPanel(self.sliceletPanelLayout)
self.advancedCollapsibleButton = ctk.ctkCollapsibleButton()
featurePanelList = [self.ultrasoundCollapsibleButton, self.advancedCollapsibleButton]
return featurePanelList
def setupAdvancedPanel(self):
logging.debug('setupAdvancedPanel')
self.advancedCollapsibleButton.setProperty('collapsedHeight', 20)
self.advancedCollapsibleButton.text = "Settings"
self.sliceletPanelLayout.addWidget(self.advancedCollapsibleButton)
self.advancedLayout = qt.QFormLayout(self.advancedCollapsibleButton)
self.advancedLayout.setContentsMargins(12, 4, 4, 4)
self.advancedLayout.setSpacing(4)
# Layout selection combo box
self.viewSelectorComboBox = qt.QComboBox(self.advancedCollapsibleButton)
self.advancedLayout.addRow("Layout: ", self.viewSelectorComboBox)
self.registerDefaultGuideletLayouts()
self.selectView(self.VIEW_ULTRASOUND_3D)
# OpenIGTLink connector node selection
self.linkInputSelector = slicer.qMRMLNodeComboBox()
self.linkInputSelector.nodeTypes = ("vtkMRMLIGTLConnectorNode", "")
self.linkInputSelector.selectNodeUponCreation = True
self.linkInputSelector.addEnabled = False
self.linkInputSelector.removeEnabled = True
self.linkInputSelector.noneEnabled = False
self.linkInputSelector.showHidden = False
self.linkInputSelector.showChildNodeTypes = False
self.linkInputSelector.setMRMLScene( slicer.mrmlScene )
self.linkInputSelector.setToolTip( "Select connector node" )
self.advancedLayout.addRow("OpenIGTLink connector: ", self.linkInputSelector)
self.showFullSlicerInterfaceButton = qt.QPushButton()
self.showFullSlicerInterfaceButton.setText("Show 3D Slicer user interface")
self.advancedLayout.addRow(self.showFullSlicerInterfaceButton)
self.showGuideletFullscreenButton = qt.QPushButton()
self.showGuideletFullscreenButton.setText("Show Guidelet in full screen")
self.advancedLayout.addRow(self.showGuideletFullscreenButton)
self.saveSceneButton = qt.QPushButton()
self.saveSceneButton.setText("Save Guidelet scene")
self.advancedLayout.addRow(self.saveSceneButton)
self.saveDirectoryLineEdit = ctk.ctkPathLineEdit()
node = self.logic.getParameterNode()
sceneSaveDirectory = node.GetParameter('SavedScenesDirectory')
self.saveDirectoryLineEdit.currentPath = sceneSaveDirectory
self.saveDirectoryLineEdit.filters = ctk.ctkPathLineEdit.Dirs
self.saveDirectoryLineEdit.options = ctk.ctkPathLineEdit.DontUseSheet
self.saveDirectoryLineEdit.options = ctk.ctkPathLineEdit.ShowDirsOnly
self.saveDirectoryLineEdit.showHistoryButton = False
self.saveDirectoryLineEdit.setMinimumWidth(100)
self.saveDirectoryLineEdit.setMaximumWidth(500)
saveLabel = qt.QLabel()
saveLabel.setText("Save scene directory:")
hbox = qt.QHBoxLayout()
hbox.addWidget(saveLabel)
hbox.addWidget(self.saveDirectoryLineEdit)
self.advancedLayout.addRow(hbox)
self.exitButton = qt.QPushButton()
self.exitButton.setText("Exit")
self.advancedLayout.addRow(self.exitButton)
def setupAdditionalPanel(self):
pass
def registerLayout(self, layoutName, layoutId, layoutXmlDescription, layoutSelectCallback=None):
if (type(layoutName) != str and type(layoutName) != unicode) or len(layoutName) < 1:
logging.error('Failed to register layout, because layout name must be a non-empty string. Got ' + repr(layoutName))
return False
if not isinstance(layoutId, int):
logging.error('Failed to register layout named ' + str(layoutName) + ', because given layout ID is not an integer. Got ' + str(layoutId))
return False
if layoutName in self.layoutNameToIdMap:
logging.error('Failed to register layout, because a layout with name ' + str(layoutName) + ' is already registered')
return False
layoutLogic = self.layoutManager.layoutLogic()
if not isinstance(layoutId, slicer.vtkMRMLLayoutNode.SlicerLayout) and layoutLogic.GetLayoutNode().IsLayoutDescription(layoutId):
logging.error('Failed to register layout, because a layout with ID ' + str(layoutId) + ' is already registered. Try to choose a larger number')
return False
logging.info('Registering layout ' + str(layoutName) + ', ' + str(layoutId))
# Remember layout
self.layoutNameToIdMap[layoutName] = layoutId
self.layoutNameToSelectCallbackMap[layoutName] = layoutSelectCallback
# Add layout to view selector combobox
self.viewSelectorComboBox.addItem(layoutName)
# Register layout to layout logic
if not layoutLogic.GetLayoutNode().IsLayoutDescription(layoutId):
layoutLogic.GetLayoutNode().AddLayoutDescription(layoutId, layoutXmlDescription)
return True
def registerDefaultGuideletLayouts(self): # Common
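    # Each custom layout below is defined by Slicer layout XML and registered
    # under a unique ID (503-510) together with a callback that adjusts the
    # ultrasound slice visibility (and view fit) when that layout is selected.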
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"2\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">2</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_DUAL_3D, 503, customLayout, self.hideUltrasoundSliceIn3DView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Red\">"
" <property name=\"orientation\" action=\"default\">Axial</property>"
" <property name=\"viewlabel\" action=\"default\">R</property>"
" <property name=\"viewcolor\" action=\"default\">#F34A33</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_ULTRASOUND_3D, 504, customLayout, self.delayedFitAndShowUltrasoundSliceIn3dView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"2\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">2</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Red\">"
" <property name=\"orientation\" action=\"default\">Axial</property>"
" <property name=\"viewlabel\" action=\"default\">R</property>"
" <property name=\"viewcolor\" action=\"default\">#F34A33</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_ULTRASOUND_DUAL_3D, 505, customLayout, self.delayedFitAndShowUltrasoundSliceIn3dView)
customLayout = (
"<layout type=\"vertical\" split=\"true\" >"
" <item>"
" <layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"2\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">2</property>"
" </view>"
" </item>"
" </layout>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"3\">"
" <property name=\"viewlabel\" action=\"default\">3</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_TRIPLE_3D, 506, customLayout, self.hideUltrasoundSliceIn3DView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <layout type=\"vertical\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Red\">"
" <property name=\"orientation\" action=\"default\">Axial</property>"
" <property name=\"viewlabel\" action=\"default\">R</property>"
" <property name=\"viewcolor\" action=\"default\">#F34A33</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Yellow\">"
" <property name=\"orientation\" action=\"default\">Sagittal</property>"
" <property name=\"viewlabel\" action=\"default\">Y</property>"
" <property name=\"viewcolor\" action=\"default\">#F34A33</property>"
" </view>"
" </item>"
" </layout>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_ULTRASOUND_CAM_3D, 507, customLayout, self.delayedFitAndShowUltrasoundSliceIn3dView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Red\">"
" <property name=\"orientation\" action=\"default\">Axial</property>"
" <property name=\"viewlabel\" action=\"default\">R</property>"
" <property name=\"viewcolor\" action=\"default\">#F34A33</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_3D_ULTRASOUND, 508, customLayout, self.delayedFitAndShowUltrasoundSliceIn3dView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"2\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">2</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"3\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">3</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_TRIPLE_3D_PARALLEL, 509, customLayout, self.hideUltrasoundSliceIn3DView)
customLayout = (
"<layout type=\"horizontal\" split=\"false\" >"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"2\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">2</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"3\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">3</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"4\" type=\"secondary\">"
" <property name=\"viewlabel\" action=\"default\">4</property>"
" </view>"
" </item>"
"</layout>")
self.registerLayout(self.VIEW_QUAD_3D, 510, customLayout, self.hideUltrasoundSliceIn3DView)
# Add existing Slicer layouts with callbacks
layoutNode = self.layoutManager.layoutLogic().GetLayoutNode()
ultrasoundViewId = slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView
self.registerLayout(self.VIEW_ULTRASOUND, ultrasoundViewId, \
layoutNode.GetLayoutDescription(ultrasoundViewId), self.delayedFitAndHideUltrasoundSliceIn3dView)
threeDViewId = slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView
self.registerLayout(self.VIEW_3D, threeDViewId, \
layoutNode.GetLayoutDescription(threeDViewId), self.showUltrasoundSliceIn3DView)
def onSceneLoaded(self):
""" Derived classes can override this function
"""
pass
def setupScene(self):
""" setup feature scene
"""
self.ultrasound.setupScene()
def onSaveDirectoryPreferencesChanged(self):
sceneSaveDirectory = str(self.saveDirectoryLineEdit.currentPath)
self.logic.updateSettings({'SavedScenesDirectory' : sceneSaveDirectory}, self.configurationName)
node = self.logic.getParameterNode()
self.logic.updateParameterNodeFromUserPreferences(node, {'SavedScenesDirectory' : sceneSaveDirectory})
def onSaveSceneClicked(self):#common
#
# save the mrml scene to a temp directory, then zip it
#
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
node = self.logic.getParameterNode()
sceneSaveDirectory = node.GetParameter('SavedScenesDirectory')
sceneSaveDirectory = sceneSaveDirectory + "/" + self.logic.moduleName + "-" + time.strftime("%Y%m%d-%H%M%S")
logging.info("Saving scene to: {0}".format(sceneSaveDirectory))
if not os.access(sceneSaveDirectory, os.F_OK):
os.makedirs(sceneSaveDirectory)
applicationLogic = slicer.app.applicationLogic()
if applicationLogic.SaveSceneToSlicerDataBundleDirectory(sceneSaveDirectory, None):
logging.info("Scene saved to: {0}".format(sceneSaveDirectory))
else:
logging.error("Scene saving failed")
qt.QApplication.restoreOverrideCursor()
slicer.util.showStatusMessage("Saved!", 2000)
def onExitButtonClicked(self):
mainwindow = slicer.util.mainWindow()
mainwindow.close()
def setupConnections(self):
logging.debug('Guidelet.setupConnections()')
self.ultrasoundCollapsibleButton.connect('toggled(bool)', self.onUltrasoundPanelToggled)
self.ultrasound.setupConnections()
#advanced settings panel
self.showFullSlicerInterfaceButton.connect('clicked()', self.onShowFullSlicerInterfaceClicked)
self.showGuideletFullscreenButton.connect('clicked()', self.onShowGuideletFullscreenButton)
self.saveSceneButton.connect('clicked()', self.onSaveSceneClicked)
self.linkInputSelector.connect("nodeActivated(vtkMRMLNode*)", self.onConnectorNodeActivated)
self.viewSelectorComboBox.connect('activated(int)', self.onViewSelect)
self.exitButton.connect('clicked()', self.onExitButtonClicked)
self.saveDirectoryLineEdit.connect('currentPathChanged(QString)', self.onSaveDirectoryPreferencesChanged)
def disconnect(self):
self.removeConnectorObservers()
# Remove observer to old parameter node
self.removeParameterNodeObserver()
self.ultrasoundCollapsibleButton.disconnect('toggled(bool)', self.onUltrasoundPanelToggled)
#advanced settings panel
self.showFullSlicerInterfaceButton.disconnect('clicked()', self.onShowFullSlicerInterfaceClicked)
self.showGuideletFullscreenButton.disconnect('clicked()', self.onShowGuideletFullscreenButton)
self.saveSceneButton.disconnect('clicked()', self.onSaveSceneClicked)
self.linkInputSelector.disconnect("nodeActivated(vtkMRMLNode*)", self.onConnectorNodeActivated)
self.viewSelectorComboBox.disconnect('activated(int)', self.onViewSelect)
self.exitButton.disconnect('clicked()', self.onExitButtonClicked)
self.saveDirectoryLineEdit.disconnect('currentPathChanged(QString)', self.onSaveDirectoryPreferencesChanged)
def showFullScreen(self):
# We hide all toolbars, etc. which is inconvenient as a default startup setting,
# therefore disable saving of window setup.
settings = qt.QSettings()
settings.setValue('MainWindow/RestoreGeometry', 'false')
self.showToolbars(False)
self.showModulePanel(False)
self.showMenuBar(False)
self.showPythonConsole(False)
self.sliceletDockWidget.show()
mainWindow=slicer.util.mainWindow()
mainWindow.showFullScreen()
def onShowFullSlicerInterfaceClicked(self):
self.showToolbars(True)
self.showModulePanel(True)
self.showMenuBar(True)
slicer.util.mainWindow().showMaximized()
# Save current state
settings = qt.QSettings()
settings.setValue('MainWindow/RestoreGeometry', 'true')
def onShowGuideletFullscreenButton(self):
self.showFullScreen()
def executeCommand(self, command, commandResponseCallback):
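    # Replace any previous completion observer with the given callback, then
    # send the command through the current OpenIGTLink connector node.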
command.SetCommandAttribute('Name', command.GetCommandName())
command.RemoveObservers(slicer.vtkSlicerOpenIGTLinkCommand.CommandCompletedEvent)
command.AddObserver(slicer.vtkSlicerOpenIGTLinkCommand.CommandCompletedEvent, commandResponseCallback)
self.connectorNode.SendCommand(command)
def setAndObserveParameterNode(self, parameterNode):
if parameterNode == self.parameterNode and self.parameterNodeObserver:
# no change and node is already observed
return
# Remove observer to old parameter node
self.removeParameterNodeObserver()
# Set and observe new parameter node
self.parameterNode = parameterNode
if self.parameterNode:
self.parameterNodeObserver = self.parameterNode.AddObserver(vtk.vtkCommand.ModifiedEvent,
self.onParameterNodeModified)
# Update GUI
self.updateGUIFromParameterNode()
def removeParameterNodeObserver(self):
if self.parameterNode and self.parameterNodeObserver:
self.parameterNode.RemoveObserver(self.parameterNodeObserver)
self.parameterNodeObserver = None
def onParameterNodeModified(self, observer, eventid):
logging.debug('onParameterNodeModified')
self.updateGUIFromParameterNode()
def updateGUIFromParameterNode(self):#TODO
parameterNode = self.parameterNode
if not parameterNode:
return
def setupConnectorNode(self):
logging.info("setupConnectorNode")
self.connectorNodeObserverTagList = []
self.connectorNodeConnected = False
self.connectorNode = self.ultrasound.createPlusConnector()
self.connectorNode.Start()
def onConnectorNodeConnected(self, caller, event, force=False):
logging.info("onConnectorNodeConnected")
# Multiple notifications may be sent when connecting/disconnecting,
    # so we just return if we already know about the state change
if self.connectorNodeConnected and not force:
return
self.connectorNodeConnected = True
self.ultrasound.onConnectorNodeConnected()
if self.fitUltrasoundImageToViewOnConnect:
self.delayedFitUltrasoundImageToView(3000)
def onConnectorNodeDisconnected(self, caller, event, force=False):
logging.info("onConnectorNodeDisconnected")
# Multiple notifications may be sent when connecting/disconnecting,
    # so we just return if we already know about the state change
if not self.connectorNodeConnected and not force:
return
self.connectorNodeConnected = False
self.ultrasound.onConnectorNodeDisconnected()
def onConnectorNodeActivated(self):
logging.debug('onConnectorNodeActivated')
self.removeConnectorObservers()
# Start using new connector.
self.connectorNode = self.linkInputSelector.currentNode()
if not self.connectorNode:
logging.warning('No connector node found!')
return
self.addConnectorObservers()
def removeConnectorObservers(self):
# Clean up observers to old connector.
if self.connectorNode and self.connectorNodeObserverTagList:
for tag in self.connectorNodeObserverTagList:
self.connectorNode.RemoveObserver(tag)
self.connectorNodeObserverTagList = []
def addConnectorObservers(self):
# Force initial update
if self.connectorNode.GetState() == slicer.vtkMRMLIGTLConnectorNode.StateConnected:
self.onConnectorNodeConnected(None, None, True)
else:
self.onConnectorNodeDisconnected(None, None, True)
# Add observers for connect/disconnect events
events = [[slicer.vtkMRMLIGTLConnectorNode.ConnectedEvent, self.onConnectorNodeConnected],
[slicer.vtkMRMLIGTLConnectorNode.DisconnectedEvent, self.onConnectorNodeDisconnected]]
for tagEventHandler in events:
connectorNodeObserverTag = self.connectorNode.AddObserver(tagEventHandler[0], tagEventHandler[1])
self.connectorNodeObserverTagList.append(connectorNodeObserverTag)
def showUltrasoundSliceIn3DView(self):
self.setUltrasoundSliceVisibilityIn3dView(True)
def hideUltrasoundSliceIn3DView(self):
self.setUltrasoundSliceVisibilityIn3dView(False)
def setUltrasoundSliceVisibilityIn3dView(self, visible):
redNode = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNodeRed')
if visible:
redNode.SetSliceVisible(1)
else:
redNode.SetSliceVisible(0)
def fitUltrasoundImageToView(self):
redWidget = self.layoutManager.sliceWidget('Red')
redWidget.sliceController().fitSliceToBackground()
def delayedFitUltrasoundImageToView(self, delayMsec=500):
qt.QTimer.singleShot(delayMsec, self.fitUltrasoundImageToView)
def delayedFitAndShowUltrasoundSliceIn3dView(self):
self.delayedFitUltrasoundImageToView()
self.showUltrasoundSliceIn3DView()
def delayedFitAndHideUltrasoundSliceIn3dView(self):
self.delayedFitUltrasoundImageToView()
self.hideUltrasoundSliceIn3DView()
def selectView(self, viewName):
index = self.viewSelectorComboBox.findText(viewName)
if index == -1:
index = 0
self.viewSelectorComboBox.setCurrentIndex(index)
self.onViewSelect(index)
def onViewSelect(self, layoutIndex):
layoutName = self.viewSelectorComboBox.currentText
logging.debug('onViewSelect: {0}'.format(layoutName))
if layoutName not in self.layoutNameToIdMap:
logging.error('Layout called ' + str(layoutName) + ' has not been registered to the guidelet')
return
layoutId = self.layoutNameToIdMap[layoutName]
callback = self.layoutNameToSelectCallbackMap[layoutName]
self.layoutManager.setLayout(layoutId)
callback()
def onUltrasoundPanelToggled(self, toggled):
logging.debug('onUltrasoundPanelToggled: {0}'.format(toggled))
if not toggled:
# deactivate placement mode
interactionNode = slicer.app.applicationLogic().GetInteractionNode()
interactionNode.SetCurrentInteractionMode(interactionNode.ViewTransform)
return
self.showDefaultView()
def showDefaultView(self):
self.selectView(self.defaultLayoutName) # Red only layout by default
| 40.57948
| 149
| 0.712083
|
84958f398cb4096fd9d491ac28249cf18157a758
| 1,171
|
py
|
Python
|
python/plots/plot_event.py
|
billy000400/ImageTrk
|
085817e5ab76f3f753a8075bec54f5604a5c9b3d
|
[
"MIT"
] | null | null | null |
python/plots/plot_event.py
|
billy000400/ImageTrk
|
085817e5ab76f3f753a8075bec54f5604a5c9b3d
|
[
"MIT"
] | 1
|
2021-01-03T08:57:34.000Z
|
2021-01-03T23:41:22.000Z
|
python/plots/plot_event.py
|
billy000400/ImageTrk
|
085817e5ab76f3f753a8075bec54f5604a5c9b3d
|
[
"MIT"
] | null | null | null |
import sys
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
util_dir = Path.cwd().parent.joinpath('Utility')
sys.path.insert(1, str(util_dir))
from Information import *
from HitGenerators import Event
track_dir = Path("../../tracks")
db_files = [track_dir.joinpath('train_CeEndpoint-mix.db')]
# dist, db_files, hitNumCut=20):
gen = Event(db_files, 10)
windowNum = 100
trackNums = []
for idx in range(windowNum):
sys.stdout.write(t_info(f'Parsing windows {idx+1}/{windowNum}', special='\r'))
if idx+1 == windowNum:
sys.stdout.write('\n')
sys.stdout.flush()
hit_all, track_all = gen.generate(mode='eval')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
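    # track_all maps a track index to its hit indices plus a trailing PDG id;
    # each hit index is looked up in hit_all to get (x, y, z) for the scatter plot.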
for trkIdx, hitIdcPdgId in track_all.items():
hitIdc = hitIdcPdgId[0:-1]
hits = [hit_all[hitIdx] for hitIdx in hitIdc]
xs = [coord[0] for coord in hits]
ys = [coord[1] for coord in hits]
zs = [coord[2] for coord in hits]
ax.scatter(xs, zs, ys, alpha=1, label=trkIdx)
ax.legend()
ax.set(xlim=[-810, 810], ylim=[-1600, 1600], zlim=[-810, 810])
plt.show()
plt.close()
| 27.232558
| 82
| 0.649018
|
384d856e8183dea6702c32e0bd60e0367df90cdf
| 1,126
|
py
|
Python
|
rundeck/defaults.py
|
otupman/rundeckrun
|
ca78bc7e9fb1becf940c949e20b820af89f9cecc
|
[
"Apache-2.0"
] | 49
|
2015-01-21T10:07:16.000Z
|
2021-11-15T11:43:19.000Z
|
rundeck/defaults.py
|
otupman/rundeckrun
|
ca78bc7e9fb1becf940c949e20b820af89f9cecc
|
[
"Apache-2.0"
] | 19
|
2015-01-21T10:18:42.000Z
|
2019-10-04T03:32:32.000Z
|
rundeck/defaults.py
|
otupman/rundeckrun
|
ca78bc7e9fb1becf940c949e20b820af89f9cecc
|
[
"Apache-2.0"
] | 32
|
2015-09-09T04:58:39.000Z
|
2022-03-17T10:10:25.000Z
|
"""
:summary: Default values
:license: Apache License, Version 2.0
:author: Mark LaPerriere
:contact: rundeckrun@mindmind.com
:copyright: Mark LaPerriere 2015
"""
__docformat__ = "restructuredtext en"
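# Build an enum-like class: positional names get sequential integer values,
# keyword arguments keep their given values, and the generated class also
# exposes 'values' and 'keys' lists.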
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
values['values'] = list(values.values())
values['keys'] = list(values.keys())
return type(name, (), values)
Status = enum(
'Status',
RUNNING='running',
SUCCEEDED='succeeded',
FAILED='failed',
ABORTED='aborted',
SKIPPED='skipped',
PENDING='pending'
)
DupeOption = enum(
'DupeOption',
SKIP='skip',
CREATE='create',
UPDATE='update'
)
UuidOption = enum(
'UuidOption',
PRESERVE='preserve',
REMOVE='remove'
)
JobDefFormat = enum(
'JobDefFormat',
XML='xml',
YAML='yaml'
)
ExecutionOutputFormat = enum(
'ExecutionOutputFormat',
TEXT='text',
**dict(zip(JobDefFormat.keys, JobDefFormat.values))
)
RUNDECK_API_VERSION = 11
GET = 'get'
POST = 'post'
DELETE = 'delete'
JOB_RUN_TIMEOUT = 60
JOB_RUN_INTERVAL = 3
| 19.084746
| 67
| 0.645648
|
f77244a584ed40646735855711930fa9229515da
| 3,550
|
py
|
Python
|
homeassistant/components/ness_alarm/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/ness_alarm/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 9
|
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/components/ness_alarm/alarm_control_panel.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 10
|
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Support for Ness D8X/D16X alarm panel."""
import logging
from nessclient import ArmingState
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_NESS, SIGNAL_ARMING_STATE_CHANGED
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Ness Alarm alarm control panel devices."""
if discovery_info is None:
return
device = NessAlarmPanel(hass.data[DATA_NESS], "Alarm Panel")
async_add_entities([device])
class NessAlarmPanel(alarm.AlarmControlPanel):
"""Representation of a Ness alarm panel."""
def __init__(self, client, name):
"""Initialize the alarm panel."""
self._client = client
self._name = name
self._state = None
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(
self.hass, SIGNAL_ARMING_STATE_CHANGED, self._handle_arming_state_change
)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def code_format(self):
"""Return the regex for code format or None if no code is required."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_TRIGGER
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
await self._client.disarm(code)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
await self._client.arm_away(code)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
await self._client.arm_home(code)
async def async_alarm_trigger(self, code=None):
"""Send trigger/panic command."""
await self._client.panic(code)
@callback
def _handle_arming_state_change(self, arming_state):
"""Handle arming state update."""
if arming_state == ArmingState.UNKNOWN:
self._state = None
elif arming_state == ArmingState.DISARMED:
self._state = STATE_ALARM_DISARMED
elif arming_state == ArmingState.ARMING:
self._state = STATE_ALARM_ARMING
elif arming_state == ArmingState.EXIT_DELAY:
self._state = STATE_ALARM_ARMING
elif arming_state == ArmingState.ARMED:
self._state = STATE_ALARM_ARMED_AWAY
elif arming_state == ArmingState.ENTRY_DELAY:
self._state = STATE_ALARM_PENDING
elif arming_state == ArmingState.TRIGGERED:
self._state = STATE_ALARM_TRIGGERED
else:
_LOGGER.warning("Unhandled arming state: %s", arming_state)
self.async_schedule_update_ha_state()
| 30.869565
| 86
| 0.68338
|
2f1d8d40dd3ee600424a9d83658a463e94a5f2d9
| 566
|
py
|
Python
|
fair/forcing/ozone_st.py
|
OMS-NetZero/FAIR-pro
|
61b068858a043c21916f5e73bedd91eec0d27c57
|
[
"Apache-2.0"
] | 4
|
2017-09-26T12:04:04.000Z
|
2020-04-16T16:29:06.000Z
|
fair/forcing/ozone_st.py
|
OMS-NetZero/FAIR-pro
|
61b068858a043c21916f5e73bedd91eec0d27c57
|
[
"Apache-2.0"
] | 16
|
2017-06-17T07:42:50.000Z
|
2018-07-27T16:01:03.000Z
|
fair/forcing/ozone_st.py
|
OMS-NetZero/FAIR-pro
|
61b068858a043c21916f5e73bedd91eec0d27c57
|
[
"Apache-2.0"
] | 2
|
2017-07-04T12:06:23.000Z
|
2017-07-04T12:07:41.000Z
|
import numpy as np
from ..constants import cl_atoms, br_atoms, fracrel
def magicc(C_ODS,
C0,
eta1=-1.46030698e-5,
eta2=2.05401270e-3,
eta3=1.03143308):
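    # Equivalent effective stratospheric chlorine (EESC) from the change in ODS
    # concentrations: chlorine and bromine atom counts are weighted by their
    # fractional release factors (relative to the first species), with bromine
    # counted as 45 times more effective than chlorine.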
Cl = np.array(cl_atoms.aslist)
Br = np.array(br_atoms.aslist)
FC = np.array(fracrel.aslist)
EESC = (np.sum(Cl * 1000.*(C_ODS-C0) * FC/FC[0]) +
45*np.sum(Br * 1000.*(C_ODS-C0) * FC/FC[0])) * FC[0]
    # EESC takes ODS concentrations in ppb, while we provide ppt.
EESC = np.max((EESC,0))
F = eta1 * (eta2 * EESC) ** eta3
return F
| 24.608696
| 65
| 0.565371
|
3befa25b4a4efeb95aefb397ce5b2c674720a76d
| 414
|
py
|
Python
|
EstruturaSequencial/exercicio8.py
|
EugenioAntunes/lista-de-exercicios
|
4f19d30b502da064171d7d148b4e235e253fe992
|
[
"MIT"
] | null | null | null |
EstruturaSequencial/exercicio8.py
|
EugenioAntunes/lista-de-exercicios
|
4f19d30b502da064171d7d148b4e235e253fe992
|
[
"MIT"
] | null | null | null |
EstruturaSequencial/exercicio8.py
|
EugenioAntunes/lista-de-exercicios
|
4f19d30b502da064171d7d148b4e235e253fe992
|
[
"MIT"
] | null | null | null |
'''
8. Write a program that asks how much you earn per hour and the number of hours worked in the month. Calculate and display your total salary for that month.
'''
valor_hora = float(input('Hourly rate: '))
qunt_hora = int(input('Hours worked: '))
def salario(valor_hora, qunt_hora):
return valor_hora * qunt_hora
print('Your salary this month will be ', salario(valor_hora, qunt_hora))
| 41.4
| 156
| 0.746377
|
6532f4b9ca309604bb35641c5de385784d406c75
| 17,771
|
py
|
Python
|
mgetool/imports.py
|
MGEdata/mgetool
|
6f5a46b47c7abe58b26727080de4b82e76746112
|
[
"BSD-3-Clause"
] | 1
|
2021-12-28T09:27:10.000Z
|
2021-12-28T09:27:10.000Z
|
mgetool/imports.py
|
boliqq07/mgetool
|
6f5a46b47c7abe58b26727080de4b82e76746112
|
[
"BSD-3-Clause"
] | null | null | null |
mgetool/imports.py
|
boliqq07/mgetool
|
6f5a46b47c7abe58b26727080de4b82e76746112
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 19:47
# @Author : Administrator
# @Software: PyCharm
# @License: BSD 3-Clause
"""
Notes:
import data simply.
# Just a copy from xenonpy
for pictures use skimage.io.ImageCollection
"""
import copy
import glob
import os
import re
import shutil
from collections import defaultdict
from functools import partial
from pathlib import Path
from warnings import warn
import joblib
import pandas as pd
import requests
from skimage import io
from tqdm import tqdm
from mgetool.tool import def_pwd, parallelize
class Call(object):
"""
Call file in paths.
When there are four files in pwd path:
(file1.csv, file2.csv, file3.txt, file4.png)
Examples
---------
>>> call = Call(".",backend="csv")
>>> file1 = call.file1
>>> file2 = call.file2
>>> call = Call(".",backend="txt")
>>> file = call.file3
"""
@staticmethod
def extension(index_col=0):
read_csv = partial(pd.read_csv, index_col=index_col)
read_excel = partial(pd.read_excel, index_col=index_col)
extension = dict(
pkl_pd=('pkl_pd', pd.read_pickle),
csv=('csv', read_csv),
xlsx=('xlsx', read_excel),
pkl_sk=('pkl_sk', joblib.load),
png=("png", io.imread),
jpg=("jpg", io.imread),
)
return extension
__re__ = re.compile(r'[\s\-.]')
def __init__(self, *paths, backend='pkl_pd', prefix_with_upper=None, index_col=0):
"""
Parameters
----------
paths:list
list of path
backend:str
default file type (backend) used when loading
prefix_with_upper:str
if set, each file name is prefixed with its parent directory name to avoid name clashes
index_col:
use the first column as index in table.
"""
self._backend = backend
self.index_col = index_col
self._files = None
self.__extension__ = self.extension(index_col)
if len(paths) == 0:
self._paths = ('.',)
else:
self._paths = paths
if not prefix_with_upper:
prefix_with_upper = ()
self._prefix = prefix_with_upper
self._make_index(prefix_with_upper=prefix_with_upper)
def _make_index(self, *, prefix_with_upper):
def make(path_):
patten = self.__extension__[self._backend][0]
files = glob.glob(str(path_ / ('*.' + patten)))
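            # Closure factory: binds the current file path by value so each
            # generated property loads its own file (avoids the late-binding
            # pitfall of closures defined in a loop).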
def _nest(_f):
f_ = _f
return lambda s: s.__extension__[s._backend][1](f_)
for f in files:
# selection data_cluster
f = Path(f).resolve()
parent = re.split(r'[\\/]', str(f.parent))[-1]
# parent = str(f.parent).split('\\/')[-1]
fn = f.name[:-(1 + len(patten))]
fn = self.__re__.sub('_', fn)
if prefix_with_upper:
fn = '_'.join([parent, fn])
if fn in self._files:
warn("file %s with x_name %s already bind to %s and will be ignored" %
(str(f), fn, self._files[fn]), RuntimeWarning)
else:
self._files[fn] = str(f)
setattr(self.__class__, fn, property(_nest(str(f))))
self._files = defaultdict(str)
for path in self._paths:
path = Path(path).expanduser().absolute()
if not path.exists():
raise RuntimeError('%s not exists' % str(path))
make(path)
@classmethod
def from_http(cls, url, save_to, *, filename=None, chunk_size=256 * 1024, params=None,
**kwargs):
"""
Get file object via a http request.
Parameters
----------
url: str
The resource url.
save_to: str
The directory path to save the downloaded object into.
filename: str, optional
Specify the file name when saving.
Set to ``None`` (default) to use a name inferred from the http header.
chunk_size: int, optional
Chunk size.
params: any, optional
Parameters will be passed to ``requests.get`` function.
See Also: `requests <http://docs.python-requests.org/>`_
kwargs: dict, optional
Pass to ``requests.get`` function as the ``kwargs`` parameters.
Returns
-------
str
File path including the file name.
"""
r = requests.get(url, params, **kwargs)
r.raise_for_status()
if not filename:
if 'filename' in r.headers:
filename = r.headers['filename']
else:
filename = url.split('/')[-1]
if isinstance(save_to, str):
save_to = Path(save_to)
if not isinstance(save_to, Path) or not save_to.is_dir():
raise RuntimeError('%s is not a legal path or not point to a path' % save_to)
file_ = str(save_to / filename)
with open(file_, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return file_
def __repr__(self):
cont_ls = ['<{}> includes:'.format(self.__class__.__name__)]
for k, v in self._files.items():
cont_ls.append('"{}": {}'.format(k, v))
return '\n'.join(cont_ls)
def csv(self):
return Call(*self._paths, backend='csv', prefix_with_upper=self._prefix, index_col=self.index_col)
def pickle_pd(self):
return Call(*self._paths, backend='pkl_pd', prefix_with_upper=self._prefix, index_col=self.index_col)
def pickle_sk(self):
return Call(*self._paths, backend='pkl_sk', prefix_with_upper=self._prefix, index_col=self.index_col)
def xlsx(self):
return Call(*self._paths, backend='xlsx', prefix_with_upper=self._prefix, index_col=self.index_col)
def png(self):
return Call(*self._paths, backend='png', prefix_with_upper=self._prefix, index_col=self.index_col)
def jpg(self):
return Call(*self._paths, backend='jpg', prefix_with_upper=self._prefix, index_col=self.index_col)
def __call__(self, *args, **kwargs):
return self.__extension__[self._backend][1](*args, **kwargs)
def __getattr__(self, name):
"""
Return a new Call bound to the backend given by ``name``.
Parameters
----------
name: str
Backend (file extension) name.
Returns
-------
Call
"""
if name in self.__extension__:
return self.__class__(*self._paths, backend=name, prefix_with_upper=self._prefix)
else:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
def check_file(spath, file_path, suffix=None):
os.chdir(file_path)
# print(os.path.abspath(os.curdir))
all_file = os.listdir('.')
files = []
for f in all_file:
if os.path.isdir(f):
ff = os.path.join(file_path, f)
files.extend(check_file(spath, ff, suffix=suffix))
os.chdir(file_path)
else:
if not suffix:
che = True
elif suffix == "":
che = "" == os.path.splitext(f)[1]
else:
che = "".join((".", suffix)) == os.path.splitext(f)[1]
if che:
rel_path = file_path.replace(spath, "")
parents = re.split(r'[\\/]', str(rel_path))
files.append([parents, f])
else:
pass
return files
class BatchFile:
r"""
Search files and filter files and re-site files.
Examples
---------
>>> a = BatchFile(".")
>>> a.filter_dir_name("a")
>>> a.filter_file_name("2")
>>> print(a.file_list)
...
#copy the file to new path and keep the dir structure
>>> a.to_path(r"C:\Users\Admin\Desktop\d2",flatten=False)
#copy the file to new path, flatten the file and add the dir name on file: dirname_1_filename.
>>> a.to_path(r"C:\Users\Admin\Desktop\d2", add_dir=[-1], flatten=True)
#copy the file to new path, flatten the file and add the dir name on file: dirname_2_dirname_1_filename.
>>> a.to_path(r"C:\Users\Admin\Desktop\d2", add_dir=[-2,-1], flatten=True)
"""
def __init__(self, path=None, suffix=None):
"""
Parameters
----------
path:str
total dir of all file
suffix:str
suffix of file
Examples:
.txt
"""
path = def_pwd(path)
self.path = path
parents = re.split(r'[\\/]', str(path))
self.parents = parents
self.file_list = check_file(path, path, suffix=suffix)
self.init_file = tuple(self.file_list)
self.file_list_merge = []
self.file_list_merge_new = []
self.file_dir = []
def filter_file_name(self, include=None, exclude=None):
"""
Parameters
----------
include:str
get the filename with include str
such as hold "ast_tep" with "ast" string
exclude: str
delete the filename with exclude str
such as hold "ast_cap" and delete "ast_tep" with "tep" str,
"""
if include is None and exclude is None:
return
assert include != []
assert exclude != []
if isinstance(include, str):
include = [include, ]
if isinstance(exclude, str):
exclude = [exclude, ]
file_list_filter = []
for file_i in self.file_list:
name = file_i[1]
if include and not exclude:
if any([i in name for i in include]):
file_list_filter.append(file_i)
elif not include and exclude:
if not any([i in name for i in exclude]):
file_list_filter.append(file_i)
elif include and exclude:
if any([i in name for i in include]) and not any([i in name for i in exclude]):
file_list_filter.append(file_i)
else:
raise TypeError("one of include, exclude must be str or list of str")
self.file_list = file_list_filter
def filter_dir_name(self, include=None, exclude=None, layer=-1):
"""
Filter the dir (and its sub_files).
Parameters
----------
include:str,list of str
get the filename with include str
such as hold "ast_tep" with "ast" string
exclude: str, list of str
delete the filename with exclude str
such as hold "ast_cap" and delete "ast_tep" with "tep" str,
layer:int,list
if list, check the sum name of the layers.
Filter dir with target layer,all the dir should contain the sublayer!
Examples:
for /home,
/home/ast, -1
/home/ast/eag, -2
/home/ast/eag/kgg, -3
"""
if include is None and exclude is None:
return
assert include != []
assert exclude != []
if isinstance(include, str):
include = [include, ]
if isinstance(exclude, str):
exclude = [exclude, ]
file_list_filter = []
for file_i in self.file_list:
try:
if isinstance(layer, int):
layer = [layer, ]
if isinstance(layer, list):
name = [file_i[0][i] for i in layer]
else:
name = file_i[0]
name = "".join(name)
if include and not exclude:
if any([i in name for i in include]):
file_list_filter.append(file_i)
elif not include and exclude:
if not any([i in name for i in exclude]):
file_list_filter.append(file_i)
elif include and exclude:
if any([i in name for i in include]) and not any([i in name for i in exclude]):
file_list_filter.append(file_i)
else:
raise TypeError("one of include, exclude must be str or list of str")
except IndexError:
pass
self.file_list = file_list_filter
def merge(self, path=None, flatten=False, add_dir="3-layer", refresh_file_list=True, pop=0):
"""
Merge dir and file name together.
Parameters
----------
path:str
new path
flatten:True
flatten the filtered file.
if flatten is dict, the key is the specific dir name,and value is True.
Examples:
flatten = {"asp":True}
add_dir:int,list
add the top dir_name to file to escape same name file.
only valid for flatten=True
refresh_file_list:bool
refresh file_list or not.
pop: int (negative)
pop the last n layer. default =0
used for copy by dir rather than files. just used for flatten=False
Returns
-------
new filename
Args:
refresh_file_list:
refresh_file_list:
"""
if not path:
path = self.path
flatten = False
if not add_dir:
add_dir = []
elif add_dir == "3-layer":
add_dir = [-1, -2, -3]
if isinstance(add_dir, int):
add_dir = [add_dir, ]
if flatten is not False:
assert pop == 0
assert pop <= 0
file_list_merge = []
for file_i in self.file_list:
site = copy.copy(file_i[0])
if isinstance(flatten, dict):
site = [site[_] for _ in add_dir]
site_c = ""
for i, j in enumerate(site):
i -= len(site)
if i in flatten.keys():
if flatten[i] in [True, "layer", "dir", "folder", 1, "s"]:
site_c += "".join((j, "/"))
else:
site_c += "".join((j, "_"))
else:
site_c += "".join((j, "_"))
site_c = re.split(r'[\\/]', str(site_c))
site_c[-1] += file_i[1]
file_list_merge.append(os.path.join(path, *site_c))
elif flatten:
site = [site[_] for _ in add_dir]
site.append(file_i[1])
site = "_".join(site)
file_list_merge.append(os.path.join(path, site))
else:
site.append(file_i[1])
if pop != 0:
site = site[:pop]
namei = os.path.join(path, *site)
if len(file_list_merge) == 0 or namei != file_list_merge[-1]:
file_list_merge.append(namei)
if refresh_file_list:
self.file_list_merge = file_list_merge
fdir = list(set([os.path.dirname(i) for i in file_list_merge]))
fdir.sort()
self.file_dir = fdir
return file_list_merge
def to_path(self, new_path, flatten=False, add_dir="3-layer", pop=0, n_jobs=1):
"""
Parameters
----------
new_path:str
new path
flatten:bool,dict
flatten the filtered file.
if flatten is dict, the key is the specific dir name,and value is True.
Examples:
flatten = {"asp":True}
add_dir:list, int
add the top dir_name to file to escape same name file.
only valid for flatten=True
pop: int (negative)
pop the last n layer. default =0
used for copy by dir rather than files. just used for flatten=False
n_jobs:int
n_jobs
Returns
-------
file in path.
"""
self.file_list_merge = self.merge(pop=pop)
new_path = def_pwd(new_path)
self.file_list_merge_new = self.merge(path=new_path, flatten=flatten, add_dir=add_dir,
refresh_file_list=False, pop=pop)
if len(set(self.file_list_merge_new)) < len(set(self.file_list_merge)):
raise UserWarning("There are same name files after flatten folders. "
"you can change add_dir to add difference prefix to files", )
if n_jobs != 1:
parallelize(n_jobs, self.copy_user, zip(self.file_list_merge, self.file_list_merge_new, ),
mode="j",
respective=False)
else:
for ij in tqdm(list(zip(self.file_list_merge, self.file_list_merge_new))):
self.copy_user(ij)
@staticmethod
def copy_user(k):
i, j = k
if os.path.isdir(i):
shutil.copytree(i, j)
else:
path_i = os.path.split(j)[0]
if not os.path.exists(path_i):
os.makedirs(path_i)
shutil.copy(i, j)
# if __name__ == "__main__":
# others please use shutil
# shutil.copytree()
# a = BatchFile(r"C:\Users\wangchangxin\Desktop\d1")
# a.filter_dir_name("a", layer=-1)
# a.filter_file_name("2")
# a.to_path(r"C:\Users\wangchangxin\Desktop\d2", add_dir=[-2, -1], flatten=True)
# bf = BatchFile(r"/home/iap13/wcx/CHG")
# bf.filter_dir_name(include="Mo")
# filenames = bf.file_list
| 32.193841
| 109
| 0.533622
|
985c9f7b13b348633d97babe0fd8230b8c09c946
| 2,018
|
py
|
Python
|
pontoon/insights/models.py
|
dothq/pontoon
|
fa85710f56e50d500e6bf8e6c82626ce64440a62
|
[
"BSD-3-Clause"
] | 1
|
2021-10-03T20:48:42.000Z
|
2021-10-03T20:48:42.000Z
|
pontoon/insights/models.py
|
dothq/pontoon
|
fa85710f56e50d500e6bf8e6c82626ce64440a62
|
[
"BSD-3-Clause"
] | 14
|
2021-06-05T00:09:20.000Z
|
2021-09-03T01:48:36.000Z
|
pontoon/insights/models.py
|
dothq/pontoon
|
fa85710f56e50d500e6bf8e6c82626ce64440a62
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import timedelta
from django.db import models
from django.utils import timezone
from pontoon.base.models import AggregatedStats
def active_users_default():
return {
"managers": 0,
"reviewers": 0,
"contributors": 0,
}
class InsightsSnapshot(AggregatedStats, models.Model):
created_at = models.DateField(default=timezone.now)
# Active users
total_managers = models.PositiveIntegerField(default=0)
total_reviewers = models.PositiveIntegerField(default=0)
total_contributors = models.PositiveIntegerField(default=0)
active_users_last_12_months = models.JSONField(default=active_users_default)
active_users_last_6_months = models.JSONField(default=active_users_default)
active_users_last_3_months = models.JSONField(default=active_users_default)
active_users_last_month = models.JSONField(default=active_users_default)
# Unreviewed lifespan
unreviewed_suggestions_lifespan = models.DurationField(default=timedelta)
# Translation activity
completion = models.FloatField()
human_translations = models.PositiveIntegerField(default=0)
machinery_translations = models.PositiveIntegerField(default=0)
new_source_strings = models.PositiveIntegerField(default=0)
# Review activity
peer_approved = models.PositiveIntegerField(default=0)
self_approved = models.PositiveIntegerField(default=0)
rejected = models.PositiveIntegerField(default=0)
new_suggestions = models.PositiveIntegerField(default=0)
class Meta:
abstract = True
class LocaleInsightsSnapshot(InsightsSnapshot):
locale = models.ForeignKey("base.Locale", models.CASCADE)
class ProjectInsightsSnapshot(InsightsSnapshot):
project = models.ForeignKey("base.Project", models.CASCADE)
class ProjectLocaleInsightsSnapshot(AggregatedStats):
project_locale = models.ForeignKey("base.ProjectLocale", models.CASCADE)
created_at = models.DateField(default=timezone.now)
completion = models.FloatField()
| 34.20339
| 80
| 0.777502
|
479a0f434cfdf5d2c2915f8fa16ad9980e630b8c
| 4,904
|
py
|
Python
|
constance/backends/database/__init__.py
|
Anders-Linden/django-constance
|
41762b3dd6a9194c4d062261246d95ddb3677d7e
|
[
"BSD-3-Clause"
] | null | null | null |
constance/backends/database/__init__.py
|
Anders-Linden/django-constance
|
41762b3dd6a9194c4d062261246d95ddb3677d7e
|
[
"BSD-3-Clause"
] | null | null | null |
constance/backends/database/__init__.py
|
Anders-Linden/django-constance
|
41762b3dd6a9194c4d062261246d95ddb3677d7e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.cache import caches
from django.http import request
from django.contrib.sites.shortcuts import get_current_site
from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured
from django.db import (
IntegrityError,
OperationalError,
ProgrammingError,
transaction,
)
from django.db.models.signals import post_save
from .. import Backend
from ... import settings, signals, config
class DatabaseBackend(Backend):
def __init__(self):
from .models import Constance
self._model = Constance
self._prefix = settings.DATABASE_PREFIX
self._autofill_timeout = settings.DATABASE_CACHE_AUTOFILL_TIMEOUT
self._autofill_cachekey = 'autofilled'
if not self._model._meta.installed:
raise ImproperlyConfigured(
"The constance.backends.database app isn't installed "
"correctly. Make sure it's in your INSTALLED_APPS setting.")
if settings.DATABASE_CACHE_BACKEND:
self._cache = caches[settings.DATABASE_CACHE_BACKEND]
if isinstance(self._cache, LocMemCache):
raise ImproperlyConfigured(
"The CONSTANCE_DATABASE_CACHE_BACKEND setting refers to a "
"subclass of Django's local-memory backend (%r). Please "
"set it to a backend that supports cross-process caching."
% settings.DATABASE_CACHE_BACKEND)
else:
self._cache = None
self.autofill()
# Clear simple cache.
post_save.connect(self.clear, sender=self._model)
def add_prefix(self, key):
return "%s%s" % (self._prefix, key)
def autofill(self):
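        # Warm the cache in one pass: a marker key records that autofill ran, so
        # the bulk set_many only happens again after the timeout expires.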
if not self._autofill_timeout or not self._cache:
return
full_cachekey = self.add_prefix(self._autofill_cachekey)
if self._cache.get(full_cachekey):
return
autofill_values = {}
autofill_values[full_cachekey] = 1
for key, value in self.mget(settings.CONFIG):
autofill_values[self.add_prefix(key)] = value
self._cache.set_many(autofill_values, timeout=self._autofill_timeout)
def mget(self, keys):
if not keys:
return
keys = {self.add_prefix(key): key for key in keys}
try:
stored = self._model._default_manager.filter(site=get_current_site(request)).filter(key__in=keys)
for const in stored:
yield keys[const.key], const.value
except (OperationalError, ProgrammingError):
pass
def get(self, key):
key = self.add_prefix(key)
if self._cache:
value = self._cache.get(key)
if value is None:
self.autofill()
value = self._cache.get(key)
else:
value = None
if value is None:
try:
value = self._model._default_manager.get(key=key, site=get_current_site(request)).value
except (OperationalError, ProgrammingError, self._model.DoesNotExist):
pass
else:
if self._cache:
self._cache.add(key, value)
return value
def set(self, key, value):
key = self.add_prefix(key)
created = False
queryset = self._model._default_manager.filter(site=get_current_site(request)).all()
# Set _for_write attribute as get_or_create method does
# https://github.com/django/django/blob/2.2.11/django/db/models/query.py#L536
queryset._for_write = True
try:
constance = queryset.get(key=key, site=get_current_site(request))
except (OperationalError, ProgrammingError):
# database is not created, noop
return
except self._model.DoesNotExist:
try:
with transaction.atomic(using=queryset.db):
queryset.create(key=key, value=value, site=get_current_site(request))
created = True
except IntegrityError as error:
# Allow concurrent writes
constance = queryset.get(key=key, site=get_current_site(request))
if not created:
old_value = constance.value
constance.value = value
constance.save()
else:
old_value = None
if self._cache:
self._cache.set(key, value)
signals.config_updated.send(
sender=config, key=key, old_value=old_value, new_value=value
)
def clear(self, sender, instance, created, **kwargs):
if self._cache and not created:
keys = [self.add_prefix(k) for k in settings.CONFIG]
keys.append(self.add_prefix(self._autofill_cachekey))
self._cache.delete_many(keys)
self.autofill()
| 37.151515
| 109
| 0.618679
|
1ea9e1c3a17d30ac35b053d4e2563897802a1f5d
| 360
|
py
|
Python
|
desktop.py
|
babyabdul/GUI_Tkinter
|
7c2bf09fede95d22a90da786f55dfd2052b8f87f
|
[
"MIT"
] | null | null | null |
desktop.py
|
babyabdul/GUI_Tkinter
|
7c2bf09fede95d22a90da786f55dfd2052b8f87f
|
[
"MIT"
] | null | null | null |
desktop.py
|
babyabdul/GUI_Tkinter
|
7c2bf09fede95d22a90da786f55dfd2052b8f87f
|
[
"MIT"
] | null | null | null |
from tkinter import *
root = Tk()
#1. Creating a label Widget
myLabek1 = Label(root, text="Hello From TKinter")
myLabek2 = Label(root, text="My name is Abdul Rafik")
myLabel3 = Label(root, text="")
# 2.Showing it onto screen
myLabek1.grid(row=0, column=0)
myLabek2.grid(row=1, column=5)
myLabel3.grid(row=1, column=1)
root.mainloop()
| 20
| 54
| 0.672222
|
b8027c36538607548a4aeda994432706ea9cf9de
| 3,633
|
py
|
Python
|
yandex/cloud/mdb/kafka/v1/common_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 36
|
2018-12-23T13:51:50.000Z
|
2022-03-25T07:48:24.000Z
|
yandex/cloud/mdb/kafka/v1/common_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 15
|
2019-02-28T04:55:09.000Z
|
2022-03-06T23:17:24.000Z
|
yandex/cloud/mdb/kafka/v1/common_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 18
|
2019-02-23T07:10:57.000Z
|
2022-03-28T14:41:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/kafka/v1/common.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/kafka/v1/common.proto',
package='yandex.cloud.mdb.kafka.v1',
syntax='proto3',
serialized_options=b'\n\035yandex.cloud.api.mdb.kafka.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/kafka/v1;kafka',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n&yandex/cloud/mdb/kafka/v1/common.proto\x12\x19yandex.cloud.mdb.kafka.v1*\xe2\x01\n\x0f\x43ompressionType\x12 \n\x1c\x43OMPRESSION_TYPE_UNSPECIFIED\x10\x00\x12!\n\x1d\x43OMPRESSION_TYPE_UNCOMPRESSED\x10\x01\x12\x19\n\x15\x43OMPRESSION_TYPE_ZSTD\x10\x02\x12\x18\n\x14\x43OMPRESSION_TYPE_LZ4\x10\x03\x12\x1b\n\x17\x43OMPRESSION_TYPE_SNAPPY\x10\x04\x12\x19\n\x15\x43OMPRESSION_TYPE_GZIP\x10\x05\x12\x1d\n\x19\x43OMPRESSION_TYPE_PRODUCER\x10\x06\x42\x64\n\x1dyandex.cloud.api.mdb.kafka.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/kafka/v1;kafkab\x06proto3'
)
_COMPRESSIONTYPE = _descriptor.EnumDescriptor(
name='CompressionType',
full_name='yandex.cloud.mdb.kafka.v1.CompressionType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_UNCOMPRESSED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_ZSTD', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_LZ4', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_SNAPPY', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_GZIP', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMPRESSION_TYPE_PRODUCER', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=70,
serialized_end=296,
)
_sym_db.RegisterEnumDescriptor(_COMPRESSIONTYPE)
CompressionType = enum_type_wrapper.EnumTypeWrapper(_COMPRESSIONTYPE)
COMPRESSION_TYPE_UNSPECIFIED = 0
COMPRESSION_TYPE_UNCOMPRESSED = 1
COMPRESSION_TYPE_ZSTD = 2
COMPRESSION_TYPE_LZ4 = 3
COMPRESSION_TYPE_SNAPPY = 4
COMPRESSION_TYPE_GZIP = 5
COMPRESSION_TYPE_PRODUCER = 6
DESCRIPTOR.enum_types_by_name['CompressionType'] = _COMPRESSIONTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.48913
| 585
| 0.784476
|
e0ea9c53ed365f420bf3f60807d3393e9a519ec5
| 239
|
py
|
Python
|
unzip_all/unzip_all.py
|
samuelyeungkc/PythonToolBox
|
fba65bd87239ede54e9a206cd4a90344723d619d
|
[
"MIT"
] | null | null | null |
unzip_all/unzip_all.py
|
samuelyeungkc/PythonToolBox
|
fba65bd87239ede54e9a206cd4a90344723d619d
|
[
"MIT"
] | null | null | null |
unzip_all/unzip_all.py
|
samuelyeungkc/PythonToolBox
|
fba65bd87239ede54e9a206cd4a90344723d619d
|
[
"MIT"
] | null | null | null |
import os
import subprocess
# Unzip every .zip archive in the current directory (skipping this script itself).
entries = os.listdir(".")
unzipped = []
for f in entries:
    if f != __file__ and f.endswith(".zip"):
        subprocess.call(["unzip", f])
        unzipped.append(f)
print("Unzipped files: " + str(unzipped))
print(os.listdir("."))
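# Editor's note: an illustrative alternative added by the editor (not part of the
# original script): the same batch-unzip using the standard-library zipfile module,
# which removes the dependency on an external `unzip` binary. The function name and
# its use of a directory argument are the editor's own; call it instead of the loop
# above if the `unzip` command is unavailable.
def unzip_all_with_zipfile(directory="."):
    import zipfile
    extracted = []
    for name in os.listdir(directory):
        if name != __file__ and name.endswith(".zip"):
            with zipfile.ZipFile(os.path.join(directory, name)) as archive:
                archive.extractall(directory)  # extract next to the archives
            extracted.append(name)
    return extracted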
| 19.916667
| 41
| 0.661088
|
92946d8d839cf10a23473640c52b39d653738fdd
| 1,647
|
py
|
Python
|
tests/TestScripts/testConfigureDisable.py
|
sketchylizard/Catch2
|
c9df70e34e631ca22dad6480a13a65c24034a4af
|
[
"BSL-1.0"
] | 62
|
2021-09-21T18:58:02.000Z
|
2022-03-07T02:17:43.000Z
|
tests/TestScripts/testConfigureDisable.py
|
sketchylizard/Catch2
|
c9df70e34e631ca22dad6480a13a65c24034a4af
|
[
"BSL-1.0"
] | 8
|
2017-11-03T12:08:09.000Z
|
2017-11-03T12:08:10.000Z
|
tests/TestScripts/testConfigureDisable.py
|
sketchylizard/Catch2
|
c9df70e34e631ca22dad6480a13a65c24034a4af
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
from ConfigureTestsCommon import configure_and_build, run_and_return_output
import os
import re
import sys
"""
Tests the CMake configure option for CATCH_CONFIG_DISABLE
Requires 2 arguments, path folder where the Catch2's main CMakeLists.txt
exists, and path to where the output files should be stored.
"""
if len(sys.argv) != 3:
print('Wrong number of arguments: {}'.format(len(sys.argv)))
print('Usage: {} catch2-top-level-dir base-build-output-dir'.format(sys.argv[0]))
exit(1)
catch2_source_path = os.path.abspath(sys.argv[1])
build_dir_path = os.path.join(os.path.abspath(sys.argv[2]), 'CMakeConfigTests', 'Disable')
configure_and_build(catch2_source_path,
build_dir_path,
[("CATCH_CONFIG_DISABLE", "ON"),
# We need to turn off WERROR, because the compilers
# can see that the various variables inside test cases
# are set but unused.
("CATCH_ENABLE_WERROR", "OFF")])
stdout, _ = run_and_return_output(os.path.join(build_dir_path, 'tests'),
'SelfTest',
['--allow-running-no-tests'])
summary_line = 'No tests ran'
if summary_line not in stdout:
print("Could not find '{}' in the stdout".format(summary_line))
print('stdout: "{}"'.format(stdout))
exit(2)
| 33.612245
| 90
| 0.645416
|
5c6c0dd154f2cbf7f644c35c4a7bf94ea93eac69
| 259
|
py
|
Python
|
app.py
|
Orange9887/taxSystem
|
2ebf51826f1f997c999581613a7a1ce12cae8397
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Orange9887/taxSystem
|
2ebf51826f1f997c999581613a7a1ce12cae8397
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Orange9887/taxSystem
|
2ebf51826f1f997c999581613a7a1ce12cae8397
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
    return 'Hello World!'
if __name__ == '__main__':
    app.run()
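# Editor's note: a minimal, hypothetical sketch added by the editor (not part of the
# original app) showing how the route above can be exercised with Flask's built-in
# test client instead of a running server; the function name _smoke_test is invented
# and is meant to be called from an import/test context.
def _smoke_test():
    client = app.test_client()           # WSGI test client bundled with Flask
    response = client.get('/')           # call the '/' route defined above
    assert response.status_code == 200
    assert response.data == b'Hello World!'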
| 12.333333
| 26
| 0.621622
|
f0c362fe516a7bc9b5fe0168a106fab58a691ca3
| 12,176
|
py
|
Python
|
yellowbox/extras/webserver/webserver.py
|
bharel/Yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | 1
|
2020-08-07T20:02:12.000Z
|
2020-08-07T20:02:12.000Z
|
yellowbox/extras/webserver/webserver.py
|
bharel/yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | null | null | null |
yellowbox/extras/webserver/webserver.py
|
bharel/yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from contextlib import contextmanager
from threading import Lock, Thread
from time import sleep
from typing import ContextManager, Iterator, Mapping, Optional, Union, overload
from requests import ConnectionError, HTTPError, get
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route, WebSocketRoute
from uvicorn import Config, Server
from yellowbox import YellowService
from yellowbox.extras.webserver.class_endpoint import HTTPEndpointTemplate, WSEndpointTemplate
from yellowbox.extras.webserver.endpoints import (
HTTP_SIDE_EFFECT, METHODS, WS_SIDE_EFFECT, MockHTTPEndpoint, MockWSEndpoint, http_endpoint, ws_endpoint
)
from yellowbox.retry import RetrySpec
from yellowbox.utils import docker_host_name
class HandlerError(Exception):
"""
An exception occurred while handling an endpoint in the webserver thread
"""
class WebServer(YellowService):
"""
An easy-to-modify HTTP and websocket server, wrapping a starlette application
"""
_PORT_ACCESS_MAX_RETRIES = 100 # the maximum number of attempts to make when accessing a binding port. Each attempt
# has an interval of 0.01 seconds
_CLASS_ENDPOINT_TEMPLATES: Mapping[str, Union[HTTPEndpointTemplate, WSEndpointTemplate]] = {}
def __init__(self, name: str, port: Optional[int] = None, **kwargs):
"""
Args:
name: the name of the service
port: the port to bind to when serving, default will bind to an available port
**kwargs: forwarded to the uvicorn configuration.
"""
self.__name__ = name
self._app = Starlette(debug=True)
self._route_lock = Lock()
# since the main thread won't catch errors in handlers, this class will store any error raised while handling,
        # and raise it in the main thread as soon as we can
self._pending_exception: Optional[Exception] = None
if 'log_config' not in kwargs:
kwargs['log_config'] = None
kwargs.setdefault('host', '0.0.0.0')
self._port = port
config = Config(self._app, **kwargs, port=self._port)
self._server = Server(config)
self._serve_thread = Thread(name=f'{name}_thread', target=self._server.run)
@property
def port(self) -> Optional[int]:
"""
Returns:
The port the service is bound to, if the service is binding to anything.
Notes:
            Will only return None if the port was not provided during construction and the service thread is not running.
            If the service is starting up, this property will block until the port is bound, or raise an error if
blocked for longer than 1 second.
"""
if self._port or not self._serve_thread.is_alive():
return self._port
for _ in range(self._PORT_ACCESS_MAX_RETRIES):
servers = getattr(self._server, 'servers', None)
if servers:
sockets = getattr(servers[0], 'sockets', None)
if sockets:
socket = sockets[0]
break
sleep(0.01)
else:
raise RuntimeError('timed out when getting binding port')
self._port = socket.getsockname()[1]
return self._port
@overload
def add_http_endpoint(self, endpoint: MockHTTPEndpoint) -> MockHTTPEndpoint:
...
@overload
def add_http_endpoint(self, methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT, *,
auto_read_body: bool = True, forbid_implicit_head_verb: bool = True, name: str = None) \
-> MockHTTPEndpoint:
...
def add_http_endpoint(self, *args, **kwargs) -> MockHTTPEndpoint:
"""
Add an http endpoint to the server
Args:
            *args: either a single mock http endpoint, or parameters forwarded to http_endpoint to construct one
**kwargs: forwarded to http_endpoint to construct an endpoint
Returns:
the http endpoint added to the server
"""
self._raise_from_pending()
if len(args) == 1 and not kwargs:
ep, = args
else:
ep = http_endpoint(*args, **kwargs)
if ep.owner is not None:
raise RuntimeError('an endpoint cannot be added twice')
with self._route_lock:
self._app.routes.append(
ep.route()
)
ep.owner = self
return ep
def remove_http_endpoint(self, endpoint: MockHTTPEndpoint):
"""
Remove an http endpoint previously added to the server
Args:
endpoint: the endpoint to remove
"""
self._raise_from_pending()
if endpoint.owner is not self:
raise RuntimeError('endpoint is not added to the server')
with self._route_lock:
for i, route in enumerate(self._app.router.routes):
if isinstance(route, Route) and route.endpoint == endpoint.get:
break
else:
raise RuntimeError('endpoint is not found in the server')
self._app.router.routes.pop(i)
endpoint.owner = None
@overload
def patch_http_endpoint(self, endpoint: MockHTTPEndpoint) -> ContextManager[MockHTTPEndpoint]:
...
@overload
def patch_http_endpoint(self, methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT, *,
auto_read_body: bool = True, forbid_implicit_head_verb: bool = True, name: str = None) \
-> ContextManager[MockHTTPEndpoint]:
...
@contextmanager # type:ignore[misc]
def patch_http_endpoint(self, *args, **kwargs) -> Iterator[MockHTTPEndpoint]:
"""
A context manager to add and then remove an http endpoint
Args:
*args: forwarded to self.add_http_endpoint
**kwargs: forwarded to self.add_http_endpoint
Returns:
The temporarily added endpoint
"""
ep = self.add_http_endpoint(*args, **kwargs)
try:
yield ep
finally:
self.remove_http_endpoint(ep)
@overload
def add_ws_endpoint(self, endpoint: MockWSEndpoint) -> MockWSEndpoint:
...
@overload
def add_ws_endpoint(self, rule_string: str, side_effect: WS_SIDE_EFFECT, *, name: str = None) -> MockWSEndpoint:
...
def add_ws_endpoint(self, *args, **kwargs):
"""
Add a websocket endpoint to the server
Args:
            *args: either a single mock ws endpoint, or parameters forwarded to ws_endpoint to construct one
**kwargs: forwarded to ws_endpoint to construct an endpoint
Returns:
the websocket endpoint added to the server
"""
self._raise_from_pending()
if len(args) == 1 and not kwargs:
ep, = args
else:
ep = ws_endpoint(*args, **kwargs)
if ep.owner is not None:
raise RuntimeError('an endpoint cannot be added twice')
with self._route_lock:
self._app.routes.append(
WebSocketRoute(ep.rule_string, ep.endpoint, name=ep.__name__)
)
ep.owner = self
return ep
def remove_ws_endpoint(self, endpoint: MockWSEndpoint):
"""
Remove a websocket endpoint previously added to the server
Args:
endpoint: the endpoint to remove
"""
self._raise_from_pending()
if endpoint.owner is not self:
raise RuntimeError('endpoint is not added to the server')
with self._route_lock:
for i, route in enumerate(self._app.router.routes):
if isinstance(route, WebSocketRoute) and route.app == endpoint.endpoint:
break
else:
raise RuntimeError('endpoint is not found in the server')
self._app.router.routes.pop(i)
endpoint.owner = None
@overload
def patch_ws_endpoint(self, endpoint: MockWSEndpoint) -> ContextManager[MockWSEndpoint]:
...
@overload
def patch_ws_endpoint(self, rule_string: str, side_effect: WS_SIDE_EFFECT, *, name: str = None)\
-> ContextManager[MockWSEndpoint]:
...
@contextmanager # type:ignore[misc]
def patch_ws_endpoint(self, *args, **kwargs):
"""
A context manager to add and then remove a ws endpoint
Args:
*args: forwarded to self.add_ws_endpoint
**kwargs: forwarded to self.add_ws_endpoint
Returns:
The temporarily added endpoint
"""
ep = self.add_ws_endpoint(*args, **kwargs)
try:
yield ep
finally:
self.remove_ws_endpoint(ep)
def local_url(self, schema: Optional[str] = 'http') -> str:
"""
Get the url to access this server from the local machine
Args:
schema: the optional schema of the url, defaults to http
"""
if schema is None:
return f'localhost:{self.port}'
return f'{schema}://localhost:{self.port}'
def container_url(self, schema='http') -> str:
"""
Get the url to access this server from a docker container running in the local machine
Args:
schema: the optional schema of the url, defaults to http
"""
if schema is None:
return f'{docker_host_name}:{self.port}'
return f'{schema}://{docker_host_name}:{self.port}'
def start(self, retry_spec: Optional[RetrySpec] = None) -> WebServer:
if self._serve_thread.is_alive():
raise RuntimeError('thread cannot be started twice')
self._serve_thread.start()
with self.patch_http_endpoint('GET', '/__yellowbox/ping', side_effect=PlainTextResponse('')):
retry_spec = retry_spec or RetrySpec(interval=0.1, timeout=5)
retry_spec.retry(
lambda: get(self.local_url() + '/__yellowbox/ping').raise_for_status(),
(ConnectionError, HTTPError)
)
# add all the class endpoints
for name, template in type(self)._CLASS_ENDPOINT_TEMPLATES.items():
ep: Union[MockHTTPEndpoint, MockWSEndpoint]
if isinstance(template, HTTPEndpointTemplate):
ep = template.construct(self)
self.add_http_endpoint(ep)
else:
assert isinstance(template, WSEndpointTemplate)
ep = template.construct(self)
self.add_ws_endpoint(ep)
setattr(self, name, ep)
return super().start()
def stop(self):
self._server.should_exit = True
self._serve_thread.join()
super().stop()
self._raise_from_pending()
def is_alive(self) -> bool:
self._raise_from_pending()
return self._serve_thread.is_alive()
def _raise_from_pending(self):
# if there is a pending exception, this will raise it
if self._pending_exception:
pending = self._pending_exception
self._pending_exception = None
raise HandlerError() from pending
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
_cls_endpoints = {}
for base in cls.__bases__:
base_http_templates = getattr(base, '_CLASS_ENDPOINT_TEMPLATES', None)
if base_http_templates:
overlapping_keys = base_http_templates.keys() & _cls_endpoints.keys()
if overlapping_keys:
raise TypeError(f'overlapping cls endpoints: {overlapping_keys}')
_cls_endpoints.update(base_http_templates)
for k, v in vars(cls).items():
if isinstance(v, (HTTPEndpointTemplate, WSEndpointTemplate)):
if k in _cls_endpoints:
raise TypeError(f'cls endpoint {k} already defined')
_cls_endpoints[k] = v
cls._CLASS_ENDPOINT_TEMPLATES = _cls_endpoints
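# Editor's note: an illustrative usage sketch appended by the editor, based only on the
# public methods defined above; the server name, endpoint path and response text are
# invented for the example.
if __name__ == '__main__':
    _server = WebServer('example_server').start()
    try:
        with _server.patch_http_endpoint('GET', '/hello',
                                         side_effect=PlainTextResponse('hi')):
            # the patched endpoint only exists inside the `with` block
            assert get(_server.local_url() + '/hello').text == 'hi'
    finally:
        _server.stop()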
| 37.235474
| 120
| 0.620894
|
c882caa8a1bf937b8d42e3e5d4b4f79a49d8f4a6
| 970
|
py
|
Python
|
samples/python_durable_bindings/DurableOrchestrationTrigger/__init__.py
|
msarm/azure-functions-durable-python
|
8ecd30574502f34332e9e61d269f79a3fd66b666
|
[
"MIT"
] | 9
|
2019-08-16T15:37:51.000Z
|
2020-05-12T17:33:50.000Z
|
samples/python_durable_bindings/DurableOrchestrationTrigger/__init__.py
|
msarm/azure-functions-durable-python
|
8ecd30574502f34332e9e61d269f79a3fd66b666
|
[
"MIT"
] | 7
|
2019-07-26T00:24:20.000Z
|
2020-01-29T16:30:06.000Z
|
samples/python_durable_bindings/DurableOrchestrationTrigger/__init__.py
|
msarm/azure-functions-durable-python
|
8ecd30574502f34332e9e61d269f79a3fd66b666
|
[
"MIT"
] | 11
|
2019-07-22T17:40:47.000Z
|
2020-06-24T14:43:18.000Z
|
import logging
import azure.functions as func
import azure.durable_functions as df
def generator_function(context):
outputs = []
task1 = yield context.df.callActivity("DurableActivity", "One")
logging.warn(f"!!!task1: {task1}")
task2 = yield context.df.callActivity("DurableActivity", "Two")
logging.warn(f"!!!task2: {task2}")
task3 = yield context.df.callActivity("DurableActivity", "Three")
logging.warn(f"!!!task3: {task3}")
outputs.append(task1)
outputs.append(task2)
outputs.append(task3)
return outputs
def main(context: str):
logging.warn("Durable Orchestration Trigger: " + context)
orchestrate = df.Orchestrator.create(generator_function)
logging.warn("!!!type(orchestrate) " + str(type(orchestrate)))
result = orchestrate(context)
logging.warn("!!!serialized json : " + result)
logging.warn("!!!type(result) " + str(type(result)))
return result
| 28.529412
| 70
| 0.663918
|
3d380ff0385206b1628eb8afe98892d7b36dd5de
| 20,268
|
py
|
Python
|
tensorflow/lite/testing/zip_test_utils.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 2
|
2021-08-03T18:03:33.000Z
|
2021-08-03T18:03:49.000Z
|
tensorflow/lite/testing/zip_test_utils.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:14:24.000Z
|
2022-02-10T02:58:17.000Z
|
tensorflow/lite/testing/zip_test_utils.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 1
|
2017-11-27T02:55:11.000Z
|
2017-11-27T02:55:11.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for make_zip tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import re
import string
import traceback
import zipfile
import numpy as np
from six import StringIO
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import graph_util as tf_graph_util
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
_MAKE_TEST_FUNCTIONS_MAP[name] = function
return decorate
def get_test_function(test_function_name):
"""Get the test function according to the test function name."""
if test_function_name not in _MAKE_TEST_FUNCTIONS_MAP:
return None
return _MAKE_TEST_FUNCTIONS_MAP[test_function_name]
RANDOM_SEED = 342
TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
    # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
# The inference input type passed to TFLiteConvert.
self.inference_input_type = None
# The inference output type passed to TFLiteConvert.
self.inference_output_type = None
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random_sample(shape) + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
return string_util_wrapper.SerializeAsHexString(t.flatten())
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is csv-like with a simple repeated pattern. We would like
to use proto here, but we can't yet due to interfacing with the Android
team using this format.
Args:
fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
def _normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
use_frozen_graph: Whether or not freeze graph before toco converter.
expected_tf_failures: Number of times tensorflow is expected to fail in
executing the input graphs. In some cases it is OK for TensorFlow to fail
      because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
all_parameter_count = parameter_count
if options.multi_gen_state:
all_parameter_count += options.multi_gen_state.parameter_count
if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are at least %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n" %
(zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))
if options.multi_gen_state:
options.multi_gen_state.parameter_count = all_parameter_count
# TODO(aselle): Make this allow multiple inputs outputs.
if options.multi_gen_state:
archive = options.multi_gen_state.archive
else:
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
if options.make_edgetpu_tests:
extra_toco_options.inference_input_type = tf.uint8
extra_toco_options.inference_output_type = tf.uint8
# Only count parameters when fully_quantize is True.
parameter_count = 0
for parameters in test_parameters:
if True in parameters.get("fully_quantize", []):
parameter_count += functools.reduce(operator.mul, [
len(values)
for key, values in parameters.items()
if key != "fully_quantize"
])
label_base_path = zip_path
if options.multi_gen_state:
label_base_path = options.multi_gen_state.label_base_path
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = label_base_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
if options.make_edgetpu_tests and not param_dict.get(
"fully_quantize", False):
continue
def generate_inputs_outputs(tflite_model_binary,
min_value=0,
max_value=255):
"""Generate input values and output values of the given tflite model.
Args:
tflite_model_binary: A serialized flatbuffer as a string.
min_value: min value for the input tensor.
max_value: max value for the input tensor.
Returns:
(input_values, output_values): input values and output values built.
"""
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
input_values = []
for input_detail in input_details:
input_value = create_tensor_data(
input_detail["dtype"],
input_detail["shape"],
min_value=min_value,
max_value=max_value)
interpreter.set_tensor(input_detail["index"], input_value)
input_values.append(input_value)
interpreter.invoke()
output_details = interpreter.get_output_details()
output_values = []
for output_detail in output_details:
output_values.append(interpreter.get_tensor(output_detail["index"]))
return input_values, output_values
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.compat.v1.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.compat.v1.Session()
try:
baseline_inputs, baseline_outputs = (
make_test_inputs(param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [_normalize_output_name(out.name) for out in outputs]
# pylint: disable=g-long-ternary
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options,
graph_def,
input_tensors,
output_tensors,
extra_toco_options=extra_toco_options,
test_params=param_dict_real)
report["toco"] = (
report_lib.SUCCESS
if tflite_model_binary is not None else report_lib.FAILED)
report["toco_log"] = toco_log
if options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
if options.make_edgetpu_tests:
# Set proper min max values according to input dtype.
baseline_inputs, baseline_outputs = generate_inputs_outputs(
tflite_model_binary, min_value=0, max_value=255)
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs", example_fp.getvalue(),
zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt", example_fp2.getvalue(),
zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
if not options.no_conversion_report:
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
if options.multi_gen_state:
archive.writestr("report_" + options.multi_gen_state.test_name + ".html",
report_io.getvalue())
else:
archive.writestr("report.html", report_io.getvalue())
if options.multi_gen_state:
options.multi_gen_state.zip_manifest.extend(zip_manifest)
else:
archive.writestr("manifest.txt", "".join(zip_manifest),
zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(
1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(
1 for x in convert_report if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.compat.v1.logging.info(
("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d TOCO converted graphs (%.1f%%"), zip_path, total_conversions,
tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if not options.make_edgetpu_tests and tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") %
(expected_tf_failures, zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError("Found %d errors while generating toco models" %
toco_errors)
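# Editor's note: an illustrative sketch appended by the editor; the test name, the
# parameter dictionary and the identity graph below are invented and only exercise the
# registration helpers and factories defined above (they do not correspond to a real
# TFLite test in this repository).
@register_make_test_function()
def make_identity_tests(options):
  """Make a trivial zip test that feeds a placeholder through tf.identity."""
  test_parameters = [{"input_shape": [[1, 3], [2, 2]]}]
  def build_graph(parameters):
    input_tensor = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=parameters["input_shape"], name="input")
    return [input_tensor], [tf.identity(input_tensor)]
  def build_inputs(parameters, sess, inputs, outputs):
    values = [create_tensor_data(tf.float32, parameters["input_shape"])]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# After registration the factory can be looked up by name.
assert get_test_function("make_identity_tests") is not None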
| 37.120879
| 80
| 0.667357
|
bfbe662442307b6d1c359f9d2e939fc870461f2c
| 2,624
|
py
|
Python
|
shape_bruteforce/training.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | 2
|
2020-07-27T15:02:57.000Z
|
2022-03-12T02:41:02.000Z
|
shape_bruteforce/training.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | null | null | null |
shape_bruteforce/training.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | null | null | null |
import random
import math
import cairo
import numpy as np
import tqdm
from shape_bruteforce import shapes
class Image:
def __init__(self, width, height):
self.width = width
self.height = height
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self.ctx = cairo.Context(self.surface)
def draw_background(self, r, g, b, a=1.0):
self.ctx.set_source_rgba(r, g, b, a)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
def copy_image(self, img):
self.ctx.set_source_surface(img.surface)
self.ctx.paint()
def to_array(self):
buf = self.surface.get_data()
array = np.ndarray(shape=(self.height, self.width, 4), dtype=np.uint8, buffer=buf)
return array
class Training:
def __init__(self, target):
self.th, self.tw = target.shape[0:2] # target height, target width
self.target = target.astype("float") # to make error function work
self.parent = Image(self.tw, self.th)
self.parent.draw_background(0, 0, 0)
def mse(self, img):
arr = img.to_array()
return float(np.square(np.subtract(arr, self.target)).mean())
def add_random_circle(self, img):
child = Image(self.tw, self.th)
child.copy_image(img)
child.ctx.set_source_rgba(random.random(), random.random(), random.random(), random.random())
child.ctx.arc(
random.uniform(0, self.tw), random.uniform(0, self.th), # x, y
random.uniform(0, (self.tw + self.th) / 2 / 2), # radius
0, 2 * math.pi)
child.ctx.fill()
return child
def train(self, shape_count=256, gens_per_shape=2000):
pbar = tqdm.trange(shape_count)
for i in pbar:
best_child = self.add_random_circle(self.parent)
best_child_fit = self.mse(best_child)
for j in range(gens_per_shape):
child = self.add_random_circle(self.parent)
child_fit = self.mse(child)
if child_fit < best_child_fit:
best_child = child
best_child_fit = child_fit
pbar.set_description("ERR " + str(int(best_child_fit)))
self.parent = best_child
if __name__ == "__main__":
from shape_bruteforce import utils
target = utils.load_image("mona-lisa.jpg")
target = utils.resize_image(target, 64)
target = utils.normalize_image(target)
trainer = Training(target)
trainer.train()
arr = trainer.parent.to_array()
utils.show_image(arr)
| 32
| 101
| 0.61471
|
a20ea0117cb13cb33ce797f281755d892050b2ea
| 2,094
|
py
|
Python
|
pierre-feuille-ciseaux.py
|
xeorion/pierre-feuille-ciseaux
|
abc880dcc4c2290765d606854b168793e640164e
|
[
"CC0-1.0"
] | null | null | null |
pierre-feuille-ciseaux.py
|
xeorion/pierre-feuille-ciseaux
|
abc880dcc4c2290765d606854b168793e640164e
|
[
"CC0-1.0"
] | null | null | null |
pierre-feuille-ciseaux.py
|
xeorion/pierre-feuille-ciseaux
|
abc880dcc4c2290765d606854b168793e640164e
|
[
"CC0-1.0"
] | null | null | null |
# A small rock-paper-scissors game ("pierre, feuille, ciseaux"). Created on 21/11/2021.
import numpy as np  # imported so the computer can make a random choice
choix = ['pierre', 'feuille', 'ciseaux']  # the moves the computer can play
gg = 'Well done, you win!'  # shown when the player wins the round
nope = "Sorry, you lose. ;)"  # shown when the player loses the round
eql = "It's a draw, shall we go again?"  # shown when the round is a tie
nbtour = int(input("Number of rounds: "))  # ask the player how many rounds to play
ppc = 0  # computer's score
pj = 0  # player's score
for k in range(0, nbtour):  # play until nbtour rounds have been completed
    cj = input("pierre, feuille, ou ciseaux ? ")  # ask the player for their move
    ordi = np.random.choice(choix)  # draw the computer's move once per round
    if cj == 'pierre':  # the player plays rock
        if ordi == 'pierre':  # the computer also plays rock
            print(eql)  # announce the tie
        elif ordi == 'feuille':  # the computer plays paper
            print(nope)  # announce the loss
            ppc += 1  # add 1 to the computer's score
        else:  # the computer plays scissors
            print(gg)  # announce the win
            pj += 1  # add 1 to the player's score
    elif cj == 'feuille':
        if ordi == 'feuille':
            print(eql)
        elif ordi == 'ciseaux':
            print(nope)
            ppc += 1
        else:
            print(gg)
            pj += 1
    elif cj == 'ciseaux':
        if ordi == 'ciseaux':
            print(eql)
        elif ordi == 'pierre':
            print(nope)
            ppc += 1
        else:
            print(gg)
            pj += 1
    else:
        pass  # unrecognised input: skip the round
if pj == ppc:  # the player and the computer have the same score
    print(f"It's a tie at {pj} point(s) each.")
elif pj > ppc:
    print(f"You have {pj} point(s) and the machine has {ppc} point(s), well done!")
else:
    print(f"You have {pj} point(s) and the machine has {ppc} point(s), better luck next time!")
| 38.777778
| 123
| 0.680038
|
56bc55b0350bab8733573425a2e63ed484c5752c
| 2,260
|
py
|
Python
|
tests/e2e/test_running_cluster_monitoring_with_persistent_storage.py
|
jhutar/ocs-ci
|
da604424550ffa4af0bd1cfc4447a539a85164e6
|
[
"MIT"
] | null | null | null |
tests/e2e/test_running_cluster_monitoring_with_persistent_storage.py
|
jhutar/ocs-ci
|
da604424550ffa4af0bd1cfc4447a539a85164e6
|
[
"MIT"
] | null | null | null |
tests/e2e/test_running_cluster_monitoring_with_persistent_storage.py
|
jhutar/ocs-ci
|
da604424550ffa4af0bd1cfc4447a539a85164e6
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from ocs_ci.ocs.ocp import OCP
from ocs_ci.framework.testlib import tier1, E2ETest
from ocs_ci.ocs.resources.pvc import delete_pvcs
from ocs_ci.ocs.monitoring import (
create_configmap_cluster_monitoring_pod,
validate_pvc_created_and_bound_on_monitoring_pods,
validate_pvc_are_mounted_on_monitoring_pods,
validate_monitoring_pods_are_respinned_and_running_state,
get_list_pvc_objs_created_on_monitoring_pods
)
from tests.fixtures import (
create_rbd_storageclass, create_ceph_block_pool,
create_rbd_secret
)
logger = logging.getLogger(__name__)
ocp = OCP('v1', 'ConfigMap', 'openshift-monitoring')
@pytest.fixture()
def test_fixture(request):
"""
Setup and teardown
"""
self = request.node.cls
def finalizer():
teardown(self)
request.addfinalizer(finalizer)
def teardown(self):
"""
Delete pvc and config map created
"""
assert ocp.delete(resource_name='cluster-monitoring-config')
pvc_obj_list = get_list_pvc_objs_created_on_monitoring_pods()
assert delete_pvcs(pvc_obj_list)
@pytest.mark.usefixtures(
create_rbd_secret.__name__,
create_ceph_block_pool.__name__,
create_rbd_storageclass.__name__,
test_fixture.__name__
)
class TestRunningClusterMonitoringWithPersistentStorage(E2ETest):
"""
Configure the persistent volume claim on monitoring
"""
pods_list = ['prometheus-k8s-0', 'prometheus-k8s-1',
'alertmanager-main-0', 'alertmanager-main-1',
'alertmanager-main-2']
@tier1
    def test_running_cluster_monitoring_with_persistent_storage(self):
"""
A test case to configure the persistent volume on monitoring pods
"""
# Create configmap cluster-monitoring-config
create_configmap_cluster_monitoring_pod(self.sc_obj.name)
# Validate the pods are respinned and in running state
validate_monitoring_pods_are_respinned_and_running_state(
self.pods_list
)
# Validate the pvc is created on monitoring pods
validate_pvc_created_and_bound_on_monitoring_pods()
# Validate the pvc are mounted on pods
validate_pvc_are_mounted_on_monitoring_pods(self.pods_list)
| 28.974359
| 73
| 0.738053
|
8369f46a608b102cf95e62939ee8f97e34633932
| 7,267
|
py
|
Python
|
tests/test_snap.py
|
aalexanderkevin/midtrans-python-client
|
b026075d2a38f86c96627d16d60cc02ebd3ed9b2
|
[
"MIT"
] | null | null | null |
tests/test_snap.py
|
aalexanderkevin/midtrans-python-client
|
b026075d2a38f86c96627d16d60cc02ebd3ed9b2
|
[
"MIT"
] | null | null | null |
tests/test_snap.py
|
aalexanderkevin/midtrans-python-client
|
b026075d2a38f86c96627d16d60cc02ebd3ed9b2
|
[
"MIT"
] | null | null | null |
import pytest
from .helpers import is_str
from .context import midtransclient
import datetime
from pprint import pprint
reused_order_id = "py-midtransclient-test-"+str(datetime.datetime.now()).replace(" ", "").replace(":", "")
def test_snap_class():
snap = generate_snap_instance()
methods = dir(snap)
assert "create_transaction" in methods
assert "create_transaction_token" in methods
assert "create_transaction_redirect_url" in methods
assert is_str(snap.api_config.server_key)
assert is_str(snap.api_config.client_key)
def test_snap_create_transaction_min():
snap = generate_snap_instance()
param = generate_param_min()
param['transaction_details']['order_id'] = reused_order_id
transaction = snap.create_transaction(param)
assert isinstance(transaction, dict)
assert is_str(transaction['token'])
assert is_str(transaction['redirect_url'])
def test_snap_create_transaction_max():
snap = generate_snap_instance()
param = generate_param_max()
transaction = snap.create_transaction(param)
assert isinstance(transaction, dict)
assert is_str(transaction['token'])
assert is_str(transaction['redirect_url'])
def test_snap_create_transaction_token():
snap = generate_snap_instance()
param = generate_param_min()
token = snap.create_transaction_token(param)
assert is_str(token)
def test_snap_create_transaction_redirect_url():
snap = generate_snap_instance()
param = generate_param_min()
redirect_url = snap.create_transaction_redirect_url(param)
assert is_str(redirect_url)
def test_snap_status_fail_404():
snap = generate_snap_instance()
err = ''
try:
response = snap.transactions.status('non-exist-order-id')
except Exception as e:
err = e
assert 'MidtransAPIError' in err.__class__.__name__
assert '404' in err.message
assert 'exist' in err.message
def test_snap_request_fail_401():
snap = generate_snap_instance()
snap.api_config.server_key=''
param = generate_param_min()
err = ''
try:
transaction = snap.create_transaction(param)
except Exception as e:
err = e
assert 'MidtransAPIError' in err.__class__.__name__
assert '401' in err.message
assert 'unauthorized' in err.message
def test_snap_request_fail_empty_param():
snap = generate_snap_instance()
param = None
err = ''
try:
transaction = snap.create_transaction(param)
except Exception as e:
err = e
assert 'MidtransAPIError' in err.__class__.__name__
assert '400' in err.message
assert 'is required' in err.message
def test_snap_request_fail_zero_gross_amount():
snap = generate_snap_instance()
param = generate_param_min()
param['transaction_details']['gross_amount'] = 0
err = ''
try:
transaction = snap.create_transaction(param)
except Exception as e:
err = e
assert 'MidtransAPIError' in err.__class__.__name__
def test_snap_exception_MidtransAPIError():
snap = generate_snap_instance()
snap.api_config.server_key=''
param = generate_param_min()
err = ''
try:
transaction = snap.create_transaction(param)
except Exception as e:
err = e
assert 'MidtransAPIError' in err.__class__.__name__
assert is_str(err.message)
assert isinstance(err.api_response_dict, dict)
assert isinstance(err.http_status_code,int)
# ======== HELPER FUNCTIONS BELOW ======== #
def generate_snap_instance():
snap = midtransclient.Snap(is_production=False,
server_key='SB-Mid-server-GwUP_WGbJPXsDzsNEBRs8IYA',
client_key='SB-Mid-client-61XuGAwQ8Bj8LxSS')
return snap
def generate_param_min():
return {
"transaction_details": {
"order_id": "py-midtransclient-test-"+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"gross_amount": 200000
}, "credit_card":{
"secure" : True
}
}
def generate_param_max():
return {
"transaction_details": {
"order_id": "py-midtransclient-test-"+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"gross_amount": 10000
},
"item_details": [{
"id": "ITEM1",
"price": 10000,
"quantity": 1,
"name": "Midtrans Bear",
"brand": "Midtrans",
"category": "Toys",
"merchant_name": "Midtrans"
}],
"customer_details": {
"first_name": "John",
"last_name": "Watson",
"email": "test@example.com",
"phone": "+628123456",
"billing_address": {
"first_name": "John",
"last_name": "Watson",
"email": "test@example.com",
"phone": "081 2233 44-55",
"address": "Sudirman",
"city": "Jakarta",
"postal_code": "12190",
"country_code": "IDN"
},
"shipping_address": {
"first_name": "John",
"last_name": "Watson",
"email": "test@example.com",
"phone": "0 8128-75 7-9338",
"address": "Sudirman",
"city": "Jakarta",
"postal_code": "12190",
"country_code": "IDN"
}
},
"enabled_payments": ["credit_card", "mandiri_clickpay", "cimb_clicks","bca_klikbca", "bca_klikpay", "bri_epay", "echannel", "indosat_dompetku","mandiri_ecash", "permata_va", "bca_va", "bni_va", "other_va", "gopay","kioson", "indomaret", "gci", "danamon_online"],
"credit_card": {
"secure": True,
"channel": "migs",
"bank": "bca",
"installment": {
"required": False,
"terms": {
"bni": [3, 6, 12],
"mandiri": [3, 6, 12],
"cimb": [3],
"bca": [3, 6, 12],
"offline": [6, 12]
}
},
"whitelist_bins": [
"48111111",
"41111111"
]
},
"bca_va": {
"va_number": "12345678911",
"free_text": {
"inquiry": [
{
"en": "text in English",
"id": "text in Bahasa Indonesia"
}
],
"payment": [
{
"en": "text in English",
"id": "text in Bahasa Indonesia"
}
]
}
},
"bni_va": {
"va_number": "12345678"
},
"permata_va": {
"va_number": "1234567890",
"recipient_name": "SUDARSONO"
},
"callbacks": {
"finish": "https://demo.midtrans.com"
},
"expiry": {
"start_time": "2020-12-20 18:11:08 +0700",
"unit": "minutes",
"duration": 1
},
"custom_field1": "custom field 1 content",
"custom_field2": "custom field 2 content",
"custom_field3": "custom field 3 content"
}
| 32.882353
| 270
| 0.562818
|
08d15b9c17723f4a0c65eef69635972c7a21108c
| 1,340
|
py
|
Python
|
aa2020/python/plot_shortest_path_tree.py
|
gianlucacovini/opt4ds
|
c8927ad36cace51c501527b2f8e8e93857c80d95
|
[
"MIT"
] | 14
|
2020-03-04T18:02:47.000Z
|
2022-02-27T17:40:09.000Z
|
aa2020/python/plot_shortest_path_tree.py
|
gianlucacovini/opt4ds
|
c8927ad36cace51c501527b2f8e8e93857c80d95
|
[
"MIT"
] | 1
|
2021-03-23T11:47:24.000Z
|
2021-03-28T12:23:21.000Z
|
aa2020/python/plot_shortest_path_tree.py
|
mathcoding/opt4ds
|
42904fd56c18a83fd5ff6f068bbd20b055a40734
|
[
"MIT"
] | 7
|
2020-03-12T23:41:21.000Z
|
2022-03-03T13:41:29.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 11:14:53 2020
@author: Gualandi
"""
import networkx as nx
import matplotlib.pyplot as plt
Ls = [('a', 'b', 5), ('a', 'c', 3), ('a', 'd', 3), ('b', 'c', 2),
('b', 'd', 5), ('c', 'e', 2), ('c', 'd', 3), ('d', 'e', 2),
('d', 'f', 3), ('e', 'g', 3), ('f', 'c', 4), ('g', 'f', 2)]
Cs = dict([((i,j),c) for i,j,c in Ls])
As = [(i,j) for i,j,_ in Ls]
# NetworkX Digraph
G = nx.DiGraph()
G.add_edges_from(As)
val_map = {'g': 0.5714285714285714,
'a': 0.0}
values = [val_map.get(node, 0.2) for node in G.nodes()]
# Specify the edges you want here
red_edges = [('e', 'g'), ('b', 'c'), ('c', 'e'), ('f', 'c'), ('d', 'e'), ('a', 'c')]
black_edges = [edge for edge in G.edges() if edge not in red_edges]
# Need to create a layout when doing
# separate calls to draw nodes and edges
pos = nx.kamada_kawai_layout(G)
nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('coolwarm'),
node_color = values, node_size = 400)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos, edgelist=red_edges, width=2,
                       edge_color='r', arrows=True)
nx.draw_networkx_edges(G, pos, edgelist=black_edges, arrows=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels=Cs)
plt.savefig("ShortestPathGraph.pdf", bbox_inches='tight')
plt.show()
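# Editor's note: an illustrative sketch added by the editor; the red edges above are
# hard-coded, but they can also be derived from the arc costs with Dijkstra's
# algorithm (assuming node 'a' is the intended source of the shortest-path tree).
H = nx.DiGraph()
H.add_weighted_edges_from(Ls)                         # same arcs, with costs as weights
paths = nx.single_source_dijkstra_path(H, 'a', weight='weight')
tree_edges = {(p[i], p[i + 1]) for p in paths.values() for i in range(len(p) - 1)}
print(sorted(tree_edges))                             # edges of the shortest-path tree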
| 29.130435
| 84
| 0.578358
|
e7681b697a99068749e1c13b5d99139a9495e0ff
| 14,891
|
py
|
Python
|
oslo-modules/oslo_db/sqlalchemy/engines.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | null | null | null |
oslo-modules/oslo_db/sqlalchemy/engines.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | null | null | null |
oslo-modules/oslo_db/sqlalchemy/engines.py
|
esse-io/zen-common
|
8ede82ab81bad53c3b947084b812c44e329f159b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Core SQLAlchemy connectivity routines.
"""
import itertools
import logging
import os
import re
import time
import six
import sqlalchemy
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import pool
from sqlalchemy.sql.expression import select
from oslo_db._i18n import _LW
from oslo_db import exception
from oslo_db.sqlalchemy import exc_filters
from oslo_db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
    Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _connect_ping_listener(connection, branch):
"""Ping the server at connection startup.
Ping the server at transaction begin and transparently reconnect
if a disconnect exception occurs.
"""
if branch:
return
# turn off "close with result". This can also be accomplished
# by branching the connection, however just setting the flag is
# more performant and also doesn't get involved with some
    # connection-invalidation awkwardness that occurs (see
# https://bitbucket.org/zzzeek/sqlalchemy/issue/3215/)
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# any details like that needed by Oracle, DB2 etc. are handled.
connection.scalar(select([1]))
except exception.DBConnectionError:
# catch DBConnectionError, which is raised by the filter
# system.
# disconnect detected. The connection is now
# "invalid", but the pool should be ready to return
# new connections assuming they are good now.
# run the select again to re-validate the Connection.
connection.scalar(select([1]))
finally:
connection.should_close_with_result = save_should_close_with_result
def _setup_logging(connection_debug=0):
"""setup_logging function maps SQL debug level to Python log level.
Connection_debug is a verbosity of SQL debugging information.
0=None(default value),
1=Processed only messages with WARNING level or higher
50=Processed only messages with INFO level or higher
100=Processed only messages with DEBUG level
"""
if connection_debug >= 0:
logger = logging.getLogger('sqlalchemy.engine')
if connection_debug >= 100:
logger.setLevel(logging.DEBUG)
elif connection_debug >= 50:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
idle_timeout=3600,
connection_debug=0, max_pool_size=None, max_overflow=None,
pool_timeout=None, sqlite_synchronous=True,
connection_trace=False, max_retries=10, retry_interval=10,
thread_checkin=True, logging_name=None):
"""Return a new SQLAlchemy engine."""
url = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": idle_timeout,
'convert_unicode': True,
'connect_args': {},
'logging_name': logging_name
}
_setup_logging(connection_debug)
_init_connection_args(
url, engine_args,
sqlite_fk=sqlite_fk,
max_pool_size=max_pool_size,
max_overflow=max_overflow,
pool_timeout=pool_timeout
)
engine = sqlalchemy.create_engine(url, **engine_args)
_init_events(
engine,
mysql_sql_mode=mysql_sql_mode,
sqlite_synchronous=sqlite_synchronous,
sqlite_fk=sqlite_fk,
thread_checkin=thread_checkin,
connection_trace=connection_trace
)
# register alternate exception handler
exc_filters.register_engine(engine)
# register engine connect handler
event.listen(engine, "engine_connect", _connect_ping_listener)
# initial connect + test
# NOTE(viktors): the current implementation of _test_connection()
# does nothing, if max_retries == 0, so we can skip it
if max_retries:
test_conn = _test_connection(engine, max_retries, retry_interval)
test_conn.close()
return engine
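# Editor's note: a minimal illustrative sketch added by the editor (not part of the
# original module) showing how the factory above is typically called; the in-memory
# SQLite URL is used purely as an example.
if __name__ == '__main__':
    engine = create_engine('sqlite://', sqlite_synchronous=False)
    with engine.connect() as conn:
        # the engine_connect listener registered below issues the same SELECT 1 probe
        assert conn.scalar(select([1])) == 1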
@utils.dispatch_for_dialect('*', multiple=True)
def _init_connection_args(
url, engine_args,
max_pool_size=None, max_overflow=None, pool_timeout=None, **kw):
pool_class = url.get_dialect().get_pool_class(url)
if issubclass(pool_class, pool.QueuePool):
if max_pool_size is not None:
engine_args['pool_size'] = max_pool_size
if max_overflow is not None:
engine_args['max_overflow'] = max_overflow
if pool_timeout is not None:
engine_args['pool_timeout'] = pool_timeout
@_init_connection_args.dispatch_for("sqlite")
def _init_connection_args(url, engine_args, **kw):
pool_class = url.get_dialect().get_pool_class(url)
# singletonthreadpool is used for :memory: connections;
# replace it with StaticPool.
if issubclass(pool_class, pool.SingletonThreadPool):
engine_args["poolclass"] = pool.StaticPool
engine_args['connect_args']['check_same_thread'] = False
@_init_connection_args.dispatch_for("postgresql")
def _init_connection_args(url, engine_args, **kw):
if 'client_encoding' not in url.query:
# Set encoding using engine_args instead of connect_args since
# it's supported for PostgreSQL 8.*. More details at:
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/postgresql.html
engine_args['client_encoding'] = 'utf8'
@_init_connection_args.dispatch_for("mysql")
def _init_connection_args(url, engine_args, **kw):
if 'charset' not in url.query:
engine_args['connect_args']['charset'] = 'utf8'
@_init_connection_args.dispatch_for("mysql+mysqlconnector")
def _init_connection_args(url, engine_args, **kw):
# mysqlconnector engine (<1.0) incorrectly defaults to
# raise_on_warnings=True
# https://bitbucket.org/zzzeek/sqlalchemy/issue/2515
if 'raise_on_warnings' not in url.query:
engine_args['connect_args']['raise_on_warnings'] = False
@_init_connection_args.dispatch_for("mysql+mysqldb")
@_init_connection_args.dispatch_for("mysql+oursql")
def _init_connection_args(url, engine_args, **kw):
# Those drivers require use_unicode=0 to avoid performance drop due
# to internal usage of Python unicode objects in the driver
# http://docs.sqlalchemy.org/en/rel_0_9/dialects/mysql.html
if 'use_unicode' not in url.query:
if six.PY3:
engine_args['connect_args']['use_unicode'] = 1
else:
engine_args['connect_args']['use_unicode'] = 0
@utils.dispatch_for_dialect('*', multiple=True)
def _init_events(engine, thread_checkin=True, connection_trace=False, **kw):
"""Set up event listeners for all database backends."""
_add_process_guards(engine)
if connection_trace:
_add_trace_comments(engine)
if thread_checkin:
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
@_init_events.dispatch_for("mysql")
def _init_events(engine, mysql_sql_mode=None, **kw):
"""Set up event listeners for MySQL."""
if mysql_sql_mode is not None:
@sqlalchemy.event.listens_for(engine, "connect")
def _set_session_sql_mode(dbapi_con, connection_rec):
cursor = dbapi_con.cursor()
cursor.execute("SET SESSION sql_mode = %s", [mysql_sql_mode])
@sqlalchemy.event.listens_for(engine, "first_connect")
def _check_effective_sql_mode(dbapi_con, connection_rec):
if mysql_sql_mode is not None:
_set_session_sql_mode(dbapi_con, connection_rec)
cursor = dbapi_con.cursor()
cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
realmode = cursor.fetchone()
if realmode is None:
LOG.warning(_LW('Unable to detect effective SQL mode'))
else:
realmode = realmode[1]
LOG.debug('MySQL server mode set to %s', realmode)
if 'TRADITIONAL' not in realmode.upper() and \
'STRICT_ALL_TABLES' not in realmode.upper():
LOG.warning(
_LW(
"MySQL SQL mode is '%s', "
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
realmode)
@_init_events.dispatch_for("sqlite")
def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw):
"""Set up event listeners for SQLite.
This includes several settings made on connections as they are
created, as well as transactional control extensions.
"""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
@sqlalchemy.event.listens_for(engine, "connect")
def _sqlite_connect_events(dbapi_con, con_record):
# Add REGEXP functionality on SQLite connections
dbapi_con.create_function('regexp', 2, regexp)
if not sqlite_synchronous:
# Switch sqlite connections to non-synchronous mode
dbapi_con.execute("PRAGMA synchronous = OFF")
# Disable pysqlite's emitting of the BEGIN statement entirely.
# Also stops it from emitting COMMIT before any DDL.
# below, we emit BEGIN ourselves.
# see http://docs.sqlalchemy.org/en/rel_0_9/dialects/\
# sqlite.html#serializable-isolation-savepoints-transactional-ddl
dbapi_con.isolation_level = None
if sqlite_fk:
# Ensures that the foreign key constraints are enforced in SQLite.
dbapi_con.execute('pragma foreign_keys=ON')
@sqlalchemy.event.listens_for(engine, "begin")
def _sqlite_emit_begin(conn):
# emit our own BEGIN, checking for existing
# transactional state
if 'in_transaction' not in conn.info:
conn.execute("BEGIN")
conn.info['in_transaction'] = True
@sqlalchemy.event.listens_for(engine, "rollback")
@sqlalchemy.event.listens_for(engine, "commit")
def _sqlite_end_transaction(conn):
# remove transactional marker
conn.info.pop('in_transaction', None)
def _test_connection(engine, max_retries, retry_interval):
if max_retries == -1:
attempts = itertools.count()
else:
attempts = six.moves.range(max_retries)
# See: http://legacy.python.org/dev/peps/pep-3110/#semantic-changes for
# why we are not using 'de' directly (it can be removed from the local
# scope).
de_ref = None
for attempt in attempts:
try:
return engine.connect()
except exception.DBConnectionError as de:
msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg, max_retries - attempt)
time.sleep(retry_interval)
de_ref = de
else:
if de_ref is not None:
six.reraise(type(de_ref), de_ref)
def _add_process_guards(engine):
"""Add multiprocessing guards.
Forces a connection to be reconnected if it is detected
as having been shared to a sub-process.
"""
@sqlalchemy.event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
@sqlalchemy.event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
LOG.debug(_LW(
"Parent process %(orig)s forked (%(newproc)s) with an open "
"database connection, "
"which is being discarded and recreated."),
{"newproc": pid, "orig": connection_record.info['pid']})
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid %s, "
"attempting to check out in pid %s" %
(connection_record.info['pid'], pid)
)
def _add_trace_comments(engine):
"""Add trace comments.
Augment statements with a trace of the immediate calling code
for a given statement.
"""
import os
import sys
import traceback
target_paths = set([
os.path.dirname(sys.modules['oslo_db'].__file__),
os.path.dirname(sys.modules['sqlalchemy'].__file__)
])
try:
skip_paths = set([
os.path.dirname(sys.modules['oslo_db.tests'].__file__),
])
except KeyError:
skip_paths = set()
@sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
# NOTE(zzzeek) - if different steps per DB dialect are desirable
# here, switch out on engine.name for now.
stack = traceback.extract_stack()
our_line = None
for idx, (filename, line, method, function) in enumerate(stack):
for tgt in skip_paths:
if filename.startswith(tgt):
break
else:
for tgt in target_paths:
if filename.startswith(tgt):
our_line = idx
break
if our_line:
break
if our_line:
trace = "; ".join(
"File: %s (%s) %s" % (
line[0], line[1], line[2]
)
# include three lines of context.
for line in stack[our_line - 3:our_line]
)
statement = "%s -- %s" % (statement, trace)
return statement, parameters
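
# --- Illustrative usage sketch (added; not part of the original module) ---
# The create_engine() helper above wires oslo.db's pooling, dialect-specific
# connection arguments, event listeners and connection retries into a plain
# SQLAlchemy engine. A minimal, hypothetical call for an in-memory SQLite
# database might look like the following; the URL and keyword values are
# assumptions for illustration only, not a documented recipe.
#
#     engine = create_engine(
#         "sqlite://",        # in-memory SQLite database
#         sqlite_fk=True,     # enforce foreign keys on each new connection
#         max_retries=3,      # retry the initial connection a few times
#         retry_interval=1,
#     )
#     with engine.connect() as conn:
#         conn.execute("SELECT 1")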
| 35.454762
| 79
| 0.665704
|
001d2b53fb66094a41d034ab82c4992d3f4992ea
| 3,575
|
py
|
Python
|
vg_analyze_relationships.py
|
MeMAD-project/statistic-tools
|
6257d3913b4a359516dd93a89730ca4e3bf0565b
|
[
"MIT"
] | 1
|
2020-06-10T11:17:00.000Z
|
2020-06-10T11:17:00.000Z
|
vg_analyze_relationships.py
|
MeMAD-project/statistical-tools
|
6257d3913b4a359516dd93a89730ca4e3bf0565b
|
[
"MIT"
] | null | null | null |
vg_analyze_relationships.py
|
MeMAD-project/statistical-tools
|
6257d3913b4a359516dd93a89730ca4e3bf0565b
|
[
"MIT"
] | null | null | null |
import sys
import analysis_funs as va
import argparse
def main(args):
print("Loading relationships data from: {}".format(args.relationships_json))
data = va.load_json(args.relationships_json)
assert len(data) == 108077
print("=" * 80)
if args.rel_counts:
print("Count all relationships (verbs and non-verbs):")
print("=" * 80)
rels, subjs, objs = va.count_relationships(data, va.human_synsets)
va.plot_and_output_csv(rels, ['relationship name', 'relationship synset'], 40,
"Relationships with people as subjects",
'relationships/rel_subj_people', batch=True)
va.plot_and_output_csv(subjs, ['subject name', ' subject synset'], 40,
"Subjects with people as subjects",
'relationships/subj_subj_people', batch=True)
va.plot_and_output_csv(objs, ['object name', 'object synset'], 40,
"Objects with people as subjects",
'relationships/obj_subj_people', batch=True)
print("=" * 80)
print("Count verb-only relationships:")
print("=" * 80)
rels, subjs, objs = va.count_relationships(data, va.human_synsets, verbs=True)
va.plot_and_output_csv(rels, ['relationship name', 'relationship synset'], 40,
"Relationships with people as subjects, verbs only",
'relationships/rel_subj_people_verbs', batch=True)
va.plot_and_output_csv(subjs, ['subject name', ' subject synset'], 40,
"Subjects with people as subjects, verbs only",
'relationships/subj_subj_people_verbs', batch=True)
va.plot_and_output_csv(objs, ['object name', 'object synset'], 40,
"Objects with people as subjects, verbs only",
'relationships/obj_subj_people_verbs', batch=True)
print("=" * 80)
counts, indices = va.stats_on_humans_in_relationships(data)
print("=" * 80)
print("Plotting venn diagrams...")
print("=" * 80)
va.plot_venn(counts['rels']['all'], ['Human subjects', 'Human objects', 'Other'],
'Relationships with humans as subjects or objects',
filename='relationships/venn_rels_all', batch=True)
va.plot_venn(counts['rels']['verbs'], ['Human subjects', 'Human objects', 'Other'],
"Relationships with humans as subjects or objects,\nverbs only",
filename='relationships/venn_rels_verbs', batch=True)
va.plot_venn(counts['imgs']['all'], ['Human subjects', 'Human objects', 'Other'],
'Images with humans as subjects or objects',
filename='relationships/venn_imgs_all', batch=True)
va.plot_venn(counts['imgs']['verbs'], ['Human subjects', 'Human objects', 'Other'],
"Images with humans as subjects or objects,\nverbs only",
filename='relationships/venn_imgs_verbs', batch=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--relationships_json', type=str,
help='location of Visual Genome relationships JSON file')
parser.add_argument('--rel_counts', action='store_true',
help='Count different types of relationships, '
'otherwise show summary stats only')
args = parser.parse_args()
main(args=args)
| 44.135802
| 87
| 0.593287
|
a9965939b0e0fc629e29e9ff3a0bb6a34a23ddaf
| 347
|
py
|
Python
|
Classes/TP2/Lat-long/lat_long-v2.py
|
LuisPereira23/PL-2021
|
951190835d8989e3afda1fd0f8f9ef08f5d85e07
|
[
"MIT"
] | null | null | null |
Classes/TP2/Lat-long/lat_long-v2.py
|
LuisPereira23/PL-2021
|
951190835d8989e3afda1fd0f8f9ef08f5d85e07
|
[
"MIT"
] | null | null | null |
Classes/TP2/Lat-long/lat_long-v2.py
|
LuisPereira23/PL-2021
|
951190835d8989e3afda1fd0f8f9ef08f5d85e07
|
[
"MIT"
] | null | null | null |
import re
latLong = re.compile(r'(^\([+\-]?([1-8]?[0-9](\.[0-9]+)?|90(\.0+)?), [+\-]?((([1-9]?[0-9]|1[0-7][0-9])(\.[0-9]+)?)|180(\.0+)?)\)$)')
n = int(input())
for i in range(n):
linha = input()
res = latLong.search(linha)
if(res):
print(res)
print("VALIDO")
else:
print(linha)
print("Invalido")
| 21.6875
| 132
| 0.43804
|
a37017aeea4d3700f8a5d481a1f64b45645a38f6
| 6,420
|
py
|
Python
|
aiida/backends/sqlalchemy/migrations/versions/a6048f0ffca8_update_linktypes.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/sqlalchemy/migrations/versions/a6048f0ffca8_update_linktypes.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/sqlalchemy/migrations/versions/a6048f0ffca8_update_linktypes.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | 1
|
2018-12-21T11:10:09.000Z
|
2018-12-21T11:10:09.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Updating link types - This is a copy of the Django migration script
Revision ID: a6048f0ffca8
Revises:
Create Date: 2017-10-17 10:51:23.327195
"""
from alembic import op
from sqlalchemy.sql import text
# revision identifiers, used by Alembic.
revision = 'a6048f0ffca8'
down_revision = '70c7d732f1b2'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
# I am first migrating the wrongly declared returnlinks out of
# the InlineCalculations.
# This bug is reported #628 https://github.com/aiidateam/aiida_core/issues/628
# There is an explicit check in the code of the inline calculation
# ensuring that the calculation returns UNSTORED nodes.
# Therefore, no cycle can be created with that migration!
#
# this command:
# 1) selects all links that
# - joins an InlineCalculation (or subclass) as input
# - joins a Data (or subclass) as output
# - is marked as a returnlink.
# 2) set for these links the type to 'createlink'
stmt1 = text("""
UPDATE db_dblink set type='createlink' WHERE db_dblink.id IN (
SELECT db_dblink_1.id
FROM db_dbnode AS db_dbnode_1
JOIN db_dblink AS db_dblink_1 ON db_dblink_1.input_id = db_dbnode_1.id
JOIN db_dbnode AS db_dbnode_2 ON db_dblink_1.output_id = db_dbnode_2.id
WHERE db_dbnode_1.type LIKE 'calculation.inline.%'
AND db_dbnode_2.type LIKE 'data.%'
AND db_dblink_1.type = 'returnlink'
)
""")
conn.execute(stmt1)
# Now I am updating the link-types that are null because of either an export and subsequent import
# https://github.com/aiidateam/aiida_core/issues/685
# or because the link types don't exist because the links were added before the introduction of link types.
# This is reported here: https://github.com/aiidateam/aiida_core/issues/687
#
# The following sql statement:
# 1) selects all links that
# - joins Data (or subclass) or Code as input
# - joins Calculation (or subclass) as output. This includes WorkCalculation, InlineCalcuation, JobCalculations...
# - has no type (null)
# 2) set for these links the type to 'inputlink'
stmt2 = text("""
UPDATE db_dblink set type='inputlink' where id in (
SELECT db_dblink_1.id
FROM db_dbnode AS db_dbnode_1
JOIN db_dblink AS db_dblink_1 ON db_dblink_1.input_id = db_dbnode_1.id
JOIN db_dbnode AS db_dbnode_2 ON db_dblink_1.output_id = db_dbnode_2.id
WHERE ( db_dbnode_1.type LIKE 'data.%' or db_dbnode_1.type = 'code.Code.' )
AND db_dbnode_2.type LIKE 'calculation.%'
AND ( db_dblink_1.type = null OR db_dblink_1.type = '')
);
""")
conn.execute(stmt2)
#
# The following sql statement:
# 1) selects all links that
# - join JobCalculation (or subclass) or InlineCalculation as input
# - joins Data (or subclass) as output.
# - has no type (null)
# 2) set for these links the type to 'createlink'
stmt3 = text("""
UPDATE db_dblink set type='createlink' where id in (
SELECT db_dblink_1.id
FROM db_dbnode AS db_dbnode_1
JOIN db_dblink AS db_dblink_1 ON db_dblink_1.input_id = db_dbnode_1.id
JOIN db_dbnode AS db_dbnode_2 ON db_dblink_1.output_id = db_dbnode_2.id
WHERE db_dbnode_2.type LIKE 'data.%'
AND (
db_dbnode_1.type LIKE 'calculation.job.%'
OR
db_dbnode_1.type = 'calculation.inline.InlineCalculation.'
)
AND ( db_dblink_1.type = null OR db_dblink_1.type = '')
)
""")
conn.execute(stmt3)
# The following sql statement:
# 1) selects all links that
# - join WorkCalculation as input. No subclassing was introduced so far, so only one type string is checked for.
# - join Data (or subclass) as output.
# - has no type (null)
# 2) set for these links the type to 'returnlink'
stmt4 = text("""
UPDATE db_dblink set type='returnlink' where id in (
SELECT db_dblink_1.id
FROM db_dbnode AS db_dbnode_1
JOIN db_dblink AS db_dblink_1 ON db_dblink_1.input_id = db_dbnode_1.id
JOIN db_dbnode AS db_dbnode_2 ON db_dblink_1.output_id = db_dbnode_2.id
WHERE db_dbnode_2.type LIKE 'data.%'
AND db_dbnode_1.type = 'calculation.work.WorkCalculation.'
AND ( db_dblink_1.type = null OR db_dblink_1.type = '')
)
""")
conn.execute(stmt4)
# Now I update links that are CALLS:
# The following sql statement:
# 1) selects all links that
# - join WorkCalculation as input. No subclassing was introduced so far, so only one type string is checked for.
# - join Calculation (or subclass) as output. Includes JobCalculation and WorkCalculations and all subclasses.
# - has no type (null)
# 2) set for these links the type to 'calllink'
stmt5 = text("""
UPDATE db_dblink set type='calllink' where id in (
SELECT db_dblink_1.id
FROM db_dbnode AS db_dbnode_1
JOIN db_dblink AS db_dblink_1 ON db_dblink_1.input_id = db_dbnode_1.id
JOIN db_dbnode AS db_dbnode_2 ON db_dblink_1.output_id = db_dbnode_2.id
WHERE db_dbnode_1.type = 'calculation.work.WorkCalculation.'
AND db_dbnode_2.type LIKE 'calculation.%'
AND ( db_dblink_1.type = null OR db_dblink_1.type = '')
)
""")
conn.execute(stmt5)
def downgrade():
print "There is no downgrade for the link types"
| 45.211268
| 120
| 0.614953
|
a3e346c11ade601676eb14a61f45c970e1f2887e
| 593
|
py
|
Python
|
webempresa/pages/migrations/0003_auto_20190924_1131.py
|
FelixCastillo798/web-empresa-curso-django-2
|
26d24e62175160cf8e1b57f411361b17e5fdcc20
|
[
"Apache-2.0"
] | null | null | null |
webempresa/pages/migrations/0003_auto_20190924_1131.py
|
FelixCastillo798/web-empresa-curso-django-2
|
26d24e62175160cf8e1b57f411361b17e5fdcc20
|
[
"Apache-2.0"
] | null | null | null |
webempresa/pages/migrations/0003_auto_20190924_1131.py
|
FelixCastillo798/web-empresa-curso-django-2
|
26d24e62175160cf8e1b57f411361b17e5fdcc20
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.2 on 2019-09-24 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_auto_20190920_1958'),
]
operations = [
migrations.AlterModelOptions(
name='page',
options={'ordering': ['order', 'title'], 'verbose_name': 'pagina', 'verbose_name_plural': 'paginas'},
),
migrations.AddField(
model_name='page',
name='order',
field=models.SmallIntegerField(default=0, verbose_name='Orden'),
),
]
| 25.782609
| 113
| 0.588533
|
0afbb9809034c9265c2172633d0222221386eb73
| 16,170
|
py
|
Python
|
src/data_loader/loader.py
|
zsscode/MGP-AttTCN
|
7659af8c69204a3ad557f22593ea0027ee3003a5
|
[
"MIT"
] | null | null | null |
src/data_loader/loader.py
|
zsscode/MGP-AttTCN
|
7659af8c69204a3ad557f22593ea0027ee3003a5
|
[
"MIT"
] | null | null | null |
src/data_loader/loader.py
|
zsscode/MGP-AttTCN
|
7659af8c69204a3ad557f22593ea0027ee3003a5
|
[
"MIT"
] | 1
|
2020-09-01T12:17:36.000Z
|
2020-09-01T12:17:36.000Z
|
import os
import pickle
import sys
import numpy as np
import tensorflow as tf
# appending head path
cwd = os.path.dirname(os.path.abspath(__file__))
head = os.path.abspath(os.path.join(cwd, os.pardir, os.pardir))
sys.path.append(head)
from src.utils.debug import t_print
from src.data_loader.utils import reduce_data, new_indices, pad_raw_data, all_horizons, separating_and_resampling
class DataGenerator:
def __init__(self,
no_mc_samples=10,
max_no_dtpts=None,
min_no_dtpts=None,
batch_size=10,
fast_load=False,
to_save=False,
debug=False,
fixed_idx_per_class=False,
features=None):
t_print("DataGenerator -- init")
cwd = os.path.dirname(os.path.abspath(__file__))
self.head = os.path.abspath(os.path.join(cwd, os.pardir, os.pardir))
self.no_mc_samples = no_mc_samples
self.max_no_dtpts = max_no_dtpts
self.min_no_dtpts = min_no_dtpts
self.debug = debug
"""
Data loader for MIMIC III data preprocessed according to
"""
if fast_load:
self.fast_load(features)
else:
self.long_load(to_save, features)
# data = [Y, T, ind_K_D, ind_T, len_T, X, len_X, labels, static, classes, ids, ind_Y]
# data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
if debug == False:
# remove IDs & debugging cat
self.train_data = self.train_data[:-2]
self.val_data = self.val_data[:-2]
self.test_data = self.test_data[:-2]
# data = [Y, T, ind_K_D, ind_T, len_T, X, len_X, labels, static, classes]
# data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# separating two prediction classes
self.train_case_data, self.train_control_data = separating_and_resampling(self.train_data)
self.len_data = len(self.train_case_data)
self.train_case_idx = np.arange(len(self.train_case_data[-1]))
self.train_control_idx = np.arange(len(self.train_control_data[-1]))
self.val_idx = np.arange(len(self.val_data[-1]))
# creating a small dev set
if fixed_idx_per_class:
self.idx_per_class = np.asarray(
[[343, 3476, 4378, 1297, 2695, 1498, 1119, 2788, 5468, 5217, 3505,
5441, 3895, 4177, 5678, 1108, 5739, 1510, 7, 5055],
[5311, 2932, 2091, 6683, 568, 6851, 6273, 2796, 4336, 5342, 3150,
1835, 7040, 7106, 3495, 2538, 6053, 2949, 64, 2382],
[1976, 2652, 4208, 1472, 3718, 4287, 3972, 2683, 1112, 2083, 3960,
5617, 403, 6244, 4370, 886, 3416, 5687, 5226, 6358],
[2597, 1086, 6930, 286, 2492, 3794, 21, 1794, 4680, 4477, 6460,
6293, 4636, 4788, 5134, 6544, 7139, 2516, 2617, 351],
[2812, 1503, 1677, 6553, 6333, 7023, 4310, 5546, 7054, 4522, 4473,
1218, 422, 242, 6286, 944, 109, 4896, 3611, 4737],
[4837, 3445, 4256, 465, 2720, 7117, 2665, 4109, 590, 5680, 2672,
6070, 5697, 3772, 4219, 1298, 6515, 2965, 1788, 3352],
[5496, 1159, 3029, 4189, 848, 4778, 2966, 4159, 2101, 6102, 4191,
7135, 349, 7003, 483, 4068, 4420, 2885, 2103, 2460]]
)
else:
self.idx_per_class = np.zeros((7, batch_size * 2), dtype=np.int32)
for k in range(7):
self.idx_per_class[k] = np.random.choice(np.where(self.val_data[9] == k)[0],
min(batch_size * 2, len(np.where(self.val_data[9] == k)[0])),
replace=False, p=None)
# list of patients present at horizon 6
# train
self.late_case_patients = list(self.train_case_data[10][self.train_case_data[9] == 6])
self.late_control_patients = list(self.train_control_data[10][self.train_control_data[9] == 6])
self.later_case_patients = list(self.train_case_data[10][self.train_case_data[9] == 6])
# val
self.late_val_patients = list(self.val_data[10][self.val_data[9] == 6])
late_val_pat_id = [self.val_data[10][i] in self.late_val_patients for i in range(len(self.val_data[9]))]
self.late_val_pat_id = np.where(late_val_pat_id)[0]
self.horizon0_val_patients = np.where(late_val_pat_id & (self.val_data[9] == 0))[0]
def apply_reshuffle(self):
"""
        Function linked to the training class: the dataset is reshuffled at the beginning of each epoch.
        The training class reshuffles the indices, then calls 'apply_reshuffle' to reshuffle the dataset itself.
"""
self.train_case_data = [self.train_case_data[i][self.train_case_idx] for i in range(self.len_data)]
self.train_control_data = [self.train_control_data[i][self.train_control_idx] for i in range(self.len_data)]
late_case_pat_id = [self.train_case_data[10][i] in self.late_case_patients
for i in range(len(self.train_case_data[9]))]
late_control_pat_id = [self.train_control_data[10][i] in self.late_control_patients
for i in range(len(self.train_control_data[9]))]
self.late_case_pat_id = np.where(late_case_pat_id)[0]
self.late_control_pat_id = np.where(late_control_pat_id)[0]
self.horizon0_case_patients = np.where(late_case_pat_id & (self.train_case_data[9] == 0))[0]
self.horizon0_control_patients = np.where(late_control_pat_id & (self.train_control_data[9] == 0))[0]
def next_batch(self, batch_size, batch, loss='uni', alignment=-1, time_window=25, late=False, horizon0=False):
# first: create new dataset
if late:
data = [np.concatenate((self.train_case_data[i][self.late_case_pat_id[batch * batch_size:
(batch + 1) * batch_size]],
self.train_control_data[i][self.late_control_pat_id[batch * batch_size:
(batch + 1) * batch_size]]))
for i in range(self.len_data)]
elif horizon0:
data = [np.concatenate((self.train_case_data[i][self.horizon0_case_patients[batch * batch_size:
(batch + 1) * batch_size]],
self.train_control_data[i][self.horizon0_control_patients[batch * batch_size:
(
batch + 1) * batch_size]]))
for i in range(self.len_data)]
else:
data = [np.concatenate((self.train_case_data[i][batch * batch_size: (batch + 1) * batch_size],
self.train_control_data[i][batch * batch_size: (batch + 1) * batch_size]))
for i in range(self.len_data)]
# then reshuffle it
idx = np.random.choice(np.arange(len(data[4])), len(data[4]), replace=False)
data = [data[i][idx] for i in range(self.len_data)]
output = self.extract_data(data)
if loss == 'uni':
yield output
else:
output[7] = self.expand_labels(output[7], alignment=alignment, time_window=time_window)
yield output
def next_batch_dev_small(self, batch):
data = [self.val_data[i][self.idx_per_class[batch]] for i in range(len(self.val_data))]
yield self.extract_data(data)
def next_batch_dev_all(self, batch_size, batch, late=False, horizon0=False):
if late:
data = [self.val_data[i][self.late_val_pat_id[batch * batch_size: (batch + 1) * batch_size]]
for i in range(len(self.val_data))]
"""
elif horizon0:
data = [self.val_data[i][self.horizon0_val_patients[batch * batch_size: (batch + 1) * batch_size]]
for i in range(len(self.val_data))]
"""
else:
data = [self.val_data[i][batch * batch_size: (batch + 1) * batch_size] for i in range(len(self.val_data))]
yield self.extract_data(data)
def next_batch_test_all(self, batch_size, batch, late=False, horizon0=False):
data = [self.test_data[i][batch * batch_size: (batch + 1) * batch_size] for i in range(len(self.test_data))]
yield self.extract_data(data)
def extract_data(self, data):
# data = [Y, T, ind_K_D, ind_T, num_distinct_Y, X, num_distinct_X, labels, static, classes]
# data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# second: extract
# list of datapoints collected per patient
Y = data[0]
Y = tf.convert_to_tensor(Y, dtype=tf.float32, name='Y')
# list of corresponding timestamps
T = data[1]
T = tf.convert_to_tensor(T, dtype=tf.float32, name="T")
# indices of feature corresponding to each datapoint
ind_K_D = data[2]
ind_K_D = tf.convert_to_tensor(ind_K_D, dtype=tf.int32, name="ind_K_D")
ind_T = data[3]
ind_T = tf.convert_to_tensor(ind_T, dtype=tf.int32, name="ind_T")
# output to be predicted
labels = data[7]
# list of target timestamps to interpolate
X = data[5]
X = tf.convert_to_tensor(X, dtype=tf.float32, name="X")
# counts
num_distinct_X = data[6]
num_distinct_X = tf.convert_to_tensor(num_distinct_X, dtype=tf.int32, name="num_distinct_X")
num_distinct_Y = data[4]
num_distinct_Y = tf.convert_to_tensor(num_distinct_Y, dtype=tf.int32, name="num_distinct_Y")
# static data
static = data[8]
static = tf.convert_to_tensor(static, dtype=tf.float32, name="static")
# classes
classes = data[9]
# repeat for all MC samples
classes = np.repeat(classes, self.no_mc_samples)
labels = np.repeat(labels, self.no_mc_samples)
labels = tf.convert_to_tensor(labels, dtype=tf.int32, name="labels")
if self.debug:
return Y, T, ind_K_D, ind_T, num_distinct_Y, X, num_distinct_X, static, labels, classes, data[10]
else:
return Y, T, ind_K_D, ind_T, num_distinct_Y, X, num_distinct_X, static, labels, classes
def expand_labels(self, y, alignment=-1, time_window=25):
y_broad = tf.expand_dims(tf.broadcast_to(tf.expand_dims(y, -1), [y.shape[0], 12 - (alignment + 1)]), -1)
labelled_time = tf.concat([y_broad, 1 - y_broad], -1)
early_time = tf.concat([tf.zeros([y.shape[0], time_window - labelled_time.shape[1], 1], dtype=tf.int32),
tf.ones([y.shape[0], time_window - labelled_time.shape[1], 1], dtype=tf.int32)], -1)
return tf.concat([early_time, labelled_time], 1)
def fast_load(self, features):
try:
All = {}
if features is None:
date = "19-08-12"
else:
date = '19-08-30-{}'.format(features)
for split in ["train", "val", "test"]:
path = head + "/data/{}/{}-prep-data-min{}-max{}.pkl".format(split,
date,
self.min_no_dtpts,
self.max_no_dtpts)
with open(path, "rb") as f:
All[split] = pickle.load(f)
self.train_data = All["train"]
self.val_data = All["val"]
self.test_data = All["test"]
except:
self.long_load(True, features=features)
def long_load(self, to_save, features):
t_print("DataGenerator -- loading data")
if features is None:
path = self.head + "/data/train/GP_prep_v2.pkl"
with open(path, "rb") as f:
self.train_data = pickle.load(f)
path = self.head + "/data/val/GP_prep_v2.pkl"
with open(path, "rb") as f:
self.val_data = pickle.load(f)
path = self.head + "/data/test/GP_prep_v2.pkl"
with open(path, "rb") as f:
self.test_data = pickle.load(f)
else:
path = self.head + "/data/train/GP_prep_{}_v2.pkl".format(features)
with open(path, "rb") as f:
self.train_data = pickle.load(f)
path = self.head + "/data/val/GP_prep_{}_v2.pkl".format(features)
with open(path, "rb") as f:
self.val_data = pickle.load(f)
path = self.head + "/data/test/GP_prep_{}_v2.pkl".format(features)
with open(path, "rb") as f:
self.test_data = pickle.load(f)
# shorten TS too long
self.train_data, no = reduce_data(self.train_data, n_max=self.max_no_dtpts)
self.val_data, no = reduce_data(self.val_data, n_max=self.max_no_dtpts)
self.test_data, no = reduce_data(self.test_data, n_max=self.max_no_dtpts)
# pad data to have same shape
self.train_data = pad_raw_data(self.train_data)
self.val_data = pad_raw_data(self.val_data)
self.test_data = pad_raw_data(self.test_data)
# augment data to cater for all prediction horizons
self.train_data = all_horizons(self.train_data)
self.val_data = all_horizons(self.val_data)
self.test_data = all_horizons(self.test_data)
# remove TS too short
temp = []
self.train_data, no = reduce_data(self.train_data, n_min=self.min_no_dtpts)
temp.append(no)
self.val_data, no = reduce_data(self.val_data, n_min=self.min_no_dtpts)
temp.append(no)
self.test_data, no = reduce_data(self.test_data, n_min=self.min_no_dtpts)
temp.append(no)
t_print("""Removed patients out of the bound {4} < no_datapoints < {0}.
Train removed: {1} Train remaining: {5}
Val removed: {2} Val remaining: {6}
Test removed: {3} Test remaining: {7}""".format(self.max_no_dtpts, temp[0], temp[1], temp[2],
self.min_no_dtpts,
len(self.train_data[4]),
len(self.val_data[4]),
len(self.test_data[4])))
del temp
# extract new indices
self.train_data = new_indices(self.train_data)
self.val_data = new_indices(self.val_data)
self.test_data = new_indices(self.test_data)
# new data format
# data = [Y, T, ind_K_D, ind_T, len_T, X, len_X, labels, static, classes, ids, ind_Y]
# data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
if to_save:
All = {"train": self.train_data,
"val": self.val_data,
"test": self.test_data}
if features is None:
date = "19-08-12"
else:
date = '19-08-30-{}'.format(features)
for split in ["train", "val", "test"]:
path = head + "/data/{}/{}-prep-data-min{}-max{}.pkl".format(split,
date,
self.min_no_dtpts,
self.max_no_dtpts)
with open(path, "wb") as f:
pickle.dump(All[split], f)
| 53.016393
| 133
| 0.54026
|
a2ccb17ecf6e2e53b98e52198074a94a115ae93f
| 633
|
py
|
Python
|
contrib/qt_translations.py
|
zero24x/billiecoin
|
1b943b84aa687136edeb6c1fa258705a99157463
|
[
"MIT"
] | 3
|
2020-02-06T11:26:43.000Z
|
2020-03-29T16:16:30.000Z
|
contrib/qt_translations.py
|
zero24x/billiecoin
|
1b943b84aa687136edeb6c1fa258705a99157463
|
[
"MIT"
] | null | null | null |
contrib/qt_translations.py
|
zero24x/billiecoin
|
1b943b84aa687136edeb6c1fa258705a99157463
|
[
"MIT"
] | 3
|
2020-01-30T20:11:16.000Z
|
2021-08-09T05:59:05.000Z
|
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary Billiecoin Core distributions
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $BILLIECOINDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
l2 = set([ re.search(r'billiecoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'billiecoin_*.qm')) ])
print ",".join(sorted(l1.intersection(l2)))
| 27.521739
| 114
| 0.688784
|
c14f5244df62e3be421bf19941eea7e7fbb8fb3e
| 1,786
|
py
|
Python
|
molecool/visualize.py
|
molssi-workshops/molecool
|
e875876e8333b1ae5f97c2f8907836b861264e50
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/visualize.py
|
molssi-workshops/molecool
|
e875876e8333b1ae5f97c2f8907836b861264e50
|
[
"BSD-3-Clause"
] | 1
|
2021-09-17T18:19:04.000Z
|
2021-09-17T18:19:04.000Z
|
molecool/visualize.py
|
molssi-workshops/molecool
|
e875876e8333b1ae5f97c2f8907836b861264e50
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Functions for visualization of molecules.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .atom_data import atom_colors
def draw_molecule(coordinates, symbols, draw_bonds=None, save_location=None, dpi=300):
# Draw a picture of a molecule using matplotlib.
# Create figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Get colors - based on atom name
colors = []
for atom in symbols:
colors.append(atom_colors[atom])
size = np.array(plt.rcParams['lines.markersize'] ** 2)*200/(len(coordinates))
ax.scatter(coordinates[:,0], coordinates[:,1], coordinates[:,2], marker="o",
edgecolors='k', facecolors=colors, alpha=1, s=size)
# Draw bonds
if draw_bonds:
for atoms, bond_length in draw_bonds.items():
atom1 = atoms[0]
atom2 = atoms[1]
ax.plot(coordinates[[atom1,atom2], 0], coordinates[[atom1,atom2], 1],
coordinates[[atom1,atom2], 2], color='k')
# Save figure
if save_location:
        plt.savefig(save_location, dpi=dpi)
return ax
def bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):
# Draw a histogram of bond lengths based on a bond_list (output from build_bond_list function)
lengths = []
for atoms, bond_length in bond_list.items():
lengths.append(bond_length)
bins = np.linspace(graph_min, graph_max)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Bond Length (angstrom)')
plt.ylabel('Number of Bonds')
ax.hist(lengths, bins=bins)
# Save figure
if save_location:
plt.savefig(save_location, dpi=dpi)
return ax
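

# --- Illustrative usage sketch (added; not part of the original module) ---
# A made-up three-atom "molecule" showing how draw_molecule and bond_histogram
# are meant to be called together. The element symbols 'O' and 'H' are assumed
# to exist as keys in atom_data.atom_colors; the coordinates and the bond
# dictionary are invented for illustration only.
if __name__ == "__main__":
    coords = np.array([
        [0.0, 0.0, 0.0],    # O
        [0.96, 0.0, 0.0],   # H
        [-0.24, 0.93, 0.0]  # H
    ])
    symbols = ['O', 'H', 'H']
    bonds = {(0, 1): 0.96, (0, 2): 0.96}  # atom-index pairs -> bond length (angstrom)
    draw_molecule(coords, symbols, draw_bonds=bonds)
    bond_histogram(bonds)
    plt.show()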
| 25.514286
| 98
| 0.655655
|
4c56bb1eccef5f9d87378c531608b9d2152a7365
| 2,828
|
py
|
Python
|
Algorithms_medium/1429. First Unique Number.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | 4
|
2020-08-11T20:45:15.000Z
|
2021-03-12T00:33:34.000Z
|
Algorithms_medium/1429. First Unique Number.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
Algorithms_medium/1429. First Unique Number.py
|
VinceW0/Leetcode_Python_solutions
|
09e9720afce21632372431606ebec4129eb79734
|
[
"Xnet",
"X11"
] | null | null | null |
"""
1429. First Unique Number
Medium
You have a queue of integers; you need to retrieve the first unique integer in the queue.
Implement the FirstUnique class:
FirstUnique(int[] nums) Initializes the object with the numbers in the queue.
int showFirstUnique() returns the value of the first unique integer of the queue, and returns -1 if there is no such integer.
void add(int value) inserts value into the queue.
Example 1:
Input:
["FirstUnique","showFirstUnique","add","showFirstUnique","add","showFirstUnique","add","showFirstUnique"]
[[[2,3,5]],[],[5],[],[2],[],[3],[]]
Output:
[null,2,null,2,null,3,null,-1]
Explanation:
FirstUnique firstUnique = new FirstUnique([2,3,5]);
firstUnique.showFirstUnique(); // return 2
firstUnique.add(5); // the queue is now [2,3,5,5]
firstUnique.showFirstUnique(); // return 2
firstUnique.add(2); // the queue is now [2,3,5,5,2]
firstUnique.showFirstUnique(); // return 3
firstUnique.add(3); // the queue is now [2,3,5,5,2,3]
firstUnique.showFirstUnique(); // return -1
Example 2:
Input:
["FirstUnique","showFirstUnique","add","add","add","add","add","showFirstUnique"]
[[[7,7,7,7,7,7]],[],[7],[3],[3],[7],[17],[]]
Output:
[null,-1,null,null,null,null,null,17]
Explanation:
FirstUnique firstUnique = new FirstUnique([7,7,7,7,7,7]);
firstUnique.showFirstUnique(); // return -1
firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7]
firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3]
firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3,3]
firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7,3,3,7]
firstUnique.add(17); // the queue is now [7,7,7,7,7,7,7,3,3,7,17]
firstUnique.showFirstUnique(); // return 17
Example 3:
Input:
["FirstUnique","showFirstUnique","add","showFirstUnique"]
[[[809]],[],[809],[]]
Output:
[null,809,null,-1]
Explanation:
FirstUnique firstUnique = new FirstUnique([809]);
firstUnique.showFirstUnique(); // return 809
firstUnique.add(809); // the queue is now [809,809]
firstUnique.showFirstUnique(); // return -1
Constraints:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^8
1 <= value <= 10^8
At most 50000 calls will be made to showFirstUnique and add.
"""
from typing import List


class FirstUnique:
def __init__(self, nums: List[int]):
self.unique = {}
self.total = set()
for n in nums:
self.add(n)
def showFirstUnique(self) -> int:
return next(iter(self.unique), -1)
def add(self, value: int) -> None:
if value in self.total:
self.unique.pop(value, 1)
else:
self.total.add(value)
self.unique[value] = 1
# Your FirstUnique object will be instantiated and called as such:
# obj = FirstUnique(nums)
# param_1 = obj.showFirstUnique()
# obj.add(value)
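

# A minimal smoke test (added for illustration; not part of the submitted
# solution). It replays Example 1 from the problem statement above and relies
# on insertion-ordered dicts (CPython 3.7+), as the solution itself does.
if __name__ == "__main__":
    fu = FirstUnique([2, 3, 5])
    assert fu.showFirstUnique() == 2
    fu.add(5)
    assert fu.showFirstUnique() == 2
    fu.add(2)
    assert fu.showFirstUnique() == 3
    fu.add(3)
    assert fu.showFirstUnique() == -1
    print("Example 1 trace reproduced correctly.")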
| 31.076923
| 125
| 0.641089
|
f5a35cfc9b81c0c6a6c195f2fbc4735513435ff4
| 1,033
|
py
|
Python
|
profiling/bspline_point_calculation.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 515
|
2017-01-25T05:46:52.000Z
|
2022-03-29T09:52:27.000Z
|
profiling/bspline_point_calculation.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 417
|
2017-01-25T10:01:17.000Z
|
2022-03-29T09:22:04.000Z
|
profiling/bspline_point_calculation.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 149
|
2017-02-01T15:52:02.000Z
|
2022-03-17T10:33:38.000Z
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import Iterable
import time
import ezdxf
from pathlib import Path
import math
from ezdxf.math import global_bspline_interpolation, linspace
from ezdxf.render import random_3d_path
DIR = Path("~/Desktop/Outbox").expanduser()
path = list(random_3d_path(100, max_step_size=10, max_heading=math.pi * 0.8))
spline = global_bspline_interpolation(path)
def profile_bspline_point_new(count, spline):
for _ in range(count):
for t in linspace(0, 1.0, 100):
spline.point(t)
def profile_bspline_derivatives_new(count, spline):
for _ in range(count):
list(spline.derivatives(t=linspace(0, 1.0, 100)))
def profile(text, func, *args):
t0 = time.perf_counter()
func(*args)
t1 = time.perf_counter()
print(f"{text} {t1 - t0:.3f}s")
profile("B-spline point new 300x: ", profile_bspline_point_new, 300, spline)
profile(
"B-spline derivatives new 300x: ",
profile_bspline_derivatives_new,
300,
spline,
)
| 24.595238
| 77
| 0.712488
|
6ca1963479ff1683b88dfd03c218e1a40b956b67
| 173
|
py
|
Python
|
pacote-download/CursoemVideo/ex066.py
|
pemedeiros/python-CeV
|
e34eebdfd6f5cf254a9ad1ce076083c735f68f28
|
[
"MIT"
] | null | null | null |
pacote-download/CursoemVideo/ex066.py
|
pemedeiros/python-CeV
|
e34eebdfd6f5cf254a9ad1ce076083c735f68f28
|
[
"MIT"
] | null | null | null |
pacote-download/CursoemVideo/ex066.py
|
pemedeiros/python-CeV
|
e34eebdfd6f5cf254a9ad1ce076083c735f68f28
|
[
"MIT"
] | null | null | null |
c = s = 0
while True:
n = int(input('Digite um número(999 para parar): '))
if n == 999:
break
c += 1
s += n
print(f'A soma dos {c} números foi {s}')
| 19.222222
| 56
| 0.508671
|
c723fb4c711526c86b4199ba294c9f333a4f5b74
| 930
|
py
|
Python
|
view_breadcrumbs/generic/delete.py
|
sveetch/django-view-breadcrumbs
|
95943340b6cf5ffa98b73aa8fa553d96cd57c6fe
|
[
"BSD-3-Clause"
] | 29
|
2020-10-17T05:28:52.000Z
|
2022-03-10T21:14:06.000Z
|
view_breadcrumbs/generic/delete.py
|
sveetch/django-view-breadcrumbs
|
95943340b6cf5ffa98b73aa8fa553d96cd57c6fe
|
[
"BSD-3-Clause"
] | 225
|
2020-08-17T13:21:41.000Z
|
2022-03-31T11:58:50.000Z
|
view_breadcrumbs/generic/delete.py
|
sveetch/django-view-breadcrumbs
|
95943340b6cf5ffa98b73aa8fa553d96cd57c6fe
|
[
"BSD-3-Clause"
] | 5
|
2021-04-24T21:30:21.000Z
|
2021-11-01T20:28:19.000Z
|
from django.urls import reverse
from ..utils import action_view_name, classproperty
from .list import ListBreadcrumbMixin
class DeleteBreadcrumbMixin(ListBreadcrumbMixin):
@classproperty
def delete_view_name(self):
return action_view_name(
model=self.model,
action=self.delete_view_suffix,
app_name=self.app_name,
full=False,
)
@property
def __delete_view_name(self):
return action_view_name(
model=self.model, action=self.detail_view_suffix, app_name=self.app_name
)
def delete_view_url(self, instance):
if self.breadcrumb_use_pk:
return reverse(
self.__delete_view_name, kwargs={self.pk_url_kwarg: instance.pk}
)
return reverse(
self.__delete_view_name,
kwargs={self.slug_url_kwarg: getattr(instance, self.slug_field)},
)
| 28.181818
| 84
| 0.648387
|
38719afcc01b477ec643c795a4d82dada574aa33
| 2,156
|
py
|
Python
|
mak/libs/pyxx/cxx/grammar/template/name.py
|
bugengine/BugEngine
|
1b3831d494ee06b0bd74a8227c939dd774b91226
|
[
"BSD-3-Clause"
] | 4
|
2015-05-13T16:28:36.000Z
|
2017-05-24T15:34:14.000Z
|
mak/libs/pyxx/cxx/grammar/template/name.py
|
bugengine/BugEngine
|
1b3831d494ee06b0bd74a8227c939dd774b91226
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/pyxx/cxx/grammar/template/name.py
|
bugengine/BugEngine
|
1b3831d494ee06b0bd74a8227c939dd774b91226
|
[
"BSD-3-Clause"
] | 1
|
2017-03-21T08:28:07.000Z
|
2017-03-21T08:28:07.000Z
|
"""
simple-template-id:
template-name < template-argument-list? >
template-id:
simple-template-id
operator-function-id < template-argument-list? >
literal-operator-id < template-argument-list? >
template-name:
identifier
template-argument-list:
template-argument ...?
template-argument-list , template-argument ...?
template-argument:
constant-expression
type-id
id-expression
typename-specifier:
typename nested-name-specifier identifier
typename nested-name-specifier template? simple-template-id
"""
import glrp
from ...parser import cxx98
from be_typing import TYPE_CHECKING
@glrp.rule('simple-template-id : template-name [split]"<" template-argument-list? ">"')
@cxx98
def simple_template_id(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('template-id[split] : [split]simple-template-id')
@glrp.rule('template-id : operator-function-id [split]"<" template-argument-list? ">"')
@glrp.rule('template-id : literal-operator-id [split]"<" template-argument-list? ">"')
@cxx98
def template_id(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('template-name[split] : [split]"identifier"')
@cxx98
def template_name(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('template-argument-list : template-argument "..."?')
@glrp.rule('template-argument-list : template-argument-list "," template-argument "..."?')
@cxx98
def template_argument_list(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('template-argument : constant-expression')
@glrp.rule('template-argument : type-id')
@glrp.rule('template-argument[split] : id-expression')
@cxx98
def template_argument(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
@glrp.rule('typename-specifier : [split]typename nested-name-specifier "identifier"')
@glrp.rule('typename-specifier : [split]typename nested-name-specifier "template"? simple-template-id')
@cxx98
def typename_specifier(self, p):
# type: (CxxParser, glrp.Production) -> None
pass
if TYPE_CHECKING:
from ...parser import CxxParser
| 26.617284
| 103
| 0.707328
|
b5ff7d8bc8ffb893045f23875c17c07ae96def75
| 1,038
|
py
|
Python
|
tripleoclient/tests/v1/overcloud_ffwd_upgrade/fakes.py
|
cloudnull/python-tripleoclient
|
93952566d6615deb2c81467df7743d872ff77e8d
|
[
"Apache-2.0"
] | null | null | null |
tripleoclient/tests/v1/overcloud_ffwd_upgrade/fakes.py
|
cloudnull/python-tripleoclient
|
93952566d6615deb2c81467df7743d872ff77e8d
|
[
"Apache-2.0"
] | null | null | null |
tripleoclient/tests/v1/overcloud_ffwd_upgrade/fakes.py
|
cloudnull/python-tripleoclient
|
93952566d6615deb2c81467df7743d872ff77e8d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from tripleoclient.tests import fakes
class TestFFWDUpgradePrepare(fakes.FakePlaybookExecution):
def setUp(self):
super(TestFFWDUpgradePrepare, self).setUp()
class TestFFWDUpgradeRun(fakes.FakePlaybookExecution):
def setUp(self):
super(TestFFWDUpgradeRun, self).setUp()
class TestFFWDUpgradeConverge(fakes.FakePlaybookExecution):
def setUp(self):
super(TestFFWDUpgradeConverge, self).setUp()
| 29.657143
| 77
| 0.739884
|
795eb767d48958deae999ffe011691cd57517ba6
| 1,451
|
py
|
Python
|
Codementor.io/GarethDwyer/apps/flask-crud-app/bookmanager.py
|
nitin-cherian/Webapps
|
fbfbef6cb22fc742ee66460268afe6ff7834faa1
|
[
"MIT"
] | 1
|
2017-11-22T08:56:06.000Z
|
2017-11-22T08:56:06.000Z
|
Codementor.io/GarethDwyer/apps/flask-crud-app/bookmanager.py
|
nitin-cherian/Webapps
|
fbfbef6cb22fc742ee66460268afe6ff7834faa1
|
[
"MIT"
] | null | null | null |
Codementor.io/GarethDwyer/apps/flask-crud-app/bookmanager.py
|
nitin-cherian/Webapps
|
fbfbef6cb22fc742ee66460268afe6ff7834faa1
|
[
"MIT"
] | null | null | null |
# bookmanager.py
import os
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "bookdatabase.db"))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = database_file
db = SQLAlchemy(app)
class Book(db.Model):
title = db.Column(db.String(80), unique=True, nullable=False, primary_key=True)
def __repr__(self):
return "<Title>: {}".format(self.title)
@app.route("/", methods=['GET', 'POST'])
def home():
added = False
if request.form:
title = request.form.get('title')
book = Book(title=title)
db.session.add(book)
db.session.commit()
added = True
books = Book.query.all()
return render_template('home.html', books=books, added=added)
@app.route("/update", methods=['POST'])
def update():
new_title = request.form.get('new-title')
old_title = request.form.get('old-title')
book = Book.query.filter_by(title=old_title).first()
book.title = new_title
db.session.commit()
return redirect('/')
@app.route("/delete", methods=['POST'])
def delete():
title = request.form.get('title')
book = Book.query.filter_by(title=title).first()
db.session.delete(book)
db.session.commit()
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
| 25.017241
| 83
| 0.665748
|
36f1d988ebf12f6af33a2f9420d4202e3693dc32
| 342
|
py
|
Python
|
pydash/Dash/Properties/Components.py
|
ensomniac/dash
|
5a5cabd1a1d057015dd1446b6b1000af1e521355
|
[
"MIT"
] | null | null | null |
pydash/Dash/Properties/Components.py
|
ensomniac/dash
|
5a5cabd1a1d057015dd1446b6b1000af1e521355
|
[
"MIT"
] | null | null | null |
pydash/Dash/Properties/Components.py
|
ensomniac/dash
|
5a5cabd1a1d057015dd1446b6b1000af1e521355
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# 2022 Ryan Martin, ryan@ensomniac.com
# Andrew Stet, stetandrew@gmail.com
import os
import sys
from Dash.Properties.Configuration import Configuration
class Components(Configuration):
def __init__(self, dash_context_asset_path):
Configuration.__init__(self, dash_context_asset_path, "configuration")
| 22.8
| 78
| 0.77193
|
39fa63cbbe0404392afbf01b3659827d53536475
| 2,897
|
py
|
Python
|
file/fetch_data_tse_party_members.py
|
gabrielacaesar/partei-brasilien
|
356a139b495949b00afaa70f78e15631f8dbadbd
|
[
"MIT"
] | null | null | null |
file/fetch_data_tse_party_members.py
|
gabrielacaesar/partei-brasilien
|
356a139b495949b00afaa70f78e15631f8dbadbd
|
[
"MIT"
] | null | null | null |
file/fetch_data_tse_party_members.py
|
gabrielacaesar/partei-brasilien
|
356a139b495949b00afaa70f78e15631f8dbadbd
|
[
"MIT"
] | null | null | null |
""""
This script downloads and format some data from TSE website.
The first objective with this data is to obtain a list of members of parties in Brazil.
In july 2017, the data available in TSE website contained information about membership and disfellowship in brazilian parties of each state.
The data is available in csv format. On TSE's website, you have to filter choosing party and state.
The csv files from TSE contain headers. All the csv files present the same header, which we have translated below, so more people can access and reuse the code of Serenata Project.
with @HugoLnx and @jtemporal
"""
import pandas as pd
import numpy as np
import os
import urllib
import zipfile
import glob
import codecs
from tempfile import mkdtemp
TEMP_PATH = "./temp"
if not os.path.exists(TEMP_PATH):
os.makedirs(TEMP_PATH)
FILENAME_PREFIX = 'filiados_{}_{}.zip'
TSE_PARTYMEMBERS_STATE_URL = 'http://agencia.tse.jus.br/estatistica/sead/eleitorado/filiados/uf/'
TODAY = pd.datetime.today().date()
OUTPUT_FILENAME = TODAY.isoformat() + '-tse-partymembers.xz'
OUTPUT_DATASET_PATH = os.path.join('data', OUTPUT_FILENAME)
# the array with parties has considered all mentioned on TSE's website until 21/07/2017
party_list = ["DEM", "NOVO", "PEN", "PC_DO_B", "PCB", "PCO", "PDT", "PHS", "PMDB", "PMB", "PMN", "PP",
"PPL", "PPS", "PR", "PRB", "PROS", "PRP", "PRTB", "PSB", "PSC", "PSD", "PSDB", "PSDC", "PSL",
"PSOL", "PSTU", "PT", "PT_DO_B", "PTB", "PTC", "PTN", "PV", "REDE", "SD"]
#state_list = ["RS", "SC", "PR", "RJ", "SP", "ES", "MG", "GO", "DF", "TO", "MS", "MT", "AM", "AC",
# "RO", "RR", "PA", "AP", "MA", "AL", "PI", "RN", "PE", "CE", "SE", "BA", "PB"]
#party_list = ["DEM", "NOVO"]
state_list = ["RS"]
# Download files
for party in party_list:
for state in state_list:
filename = FILENAME_PREFIX.format(party.lower(), state.lower())
file_url = TSE_PARTYMEMBERS_STATE_URL + filename
print(file_url)
output_file = os.path.join(TEMP_PATH, filename)
print(output_file)
urllib.request.urlretrieve(file_url, output_file)
# Unzip downloaded files
for party in party_list:
for state in state_list:
filename = FILENAME_PREFIX.format(party.lower(), state.lower())
file_path = os.path.join(TEMP_PATH, filename)
print(file_path)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(TEMP_PATH)
zip_ref.close()
csv_pattern = os.path.join(TEMP_PATH, "aplic/sead/lista_filiados/uf/filiados_*.csv")
csv_files = glob.glob(csv_pattern)
f = codecs.open("./filiadosRS.csv", "w", "iso8859-1")
f2 = codecs.open(csv_files[0], "r", "iso8859-1")
f.write(f2.readlines()[0])
f2.close()
for csv_path in csv_files:
csv_file = codecs.open(csv_path, "r", "iso8859-1")
data = csv_file.readlines()[1:]
f.write("".join(data))
csv_file.close()
f.close()
| 35.329268
| 180
| 0.672075
|
53e49c5c4427d89ef236180fd2b3b978003aa968
| 1,395
|
py
|
Python
|
tests/groups/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
tests/groups/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
tests/groups/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
import pytest
from django.conf import settings
from django.urls import reverse
from groups.models import GroupInfo
@pytest.mark.django_db
def test_group_info_modal_only_available_to_authenticated_users(client):
response = client.get(reverse('group-info'))
assert response.status_code == 302
assert response.url.startswith(settings.LOGIN_URL)
@pytest.mark.django_db
def test_group_info_modal_shows_unlimited_visibility_groups_only(
admin_client, groups_with_info
):
url = reverse('group-info')
response = admin_client.get(url)
assert response.status_code == 200
    # Visibility is set to 'unrestricted' for all groups in `groups_with_info`,
    # so the same groups should be displayed
expected_items = tuple(group.info for group in groups_with_info)
actual_items = tuple(response.context['queryset'])
assert actual_items == expected_items
# Check that expected groups are actually rendered
modal_html = response.json()['html']
for item in expected_items:
assert '<dt>' + item.name_singular + '</dt>' in modal_html
assert '<dd>' + item.role_match_description + '</dd>' in modal_html
# Change the visibility of groups and try again
GroupInfo.objects.all().update(
visibility=GroupInfo.VISIBILITY_MANAGERS_ONLY)
response = admin_client.get(url)
assert response.context['queryset'].exists() is False
| 34.875
| 78
| 0.74552
|
310bdc6709c2e41934f9b1c8dc4fa1af1c65912e
| 44,505
|
py
|
Python
|
python/ccxt/bitbay.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-06-02T10:48:48.000Z
|
2022-03-12T20:46:01.000Z
|
python/ccxt/bitbay.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 3
|
2020-09-08T00:13:39.000Z
|
2021-05-08T20:05:48.000Z
|
python/ccxt/bitbay.py
|
caoshitong369/ccxt
|
e0f183448bbf8f95e84c71e5f185404dabab3955
|
[
"MIT"
] | 1
|
2020-03-16T03:22:17.000Z
|
2020-03-16T03:22:17.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import InvalidNonce
class bitbay(Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'], # Malta
'rateLimit': 1000,
'has': {
'CORS': True,
'withdraw': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
},
'urls': {
'referral': 'https://auth.bitbay.net/ref/jHlbB4mIkdS1',
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://bitbay.net/API/Public',
'private': 'https://bitbay.net/API/Trading/tradingApi.php',
'v1_01Public': 'https://api.bitbay.net/rest',
'v1_01Private': 'https://api.bitbay.net/rest',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/en/private-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
'https://docs.bitbay.net/v1.0.1-en/reference',
],
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
'v1_01Public': {
'get': [
'trading/ticker',
'trading/ticker/{symbol}',
'trading/stats',
'trading/orderbook/{symbol}',
'trading/transactions/{symbol}',
'trading/candle/history/{symbol}/{resolution}',
],
},
'v1_01Private': {
'get': [
'payments/withdrawal/{detailId}',
'payments/deposit/{detailId}',
'trading/offer',
'trading/config/{symbol}',
'trading/history/transactions',
'balances/BITBAY/history',
'balances/BITBAY/balance',
'fiat_cantor/rate/{baseId}/{quoteId}',
'fiat_cantor/history',
],
'post': [
'trading/offer/{symbol}',
'trading/config/{symbol}',
'balances/BITBAY/balance',
'balances/BITBAY/balance/transfer/{source}/{destination}',
'fiat_cantor/exchange',
],
'delete': [
'trading/offer/{symbol}/{id}/{side}/{price}',
],
'put': [
'balances/BITBAY/balance/{id}',
],
},
},
'fees': {
'trading': {
'maker': 0.3 / 100,
'taker': 0.0043,
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'exceptions': {
'400': ExchangeError, # At least one parameter wasn't set
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds, # No enough money or crypto
# code 407 not specified are not specified in their docs
'408': InvalidOrder, # Invalid currency name
'501': AuthenticationError, # Invalid public key
'502': AuthenticationError, # Invalid sign
'503': InvalidNonce, # Invalid moment parameter. Request time doesn't match current server time
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AuthenticationError, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': ExchangeError, # Invalid market name
'FUNDS_NOT_SUFFICIENT': InsufficientFunds,
'OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS': InvalidOrder,
'OFFER_NOT_FOUND': OrderNotFound,
},
})
def fetch_markets(self, params={}):
response = self.v1_01PublicGetTradingTicker(params)
#
# {
# status: 'Ok',
# items: {
# 'BSV-USD': {
# market: {
# code: 'BSV-USD',
# first: {currency: 'BSV', minOffer: '0.00035', scale: 8},
# second: {currency: 'USD', minOffer: '5', scale: 2}
# },
# time: '1557569762154',
# highestBid: '52.31',
# lowestAsk: '62.99',
# rate: '63',
# previousRate: '51.21',
# },
# },
# }
#
result = []
items = self.safe_value(response, 'items')
keys = list(items.keys())
for i in range(0, len(keys)):
key = keys[i]
item = items[key]
market = self.safe_value(item, 'market', {})
first = self.safe_value(market, 'first', {})
second = self.safe_value(market, 'second', {})
baseId = self.safe_string(first, 'currency')
quoteId = self.safe_string(second, 'currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(first, 'scale'),
'price': self.safe_integer(second, 'scale'),
}
            # todo: check that the limits have been interpreted correctly
# todo: parse the fees page
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'active': None,
'fee': None,
'limits': {
'amount': {
'min': self.safe_float(first, 'minOffer'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_float(second, 'minOffer'),
'max': None,
},
},
'info': item,
})
return result
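    # Illustrative note (not part of the original file): for the sample 'BSV-USD'
    # entry shown in the comment above, the loop builds a market entry like
    #     {'id': 'BSVUSD', 'symbol': 'BSV/USD', 'base': 'BSV', 'quote': 'USD',
    #      'baseId': 'BSV', 'quoteId': 'USD',
    #      'precision': {'amount': 8, 'price': 2},
    #      'limits': {'amount': {'min': 0.00035, 'max': None},
    #                 'price': {'min': None, 'max': None},
    #                 'cost': {'min': 5.0, 'max': None}}, ...}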
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
response = self.v1_01PrivateGetTradingOffer(self.extend(request, params))
items = self.safe_value(response, 'items', [])
return self.parse_orders(items, None, since, limit, {'status': 'open'})
def parse_order(self, order, market=None):
#
# {
# market: 'ETH-EUR',
# offerType: 'Sell',
# id: '93d3657b-d616-11e9-9248-0242ac110005',
# currentAmount: '0.04',
# lockedAmount: '0.04',
# rate: '280',
# startAmount: '0.04',
# time: '1568372806924',
# postOnly: False,
# hidden: False,
# mode: 'limit',
# receivedAmount: '0.0',
# firstBalanceId: '5b816c3e-437c-4e43-9bef-47814ae7ebfc',
# secondBalanceId: 'ab43023b-4079-414c-b340-056e3430a3af'
# }
#
marketId = self.safe_string(order, 'market')
symbol = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is None:
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'time')
amount = self.safe_float(order, 'startAmount')
remaining = self.safe_float(order, 'currentAmount')
filled = None
if amount is not None:
if remaining is not None:
filled = max(0, amount - remaining)
return {
'id': self.safe_string(order, 'id'),
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': self.safe_string(order, 'mode'),
'side': self.safe_string_lower(order, 'offerType'),
'price': self.safe_float(order, 'rate'),
'amount': amount,
'cost': None,
'filled': filled,
'remaining': remaining,
'average': None,
'fee': None,
'trades': None,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if symbol:
markets = [self.market_id(symbol)]
request['markets'] = markets
query = {'query': self.json(self.extend(request, params))}
response = self.v1_01PrivateGetTradingHistoryTransactions(query)
#
# {
# status: 'Ok',
# totalRows: '67',
# items: [
# {
# id: 'b54659a0-51b5-42a0-80eb-2ac5357ccee2',
# market: 'BTC-EUR',
# time: '1541697096247',
# amount: '0.00003',
# rate: '4341.44',
# initializedBy: 'Sell',
# wasTaker: False,
# userAction: 'Buy',
# offerId: 'bd19804a-6f89-4a69-adb8-eb078900d006',
# commissionValue: null
# },
# ]
# }
#
items = self.safe_value(response, 'items')
result = self.parse_trades(items, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
def fetch_balance(self, params={}):
self.load_markets()
response = self.v1_01PrivateGetBalancesBITBAYBalance(params)
balances = self.safe_value(response, 'balances')
if balances is None:
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'lockedFunds')
account['free'] = self.safe_float(balance, 'availableFunds')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'id': self.market_id(symbol),
}
orderbook = self.publicGetIdOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
request = {
'id': self.market_id(symbol),
}
ticker = self.publicGetIdTicker(self.extend(request, params))
timestamp = self.milliseconds()
baseVolume = self.safe_float(ticker, 'volume')
vwap = self.safe_float(ticker, 'vwap')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max'),
'low': self.safe_float(ticker, 'min'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
balanceCurrencies = []
if code is not None:
currency = self.currency(code)
balanceCurrencies.append(currency['id'])
request = {
'balanceCurrencies': balanceCurrencies,
}
if since is not None:
request['fromTime'] = since
if limit is not None:
request['limit'] = limit
request = self.extend(request, params)
response = self.v1_01PrivateGetBalancesBITBAYHistory({'query': self.json(request)})
items = response['items']
return self.parse_ledger(items, None, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# FUNDS_MIGRATION
# {
# "historyId": "84ea7a29-7da5-4de5-b0c0-871e83cad765",
# "balance": {
# "id": "821ec166-cb88-4521-916c-f4eb44db98df",
# "currency": "LTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "LTC"
# },
# "detailId": null,
# "time": 1506128252968,
# "type": "FUNDS_MIGRATION",
# "value": 0.0009957,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.0009957, "available": 0.0009957, "locked": 0},
# "change": {"total": 0.0009957, "available": 0.0009957, "locked": 0}
# }
#
# CREATE_BALANCE
# {
# "historyId": "d0fabd8d-9107-4b5e-b9a6-3cab8af70d49",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244751,
# "type": "CREATE_BALANCE",
# "value": 0,
# "fundsBefore": {"total": null, "available": null, "locked": null},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": 0, "available": 0, "locked": 0}
# }
#
# BITCOIN_GOLD_FORK
# {
# "historyId": "2b4d52d3-611c-473d-b92c-8a8d87a24e41",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244778,
# "type": "BITCOIN_GOLD_FORK",
# "value": 0.00453512,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "change": {"total": 0.00453512, "available": 0.00453512, "locked": 0}
# }
#
# ADD_FUNDS
# {
# "historyId": "3158236d-dae5-4a5d-81af-c1fa4af340fb",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "8e83a960-e737-4380-b8bb-259d6e236faa",
# "time": 1520631178816,
# "type": "ADD_FUNDS",
# "value": 0.628405,
# "fundsBefore": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "change": {"total": 0.628405, "available": 0.628405, "locked": 0}
# }
#
# TRANSACTION_PRE_LOCKING
# {
# "historyId": "e7d19e0f-03b3-46a8-bc72-dde72cc24ead",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520706403868,
# "type": "TRANSACTION_PRE_LOCKING",
# "value": -0.1,
# "fundsBefore": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "change": {"total": 0, "available": -0.1, "locked": 0.1}
# }
#
# TRANSACTION_POST_OUTCOME
# {
# "historyId": "c4010825-231d-4a9c-8e46-37cde1f7b63c",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "bf2876bc-b545-4503-96c8-ef4de8233876",
# "time": 1520706404032,
# "type": "TRANSACTION_POST_OUTCOME",
# "value": -0.01771415,
# "fundsBefore": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "fundsAfter": {"total": 0.61522597, "available": 0.53294012, "locked": 0.08228585},
# "change": {"total": -0.01771415, "available": 0, "locked": -0.01771415}
# }
#
# TRANSACTION_POST_INCOME
# {
# "historyId": "7f18b7af-b676-4125-84fd-042e683046f6",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404035,
# "type": "TRANSACTION_POST_INCOME",
# "value": 628.78,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 628.78, "available": 628.78, "locked": 0},
# "change": {"total": 628.78, "available": 628.78, "locked": 0}
# }
#
# TRANSACTION_COMMISSION_OUTCOME
# {
# "historyId": "843177fa-61bc-4cbf-8be5-b029d856c93b",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404050,
# "type": "TRANSACTION_COMMISSION_OUTCOME",
# "value": -2.71,
# "fundsBefore": {"total": 766.06, "available": 766.06, "locked": 0},
# "fundsAfter": {"total": 763.35,"available": 763.35, "locked": 0},
# "change": {"total": -2.71, "available": -2.71, "locked": 0}
# }
#
# TRANSACTION_OFFER_COMPLETED_RETURN
# {
# "historyId": "cac69b04-c518-4dc5-9d86-e76e91f2e1d2",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520714886425,
# "type": "TRANSACTION_OFFER_COMPLETED_RETURN",
# "value": 0.00000196,
# "fundsBefore": {"total": 0.00941208, "available": 0.00941012, "locked": 0.00000196},
# "fundsAfter": {"total": 0.00941208, "available": 0.00941208, "locked": 0},
# "change": {"total": 0, "available": 0.00000196, "locked": -0.00000196}
# }
#
# WITHDRAWAL_LOCK_FUNDS
# {
# "historyId": "03de2271-66ab-4960-a786-87ab9551fc14",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522245654481,
# "type": "WITHDRAWAL_LOCK_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0.8, "locked": 0},
# "fundsAfter": {"total": 0.8, "available": 0, "locked": 0.8},
# "change": {"total": 0, "available": -0.8, "locked": 0.8}
# }
#
# WITHDRAWAL_SUBTRACT_FUNDS
# {
# "historyId": "b0308c89-5288-438d-a306-c6448b1a266d",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522246526186,
# "type": "WITHDRAWAL_SUBTRACT_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0, "locked": 0.8},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": -0.8, "available": 0, "locked": -0.8}
# }
#
# TRANSACTION_OFFER_ABORTED_RETURN
# {
# "historyId": "b1a3c075-d403-4e05-8f32-40512cdd88c0",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1522512298662,
# "type": "TRANSACTION_OFFER_ABORTED_RETURN",
# "value": 0.0564931,
# "fundsBefore": {"total": 0.44951311, "available": 0.39302001, "locked": 0.0564931},
# "fundsAfter": {"total": 0.44951311, "available": 0.44951311, "locked": 0},
# "change": {"total": 0, "available": 0.0564931, "locked": -0.0564931}
# }
#
# WITHDRAWAL_UNLOCK_FUNDS
# {
# "historyId": "0ed569a2-c330-482e-bb89-4cb553fb5b11",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "0c7be256-c336-4111-bee7-4eb22e339700",
# "time": 1527866360785,
# "type": "WITHDRAWAL_UNLOCK_FUNDS",
# "value": 0.05045,
# "fundsBefore": {"total": 0.86001578, "available": 0.80956578, "locked": 0.05045},
# "fundsAfter": {"total": 0.86001578, "available": 0.86001578, "locked": 0},
# "change": {"total": 0, "available": 0.05045, "locked": -0.05045}
# }
#
# TRANSACTION_COMMISSION_RETURN
# {
# "historyId": "07c89c27-46f1-4d7a-8518-b73798bf168a",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": null,
# "time": 1528304043063,
# "type": "TRANSACTION_COMMISSION_RETURN",
# "value": 0.6,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.6, "available": 0.6, "locked": 0},
# "change": {"total": 0.6, "available": 0.6, "locked": 0}
# }
#
timestamp = self.safe_integer(item, 'time')
balance = self.safe_value(item, 'balance', {})
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
change = self.safe_value(item, 'change', {})
amount = self.safe_float(change, 'total')
direction = 'in'
if amount < 0:
direction = 'out'
amount = -amount
id = self.safe_string(item, 'historyId')
# there are 2 undocumented api calls: (v1_01PrivateGetPaymentsDepositDetailId and v1_01PrivateGetPaymentsWithdrawalDetailId)
        # that can be used to enrich the transfers with txid, address, etc. (you need to use info.detailId as a parameter)
referenceId = self.safe_string(item, 'detailId')
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
fundsBefore = self.safe_value(item, 'fundsBefore', {})
before = self.safe_float(fundsBefore, 'total')
fundsAfter = self.safe_value(item, 'fundsAfter', {})
after = self.safe_float(fundsAfter, 'total')
return {
'info': item,
'id': id,
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': 'ok',
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def parse_ledger_entry_type(self, type):
types = {
'ADD_FUNDS': 'transaction',
'BITCOIN_GOLD_FORK': 'transaction',
'CREATE_BALANCE': 'transaction',
'FUNDS_MIGRATION': 'transaction',
'WITHDRAWAL_LOCK_FUNDS': 'transaction',
'WITHDRAWAL_SUBTRACT_FUNDS': 'transaction',
'WITHDRAWAL_UNLOCK_FUNDS': 'transaction',
'TRANSACTION_COMMISSION_OUTCOME': 'fee',
'TRANSACTION_COMMISSION_RETURN': 'fee',
'TRANSACTION_OFFER_ABORTED_RETURN': 'trade',
'TRANSACTION_OFFER_COMPLETED_RETURN': 'trade',
'TRANSACTION_POST_INCOME': 'trade',
'TRANSACTION_POST_OUTCOME': 'trade',
'TRANSACTION_PRE_LOCKING': 'trade',
}
return self.safe_string(types, type, type)
def parse_trade(self, trade, market=None):
#
# createOrder trades
#
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# }
#
# fetchMyTrades(private)
#
# {
# amount: "0.29285199",
# commissionValue: "0.00125927",
# id: "11c8203a-a267-11e9-b698-0242ac110007",
# initializedBy: "Buy",
# market: "ETH-EUR",
# offerId: "11c82038-a267-11e9-b698-0242ac110007",
# rate: "277",
# time: "1562689917517",
# userAction: "Buy",
# wasTaker: True,
# }
#
# fetchTrades(public)
#
# {
# id: 'df00b0da-e5e0-11e9-8c19-0242ac11000a',
# t: '1570108958831',
# a: '0.04776653',
# r: '0.02145854',
# ty: 'Sell'
# }
#
timestamp = self.safe_integer_2(trade, 'time', 't')
userAction = self.safe_string(trade, 'userAction')
side = 'buy' if (userAction == 'Buy') else 'sell'
wasTaker = self.safe_value(trade, 'wasTaker')
takerOrMaker = None
if wasTaker is not None:
takerOrMaker = 'taker' if wasTaker else 'maker'
price = self.safe_float_2(trade, 'rate', 'r')
amount = self.safe_float_2(trade, 'amount', 'a')
cost = None
if amount is not None:
if price is not None:
cost = price * amount
feeCost = self.safe_float(trade, 'commissionValue')
marketId = self.safe_string(trade, 'market')
base = None
quote = None
symbol = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
if symbol is None:
symbol = market['symbol']
if base is None:
base = market['base']
fee = None
if feeCost is not None:
feeCcy = base if (side == 'buy') else quote
fee = {
'currency': feeCcy,
'cost': feeCost,
}
order = self.safe_string(trade, 'offerId')
        # todo: check this logic
type = None
if order is not None:
type = 'limit' if order else 'market'
return {
'id': self.safe_string(trade, 'id'),
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
}
if since is not None:
request['fromTime'] = since - 1 # result does not include exactly `since` time therefore decrease by 1
if limit is not None:
request['limit'] = limit # default - 10, max - 300
response = self.v1_01PublicGetTradingTransactionsSymbol(self.extend(request, params))
items = self.safe_value(response, 'items')
return self.parse_trades(items, symbol, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'offerType': side,
'amount': amount,
'mode': type,
}
if type == 'limit':
request['rate'] = price
response = self.v1_01PrivatePostTradingOfferSymbol(self.extend(request, params))
#
# unfilled(open order)
#
# {
# status: 'Ok',
# completed: False, # can deduce status from here
# offerId: 'ce9cc72e-d61c-11e9-9248-0242ac110005',
# transactions: [], # can deduce order info from here
# }
#
# filled(closed order)
#
# {
# "status": "Ok",
# "offerId": "942a4a3e-e922-11e9-8c19-0242ac11000a",
# "completed": True,
# "transactions": [
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02196207",
# "amount": "0.27704177"
# }
# ]
# }
#
# partially-filled(open order)
#
# {
# "status": "Ok",
# "offerId": "d0ebefab-f4d7-11e9-8c19-0242ac11000a",
# "completed": False,
# "transactions": [
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02105901",
# "amount": "0.00975256"
# }
# ]
# }
#
timestamp = self.milliseconds() # the real timestamp is missing in the response
id = self.safe_string(response, 'offerId')
completed = self.safe_value(response, 'completed', False)
status = 'closed' if completed else 'open'
filled = 0
cost = None
transactions = self.safe_value(response, 'transactions')
trades = None
if transactions is not None:
trades = self.parse_trades(transactions, market, None, None, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'side': side,
'type': type,
'orderId': id,
})
cost = 0
for i in range(0, len(trades)):
filled = self.sum(filled, trades[i]['amount'])
cost = self.sum(cost, trades[i]['cost'])
remaining = amount - filled
return {
'id': id,
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': float(price),
'amount': float(amount),
'cost': cost,
'filled': filled,
'remaining': remaining,
'average': None,
'fee': None,
'trades': trades,
}
def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `side` parameter("buy" or "sell")')
price = self.safe_value(params, 'price')
if price is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `price` parameter(float or string)')
self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'id': id,
'side': side,
'price': price,
}
# {status: 'Fail', errors: ['NOT_RECOGNIZED_OFFER_TYPE']} -- if required params are missing
# {status: 'Ok', errors: []}
return self.v1_01PrivateDeleteTradingOfferSymbolIdSidePrice(self.extend(request, params))
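    # Illustrative usage sketch (not part of the original file): cancelling requires
    # the extra `side` and `price` params enforced above; the id, symbol and values
    # below are hypothetical placeholders.
    #     exchange.cancel_order('some-order-id', 'BTC/PLN', {'side': 'buy', 'price': 17000})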
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
return self.safe_value(fiatCurrencies, currency, False)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
# request['bic'] = ''
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Private':
self.check_required_credentials()
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
nonce = str(self.milliseconds())
payload = None
if method != 'POST':
if query:
url += '?' + self.urlencode(query)
payload = self.apiKey + nonce
elif body is None:
body = self.json(query)
payload = self.apiKey + nonce + body
headers = {
'Request-Timestamp': nonce,
'Operation-Id': self.uuid(),
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512),
'Content-Type': 'application/json',
}
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'code' in response:
#
# bitbay returns the integer 'success': 1 key from their private API
# or an integer 'code' value from 0 to 510 and an error message
#
# {'success': 1, ...}
# {'code': 502, 'message': 'Invalid sign'}
# {'code': 0, 'message': 'offer funds not exceeding minimums'}
#
# 400 At least one parameter wasn't set
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
            # 405 Order with this id doesn't exist
            # 406 Not enough money or crypto
# 408 Invalid currency name
# 501 Invalid public key
# 502 Invalid sign
# 503 Invalid moment parameter. Request time doesn't match current server time
# 504 Invalid method
            # 505 Key has no permission for this action
# 506 Account locked. Please contact with customer service
            # 509 The BIC/SWIFT is required for this currency
# 510 Invalid market name
#
code = self.safe_string(response, 'code') # always an integer
feedback = self.id + ' ' + body
exceptions = self.exceptions
if code in self.exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
elif 'status' in response:
#
# {"status":"Fail","errors":["OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS"]}
#
status = self.safe_string(response, 'status')
if status == 'Fail':
errors = self.safe_value(response, 'errors')
feedback = self.id + ' ' + self.json(response)
for i in range(0, len(errors)):
error = errors[i]
if error in self.exceptions:
raise self.exceptions[error](feedback)
raise ExchangeError(feedback)
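# --- Illustrative sketch (not part of the original exchange class) ---
# The v1_01Private branch of sign() above builds the 'API-Hash' header as an
# HMAC-SHA512 over apiKey + nonce (+ the JSON body for POST requests). The helper
# below reproduces that computation standalone; the key, secret and body are
# made-up placeholders, and the 'Operation-Id' (a UUID) header is omitted here.
def _example_v1_01_signature(api_key='my-public-key', secret='my-secret', body=None):
    import hashlib
    import hmac
    import time
    nonce = str(int(time.time() * 1000))  # milliseconds, like self.milliseconds()
    payload = api_key + nonce + (body if body is not None else '')
    signature = hmac.new(secret.encode(), payload.encode(), hashlib.sha512).hexdigest()
    return {
        'Request-Timestamp': nonce,
        'API-Key': api_key,
        'API-Hash': signature,
        'Content-Type': 'application/json',
    }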
| 40.718207
| 132
| 0.469003
|
335d349c493109f3d1b7529de06909067996fd5c
| 5,499
|
py
|
Python
|
kerMIT/kerMIT/explain/genericMultiLayer_LRP.py
|
ART-Group-it/kerMIT
|
ff309ce3154c5292602c53cd19633d789bf759e2
|
[
"MIT"
] | 1
|
2020-07-03T13:29:12.000Z
|
2020-07-03T13:29:12.000Z
|
kerMIT/kerMIT/explain/genericMultiLayer_LRP.py
|
ART-Group-it/kerMIT
|
ff309ce3154c5292602c53cd19633d789bf759e2
|
[
"MIT"
] | null | null | null |
kerMIT/kerMIT/explain/genericMultiLayer_LRP.py
|
ART-Group-it/kerMIT
|
ff309ce3154c5292602c53cd19633d789bf759e2
|
[
"MIT"
] | 1
|
2020-05-07T17:16:41.000Z
|
2020-05-07T17:16:41.000Z
|
import torch
#### INIZIO DARIO
#activation = {}
def get_activation(name, activation):
def hook(model, input, output):
activation[name] = output.detach()
return hook
def getWeightAnBiasByName(model, layer_name):
# weight, bias = _, _
weight, bias = None, None
for name, param in model.named_parameters():
if name == layer_name + '.weight' and param.requires_grad:
weight = param.data
elif name == layer_name + '.bias' and param.requires_grad:
bias = param.data
return weight, bias
def lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor=0.0, debug=False):
"""
LRP for a linear layer with input dim D and output dim M.
Args:
- hin: forward pass input, of shape (D,)
- w: connection weights, of shape (D, M)
- b: biases, of shape (M,)
- hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!)
- Rout: relevance at layer output, of shape (M,)
- bias_nb_units: total number of connected lower-layer units (onto which the bias/stabilizer contribution is redistributed for sanity check)
- eps: stabilizer (small positive number)
- bias_factor: set to 1.0 to check global relevance conservation, otherwise use 0.0 to ignore bias/stabilizer redistribution (recommended)
Returns:
- Rin: relevance at layer input, of shape (D,)
"""
sign_out = torch.where(hout.cpu() >= 0, torch.Tensor([1.]), torch.Tensor([-1.])).view(1, -1) # shape (1, M)
numer = (w * hin.view(-1, 1)).cpu() + (
bias_factor * (b.view(1, -1) * 1. + eps * sign_out * 1.) / bias_nb_units) # shape (D, M)
# Note: here we multiply the bias_factor with both the bias b and the stabilizer eps since in fact
# using the term (b[na,:]*1. + eps*sign_out*1.) / bias_nb_units in the numerator is only useful for sanity check
# (in the initial paper version we were using (bias_factor*b[na,:]*1. + eps*sign_out*1.) / bias_nb_units instead)
denom = hout.view(1, -1) + (eps * sign_out * 1.) # shape (1, M)
message = (numer / denom) * Rout.view(1, -1) # shape (D, M)
Rin = message.sum(axis=1) # shape (D,)
if debug:
print("local diff: ", Rout.sum() - Rin.sum())
# Note:
# - local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D (i.e. when only one incoming layer)
# - global network relevance conservation if bias_factor==1.0 and bias_nb_units set accordingly to the total number of lower-layer connections
# -> can be used for sanity check
return Rin
def prepare_single_pass(model, activation, start_layer, end_layer, isFirstCompute = True):
hout = activation[start_layer].reshape(-1)
if end_layer != None:
hin = activation[end_layer].reshape(-1).cpu()
else:
hin = None
w, b = getWeightAnBiasByName(model, start_layer)
w = w.reshape(w.shape[1], w.shape[0])
bias_nb_units = b.shape[0]
eps = 0.001
# eps = 0.2
bias_factor = 1.0
if isFirstCompute:
mask = torch.zeros(hout.shape[0])
mask[torch.argmax(hout)] = hout[torch.argmax(hout)]
Rout = torch.Tensor(mask).cpu()
else:
Rout = None
return hin, w.cpu(), b.cpu(), hout.cpu(), Rout, bias_nb_units, eps, bias_factor
##### FMZ Trying an intuition
def compute_LRP_FFNN(model, activation, layer_names, on_demand_embedding_matrix, single_test, demux_layer=None,
demux_span=(None, None)):
isFirstCompute = True
for i in range(len(layer_names) - 1):
print(layer_names[i], layer_names[i + 1])
hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor = prepare_single_pass(model, activation, layer_names[i],
layer_names[i + 1], isFirstCompute)
if not isFirstCompute:
Rout = Rin
Rin = lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor)
# Handling the demultiplexing of the transformer and the distributed structure encoder MLP
# and isolating the contribution for the distributed structure encoder MLP
if demux_layer != None and demux_layer == layer_names[i]:
Rin = Rin[demux_span[0], demux_span[1]]
isFirstCompute = False
# compute the last layer
_, w, b, hout, Rout, bias_nb_units, eps, bias_factor = prepare_single_pass(model, activation, layer_names[-1], None,
isFirstCompute)
# Handling the demultiplexing of the transformer and the distributed structure encoder MLP
# and isolating the contribution for the distributed structure encoder MLP
if not isFirstCompute:
Rout = Rin
if demux_layer != None and demux_layer == layer_names[-1]:
print(Rout)
print(w.shape)
# Rout = Rout[demux_span[0],demux_span[1]]
w = w[demux_span[0]:demux_span[1]]
hin = single_test.reshape(-1).cpu()
# FMZ Rin = lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False)
# print(on_demand_embedding_matrix.shape)
# print(w.shape)
Rin = lrp_linear_torch(hin, torch.matmul(on_demand_embedding_matrix, w), b, hout, Rout, bias_nb_units, eps,
bias_factor, debug=False)
return Rin
#### FINE DARIO
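# --- Illustrative sketch (not part of the original module) ---
# Minimal check of lrp_linear_torch() on a toy 4 -> 3 linear layer. With
# bias_factor=1.0 and bias_nb_units=D, the notes above promise relevance
# conservation, i.e. Rin.sum() ~= Rout.sum(). Shapes and values are made up.
def _demo_lrp_linear():
    torch.manual_seed(0)
    D, M = 4, 3
    hin = torch.randn(D)     # layer input, shape (D,)
    w = torch.randn(D, M)    # weights, shape (D, M)
    b = torch.randn(M)       # biases, shape (M,)
    hout = hin @ w + b       # forward pass output, shape (M,)
    Rout = hout.clone()      # start the relevance backward pass from the output
    Rin = lrp_linear_torch(hin, w, b, hout, Rout,
                           bias_nb_units=D, eps=0.001, bias_factor=1.0, debug=True)
    print('conservation gap:', float(Rout.sum() - Rin.sum()))  # expected to be ~0
    return Rin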
| 44.346774
| 146
| 0.626659
|
b4ca080ffaec77847ca5ae2bafcc13bd4ee79e42
| 963
|
py
|
Python
|
wueevents/webapi/tests/test_private_api_tests.py
|
suspect22/wueevents
|
c8fb54c76da74d1c553418d04ea38cda810913ab
|
[
"MIT"
] | null | null | null |
wueevents/webapi/tests/test_private_api_tests.py
|
suspect22/wueevents
|
c8fb54c76da74d1c553418d04ea38cda810913ab
|
[
"MIT"
] | null | null | null |
wueevents/webapi/tests/test_private_api_tests.py
|
suspect22/wueevents
|
c8fb54c76da74d1c553418d04ea38cda810913ab
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from rest_framework.test import APIClient
from django.urls import reverse
from django.contrib.auth import get_user_model
class PrivateApiTests(TestCase):
"""Holds all API Tests which will doesn't require authenticated Users"""
API_ENDPOINT_WEBSITE = reverse("webapi:website-list")
def setUp(self):
self.apiClient = APIClient()
self.authenticatedUser = get_user_model().objects.create_user(
username="testuser",
email="testemail@bla.com",
password="TestPassword123"
)
self.apiClient.force_authenticate(self.authenticatedUser)
def test_create_website(self):
pass
def test_create_website_with_invalid_values(self):
pass
def test_create_scheduled_element(self):
pass
def test_create_scheduled_element_with_invalid_values(self):
pass
def tearDown(self):
self.authenticatedUser.delete()
| 27.514286
| 76
| 0.707165
|
ef855ddae8558366409489bae703fdc1f3d38f6e
| 9,869
|
py
|
Python
|
c-deps/krb5/src/tests/t_proxy.py
|
Yangjxxxxx/ZNBase
|
dcf993b73250dd5cb63041f4d9cf098941f67b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
c-deps/krb5/src/tests/t_proxy.py
|
Yangjxxxxx/ZNBase
|
dcf993b73250dd5cb63041f4d9cf098941f67b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
c-deps/krb5/src/tests/t_proxy.py
|
Yangjxxxxx/ZNBase
|
dcf993b73250dd5cb63041f4d9cf098941f67b2b
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from k5test import *
# Skip this test if we're missing proxy functionality or parts of the proxy.
if runenv.tls_impl == 'no':
skip_rest('HTTP proxy tests', 'TLS build support not enabled')
try:
from paste import httpserver
except:
skip_rest('HTTP proxy tests', 'Python paste module not found')
try:
import kdcproxy
except:
skip_rest('HTTP proxy tests', 'Python kdcproxy module not found')
# Construct a krb5.conf fragment configuring the client to use a local proxy
# server.
proxysubjectpem = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs',
'proxy-subject.pem')
proxysanpem = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs',
'proxy-san.pem')
proxyidealpem = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs',
'proxy-ideal.pem')
proxywrongpem = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs',
'proxy-no-match.pem')
proxybadpem = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs',
'proxy-badsig.pem')
proxyca = os.path.join(srctop, 'tests', 'dejagnu', 'proxy-certs', 'ca.pem')
proxyurl = 'https://localhost:$port5/KdcProxy'
proxyurlupcase = 'https://LocalHost:$port5/KdcProxy'
proxyurl4 = 'https://127.0.0.1:$port5/KdcProxy'
proxyurl6 = 'https://[::1]:$port5/KdcProxy'
unanchored_krb5_conf = {'realms': {'$realm': {
'kdc': proxyurl,
'kpasswd_server': proxyurl}}}
anchored_name_krb5_conf = {'realms': {'$realm': {
'kdc': proxyurl,
'kpasswd_server': proxyurl,
'http_anchors': 'FILE:%s' % proxyca}}}
anchored_upcasename_krb5_conf = {'realms': {'$realm': {
'kdc': proxyurlupcase,
'kpasswd_server': proxyurlupcase,
'http_anchors': 'FILE:%s' % proxyca}}}
anchored_kadmin_krb5_conf = {'realms': {'$realm': {
'kdc': proxyurl,
'admin_server': proxyurl,
'http_anchors': 'FILE:%s' % proxyca}}}
anchored_ipv4_krb5_conf = {'realms': {'$realm': {
'kdc': proxyurl4,
'kpasswd_server': proxyurl4,
'http_anchors': 'FILE:%s' % proxyca}}}
kpasswd_input = (password('user') + '\n' + password('user') + '\n' +
password('user') + '\n')
def start_proxy(realm, keycertpem):
proxy_conf_path = os.path.join(realm.testdir, 'kdcproxy.conf')
proxy_exec_path = os.path.join(srctop, 'util', 'paste-kdcproxy.py')
conf = open(proxy_conf_path, 'w')
conf.write('[%s]\n' % realm.realm)
conf.write('kerberos = kerberos://localhost:%d\n' % realm.portbase)
conf.write('kpasswd = kpasswd://localhost:%d\n' % (realm.portbase + 2))
conf.close()
realm.env['KDCPROXY_CONFIG'] = proxy_conf_path
cmd = [sys.executable, proxy_exec_path, str(realm.server_port()),
keycertpem]
return realm.start_server(cmd, sentinel='proxy server ready')
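# For reference, start_proxy() above writes a kdcproxy.conf of this shape
# (realm name and port numbers are illustrative, not taken from a real run):
#
#     [KRBTEST.COM]
#     kerberos = kerberos://localhost:61000
#     kpasswd = kpasswd://localhost:61002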
# Fail: untrusted issuer and hostname doesn't match.
mark('untrusted issuer, hostname mismatch')
output("running pass 1: issuer not trusted and hostname doesn't match\n")
realm = K5Realm(krb5_conf=unanchored_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxywrongpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: untrusted issuer, host name matches subject.
mark('untrusted issuer, hostname subject match')
output("running pass 2: subject matches, issuer not trusted\n")
realm = K5Realm(krb5_conf=unanchored_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxysubjectpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: untrusted issuer, host name matches subjectAltName.
mark('untrusted issuer, hostname SAN match')
output("running pass 3: subjectAltName matches, issuer not trusted\n")
realm = K5Realm(krb5_conf=unanchored_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxysanpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: untrusted issuer, certificate signature is bad.
mark('untrusted issuer, bad signature')
output("running pass 4: subject matches, issuer not trusted\n")
realm = K5Realm(krb5_conf=unanchored_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxybadpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: trusted issuer but hostname doesn't match.
mark('trusted issuer, hostname mismatch')
output("running pass 5: issuer trusted but hostname doesn't match\n")
realm = K5Realm(krb5_conf=anchored_name_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxywrongpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subject.
mark('trusted issuer, hostname subject match')
output("running pass 6: issuer trusted, subject matches\n")
realm = K5Realm(krb5_conf=anchored_name_krb5_conf, start_kadmind=True,
get_creds=False)
proxy = start_proxy(realm, proxysubjectpem)
realm.kinit(realm.user_princ, password=password('user'))
realm.run([kvno, realm.host_princ])
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subjectAltName.
mark('trusted issuer, hostname SAN match')
output("running pass 7: issuer trusted, subjectAltName matches\n")
realm = K5Realm(krb5_conf=anchored_name_krb5_conf, start_kadmind=True,
get_creds=False)
proxy = start_proxy(realm, proxysanpem)
realm.kinit(realm.user_princ, password=password('user'))
realm.run([kvno, realm.host_princ])
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
# Fail: certificate signature is bad.
mark('bad signature')
output("running pass 8: issuer trusted and subjectAltName matches, sig bad\n")
realm = K5Realm(krb5_conf=anchored_name_krb5_conf,
get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxybadpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: trusted issuer but IP doesn't match.
mark('trusted issuer, IP mismatch')
output("running pass 9: issuer trusted but no name matches IP\n")
realm = K5Realm(krb5_conf=anchored_ipv4_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxywrongpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Fail: trusted issuer, but subject does not match.
mark('trusted issuer, IP mismatch (hostname in subject)')
output("running pass 10: issuer trusted, but subject does not match IP\n")
realm = K5Realm(krb5_conf=anchored_ipv4_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxysubjectpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subjectAltName.
mark('trusted issuer, IP SAN match')
output("running pass 11: issuer trusted, subjectAltName matches IP\n")
realm = K5Realm(krb5_conf=anchored_ipv4_krb5_conf, start_kadmind=True,
get_creds=False)
proxy = start_proxy(realm, proxysanpem)
realm.kinit(realm.user_princ, password=password('user'))
realm.run([kvno, realm.host_princ])
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
# Fail: certificate signature is bad.
mark('bad signature (IP hostname)')
output("running pass 12: issuer trusted, names don't match, signature bad\n")
realm = K5Realm(krb5_conf=anchored_ipv4_krb5_conf, get_creds=False,
create_host=False)
proxy = start_proxy(realm, proxybadpem)
realm.kinit(realm.user_princ, password=password('user'), expected_code=1)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subject, using kadmin
# configuration to find kpasswdd.
mark('trusted issuer, hostname subject match (kadmin)')
output("running pass 13: issuer trusted, subject matches\n")
realm = K5Realm(krb5_conf=anchored_kadmin_krb5_conf, start_kadmind=True,
get_creds=False, create_host=False)
proxy = start_proxy(realm, proxysubjectpem)
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subjectAltName, using
# kadmin configuration to find kpasswdd.
mark('trusted issuer, hostname SAN match (kadmin)')
output("running pass 14: issuer trusted, subjectAltName matches\n")
realm = K5Realm(krb5_conf=anchored_kadmin_krb5_conf, start_kadmind=True,
get_creds=False, create_host=False)
proxy = start_proxy(realm, proxysanpem)
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
# Succeed: trusted issuer and host name matches subjectAltName (give or take
# case).
mark('trusted issuer, hostname SAN case-insensitive match')
output("running pass 15: issuer trusted, subjectAltName case-insensitive\n")
realm = K5Realm(krb5_conf=anchored_upcasename_krb5_conf, start_kadmind=True,
get_creds=False, create_host=False)
proxy = start_proxy(realm, proxysanpem)
realm.run([kpasswd, realm.user_princ], input=kpasswd_input)
stop_daemon(proxy)
realm.stop()
success('MS-KKDCP proxy')
| 43.09607
| 78
| 0.704124
|
8a9ebb7da56f301be015af6363fcaa5ebdbaa02c
| 5,545
|
py
|
Python
|
pypyr/pipelinerunner.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | 1
|
2021-12-30T20:47:18.000Z
|
2021-12-30T20:47:18.000Z
|
pypyr/pipelinerunner.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | null | null | null |
pypyr/pipelinerunner.py
|
mofm/pypyr
|
f417f69ba9a607d8a93019854105cfbc4dc9c36d
|
[
"Apache-2.0"
] | null | null | null |
"""pypyr pipeline runner.
This is the entrypoint for the pypyr API.
Use run() to run a pipeline.
"""
# can remove __future__ once py 3.10 the lowest supported version
from __future__ import annotations
import logging
from os import PathLike
from pypyr.context import Context
from pypyr.pipeline import Pipeline
logger = logging.getLogger(__name__)
def run(
pipeline_name: str,
args_in: list[str] | None = None,
parse_args: bool | None = None,
dict_in: dict | None = None,
groups: list[str] | None = None,
success_group: str | None = None,
failure_group: str | None = None,
loader: str | None = None,
py_dir: str | bytes | PathLike | None = None
) -> Context:
"""Run a pipeline. pypyr's entrypoint.
Call me if you want to run a pypyr pipeline from your own code.
If you want to run a pipeline exactly like the cli does, use args_in to
pass a list of str arguments for the pipeline's context_parser. If you
already have a dict-like structure you want to use to initialize context,
use dict_in instead. If you provide dict_in and no args_in, pypyr will
assume you mean not to run the context_parser on the pipeline
(parse_args=False) - if you do want to run the context_parser in this case,
explicitly set parse_args=True.
If you're invoking pypyr from your own application via the API, it's your
responsibility to set up and configure logging. If you just want to
replicate the log handlers & formatters that the pypyr cli uses, you can
call pypyr.log.logger.set_root_logger() once and only once before invoking
run() for every pipeline you want to run.
Be aware that pypyr adds a NOTIFY - 25 custom log-level and notify()
function to logging.
{pipeline_name}.yaml should resolve from the current working directory if
you are using the default file loader.
You only need to specify py_dir if your pipeline relies on custom modules
that are NOT installed in the current Python environment. For convenience,
pypyr allows pipeline authors to use ad hoc python modules that are not
installed in the current environment by looking for these in py_dir 1st.
Regardless of whether you set py_dir or not, be aware that if you are using
the default file loader, pypyr will also add the pipeline's immediate
parent directory to sys.path (only if it's not been added already), so that
each pipeline can reference ad hoc modules relative to itself in the
filesystem.
Therefore you do NOT need to set py_dir if your ad hoc custom modules are
relative to the pipeline itself.
If your pipelines are only using built-in functionality, you don't need to
set py_dir.
Example: Run ./dir/pipe-name.yaml, resolve ad hoc custom modules from the
current directory and initialize context with dict {'a': 'b'}:
context = run('dir/pipe-name', dict_in={'a': 'b'}, py_dir=Path.cwd())
Args:
pipeline_name (str): Name of pipeline, sans .yaml at end.
args_in (list[str]): All the input arguments after the pipeline name
from cli.
parse_args (bool): run context_parser in pipeline. Default True.
dict_in (dict): Dict-like object to initialize the Context.
groups: (list[str]): Step-group names to run in pipeline.
Default is ['steps'].
success_group (str): Step-group name to run on success completion.
Default is on_success.
failure_group: (str): Step-group name to run on pipeline failure.
Default is on_failure.
loader (str): optional. Absolute name of pipeline loader module.
If not specified will use pypyr.loaders.file.
py_dir (Path-like): Custom python modules resolve from this dir.
Returns:
pypyr.context.Context(): The pypyr context as it is after the pipeline
completes.
"""
logger.debug("starting pypyr")
parse_input = _get_parse_input(parse_args=parse_args,
args_in=args_in,
dict_in=dict_in)
context = Context(dict_in) if dict_in else Context()
pipeline = Pipeline(name=pipeline_name,
context_args=args_in,
parse_input=parse_input,
loader=loader,
groups=groups,
success_group=success_group,
failure_group=failure_group,
py_dir=py_dir)
pipeline.run(context)
logger.debug("pypyr done")
return context
def _get_parse_input(parse_args, args_in, dict_in):
"""Return default for parse_input.
This is to decide if context_parser should run or not.
To make it easy on an API consumer, default behavior is ALWAYS to run
parser UNLESS dict_in initializes context and there is no args_in.
If dict_in specified, but no args_in: False
If dict_in specified, AND args_in too: True
If no dict_in specified, but args_in is: True
If no dict_in AND no args_in: True
If parse_args explicitly set, always honor its value.
Args:
parse_args (bool): Whether to run context parser.
args_in (list[str]): String arguments as passed from the cli.
dict_in (dict): Initialize context with this dict.
Returns:
Boolean. True if should parse input.
"""
if parse_args is None:
return not (args_in is None and dict_in is not None)
return parse_args
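# --- Illustrative sketch (not part of the original module) ---
# The truth table documented in _get_parse_input(), spelled out as assertions.
# Argument values are arbitrary placeholders.
def _parse_input_examples():
    assert _get_parse_input(None, None, {'a': 'b'}) is False    # dict_in only: skip parser
    assert _get_parse_input(None, ['a=b'], {'a': 'b'}) is True  # dict_in + args_in: run parser
    assert _get_parse_input(None, ['a=b'], None) is True        # args_in only: run parser
    assert _get_parse_input(None, None, None) is True           # neither given: run parser
    assert _get_parse_input(False, ['a=b'], None) is False      # explicit parse_args always wins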
| 38.241379
| 79
| 0.676826
|
3292ba661c9beb7131e7285f0de31ecb1e6e3e19
| 480
|
py
|
Python
|
Intermediate/sending.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2021-12-17T11:03:13.000Z
|
2021-12-17T11:03:13.000Z
|
Intermediate/sending.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2020-02-05T00:14:43.000Z
|
2020-02-06T09:22:49.000Z
|
Intermediate/sending.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Infinite looper with send."""
def infinite_looper(objects):
"""Loop indefinitely."""
count = 0
while True:
if count >= len(objects):
count = 0
message = yield objects[count]
if message is not None:
count = 0 if message < 0 else message
else:
count += 1
X = infinite_looper("A string with some words")
print(next(X))
print(X.send(9))
print(X.send(12))
print(X.send(-10))
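# Expected output of the four calls above: 'A' (initial position), then 'w'
# (send(9) jumps to index 9), 'h' (index 12), and 'A' again, because a
# negative value sent to the generator resets the position to 0.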
| 20.869565
| 49
| 0.577083
|
8afe9e58590fc7520b54516894082d7dd1aa3d3a
| 303
|
py
|
Python
|
configs/hpt-pretrain/resisc-ucmerced/imagenet_r50_supervised_resisc_50000it_basetrain/5000-iters.py
|
Berkeley-Data/OpenSelfSup
|
221191b88d891de57725b149caf237ffef72e529
|
[
"Apache-2.0"
] | null | null | null |
configs/hpt-pretrain/resisc-ucmerced/imagenet_r50_supervised_resisc_50000it_basetrain/5000-iters.py
|
Berkeley-Data/OpenSelfSup
|
221191b88d891de57725b149caf237ffef72e529
|
[
"Apache-2.0"
] | 6
|
2021-03-11T05:35:54.000Z
|
2021-04-03T22:25:11.000Z
|
configs/hpt-pretrain/resisc-ucmerced/imagenet_r50_supervised_resisc_50000it_basetrain/5000-iters.py
|
Berkeley-Data/OpenSelfSup
|
221191b88d891de57725b149caf237ffef72e529
|
[
"Apache-2.0"
] | 1
|
2021-07-04T00:47:46.000Z
|
2021-07-04T00:47:46.000Z
|
_base_="../base-resisc-ucmerced-config.py"
# this will merge with the parent
model=dict(pretrained='work_dirs/hpt-pretrain/resisc/imagenet_r50_supervised_basetrain/50000-iters/imagenet_r50_supervised_resisc_50000it.pth')
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
| 33.666667
| 143
| 0.838284
|
6a3ec4b49d6d7efc3b1c93e2bf9f9469544c47a9
| 13,345
|
py
|
Python
|
src/roslibpy/comm/comm_cli.py
|
kikass13/roslibpy
|
e090c01906e076f95d75f9d7890cd55279544746
|
[
"MIT"
] | null | null | null |
src/roslibpy/comm/comm_cli.py
|
kikass13/roslibpy
|
e090c01906e076f95d75f9d7890cd55279544746
|
[
"MIT"
] | null | null | null |
src/roslibpy/comm/comm_cli.py
|
kikass13/roslibpy
|
e090c01906e076f95d75f9d7890cd55279544746
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import logging
import math
from System import Action
from System import Array
from System import ArraySegment
from System import Byte
from System import TimeSpan
from System import Uri
from System import UriBuilder
from System.Net.WebSockets import ClientWebSocket
from System.Net.WebSockets import WebSocketCloseStatus
from System.Net.WebSockets import WebSocketMessageType
from System.Net.WebSockets import WebSocketReceiveResult
from System.Net.WebSockets import WebSocketState
from System.Text import Encoding
from System.Threading import CancellationToken
from System.Threading import CancellationTokenSource
from System.Threading import ManualResetEventSlim
from System.Threading import SemaphoreSlim
from System.Threading import Thread
from System.Threading.Tasks import Task
from ..event_emitter import EventEmitterMixin
from . import RosBridgeException
from . import RosBridgeProtocol
LOGGER = logging.getLogger('roslibpy')
RECEIVE_CHUNK_SIZE = 1024
SEND_CHUNK_SIZE = 1024
class CliRosBridgeProtocol(RosBridgeProtocol):
"""Implements the ROS Bridge protocol on top of CLI WebSockets.
    This implementation is mainly intended for IronPython and makes use of
    the .NET Tasks library for most internal scheduling and cancellation
    signals."""
def __init__(self, factory, socket, *args, **kwargs):
super(CliRosBridgeProtocol, self).__init__(*args, **kwargs)
self.factory = factory
self.socket = socket
# According to docs, exactly one send and one receive is supported on each ClientWebSocket object in parallel.
# https://msdn.microsoft.com/en-us/library/system.net.websockets.clientwebsocket.receiveasync(v=vs.110).aspx
# So we configure the semaphore to allow for 2 concurrent requests
# User-code might still end up in a race if multiple requests are triggered from different threads
self.semaphore = SemaphoreSlim(2)
def on_open(self, task):
"""Triggered when the socket connection has been established.
This will kick-start the listening thread."""
LOGGER.info('Connection to ROS MASTER ready.')
self.factory.ready(self)
self.factory.manager.call_in_thread(self.start_listening)
def receive_chunk_async(self, task_result, context):
"""Handle the reception of a message chuck asynchronously."""
try:
if task_result:
result = task_result.Result
if result.MessageType == WebSocketMessageType.Close:
LOGGER.info('WebSocket connection closed: [Code=%s] Description=%s',
result.CloseStatus, result.CloseStatusDescription)
return self.send_close()
else:
chunk = Encoding.UTF8.GetString(context['buffer'], 0, result.Count)
context['content'].append(chunk)
# Signal the listener thread if we're done parsing chunks
if result.EndOfMessage:
# NOTE: Once we reach the end of the message
# we release the lock (Semaphore)
self.semaphore.Release()
# And signal the manual reset event
context['mre'].Set()
return task_result
# NOTE: We will enter the lock (Semaphore) at the start of receive
# to make sure we're accessing the socket read/writes at most from
# two threads, one for receiving and one for sending
if not task_result:
self.semaphore.Wait(self.factory.manager.cancellation_token)
receive_task = self.socket.ReceiveAsync(ArraySegment[Byte](
context['buffer']), self.factory.manager.cancellation_token)
receive_task.ContinueWith.Overloads[Action[Task[WebSocketReceiveResult], object], object](
self.receive_chunk_async, context)
except Exception:
error_message = 'Exception on receive_chunk_async, processing will be aborted'
if task_result:
error_message += '; Task status: {}, Inner exception: {}'.format(task_result.Status, task_result.Exception)
LOGGER.exception(error_message)
raise
def start_listening(self):
"""Starts listening asynchronously while the socket is open.
The inter-thread synchronization between this and the async
reception threads is sync'd with a manual reset event."""
try:
LOGGER.debug(
'About to start listening, socket state: %s', self.socket.State)
while self.socket and self.socket.State == WebSocketState.Open:
mre = ManualResetEventSlim(False)
content = []
buffer = Array.CreateInstance(Byte, RECEIVE_CHUNK_SIZE)
self.receive_chunk_async(None, dict(
buffer=buffer, content=content, mre=mre))
LOGGER.debug('Waiting for messages...')
try:
mre.Wait(self.factory.manager.cancellation_token)
except SystemError:
                    LOGGER.debug('Cancellation detected on listening thread, exiting...')
break
try:
message_payload = ''.join(content)
LOGGER.debug('Message reception completed|<pre>%s</pre>', message_payload)
self.on_message(message_payload)
except Exception:
                    LOGGER.exception('Exception on start_listening while trying to handle the received message. ' +
                                     'It could indicate a bug in user code on message handlers. Message skipped.')
except Exception:
LOGGER.exception(
'Exception on start_listening, processing will be aborted')
raise
finally:
LOGGER.debug('Leaving the listening thread')
def send_close(self):
"""Trigger the closure of the websocket indicating normal closing process."""
if self.socket:
close_task = self.socket.CloseAsync(
WebSocketCloseStatus.NormalClosure, '', CancellationToken.None) # noqa: E999 (disable flake8 error, which incorrectly parses None as the python keyword)
self.factory.emit('close', self)
# NOTE: Make sure reconnets are possible.
# Reconnection needs to be handled on a higher layer.
return close_task
def send_chunk_async(self, task_result, message_data):
"""Send a message chuck asynchronously."""
try:
if not task_result:
self.semaphore.Wait(self.factory.manager.cancellation_token)
message_buffer, message_length, chunks_count, i = message_data
offset = SEND_CHUNK_SIZE * i
is_last_message = (i == chunks_count - 1)
if is_last_message:
count = message_length - offset
else:
count = SEND_CHUNK_SIZE
message_chunk = ArraySegment[Byte](message_buffer, offset, count)
LOGGER.debug('Chunk %d of %d|From offset=%d, byte count=%d, Is last=%s',
i + 1, chunks_count, offset, count, str(is_last_message))
task = self.socket.SendAsync(
message_chunk, WebSocketMessageType.Text, is_last_message, self.factory.manager.cancellation_token)
if not is_last_message:
task.ContinueWith(self.send_chunk_async, [
message_buffer, message_length, chunks_count, i + 1])
else:
# NOTE: If we've reached the last chunck of the message
# we can release the lock (Semaphore) again.
task.ContinueWith(lambda _res: self.semaphore.Release())
return task
except Exception:
LOGGER.exception('Exception while on send_chunk_async')
raise
def send_message(self, payload):
"""Start sending a message over the websocket asynchronously."""
if self.socket.State != WebSocketState.Open:
raise RosBridgeException(
'Connection is not open. Socket state: %s' % self.socket.State)
try:
message_buffer = Encoding.UTF8.GetBytes(payload)
message_length = len(message_buffer)
chunks_count = int(math.ceil(float(message_length) / SEND_CHUNK_SIZE))
send_task = self.send_chunk_async(
None, [message_buffer, message_length, chunks_count, 0])
return send_task
except Exception:
LOGGER.exception('Exception while sending message')
raise
def dispose(self, *args):
"""Dispose the resources held by this protocol instance, i.e. socket."""
self.factory.manager.terminate()
if self.socket:
self.socket.Dispose()
self.socket = None
LOGGER.debug('Websocket disposed')
def __del__(self):
"""Dispose correctly the connection."""
self.dispose()
class CliRosBridgeClientFactory(EventEmitterMixin):
"""Factory to create instances of the ROS Bridge protocol built on top of .NET WebSockets."""
def __init__(self, url, *args, **kwargs):
super(CliRosBridgeClientFactory, self).__init__(*args, **kwargs)
self._manager = CliEventLoopManager()
self.proto = None
self.url = url
@property
def is_connected(self):
"""Indicate if the WebSocket connection is open or not.
Returns:
bool: True if WebSocket is connected, False otherwise.
"""
return self.proto and self.proto.socket and self.proto.socket.State == WebSocketState.Open
def connect(self):
"""Establish WebSocket connection to the ROS server defined for this factory.
Returns:
async_task: The async task for the connection.
"""
LOGGER.debug('Started to connect...')
socket = ClientWebSocket()
socket.Options.KeepAliveInterval = TimeSpan.FromSeconds(5)
connect_task = socket.ConnectAsync(
self.url, self.manager.cancellation_token)
protocol = CliRosBridgeProtocol(self, socket)
connect_task.ContinueWith(protocol.on_open)
return connect_task
def ready(self, proto):
self.proto = proto
self.emit('ready', proto)
def on_ready(self, callback):
if self.proto:
callback(self.proto)
else:
self.once('ready', callback)
@property
def manager(self):
"""Get an instance of the event loop manager for this factory."""
return self._manager
@classmethod
def create_url(cls, host, port=None, is_secure=False):
if port is None:
return Uri(host)
else:
scheme = 'wss' if is_secure else 'ws'
builder = UriBuilder(scheme, host, port)
return builder.Uri
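# Illustrative uses of create_url (host and port here are made up):
#   CliRosBridgeClientFactory.create_url('127.0.0.1', 9090)        # ws URI for 127.0.0.1:9090
#   CliRosBridgeClientFactory.create_url('127.0.0.1', 9090, True)  # wss URI for 127.0.0.1:9090
#   CliRosBridgeClientFactory.create_url('ws://127.0.0.1:9090')    # URI parsed exactly as given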
class CliEventLoopManager(object):
"""Manage the main event loop using .NET threads.
For the time being, this implementation is pretty light
and mostly relies on .NET async doing "the right thing(tm)"
with a sprinkle of threading here and there.
"""
def __init__(self):
self._init_cancellation()
self._disconnect_event = ManualResetEventSlim(False)
def _init_cancellation(self):
"""Initialize the cancellation source and token."""
self.cancellation_token_source = CancellationTokenSource()
self.cancellation_token = self.cancellation_token_source.Token
self.cancellation_token.Register(lambda: LOGGER.debug('Started token cancellation'))
def run(self):
"""Kick-starts a non-blocking event loop.
In this implementation, this is a no-op."""
pass
def run_forever(self):
"""Kick-starts a blocking loop while the ROS client is connected."""
self._disconnect_event.Wait(self.cancellation_token)
LOGGER.debug('Received disconnect event on main loop')
def call_later(self, delay, callback):
"""Call the given function after a certain period of time has passed.
Args:
delay (:obj:`int`): Number of seconds to wait before invoking the callback.
callback (:obj:`callable`): Callable function to be invoked when the delay has elapsed.
"""
# NOTE: Maybe there's a more elegant way of doing this
def closure():
Thread.Sleep(delay * 1000)
callback()
Task.Factory.StartNew(closure, self.cancellation_token)
def call_in_thread(self, callback):
"""Call the given function on a thread.
Args:
callback (:obj:`callable`): Callable function to be invoked in a thread.
"""
Task.Factory.StartNew(callback, self.cancellation_token)
def terminate(self):
"""Signals the termination of the main event loop."""
self._disconnect_event.Set()
if self.cancellation_token_source:
self.cancellation_token_source.Cancel()
# Renew to allow re-connects
self._init_cancellation()
| 39.25
| 169
| 0.638891
|
54589df1b49c587a653ef88179415c9d1ff34413
| 1,382
|
py
|
Python
|
twitter/direct_message.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
twitter/direct_message.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
twitter/direct_message.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
"""direct_messages"""
from twitter.core import DirectMessageData
class DirectMessages:
"""direct_messages"""
def __init__(self, twitter):
self.twitter = twitter
def get(self, params):
"""Hello"""
url = "direct_messages"
result = self.twitter.get(url, params=params)
result.data = [DirectMessageData(url, text) for text in result.texts]
return result
def sent(self, params):
"""Hello"""
url = "/".join(["direct_messages", "sent"])
result = self.twitter.get(url, params=params)
result.data = [DirectMessageData(url, text) for text in result.texts]
return result
def show(self, params):
"""Hello"""
url = "/".join(["direct_messages", "show"])
result = self.twitter.get(url, params=params)
result.data = [DirectMessageData(url, result.texts)]
return result
def destroy(self, params):
"""Hello"""
url = "/".join(["direct_messages", "destroy"])
result = self.twitter.post(url, params=params)
result.data = [DirectMessageData(url, result.texts)]
return result
def new(self, params):
"""Hello"""
url = "/".join(["direct_messages", "new"])
result = self.twitter.post(url, params=params)
result.data = [DirectMessageData(url, result.texts)]
return result
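# Minimal usage sketch. It assumes an already-configured client object named
# `twitter` that exposes the get/post methods used above; the parameter names
# follow the Twitter REST API and are illustrative, not defined in this module:
#   dm = DirectMessages(twitter)
#   inbox = dm.get({'count': 20})
#   outbox = dm.sent({'count': 20})
#   dm.new({'screen_name': 'someone', 'text': 'hello'})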
| 36.368421
| 77
| 0.600579
|
beb5fc7736a00a037c2b289e0d9642bcded557ae
| 11,571
|
py
|
Python
|
auctions/views.py
|
nmk0462/commerce
|
a5dee3a63d986bf8630cccc859f372133d5084f9
|
[
"MIT"
] | 14
|
2020-07-26T07:45:54.000Z
|
2022-03-31T00:05:58.000Z
|
auctions/views.py
|
nmk0462/commerce
|
a5dee3a63d986bf8630cccc859f372133d5084f9
|
[
"MIT"
] | null | null | null |
auctions/views.py
|
nmk0462/commerce
|
a5dee3a63d986bf8630cccc859f372133d5084f9
|
[
"MIT"
] | 17
|
2020-07-20T06:22:12.000Z
|
2021-04-12T11:12:09.000Z
|
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render,redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from .models import User,Bid,Listing,Comment,Watchlist,Closedbid,Alllisting
from datetime import datetime
def index(request):
items=Listing.objects.all()
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request, "auctions/index.html",{
"items":items,
"wcount":wcount
})
def categories(request):
items=Listing.objects.raw("SELECT * FROM auctions_listing GROUP BY category")
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/categpage.html",{
"items": items,
"wcount":wcount
})
def category(request,category):
catitems = Listing.objects.filter(category=category)
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/category.html",{
"items":catitems,
"cat":category,
"wcount":wcount
})
def create(request):
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/create.html",{
"wcount":wcount
})
def submit(request):
if request.method == "POST":
listtable = Listing()
now = datetime.now()
dt = now.strftime(" %d %B %Y %X ")
listtable.owner = request.user.username
listtable.title = request.POST.get('title')
listtable.description = request.POST.get('description')
listtable.price = request.POST.get('price')
listtable.category = request.POST.get('category')
if request.POST.get('link'):
listtable.link = request.POST.get('link')
else :
listtable.link = "https://wallpaperaccess.com/full/1605486.jpg"
listtable.time = dt
listtable.save()
all = Alllisting()
items = Listing.objects.all()
for i in items:
try:
if Alllisting.objects.get(listingid=i.id):
pass
except:
all.listingid=i.id
all.title = i.title
all.description = i.description
all.link = i.link
all.save()
return redirect('index')
else:
return redirect('index')
def listingpage(request,id):
try:
item = Listing.objects.get(id=id)
except:
return redirect('index')
try:
comments = Comment.objects.filter(listingid=id)
except:
comments = None
if request.user.username:
try:
if Watchlist.objects.get(user=request.user.username,listingid=id):
added=True
except:
added = False
try:
l = Listing.objects.get(id=id)
if l.owner == request.user.username :
owner=True
else:
owner=False
except:
return redirect('index')
else:
added=False
owner=False
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/listingpage.html",{
"i":item,
"error":request.COOKIES.get('error'),
"errorgreen":request.COOKIES.get('errorgreen'),
"comments":comments,
"added":added,
"owner":owner,
"wcount":wcount
})
def bidsubmit(request,listingid):
current_bid = Listing.objects.get(id=listingid)
current_bid=current_bid.price
if request.method == "POST":
user_bid = int(request.POST.get("bid"))
if user_bid > current_bid:
listing_items = Listing.objects.get(id=listingid)
listing_items.price = user_bid
listing_items.save()
try:
if Bid.objects.filter(id=listingid):
bidrow = Bid.objects.filter(id=listingid)
bidrow.delete()
bidtable = Bid()
bidtable.user=request.user.username
bidtable.title = listing_items.title
bidtable.listingid = listingid
bidtable.bid = user_bid
bidtable.save()
except:
bidtable = Bid()
bidtable.user=request.user.username
bidtable.title = listing_items.title
bidtable.listingid = listingid
bidtable.bid = user_bid
bidtable.save()
response = redirect('listingpage',id=listingid)
response.set_cookie('errorgreen','bid successful!!!',max_age=3)
return response
else :
response = redirect('listingpage',id=listingid)
response.set_cookie('error','Bid should be greater than current price',max_age=3)
return response
else:
return redirect('index')
def cmntsubmit(request,listingid):
if request.method == "POST":
now = datetime.now()
dt = now.strftime(" %d %B %Y %X ")
c = Comment()
c.comment = request.POST.get('comment')
c.user = request.user.username
c.time = dt
c.listingid = listingid
c.save()
return redirect('listingpage',id=listingid)
else :
return redirect('index')
def addwatchlist(request,listingid):
if request.user.username:
w = Watchlist()
w.user = request.user.username
w.listingid = listingid
w.save()
return redirect('listingpage',id=listingid)
else:
return redirect('index')
def removewatchlist(request,listingid):
if request.user.username:
try:
w = Watchlist.objects.get(user=request.user.username,listingid=listingid)
w.delete()
return redirect('listingpage',id=listingid)
except:
return redirect('listingpage',id=listingid)
else:
return redirect('index')
def watchlistpage(request,username):
if request.user.username:
try:
w = Watchlist.objects.filter(user=username)
items = []
for i in w:
items.append(Listing.objects.filter(id=i.listingid))
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/watchlistpage.html",{
"items":items,
"wcount":wcount
})
except:
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/watchlistpage.html",{
"items":None,
"wcount":wcount
})
else:
return redirect('index')
def closebid(request,listingid):
if request.user.username:
try:
listingrow = Listing.objects.get(id=listingid)
except:
return redirect('index')
cb = Closedbid()
title = listingrow.title
cb.owner = listingrow.owner
cb.listingid = listingid
try:
bidrow = Bid.objects.get(listingid=listingid,bid=listingrow.price)
cb.winner = bidrow.user
cb.winprice = bidrow.bid
cb.save()
bidrow.delete()
except:
cb.winner = listingrow.owner
cb.winprice = listingrow.price
cb.save()
try:
if Watchlist.objects.filter(listingid=listingid):
watchrow = Watchlist.objects.filter(listingid=listingid)
watchrow.delete()
else:
pass
except:
pass
try:
crow = Comment.objects.filter(listingid=listingid)
crow.delete()
except:
pass
try:
brow = Bid.objects.filter(listingid=listingid)
brow.delete()
except:
pass
try:
cblist=Closedbid.objects.get(listingid=listingid)
except:
cb.owner = listingrow.owner
cb.winner = listingrow.owner
cb.listingid = listingid
cb.winprice = listingrow.price
cb.save()
cblist=Closedbid.objects.get(listingid=listingid)
listingrow.delete()
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,"auctions/winningpage.html",{
"cb":cblist,
"title":title,
"wcount":wcount
})
else:
return redirect('index')
def mywinnings(request):
if request.user.username:
items=[]
try:
wonitems = Closedbid.objects.filter(winner=request.user.username)
for w in wonitems:
items.append(Alllisting.objects.filter(listingid=w.listingid))
except:
wonitems = None
items = None
try:
w = Watchlist.objects.filter(user=request.user.username)
wcount=len(w)
except:
wcount=None
return render(request,'auctions/mywinnings.html',{
"items":items,
"wcount":wcount,
"wonitems":wonitems
})
else:
return redirect('index')
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "auctions/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "auctions/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "auctions/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "auctions/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "auctions/register.html")
| 31.188679
| 93
| 0.566762
|
c67cb2bf07b7878ad6b6c2c0426dd128f3b75435
| 166
|
py
|
Python
|
models3CwProject/models3CwProblem1App/views.py
|
cs-fullstack-2019-spring/django-models3-cw-MelaatiJ
|
cba6396f46f959b9b89fe22430de541aee164e60
|
[
"Apache-2.0"
] | null | null | null |
models3CwProject/models3CwProblem1App/views.py
|
cs-fullstack-2019-spring/django-models3-cw-MelaatiJ
|
cba6396f46f959b9b89fe22430de541aee164e60
|
[
"Apache-2.0"
] | null | null | null |
models3CwProject/models3CwProblem1App/views.py
|
cs-fullstack-2019-spring/django-models3-cw-MelaatiJ
|
cba6396f46f959b9b89fe22430de541aee164e60
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from .models import Book
# NOTE: these two view stubs originally had empty bodies (a syntax error); the
# minimal bodies below are assumptions added so the module can at least import.
def printAll():
    return HttpResponse(Book.objects.all())
def filtergte():
    return HttpResponse(Book.objects.filter(pk__gte=1))
| 10.375
| 36
| 0.753012
|
11977ff74b22671df42367f516ff86357300e1a4
| 782
|
py
|
Python
|
tests/keras2onnx_applications/model_source/densenet_1/tensorflow_backend.py
|
pbeukema/tensorflow-onnx
|
a8d5a3cc72d24ca18d64572588ad06490940a230
|
[
"Apache-2.0"
] | 1,473
|
2018-03-16T02:47:33.000Z
|
2022-03-31T03:43:52.000Z
|
tests/keras2onnx_applications/model_source/densenet_1/tensorflow_backend.py
|
pbeukema/tensorflow-onnx
|
a8d5a3cc72d24ca18d64572588ad06490940a230
|
[
"Apache-2.0"
] | 1,208
|
2018-03-14T09:58:49.000Z
|
2022-03-31T17:56:20.000Z
|
tests/keras2onnx_applications/model_source/densenet_1/tensorflow_backend.py
|
pbeukema/tensorflow-onnx
|
a8d5a3cc72d24ca18d64572588ad06490940a230
|
[
"Apache-2.0"
] | 350
|
2018-04-03T03:48:40.000Z
|
2022-03-30T11:23:55.000Z
|
# SPDX-License-Identifier: Apache-2.0
# From https://github.com/titu1994/DenseNet/blob/master/tensorflow_backend.py
# Modifications Copyright (c) Microsoft.
import tensorflow as tf
from mock_keras2onnx.proto import keras
from keras.backend import tensorflow_backend as KTF
from keras.backend.common import image_data_format
py_all = all
def depth_to_space(input, scale, data_format=None):
''' Uses phase shift algorithm to convert channels/depth for spatial resolution '''
if data_format is None:
data_format = image_data_format()
if data_format == 'channels_first':
data_format = 'NCHW'
else:
data_format = 'NHWC'
data_format = data_format.lower()
out = tf.depth_to_space(input, scale, data_format=data_format)
return out
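# Shape intuition for the wrapper above (numbers are illustrative): with
# scale=2 and channels_last data, an input of shape (N, H, W, C) = (1, 2, 2, 4)
# comes out as (1, 4, 4, 1) -- each group of 2*2=4 channels is rearranged into
# a 2x2 spatial block.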
| 28.962963
| 87
| 0.742967
|
eff0d3faf7c5f0777b777317268bb2c83bc43a5e
| 6,694
|
py
|
Python
|
dialogs/contact_to_infected.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | null | null | null |
dialogs/contact_to_infected.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | null | null | null |
dialogs/contact_to_infected.py
|
Maxwingber/corobot
|
a959e0deba734d3900d7b8a826b3fb56964db4c6
|
[
"MIT"
] | 2
|
2020-03-22T11:38:54.000Z
|
2020-03-24T11:11:56.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import time
from typing import List
from botbuilder.dialogs import (
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
ComponentDialog)
from botbuilder.dialogs.prompts import ChoicePrompt, PromptOptions, ConfirmPrompt, NumberPrompt, DateTimePrompt
from botbuilder.dialogs.choices import Choice, FoundChoice
from botbuilder.core import MessageFactory, UserState
from data_models import UserProfile
class ContactsSelectionDialog(ComponentDialog):
def __init__(self, dialog_id: str = None):
super(ContactsSelectionDialog, self).__init__(
dialog_id or ContactsSelectionDialog.__name__
)
self.add_dialog(
WaterfallDialog(
WaterfallDialog.__name__,
[
self.confirm_confirmedcasecontact_step,
self.date_confirmedcasecontact_step,
self.confirm_suspectedcasecontact_step,
self.date_suspectedcasecontact_step,
self.contacts_dates_step]
)
)
self.add_dialog(ChoicePrompt(ChoicePrompt.__name__))
self.add_dialog(
NumberPrompt(NumberPrompt.__name__)
)
self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__))
self.add_dialog(DateTimePrompt(DateTimePrompt.__name__))
self.initial_dialog_id = WaterfallDialog.__name__
async def confirm_confirmedcasecontact_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
await step_context.context.send_activity(
MessageFactory.text(
"Finden wir heraus, ob Sie engen Kontakt zu einem bestätigten Covid-19-Fall hatten.")
)
#time.sleep(1)
await step_context.context.send_activity(
MessageFactory.text(
f"Als enger Kontakt gilt Kontakt von Angesicht zu Angesicht länger als 15 Minuten, oder direkter, physischer Kontakt (Berührung, Händeschütteln, Küssen), oder Kontakt mit oder Austausch von Körperflüssigkeiten, oder Teilen einer Wohnung.")
)
#time.sleep(2)
return await step_context.prompt(
ChoicePrompt.__name__,
PromptOptions(
choices=[Choice("Ja"), Choice("Nein")],
prompt=MessageFactory.text("Hatten Sie engen Kontakt zu einem **bestätigten Covid-19-Fall**?")
),
)
async def date_confirmedcasecontact_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
print("[DEBUG] Received by German choice prompt: " + step_context.result.value)
if step_context.result.value == "Ja":
# User said "yes" so we will be prompting for the date of their contact.
# WaterfallStep always finishes with the end of the Waterfall or with another dialog,
# here it is a Prompt Dialog.
return await step_context.prompt(
DateTimePrompt.__name__,
PromptOptions(
prompt=MessageFactory.text("An welchem Tag hatten Sie das letzte Mal Kontakt? Bitte nennen Sie es im Format TT.MM.JJJJ (z.B. 03.03.2020)."),
),
)
# User said "no" so we will skip the next step. Give 00000000 as the date and asks whether there was contact to a suspected case.
return await step_context.next(None)
async def confirm_suspectedcasecontact_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
# Store the date of the last contact with a confirmed case, as entered in response to the previous date prompt.
self.FIRST_DATE = "value-firstDate"
if step_context.result:
step_context.values[self.FIRST_DATE] = str(step_context.result[0].value)
else:
step_context.values[self.FIRST_DATE] = None
print("[DEBUG] First date is " + str(step_context.values[self.FIRST_DATE]))
await step_context.context.send_activity(
MessageFactory.text(
"Finden wir heraus, ob Sie engen Kontakt zu einem Covid-19-Verdachtsfall hatten.")
)
#time.sleep(1)
await step_context.context.send_activity(
MessageFactory.text(
f"Als enger Kontakt gilt Kontakt von Angesicht zu Angesicht länger als 15 Minuten, oder direkter, physischer Kontakt (Berührung, Händeschütteln, Küssen), oder Kontakt mit oder Austausch von Körperflüssigkeiten, oder Teilen einer Wohnung.")
)
#time.sleep(2)
return await step_context.prompt(
ChoicePrompt.__name__,
PromptOptions(
choices=[Choice("Ja"), Choice("Nein")],
prompt=MessageFactory.text("Hatten Sie engen Kontakt zu einem **Covid-19-Verdachtsfall**?")
),
)
async def date_suspectedcasecontact_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
print("[DEBUG] Received by German choice prompt: " + step_context.result.value)
if step_context.result.value == "Ja":
# User said "yes" so we will be prompting for the date of their contact.
# WaterfallStep always finishes with the end of the Waterfall or with another dialog,
# here it is a Prompt Dialog.
return await step_context.prompt(
DateTimePrompt.__name__,
PromptOptions(
prompt=MessageFactory.text("An welchem Tag hatten Sie das letzte Mal Kontakt? Bitte nennen Sie es im Format TT.MM.JJJJ (z.B. 03.03.2020)."),
),
)
# User said "no" so we will skip the next step. Give 00000000 as the date and asks whether there was contact to a suspected case.
return await step_context.next(None)
async def contacts_dates_step(
self, step_context: WaterfallStepContext
) -> DialogTurnResult:
# Store the date of the last contact with a suspected case, as entered in response to the previous date prompt.
self.SECOND_DATE = "value-secondDate"
if step_context.result is not None:
step_context.values[self.SECOND_DATE] = str(step_context.result[0].value)
else:
step_context.values[self.SECOND_DATE] = None
print("[DEBUG] Second date is " + str(step_context.values[self.SECOND_DATE]))
dates = [step_context.values[self.FIRST_DATE], step_context.values[self.SECOND_DATE]]
print("[DEBUG] The dates are " + str(dates[0]) + " and " + str(dates[1]))
return await step_context.end_dialog(dates)
| 44.039474
| 255
| 0.655363
|
8f5f488da6ad8704cb411e23fc770e1456f58843
| 1,826
|
py
|
Python
|
lambdas/stepfunctions/CTE_InvokeCreateAccountFn/src/main.py
|
meh485/aws-sdlc-controltower-extension
|
ce08b639bd97a0b017aa67e5d9697b7177e77539
|
[
"Apache-2.0"
] | null | null | null |
lambdas/stepfunctions/CTE_InvokeCreateAccountFn/src/main.py
|
meh485/aws-sdlc-controltower-extension
|
ce08b639bd97a0b017aa67e5d9697b7177e77539
|
[
"Apache-2.0"
] | null | null | null |
lambdas/stepfunctions/CTE_InvokeCreateAccountFn/src/main.py
|
meh485/aws-sdlc-controltower-extension
|
ce08b639bd97a0b017aa67e5d9697b7177e77539
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2021 Amazon Web Services, Inc. or its affiliates. All Rights Reserved.
# This AWS Content is provided subject to the terms of the AWS Customer Agreement
# available at http://aws.amazon.com/agreement or other written agreement between
# Customer and Amazon Web Services, Inc.
import json
import logging
import cfnresponse
import boto3
logging.basicConfig()
logger = logging.getLogger()
logging.getLogger("botocore").setLevel(logging.ERROR)
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""This function will initiate the AWS Step Function for building an AWS Account.
Args:
event (dict): Event information passed in by the CloudFormation from the Custom Resource
context (object): Lambda Function context information
Returns:
N/A
"""
print(json.dumps(event))
response_body = dict()
sfn_client = boto3.client('stepfunctions')
resource_properties = event["ResourceProperties"]
state_machine_arn = resource_properties["CreateAccountSfn"]
if event['RequestType'] == "Delete":
cfnresponse.send(
event=event,
context=context,
responseStatus=cfnresponse.SUCCESS,
responseData=response_body
)
else:
try:
logger.info(f"Invoking State Machine: {state_machine_arn} with input: {event}")
sfn_client.start_execution(
stateMachineArn=state_machine_arn,
input=json.dumps(event)
)
except Exception as e:
logger.error(e, exc_info=True)
response_body['ERROR'] = str(e)
cfnresponse.send(
event=event,
context=context,
responseStatus=cfnresponse.FAILED,
responseData=response_body
)
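# Illustrative custom-resource event shape consumed by lambda_handler (values
# are placeholders, not real resources):
#   {
#     "RequestType": "Create",
#     "ResourceProperties": {
#       "CreateAccountSfn": "arn:aws:states:us-east-1:123456789012:stateMachine:CTE-CreateAccount"
#     },
#     "ResponseURL": "...", "StackId": "...", "RequestId": "...", "LogicalResourceId": "..."
#   }
# A "Delete" RequestType simply reports SUCCESS back to CloudFormation without
# starting the state machine.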
| 31.482759
| 96
| 0.653341
|
d0073ef87352146884b7f9d622563379f32e3c90
| 3,988
|
py
|
Python
|
tests/test_exceptions.py
|
gouttegd/click-shell
|
a6b4f5c712c569897f4aeb4d76504740e3b63be1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_exceptions.py
|
gouttegd/click-shell
|
a6b4f5c712c569897f4aeb4d76504740e3b63be1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_exceptions.py
|
gouttegd/click-shell
|
a6b4f5c712c569897f4aeb4d76504740e3b63be1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# incenp.click_shell - A shell extension for Click
# Copyright © 2021 Niall Byrne
# Copyright © 2021 Damien Goutte-Gattat
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of click-shell nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import click
import pytest
from unittest.mock import patch, Mock
from incenp.click_shell.exceptions import ClickShellCleanExit, ClickShellUncleanExit
from incenp.click_shell.core import Shell, get_invoke
@pytest.fixture()
def mock_cli_command():
shell = Shell()
@shell.command()
def mock_command():
pass
return mock_command
@patch('click.Command.main')
def test_unclean_exit_default_code(m_main, mock_cli_command):
expected_error_code = ClickShellUncleanExit.default_error_code
m_main.side_effect = ClickShellUncleanExit("Boom!")
invoke = get_invoke(mock_cli_command)
with pytest.raises(SystemExit) as exc:
invoke(Mock(), "mock args")
assert exc.value.args[0] == expected_error_code
@patch('click.Command.main')
def test_unclean_exit_specific_code(m_main, mock_cli_command):
expected_error_code = 127
m_main.side_effect = ClickShellUncleanExit("Boom!", expected_error_code)
invoke = get_invoke(mock_cli_command)
with pytest.raises(SystemExit) as exc:
invoke(Mock(), "mock args")
assert exc.value.args[0] == expected_error_code
@patch('click.Command.main')
def test_clean_exit(m_main, mock_cli_command):
m_main.side_effect = ClickShellCleanExit("Boom!")
invoke = get_invoke(mock_cli_command)
with pytest.raises(SystemExit) as exc:
invoke(Mock(), "mock args")
assert exc.value.args[0] == 0
@patch('click.Command.main')
def test_normal_sys_exit(m_main, mock_cli_command):
m_main.side_effect = SystemExit("Boom!")
invoke = get_invoke(mock_cli_command)
invoke(Mock(), "mock args")
@patch('click.Command.main')
def test_click_exception(m_main, mock_cli_command):
m_main.side_effect = click.ClickException("Boom!")
invoke = get_invoke(mock_cli_command)
invoke(Mock(), "mock args")
@patch('click.Command.main')
def test_click_abort(m_main, mock_cli_command):
m_main.side_effect = click.Abort("Boom!")
invoke = get_invoke(mock_cli_command)
invoke(Mock(), "mock args")
@patch('click.Command.main')
@patch('traceback.print_exception')
def test_regular_exception(m_trace, m_main, mock_cli_command):
m_main.side_effect = Exception("Boom!")
invoke = get_invoke(mock_cli_command)
invoke(Mock(), "mock args")
m_trace.assert_called_once_with(Exception, m_main.side_effect, None)
| 32.422764
| 84
| 0.754012
|
3c61a27331a27e5283b40ddf9da7ea69c2deabc2
| 1,329
|
py
|
Python
|
middleware.py
|
davidbetz/middleware
|
1f6b0dce915099e8ff85ab5f433e70b96e1424a1
|
[
"MIT"
] | null | null | null |
middleware.py
|
davidbetz/middleware
|
1f6b0dce915099e8ff85ab5f433e70b96e1424a1
|
[
"MIT"
] | null | null | null |
middleware.py
|
davidbetz/middleware
|
1f6b0dce915099e8ff85ab5f433e70b96e1424a1
|
[
"MIT"
] | null | null | null |
import types
class Middleware():
def __init__(self, action=None):
self._action = action
def read(self, context, *args):
return [context[_] for _ in args]
def write(self, context, **kwargs):
context.update(kwargs)
def execute(self, mwa, context):
if self._action is not None:
self._action(mwa, context)
else:
self.process(mwa, context)
class Handler():
def __init__(self, **kwargs):
self.middleware_array = []
self._context = kwargs or {}
def __getitem__(self, name):
try:
return self._context[name]
except:
return None
def __setitem__(self, name, value):
self._context[name] = value
def set(self, middleware_array):
for middleware in middleware_array:
self.add(middleware)
def add(self, middleware):
if isinstance(middleware, types.FunctionType):
self.middleware_array.append(middleware)
else:
self.middleware_array.append(middleware().create())
def execute(self):
iteration = iter(self.middleware_array)
try:
wm = next(iteration)
while wm is not None:
wm = wm(iteration, self._context)
except StopIteration:
pass
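# Usage sketch inferred from Handler.execute: each middleware receives the
# iterator over the remaining middlewares plus the shared context, and returns
# next(iterator) to hand control onward (returning None ends the chain).
# The function names below are illustrative only:
#   def first(mwa, context):
#       context['step'] = 1
#       return next(mwa)      # continue with the next middleware
#   def second(mwa, context):
#       context['step'] += 1  # context is now {'step': 2}; returning None ends the chain
#   handler = Handler()
#   handler.set([first, second])
#   handler.execute()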
| 25.557692
| 63
| 0.585403
|
c79f72116981a076fae5d5414fea2ba429a637cb
| 10,369
|
py
|
Python
|
estimators/tabular_bayes_dice.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
estimators/tabular_bayes_dice.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
estimators/tabular_bayes_dice.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Text, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class TabularBayesDice(object):
"""Robust policy evaluation."""
def __init__(self,
dataset_spec,
gamma: Union[float, tf.Tensor],
reward_fn: Callable = None,
solve_for_state_action_ratio: bool = True,
nu_learning_rate: Union[float, tf.Tensor] = 0.1,
zeta_learning_rate: Union[float, tf.Tensor] = 0.1,
kl_regularizer: Union[float, tf.Tensor] = 1.,
eps_std: Union[float, tf.Tensor] = 1):
"""Initializes the solver.
Args:
dataset_spec: The spec of the dataset that will be given.
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
solve_for_state_action_ratio: Whether to solve for state-action density
ratio. Defaults to True. When solving an environment with a large
state/action space (e.g. taxi), it is better to set this to False to
avoid OOM issues.
nu_learning_rate: Learning rate for nu.
zeta_learning_rate: Learning rate for zeta.
kl_regularizer: Regularization constant for D_kl(q || p).
eps_std: epsilon standard deviation for sampling from the posterior.
"""
self._dataset_spec = dataset_spec
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._kl_regularizer = kl_regularizer
self._eps_std = eps_std
self._solve_for_state_action_ratio = solve_for_state_action_ratio
if (not self._solve_for_state_action_ratio and
not self._dataset_spec.has_log_probability()):
raise ValueError('Dataset must contain log-probability when '
'solve_for_state_action_ratio is False.')
# Get number of states/actions.
observation_spec = self._dataset_spec.observation
action_spec = self._dataset_spec.action
if not common_lib.is_categorical_spec(observation_spec):
raise ValueError('Observation spec must be discrete and bounded.')
self._num_states = observation_spec.maximum + 1
if not common_lib.is_categorical_spec(action_spec):
raise ValueError('Action spec must be discrete and bounded.')
self._num_actions = action_spec.maximum + 1
self._dimension = (
self._num_states * self._num_actions
if self._solve_for_state_action_ratio else self._num_states)
self._td_residuals = np.zeros([self._dimension, self._dimension])
self._total_weights = np.zeros([self._dimension])
self._initial_weights = np.zeros([self._dimension])
self._nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate)
self._zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate)
# Initialize variational Bayes parameters
self._nu_mu = tf.Variable(tf.zeros([self._dimension]))
self._nu_log_sigma = tf.Variable(tf.zeros([self._dimension]))
self._prior_mu = tf.Variable(tf.zeros([self._dimension]), trainable=True)
self._prior_log_sigma = tf.Variable(
tf.zeros([self._dimension]), trainable=False)
def _get_index(self, state, action):
if self._solve_for_state_action_ratio:
return state * self._num_actions + action
else:
return state
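# Illustrative indexing example: with num_states = 5 and num_actions = 3,
# state 2 / action 1 maps to flat index 2 * 3 + 1 = 7 when solving for the
# state-action ratio, and simply to the state index 2 otherwise.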
def prepare_dataset(self, dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy):
episodes, valid_steps = dataset.get_all_episodes()
tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
for episode_num in range(tf.shape(valid_steps)[0]):
# Precompute probabilities for this episode.
this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
first_step = tf.nest.map_structure(lambda t: t[0], this_episode)
this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
this_episode)
episode_target_log_probabilities = target_policy.distribution(
this_tfagents_episode).action.log_prob(this_episode.action)
episode_target_probs = target_policy.distribution(
this_tfagents_episode).action.probs_parameter()
for step_num in range(tf.shape(valid_steps)[1] - 1):
this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
episodes)
next_step = tf.nest.map_structure(
lambda t: t[episode_num, step_num + 1], episodes)
if this_step.is_last() or not valid_steps[episode_num, step_num]:
continue
weight = 1.0
nu_index = self._get_index(this_step.observation, this_step.action)
self._td_residuals[nu_index, nu_index] += -weight
self._total_weights[nu_index] += weight
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
this_step.get_log_probability())
# Need to weight next nu by importance weight.
next_weight = (
weight if self._solve_for_state_action_ratio else policy_ratio *
weight)
next_probs = episode_target_probs[step_num + 1]
for next_action, next_prob in enumerate(next_probs):
next_nu_index = self._get_index(next_step.observation, next_action)
self._td_residuals[next_nu_index, nu_index] += (
next_prob * self._gamma * next_weight)
initial_probs = episode_target_probs[0]
for initial_action, initial_prob in enumerate(initial_probs):
initial_nu_index = self._get_index(first_step.observation,
initial_action)
self._initial_weights[initial_nu_index] += weight * initial_prob
self._initial_weights = tf.cast(self._initial_weights, tf.float32)
self._total_weights = tf.cast(self._total_weights, tf.float32)
self._td_residuals = self._td_residuals / np.sqrt(
1e-8 + self._total_weights)[None, :]
self._td_errors = tf.cast(
np.dot(self._td_residuals, self._td_residuals.T), tf.float32)
self._td_residuals = tf.cast(self._td_residuals, tf.float32)
@tf.function
def train_step(self, regularizer: float = 1e-6):
# Solve primal form min (1-g) * E[nu0] + E[(B nu - nu)^2].
with tf.GradientTape() as tape:
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(tf.shape(nu_sigma), 0, self._eps_std)
nu = self._nu_mu + nu_sigma * eps
init_nu_loss = tf.einsum('m,m', (1 - self._gamma) * self._initial_weights,
nu)
residuals = tf.einsum('n,nm->m', nu, self._td_residuals)
bellman_loss = 0.5 * tf.einsum('m,m', residuals, residuals)
prior_sigma = tf.sqrt(tf.exp(self._prior_log_sigma))
prior_var = tf.square(prior_sigma)
prior_var = 1.
neg_kl = (0.5 * (1. - 2. * tf.math.log(prior_sigma / nu_sigma + 1e-8) -
(self._nu_mu - self._prior_mu)**2 / prior_var -
nu_sigma**2 / prior_var))
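# The expression above is the elementwise closed-form negative KL divergence
# -KL(N(nu_mu, nu_sigma^2) || N(prior_mu, prior_var)), i.e.
#   -( log(prior_sigma / nu_sigma) + (nu_sigma^2 + (nu_mu - prior_mu)^2) / (2 * prior_var) - 1/2 ),
# which holds here because prior_var is pinned to 1 and prior_log_sigma stays
# at zero, so the prior is effectively a unit-variance Gaussian.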
loss = init_nu_loss + bellman_loss - self._kl_regularizer * neg_kl
grads = tape.gradient(loss, [
self._nu_mu, self._nu_log_sigma, self._prior_mu, self._prior_log_sigma
])
self._nu_optimizer.apply_gradients(
zip(grads, [
self._nu_mu, self._nu_log_sigma, self._prior_mu,
self._prior_log_sigma
]))
return loss
def estimate_average_reward(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
num_samples=100):
"""Estimates value (average per-step reward) of policy.
The estimation is based on solved values of zeta, so one should call
solve() before calling this function.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
num_samples: number of posterior samples.
Returns:
A tensor with num_samples samples of estimated average per-step reward
of the target policy.
"""
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(
tf.concat([[num_samples], tf.shape(nu_sigma)], axis=-1), 0,
self._eps_std)
nu = self._nu_mu + nu_sigma * eps
self._zeta = (
tf.einsum('bn,nm->bm', nu, self._td_residuals) /
tf.math.sqrt(1e-8 + self._total_weights))
def weight_fn(env_step):
index = self._get_index(env_step.observation, env_step.action)
zeta = tf.gather(
self._zeta, tf.tile(index[None, :], [num_samples, 1]), batch_dims=1)
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
target_log_probabilities = target_policy.distribution(
tfagents_timestep).action.log_prob(env_step.action)
policy_ratio = tf.exp(target_log_probabilities -
env_step.get_log_probability())
return tf.cast(zeta * policy_ratio, tf.float32)
return estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=True,
reward_fn=self._reward_fn,
weight_fn=weight_fn)
| 43.024896
| 82
| 0.681647
|
de0574698f6a399dd13b607b34aaa00de2df1bdd
| 5,341
|
py
|
Python
|
mysite/settings.py
|
JenMart/Nat-Poll-App
|
db4f1a7a31930d78a5fa509045ade395179b49ec
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/settings.py
|
JenMart/Nat-Poll-App
|
db4f1a7a31930d78a5fa509045ade395179b49ec
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/settings.py
|
JenMart/Nat-Poll-App
|
db4f1a7a31930d78a5fa509045ade395179b49ec
|
[
"BSD-3-Clause"
] | null | null | null |
import os
# Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# print("pip1")
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'PORT': '', # Set to empty string for default.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 32.969136
| 129
| 0.692754
|
8ed88856632ec51212d0c68880e23da60c53a623
| 4,889
|
py
|
Python
|
alipay/aop/api/domain/KoubeiCateringKmsBakingSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/KoubeiCateringKmsBakingSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/KoubeiCateringKmsBakingSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KmsBakingCheckDTO import KmsBakingCheckDTO
from alipay.aop.api.domain.KmsBakingInventoryDTO import KmsBakingInventoryDTO
from alipay.aop.api.domain.KmsBakingPromotionDTO import KmsBakingPromotionDTO
class KoubeiCateringKmsBakingSyncModel(object):
def __init__(self):
self._action = None
self._check_data = None
self._inventory_data = None
self._promotion_data = None
self._shop_id = None
self._type = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def check_data(self):
return self._check_data
@check_data.setter
def check_data(self, value):
if isinstance(value, KmsBakingCheckDTO):
self._check_data = value
else:
self._check_data = KmsBakingCheckDTO.from_alipay_dict(value)
@property
def inventory_data(self):
return self._inventory_data
@inventory_data.setter
def inventory_data(self, value):
if isinstance(value, list):
self._inventory_data = list()
for i in value:
if isinstance(i, KmsBakingInventoryDTO):
self._inventory_data.append(i)
else:
self._inventory_data.append(KmsBakingInventoryDTO.from_alipay_dict(i))
@property
def promotion_data(self):
return self._promotion_data
@promotion_data.setter
def promotion_data(self, value):
if isinstance(value, list):
self._promotion_data = list()
for i in value:
if isinstance(i, KmsBakingPromotionDTO):
self._promotion_data.append(i)
else:
self._promotion_data.append(KmsBakingPromotionDTO.from_alipay_dict(i))
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.check_data:
if hasattr(self.check_data, 'to_alipay_dict'):
params['check_data'] = self.check_data.to_alipay_dict()
else:
params['check_data'] = self.check_data
if self.inventory_data:
if isinstance(self.inventory_data, list):
for i in range(0, len(self.inventory_data)):
element = self.inventory_data[i]
if hasattr(element, 'to_alipay_dict'):
self.inventory_data[i] = element.to_alipay_dict()
if hasattr(self.inventory_data, 'to_alipay_dict'):
params['inventory_data'] = self.inventory_data.to_alipay_dict()
else:
params['inventory_data'] = self.inventory_data
if self.promotion_data:
if isinstance(self.promotion_data, list):
for i in range(0, len(self.promotion_data)):
element = self.promotion_data[i]
if hasattr(element, 'to_alipay_dict'):
self.promotion_data[i] = element.to_alipay_dict()
if hasattr(self.promotion_data, 'to_alipay_dict'):
params['promotion_data'] = self.promotion_data.to_alipay_dict()
else:
params['promotion_data'] = self.promotion_data
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringKmsBakingSyncModel()
if 'action' in d:
o.action = d['action']
if 'check_data' in d:
o.check_data = d['check_data']
if 'inventory_data' in d:
o.inventory_data = d['inventory_data']
if 'promotion_data' in d:
o.promotion_data = d['promotion_data']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'type' in d:
o.type = d['type']
return o
| 33.951389
| 90
| 0.58785
|
3b9980b4f20896863b64a6a254bc36480c5cd72f
| 369
|
py
|
Python
|
tests/env_test.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | 1
|
2020-05-15T16:22:36.000Z
|
2020-05-15T16:22:36.000Z
|
tests/env_test.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | 9
|
2020-05-14T08:31:48.000Z
|
2021-04-22T12:35:15.000Z
|
tests/env_test.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | null | null | null |
import envyconfig
def test_use_default_when_env_var_is_not_defined():
config = envyconfig.load('fixtures/basic_env.yaml')
assert config['foo'] == 'bar'
def test_with_env_var(monkeypatch):
expected = 'some string'
monkeypatch.setenv("TESTENVVAR", expected)
config = envyconfig.load('fixtures/basic_env.yaml')
assert config['foo'] == expected
| 26.357143
| 55
| 0.731707
|
694a848915959c4dcd606cc035b174b5bab5a86f
| 247
|
py
|
Python
|
configs/_base_/schedules/bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/schedules/bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/schedules/bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[80, 110])
runner = dict(type='EpochBasedRunner', max_epochs=120)
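# Resulting schedule, assuming MMCV's default step gamma of 0.1 and no warmup
# configured elsewhere: lr = 0.002 for epochs 1-80, 0.0002 for epochs 81-110,
# and 0.00002 for epochs 111-120.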
| 35.285714
| 73
| 0.744939
|
52059a9bbb955841c9d27ed207fea9f0791af8e7
| 3,737
|
py
|
Python
|
venv/Lib/site-packages/traits/observation/tests/test_parsing.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 1
|
2022-01-18T17:56:51.000Z
|
2022-01-18T17:56:51.000Z
|
venv/Lib/site-packages/traits/observation/tests/test_parsing.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/traits/observation/tests/test_parsing.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.observation.parsing import parse
from traits.observation.expression import (
dict_items,
list_items,
metadata,
set_items,
trait,
)
class TestParsingSeriesJoin(unittest.TestCase):
def test_join(self):
actual = parse("a.b.c")
expected = trait("a").trait("b").trait("c")
self.assertEqual(actual, expected)
def test_join_with_colon(self):
actual = parse("a:b:c")
expected = trait("a", False).trait("b", False).trait("c")
self.assertEqual(actual, expected)
class TestParsingOr(unittest.TestCase):
def test_or_with_commas(self):
actual = parse("a,b,c")
expected = trait("a") | trait("b") | trait("c")
self.assertEqual(actual, expected)
def test_or_with_join_nested(self):
actual = parse("a.b.c,d.e")
expected = (
trait("a").trait("b").trait("c")
| trait("d").trait("e")
)
self.assertEqual(actual, expected)
class TestParsingGroup(unittest.TestCase):
def test_grouped_or(self):
actual = parse("root.[left,right]")
expected = trait("root").then(trait("left") | trait("right"))
self.assertEqual(actual, expected)
def test_grouped_or_extended(self):
actual = parse("root.[left,right].value")
expected = (
trait("root").then(
trait("left") | trait("right")).trait("value")
)
self.assertEqual(actual, expected)
def test_multi_branch_then_or_apply_notify_flag_to_last_item(self):
actual = parse("root.[a.b.c.d,value]:g")
expected = (
trait("root").then(
trait("a").trait("b").trait("c").trait("d", False)
| trait("value", False)
).trait("g")
)
self.assertEqual(actual, expected)
class TestParsingMetadata(unittest.TestCase):
def test_metadata(self):
actual = parse("+name")
expected = metadata("name", notify=True)
self.assertEqual(actual, expected)
def test_metadata_notify_false(self):
actual = parse("+name:+attr")
expected = metadata("name", notify=False).metadata("attr", notify=True)
self.assertEqual(actual, expected)
class TestParsingTrait(unittest.TestCase):
def test_simple_trait(self):
actual = parse("a")
expected = trait("a")
self.assertEqual(actual, expected)
def test_trait_not_notify(self):
actual = parse("a:b")
expected = trait("a", notify=False).trait("b")
self.assertEqual(actual, expected)
class TestParsingItems(unittest.TestCase):
def test_items(self):
actual = parse("items")
expected = (
trait("items", optional=True)
| dict_items(optional=True)
| list_items(optional=True)
| set_items(optional=True)
)
self.assertEqual(actual, expected)
def test_items_not_notify(self):
actual = parse("items:attr")
expected = (
trait("items", notify=False, optional=True)
| dict_items(notify=False, optional=True)
| list_items(notify=False, optional=True)
| set_items(notify=False, optional=True)
).trait("attr")
self.assertEqual(actual, expected)
| 29.65873
| 79
| 0.617875
|
4637a2fcc729a0ac7bffc55f7cf1462f4f2814c1
| 36
|
py
|
Python
|
passenger_wsgi.py
|
ericmuh/recruitment-system
|
d9964e7c48ac8af74995e28f489135c1d8f940be
|
[
"MIT"
] | null | null | null |
passenger_wsgi.py
|
ericmuh/recruitment-system
|
d9964e7c48ac8af74995e28f489135c1d8f940be
|
[
"MIT"
] | null | null | null |
passenger_wsgi.py
|
ericmuh/recruitment-system
|
d9964e7c48ac8af74995e28f489135c1d8f940be
|
[
"MIT"
] | null | null | null |
from recruit.wsgi import application
| 36
| 36
| 0.888889
|
6b94fed0eaad7d5e1f484a2ba802733b7fb8beb2
| 1,081
|
py
|
Python
|
main.py
|
flaviuvadan/explore-rl
|
9748038612872b90776675ed5db6272dbc6e5843
|
[
"MIT"
] | null | null | null |
main.py
|
flaviuvadan/explore-rl
|
9748038612872b90776675ed5db6272dbc6e5843
|
[
"MIT"
] | null | null | null |
main.py
|
flaviuvadan/explore-rl
|
9748038612872b90776675ed5db6272dbc6e5843
|
[
"MIT"
] | null | null | null |
""" Main file """
import gym
import numpy as np
import rl
if __name__ == '__main__':
env = gym.make('CartPole-v1')
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
model = rl.Model(observation_space, action_space)
while True:
current_state = env.reset()
current_state = np.reshape(current_state, (1, observation_space))
while True:
env.render()
action = model.get_action(current_state)
next_state, reward, done, info = env.step(action)
print('state info: ', next_state)
# penalize for being far from the center and a big pole angle
reward = reward - 2 * (abs(next_state[0]) + abs(next_state[2])) if not done else -reward
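# Worked example of the shaping above: with cart position 0.5 and pole angle
# 0.1 rad, a step reward of 1.0 becomes 1.0 - 2 * (0.5 + 0.1) = -0.2 while the
# episode is still alive; on termination the reward is simply negated.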
print(reward)
print()
next_state = np.reshape(next_state, (1, observation_space))
model.store(current_state, action, reward, next_state, done)
current_state = next_state
if done:
break
model.learn()
| 33.78125
| 100
| 0.601295
|
hexsha: 47a069db03b12de2c7e16a2e5347d2d87c24dfd5 | size: 189 | ext: py | lang: Python
repo path: tests/web_platform/CSS2/normal_flow/test_block_in_inline_remove_004_nosplit_ref.py | repo name: jonboland/colosseum | repo head hexsha: cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks fields)
max_stars_count: 71 (2015-04-13T09:44:14.000Z to 2019-03-24T01:03:02.000Z) | max_issues_count: 35 (2019-05-06T15:26:09.000Z to 2022-03-28T06:30:33.000Z) | max_forks_count: 139 (2015-05-30T18:37:43.000Z to 2019-03-27T17:14:05.000Z)
content:
from tests.utils import W3CTestCase
class TestBlockInInlineRemove004NosplitRef(W3CTestCase):
    vars().update(W3CTestCase.find_tests(__file__, 'block-in-inline-remove-004-nosplit-ref'))
avg_line_length: 31.5 | max_line_length: 93 | alphanum_fraction: 0.814815
hexsha: 179065f80bfad5b59e29beb11633f0dd3b63cff5 | size: 2,026 | ext: py | lang: Python
repo path: helpers/RulesStuff/stringParser v1.py | repo name: thejeswi/BobGoesToJail | repo head hexsha: ac8a6e4242446634837d6166158fc5401c2818ac | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks fields)
max_stars_count: 3 (2018-08-20T14:14:01.000Z to 2020-06-15T17:39:24.000Z) | max_issues_count: null | max_forks_count: 1 (2020-06-15T17:39:26.000Z to 2020-06-15T17:39:26.000Z)
content:
import re
from nltk import ParentedTree
import os
clear = lambda: os.system('clear')
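# Bracketed (Penn Treebank style) constituency parse used below as the default input tree.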
parsedSent = """(ROOT
(SBAR (IN If)
(S
(S
(NP (DT the) (NN perpetrator))
(VP (VBZ exceeds)
(S
(NP
(NP (DT the) (NNS limits))
(PP (IN of)
(NP (JJ necessary) (NN defense))))
(ADJP (JJ due)
(PP (TO to)
(NP (NN confusion) (, ,) (NN fear)
(CC or)
(NN fright)))))))
(, ,) (RB then)
(S
(NP (PRP he))
(VP (MD shall) (RB not)
(VP (VB be)
(VP (VBN punished))))))))"""
def toNLTKtree(str):
    newTree = ParentedTree.fromstring(str)
    return newTree

def removeWP(tree = parsedSent):
    tree = str(tree)
    tree = " ".join(" ".join(tree.split("\n")).split())
    return tree

def ifThereIsNo(tree, toNotMatch):
    for node in tree:
        if type(node) is ParentedTree:
            if re.match(toNotMatch, str(node.label())):
                return False
    return True

def tagChanger(TreeString, SubTreeString, toChange, newValue):
    TreeString = removeWP(str(TreeString))
    SubTreeString = removeWP(str(SubTreeString))
    fixedSubTreeString = re.sub(toChange, newValue, SubTreeString, 1)
    return toNLTKtree(re.sub(re.escape(SubTreeString), fixedSubTreeString, TreeString, 1))

# Depth-first search for an NP subtree with no VP and no already-relabelled Unary node among its direct children.
def findUnary(parent, found=None):
    if found:
        return found
    for node in parent:
        if type(node) is ParentedTree:
            if node.label() == 'Unary':
                continue
            if node.label() == 'NP':
                if ifThereIsNo(node, "VP|Unary"):
                    found = node
            found = findUnary(node, found)
    return found

# Repeatedly relabel such NPs as 'Unary' until none remain.
def toUnary(inputTree = toNLTKtree(parsedSent)):
    while findUnary(inputTree):
        unaryStr = removeWP(str(findUnary(inputTree)))
        inputTree = tagChanger(inputTree, unaryStr, "NP", "Unary")
    return inputTree

print(toUnary())
avg_line_length: 28.138889 | max_line_length: 90 | alphanum_fraction: 0.549852
hexsha: dd450678db82fd00235bdd3f3c18c332b950dfd4 | size: 991 | ext: py | lang: Python
repo path: examples/double_pendulum/double_pendulum_with_rrt.py | repo name: echoix/pyro | repo head hexsha: 787920cb14e3669bc65c530fd8f91d4277a24279 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 20:28:17 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.planning import randomtree
###############################################################################
sys = pendulum.DoublePendulum()
x_start = np.array([-3.14,0,0,0])
x_goal = np.array([0,0,0,0])
planner = randomtree.RRT( sys , x_start )
t = 10
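# Candidate control inputs for the RRT: torques of +t, -t or 0 on each of the two joints (all nine combinations).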
planner.u_options = [
np.array([-t,-t]),
np.array([-t,+t]),
np.array([+t,-t]),
np.array([+t,+t]),
np.array([ 0,+t]),
np.array([ 0,-t]),
np.array([ 0, 0]),
np.array([+t, 0]),
np.array([-t, 0])
]
planner.goal_radius = 0.8
planner.find_path_to_goal( x_goal )
planner.plot_tree()
planner.plot_open_loop_solution()
sys.animate_simulation()
avg_line_length: 23.595238 | max_line_length: 79 | alphanum_fraction: 0.440969
hexsha: 6ab47373375712721a009a8cb6961d509bc5ea80 | size: 1,607 | ext: py | lang: Python
repo path: python/server.template.py | repo name: Saevon/Recipes | repo head hexsha: ab8ca9b5244805d545da2dd1d80d249f1ec6057d | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/python
import bottle

app = bottle.Bottle()


@app.route('/<filename:path>')
def hive_js(filename):
    '''
    Allows access to any file in the static directory
    '''
    return bottle.static_file(filename, root="static")


##################################################
# Settings & Startup
##################################################

app_settings = {
    'debug': True,
    'host': 'localhost',
    'port': 7070,
    'quiet': True,
}

from optparse import OptionParser
app_parser = OptionParser(usage="usage: %prog [host] [options]")
app_parser.add_option(
    "-p", "--port",
    dest="port",
)
app_parser.add_option(
    "-v", "--debug", "--verbose",
    dest="debug",
    action="store_true",
)
app_parser.add_option(
    "-q", "--quiet",
    dest="debug",
    action="store_false",
)


def parse_options():
    '''
    Reads any commandline options, returning a final dict of options
    '''
    (options, args) = app_parser.parse_args()

    if len(args) > 1:
        app_parser.error("Too many arguments")
    elif len(args) == 1:
        app_settings['host'] = args[0]

    # Remove any unset options, using the defaults defined earlier instead
    options = vars(options)
    options = dict((key, options[key]) for key in options if options[key] is not None)

    return options


if __name__ == '__main__':
    app_settings.update(parse_options())

    # Debug only settings go here
    if app_settings["debug"]:
        bottle.debug(True)
        app_settings.update({
            'reloader': True,
            'quiet': False,
        })

    app.run(**app_settings)
avg_line_length: 21.144737 | max_line_length: 86 | alphanum_fraction: 0.580585
hexsha: 2b406dacc5caa7e87c9c2fc4c193de1a6444d2ee | size: 2,178 | ext: py | lang: Python
repo path: workshop_2018/workshop_2018.py | repo name: rudolphpienaar/pl-workshop-2018 | repo head hexsha: 1b4c6a3b04e93b034e378d78c4e4875320855f7a | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# _
# workshop_2018 ds app
#
# (c) 2016 Fetal-Neonatal Neuroimaging & Developmental Science Center
#     Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# dev@babyMRI.org
#

import os

# import the Chris app superclass
from chrisapp.base import ChrisApp


class Workshop_2018(ChrisApp):
    """
    An app to showcase making a plugin.
    """
    AUTHORS = 'FNNDSC (dev@babyMRI.org)'
    SELFPATH = os.path.dirname(os.path.abspath(__file__))
    SELFEXEC = os.path.basename(__file__)
    EXECSHELL = 'python3'
    TITLE = 'Funky Workshop app'
    CATEGORY = 'Fun'
    TYPE = 'ds'
    DESCRIPTION = 'An app to showcase making a plugin'
    DOCUMENTATION = 'http://wiki'
    VERSION = '0.1'
    LICENSE = 'Opensource (MIT)'
    MAX_NUMBER_OF_WORKERS = 1  # Override with integer value
    MIN_NUMBER_OF_WORKERS = 1  # Override with integer value
    MAX_CPU_LIMIT = ''  # Override with millicore value as string, e.g. '2000m'
    MIN_CPU_LIMIT = ''  # Override with millicore value as string, e.g. '2000m'
    MAX_MEMORY_LIMIT = ''  # Override with string, e.g. '1Gi', '2000Mi'
    MIN_MEMORY_LIMIT = ''  # Override with string, e.g. '1Gi', '2000Mi'
    MIN_GPU_LIMIT = 0  # Override with the minimum number of GPUs, as an integer, for your plugin
    MAX_GPU_LIMIT = 0  # Override with the maximum number of GPUs, as an integer, for your plugin

    # Fill out this with key-value output descriptive info (such as an output file path
    # relative to the output dir) that you want to save to the output meta file when
    # called with the --saveoutputmeta flag
    OUTPUT_META_DICT = {}

    def define_parameters(self):
        """
        Define the CLI arguments accepted by this plugin app.
        """

    def run(self, options):
        """
        Define the code to be run by this plugin app.
        """
        print('Hello, world!')


# ENTRYPOINT
if __name__ == "__main__":
    app = Workshop_2018()
    app.launch()
avg_line_length: 34.571429 | max_line_length: 105 | alphanum_fraction: 0.59596