hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
023fa6bbd20b990b812f3f037de938b8c58a24d0 | 2,811 | py | Python | cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | [
"MIT"
] | 1 | 2021-11-19T13:13:20.000Z | 2021-11-19T13:13:20.000Z | cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | [
"MIT"
] | null | null | null | cloudpredictionframework/anomaly_detection/algorithms/hybrid_algorithm.py | Fruktus/CloudPredictionFramework | 1474287cc9bdfd58ae92db7bc24966a7e600258f | [
"MIT"
] | null | null | null | from statistics import mean
from collections import defaultdict
from cloudpredictionframework.anomaly_detection.algorithms.base_algorithm import BaseAlgorithm
| 41.338235 | 116 | 0.644966 |
02426c5e9ebc5b6e7797b501d9a365d58338fa41 | 159 | py | Python | Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | [
"MIT"
] | null | null | null | Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | [
"MIT"
] | null | null | null | Defer/__init__.py | loynoir/defer.py | 46f37a046028b1854586301a45870c2b3a628f65 | [
"MIT"
] | null | null | null | __all__ = ['Defer']
from contextlib import contextmanager, ExitStack
| 19.875 | 48 | 0.72327 |
024385bec991016fbb9a7b197fba1d40d6b4f297 | 9,798 | py | Python | jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | [
"Unlicense"
] | null | null | null | jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | [
"Unlicense"
] | 3 | 2015-09-16T15:37:05.000Z | 2015-09-16T16:32:26.000Z | jsonmerge/strategies.py | open-contracting-archive/jsonmerge | 2b87eea10bed3aa380cb28034a96783ac3081a85 | [
"Unlicense"
] | null | null | null | # vim:ts=4 sw=4 expandtab softtabstop=4
from jsonmerge.exceptions import HeadInstanceError, \
BaseInstanceError, \
SchemaError
import jsonschema
import re
| 31.504823 | 112 | 0.557359 |
0243fa264d20be4663ad37da1958e0275ed6a559 | 3,100 | py | Python | ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | [
"MIT"
] | null | null | null | ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | [
"MIT"
] | null | null | null | ArcGISDesktop/reconcile_post_versions.py | jonhusen/ArcGIS | 1d39a627888ce6039c490cdad810cd6d8035cb77 | [
"MIT"
] | null | null | null | """
Reconcile and posting versions at 10.0
TODO:WIP
"""
import arcpy, os, sys, string
#Populate parent and child versions in the following manner('Parent':'Child', etc). DO NOT LIST DEFAULT
vTree = {'SDE.Parent':'SDE.Child','SDE.QA':'SDE.Edit'}
#Reconcile and post child versions with parent
#Reconcile and post with parent
#Compress database
if __name__=="__main__":
workspace = r"Database Connections\MXD2.sde"
defaultVersion = "sde.DEFAULT"
logName = "RecPostLog.txt"
logName2 = "RecPostDefaultLog.txt"
logName3 = "CompressLog.txt"
logWorkspace = r"C:\temp"
RecPostNonDefault(workspace,logWorkspace,logName)
RecPostDefault(workspace,logWorkspace,logName2,defaultVersion)
DeleteChildVersions(workspace)
DeleteParentVersions(workspace)
Compress(workspace,logWorkspace,logName3)
RecreateVersions(workspace, defaultVersion) | 40.789474 | 148 | 0.709677 |
024430ea1d89420e6939d1c770a6a86ca49668e5 | 4,626 | py | Python | example/F3Dp/F3D_syn.py | Chunfang/defmod-swpc | 74fe7c02b24a46aa24bca7438738aa5adb72e2b6 | [
"MIT"
] | 26 | 2017-05-12T08:11:57.000Z | 2022-03-06T01:44:24.000Z | example/F3Dp/F3D_syn.py | Soengmou/defmod-swpc | 75740fca3b36107e9d18201a5623c955f6010740 | [
"MIT"
] | 4 | 2019-09-11T15:35:16.000Z | 2020-06-23T10:49:34.000Z | example/F3Dp/F3D_syn.py | Chunfang/defmod-swpc | 74fe7c02b24a46aa24bca7438738aa5adb72e2b6 | [
"MIT"
] | 8 | 2017-05-22T18:40:13.000Z | 2021-02-10T08:04:39.000Z | #!/usr/bin/env python
import numpy as np
import os,sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-vis') # 1 plot cropped point cloud
ap.add_argument('-refine') # 1 refine mesh
ap.add_argument('-clean') # 1 remove tmp files
if ap.parse_args().vis==None:
vis=0
else:
vis=int(ap.parse_args().vis)
if ap.parse_args().refine==None:
refine=0
else:
refine=int(ap.parse_args().refine)
if ap.parse_args().clean==None:
clean=0
else:
clean=int(ap.parse_args().clean)
# Synthetic fault pixels
z=np.linspace(.2, -.8, num=100)
y=np.linspace(-.625,.625, num=120)
grid=np.meshgrid(y,z)
x=np.zeros((len(z)*len(y),1),dtype=np.float)
dat_vert=np.hstack((x,grid[0].reshape(x.shape),grid[1].reshape(x.shape)))
# weak
wl=np.linspace(.12,.18,num=8); amp=.03125*np.sqrt(wl)
e=1.025; r=-.2
dip=70.; zcnt=-.35
omg=[ 0.82976173, 0.89624834, 0.03829284, -0.50016345, -1.06606012, 1.40505898, -1.24256034, 1.28623393]
#omg=(np.random.rand(wl.shape[0])-.5)*np.pi
L=dat_vert[1,:].max()-dat_vert[1,:].min()
zmax=z.max(); zmin=z.min()
for i in range(len(wl)):
phs=dat_vert[:,1]/wl[i]*np.pi+omg[i]
dat_vert[:,0]=dat_vert[:,0]+amp[i]*np.cos(phs)*(e*zmax-dat_vert[:,2])/(e*zmax-zmin)*np.exp(r*abs(phs)/np.pi)
dat_vert[:,0]=dat_vert[:,0]+(zcnt-dat_vert[:,2])*np.tan((90.-dip)/180.*np.pi)
# ridge patch
slope1=10.;slope2=-10.
trunc1=.1;trunc2=.6
hup=0.;hlw=.08
#dat_vert=flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup)
print omg
fout='F3D_syn.xyz'
f=open(fout,'w+')
np.savetxt(f,dat_vert,delimiter=' ', fmt='%.6f '*3)
f.close()
from subprocess import call
fin=fout
fout=fout.rsplit('.')[0]+'.stl'
mxl='xyz2stl.mlx'
call(['meshlabserver', '-i',fin,'-o',fout,'-s',mxl])
if clean==1: os.remove(fin)
# Mesh
fin=fout
if refine==1:
fout=fout.rsplit('.')[0]+'_dns.exo'
else:
fout=fout.rsplit('.')[0]+'.exo'
jou='F3D_tet.jou'
txt_jou=open(jou,'r')
txt_jou_tmp=open('tmp.jou','w+')
hf=0.0025 # fault grid length (0.0025 for ~100 m tet model, 0.003 for ~40 m)
hm=0.0075 # matrix grid length (0.0075 for ~100 m tet model, 0.010 for ~40 m)
for line in txt_jou:
line=line.strip('\r\n')
if 'import' in line.lower():
line='import stl "'+fin+'"'
if 'export' in line.lower():
line='export mesh "'+fout+'" dimension 3 overwrite'
if 'surface 46 94 95 97 size' in line.lower():
line='surface 46 94 95 97 size %0.6f' %(2*hf)
if 'volume all size' in line.lower():
line='volume all size %0.6f' %(2*hm)
txt_jou_tmp.write(line+'\n')
if 'mesh volume all' in line.lower() and refine==1:
txt_jou_tmp.write('refine volume all\n')
txt_jou.close();txt_jou_tmp.close()
call(['trelis','-nojournal','-nographics','tmp.jou'])
if clean==1: os.remove('tmp.jou')
# Preprocessing msh=>inp
dt_dyn=2E-5 #1E-5 for dns 100 m tet model, 8E-5 for 40 m tet, 8E-4 for ~1 m tet
import F3D_msh2inp
_=F3D_msh2inp.msh2inp(fout,dt_dyn)
# Fault plot
if vis==1:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(dat_vert[:,0], dat_vert[:,1], dat_vert[:,2], c='b', marker='.')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([np.max(dat_vert[:,0])-np.min(dat_vert[:,0]),np.max(dat_vert[:,1])\
-np.min(dat_vert[:,1]), np.max(dat_vert[:,2])-np.min(dat_vert[:,2])]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten()
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten()
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten()
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w',)
plt.title('fault [km]')
plt.grid()
plt.show()
| 34.266667 | 115 | 0.635754 |
0244e0d25129f6105b7892408951f27b584d128e | 2,850 | py | Python | fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | null | null | null | fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | 2 | 2021-05-11T12:48:14.000Z | 2021-05-11T12:49:24.000Z | fltk/util/data_loader_utils.py | tudelft-eemcs-dml/fltk-testbed-gr-5 | 72afa24a37cd1f8f5f49665c83ccbd730d76ad21 | [
"BSD-2-Clause"
] | 2 | 2021-05-03T17:40:18.000Z | 2021-05-11T09:34:30.000Z | import numpy
from torch.utils.data import DataLoader
import os
import pickle
import random
from ..datasets import Dataset
def generate_data_loaders_from_distributed_dataset(distributed_dataset, batch_size):
"""
Generate data loaders from a distributed dataset.
:param distributed_dataset: Distributed dataset
:type distributed_dataset: list(tuple)
:param batch_size: batch size for data loader
:type batch_size: int
"""
data_loaders = []
for worker_training_data in distributed_dataset:
data_loaders.append(Dataset.get_data_loader_from_data(batch_size, worker_training_data[0], worker_training_data[1], shuffle=True))
return data_loaders
def load_train_data_loader(logger, args):
"""
Loads the training data DataLoader object from a file if available.
:param logger: loguru.Logger
:param args: Arguments
"""
if os.path.exists(args.get_train_data_loader_pickle_path()):
dl = load_data_loader_from_file(logger, args.get_train_data_loader_pickle_path())
return dl
else:
logger.error("Couldn't find train data loader stored in file")
raise FileNotFoundError("Couldn't find train data loader stored in file")
def load_test_data_loader(logger, args):
"""
Loads the test data DataLoader object from a file if available.
:param logger: loguru.Logger
:param args: Arguments
"""
if os.path.exists(args.get_test_data_loader_pickle_path()):
return load_data_loader_from_file(logger, args.get_test_data_loader_pickle_path())
else:
logger.error("Couldn't find test data loader stored in file")
raise FileNotFoundError("Couldn't find train data loader stored in file")
def load_data_loader_from_file(logger, filename) -> DataLoader:
"""
Loads DataLoader object from a file if available.
:param logger: loguru.Logger
:param filename: string
"""
logger.info("Loading data loader from file: {}".format(filename))
with open(filename, "rb") as f:
return load_saved_data_loader(f)
| 31.318681 | 138 | 0.729825 |
024580a7ff506aa3cbda6d46122b84b1603a6c05 | 794 | py | Python | pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | pywikibot/families/omegawiki_family.py | shizhao/pywikibot-core | 8441a1cd0e8dd5d3701f1c5e26077e40a40937ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '$Id: 024580a7ff506aa3cbda6d46122b84b1603a6c05 $'
from pywikibot import family
# Omegawiki, the Ultimate online dictionary
| 22.685714 | 82 | 0.632242 |
024a818dbea659d940b31f646bbc0d73684c65d8 | 4,781 | py | Python | tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | [
"BSD-3-Clause"
] | 5 | 2019-05-16T11:42:21.000Z | 2022-03-25T22:25:35.000Z | tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | [
"BSD-3-Clause"
] | null | null | null | tools/scripts/extract_features_WORLD.py | feelins/mcd_WORLD | 8a98c1c740ec5371a322d038b8498cb72f3f7750 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=4:
print("Usage: ")
print("python extract_features_WORLD.py <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top currently directory
current_dir = os.getcwd()
# input audio directory
wav_dir = sys.argv[1]
# Output features directory
out_dir = sys.argv[2]
# initializations
fs = int(sys.argv[3])
# tools directory
world = os.path.join(current_dir, "tools/bin/WORLD")
sptk = os.path.join(current_dir, "tools/bin/SPTK-3.11")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 22050:
nFFTHalf = 1024
alpha = 0.65
elif fs == 44100:
nFFTHalf = 2048
alpha = 0.76
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#bap order depends on sampling rate.
mcsize=59
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(out_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0'), \
os.path.join(out_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(out_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(out_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(out_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(out_dir, file_id + ".bapd"), \
os.path.join(out_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean temporal files
#shutil.rmtree(out_dir, ignore_errors=True)
#shutil.rmtree(out_dir, ignore_errors=True)
#for zippath in glob.iglob(os.path.join(out_dir, '*.bapd')):
# os.remove(zippath)
clean_temp_files_cmd = "rm -rf %s/*.bapd %s/*.f0a %s/*.f0 %s/*.sp" % (out_dir, out_dir, out_dir, out_dir)
os.system(clean_temp_files_cmd)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
| 34.89781 | 116 | 0.535244 |
024b2b7d9d7075b55a314e3428f50fdfaf0a011e | 19,261 | py | Python | mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from cctbx import adptbx
from mmtbx import bulk_solvent
from cctbx.array_family import flex
from cctbx import adptbx
import mmtbx
from libtbx import group_args
import mmtbx.arrays
import mmtbx.bulk_solvent.scaler
from libtbx.test_utils import approx_equal
from libtbx.math_utils import ifloor, iceil
import mmtbx.f_model
import mmtbx.bulk_solvent.bulk_solvent_and_scaling as bss
from six.moves import zip, range
| 44.380184 | 82 | 0.626343 |
024c1d679000935d415d1310cd2a49a746f73e4a | 4,704 | py | Python | pysparkpro/dsl/nodesbak.py | liaoxiong3x/pyspark | 2a16ad495780b1b37f5dc571cb7ea11260765366 | [
"Apache-2.0"
] | null | null | null | pysparkpro/dsl/nodesbak.py | liaoxiong3x/pyspark | 2a16ad495780b1b37f5dc571cb7ea11260765366 | [
"Apache-2.0"
] | null | null | null | pysparkpro/dsl/nodesbak.py | liaoxiong3x/pyspark | 2a16ad495780b1b37f5dc571cb7ea11260765366 | [
"Apache-2.0"
] | null | null | null | from session.abstract_class import PysparkPro
if __name__ == '__main__':
spark = DslAdaptor()
print(spark)
| 26.426966 | 83 | 0.650298 |
024c4ab64cff5513fb1d36a41a43c50162ebb3f1 | 821 | py | Python | backdoor/detect_buffer_overflow.py | Sanardi/bored | 2816395b99c05871f01fbbd55a833dcd13801014 | [
"MIT"
] | null | null | null | backdoor/detect_buffer_overflow.py | Sanardi/bored | 2816395b99c05871f01fbbd55a833dcd13801014 | [
"MIT"
] | null | null | null | backdoor/detect_buffer_overflow.py | Sanardi/bored | 2816395b99c05871f01fbbd55a833dcd13801014 | [
"MIT"
] | null | null | null | import socket
if __name__ == "__main__":
PORT = 12345
SERVER = '<THE HOSTNAME OR IP>'
s = connect(SERVER, PORT)
print(read_until(s))
| 23.457143 | 58 | 0.548112 |
024c6205dd81c6aee9436b9f31977f458d63fa70 | 3,384 | py | Python | tools/test.py | EMinsight/MPh | 2b967b77352f9ce7effcd50ad4774bf5eaf731ea | [
"MIT"
] | null | null | null | tools/test.py | EMinsight/MPh | 2b967b77352f9ce7effcd50ad4774bf5eaf731ea | [
"MIT"
] | null | null | null | tools/test.py | EMinsight/MPh | 2b967b77352f9ce7effcd50ad4774bf5eaf731ea | [
"MIT"
] | null | null | null | """
Runs all tests in the intended order.
Each test script (in the `tests` folder) contains a group of tests.
These scripts must be run in separate processes as most of them start
and stop the Java virtual machine, which can only be done once per
process. This is why simply calling pyTest (with `python -m pytest`
in the root folder) will not work.
This script here runs each test group in a new subprocess. It also
imposes a logical order: from the tests covering the most most basic
functionality to the high-level abstractions.
Here, as opposed to the similar script `coverage.py`, we don't actually
run the tests through pyTest. Rather, we run the scripts directly so
that the output is less verbose. Note, however, that pyTest still needs
to be installed as some of the test fixtures require it.
The verbosity can be increased by passing `--log` as a command-line
argument. This will display the log messages produced by MPh as the
tests are running. You can also pass the name of a test group to run
only that one. For example, passing "model" will only run the tests
defined in `test_model.py`.
"""
from subprocess import run
from pathlib import Path
from timeit import default_timer as now
from argparse import ArgumentParser
from sys import executable as python
from sys import exit
from os import environ, pathsep
# Define order of test groups.
groups = ['meta', 'config', 'discovery', 'server', 'session', 'standalone',
'client', 'multi', 'node', 'model', 'exit']
# Determine path of project root folder.
here = Path(__file__).resolve().parent
root = here.parent
# Run MPh in project folder, not a possibly different installed version.
if 'PYTHONPATH' in environ:
environ['PYTHONPATH'] = str(root) + pathsep + environ['PYTHONPATH']
else:
environ['PYTHONPATH'] = str(root)
# Parse command-line arguments.
parser = ArgumentParser(prog='test.py',
description='Runs the MPh test suite.',
add_help=False,
allow_abbrev=False)
parser.add_argument('--help',
help='Show this help message.',
action='help')
parser.add_argument('--log',
help='Display log output.',
action='store_true')
parser.add_argument('--groups',
help='List all test groups.',
action='store_true')
parser.add_argument('group',
help='Run only this group of tests.',
nargs='?')
arguments = parser.parse_args()
if arguments.groups:
for group in groups:
print(group)
exit()
if arguments.group:
group = arguments.group
if group.startswith('test_'):
group = group[5:]
if group.endswith('.py'):
group = group[:-3]
groups = [group]
options = []
if arguments.log:
options.append('--log')
# Run each test group in new process.
for group in groups:
if groups.index(group) > 0:
print()
print(f'Running test group "{group}".')
t0 = now()
process = run([python, f'test_{group}.py'] + options, cwd=root/'tests')
if process.returncode == 0:
print(f'Passed in {now()-t0:.0f} s.')
else:
print(f'Failed after {now()-t0:.0f} s.')
exit(1)
| 36 | 76 | 0.636525 |
024c8b636c73803ba5c14b996265676bb94e1dd0 | 592 | py | Python | notebooks/shared/ipypublish/export_plugins/html_standard.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 728 | 2018-09-21T03:51:04.000Z | 2022-03-28T09:35:04.000Z | notebooks/shared/ipypublish/export_plugins/html_standard.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 103 | 2018-09-02T12:26:32.000Z | 2022-02-09T07:19:08.000Z | notebooks/shared/ipypublish/export_plugins/html_standard.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 157 | 2018-09-02T08:00:50.000Z | 2022-03-27T22:04:50.000Z | #!/usr/bin/env python
"""html in standard nbconvert format
"""
from ipypublish.html.create_tpl import create_tpl
from ipypublish.html.standard import content
from ipypublish.html.standard import content_tagging
from ipypublish.html.standard import document
from ipypublish.html.standard import inout_prompt
from ipypublish.html.standard import mathjax
from ipypublish.html.standard import widgets
oformat = 'HTML'
config = {}
template = create_tpl([
document.tpl_dict,
content.tpl_dict, content_tagging.tpl_dict,
mathjax.tpl_dict, widgets.tpl_dict,
inout_prompt.tpl_dict
])
| 28.190476 | 52 | 0.802365 |
024cdbf14b841e1da6f77d24cda6ea8444019523 | 1,320 | py | Python | application/src/app_pkg/routes/get_messages.py | eyardley/CSC648-SoftwareEngineering-Snapster | 6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a | [
"MIT"
] | null | null | null | application/src/app_pkg/routes/get_messages.py | eyardley/CSC648-SoftwareEngineering-Snapster | 6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a | [
"MIT"
] | 3 | 2021-06-08T21:39:12.000Z | 2022-01-13T02:46:20.000Z | application/src/app_pkg/routes/get_messages.py | eyardley/CSC648-SoftwareEngineering-Snapster | 6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a | [
"MIT"
] | 1 | 2021-05-09T21:01:28.000Z | 2021-05-09T21:01:28.000Z | # from flask import render_template, request, make_response, jsonify
# from src.app_pkg.routes.common import validate_helper
# from src.app_pkg import app, db
# from src.app_pkg.forms import MessageForm
#
# ################################################
# # Show All Messages / User Profile #
# ################################################
# # AUTHOR: Bakulia Kurmant
# # NOTE: This function handles the route of the show all message functionality.
# # It show the list of messages the user sent or received and single view message modal with message body
# # Once the Database manager API returns a result (as a list), it passes that resulting list
# # to the HTML page to be rendered.
#
#
# @app.route('/user_profile', method=['GET'])
# def all_messages(msg_id):
# isloggedin = validate_helper(request.cookies.get('token'))
#
# if not isloggedin:
# return render_template('search.html')
#
# msg_result_size = 0
# msg_results = []
# print('calling db...')
# msg_result_size, msg_results = db.get_all_messages(isloggedin, msg_id)
#
# if msg_result_size == 0:
# print("You have no messages!")
#
# return render_template('user_profile.html', isloggedin=isloggedin, msg_result_size=msg_result_size,
# msg_results=msg_results)
#
#
| 37.714286 | 106 | 0.641667 |
024d5f02a7be6e61357ca017fedc52a6ef5e46ea | 18,681 | py | Python | tests/fixtures/test_product.py | oldarmyc/cap | 2e3e4b89d3d05f03876446d6f339167bd2805ea8 | [
"Apache-2.0"
] | 1 | 2017-12-13T20:19:29.000Z | 2017-12-13T20:19:29.000Z | tests/fixtures/test_product.py | oldarmyc/cap | 2e3e4b89d3d05f03876446d6f339167bd2805ea8 | [
"Apache-2.0"
] | null | null | null | tests/fixtures/test_product.py | oldarmyc/cap | 2e3e4b89d3d05f03876446d6f339167bd2805ea8 | [
"Apache-2.0"
] | 1 | 2018-09-21T15:26:42.000Z | 2018-09-21T15:26:42.000Z | # Copyright 2016 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sample_product = {
"title": "Test",
"us_url": "http://us.test.com",
"uk_url": "http://uk.test.com",
"active": True,
"db_name": "test",
"require_region": True,
"doc_url": "http://doc.test.com",
"pitchfork_url": "https://pitchfork/url"
}
sample_limit = {
"product": "test",
"title": "Test",
"uri": "/limits",
"slug": "test",
"active": True,
"absolute_path": "test/path",
"absolute_type": "list",
"limit_key": "test_limit",
"value_key": "test_value"
}
sample_log = {
"queried": ["dns"],
"queried_by": "skeletor",
"region": "dfw",
"ddi": "123456",
'query_results': []
}
sample_auth_failure = {
'message': (
'<strong>Error!</strong> Authentication has failed due to'
' incorrect token or DDI. Please check the token and DDI '
'and try again.'
)
}
""" DNS Tests """
dns = {
"title": "DNS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "dns",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
dns_limit = {
"product": "dns",
"title": "Domains",
"uri": "/limits",
"slug": "domains",
"active": True,
"absolute_path": "limits.absolute",
"absolute_type": "dict",
"value_key": "",
"limit_key": "domains"
}
dns_limit_return = {
"limits": {
"rate": [
{
"regex": ".*/v\\d+\\.\\d+/(\\d+/domains/search).*",
"limit": [
{
"value": 20,
"verb": "GET",
"next-available": "2016-01-12T13:56:11.450Z",
"remaining": 20,
"unit": "MINUTE"
}
],
"uri": "*/domains/search*"
}
],
"absolute": {
"domains": 500,
"records per domain": 500
}
}
}
dns_list_return = {
"domains": [
{
"comment": "Test",
"updated": "2015-12-08T20:47:02.000+0000",
"name": "test.net",
"created": "2015-04-09T15:42:49.000+0000",
"emailAddress": "skeletor@rackspace.com",
"id": 123465798,
"accountId": 1234567
}
],
"totalEntries": 1
}
dns_full_return = {
'dns': {
'values': {'Domains': 1},
'limits': {'Domains': 500}
}
}
""" Autoscale """
autoscale = {
"title": "Autoscale",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "autoscale",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
autoscale_limit = {
"product": "autoscale",
"title": "Max Groups",
"absolute_path": "limits.absolute",
"uri": "/v1.0/{ddi}/limits",
"slug": "max_groups",
"value_key": "",
"absolute_type": "dict",
"active": True,
"limit_key": "maxGroups"
}
autoscale_limit_return = {
"limits": {
"rate": [
{
"regex": "/v1\\.0/execute/(.*)",
"limit": [
{
"value": 10,
"verb": "ALL",
"next-available": "2016-01-12T14:51:13.402Z",
"remaining": 10,
"unit": "SECOND"
}
],
"uri": "/v1.0/execute/*"
}
],
"absolute": {
"maxGroups": 1000,
"maxPoliciesPerGroup": 100,
"maxWebhooksPerPolicy": 25
}
}
}
autoscale_list_return = {
"groups": [
{
"state": {
"status": "ACTIVE",
"desiredCapacity": 0,
"paused": False,
"active": [],
"pendingCapacity": 0,
"activeCapacity": 0,
"name": "test"
},
"id": "d446f3c2-612f-41b8-92dc-4d6e1422bde2",
"links": [
{
"href": (
'https://dfw.autoscale.api.rackspacecloud.com/v1.0'
'/1234567/groups/d446f3c2-612f-41b8-92dc-4d6e1422bde2/'
),
"rel": "self"
}
]
}
],
"groups_links": []
}
autoscale_full_return = {
'autoscale': {
'values': {'Max Groups': 1},
'limits': {'Max Groups': 1000}
}
}
""" Big Data """
big_data = {
"title": "Big Data",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "big_data",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
big_data_limit = [
{
"product": "big_data",
"title": "Node Count",
"absolute_path": "limits.absolute.node_count",
"uri": "/v2/{ddi}/limits",
"slug": "node_count",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}, {
"product": "big_data",
"title": "Disk - MB",
"absolute_path": "limits.absolute.disk",
"uri": "/v2/{ddi}/limits",
"slug": "disk_-_mb",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
]
big_data_limit_return = {
"limits": {
"absolute": {
"node_count": {
"limit": 15,
"remaining": 8
},
"disk": {
"limit": 50000,
"remaining": 25000
},
"ram": {
"limit": 655360,
"remaining": 555360
},
"vcpus": {
"limit": 200,
"remaining": 120
}
}
}
}
big_data_full_return = {
'big_data': {
'values': {'Node Count': 7, 'Disk - MB': 25000},
'limits': {'Node Count': 15, 'Disk - MB': 50000}
}
}
""" CBS """
cbs = {
"title": "CBS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "cbs",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
cbs_limit = {
"product": "cbs",
"title": "SATA - GB",
"absolute_path": "quota_set.gigabytes_SATA",
"uri": "/v1/{ddi}/os-quota-sets/{ddi}?usage=True",
"slug": "sata_-_gb",
"value_key": "in_use",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
cbs_limit_return = {
"quota_set": {
"volumes": {
"limit": -1,
"reserved": 0,
"in_use": 3
},
"gigabytes_SATA": {
"limit": 10240,
"reserved": 0,
"in_use": 325
},
"gigabytes_SSD": {
"limit": 10240,
"reserved": 0,
"in_use": 50
}
}
}
cbs_full_return = {
'cbs': {
'values': {'SATA - GB': 9915},
'limits': {'SATA - GB': 10240}
}
}
""" Load Balancers """
clb = {
"title": "Load Balancers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "load_balancers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
clb_limit = [
{
"product": "load_balancers",
"title": "Total Load Balancers",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "total_load_balancers",
"active": True,
"path": "absolute['LOADBALANCER_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "LOADBALANCER_LIMIT"
}, {
"product": "load_balancers",
"title": "Nodes per LB",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "nodes_per_lb",
"active": True,
"path": "absolute['NODE_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "NODE_LIMIT"
}
]
clb_limit_return = {
"absolute": [
{
"name": "IPV6_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_LIMIT",
"value": 25
}, {
"name": "BATCH_DELETE_LIMIT",
"value": 10
}, {
"name": "ACCESS_LIST_LIMIT",
"value": 100
}, {
"name": "NODE_LIMIT",
"value": 25
}, {
"name": "NODE_META_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_META_LIMIT",
"value": 25
}, {
"name": "CERTIFICATE_MAPPING_LIMIT",
"value": 20
}
]
}
clb_list_return = {
"loadBalancers": [
{
"status": "ACTIVE",
"updated": {
"time": "2016-01-12T16:04:44Z"
},
"protocol": "HTTP",
"name": "test",
"algorithm": "LEAST_CONNECTIONS",
"created": {
"time": "2016-01-12T16:04:44Z"
},
"virtualIps": [
{
"ipVersion": "IPV4",
"type": "PUBLIC",
"id": 19875,
"address": "148.62.0.226"
}, {
"ipVersion": "IPV6",
"type": "PUBLIC",
"id": 9318325,
"address": "2001:4800:7904:0100:f46f:211b:0000:0001"
}
],
"id": 506497,
"timeout": 30,
"nodeCount": 0,
"port": 80
}
]
}
clb_full_return = {
'load_balancers': {
'values': {'Total Load Balancers': 1},
'limits': {'Total Load Balancers': 25, 'Nodes per LB': 25}
}
}
""" Servers """
server = {
"title": "Servers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "servers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
server_limit = [
{
"product": "servers",
"title": "Servers",
"uri": "/v2/{ddi}/limits",
"slug": "servers",
"active": True,
"path": "absolute['maxTotalInstances']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalInstances"
}, {
"product": "servers",
"title": "Private Networks",
"uri": "/v2/{ddi}/limits",
"slug": "private_networks",
"active": True,
"path": "absolute['maxTotalPrivateNetworks']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalPrivateNetworks"
}, {
"product": "servers",
"title": "Ram - MB",
"uri": "/v2/{ddi}/limits",
"slug": "ram_-_mb",
"active": True,
"path": "absolute['maxTotalRAMSize']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalRAMSize"
}
]
server_limit_return = {
"limits": {
"rate": [
{
"regex": "/[^/]*/?$",
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "MINUTE",
"verb": "GET",
"remaining": 2200,
"value": 2200
}
],
"uri": "*"
}, {
"regex": (
"/v[^/]+/[^/]+/servers/([^/]+)/rax-si-image-schedule"
),
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "SECOND",
"verb": "POST",
"remaining": 10,
"value": 10
}
],
"uri": "/servers/{id}/rax-si-image-schedule"
}
],
"absolute": {
"maxPersonalitySize": 1000,
"maxTotalCores": -1,
"maxPersonality": 5,
"totalPrivateNetworksUsed": 1,
"maxImageMeta": 40,
"maxTotalPrivateNetworks": 10,
"maxSecurityGroupRules": -1,
"maxTotalKeypairs": 100,
"totalRAMUsed": 4096,
"maxSecurityGroups": -1,
"totalFloatingIpsUsed": 0,
"totalInstancesUsed": 3,
"totalSecurityGroupsUsed": 0,
"maxServerMeta": 40,
"maxTotalFloatingIps": -1,
"maxTotalInstances": 200,
"totalCoresUsed": 4,
"maxTotalRAMSize": 256000
}
}
}
server_list_return = {
"servers": [
{
"OS-EXT-STS:task_state": None,
"addresses": {
"public": [
{
"version": 4,
"addr": "104.130.28.32"
}, {
"version": 6,
"addr": "2001:4802:7803:104:be76:4eff:fe21:51b7"
}
],
"private": [
{
"version": 4,
"addr": "10.176.205.68"
}
]
},
"flavor": {
"id": "general1-1",
"links": [
{
"href": (
"https://iad.servers.api.rackspacecloud.com"
"/766030/flavors/general1-1"
),
"rel": "bookmark"
}
]
},
"id": "3290e50d-888f-4500-a934-16c10f3b8a10",
"user_id": "284275",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "104.130.28.32",
"accessIPv6": "2001:4802:7803:104:be76:4eff:fe21:51b7",
"progress": 100,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "ACTIVE",
"updated": "2016-01-12T15:16:37Z",
"name": "test-server",
"created": "2016-01-12T15:15:39Z",
"tenant_id": "1234567",
"metadata": {
"build_config": "",
"rax_service_level_automation": "Complete"
}
}
]
}
server_list_processed_return = [
{
'status': 'ACTIVE',
'updated': '2016-01-12T15:16:37Z',
'OS-EXT-STS:task_state': None,
'user_id': '284275',
'addresses': {
'public': [
{
'version': 4,
'addr': '104.130.28.32'
}, {
'version': 6,
'addr': '2001:4802:7803:104:be76:4eff:fe21:51b7'
}
],
'private': [
{
'version': 4,
'addr': '10.176.205.68'
}
]
},
'created': '2016-01-12T15:15:39Z',
'tenant_id': '1234567',
'OS-DCF:diskConfig': 'MANUAL',
'id': '3290e50d-888f-4500-a934-16c10f3b8a10',
'accessIPv4': '104.130.28.32',
'accessIPv6': '2001:4802:7803:104:be76:4eff:fe21:51b7',
'config_drive': '',
'progress': 100,
'OS-EXT-STS:power_state': 1,
'metadata': {
'build_config': '',
'rax_service_level_automation': 'Complete'
},
'flavor': {
'id': 'general1-1',
'links': [
{
'href': (
'https://iad.servers.api.rackspacecloud.com'
'/766030/flavors/general1-1'
),
'rel': 'bookmark'
}
]
},
'name': 'test-server'
}
]
network_list_return = {
"networks": [
{
"status": "ACTIVE",
"subnets": [
"879ff280-6f17-4fd8-b684-19237d88fc45"
],
"name": "test-network",
"admin_state_up": True,
"tenant_id": "1234567",
"shared": False,
"id": "e737483a-00d7-4517-afc3-bd1fbbbd4cd3"
}
]
}
network_processed_list = [
{
'status': 'ACTIVE',
'subnets': [
'879ff280-6f17-4fd8-b684-19237d88fc45'
],
'name': 'test-network',
'admin_state_up': True,
'tenant_id': '1234567',
'shared': False,
'id': 'e737483a-00d7-4517-afc3-bd1fbbbd4cd3'
}
]
server_flavor_return = {
"flavor": {
"ram": 1024,
"name": "1 GB General Purpose v1",
"OS-FLV-WITH-EXT-SPECS:extra_specs": {
"number_of_data_disks": "0",
"class": "general1",
"disk_io_index": "40",
"policy_class": "general_flavor"
},
"vcpus": 1,
"swap": "",
"rxtx_factor": 200.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 20,
"id": "general1-1"
}
}
server_full_return = {
'servers': {
'values': {
'Private Networks': 1,
'Ram - MB': 1024,
'Servers': 1
},
'limits': {
'Private Networks': 10,
'Ram - MB': 256000,
'Servers': 200
}
}
}
| 26.018106 | 79 | 0.429849 |
0251012874a85c99ece694f4c087c35e3ad1cb49 | 2,150 | py | Python | script/download_pretrained.py | cttsai1985/google-quest-qa-labeling-pipeline | ef4fb92c470e45c0a07b0ee0e474224d88d3d410 | [
"Apache-2.0"
] | 2 | 2020-04-08T17:05:01.000Z | 2020-06-30T18:02:03.000Z | script/download_pretrained.py | cttsai1985/google-quest-qa-labeling-pipeline | ef4fb92c470e45c0a07b0ee0e474224d88d3d410 | [
"Apache-2.0"
] | null | null | null | script/download_pretrained.py | cttsai1985/google-quest-qa-labeling-pipeline | ef4fb92c470e45c0a07b0ee0e474224d88d3d410 | [
"Apache-2.0"
] | null | null | null | """
fork THIS excellent downloader
https://www.kaggle.com/maroberti/transformers-model-downloader-pytorch-tf2-0
"""
from typing import Union
from pathlib import Path
import os
import transformers
from transformers import AutoConfig, AutoTokenizer, TFAutoModel
if "__main__" == __name__:
main()
| 28.289474 | 120 | 0.693953 |
0251ffe3075d234371ce4b6df85d16a4d7b3e648 | 28,128 | py | Python | scripts/icdcs2019/communication.py | HKBU-HPML/gtopkssgd | 6f57343f3749939b0345d36fcb2c24470942aefd | [
"Apache-2.0"
] | 33 | 2019-05-13T12:04:15.000Z | 2022-03-14T06:23:56.000Z | scripts/icdcs2019/communication.py | HKBU-HPML/gtopkssgd | 6f57343f3749939b0345d36fcb2c24470942aefd | [
"Apache-2.0"
] | 2 | 2019-04-24T02:38:07.000Z | 2021-05-31T11:22:24.000Z | scripts/icdcs2019/communication.py | HKBU-HPML/gtopkssgd | 6f57343f3749939b0345d36fcb2c24470942aefd | [
"Apache-2.0"
] | 10 | 2019-07-18T23:43:32.000Z | 2021-06-16T13:22:04.000Z | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from utils import read_log, plot_hist, update_fontsize, autolabel, read_p100_log
from plot_sth import Bar
import os
import plot_sth as Color
import math
OUTPUT_PATH = '/media/sf_Shared_Data/tmp/icdcs2019'
INPUT_PATH = '/media/sf_Shared_Data/tmp/icdcs2019'
num_of_nodes = [2, 4, 8, 16, 32]
#num_of_nodes = [2, 4, 8]
#num_of_nodes = [8, 80, 81, 82, 83, 85]
#num_of_nodes = [16, 32, 64]
B = 9.0 * 1024 * 1024 * 1024.0 / 8 # 10 Gbps Ethernet
#B = 56 * 1024 * 1024 * 1024.0 / 8 # 56 Gbps IB
markers = {2:'o',
4:'x',
8:'^'}
formats={2:'-', 4:'-.', 8:':', 16:'--', 32:'-*', 64: '-+'}
gmarkers = {'dense':'o',
'sparse':'x',
'topk':'x',
'gtopk':'^'}
gcolors = {'dense':'b',
'sparse':'r',
'topk':'r',
'gtopk':'g'}
def time_of_allreduce(n, M, B=B):
"""
n: number of nodes
M: size of message
B: bandwidth of link
"""
# Model 1, TernGrad, NIPS2017
#if True:
# ncost = 100 * 1e-6
# nwd = B
# return ncost * np.log2(n) + M / nwd * np.log2(n)
# Model 2, Lower bound, E. Chan, et al., 2007
if True:
#alpha = 7.2*1e-6 #Yang 2017, SC17, Scaling Deep Learning on GPU and Knights Landing clusters
#alpha = 6.25*1e-6*n # From the data gpuhome benchmark
#alpha = 12*1e-6*n # From the data gpuhome benchmark
alpha = 45.25*1e-6#*np.log2(n) # From the data gpuhome benchmark
beta = 1 / B *1.2
gamma = 1.0 / (16.0 * 1e9 * 4) * 160
M = 4*M
#t = 2*(n)*alpha + 2*(n-1)*M*beta/n + (n-1)*M*gamma/n
t = 2*(n-1)*alpha + 2*(n-1)*M*beta/n + (n-1)*M*gamma/n
return t * 1e6
ts = 7.5/ (1000.0 * 1000)# startup time in second
#seconds = (np.ceil(np.log2(n)) + n - 1) * ts + (2*n - 1 + n-1) * M / n * 1/B
#seconds = (np.ceil(np.log2(n)) + n - 1) * ts + 2 * (n - 1) * 2*M/n * 1/B
#tcompute = 1. / (2.2 * 1000 * 1000 * 1000)
tcompute = 1. / (1 * 1000 * 1000 * 1000)
#seconds = 2 * (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
#C = 1024.0 * 1024 # segmented_size
#if M > C * n:
# # ring_segmented allreduce
# seconds = (M / C + (n - 2)) * (ts + C / B + C * tcompute)
#else:
# ring allreduce, better than the above
#seconds = (n - 1) * ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
seconds = 2*(n-1)*n*ts + 2 * (n - 1) * M/n * 1/B + (n-1)*M/n * tcompute
#C = 512.0
#seconds = (M / C + n-2) * (ts + C/B)
return seconds * 1000 * 1000 # micro seconds
start = 1024*16
end = 1024*1024*4
if __name__ == '__main__':
#plot_all_communication_overheads()
#plot_p2platency()
plot_allreduce_comparison()
#realdata_speedup()
#plot_breakdown()
| 38.478796 | 141 | 0.581342 |
02527978354f0193255cdacc1cd11fc9125db75e | 2,188 | py | Python | app/routers/post.py | thiere18/fastapi-boilerplate | 6760e0e49caa915563d44897262d493b012207c0 | [
"MIT"
] | 5 | 2021-12-10T17:35:31.000Z | 2021-12-30T18:36:23.000Z | app/routers/post.py | thiere18/fastapi-boilerplate | 6760e0e49caa915563d44897262d493b012207c0 | [
"MIT"
] | 1 | 2021-11-21T13:59:03.000Z | 2021-11-21T13:59:03.000Z | app/routers/post.py | thiere18/fastapi-boilerplate | 6760e0e49caa915563d44897262d493b012207c0 | [
"MIT"
] | 1 | 2021-12-07T14:08:12.000Z | 2021-12-07T14:08:12.000Z | from logging import raiseExceptions
from typing import List
from fastapi import APIRouter,Depends,HTTPException, Response,status
from sqlalchemy.orm.session import Session
from .. database import get_db
from .. import models,schemas ,oauth2
router=APIRouter(
prefix='/posts',
tags=['Post']
) | 39.781818 | 137 | 0.743601 |
0252f8eedc296b4ab429a47459f42ba29b283dbc | 8,766 | py | Python | src/util.py | thanhnhan311201/via-line-detection | 1ba986110f7522df1b82c2cdeacd5c8bc27ac896 | [
"Unlicense"
] | null | null | null | src/util.py | thanhnhan311201/via-line-detection | 1ba986110f7522df1b82c2cdeacd5c8bc27ac896 | [
"Unlicense"
] | null | null | null | src/util.py | thanhnhan311201/via-line-detection | 1ba986110f7522df1b82c2cdeacd5c8bc27ac896 | [
"Unlicense"
] | null | null | null | import torch.nn as nn
import cv2
import torch
from copy import deepcopy
import numpy as np
from torch.autograd import Variable
from torch.autograd import Function as F
from numpy.polynomial import Polynomial as P
try:
from parameters import Parameters
except:
from src.parameters import Parameters
import math
p = Parameters()
###############################################################
##
## visualize
##
###############################################################
###############################################################
##
## calculate
##
###############################################################
| 27.828571 | 98 | 0.544832 |
0253374b375e14e18b7b22c7b40e9e638b1ad7cf | 3,322 | py | Python | src/tests/unit_tests/io_tools_test.py | samueljackson92/major-project | 5d82b875944fcf1f001f9beb5e5419ba60be3bf1 | [
"MIT"
] | 8 | 2015-01-26T16:23:29.000Z | 2020-03-17T00:57:42.000Z | src/tests/unit_tests/io_tools_test.py | samueljackson92/major-project | 5d82b875944fcf1f001f9beb5e5419ba60be3bf1 | [
"MIT"
] | 64 | 2015-02-05T06:34:56.000Z | 2015-05-03T15:46:49.000Z | src/tests/unit_tests/io_tools_test.py | samueljackson92/major-project | 5d82b875944fcf1f001f9beb5e5419ba60be3bf1 | [
"MIT"
] | null | null | null | import nose.tools
import unittest
import os
import json
import pandas as pd
import numpy as np
import mia
from mia.io_tools import *
from ..test_utils import get_file_path
| 35.340426 | 81 | 0.669175 |
0254feaa1c998dfb2faf7f35247b0cc22066d85a | 326 | py | Python | main/migrations_old/0007_remove_profile_rated_recipes.py | ggetzie/nnr | a8b1b1d771027edee2c19062f39fa982cfd024b0 | [
"MIT"
] | null | null | null | main/migrations_old/0007_remove_profile_rated_recipes.py | ggetzie/nnr | a8b1b1d771027edee2c19062f39fa982cfd024b0 | [
"MIT"
] | 5 | 2020-07-28T12:41:50.000Z | 2022-01-21T23:27:15.000Z | main/migrations_old/0007_remove_profile_rated_recipes.py | ggetzie/nnr | a8b1b1d771027edee2c19062f39fa982cfd024b0 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-09-29 13:12
from django.db import migrations
| 18.111111 | 47 | 0.588957 |
0255255ddce0aede915e8004ff48e8619c540430 | 126 | py | Python | src/timber_clay_hybrid/assembly/__init__.py | augmentedfabricationlab/Timber_Clay_Hybrid | 243efddac77970c989b551697a0e188932064849 | [
"MIT"
] | 1 | 2020-12-16T01:25:07.000Z | 2020-12-16T01:25:07.000Z | src/timber_clay_hybrid/assembly/__init__.py | augmentedfabricationlab/timber_clay_hybrid | 243efddac77970c989b551697a0e188932064849 | [
"MIT"
] | null | null | null | src/timber_clay_hybrid/assembly/__init__.py | augmentedfabricationlab/timber_clay_hybrid | 243efddac77970c989b551697a0e188932064849 | [
"MIT"
] | null | null | null | from .assembly import HRCAssembly
from .element import HRCElement
from .artist import AssemblyArtist
from .utilities import *
| 25.2 | 34 | 0.833333 |
025829c61e2b13a8ebf606a7afdd54a016dd8119 | 3,674 | py | Python | backend/api/tests/schema/test_newsletter.py | pauloxnet/pycon | 82b6eff76dcc785865ea3ffd97a45e931c0add26 | [
"MIT"
] | 2 | 2017-07-18T21:51:25.000Z | 2017-12-23T11:08:39.000Z | backend/api/tests/schema/test_newsletter.py | pauloxnet/pycon | 82b6eff76dcc785865ea3ffd97a45e931c0add26 | [
"MIT"
] | 23 | 2017-07-18T20:22:38.000Z | 2018-01-05T05:45:15.000Z | backend/api/tests/schema/test_newsletter.py | pauloxnet/pycon | 82b6eff76dcc785865ea3ffd97a45e931c0add26 | [
"MIT"
] | 2 | 2017-07-18T21:27:33.000Z | 2017-07-18T22:07:03.000Z | from unittest.mock import patch
import pytest
from pytest import mark
from integrations.mailchimp import SubscriptionResult
from newsletters.models import Subscription
| 27.833333 | 78 | 0.617583 |
02591832a76c44befd1384a4984c9e645f451a38 | 3,077 | py | Python | conference_lib/confemailrecipients.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | conference_lib/confemailrecipients.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | 1 | 2020-02-05T13:00:29.000Z | 2020-02-05T13:00:29.000Z | conference_lib/confemailrecipients.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
from google.appengine.ext import ndb
# Local imports
import confoptions
from scaffold import sorrypage, userrightsnames
import basehandler
| 40.486842 | 109 | 0.653559 |
0259184a3f3d6c2f7159bf04b270b9b14a650178 | 891 | py | Python | jexam/argparser.py | chrispyles/jexam | ebe83b170f51c5820e0c93955824c3798922f097 | [
"BSD-3-Clause"
] | 1 | 2020-07-25T02:36:38.000Z | 2020-07-25T02:36:38.000Z | jexam/argparser.py | chrispyles/jexam | ebe83b170f51c5820e0c93955824c3798922f097 | [
"BSD-3-Clause"
] | null | null | null | jexam/argparser.py | chrispyles/jexam | ebe83b170f51c5820e0c93955824c3798922f097 | [
"BSD-3-Clause"
] | null | null | null | #################################
##### jExam Argument Parser #####
#################################
import argparse
def get_parser():
    """Build the command-line argument parser used by jExam.

    Returns:
        ``argparse.ArgumentParser``: parser recognising the jExam CLI options
    """
    arg_parser = argparse.ArgumentParser()

    # positional arguments: the master notebook and the (optional) output dir
    arg_parser.add_argument(
        "master", type=str, help="Path to exam master notebook"
    )
    arg_parser.add_argument(
        "result",
        nargs="?",
        default="dist",
        help="Path at which to write output notebooks",
    )

    # optional flags
    arg_parser.add_argument(
        "-f", "--format",
        type=str,
        default="otter",
        help="Name of autograder format; 'otter' or 'ok'",
    )
    arg_parser.add_argument(
        "-s", "--seed",
        type=int,
        default=None,
        help="Random seed for NumPy to run before execution",
    )
    arg_parser.add_argument(
        "-q", "--quiet",
        default=False,
        action="store_true",
        help="Run without printing status",
    )

    return arg_parser
| 42.428571 | 119 | 0.628507 |
0259bea6f07ec94194968114adbb7688e3c79035 | 236 | py | Python | basic/Pyshop/products/models.py | IsAlbertLiu/Python-basics | 49c0c93fb7d1abb70548854b69346eb5837ba00d | [
"MIT"
] | null | null | null | basic/Pyshop/products/models.py | IsAlbertLiu/Python-basics | 49c0c93fb7d1abb70548854b69346eb5837ba00d | [
"MIT"
] | null | null | null | basic/Pyshop/products/models.py | IsAlbertLiu/Python-basics | 49c0c93fb7d1abb70548854b69346eb5837ba00d | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 23.6 | 43 | 0.724576 |
0259fbe373b86b3d2859b384b23af03bfb7c829a | 758 | py | Python | examples/delta_setitem/001_check_setitem.py | pkicsiny/xpart | cddf3eb65ffc198c22dd37204139ce3177a9bd96 | [
"MIT"
] | null | null | null | examples/delta_setitem/001_check_setitem.py | pkicsiny/xpart | cddf3eb65ffc198c22dd37204139ce3177a9bd96 | [
"MIT"
] | null | null | null | examples/delta_setitem/001_check_setitem.py | pkicsiny/xpart | cddf3eb65ffc198c22dd37204139ce3177a9bd96 | [
"MIT"
] | null | null | null | import numpy as np
import xpart as xp
import xobjects as xo
#context = xo.ContextPyopencl()
context = xo.ContextCpu()
ctx2np = context.nparray_from_context_array
particles = xp.Particles(_context=context, p0c=26e9, delta=[1,2,3])
assert ctx2np(particles.delta[2]) == 3
assert np.isclose(ctx2np(particles.rvv[2]), 1.00061, rtol=0, atol=1e-5)
assert np.isclose(ctx2np(particles.rpp[2]), 0.25, rtol=0, atol=1e-10)
assert np.isclose(ctx2np(particles.ptau[2]), 3.001464*particles._xobject.beta0[0],
rtol=0, atol=1e-6)
particles.delta[1] = particles.delta[2]
assert particles.delta[2] == particles.delta[1]
assert particles.ptau[2] == particles.ptau[1]
assert particles.rpp[2] == particles.rpp[1]
assert particles.rvv[2] == particles.rvv[1]
| 32.956522 | 82 | 0.726913 |
025a143f5cc2381ed79e2e47f4c56370b64d62d8 | 9,628 | py | Python | tests/test_train_eval_mode.py | glmcdona/stable-baselines3-contrib | 91f9b1ed34fbaa9243a044ea67aa4c677663bfc2 | [
"MIT"
] | 93 | 2020-10-22T14:44:58.000Z | 2022-03-25T20:06:47.000Z | tests/test_train_eval_mode.py | glmcdona/stable-baselines3-contrib | 91f9b1ed34fbaa9243a044ea67aa4c677663bfc2 | [
"MIT"
] | 36 | 2020-10-26T11:13:23.000Z | 2022-03-31T15:11:05.000Z | tests/test_train_eval_mode.py | glmcdona/stable-baselines3-contrib | 91f9b1ed34fbaa9243a044ea67aa4c677663bfc2 | [
"MIT"
] | 50 | 2020-12-06T14:21:10.000Z | 2022-03-31T14:25:36.000Z | from typing import Union
import gym
import numpy as np
import pytest
import torch as th
import torch.nn as nn
from stable_baselines3.common.preprocessing import get_flattened_obs_dim
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from sb3_contrib import QRDQN, TQC, MaskablePPO
from sb3_contrib.common.envs import InvalidActionEnvDiscrete
from sb3_contrib.common.maskable.utils import get_action_masks
def clone_batch_norm_stats(batch_norm: nn.BatchNorm1d) -> (th.Tensor, th.Tensor):
    """
    Snapshot the bias and running mean of a batch-norm layer.

    The returned tensors are independent copies, so they can be compared
    against the layer's statistics after further training steps.

    :param batch_norm: the layer whose statistics are copied
    :return: the bias and running mean
    """
    bias_copy = batch_norm.bias.clone()
    running_mean_copy = batch_norm.running_mean.clone()
    return bias_copy, running_mean_copy
def clone_qrdqn_batch_norm_stats(model: QRDQN) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor):
    """
    Clone the bias and running mean from the quantile network and quantile-target network.

    :param model: a QRDQN model whose feature extractors expose a
        ``batch_norm`` attribute
    :return: the bias and running mean from the quantile network and quantile-target network
    """
    # Online quantile network statistics.
    quantile_net_batch_norm = model.policy.quantile_net.features_extractor.batch_norm
    quantile_net_bias, quantile_net_running_mean = clone_batch_norm_stats(quantile_net_batch_norm)
    # Target quantile network statistics (tracked separately so tests can
    # check which of the two networks was updated).
    quantile_net_target_batch_norm = model.policy.quantile_net_target.features_extractor.batch_norm
    quantile_net_target_bias, quantile_net_target_running_mean = clone_batch_norm_stats(quantile_net_target_batch_norm)
    return quantile_net_bias, quantile_net_running_mean, quantile_net_target_bias, quantile_net_target_running_mean
def clone_tqc_batch_norm_stats(
    model: TQC,
) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor):
    """
    Clone the bias and running mean from the actor and critic networks and critic-target networks.

    :param model: a TQC model whose actor/critic feature extractors expose a
        ``batch_norm`` attribute
    :return: the bias and running mean from the actor and critic networks and critic-target networks
    """
    # Actor network statistics.
    actor_batch_norm = model.actor.features_extractor.batch_norm
    actor_bias, actor_running_mean = clone_batch_norm_stats(actor_batch_norm)
    # Critic network statistics.
    critic_batch_norm = model.critic.features_extractor.batch_norm
    critic_bias, critic_running_mean = clone_batch_norm_stats(critic_batch_norm)
    # Critic-target network statistics (kept separate from the critic's so
    # tests can verify target-network update behaviour).
    critic_target_batch_norm = model.critic_target.features_extractor.batch_norm
    critic_target_bias, critic_target_running_mean = clone_batch_norm_stats(critic_target_batch_norm)
    return (actor_bias, actor_running_mean, critic_bias, critic_running_mean, critic_target_bias, critic_target_running_mean)
# Maps each algorithm class to the helper that snapshots its batch-norm
# statistics. NOTE(review): clone_on_policy_batch_norm is not defined in this
# visible chunk — presumably defined elsewhere in this file; verify.
CLONE_HELPERS = {
    QRDQN: clone_qrdqn_batch_norm_stats,
    TQC: clone_tqc_batch_norm_stats,
    MaskablePPO: clone_on_policy_batch_norm,
}
| 35.791822 | 125 | 0.745015 |
025a4cb24f7a49faae7c43b7347971470e80c885 | 880 | py | Python | test_harness.py | alexk307/server-exercise | 31c76a3b370334a22787e06b4c28f8c65f4dd4ff | [
"Apache-2.0"
] | null | null | null | test_harness.py | alexk307/server-exercise | 31c76a3b370334a22787e06b4c28f8c65f4dd4ff | [
"Apache-2.0"
] | null | null | null | test_harness.py | alexk307/server-exercise | 31c76a3b370334a22787e06b4c28f8c65f4dd4ff | [
"Apache-2.0"
] | null | null | null | from requests import post
from random import randrange
from uuid import uuid4
import base64
import json
PORT = 6789
MAX_SIZE_UDP = 65535
HEADER_SIZE = 12
NUM_TRANSACTIONS = 10
SERVER = 'http://localhost:1234/add'
if __name__ == '__main__':
main()
| 22 | 69 | 0.582955 |
025c24bac13de507908c7c75d29225711dbc0aef | 2,414 | py | Python | checkmate_comp/experiments/table_approx_speedup_ratios.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 90 | 2020-06-18T05:32:06.000Z | 2022-03-28T13:05:17.000Z | checkmate_comp/experiments/table_approx_speedup_ratios.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | 5 | 2020-07-02T02:25:16.000Z | 2022-03-24T05:50:30.000Z | checkmate_comp/experiments/table_approx_speedup_ratios.py | uwsampl/dtr-prototype | eff53cc4804cc7d6246a6e5086861ce2b846f62b | [
"Linux-OpenIB"
] | 13 | 2020-06-27T07:01:54.000Z | 2022-01-18T07:31:01.000Z | from experiments.common.definitions import remat_data_dir
import numpy as np
import pandas as pd
import glob
import re
# compute aggregated tables of max and geomean lp approximation ratios
exp_name_re = re.compile(r"^(?P<platform>.+?)_(?P<model_name>.+?)_(?P<batch_size>[0-9]+?)_(?P<input_shape>None|.+?)$")
dfs = []
for path in (remat_data_dir() / 'budget_sweep').glob('**/slowdowns.csv'):
slowdown_df = pd.read_csv(path)
matches = exp_name_re.match(path.parents[0].name)
model_name = matches.group('model_name')
slowdown_df['Model name'] = [model_name] * len(slowdown_df)
dfs.append(slowdown_df)
df = pd.concat(dfs)
del df['Unnamed: 0']
for valuekey in ['geomean_slowdown', 'max']:
pivot_df = pd.pivot_table(df, values=valuekey, index=['Model name'], columns=['method'])
pivot_df.to_csv(remat_data_dir() / 'budget_sweep' / f"{valuekey}_aggr.csv")
# compute lp relaxation speedups
ilp_runtime_dict = {}
lp_runtime_dict = {}
for model in ['p32xlarge_vgg_unet_32_None', 'p32xlarge_ResNet50_256_None', 'p32xlarge_MobileNet_512_None', 'p32xlarge_VGG16_256_None', 'p32xlarge_VGG19_256_None']:
ilp_matcher = re.compile(r"Explored [0-9]+ nodes \([0-9]+ simplex iterations\) in (?P<ilp_runtime>[0-9\.]+) seconds")
lp_matcher = re.compile(r"Solved in [0-9]+ iterations and (?P<lp_runtime>[0-9\.]+) seconds")
ilp_runtimes = []
for path in (remat_data_dir() / 'budget_sweep' / model / 'ilp_log').glob('./*.log'):
with path.open('r') as f:
file_contents = f.read()
if 'Model is infeasible' in file_contents:
continue
match = ilp_matcher.search(file_contents)
ilp_runtimes.append(float(match.group('ilp_runtime')))
lp_runtimes = []
for path in (remat_data_dir() / 'budget_sweep' / 'p32xlarge_vgg_unet_32_None' / 'lp_det_05').glob('./*.log'):
with path.open('r') as f:
file_contents = f.read()
if 'Model is infeasible' in file_contents:
continue
match = lp_matcher.search(file_contents)
lp_runtimes.append(float(match.group('lp_runtime')))
print("Speedup for {} is {:0.2f} ({:.2f} versus {:.2f}, count {} vs {})".format(model, np.median(ilp_runtimes) / np.median(lp_runtimes), np.mean(ilp_runtimes), np.mean(lp_runtimes), len(ilp_runtimes), len(lp_runtimes)))
ilp_runtime_dict[model] = ilp_runtimes
lp_runtime_dict[model] = lp_runtimes
| 47.333333 | 223 | 0.67937 |
025c491da627375770263331eb452c03d4b317b0 | 431 | py | Python | src/terra/contracts/levana.py | fentas/staketaxcsv | ad37a32d8864111dbf88e926b80eb4ccacb921c6 | [
"MIT"
] | null | null | null | src/terra/contracts/levana.py | fentas/staketaxcsv | ad37a32d8864111dbf88e926b80eb4ccacb921c6 | [
"MIT"
] | null | null | null | src/terra/contracts/levana.py | fentas/staketaxcsv | ad37a32d8864111dbf88e926b80eb4ccacb921c6 | [
"MIT"
] | null | null | null | # known contracts from protocol
CONTRACTS = [
# NFT - Meteor Dust
"terra1p70x7jkqhf37qa7qm4v23g4u4g8ka4ktxudxa7",
# NFT - Eggs
"terra1k0y373yxqne22pc9g7jvnr4qclpsxtafevtrpg",
# NFT - Dragons
"terra1vhuyuwwr4rkdpez5f5lmuqavut28h5dt29rpn6",
# NFT - Loot
"terra14gfnxnwl0yz6njzet4n33erq5n70wt79nm24el",
]
| 26.9375 | 51 | 0.723898 |
025c55086785bd2358aa07697fa9e5ff75a7e9fe | 2,268 | py | Python | github/migrations/0007_auto_20201003_1239.py | h3nnn4n/git-o-matic-9k | d8241cc768591e0f41c02b2057d7b56697a4cc86 | [
"MIT"
] | null | null | null | github/migrations/0007_auto_20201003_1239.py | h3nnn4n/git-o-matic-9k | d8241cc768591e0f41c02b2057d7b56697a4cc86 | [
"MIT"
] | null | null | null | github/migrations/0007_auto_20201003_1239.py | h3nnn4n/git-o-matic-9k | d8241cc768591e0f41c02b2057d7b56697a4cc86 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-03 12:39
from django.db import migrations, models
import django.utils.timezone
| 29.076923 | 74 | 0.543651 |
025c8c73c3dda45b9c81e36fafb6a8137598b6d5 | 254 | py | Python | tests/unit/test_databeardb.py | chrisrycx/pyDataLogger | 21094da9de54ab467519a26680247ddc3efa6696 | [
"MIT"
] | 1 | 2020-09-25T16:25:09.000Z | 2020-09-25T16:25:09.000Z | tests/unit/test_databeardb.py | chrisrycx/pyDataLogger | 21094da9de54ab467519a26680247ddc3efa6696 | [
"MIT"
] | 4 | 2020-10-06T17:16:58.000Z | 2020-12-18T17:06:16.000Z | tests/unit/test_databeardb.py | chrisrycx/pyDataLogger | 21094da9de54ab467519a26680247ddc3efa6696 | [
"MIT"
] | 2 | 2020-03-24T14:32:29.000Z | 2020-08-05T17:38:24.000Z | '''
A unit test for databearDB.py
Runs manually at this point...
'''
import unittest
from databear.databearDB import DataBearDB
#Tests
| 14.111111 | 42 | 0.622047 |
025ca2353166896f2415d32f2b2cf83266307837 | 19 | py | Python | dbt/adapters/athena/__version__.py | sacundim/dbt-athena | 120c9d3c88da98ec11ddfcf0a0a3fda49538f197 | [
"Apache-2.0"
] | 92 | 2019-03-23T07:23:55.000Z | 2021-06-15T18:18:32.000Z | dbt/adapters/athena/__version__.py | sacundim/dbt-athena | 120c9d3c88da98ec11ddfcf0a0a3fda49538f197 | [
"Apache-2.0"
] | 156 | 2019-03-21T03:26:58.000Z | 2021-06-29T15:30:51.000Z | dbt/adapters/athena/__version__.py | sacundim/dbt-athena | 120c9d3c88da98ec11ddfcf0a0a3fda49538f197 | [
"Apache-2.0"
] | 58 | 2019-04-12T09:09:43.000Z | 2021-06-24T15:25:11.000Z | version = "0.21.0"
| 9.5 | 18 | 0.578947 |
025d05b924cc7305e801b76dce5c6ec01a360e7c | 1,161 | py | Python | dxtbx/conftest.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | dxtbx/conftest.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | dxtbx/conftest.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | #
# See https://github.com/dials/dials/wiki/pytest for documentation on how to
# write and run pytest tests, and an overview of the available features.
#
from __future__ import absolute_import, division, print_function
import os
import pytest
def pytest_addoption(parser):
'''Add '--regression' options to pytest.'''
parser.addoption("--regression", action="store_true", default=False,
help="run (time-intensive) regression tests")
def pytest_collection_modifyitems(config, items):
'''Tests marked as regression are only run with --regression.
'''
if not config.getoption("--regression"):
skip_regression = pytest.mark.skip(reason="Test only runs with --regression")
for item in items:
if "regression" in item.keywords:
item.add_marker(skip_regression)
| 33.171429 | 81 | 0.731266 |
025e3d2d32267b02443190a02969375302ba67a9 | 978 | py | Python | ietf/review/migrations/0020_auto_20191115_2059.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/review/migrations/0020_auto_20191115_2059.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/review/migrations/0020_auto_20191115_2059.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | # Copyright The IETF Trust 2019-2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-15 20:59
from django.db import migrations, models
| 36.222222 | 231 | 0.677914 |
02618a7eed33bdfbec9b651a6841eb4fcf49a22c | 1,663 | py | Python | utils/auth.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | null | null | null | utils/auth.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | 2 | 2021-05-21T13:26:26.000Z | 2022-02-10T10:04:55.000Z | utils/auth.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import exceptions
from notifai_recruitment import settings
| 47.514286 | 120 | 0.736019 |
02623225e5d363b265ee6e56ba38be5191b44c1f | 435 | py | Python | scripts/issues/issue6.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 179 | 2019-12-04T14:54:53.000Z | 2022-03-30T09:08:38.000Z | scripts/issues/issue6.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 62 | 2019-12-14T16:51:28.000Z | 2022-03-19T18:47:12.000Z | scripts/issues/issue6.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 35 | 2019-12-08T13:19:53.000Z | 2022-03-25T10:33:02.000Z | import panel as pn
main()
| 19.772727 | 75 | 0.542529 |
02649919ebe1649e2c617d8a536cb6343e919b0b | 18,257 | py | Python | Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py | melwyncarlo/Virtual_Internship_Programmes | 1d1ae99abd63765d69ce930438c4bd6d15bd3d45 | [
"CC0-1.0"
] | null | null | null | Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py | melwyncarlo/Virtual_Internship_Programmes | 1d1ae99abd63765d69ce930438c4bd6d15bd3d45 | [
"CC0-1.0"
] | null | null | null | Electronic_Arts_Software_Engineering_Virtual_Program/Task_1/Vaxman_in_Python/vaxman.py | melwyncarlo/Virtual_Internship_Programmes | 1d1ae99abd63765d69ce930438c4bd6d15bd3d45 | [
"CC0-1.0"
] | null | null | null | # Vax-Man, a re-implementation of Pacman, in Python, with PyGame.
# Forked from: https://github.com/hbokmann/Pacman
# Edited by Melwyn Francis Carlo (2021)
# Video link: https://youtu.be/ZrqZEC6DvMc
import time
import pygame
# Ghosts multiply themselves every thirty seconds.
GHOST_MULTIPLICATION_TIME_GAP = 30
# Thirty-two times for each ghost type.
MAXIMUM_GHOSTS = 32 * 4;
indigo = ( 85, 48, 141 )
yellow = ( 255, 255, 0 )
darkRed = ( 201, 33, 30 )
darkGrey = ( 28, 28, 28 )
lightGrey = ( 238, 238, 238 )
Vaxman_icon=pygame.image.load('images/Vaxman_Big.png')
pygame.display.set_icon(Vaxman_icon)
# Add music
# Spook4 by PeriTune | http://peritune.com
# Attribution 4.0 International (CC BY 4.0)
# https://creativecommons.org/licenses/by/4.0/
# Music promoted by https://www.chosic.com/free-music/all/
pygame.mixer.init()
pygame.mixer.music.load('peritune-spook4.mp3')
pygame.mixer.music.play(-1, 0.0)
# This class represents the bar at the bottom that the player controls
# This creates all the walls in room 1
def setupRoomOne(all_sprites_list):
    """Build the maze walls for room 1.

    Creates one Wall sprite per rectangle in the hard-coded layout below,
    adds each wall both to *all_sprites_list* (for drawing) and to a new
    dedicated group, and returns that group (used for collision checks).
    The Wall class is defined elsewhere in this file; its constructor
    appears to take (x, y, width, height, colour) — confirm against the
    class definition.
    """
    # Make the walls. (x_pos, y_pos, width, height)
    wall_list = pygame.sprite.RenderPlain()

    # This is a list of walls. Each is in the form [x, y, width, height]
    walls = [[0, 0, 6, 600],
             [0, 0, 600, 6],
             [0, 600, 606, 6],
             [600, 0, 6, 606],
             [300, 0, 6, 66],
             [60, 60, 186, 6],
             [360, 60, 186, 6],
             [60, 120, 66, 6],
             [60, 120, 6, 126],
             [180, 120, 246, 6],
             [300, 120, 6, 66],
             [480, 120, 66, 6],
             [540, 120, 6, 126],
             [120, 180, 126, 6],
             [120, 180, 6, 126],
             [360, 180, 126, 6],
             [480, 180, 6, 126],
             [180, 240, 6, 126],
             [180, 360, 246, 6],
             [420, 240, 6, 126],
             [240, 240, 42, 6],
             [324, 240, 42, 6],
             [240, 240, 6, 66],
             [240, 300, 126, 6],
             [360, 240, 6, 66],
             [0, 300, 66, 6],
             [540, 300, 66, 6],
             [60, 360, 66, 6],
             [60, 360, 6, 186],
             [480, 360, 66, 6],
             [540, 360, 6, 186],
             [120, 420, 366, 6],
             [120, 420, 6, 66],
             [480, 420, 6, 66],
             [180, 480, 246, 6],
             [300, 480, 6, 66],
             [120, 540, 126, 6],
             [360, 540, 126, 6]
             ]

    # Loop through the list. Create the wall, add it to the list.
    for item in walls:
        wall = Wall(item[0], item[1], item[2], item[3], indigo)
        wall_list.add(wall)
        all_sprites_list.add(wall)

    # Return our new list.
    return wall_list
def setupGate(all_sprites_list):
    """Create the ghost-house gate and return it as its own sprite group.

    The gate is a single thin (2 px high) horizontal Wall added both to
    *all_sprites_list* (for drawing) and to the returned group, which the
    game presumably uses to block the player but not the ghosts — confirm
    against the collision logic elsewhere in this file.
    """
    gate = pygame.sprite.RenderPlain()
    gate.add(Wall(282, 242, 42, 2, lightGrey))
    all_sprites_list.add(gate)
    return gate
# This class represents the ball
# It derives from the "Sprite" class in Pygame
# This class represents the bar at the bottom that the player controls
#Inheritime Player klassist
Pinky_directions = [
[0,-30,4],
[15,0,9],
[0,15,11],
[-15,0,23],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,19],
[0,15,3],
[15,0,3],
[0,15,3],
[15,0,3],
[0,-15,15],
[-15,0,7],
[0,15,3],
[-15,0,19],
[0,-15,11],
[15,0,9]
]
Blinky_directions = [
[0,-15,4],
[15,0,9],
[0,15,11],
[15,0,3],
[0,15,7],
[-15,0,11],
[0,15,3],
[15,0,15],
[0,-15,15],
[15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,3],
[-15,0,7],
[0,-15,3],
[15,0,15],
[0,15,15],
[-15,0,3],
[0,15,3],
[-15,0,3],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,11],
[0,-15,7],
[15,0,5]
]
Inky_directions = [
[30,0,2],
[0,-15,4],
[15,0,10],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,3],
[0,-15,15],
[-15,0,15],
[0,15,3],
[15,0,15],
[0,15,11],
[-15,0,3],
[0,-15,7],
[-15,0,11],
[0,15,3],
[-15,0,11],
[0,15,7],
[-15,0,3],
[0,-15,3],
[-15,0,3],
[0,-15,15],
[15,0,15],
[0,15,3],
[-15,0,15],
[0,15,11],
[15,0,3],
[0,-15,11],
[15,0,11],
[0,15,3],
[15,0,1],
]
Clyde_directions = [
[-30,0,2],
[0,-15,4],
[15,0,5],
[0,15,7],
[-15,0,11],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,7],
[0,15,15],
[15,0,15],
[0,-15,3],
[-15,0,11],
[0,-15,7],
[15,0,3],
[0,-15,11],
[15,0,9],
]
pl = len(Pinky_directions) - 1
bl = len(Blinky_directions) - 1
il = len(Inky_directions) - 1
cl = len(Clyde_directions) - 1
# Call this function so the Pygame library can initialize itself
pygame.init()
# Create an 606x606 sized screen
screen = pygame.display.set_mode([606, 606])
# This is a list of 'sprites.' Each block in the program is
# added to this list. The list is managed by a class called 'RenderPlain.'
# Set the title of the window
pygame.display.set_caption('Melly the Vax-Man')
# Create a surface we can draw on
background = pygame.Surface(screen.get_size())
# Used for converting color maps and such
background = background.convert()
# Fill the screen with a dark grey background
background.fill(darkGrey)
clock = pygame.time.Clock()
pygame.font.init()
font = pygame.font.Font("freesansbold.ttf", 24)
#default locations for Vax-Man and ghosts
w = 303 - 16 # Width
p_h = 19 + (7 * 60) # Vax-Man height
m_h = 19 + (4 * 60) # Monster height
b_h = 19 + (3 * 60) # Binky height
i_w = 303 - 16 - 32 # Inky width
c_w = 303 + (32 - 16) # Clyde width
startGame()
pygame.quit()
| 29.025437 | 143 | 0.570083 |
02672c292331f32c5416bda0b2eba29281a17676 | 1,320 | py | Python | examples/ecr/rl_formulations/common/state_shaper.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | examples/ecr/rl_formulations/common/state_shaper.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | examples/ecr/rl_formulations/common/state_shaper.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
from maro.rl import AbstractStateShaper
| 45.517241 | 117 | 0.724242 |
0267eac0bf1a3be3319a75260f8b10b9d6a39d75 | 2,834 | py | Python | src/runner.py | Shahrukh-Badar/DeepLearning | 5f6bbd6f8ace06014f10e35183442901d984b231 | [
"MIT"
] | null | null | null | src/runner.py | Shahrukh-Badar/DeepLearning | 5f6bbd6f8ace06014f10e35183442901d984b231 | [
"MIT"
] | null | null | null | src/runner.py | Shahrukh-Badar/DeepLearning | 5f6bbd6f8ace06014f10e35183442901d984b231 | [
"MIT"
] | null | null | null | from os import listdir
from os.path import join, isfile
import json
from random import randint
#########################################
## START of part that students may change
from code_completion_baseline import Code_Completion_Baseline
training_dir = "./../../programs_800/"
query_dir = "./../../programs_200/"
model_file = "./../../trained_model"
use_stored_model = False
max_hole_size = 2
simplify_tokens = True
## END of part that students may change
#########################################
# load sequences of tokens from files
# removes up to max_hole_size tokens
# checks if two sequences of tokens are identical
#########################################
## START of part that students may change
code_completion = Code_Completion_Baseline()
## END of part that students may change
#########################################
# train the network
training_token_lists = load_tokens(training_dir)
if use_stored_model:
code_completion.load(training_token_lists, model_file)
else:
code_completion.train(training_token_lists, model_file)
# query the network and measure its accuracy
query_token_lists = load_tokens(query_dir)
correct = incorrect = 0
for tokens in query_token_lists:
(prefix, expected, suffix) = create_hole(tokens)
completion = code_completion.query(prefix, suffix)
if same_tokens(completion, expected):
correct += 1
else:
incorrect += 1
accuracy = correct / (correct + incorrect)
print("Accuracy: " + str(correct) + " correct vs. " + str(incorrect) + " incorrect = " + str(accuracy))
| 32.953488 | 127 | 0.650318 |
0268d3f7d9cf4572520e699a426fa385cc8944bc | 4,491 | py | Python | superhelp/formatters/cli_formatter.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | 27 | 2020-05-17T20:48:43.000Z | 2022-01-08T21:32:30.000Z | superhelp/formatters/cli_formatter.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | null | null | null | superhelp/formatters/cli_formatter.py | grantps/superhelp | d8e861bf1ad91571ac23b9c833a8cd461bb1952f | [
"MIT"
] | null | null | null | from pathlib import Path
from textwrap import dedent
from superhelp import conf
from superhelp.conf import Level, Theme
from superhelp.formatters.cli_extras import md2cli
from superhelp.formatters.cli_extras.cli_colour import set_global_colours
from superhelp.gen_utils import (get_code_desc, get_intro,
get_line_numbered_snippet, layout_comment as layout)
"""
Note - displays properly in the terminal but not necessarily in other output
e.g. Eclipse console.
Lots in common with md displayer but risks of DRYing probably outweigh benefits
at this stage.
Probably should swap out for https://github.com/willmcgugan/rich
"""
TERMINAL_WIDTH = 80
MDV_CODE_BOUNDARY = "```"
def _need_snippet_displayed(overall_messages_dets, block_messages_dets, *,
multi_block=False):
"""
Don't need to see the code snippet displayed when it is already visible:
* because there is only one block in snippet and there is a block message
for it (which will display the block i.e. the entire snippet) UNLESS there
is an overall message separating them
Otherwise we need it displayed.
"""
mono_block_snippet = not multi_block
if mono_block_snippet and block_messages_dets and not overall_messages_dets:
return False
return True
def get_formatted_help(code: str, file_path: Path, messages_dets, *,
        detail_level: Level = Level.BRIEF, theme_name: Theme = Theme.LIGHT,
        warnings_only=False, multi_block=False) -> str:
    """
    Show by code blocks.

    Assemble the full terminal help text for a snippet: an intro section,
    optionally the line-numbered snippet itself, then overall messages,
    then per-block messages grouped under a header per code block.

    :param code: the snippet's source code
    :param file_path: path the snippet came from (used for descriptions)
    :param messages_dets: (overall_messages_dets, block_messages_dets) pair
    :param detail_level: how much detail each message shows
    :param theme_name: colour theme for terminal output
    :param warnings_only: True when only warnings are being shown
    :param multi_block: True when the snippet has more than one code block
    :return: the complete, colourised help text as one string
    """
    set_global_colours(theme_name)
    md2cli.term_columns = TERMINAL_WIDTH
    if warnings_only:
        options_msg = conf.WARNINGS_ONLY_MSG
    else:
        options_msg = conf.ALL_HELP_SHOWING_MSG
    intro = get_intro(file_path, multi_block=multi_block)
    # The intro section is always first in the rendered output.
    text = [
        md2cli.main(layout(f"""\
            # SuperHELP - Help for Humans!
            {intro}
            Currently showing {detail_level} content as requested.
            {options_msg}.
            {conf.MISSING_ADVICE_MESSAGE}
            ## Help by spreading the word about SuperHELP on social media.
            {conf.FORCE_SPLIT}Twitter: {conf.TWITTER_HANDLE}. Thanks!
            """
        )),
    ]
    overall_messages_dets, block_messages_dets = messages_dets
    # Skip re-printing the snippet when a lone block message already shows it.
    display_snippet = _need_snippet_displayed(
        overall_messages_dets, block_messages_dets, multi_block=multi_block)
    if display_snippet:
        line_numbered_snippet = get_line_numbered_snippet(code)
        code_desc = get_code_desc(file_path)
        text.append(md2cli.main(dedent(
            f"## {code_desc}"
            f"\n{MDV_CODE_BOUNDARY}\n"
            + line_numbered_snippet
            + f"\n{MDV_CODE_BOUNDARY}")))
    for message_dets in overall_messages_dets:
        message = get_message(message_dets, detail_level)
        text.append(message)
    # Group block messages by their block; warnings sort after non-warnings.
    block_messages_dets.sort(key=lambda nt: (nt.first_line_no, nt.warning))
    prev_line_no = None
    for message_dets in block_messages_dets:
        ## display code for line number (once ;-))
        line_no = message_dets.first_line_no
        new_block = (line_no != prev_line_no)
        if new_block:
            block_has_warning_header = False
            text.append(md2cli.main(dedent(
                f'## Code block starting line {line_no:,}'
                f"\n{MDV_CODE_BOUNDARY}\n"
                + message_dets.code_str
                + f"\n{MDV_CODE_BOUNDARY}")))
            prev_line_no = line_no
        # Emit the warnings sub-header once per block, before its first warning.
        if message_dets.warning and not block_has_warning_header:
            text.append(md2cli.main(layout("""\
                ### Questions / Warnings
                There may be some issues with this code block you want to
                address.
                """)))
            block_has_warning_header = True
        ## process message
        message = get_message(message_dets, detail_level)
        text.append(message)
    formatted_help = '\n'.join(text)
    return formatted_help
0268e7698751adcedb3a0f8d62ab2e3667fd33f3 | 4,941 | py | Python | atom/instance.py | enthought/atom | 1f194e3550d62c4ca1d79521dff97531ffe3f0ac | [
"BSD-3-Clause"
] | null | null | null | atom/instance.py | enthought/atom | 1f194e3550d62c4ca1d79521dff97531ffe3f0ac | [
"BSD-3-Clause"
] | 1 | 2020-12-04T10:11:07.000Z | 2020-12-04T10:13:46.000Z | atom/instance.py | enthought/atom | 1f194e3550d62c4ca1d79521dff97531ffe3f0ac | [
"BSD-3-Clause"
] | 1 | 2020-12-04T10:05:32.000Z | 2020-12-04T10:05:32.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2013, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .catom import (
Member, DEFAULT_FACTORY, DEFAULT_VALUE, USER_DEFAULT, VALIDATE_INSTANCE,
USER_VALIDATE
)
| 36.065693 | 79 | 0.608379 |
0268f15772e163a48707362a23538e64ee3c364e | 4,744 | py | Python | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/_tables.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"access_modes": "accessModes",
"api_group": "apiGroup",
"api_version": "apiVersion",
"app_protocol": "appProtocol",
"association_status": "associationStatus",
"available_nodes": "availableNodes",
"change_budget": "changeBudget",
"client_ip": "clientIP",
"cluster_ip": "clusterIP",
"config_ref": "configRef",
"daemon_set": "daemonSet",
"data_source": "dataSource",
"elasticsearch_association_status": "elasticsearchAssociationStatus",
"elasticsearch_ref": "elasticsearchRef",
"expected_nodes": "expectedNodes",
"external_i_ps": "externalIPs",
"external_name": "externalName",
"external_traffic_policy": "externalTrafficPolicy",
"file_realm": "fileRealm",
"health_check_node_port": "healthCheckNodePort",
"ip_family": "ipFamily",
"kibana_association_status": "kibanaAssociationStatus",
"kibana_ref": "kibanaRef",
"last_probe_time": "lastProbeTime",
"last_transition_time": "lastTransitionTime",
"load_balancer_ip": "loadBalancerIP",
"load_balancer_source_ranges": "loadBalancerSourceRanges",
"match_expressions": "matchExpressions",
"match_labels": "matchLabels",
"max_surge": "maxSurge",
"max_unavailable": "maxUnavailable",
"min_available": "minAvailable",
"node_port": "nodePort",
"node_sets": "nodeSets",
"pod_disruption_budget": "podDisruptionBudget",
"pod_template": "podTemplate",
"publish_not_ready_addresses": "publishNotReadyAddresses",
"remote_clusters": "remoteClusters",
"rolling_update": "rollingUpdate",
"secret_name": "secretName",
"secret_token_secret": "secretTokenSecret",
"secure_settings": "secureSettings",
"self_signed_certificate": "selfSignedCertificate",
"service_account_name": "serviceAccountName",
"session_affinity": "sessionAffinity",
"session_affinity_config": "sessionAffinityConfig",
"storage_class_name": "storageClassName",
"subject_alt_names": "subjectAltNames",
"target_port": "targetPort",
"timeout_seconds": "timeoutSeconds",
"topology_keys": "topologyKeys",
"update_strategy": "updateStrategy",
"volume_claim_templates": "volumeClaimTemplates",
"volume_mode": "volumeMode",
"volume_name": "volumeName",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"accessModes": "access_modes",
"apiGroup": "api_group",
"apiVersion": "api_version",
"appProtocol": "app_protocol",
"associationStatus": "association_status",
"availableNodes": "available_nodes",
"changeBudget": "change_budget",
"clientIP": "client_ip",
"clusterIP": "cluster_ip",
"configRef": "config_ref",
"daemonSet": "daemon_set",
"dataSource": "data_source",
"elasticsearchAssociationStatus": "elasticsearch_association_status",
"elasticsearchRef": "elasticsearch_ref",
"expectedNodes": "expected_nodes",
"externalIPs": "external_i_ps",
"externalName": "external_name",
"externalTrafficPolicy": "external_traffic_policy",
"fileRealm": "file_realm",
"healthCheckNodePort": "health_check_node_port",
"ipFamily": "ip_family",
"kibanaAssociationStatus": "kibana_association_status",
"kibanaRef": "kibana_ref",
"lastProbeTime": "last_probe_time",
"lastTransitionTime": "last_transition_time",
"loadBalancerIP": "load_balancer_ip",
"loadBalancerSourceRanges": "load_balancer_source_ranges",
"matchExpressions": "match_expressions",
"matchLabels": "match_labels",
"maxSurge": "max_surge",
"maxUnavailable": "max_unavailable",
"minAvailable": "min_available",
"nodePort": "node_port",
"nodeSets": "node_sets",
"podDisruptionBudget": "pod_disruption_budget",
"podTemplate": "pod_template",
"publishNotReadyAddresses": "publish_not_ready_addresses",
"remoteClusters": "remote_clusters",
"rollingUpdate": "rolling_update",
"secretName": "secret_name",
"secretTokenSecret": "secret_token_secret",
"secureSettings": "secure_settings",
"selfSignedCertificate": "self_signed_certificate",
"serviceAccountName": "service_account_name",
"sessionAffinity": "session_affinity",
"sessionAffinityConfig": "session_affinity_config",
"storageClassName": "storage_class_name",
"subjectAltNames": "subject_alt_names",
"targetPort": "target_port",
"timeoutSeconds": "timeout_seconds",
"topologyKeys": "topology_keys",
"updateStrategy": "update_strategy",
"volumeClaimTemplates": "volume_claim_templates",
"volumeMode": "volume_mode",
"volumeName": "volume_name",
}
| 39.533333 | 80 | 0.707841 |
026b557b15ada072d61283c89f10a088c8637df4 | 1,416 | py | Python | webapp/app.py | aleksandergurin/news | 9e7d3c35857600445cb6df42ba18d289dc0e37a9 | [
"BSD-3-Clause"
] | 3 | 2015-08-20T11:08:28.000Z | 2018-01-28T21:22:53.000Z | webapp/app.py | aleksandergurin/news | 9e7d3c35857600445cb6df42ba18d289dc0e37a9 | [
"BSD-3-Clause"
] | null | null | null | webapp/app.py | aleksandergurin/news | 9e7d3c35857600445cb6df42ba18d289dc0e37a9 | [
"BSD-3-Clause"
] | null | null | null |
from flask import Flask, render_template
from config import configs
from .extensions import login_manager, db
from .account import account
from .frontend import frontend
from webapp.session import RedisSessionInterface
| 24.413793 | 80 | 0.711864 |
026bd83279fbac0f51bacbf47138a5022a5dd278 | 27,723 | py | Python | src/ezcode/knapsack/__init__.py | zheng-gao/ez_code | fbf48990291aa57d6436d4548b0a6c25dfb8f82d | [
"MIT"
] | null | null | null | src/ezcode/knapsack/__init__.py | zheng-gao/ez_code | fbf48990291aa57d6436d4548b0a6c25dfb8f82d | [
"MIT"
] | null | null | null | src/ezcode/knapsack/__init__.py | zheng-gao/ez_code | fbf48990291aa57d6436d4548b0a6c25dfb8f82d | [
"MIT"
] | null | null | null | from typing import Callable
| 48.046794 | 145 | 0.537171 |
026d6883b4b4ef48ca95ca7facd1d38932ace6a3 | 26 | py | Python | env/lib/python3.7/site-packages/grpc/_grpcio_metadata.py | PrudhviGNV/speechemotion | c99b4a7f644e1fd495cb5e6750ada0dd50d8b86f | [
"MIT"
] | 5 | 2019-04-16T20:43:47.000Z | 2020-10-24T22:35:39.000Z | Lib/site-packages/grpc/_grpcio_metadata.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | 2 | 2021-04-30T20:43:55.000Z | 2021-06-10T21:34:23.000Z | Lib/site-packages/grpc/_grpcio_metadata.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | 3 | 2019-08-03T13:47:09.000Z | 2021-08-03T14:20:25.000Z | __version__ = """1.19.0""" | 26 | 26 | 0.576923 |
027134b2e08ff17613c7279b030cfe1fcf0d8e8e | 309 | py | Python | pycon/tutorials/urls.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 154 | 2015-01-17T02:29:24.000Z | 2022-03-20T20:37:24.000Z | pycon/tutorials/urls.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 316 | 2015-01-10T04:01:50.000Z | 2020-09-30T20:18:08.000Z | pycon/tutorials/urls.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 89 | 2015-01-10T05:25:21.000Z | 2022-02-27T03:28:59.000Z | from django.conf.urls import url, patterns
from .views import tutorial_email, tutorial_message
urlpatterns = patterns("", # flake8: noqa
url(r"^mail/(?P<pk>\d+)/(?P<pks>[0-9,]+)/$", tutorial_email, name="tutorial_email"),
url(r"^message/(?P<pk>\d+)/$", tutorial_message, name="tutorial_message"),
)
| 34.333333 | 88 | 0.679612 |
027194484ee86822b39b3b119ff07d71c83e4daa | 895 | py | Python | setup.py | oleks/gigalixir-cli | d1b1c303e24be548ddc895165e34652c378f4347 | [
"MIT"
] | null | null | null | setup.py | oleks/gigalixir-cli | d1b1c303e24be548ddc895165e34652c378f4347 | [
"MIT"
] | null | null | null | setup.py | oleks/gigalixir-cli | d1b1c303e24be548ddc895165e34652c378f4347 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='gigalixir',
url='https://github.com/gigalixir/gigalixir-cli',
author='Jesse Shieh',
author_email='jesse@gigalixir.com',
version='1.1.10',
packages=find_packages(),
include_package_data=True,
install_requires=[
'click~=6.7',
'requests~=2.20.0',
'stripe~=1.51.0',
'rollbar~=0.13.11',
'pygments~=2.2.0',
],
entry_points='''
[console_scripts]
gigalixir=gigalixir:cli
''',
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'HTTPretty',
'sure',
],
extras_require={
'dev': [
'Sphinx',
'sphinx_rtd_theme',
'sphinx-tabs',
],
'test': [
'pytest',
'HTTPretty',
'sure',
],
}
)
| 20.813953 | 53 | 0.492737 |
02723743e00e16a13861c25c7c9d6a4bb4b3f93e | 254 | py | Python | runTest.py | Amedeo91/cushypost_integration | fc7ffc9daf535ed5bcfdee4933a7a57340a583b2 | [
"MIT"
] | 1 | 2021-10-06T06:23:40.000Z | 2021-10-06T06:23:40.000Z | runTest.py | Amedeo91/cushypost_integration | fc7ffc9daf535ed5bcfdee4933a7a57340a583b2 | [
"MIT"
] | null | null | null | runTest.py | Amedeo91/cushypost_integration | fc7ffc9daf535ed5bcfdee4933a7a57340a583b2 | [
"MIT"
] | null | null | null | import os
import unittest
dir_path = os.path.dirname(os.path.realpath(__file__))
suite = unittest.TestLoader().discover(dir_path, pattern='test_*.py')
result = unittest.TextTestRunner(verbosity=3).run(suite)
print(result)
assert result.wasSuccessful()
| 25.4 | 69 | 0.787402 |
02735e99efa8906c66196996cdf60aedba9354a2 | 6,145 | py | Python | tests/test_pydent/test_models/models/test_plan.py | aquariumbio/trident | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 5 | 2019-01-21T11:12:05.000Z | 2020-03-05T20:52:14.000Z | tests/test_pydent/test_models/models/test_plan.py | aquariumbio/pydent | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 28 | 2020-11-18T02:07:09.000Z | 2021-06-08T15:49:41.000Z | tests/test_pydent/test_models/models/test_plan.py | aquariumbio/trident | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 2 | 2021-02-27T19:23:45.000Z | 2021-09-14T10:29:07.000Z | import pytest
from pydent.models import Plan
def test_plan_copy(example_plan):
    """Copying plans should anonymize operations and wires.

    ``example_plan`` is a pytest fixture providing a saved Plan.  A copy
    must keep the structure (operations, field values, type references)
    while clearing every server-assigned id so the copy can be submitted
    as a new plan.
    """
    copied_plan = example_plan.copy()
    # The copy must not be empty — structure is preserved.
    assert copied_plan.operations
    for op in copied_plan.operations:
        # Ids are anonymized; type references are kept.
        assert op.id is None
        assert op.operation_type_id is not None
        assert op.field_values is not None
        for fv in op.field_values:
            # Field values likewise lose their ids and parent links
            # but keep their field-type reference.
            assert fv.id is None
            assert fv.parent_id is None
            assert fv.field_type_id is not None
# TODO: make this adeterministic test
"""def test_new_plan(session):
p = fake_session.Plan.new()
p.connect_to_session(session)
assert p.operations is None
assert p.plan_associations is None
p.id = 1000000
assert p.operations == []
assert p.plan_associations == []"""
# def test_submit(session):
# primer = session.SampleType.find(1).samples[-1]
#
# # get Order Primer operation type
# ot = session.OperationType.find(328)
#
# # create an operation
# order_primer = ot.instance()
#
# # set io
# order_primer.set_output("Primer", sample=primer)
# order_primer.set_input("Urgent?", value="no")
#
# # create a new plan and add operations
# p = session.Plan(name="MyPlan")
# p.add_operation(order_primer)
#
# # save the plan
# p.create()
#
# # estimate the cost
# p.estimate_cost()
#
# # show the plan
# p.show()
#
# # submit the plan
# p.submit(session.current_user, session.current_user.budgets[0])
# def test_submit_pcr(session):
# def get_op(name):
# return session.OperationType.where(
# {'name': name, 'deployed': True})[-1].instance()
#
# make_pcr_fragment = get_op('Make PCR Fragment')
# pour_gel = get_op('Pour Gel')
# run_gel = get_op('Run Gel')
# extract_gel_slice = get_op('Extract Gel Slice')
# purify_gel = get_op('Purify Gel Slice')
#
# # setup pcr
# make_pcr_fragment.set_input('Forward Primer',
# item=session.Item.find(81867))
# make_pcr_fragment.set_input('Reverse Primer',
# item=session.Item.find(57949))
# make_pcr_fragment.set_input('Template', item=session.Item.find(61832))
# make_pcr_fragment.set_output('Fragment',
# sample=session.Sample.find(16976))
#
# # setup outputs
# # run_gel.set_output(sample=session.Sample.find(16976))
# # extract_gel_slice.set_output(sample=session.Sample.find(16976))
# # purify_gel.set_output(sample=session.Sample.find(16976))
# # purify_gel.pour_gel(sample=session.Sample.find(16976))
#
# # new plan
# p = session.fake_session.Plan.new()
# p.add_operations([make_pcr_fragment, pour_gel, run_gel,
# extract_gel_slice, purify_gel])
#
# p.add_wires([
# (make_pcr_fragment.output("Fragment"), run_gel.input("Fragment")),
# (pour_gel.output("Lane"), run_gel.input("Gel")),
# (run_gel.output("Fragment"), extract_gel_slice.input("Fragment")),
# (extract_gel_slice.output("Fragment"), purify_gel.input("Gel"))
# ])
#
# make_pcr_fragment.set_output("Fragment",
# sample=session.Sample.find(16976))
#
#
# pdata = p.to_save_json()
#
# # wire up the operations
# # p.wire(make_pcr_fragment.outputs[0], run_gel.input('Fragment'))
# # p.wire(pour_gel.outputs[0], run_gel.input('Gel'))
# # p.wire(run_gel.outputs[0], extract_gel_slice.input('Fragment'))
# # p.wire(extract_gel_slice.outputs[0], purify_gel.input('Gel'))
#
# # save the plan
# p.create()
#
# # estimate the cost
# p.estimate_cost()
#
# p.validate()
#
# # show the plan
# p.show()
#
# # submit the plan
# p.submit(session.current_user, session.current_user.budgets[0])
# # TODO: having difficulty patching plans/operations here...
# def test_replan(session):
#
# p = session.Plan.find(79797)
# newplan = p.replan()
# newplan.print()
#
# for op in newplan.operations:
# if op.operation_type.name == "Make PCR Fragment":
# op.set_input('Template', item=session.Item.find(57124))
# newplan.patch(newplan.to_save_json())
| 28.581395 | 76 | 0.621318 |
0273c9fe7bf28f09a7dc46bd636570ab46c8a8fa | 611 | py | Python | FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 2 | 2020-01-24T16:34:54.000Z | 2020-08-01T05:09:24.000Z | FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 1 | 2021-05-05T09:50:22.000Z | 2021-05-05T09:50:22.000Z | FusionIIIT/applications/gymkhana/migrations/0007_auto_20200608_2210.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 4 | 2020-01-16T17:00:08.000Z | 2020-06-30T15:58:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-06-08 22:10
from __future__ import unicode_literals
from django.db import migrations, models
| 24.44 | 101 | 0.610475 |
027588263d8cfcf1854016d6bcb09a5b8fcae300 | 1,899 | py | Python | config/presets/Modes/Python/T - Bits H/main.py | The-XOR/EYESY_OS | 6a5e3d0bc5574ba2311e0c7e81c600c3af7a3e34 | [
"BSD-3-Clause"
] | 18 | 2021-03-06T05:39:30.000Z | 2022-03-25T17:59:23.000Z | presets/Modes/Python/T - Bits H/main.py | jqrsound/EYESY_OS_for_RasPiSound | ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62 | [
"BSD-3-Clause"
] | null | null | null | presets/Modes/Python/T - Bits H/main.py | jqrsound/EYESY_OS_for_RasPiSound | ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62 | [
"BSD-3-Clause"
] | 4 | 2021-03-14T18:38:42.000Z | 2021-07-11T14:31:18.000Z | import os
import pygame
import random
trigger = False
x = 0
y = 0
height = 720
width = 1280
linelength = 50
lineAmt = 20
displace = 10
xpos = [random.randrange(-200,1280) for i in range(0, lineAmt + 2)]
xpos1 = [(xpos[i]+displace) for i in range(0, lineAmt + 2)]
xr = 360
yr = 240
| 31.131148 | 97 | 0.604529 |
0275d85ad826b0b81b83f4f373f69ae66117d9ed | 2,577 | py | Python | ext/std/code/mi.py | iazarov/metrixplusplus | 322777cba4e089502dd6053749b07a7be9da65b2 | [
"MIT"
] | null | null | null | ext/std/code/mi.py | iazarov/metrixplusplus | 322777cba4e089502dd6053749b07a7be9da65b2 | [
"MIT"
] | null | null | null | ext/std/code/mi.py | iazarov/metrixplusplus | 322777cba4e089502dd6053749b07a7be9da65b2 | [
"MIT"
] | null | null | null | #
# Metrix++, Copyright 2009-2019, Metrix++ Project
# Link: https://github.com/metrixplusplus/metrixplusplus
#
# This file is a part of Metrix++ Tool.
#
import mpp.api
| 45.210526 | 100 | 0.568879 |
027b51903bbc31466f05349aa598a39bb4d2919d | 447 | py | Python | 6.00.1x/quiz/flatten.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | 6.00.1x/quiz/flatten.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | 6.00.1x/quiz/flatten.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | def flatten(aList):
'''
aList: a list
Returns a copy of aList, which is a flattened version of aList
'''
if aList == []:
return aList
if type(aList[0]) == list:
return flatten(aList[0]) + flatten(aList[1:])
return aList[:1] + flatten(aList[1:])
aList = [[1, 'a', ['cat'], 2], [[[3]], 'dog'], 4, 5]
print flatten(aList)
testCase = [1, 'a', 'cat', 2, 3, 'dog', 4, 5]
print flatten(aList) == testCase
| 22.35 | 66 | 0.548098 |
027cdd147516550681b095c7591faaa5e2b26a2b | 9,960 | py | Python | copo_code/copo/algo_svo/svo_env.py | decisionforce/CoPO | 3a06a48522b901db2e380a62a0efb5e8a30cd079 | [
"Apache-2.0"
] | 37 | 2021-11-01T03:30:30.000Z | 2022-03-29T08:38:12.000Z | copo_code/copo/algo_svo/svo_env.py | decisionforce/CoPO | 3a06a48522b901db2e380a62a0efb5e8a30cd079 | [
"Apache-2.0"
] | null | null | null | copo_code/copo/algo_svo/svo_env.py | decisionforce/CoPO | 3a06a48522b901db2e380a62a0efb5e8a30cd079 | [
"Apache-2.0"
] | 4 | 2021-11-05T06:55:34.000Z | 2022-01-04T07:08:37.000Z | """
Usage: Call get_svo_env(env_class) to get the real env class!
"""
from collections import defaultdict
from math import cos, sin
import numpy as np
from gym.spaces import Box
from metadrive.envs.marl_envs.marl_tollgate import TollGateObservation, MultiAgentTollgateEnv
from metadrive.obs.state_obs import LidarStateObservation
from metadrive.utils import get_np_random, norm, clip
from copo.utils import get_rllib_compatible_env
def get_svo_env(env_class, return_env_class=False):
name = env_class.__name__
TMP.__name__ = name
TMP.__qualname__ = name
if return_env_class:
return TMP
return get_rllib_compatible_env(TMP)
if __name__ == '__main__':
# env = SVOEnv({"num_agents": 8, "neighbours_distance": 3, "svo_mode": "angle", "force_svo": 0.9})
env = get_svo_env(
MultiAgentTollgateEnv, return_env_class=True
)({
"num_agents": 8,
"neighbours_distance": 3,
"svo_mode": "angle",
"svo_dist": "normal"
})
o = env.reset()
assert env.observation_space.contains(o)
assert all([0 <= oo[-1] <= 1.0 for oo in o.values()])
total_r = 0
ep_s = 0
for i in range(1, 100000):
o, r, d, info = env.step({k: [0.0, 1.0] for k in env.vehicles.keys()})
assert env.observation_space.contains(o)
assert all([0 <= oo[-1] <= 1.0 for oo in o.values()])
for r_ in r.values():
total_r += r_
print("SVO: {}".format({kkk: iii["svo"] if "svo" in iii else None for kkk, iii in info.items()}))
ep_s += 1
if d["__all__"]:
print(
"Finish! Current step {}. Group Reward: {}. Average reward: {}".format(
i, total_r, total_r / env.agent_manager.next_agent_count
)
)
break
if len(env.vehicles) == 0:
total_r = 0
print("Reset")
env.reset()
env.close()
| 35.44484 | 113 | 0.570482 |
027d3d4607b5f1e18cfb2663664c754672a047c8 | 1,995 | py | Python | tests/test_renderer.py | derlin/get-html | ea6d81f424ed0a60a37a52b95dd5b27c85cf0852 | [
"Apache-2.0"
] | 11 | 2020-03-02T08:38:37.000Z | 2021-11-19T05:03:20.000Z | tests/test_renderer.py | derlin/get-html | ea6d81f424ed0a60a37a52b95dd5b27c85cf0852 | [
"Apache-2.0"
] | 2 | 2020-03-02T11:43:12.000Z | 2020-03-10T07:59:07.000Z | tests/test_renderer.py | derlin/get-html | ea6d81f424ed0a60a37a52b95dd5b27c85cf0852 | [
"Apache-2.0"
] | 2 | 2020-03-02T08:13:53.000Z | 2020-03-09T21:15:26.000Z | from get_html.html_renderer import HtmlRenderer
import pytest
import re
| 32.704918 | 148 | 0.680201 |
027e6a3b136fbe978f346957d7b86c2022fa6ea2 | 724 | py | Python | resources/include-lists/string_manipulator_util.py | e-loughlin/CppCodeGenerator | 638f80f9df21d709d1240bb3bd43f9d43dd2e3ac | [
"MIT"
] | 6 | 2019-09-30T10:27:15.000Z | 2020-12-20T14:46:24.000Z | resources/include-lists/string_manipulator_util.py | e-loughlin/CppCodeGenerator | 638f80f9df21d709d1240bb3bd43f9d43dd2e3ac | [
"MIT"
] | 4 | 2019-11-25T18:14:29.000Z | 2019-12-09T20:47:29.000Z | resources/include-lists/string_manipulator_util.py | emloughl/CppCodeGenerator | 638f80f9df21d709d1240bb3bd43f9d43dd2e3ac | [
"MIT"
] | 1 | 2021-12-01T07:03:31.000Z | 2021-12-01T07:03:31.000Z |
import sys
import os
import ntpath
if __name__ == "__main__":
main()
| 20.111111 | 53 | 0.574586 |
027e798c00ba61f438e908e5871d0e08cf7a12f8 | 2,205 | py | Python | build/lib/henmedlib/functions/hounsfield.py | schmitzhenninglmu/henmedlib | 196b63710f092470ab21173cfcc0b14e65778f33 | [
"MIT"
] | null | null | null | build/lib/henmedlib/functions/hounsfield.py | schmitzhenninglmu/henmedlib | 196b63710f092470ab21173cfcc0b14e65778f33 | [
"MIT"
] | null | null | null | build/lib/henmedlib/functions/hounsfield.py | schmitzhenninglmu/henmedlib | 196b63710f092470ab21173cfcc0b14e65778f33 | [
"MIT"
] | 1 | 2019-09-20T10:59:25.000Z | 2019-09-20T10:59:25.000Z | __author__ = "Henning Schmitz"
import numpy as np
def calculate_hounsfield_unit(mu, mu_water, mu_air):
"""
Given linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:param mu_water: Constant linear attenuation coefficient for water
:param mu_air: Constant linear attenuation coefficient for air
:return: Hounsfield unit corresponding to mu
"""
HU = 1000 * ((mu - mu_water) / (mu_water - mu_air))
return HU
def calculate_hounsfield_unit_parameterless(mu):
"""
Given linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:return: Hounsfield unit corresponding to mu
"""
HU = mu * 65536-1024
return HU
def create_array_with_hounsfield_units(image_data, mu_water, mu_air):
"""
Given 3d array with linear attenuation coefficients the function calculates the corresponding Hounsfield units.
:param image_data: 3d array corresponding to image
:param mu: Attenuation coefficient to determine corresponding Hounsfield unit.
:param mu_water: Constant linear attenuation coefficient for water
:param mu_air: Constant linear attenuation coefficient for air
:return: 3d array calculated in Hounsfield unit
"""
# print dimensions of array
dim_x = np.size(image_data, 0)
dim_y = np.size(image_data, 1)
dim_slice = np.size(image_data, 2)
# loop through array
count = 0
iterations = dim_x * dim_y * dim_slice
# loop through x direction
for i in range(0, dim_x):
# loop through y direction
for j in range(0, dim_y):
# loop through slices
for k in range(0, dim_slice):
image_data[i][j][k] = calculate_hounsfield_unit(image_data[i][j][k], mu_water, mu_air)
count += 1
if count % (0.1 * iterations) == 0:
print(round(count / iterations, 1) * 100, "% progress")
return image_data
| 38.017241 | 116 | 0.677098 |
027ff59d51aedead00128b3b38fec073cc323ee3 | 1,028 | py | Python | coaddExtract.py | rbliu/LSST_DM_Scripts | 0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d | [
"MIT"
] | null | null | null | coaddExtract.py | rbliu/LSST_DM_Scripts | 0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d | [
"MIT"
] | null | null | null | coaddExtract.py | rbliu/LSST_DM_Scripts | 0a32ba629a2b52d3add407e92ab8ff4bc3cbd64d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
## last modified by Robert Liu at 7/29/2019
## This script is used to extract data (and WCS info) in the image extension of a coadd patch.
## Ext 0 = primaryHDU, Ext 1 = image, Ext 2 = mask, Ext 3 = variancce.
## Then, the output fits file can be used by SWarp to assemble a mosaic coadd image.
import re
import sys
import numpy as np
from astropy.io import fits
from astropy import wcs
if len(sys.argv) != 3:
print("Usage: python coaddExtract.py {coadd_image} {extracted_image}", file=sys.stderr)
exit(1);
coadd_patch = sys.argv[1]
extracted_patch = sys.argv[2]
# Open the fits image
hdu = fits.open(coadd_patch)
# Create a new HDU. Save the data of Ext1 to it.
hdu1 = fits.PrimaryHDU(hdu[1].data)
print('Coadd patch loaded.')
# Extract WCS info and append to the new HDU.
w = wcs.WCS(hdu[1].header)
wcs_keys = w.to_header()
hdu1.header += wcs_keys
print('WCS information appened.')
# Write the new HDU
hdu1.writeto(extracted_patch)
print('New coadd image saved!\n')
| 28.555556 | 94 | 0.715953 |
02824286d75d00e50642afe49b18a9fd9681523d | 22 | py | Python | backend_server/backend_globals.py | MSNLAB/SmartEye | 40b38190aeff5d5b970c8cbf43e8781634b38028 | [
"MIT",
"Unlicense"
] | 17 | 2021-06-27T04:33:13.000Z | 2022-03-21T02:54:52.000Z | backend_server/backend_globals.py | MSNLAB/SmartEye | 40b38190aeff5d5b970c8cbf43e8781634b38028 | [
"MIT",
"Unlicense"
] | null | null | null | backend_server/backend_globals.py | MSNLAB/SmartEye | 40b38190aeff5d5b970c8cbf43e8781634b38028 | [
"MIT",
"Unlicense"
] | 2 | 2021-10-31T05:14:24.000Z | 2022-03-25T18:53:49.000Z |
global loaded_model
| 5.5 | 19 | 0.818182 |
02842784fc821e743357ee9efac57212bf1f6827 | 326 | py | Python | src/utils.py | fabiob/wwwsqldesigner-aws | 5518eae682e8228be30b094c6015054b3cddf8f3 | [
"MIT"
] | null | null | null | src/utils.py | fabiob/wwwsqldesigner-aws | 5518eae682e8228be30b094c6015054b3cddf8f3 | [
"MIT"
] | null | null | null | src/utils.py | fabiob/wwwsqldesigner-aws | 5518eae682e8228be30b094c6015054b3cddf8f3 | [
"MIT"
] | 1 | 2021-04-04T09:41:51.000Z | 2021-04-04T09:41:51.000Z | from .env import S3_PREFIX
| 20.375 | 64 | 0.650307 |
028456bd34d14ef1d7f23ca7f443c4b9f0404a35 | 4,071 | py | Python | waferscreen/inst_control/inactive/agilent_34970A.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 1 | 2021-07-30T19:06:07.000Z | 2021-07-30T19:06:07.000Z | waferscreen/inst_control/inactive/agilent_34970A.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | 8 | 2021-04-22T20:47:48.000Z | 2021-07-30T19:06:01.000Z | waferscreen/inst_control/inactive/agilent_34970A.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
] | null | null | null | import serial
| 39.911765 | 119 | 0.503316 |
0285c8a2ee84e232d1b5d465f4047d255ab9153e | 2,318 | py | Python | force_wfmanager/gui/tests/test_click_run.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
] | 1 | 2019-08-19T16:02:20.000Z | 2019-08-19T16:02:20.000Z | force_wfmanager/gui/tests/test_click_run.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
] | 396 | 2017-07-18T15:19:55.000Z | 2021-05-03T06:23:06.000Z | force_wfmanager/gui/tests/test_click_run.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
] | 2 | 2019-03-05T16:23:10.000Z | 2020-04-16T08:59:11.000Z | # (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
import sys
import os
from unittest import mock
from click.testing import CliRunner
import force_wfmanager.gui.run
from force_wfmanager.tests.dummy_classes.dummy_wfmanager import \
DummyWfManager
from force_wfmanager.version import __version__
| 34.088235 | 74 | 0.615617 |
0286818653d925685a7dbe2ea01784b7a5521b18 | 675 | py | Python | menu.py | shaolinbertrand/RPG | 77292c54baa14baf9e09d036be67592bb8f2c093 | [
"MIT"
] | null | null | null | menu.py | shaolinbertrand/RPG | 77292c54baa14baf9e09d036be67592bb8f2c093 | [
"MIT"
] | null | null | null | menu.py | shaolinbertrand/RPG | 77292c54baa14baf9e09d036be67592bb8f2c093 | [
"MIT"
] | null | null | null | from cadastrarJogador import cadastra_jogador
from cadastrarMonstros import cadastra_monstro
from atualizaJogador import atualiza
from combate import combate_iniciado
while True:
print('Bem vindo ao RPG selecione a opo desenjada')
print('[0] - Cadastrar Novo Jogador\n[1] - Atualizar Jogador\n[2] - Cadastrar Novo Monstro\n[3] Iniciar Combate\n[4]-Sair do sistema')
o = int(input('Entre com o numero da opo desejada: '))
if o == 0:
cadastra_jogador()
elif o == 1:
cadastra_monstro()
elif o == 2:
atualiza()
elif o == 3:
combate_iniciado()
elif o == 4:
break
else:
print('Opo invalida') | 33.75 | 138 | 0.665185 |
02869a45220bc3cd768ae9f192b46417fa96c690 | 4,354 | py | Python | plugin_manager/accounts/models.py | ahharu/plugin-manager | 43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb | [
"MIT"
] | null | null | null | plugin_manager/accounts/models.py | ahharu/plugin-manager | 43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb | [
"MIT"
] | null | null | null | plugin_manager/accounts/models.py | ahharu/plugin-manager | 43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb | [
"MIT"
] | null | null | null | """
Custom user model for deployments.
"""
import urllib
import hashlib
import base64
import random
from authtools.models import AbstractEmailUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from .managers import DeployUserManager
from plugin_manager.hosts.models import Host
from plugin_manager.accounts.model_managers import DeployUserActiveManager
from plugin_manager.core.mixins.models import TrackingFields
def generate_APIKey(sender, instance, created, **kwargs):
if created:
apikey = APIKey()
apikey.apikey = base64.b64encode(hashlib.sha256(
str(random.getrandbits(256))).digest(),
random.choice(
['rA', 'aZ', 'gQ', 'hH', 'hG',
'aR', 'DD'])).rstrip('==')
apikey.deployuser = instance
apikey.save()
post_save.connect(generate_APIKey, sender=DeployUser)
| 29.221477 | 78 | 0.595315 |
02871af56c42a72cf7ba11b3dac2fc5de68923f2 | 1,007 | py | Python | heads/fc1024_normalize.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 37 | 2019-06-01T02:11:48.000Z | 2021-12-31T06:27:42.000Z | heads/fc1024_normalize.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 1 | 2019-06-21T03:20:59.000Z | 2019-09-03T14:20:04.000Z | heads/fc1024_normalize.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 6 | 2019-10-11T10:21:56.000Z | 2022-03-09T06:22:57.000Z | import tensorflow as tf
from tensorflow.contrib import slim
| 33.566667 | 105 | 0.675273 |
02875f5951726e518af5547e018727a57f4c2846 | 1,144 | py | Python | vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py | ninjasftw/libertyproxybeat | b8acafe86ad285f091bf69b59d2ebd1da80dcf5e | [
"Apache-2.0"
] | 37 | 2016-01-25T10:52:59.000Z | 2021-05-08T11:44:39.000Z | vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py | ninjasftw/libertyproxybeat | b8acafe86ad285f091bf69b59d2ebd1da80dcf5e | [
"Apache-2.0"
] | 35 | 2016-01-25T09:19:28.000Z | 2017-11-20T23:29:35.000Z | vendor/github.com/elastic/beats/topbeat/tests/system/test_base.py | ninjasftw/libertyproxybeat | b8acafe86ad285f091bf69b59d2ebd1da80dcf5e | [
"Apache-2.0"
] | 23 | 2016-01-25T09:15:05.000Z | 2020-12-14T06:08:31.000Z | from topbeat import BaseTest
import os
import shutil
import time
"""
Contains tests for base config
"""
| 28.6 | 79 | 0.612762 |
0289963af258cded39c2b0dcfaad0d26f59c24b0 | 7,133 | py | Python | JapanSize.py | AleksanderLidtke/XKCD | 47c5029d9737390a910184adc66efc1347b84441 | [
"MIT"
] | null | null | null | JapanSize.py | AleksanderLidtke/XKCD | 47c5029d9737390a910184adc66efc1347b84441 | [
"MIT"
] | null | null | null | JapanSize.py | AleksanderLidtke/XKCD | 47c5029d9737390a910184adc66efc1347b84441 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Throughout my travels I've discovered that most people, including myself, do not
realise many things about our Planet's size. For example, the latitude and
longitude of certain regions (South America is much further east than the US)
or the relative size of countries (Japan is surprisingly long).
Thus, I've created this script to understand such things a bit better. It
compares the sizes of Japan and Europe, which is the most recent surprise
I came across.
The shape data were aquired from [Global Administrative Areas](http://www.gadm.org/country)
website. Thus, their **redistribution, or commercial use is not allowed without
prior permission**.
Created on Sun May 7 14:13:47 2017
@author: Alek
"""
from mpl_toolkits.basemap import Basemap
import numpy, shapefile, os, matplotlib.pyplot
matplotlib.pyplot.xkcd() # Here we go.
def plotPrefecture(*,shp,colour,bMap,axes,latOff=0,longOff=0,lwdth=0.5):
""" Plot a prefecture from a shapefile.
Kwargs
-------
* shp - shape as returned by :func:`shapefile.Reader.shapes`,
* colour - colour accepted by :func:`matplotlib.pyplot.Axes.plot',
* bMap - instance of :class:`mpl_toolkits.basemap.Basemap` used to project
the shape onto a map,
* axes - :class:`matplotlib.pyplot.Axes` instance where to plot,
* latOff,longOff - deg, by how much to offset the `shp` lattitudes and
longitudes before plotting,
* lwdth - line width as accepted by :func:`matplotlib.pyplot.Axes.plot'.
"""
if len(shp.parts)==1: # Only one region in this shape.
vertices=numpy.array(shp.points)
bMap.plot(vertices[:,0]+longOff,vertices[:,1]+latOff,color=colour,
lw=lwdth,ls='-',latlon=True,ax=axes)
else: # This shape has islands, disjoint regions and what-not.
for ip in range(len(shp.parts)): # For every part of the shape.
# Indices that get the slice with this part of the shape.
lower=shp.parts[ip]
if ip==len(shp.parts)-1:
upper=len(shp.points) # Last part.
else:
upper=shp.parts[ip+1] # Next part starts at idx parts[ip+1]
partVertices=numpy.array(shp.points[lower:upper])
bMap.plot(partVertices[:,0]+longOff,partVertices[:,1]+latOff,
color=colour,lw=lwdth,ls='-',latlon=True,ax=axes)
# Various font sizes.
ticksFontSize=18
labelsFontSizeSmall=20
labelsFontSize=30
titleFontSize=34
legendFontSize=20
matplotlib.rc('xtick',labelsize=ticksFontSize)
matplotlib.rc('ytick',labelsize=ticksFontSize)
cm=matplotlib.pyplot.cm.get_cmap('viridis')
# Read a shapefile with Japan's cartography data.
shapeRdr0=shapefile.Reader(os.path.join('borders','JPN_adm0')) # Country.
shapeRdr1=shapefile.Reader(os.path.join('borders','JPN_adm1')) # Prefectures.
shapeRdr2=shapefile.Reader(os.path.join('borders','JPN_adm2')) # Towns.
shape=shapeRdr0.shapes()[0]
if shape.shapeType != shapefile.POLYGON:
raise ValueError('Shape not polygon with shapeType={}'.format(shape.shapeType ))
vertices=numpy.array(shape.points) # 2D array of coordinates.
# Where to centre different maps and where to translate Japan to.
latJpn=37 # Where to centre one map, i.e. over Japan. Lat/lon in degrees.
lonJpn=138
latCtr=40 # Where to centre the Europe's map. Lat/lon in degrees.
lonCtr=10
dLonJ=10 # Plot Japan at these coordinates over the map of Europe.
dLatJ=50
' Mercator projection, a.k.a. "the things you learn in schools".'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# The whole Planet.
mercMapP=Basemap(projection='merc',llcrnrlat=-80,urcrnrlat=80,llcrnrlon=-180,
urcrnrlon=180,lat_ts=10,ax=ax[0],resolution='c')
mercMapP.drawcoastlines(linewidth=0.5)
mercMapP.drawcountries(linewidth=0.25)
mercMapP.drawparallels(numpy.arange(-90.,91.,30.))
mercMapP.drawmeridians(numpy.arange(-180.,181.,60.))
ax[0].set_title(r'$Our\ Planet$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=1,bMap=mercMapP,axes=ax[0])
# Only Europe.
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax[1],resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax[1].set_title(r'$Europe$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' One figure with orthonormal maps centred on Japan and Europe.'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# Centred on Japan.
ortnMapJ=Basemap(projection='ortho',lat_0=latJpn,lon_0=lonJpn,resolution='c',
ax=ax[0])
ortnMapJ.drawcoastlines(linewidth=0.5)
ortnMapJ.drawcountries(linewidth=0.25)
ortnMapJ.drawmeridians(numpy.arange(0,360,30))
ortnMapJ.drawparallels(numpy.arange(-90,90,30))
ax[0].set_title(r'${}$'.format(shapeRdr0.records()[0][4]),fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapJ,axes=ax[0])
# Plot all the prefectures.
cNorm=matplotlib.colors.Normalize(vmin=0,vmax=shapeRdr1.numRecords)
scalarMap=matplotlib.cm.ScalarMappable(norm=cNorm,cmap=cm)
prefectures=shapeRdr1.shapes()
prefRecords=shapeRdr1.records()
for i in range(shapeRdr1.numRecords):
if prefRecords[i][9]=='Prefecture':
plotPrefecture(shp=prefectures[i],colour=scalarMap.to_rgba(i),
lwdth=0.5,bMap=ortnMapJ,axes=ax[0])
# Centred on Europe.
ortnMapE=Basemap(projection='ortho',lat_0=latCtr,lon_0=lonCtr,resolution='c',
ax=ax[1])
ortnMapE.drawcoastlines(linewidth=0.5)
ortnMapE.drawcountries(linewidth=0.25)
ortnMapE.drawmeridians(numpy.arange(0,360,30))
ortnMapE.drawparallels(numpy.arange(-90,90,30))
ax[1].set_title(r'${}\ over\ Europe$'.format(shapeRdr0.records()[0][4]),
fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' Japan and Kitakyushu overlaid on Europe.'
fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(16,8))
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax,resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax.set_title(r'$Europe,\ true\ lat.$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax,
latOff=0,longOff=dLonJ-lonJpn)
# Show annotation at the true latitude.
xKIT,yKIT=mercMapE.projtran(130.834730+dLonJ-lonJpn,33.8924837)
xTXT,yTXT=mercMapE.projtran(110.834730+dLonJ-lonJpn,45.8924837)
ax.scatter([xKIT],[yKIT],s=50,c='crimson')
ax.annotate('Here', xy=(xKIT,yKIT),xytext=(xTXT,yTXT),color='crimson',
arrowprops=dict(facecolor='crimson', shrink=0.05))
fig.show() | 43.493902 | 91 | 0.728305 |
028a79224d1b3b0d7d2cc26a3b2408f89ff5f8c5 | 7,252 | py | Python | lstm_toyexample.py | dsriaditya999/LSTM-Toy-Example | 850f7923122b547c1fd25b3b1dc739e8c5db2570 | [
"MIT"
] | null | null | null | lstm_toyexample.py | dsriaditya999/LSTM-Toy-Example | 850f7923122b547c1fd25b3b1dc739e8c5db2570 | [
"MIT"
] | null | null | null | lstm_toyexample.py | dsriaditya999/LSTM-Toy-Example | 850f7923122b547c1fd25b3b1dc739e8c5db2570 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Importing Libraries
"""
# Commented out IPython magic to ensure Python compatibility.
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch.nn as nn
from tqdm import tqdm_notebook
from sklearn.preprocessing import MinMaxScaler
# %matplotlib inline
torch.manual_seed(0)
"""# Loading Dataset"""
sns.get_dataset_names()
flight_data = sns.load_dataset("flights")
flight_data.head()
"""# Preprocessing"""
# Changing the plot size
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
# Plotting the data
plt.title("Time Series Representation of Data")
plt.xlabel("Months")
plt.ylabel("Passengers")
plt.grid(True)
plt.autoscale(axis = "x",tight=True)
plt.plot(flight_data["passengers"])
#Please note that this is univariate time series data : consisting of one variable passengers
#
data = flight_data["passengers"].values.astype(float)
print(data)
print(len(data))
# Train-Test Split
# Consider the last the 12 months data as evaluation data for testing the model's behaviour
train_window = 12
train_data = data[:-train_window]
test_data = data[-train_window:]
print(len(train_data))
print(len(test_data))
# Normalizing the train-data
scaler = MinMaxScaler(feature_range=(-1,1))
train_data_normalized = scaler.fit_transform(train_data.reshape(-1,1))
print(train_data_normalized[:10])
# Converting to Torch Tensor
train_data_normalized = torch.FloatTensor(train_data_normalized).view(-1)
print(train_data_normalized)
# Final step is creating sequences of length 12 (12 months data) from the train-data and
# the label for each sequence is the passenger_data for the (12+1)th Month
# Therefore, we get 120 train sequences along with the label value
train_in_seq = create_in_sequences(train_data_normalized,train_window)
print(len(train_in_seq))
print(train_in_seq[:5])
"""# The Model
Please note that the model considered here is:
1. LSTM layer with a univariate input sequence of length 12 and LSTM's previous hidden cell consisting of previous hidden state and previous cell state of length 100 and also , the size of LSTM's output is 100
2. The second layer is a Linear layer of 100 inputs from the LSTM's output and a single output size
"""
model = LSTM()
print(model)
"""# Loss Function and Learning Algorithm (Optimizer)
Please note that for this simple model ,
* Loss Function considered is *Mean Squared Error* and
* Optimization Function used is Stochastic Version of **Adam** *Optimizer*.
"""
loss_fn = nn.MSELoss() # Mean Squared Error Loss Function
optimizer = torch.optim.Adam(model.parameters(),lr = 0.0002) # Adam Learning Algorithm
"""# Training"""
epochs = 450
loss_plot = []
for epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
for seq,label in train_in_seq:
optimizer.zero_grad() # makes the gradients zero for each new sequence
model.hidden_cell = (torch.zeros(1,1,model.hidden_layer_size), # Initialising the previous hidden state and cell state for each new sequence
torch.zeros(1,1,model.hidden_layer_size))
y_pred = model(seq) # Automatically calls the forward pass
loss = loss_fn(y_pred,label) # Determining the loss
loss.backward() # Backpropagation of loss and gradients computation
optimizer.step() # Weights and Bias Updation
loss_plot.append(loss.item()) # Some Bookkeeping
plt.plot(loss_plot,'r-')
plt.xlabel("Epochs")
plt.ylabel("Loss : MSE")
plt.show()
print(loss_plot[-1])
"""# Making Prediction
Please note that for comparison purpose we use the training data's values and predicted data values to predict the number of passengers for the test data months and then compare them
"""
fut_pred = 12
test_inputs = train_data_normalized[-train_window: ].tolist()
print(test_inputs)
print(len(test_inputs))
model.eval() # Makes the model ready for evaluation
for i in range(fut_pred):
seq = torch.FloatTensor(test_inputs[-train_window: ]) # Converting to a tensor
with torch.no_grad(): # Stops adding to the computational flow graph (stops being prepared for backpropagation)
model.hidden_cell = (torch.zeros(1,1,model.hidden_layer_size),
torch.zeros(1,1,model.hidden_layer_size))
test_inputs.append(model(seq).item())
predicted_outputs_normalized = []
predicted_outputs_normalized = test_inputs[-train_window: ]
print(predicted_outputs_normalized)
print(len(predicted_outputs_normalized))
"""# Postprocessing"""
predicted_outputs = scaler.inverse_transform(np.array(predicted_outputs_normalized).reshape(-1,1))
print(predicted_outputs)
x = np.arange(132, 144, 1)
print(x)
"""# Final Output"""
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
plt.title('Month vs Passenger')
plt.ylabel('Total Passengers')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.plot(flight_data['passengers'])
plt.plot(x,predicted_outputs)
plt.show()
figsize = plt.rcParams["figure.figsize"]
figsize[0] = 15
figsize[1] = 5
plt.rcParams["figure.figsize"] = figsize
plt.title('Month vs Passenger')
plt.ylabel('Total Passengers')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.plot(flight_data['passengers'][-train_window-5: ])
plt.plot(x,predicted_outputs)
plt.show()
"""**Please observe that the model is able to get the trend of the passengers but it can be further fine-tuned by adding appropriate regularization methods**"""
| 34.046948 | 212 | 0.734694 |
028b6c5908aab150cc0d4d671ccfb977919ebe32 | 22,929 | py | Python | api/chat.py | Jecosine/blivechat | d398e4913e0c76d93d3f5402938dc59ea1424ec6 | [
"MIT"
] | null | null | null | api/chat.py | Jecosine/blivechat | d398e4913e0c76d93d3f5402938dc59ea1424ec6 | [
"MIT"
] | null | null | null | api/chat.py | Jecosine/blivechat | d398e4913e0c76d93d3f5402938dc59ea1424ec6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
import enum
import json
import logging
import random
import time
import uuid
from typing import *
import aiohttp
import tornado.websocket
import api.base
import blivedm.blivedm as blivedm
import config
import models.avatar
import models.translate
import models.log
logger = logging.getLogger(__name__)
_http_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))
room_manager: Optional['RoomManager'] = None
def _del_room(self, room_id):
room = self._rooms.get(room_id, None)
if room is None:
return
logger.info('Removing room %d', room_id)
for client in room.clients:
client.close()
room.stop_and_close()
self._rooms.pop(room_id, None)
logger.info('%d rooms', len(self._rooms))
# noinspection PyAbstractClass
# noinspection PyAbstractClass
# noinspection PyAbstractClass
# noinspection PyAbstractClass
# handle reply message
| 34.019288 | 111 | 0.584326 |
65f43d030f26c2fcb657f044a4435543df49146f | 954 | py | Python | gan.py | AtlantixJJ/LBSGAN | e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a | [
"BSD-2-Clause"
] | 1 | 2019-06-09T02:43:35.000Z | 2019-06-09T02:43:35.000Z | gan.py | AtlantixJJ/LBSGAN | e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a | [
"BSD-2-Clause"
] | null | null | null | gan.py | AtlantixJJ/LBSGAN | e91d500d4a9c02dd5e3bfcbd9a9eca96dc60102a | [
"BSD-2-Clause"
] | null | null | null | import argparse
import os
import sys
import time
import torch
import torch.nn.functional as F
import torchvision
import models, lib
cfg = lib.config.BaseConfig()
cfg.parse()
print('Preparing model')
gen_model = cfg.gen_function(
upsample=cfg.upsample,
map_size=cfg.map_size,
out_dim=cfg.out_dim)
disc_model = cfg.disc_function(
downsample=cfg.downsample,
in_dim=cfg.out_dim)
if cfg.num_gpu > 1:
gen_model = torch.nn.DataParallel(gen_model)
disc_model = torch.nn.DataParallel(disc_model)
gen_model.cuda()
disc_model.cuda()
print(gen_model)
print(disc_model)
print("=> Generator")
print(gen_model)
print("=> Discriminator")
print(disc_model)
if cfg.args.delayed_batch_size > -1:
trainer = lib.train.DelayLBSTrainer(gen_model=gen_model, disc_model=disc_model, dataloader=cfg.dl, cfg=cfg)
else:
trainer = lib.train.BaseGANTrainer(gen_model=gen_model, disc_model=disc_model, dataloader=cfg.dl, cfg=cfg)
trainer.train()
| 24.461538 | 111 | 0.765199 |
65f47a4c6cbf9c3cbfef8996d91a66023d1ce4f0 | 1,475 | py | Python | leetcode/minimumAreaRectangle.py | federicoemartinez/problem_solving | d0352f76bc21ed67d6851a159a00f70a892934b9 | [
"MIT"
] | null | null | null | leetcode/minimumAreaRectangle.py | federicoemartinez/problem_solving | d0352f76bc21ed67d6851a159a00f70a892934b9 | [
"MIT"
] | null | null | null | leetcode/minimumAreaRectangle.py | federicoemartinez/problem_solving | d0352f76bc21ed67d6851a159a00f70a892934b9 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/minimum-area-rectangle/description/
"""
Given a set of points in the xy-plane, determine the minimum area of a rectangle formed from these points, with sides parallel to the x and y axes.
If there isn't any rectangle, return 0.
Example 1:
Input: [[1,1],[1,3],[3,1],[3,3],[2,2]]
Output: 4
Example 2:
Input: [[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]
Output: 2
Note:
1 <= points.length <= 500
0 <= points[i][0] <= 40000
0 <= points[i][1] <= 40000
All points are distinct.
"""
from collections import defaultdict
| 27.314815 | 147 | 0.553898 |
65f96718aa17ce886b225fbdf113223d6df0b594 | 3,002 | py | Python | code/google_sheet_writing.py | BastinFlorian/BoondManager-Auto-Holidays-Validation | 28ae01d997132745018666952829771d5f8d99a3 | [
"MIT"
] | null | null | null | code/google_sheet_writing.py | BastinFlorian/BoondManager-Auto-Holidays-Validation | 28ae01d997132745018666952829771d5f8d99a3 | [
"MIT"
] | 18 | 2020-03-24T17:24:10.000Z | 2022-03-12T00:29:56.000Z | code/google_sheet_writing.py | BastinFlorian/BoondManager-Auto-Holidays-Validation | 28ae01d997132745018666952829771d5f8d99a3 | [
"MIT"
] | null | null | null | '''Functions writing the needed informations in the google drive spreadsheet
From CP, RTT and holidays request : create a worksheet per employee --
write_info_in_worksheet(info_paie, out_attente, out_valide, name, sh, problemes_date, problemes_type_conge)
'''
from google_sheet_access import *
# Write in worksheet at the specific cells
| 37.061728 | 112 | 0.651899 |
65f9d6849276abc9d2abce58b864383e8eca894c | 531 | py | Python | madlib.py | Yukthi-C/python_learing | 340579e2bb767e8fdb209f705fdf12058e8e150f | [
"MIT"
] | null | null | null | madlib.py | Yukthi-C/python_learing | 340579e2bb767e8fdb209f705fdf12058e8e150f | [
"MIT"
] | null | null | null | madlib.py | Yukthi-C/python_learing | 340579e2bb767e8fdb209f705fdf12058e8e150f | [
"MIT"
] | null | null | null | ad1 = input(f"Adjective1: ")
ad2 = input(f"Adjective2: ")
part1 = input(f"body part: ")
dish = input(f"Dish: ")
madlib=f"One day, a {ad1} fox invited a stork for dinner. \
Stork was very {ad2} with the invitation she reached the foxs home on time and knocked at the door with her {part1}.\
The fox took her to the dinner table and served some {dish} in shallow bowls for both of them.\
As the bowl was too shallow for the stork, she couldnt have soup at all. But, the fox licked up his soup quickly."
print(f"{madlib}") | 59 | 121 | 0.706215 |
65fb489a3669c5076b79a0d2bdaf7df0aec3faeb | 3,114 | py | Python | algofi/v1/send_keyreg_online_transaction.py | Algofiorg/algofi-py-sdk | 6100a6726d36db4d4d3287064f0ad1d0b9a05e03 | [
"MIT"
] | 38 | 2021-12-30T02:32:57.000Z | 2022-03-23T22:09:16.000Z | algofi/v1/send_keyreg_online_transaction.py | Algofiorg/algofi-py-sdk | 6100a6726d36db4d4d3287064f0ad1d0b9a05e03 | [
"MIT"
] | 4 | 2021-11-03T00:14:46.000Z | 2022-03-28T02:17:33.000Z | algofi/v1/send_keyreg_online_transaction.py | Algofiorg/algofi-py-sdk | 6100a6726d36db4d4d3287064f0ad1d0b9a05e03 | [
"MIT"
] | 8 | 2021-12-15T05:29:55.000Z | 2022-02-08T03:45:11.000Z |
from algosdk.future.transaction import ApplicationNoOpTxn
from .prepend import get_init_txns
from ..utils import Transactions, TransactionGroup, int_to_bytes
from ..contract_strings import algofi_manager_strings as manager_strings
def prepare_send_keyreg_online_transactions(sender, suggested_params, storage_account, vote_pk, selection_pk, state_proof_pk, vote_first, vote_last, vote_key_dilution,
                                            manager_app_id, supported_market_app_ids, supported_oracle_app_ids):
    """Build a :class:`TransactionGroup` that registers the storage account online.

    The sender instructs the algo vault to register itself online so it can
    participate in Algorand's consensus.

    NOTE: The storage account address must be registered with a participation
    node before taking it online; registering online without a participation
    node is unsafe. See
    https://developer.algorand.org/docs/run-a-node/participate/generate_keys

    :param sender: account address for the sender
    :type sender: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :param storage_account: storage account address for sender
    :type storage_account: string
    :param vote_pk: vote key
    :type vote_pk: bytes
    :param selection_pk: selection key
    :type selection_pk: bytes
    :param state_proof_pk: state proof key
    :type state_proof_pk: bytes
    :param vote_first: first round to vote in consensus
    :type vote_first: int
    :param vote_last: last round to vote in consensus
    :type vote_last: int
    :param vote_key_dilution: vote key dilution
    :type vote_key_dilution: int
    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param supported_market_app_ids: list of supported market application ids
    :type supported_market_app_ids: list
    :param supported_oracle_app_ids: list of supported oracle application ids
    :type supported_oracle_app_ids: list
    :return: :class:`TransactionGroup` object representing the keyreg-online group
    :rtype: :class:`TransactionGroup`
    """
    # Standard protocol prefix transactions for this transaction type.
    init_txns = get_init_txns(
        transaction_type=Transactions.SEND_KEYREG_ONLINE_TXN,
        sender=sender,
        suggested_params=suggested_params,
        manager_app_id=manager_app_id,
        supported_market_app_ids=supported_market_app_ids,
        supported_oracle_app_ids=supported_oracle_app_ids,
        storage_account=storage_account,
    )

    # Application arguments: method selector followed by the participation
    # keys and the uint64-encoded voting round parameters.
    keyreg_args = [
        manager_strings.send_keyreg_txn.encode(),
        vote_pk,
        selection_pk,
        state_proof_pk,
        int_to_bytes(vote_first),
        int_to_bytes(vote_last),
        int_to_bytes(vote_key_dilution),
    ]

    manager_call = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id,
        app_args=keyreg_args,
        accounts=[storage_account],
    )

    return TransactionGroup(init_txns + [manager_call])
65fdd0400541291beac65b8a408eaf8121f2b56b | 402 | py | Python | server/resources/platform.py | simon-dube/CARMIN-server | 1481d2c4231458d33119c57ab2e3e480375da63b | [
"MIT"
] | 1 | 2018-03-12T23:08:12.000Z | 2018-03-12T23:08:12.000Z | server/resources/platform.py | simon-dube/CARMIN-server | 1481d2c4231458d33119c57ab2e3e480375da63b | [
"MIT"
] | 15 | 2018-03-15T04:23:31.000Z | 2018-06-28T21:46:15.000Z | server/resources/platform.py | simon-dube/CARMIN-server | 1481d2c4231458d33119c57ab2e3e480375da63b | [
"MIT"
] | null | null | null | from flask_restful import Resource
from server.platform_properties import PLATFORM_PROPERTIES
from server.resources.models.platform_properties import PlatformPropertiesSchema
from server.resources.decorators import marshal_response
| 36.545455 | 80 | 0.840796 |
65febfc830676365453c5d43b397d3e86ac87c5f | 471 | py | Python | invenio_flow/decorators.py | egabancho/invenio-flow | 583e55d17ab6aabd20bc4a46d098f034c0d0f693 | [
"MIT"
] | null | null | null | invenio_flow/decorators.py | egabancho/invenio-flow | 583e55d17ab6aabd20bc4a46d098f034c0d0f693 | [
"MIT"
] | null | null | null | invenio_flow/decorators.py | egabancho/invenio-flow | 583e55d17ab6aabd20bc4a46d098f034c0d0f693 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Esteban J. G. Gabancho.
#
# Invenio-Flow is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Useful decorators."""
from celery import shared_task
from .api import Task
def task(*args, **kwargs):
    """Variant of :func:`celery.shared_task` whose ``base`` class defaults to :class:`Task`."""
    if 'base' not in kwargs:
        kwargs['base'] = Task
    return shared_task(*args, **kwargs)
| 23.55 | 73 | 0.694268 |
65ff6cff89c7853c15b51290646017146b4909fa | 2,460 | py | Python | backend/offchain/types/fund_pull_pre_approval_types.py | tanshuai/reference-wallet | e8efec4acc6af6e319cf075c10693ddf7754cc83 | [
"Apache-2.0"
] | 14 | 2020-12-17T08:03:51.000Z | 2022-03-26T04:21:18.000Z | backend/offchain/types/fund_pull_pre_approval_types.py | tanshuai/reference-wallet | e8efec4acc6af6e319cf075c10693ddf7754cc83 | [
"Apache-2.0"
] | 20 | 2020-12-15T12:02:56.000Z | 2021-05-19T23:37:34.000Z | backend/offchain/types/fund_pull_pre_approval_types.py | tanshuai/reference-wallet | e8efec4acc6af6e319cf075c10693ddf7754cc83 | [
"Apache-2.0"
] | 12 | 2020-12-10T16:35:27.000Z | 2022-02-01T04:06:10.000Z | import typing
from dataclasses import dataclass, field as datafield
from .command_types import CommandType
class FundPullPreApprovalType:
    # String constants for the fund-pull pre-approval "type" field on the wire.
    consent = "consent"
    save_sub_account = "save_sub_account"
| 27.032967 | 88 | 0.681301 |
65ffb62169d811cc14af150c5eafa69ec8772792 | 19,924 | py | Python | data/battle_animation_scripts.py | kielbasiago/WorldsCollide | 5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2 | [
"MIT"
] | 7 | 2022-01-15T02:53:53.000Z | 2022-02-17T00:51:32.000Z | data/battle_animation_scripts.py | asilverthorn/WorldsCollide | 5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2 | [
"MIT"
] | 8 | 2022-01-16T02:45:24.000Z | 2022-03-21T02:08:27.000Z | data/battle_animation_scripts.py | asilverthorn/WorldsCollide | 5aa7cffdecd14754c9eaa83cd0ad4d0282cc2cc2 | [
"MIT"
] | 5 | 2022-01-15T02:53:38.000Z | 2022-01-19T17:42:10.000Z | # List of addresses within the Battle Animation Scripts for the following commands which cause screen flashes:
# B0 - Set background palette color addition (absolute)
# B5 - Add color to background palette (relative)
# AF - Set background palette color subtraction (absolute)
# B6 - Subtract color from background palette (relative)
# By changing address + 1 to E0 (for absolute) or F0 (for relative), it causes no change to the background color (that is, no flash)
# Maps an animation/spell name to the list of ROM offsets (within the battle
# animation script data) of the color-math commands that produce its screen
# flashes.  Per the header above, patching offset+1 to E0 (absolute commands
# B0/AF) or F0 (relative commands B5/B6) neutralizes the flash.
BATTLE_ANIMATION_FLASHES = {
    "Goner": [
        0x100088, # AF E0 - set background color subtraction to 0 (black)
        0x10008C, # B6 61 - increase background color subtraction by 1 (red)
        0x100092, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x100098, # B6 81 - increase background color subtraction by 1 (cyan)
        0x1000A1, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x1000A3, # B6 21 - increase background color subtraction by 1 (yellow)
        0x1000D3, # B6 8F - increase background color subtraction by 15 (cyan)
        0x1000DF, # B0 FF - set background color addition to 31 (white)
        0x100172, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Final KEFKA Death": [
        0x10023A, # B0 FF - set background color addition to 31 (white)
        0x100240, # B5 F4 - decrease background color addition by 4 (white)
        0x100248, # B0 FF - set background color addition to 31 (white)
        0x10024E, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Atom Edge": [ # Also True Edge
        0x1003D0, # AF E0 - set background color subtraction to 0 (black)
        0x1003DD, # B6 E1 - increase background color subtraction by 1 (black)
        0x1003E6, # B6 E1 - increase background color subtraction by 1 (black)
        0x10044B, # B6 F1 - decrease background color subtraction by 1 (black)
        0x100457, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Boss Death": [
        0x100476, # B0 FF - set background color addition to 31 (white)
        0x10047C, # B5 F4 - decrease background color addition by 4 (white)
        0x100484, # B0 FF - set background color addition to 31 (white)
        0x100497, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Transform into Magicite": [
        0x100F30, # B0 FF - set background color addition to 31 (white)
        0x100F3F, # B5 F2 - decrease background color addition by 2 (white)
        0x100F4E, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Purifier": [
        0x101340, # AF E0 - set background color subtraction to 0 (black)
        0x101348, # B6 62 - increase background color subtraction by 2 (red)
        0x101380, # B6 81 - increase background color subtraction by 1 (cyan)
        0x10138A, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Wall": [
        0x10177B, # AF E0 - set background color subtraction to 0 (black)
        0x10177F, # B6 61 - increase background color subtraction by 1 (red)
        0x101788, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101791, # B6 81 - increase background color subtraction by 1 (cyan)
        0x10179A, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x1017A3, # B6 41 - increase background color subtraction by 1 (magenta)
        0x1017AC, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x1017B5, # B6 51 - decrease background color subtraction by 1 (magenta)
    ],
    "Pearl": [
        0x10190E, # B0 E0 - set background color addition to 0 (white)
        0x101913, # B5 E2 - increase background color addition by 2 (white)
        0x10191E, # B5 F1 - decrease background color addition by 1 (white)
        0x10193E, # B6 C2 - increase background color subtraction by 2 (blue)
    ],
    "Ice 3": [
        0x101978, # B0 FF - set background color addition to 31 (white)
        0x10197B, # B5 F4 - decrease background color addition by 4 (white)
        0x10197E, # B5 F4 - decrease background color addition by 4 (white)
        0x101981, # B5 F4 - decrease background color addition by 4 (white)
        0x101984, # B5 F4 - decrease background color addition by 4 (white)
        0x101987, # B5 F4 - decrease background color addition by 4 (white)
        0x10198A, # B5 F4 - decrease background color addition by 4 (white)
        0x10198D, # B5 F4 - decrease background color addition by 4 (white)
        0x101990, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Fire 3": [
        0x1019FA, # B0 9F - set background color addition to 31 (red)
        0x101A1C, # B5 94 - decrease background color addition by 4 (red)
    ],
    "Sleep": [
        0x101A23, # AF E0 - set background color subtraction to 0 (black)
        0x101A29, # B6 E1 - increase background color subtraction by 1 (black)
        0x101A33, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "7-Flush": [
        0x101B43, # AF E0 - set background color subtraction to 0 (black)
        0x101B47, # B6 61 - increase background color subtraction by 1 (red)
        0x101B4D, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101B53, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101B59, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x101B5F, # B6 41 - increase background color subtraction by 1 (magenta)
        0x101B65, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x101B6B, # B6 51 - decrease background color subtraction by 1 (magenta)
    ],
    "H-Bomb": [
        0x101BC5, # B0 E0 - set background color addition to 0 (white)
        0x101BC9, # B5 E1 - increase background color addition by 1 (white)
        0x101C13, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Revenger": [
        0x101C62, # AF E0 - set background color subtraction to 0 (black)
        0x101C66, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101C6C, # B6 41 - increase background color subtraction by 1 (magenta)
        0x101C72, # B6 91 - decrease background color subtraction by 1 (cyan)
        0x101C78, # B6 21 - increase background color subtraction by 1 (yellow)
        0x101C7E, # B6 51 - decrease background color subtraction by 1 (magenta)
        0x101C84, # B6 81 - increase background color subtraction by 1 (cyan)
        0x101C86, # B6 31 - decrease background color subtraction by 1 (yellow)
        0x101C8C, # B6 91 - decrease background color subtraction by 1 (cyan)
    ],
    "Phantasm": [
        0x101DFD, # AF E0 - set background color subtraction to 0 (black)
        0x101E03, # B6 E1 - increase background color subtraction by 1 (black)
        0x101E07, # B0 FF - set background color addition to 31 (white)
        0x101E0D, # B5 F4 - decrease background color addition by 4 (white)
        0x101E15, # B6 E2 - increase background color subtraction by 2 (black)
        0x101E1F, # B0 FF - set background color addition to 31 (white)
        0x101E27, # B5 F4 - decrease background color addition by 4 (white)
        0x101E2F, # B6 E2 - increase background color subtraction by 2 (black)
        0x101E3B, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "TigerBreak": [
        0x10240D, # B0 FF - set background color addition to 31 (white)
        0x102411, # B5 F2 - decrease background color addition by 2 (white)
        0x102416, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Metamorph": [
        0x102595, # AF E0 - set background color subtraction to 0 (black)
        0x102599, # B6 61 - increase background color subtraction by 1 (red)
        0x1025AF, # B6 71 - decrease background color subtraction by 1 (red)
    ],
    "Cat Rain": [
        0x102677, # B0 FF - set background color addition to 31 (white)
        0x10267B, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Charm": [
        0x1026EE, # B0 FF - set background color addition to 31 (white)
        0x1026FB, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Mirager": [
        0x102791, # B0 FF - set background color addition to 31 (white)
        0x102795, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "SabreSoul": [
        0x1027D3, # B0 FF - set background color addition to 31 (white)
        0x1027DA, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Back Blade": [
        0x1028D3, # AF FF - set background color subtraction to 31 (black)
        0x1028DF, # B6 F4 - decrease background color subtraction by 4 (black)
    ],
    "RoyalShock": [
        0x102967, # B0 FF - set background color addition to 31 (white)
        0x10296B, # B5 F2 - decrease background color addition by 2 (white)
        0x102973, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Overcast": [
        0x102C3A, # AF E0 - set background color subtraction to 0 (black)
        0x102C55, # B6 E1 - increase background color subtraction by 1 (black)
        0x102C8D, # B6 F1 - decrease background color subtraction by 1 (black)
        0x102C91, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "Disaster": [
        0x102CEE, # AF E0 - set background color subtraction to 0 (black)
        0x102CF2, # B6 E1 - increase background color subtraction by 1 (black)
        0x102D19, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "ForceField": [
        0x102D3A, # B0 E0 - set background color addition to 0 (white)
        0x102D48, # B5 E1 - increase background color addition by 1 (white)
        0x102D64, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Terra/Tritoch Lightning": [
        0x102E05, # B0 E0 - set background color addition to 0 (white)
        0x102E09, # B5 81 - increase background color addition by 1 (red)
        0x102E24, # B5 61 - increase background color addition by 1 (cyan)
    ],
    "S. Cross": [
        0x102EDA, # AF E0 - set background color subtraction to 0 (black)
        0x102EDE, # B6 E2 - increase background color subtraction by 2 (black)
        0x102FA8, # B6 F2 - decrease background color subtraction by 2 (black)
        0x102FB1, # B0 E0 - set background color addition to 0 (white)
        0x102FBE, # B5 E2 - increase background color addition by 2 (white)
        0x102FD9, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Mind Blast": [
        0x102FED, # B0 E0 - set background color addition to 0 (white)
        0x102FF1, # B5 81 - increase background color addition by 1 (red)
        0x102FF7, # B5 91 - decrease background color addition by 1 (red)
        0x102FF9, # B5 21 - increase background color addition by 1 (blue)
        0x102FFF, # B5 31 - decrease background color addition by 1 (blue)
        0x103001, # B5 C1 - increase background color addition by 1 (yellow)
        0x103007, # B5 91 - decrease background color addition by 1 (red)
        0x10300D, # B5 51 - decrease background color addition by 1 (green)
        0x103015, # B5 E2 - increase background color addition by 2 (white)
        0x10301F, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Flare Star": [
        0x1030F5, # B0 E0 - set background color addition to 0 (white)
        0x103106, # B5 81 - increase background color addition by 1 (red)
        0x10310D, # B5 E2 - increase background color addition by 2 (white)
        0x103123, # B5 71 - decrease background color addition by 1 (cyan)
        0x10312E, # B5 91 - decrease background color addition by 1 (red)
    ],
    "Quasar": [
        0x1031D2, # AF E0 - set background color subtraction to 0 (black)
        0x1031D6, # B6 E1 - increase background color subtraction by 1 (black)
        0x1031FA, # B6 F1 - decrease background color subtraction by 1 (black)
    ],
    "R.Polarity": [
        0x10328B, # B0 FF - set background color addition to 31 (white)
        0x103292, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Rippler": [
        0x1033C6, # B0 FF - set background color addition to 31 (white)
        0x1033CA, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Step Mine": [
        0x1034D9, # B0 FF - set background color addition to 31 (white)
        0x1034E0, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "L.5 Doom": [
        0x1035E6, # B0 FF - set background color addition to 31 (white)
        0x1035F6, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "Megazerk": [
        0x103757, # B0 80 - set background color addition to 0 (red)
        0x103761, # B5 82 - increase background color addition by 2 (red)
        0x10378F, # B5 92 - decrease background color addition by 2 (red)
        0x103795, # B5 92 - decrease background color addition by 2 (red)
        0x10379B, # B5 92 - decrease background color addition by 2 (red)
        0x1037A1, # B5 92 - decrease background color addition by 2 (red)
        0x1037A7, # B5 92 - decrease background color addition by 2 (red)
        0x1037AD, # B5 92 - decrease background color addition by 2 (red)
        0x1037B3, # B5 92 - decrease background color addition by 2 (red)
        0x1037B9, # B5 92 - decrease background color addition by 2 (red)
        0x1037C0, # B5 92 - decrease background color addition by 2 (red)
    ],
    "Schiller": [
        0x103819, # B0 FF - set background color addition to 31 (white)
        0x10381D, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "WallChange": [
        0x10399E, # B0 FF - set background color addition to 31 (white)
        0x1039A3, # B5 F2 - decrease background color addition by 2 (white)
        0x1039A9, # B5 F2 - decrease background color addition by 2 (white)
        0x1039AF, # B5 F2 - decrease background color addition by 2 (white)
        0x1039B5, # B5 F2 - decrease background color addition by 2 (white)
        0x1039BB, # B5 F2 - decrease background color addition by 2 (white)
        0x1039C1, # B5 F2 - decrease background color addition by 2 (white)
        0x1039C7, # B5 F2 - decrease background color addition by 2 (white)
        0x1039CD, # B5 F2 - decrease background color addition by 2 (white)
        0x1039D4, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Ultima": [
        0x1056CB, # AF 60 - set background color subtraction to 0 (red)
        0x1056CF, # B6 C2 - increase background color subtraction by 2 (blue)
        0x1056ED, # B0 FF - set background color addition to 31 (white)
        0x1056F5, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Bolt 3": [ # Also Giga Volt
        0x10588E, # B0 FF - set background color addition to 31 (white)
        0x105893, # B5 F4 - decrease background color addition by 4 (white)
        0x105896, # B5 F4 - decrease background color addition by 4 (white)
        0x105899, # B5 F4 - decrease background color addition by 4 (white)
        0x10589C, # B5 F4 - decrease background color addition by 4 (white)
        0x1058A1, # B5 F4 - decrease background color addition by 4 (white)
        0x1058A6, # B5 F4 - decrease background color addition by 4 (white)
        0x1058AB, # B5 F4 - decrease background color addition by 4 (white)
        0x1058B0, # B5 F4 - decrease background color addition by 4 (white)
    ],
    "X-Zone": [
        0x105A5D, # B0 FF - set background color addition to 31 (white)
        0x105A6A, # B5 F2 - decrease background color addition by 2 (white)
        0x105A79, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Dispel": [
        0x105DC2, # B0 FF - set background color addition to 31 (white)
        0x105DC9, # B5 F1 - decrease background color addition by 1 (white)
        0x105DD2, # B5 F1 - decrease background color addition by 1 (white)
        0x105DDB, # B5 F1 - decrease background color addition by 1 (white)
        0x105DE4, # B5 F1 - decrease background color addition by 1 (white)
        0x105DED, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Muddle": [ # Also L.3 Muddle, Confusion
        0x1060EA, # B0 FF - set background color addition to 31 (white)
        0x1060EE, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Shock": [
        0x1068BE, # B0 FF - set background color addition to 31 (white)
        0x1068D0, # B5 F1 - decrease background color addition by 1 (white)
    ],
    "Bum Rush": [
        0x106C3E, # B0 E0 - set background color addition to 0 (white)
        0x106C47, # B0 E0 - set background color addition to 0 (white)
        0x106C53, # B0 E0 - set background color addition to 0 (white)
        0x106C7E, # B0 FF - set background color addition to 31 (white)
        0x106C87, # B0 E0 - set background color addition to 0 (white)
        0x106C95, # B0 FF - set background color addition to 31 (white)
        0x106C9E, # B0 E0 - set background color addition to 0 (white)
    ],
    "Stunner": [
        0x1071BA, # B0 20 - set background color addition to 0 (blue)
        0x1071C1, # B5 24 - increase background color addition by 4 (blue)
        0x1071CA, # B5 24 - increase background color addition by 4 (blue)
        0x1071D5, # B5 24 - increase background color addition by 4 (blue)
        0x1071DE, # B5 24 - increase background color addition by 4 (blue)
        0x1071E9, # B5 24 - increase background color addition by 4 (blue)
        0x1071F2, # B5 24 - increase background color addition by 4 (blue)
        0x1071FD, # B5 24 - increase background color addition by 4 (blue)
        0x107206, # B5 24 - increase background color addition by 4 (blue)
        0x107211, # B5 24 - increase background color addition by 4 (blue)
        0x10721A, # B5 24 - increase background color addition by 4 (blue)
        0x10725A, # B5 32 - decrease background color addition by 2 (blue)
    ],
    "Quadra Slam": [ # Also Quadra Slice
        0x1073DC, # B0 FF - set background color addition to 31 (white)
        0x1073EE, # B5 F2 - decrease background color addition by 2 (white)
        0x1073F3, # B5 F2 - decrease background color addition by 2 (white)
        0x107402, # B0 5F - set background color addition to 31 (green)
        0x107424, # B5 54 - decrease background color addition by 4 (green)
        0x107429, # B5 54 - decrease background color addition by 4 (green)
        0x107436, # B0 3F - set background color addition to 31 (blue)
        0x107458, # B5 34 - decrease background color addition by 4 (blue)
        0x10745D, # B5 34 - decrease background color addition by 4 (blue)
        0x107490, # B0 9F - set background color addition to 31 (red)
        0x1074B2, # B5 94 - decrease background color addition by 4 (red)
        0x1074B7, # B5 94 - decrease background color addition by 4 (red)
    ],
    "Slash": [
        0x1074F4, # B0 FF - set background color addition to 31 (white)
        0x1074FD, # B5 F2 - decrease background color addition by 2 (white)
        0x107507, # B5 F2 - decrease background color addition by 2 (white)
    ],
    "Flash": [
        0x107850, # B0 FF - set background color addition to 31 (white)
        0x10785C, # B5 F1 - decrease background color addition by 1 (white)
    ]
}
| 58.428152 | 133 | 0.630546 |
65ffed323033ff0ac5225d3d784dead8adf418b4 | 2,643 | py | Python | Jumpscale/tools/capacity/reality_parser.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 2 | 2019-05-09T07:21:25.000Z | 2019-08-05T06:37:53.000Z | Jumpscale/tools/capacity/reality_parser.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 664 | 2018-12-19T12:43:44.000Z | 2019-08-23T04:24:42.000Z | Jumpscale/tools/capacity/reality_parser.py | threefoldtech/jumpscale10 | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 7 | 2019-05-03T07:14:37.000Z | 2019-08-05T12:36:52.000Z | """
this module contain the logic of parsing the actual usage of the ressource unit of a zero-os node
"""
from .units import GiB
from sal_zos.disks.Disks import StorageType
__str__ = __repr__
def _parse_memory(used_memory):
    """Convert an amount of used memory from bytes into memory resource units.

    :param used_memory: amount of used memory in bytes
    :type used_memory: float
    :return: number of MRU
    :rtype: float
    """
    memory_in_units = used_memory / GiB
    return memory_in_units
| 26.69697 | 97 | 0.626182 |
5a01dafbd8cdef4d174904ccd475a2627ada858d | 3,314 | py | Python | fsem/similarity_measures/jaro.py | sajith-rahim/fs-em | 2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a | [
"BSD-3-Clause"
] | null | null | null | fsem/similarity_measures/jaro.py | sajith-rahim/fs-em | 2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a | [
"BSD-3-Clause"
] | null | null | null | fsem/similarity_measures/jaro.py | sajith-rahim/fs-em | 2e8dde8b5f36ee1e1dfc5407611ec2fb91630c2a | [
"BSD-3-Clause"
] | null | null | null | import math
__all__ = ['get_jaro_distance']
__author__ = 'Jean-Bernard Ratte - jean.bernard.ratte@unary.ca'
""" Find the Jaro Winkler Distance which indicates the similarity score between two Strings.
The Jaro measure is the weighted sum of percentage of matched characters from each file and transposed characters.
Winkler increased this measure for matching initial characters.
This implementation is based on the Jaro Winkler similarity algorithm from
http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
This Python implementation is based on the Apache StringUtils implementation from
http://commons.apache.org/proper/commons-lang/apidocs/src-html/org/apache/commons/lang3/StringUtils.html#line.7141
"""
def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1):
    """Return the Jaro (or Jaro-Winkler) distance between two strings.

    :param first: word to calculate distance for
    :param second: word to calculate distance with
    :param winkler: same as winkler_ajustment
    :param winkler_ajustment: add an adjustment factor to the Jaro of the distance
    :param scaling: scaling factor for the Winkler adjustment
    :return: Jaro distance adjusted (or not), rounded to two decimal places
    :raises JaroDistanceException: if either input is None or empty
    """
    if not first or not second:
        # Bug fix: the old message always claimed "NoneType" even when an
        # argument was merely an empty string; report the real types instead.
        raise JaroDistanceException(
            "Cannot calculate distance from empty or missing input ({0}, {1})".format(
                first.__class__.__name__,
                second.__class__.__name__))

    jaro = _score(first, second)
    # Winkler uses at most the first 4 characters of the common prefix.
    cl = min(len(_get_prefix(first, second)), 4)

    if winkler and winkler_ajustment:  # 0.1 as scaling factor
        return round((jaro + (scaling * cl * (1.0 - jaro))) * 100.0) / 100.0

    return jaro
| 31.561905 | 118 | 0.658117 |
5a0258dc0630fde008fae59e8ca2f2322000aca2 | 732 | py | Python | UnitTests/FullAtomModel/PDB2Coords/test.py | dendisuhubdy/TorchProteinLibrary | 89f0f6c311658b9313484cd92804682a251b1b97 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/PDB2Coords/test.py | dendisuhubdy/TorchProteinLibrary | 89f0f6c311658b9313484cd92804682a251b1b97 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/PDB2Coords/test.py | dendisuhubdy/TorchProteinLibrary | 89f0f6c311658b9313484cd92804682a251b1b97 | [
"MIT"
] | null | null | null | import sys
import os
import matplotlib.pylab as plt
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import seaborn as sea
import torch
from TorchProteinLibrary import FullAtomModel
if __name__=='__main__':
	# Parse the PDB file into flat coordinates without reordering atoms.
	# p2c = FullAtomModel.PDB2Coords.PDB2CoordsBiopython()
	p2c = FullAtomModel.PDB2CoordsUnordered()
	coords, res, anames, num_atoms = p2c(["f4TQ1_B.pdb"])
	# Inspect the returned tensors: coordinates, residue names, atom names,
	# and per-structure atom counts.
	print (coords.size())
	print (res.size())
	print (anames.size())
	print (num_atoms)
	# The coordinate tensor is flat along dim 1 (3 values per atom);
	# reshape it into an (N, 3) array of atom positions.
	coords = coords.numpy()
	coords = coords.reshape(int(coords.shape[1]/3), 3)
	# Render the atoms as a 3D scatter plot.
	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')
	x = coords[:,0]
	y = coords[:,1]
	z = coords[:,2]
	ax.scatter(x,y,z)
	plt.show()
5a025cdbfc11bf834d39b1a16efe1582cdd5e329 | 4,306 | py | Python | vae/scripts/gm_vae_fc_toy.py | ondrejba/vae | 23f179637ca45c20d4e5f74e8c56b62f57554ef4 | [
"MIT"
] | 1 | 2019-11-23T20:51:58.000Z | 2019-11-23T20:51:58.000Z | vae/scripts/gm_vae_fc_toy.py | ondrejba/vae | 23f179637ca45c20d4e5f74e8c56b62f57554ef4 | [
"MIT"
] | null | null | null | vae/scripts/gm_vae_fc_toy.py | ondrejba/vae | 23f179637ca45c20d4e5f74e8c56b62f57554ef4 | [
"MIT"
] | 1 | 2021-12-01T07:29:39.000Z | 2021-12-01T07:29:39.000Z | import argparse
import collections
import os
import numpy as np
import matplotlib.pyplot as plt
from .. import toy_dataset
from .. import gm_vae_fc
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num-training-steps", type=int, default=100000)
parser.add_argument("--learning-rate", type=float, default=0.0001)
parser.add_argument("--batch-size", type=int, default=50)
parser.add_argument("--weight-decay", type=float, default=0.0)
parser.add_argument("--clip-z-prior", type=float, default=1.4)
parser.add_argument("--gpus", default=None)
parser.add_argument("--gpu-memory-fraction", default=None, type=float)
parsed = parser.parse_args()
main(parsed)
| 31.202899 | 107 | 0.65699 |
5a054c6f2f48cad9dc180b59f6e0034f5b144f73 | 331 | py | Python | codes/day06/03.py | Youngfellows/HPyBaseCode | 94d11872795d85b8c4387b650e82edcd20da0667 | [
"Apache-2.0"
] | null | null | null | codes/day06/03.py | Youngfellows/HPyBaseCode | 94d11872795d85b8c4387b650e82edcd20da0667 | [
"Apache-2.0"
] | null | null | null | codes/day06/03.py | Youngfellows/HPyBaseCode | 94d11872795d85b8c4387b650e82edcd20da0667 | [
"Apache-2.0"
] | null | null | null |
# NOTE(review): `Dog` and `test` are defined elsewhere in this module (not
# visible in this excerpt); the constructor argument appears to be a string
# attribute of the dog — confirm against the class definition.
wangcai = Dog("")
#wangcai.printColor()
xiaoqiang = Dog("")
#xiaoqiang.printColor()
test(wangcai)
| 15.045455 | 34 | 0.592145 |
5a05e2efcbe249cfc654b1e6e98561ecca3c15b5 | 1,158 | py | Python | LC_problems/699.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/699.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/699.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 699.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/5/26 17:22
------------
"""
from typing import List
if __name__ == '__main__':
    # Quick manual check for LeetCode 699 (Falling Squares).
    # NOTE(review): `Solution` is defined earlier in this module; its
    # definition is not visible in this excerpt.
    s = Solution()
    print(s.fallingSquares([[9, 6], [2, 2], [2, 6]]))
| 28.95 | 105 | 0.443005 |
5a05e7be3fed210c95055f9564a15535552003ac | 5,150 | py | Python | plastid/test/functional/test_metagene.py | joshuagryphon/plastid | e63a818e33766b01d84b3ac9bc9f55e6a1ece42f | [
"BSD-3-Clause"
] | 31 | 2016-04-05T09:58:29.000Z | 2022-01-18T11:58:30.000Z | plastid/test/functional/test_metagene.py | joshuagryphon/plastid | e63a818e33766b01d84b3ac9bc9f55e6a1ece42f | [
"BSD-3-Clause"
] | 49 | 2015-09-15T19:50:13.000Z | 2022-01-06T18:17:35.000Z | plastid/test/functional/test_metagene.py | joshuagryphon/plastid | e63a818e33766b01d84b3ac9bc9f55e6a1ece42f | [
"BSD-3-Clause"
] | 14 | 2017-02-08T09:38:57.000Z | 2020-09-16T02:32:46.000Z | #!/usr/bin/env python
"""Test suite for :py:mod:`plastid.bin.metagene`"""
import tempfile
import os
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from plastid.test.functional.base import execute_helper
from plastid.test.ref_files import (
RPATH,
REF_FILES,
COUNT_OPTIONS,
ANNOTATION_OPTIONS,
MASK_OPTIONS,
)
from plastid.bin.test_table_equality import main as table_test
from plastid.bin.metagene import main
from plastid.util.services.decorators import catch_stderr
#===============================================================================
# INDEX: global constants used by tests
#===============================================================================
TEST_INFO = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.metagene",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="metagene"),
}
_basename = os.path.join(TEST_INFO["temp_file_path"], "test_metagene")
#===============================================================================
# INDEX: tests
#===============================================================================
tests = [
# test generate cds start
(
"generate %s_cds_start --downstream 100 %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS),
[REF_FILES["yeast_metagene_cds_start"], REF_FILES["yeast_metagene_cds_start_bed"]], [
_basename + "_cds_start_rois.txt",
_basename + "_cds_start_rois.bed",
], ["", "--no_header"]
),
# test generate cds stop
(
"generate %s_cds_stop --upstream 100 --landmark cds_stop %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop"],
REF_FILES["yeast_metagene_cds_stop_bed"],
], [
_basename + "_cds_stop_rois.txt",
_basename + "_cds_stop_rois.bed",
], ["", "--no_header"]
),
# test count cds start with --norm_region
(
"count %s %s_cds_start --keep --norm_region 70 150 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop with --norm_region
(
"count %s %s_cds_stop --keep --norm_region 0 80 %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds start, using --normalize_over
(
"count %s %s_cds_start --keep --normalize_over 20 100 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop, using --normalize_over
(
"count %s %s_cds_stop --keep --normalize_over '-100' '-20' %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
]
"""Functional tests of :py:mod:`plastid.bin.metagene`.
Tests are specified as tuples of:
1. Command-line style arguments to pass to :py:func:`main`
2. A list of reference files that output should be compared against
3. A list of output files created by running :py:func:`main`
with the arguments provided in (1)
4. A list of strings specifying how equality should be evaluated
"""
#===============================================================================
# INDEX: test functions
#===============================================================================
| 39.615385 | 100 | 0.594175 |
5a0619271eef494f524dc719a9ba4f63c1373613 | 4,967 | py | Python | tests/router/test_router.py | macneiln/ombott | f18f6e0e639f20efb63b137edbab8c8b3871d354 | [
"MIT"
] | null | null | null | tests/router/test_router.py | macneiln/ombott | f18f6e0e639f20efb63b137edbab8c8b3871d354 | [
"MIT"
] | null | null | null | tests/router/test_router.py | macneiln/ombott | f18f6e0e639f20efb63b137edbab8c8b3871d354 | [
"MIT"
] | null | null | null | import pytest
from ombott.router import RadiRouter, Route
from ombott.router.errors import RouteMethodError
# Router test cases, consumed through the `routes_iter` fixture (defined
# elsewhere — presumably it splits an optional "name@" prefix off the rule
# and yields (name, rule, methods, expected, path) rows; TODO confirm
# against conftest, since TestRoutes unpacks five fields from these 4-tuples).
# Per tuple:
#   rule     -- route pattern, optionally "name@pattern" to register a named route
#   methods  -- HTTP method string, or list of methods, to register
#   expected -- handler id string, a dict of params expected to be captured
#               from dynamic segments, or an int status (404/405) when
#               resolution should fail
#   path     -- request path(s) to resolve; a ":METHOD" suffix overrides the
#               method used for the resolve call
route_meth_handler_path = [
    ('/foo/bar', 'GET', 'foo_bar:get', '/foo/bar'),
    ('/foo/bar', 'POST', 'foo_bar:post', '/foo/bar/'),
    ('/foo/bar', ['PUT', 'PATCH'], 'foo_bar:put,patch', 'foo/bar'),
    ('foo@/named/foo', ['PUT', 'PATCH'], 'foo@:put,patch', '/named/foo'),
    ('bar@/named/bar', ['PUT', 'PATCH'], 'bar@:put,patch', '/named/bar'),
    ('/foo/bar1', 'GET', 404, ['/foo/ba', '/foo/ba12']),
    ('/foo/bar1', 'POST', 405, '/foo/bar1:PATCH'),
    ('/foo/<re(pro.+?(?=l))>le/:user/bar', 'GET', dict(user='tom'), '/foo/profile/tom/bar'),
    ('/re/{re(to.)}/bar', 'GET', 're:get', '/re/tom/bar'),
    ('/re/{:re(to.)}/bar', 'PUT', 're:put', '/re/tom/bar'),
    ('/re/{name:re(to.)}/bar', 'POST', dict(name='tom'), '/re/tom/bar'),
    ('/re/{name:re(to.)}/bar1', 'GET', dict(name='tos'), '/re/tos/bar1'),
    ('/re/{surname:re(to.)}/bar2', 'GET', dict(surname='tok'), '/re/tok/bar2/'),
    ('/path/{pth:path()}/end', 'GET', dict(pth='this/path/to'), '/path/this/path/to/end'),
    ('/path1/{pth:path()}end', 'GET', dict(pth='this/path/to-'), '/path1/this/path/to-end'),
]
class TestRoutes:
    def test_routes(self, router, routes_iter):
        """
        Resolve one (name, rule, methods, expected, path) case against the router.

        `router` and `routes_iter` are pytest fixtures defined elsewhere;
        `routes_iter` presumably yields one transformed row of
        `route_meth_handler_path` per parametrized run — TODO confirm
        against conftest.
        """
        name, rule, meth, handler, path = routes_iter
        # A ":METHOD" suffix on the path overrides the registration method
        # for the resolve call (used to provoke 405 cases).
        path, _, path_meth = path.partition(':')
        end_point, err404_405 = router.resolve(path, path_meth or meth)
        if end_point is None:
            # Resolution miss: the expected value is the HTTP error code itself.
            assert handler in {404, 405}
            assert err404_405[0] == handler
        else:
            assert handler is not None
            route_meth, params, hooks = end_point
            assert route_meth.handler == handler
            if isinstance(meth, str):
                assert route_meth.name == meth
            else:
                # Multi-method registration is expected to name the route
                # method after the first method in the list.
                assert route_meth.name == meth[0]
            if params:
                # Dynamic-segment cases encode the expected captured params
                # as the `handler` dict itself.
                assert params == handler
            if name:
                # Named routes must map back to the very same RouteMethod object.
                assert router[name][meth] is route_meth
| 29.742515 | 92 | 0.582444 |
5a06baf447f7c7644ae324b314d4d848bee4ba67 | 12,225 | py | Python | app_api/serializers.py | pkucsie/SIEPServer | 00b0637eb8302135dfc772fccd18cd749a93e5c6 | [
"Apache-2.0"
] | 2 | 2021-02-12T10:02:42.000Z | 2021-03-15T13:08:04.000Z | app_api/serializers.py | pkucsie/SIEPServer | 00b0637eb8302135dfc772fccd18cd749a93e5c6 | [
"Apache-2.0"
] | null | null | null | app_api/serializers.py | pkucsie/SIEPServer | 00b0637eb8302135dfc772fccd18cd749a93e5c6 | [
"Apache-2.0"
] | null | null | null | import datetime
import time
from utils import utils
from rest_framework import serializers
from rest_framework.relations import StringRelatedField
from app_api.models import Album, Info, Order, Coupon, Integral, Notice, Lesson, Question, Cart, Setup, User, Bill, Address, Catalog, Log, \
ReadType, Teacher, Comment, \
Hot, Recharge, LabelFollow, Student, Navigation, Read, Article, History, Qa, ArticleType, UserNotice, Slider, \
UserLesson, Nav, LabelType, \
IntegralType, Label, Footer, CommonPathConfig, StudentType, LessonType, LessonHardType, Chapter, Term, QaType, \
RechargeAction, RechargePay, \
CouponRange, CouponStatus, OrderItem, OrderStatus, Consult, ReadChapterItem, ReadChapter, LogType, VipGuest, Judge, \
Organization, TaskTimeline, Project, Score, WXAdmin, WXUser
| 23.41954 | 140 | 0.674683 |
5a07b3f93f0df160b35b13e2ca081e2f2413ce44 | 718 | py | Python | 6_API/pytorch/configure.py | misoA/DeepCalendar | 50cafc1e70f125f3b6b42cd88e1e9dd071676b49 | [
"MIT"
] | null | null | null | 6_API/pytorch/configure.py | misoA/DeepCalendar | 50cafc1e70f125f3b6b42cd88e1e9dd071676b49 | [
"MIT"
] | 3 | 2019-01-14T06:59:24.000Z | 2019-01-14T07:48:38.000Z | 6_API/pytorch/configure.py | misoA/DeepCalendar | 50cafc1e70f125f3b6b42cd88e1e9dd071676b49 | [
"MIT"
] | 5 | 2019-01-08T05:01:26.000Z | 2021-05-17T23:34:51.000Z | # -*- coding: utf-8 -*-
# This file centralizes the project's configuration in one place.
# Choose the environment you are training on:
# AWS : 0, Own PC : 1
# Environment selector: index into the base-path candidates below
# (0 = AWS workspace, 1 = local machine).
PC = 1
path_list = ["/jet/prs/workspace/", "."]
url = path_list[PC]

# Category label lists. Order matters — presumably index-aligned with
# model outputs; confirm against the training code before reordering.
clothes = [
    'shirt',
    'jeans',
    'blazer',
    'chino-pants',
    'jacket',
    'coat',
    'hoody',
    'training-pants',
    't-shirt',
    'polo-shirt',
    'knit',
    'slacks',
    'sweat-shirt',
]

schedule = ['party', 'trip', 'sport', 'work', 'speech', 'daily', 'school', 'date']

weather = ['snow', 'sunny', 'cloudy', 'rain']
| 17.95 | 63 | 0.431755 |
5a0835b17e7c0f765c8aa93d7341da5395fe71d2 | 32 | py | Python | provider/__init__.py | depop/django-oauth2-provider | afcdef72747233dc0259a4bc068a8086ba7a69d3 | [
"MIT"
] | 1 | 2020-05-10T00:11:05.000Z | 2020-05-10T00:11:05.000Z | provider/__init__.py | depop/django-oauth2-provider | afcdef72747233dc0259a4bc068a8086ba7a69d3 | [
"MIT"
] | 1 | 2016-05-23T15:22:41.000Z | 2016-05-23T15:22:41.000Z | provider/__init__.py | depop/django-oauth2-provider | afcdef72747233dc0259a4bc068a8086ba7a69d3 | [
"MIT"
] | null | null | null | __version__ = "0.2.7+depop.6.1"
| 16 | 31 | 0.65625 |
5a0841fa1b97d80f5fc2c97be82b59ce57dfb2d4 | 7,381 | py | Python | python/craftassist/voxel_models/subcomponent_classifier.py | kayburns/craftassist | 07909493d320afc2c9ff428d0891bc3acd4dc68f | [
"MIT"
] | null | null | null | python/craftassist/voxel_models/subcomponent_classifier.py | kayburns/craftassist | 07909493d320afc2c9ff428d0891bc3acd4dc68f | [
"MIT"
] | null | null | null | python/craftassist/voxel_models/subcomponent_classifier.py | kayburns/craftassist | 07909493d320afc2c9ff428d0891bc3acd4dc68f | [
"MIT"
] | null | null | null | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import queue
from multiprocessing import Queue, Process
import sys
import os
from mc_memory_nodes import InstSegNode, PropSegNode
from heuristic_perception import all_nearby_objects
from shapes import get_bounds
VISION_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(VISION_DIR, "../")
SEMSEG_DIR = os.path.join(VISION_DIR, "semantic_segmentation/")
sys.path.append(CRAFTASSIST_DIR)
sys.path.append(SEMSEG_DIR)
import build_utils as bu
from semseg_models import SemSegWrapper
# TODO all "subcomponent" operations are replaced with InstSeg
| 38.243523 | 116 | 0.582441 |
5a0b2d031fe808c99bfba67eaa85c3e839cc5992 | 197 | py | Python | tests/test_problem16.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | 6 | 2020-02-01T23:29:51.000Z | 2022-02-20T20:46:56.000Z | tests/test_problem16.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | null | null | null | tests/test_problem16.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | null | null | null | import unittest
from problems.problem16 import solution
| 21.888889 | 45 | 0.71066 |
5a0b50f8318c63395085bc807823eccbb8a5e4b9 | 510 | py | Python | project/dynamic.py | andresitodeguzman/smspy | 29b9feb4356de5dbd1a5d222d38d45396a349d23 | [
"Apache-2.0"
] | 4 | 2017-01-27T05:15:09.000Z | 2020-12-08T13:24:19.000Z | project/dynamic.py | andresitodeguzman/smspy | 29b9feb4356de5dbd1a5d222d38d45396a349d23 | [
"Apache-2.0"
] | 1 | 2019-05-20T15:09:53.000Z | 2019-05-20T15:09:53.000Z | project/dynamic.py | andresitodeguzman/smspy | 29b9feb4356de5dbd1a5d222d38d45396a349d23 | [
"Apache-2.0"
] | null | null | null | ##
## DYNAMIC
##
## Import the module explicitly (import dynamics.<module_name> as module_name)
import dynamics.root as root
## Register all modules for checking here. If something interferes, rearrange the order
## module_name_ = module_name.do(params)
| 21.25 | 88 | 0.666667 |
5a0bac916180eec03144ad684ddb2ec3547f8ee7 | 288 | py | Python | accounts/urls.py | mishrakeshav/Django-Real-Estate-Website | 4f6146ad8d13003f890677c2c1af82b26c69678b | [
"MIT"
] | null | null | null | accounts/urls.py | mishrakeshav/Django-Real-Estate-Website | 4f6146ad8d13003f890677c2c1af82b26c69678b | [
"MIT"
] | 7 | 2021-04-08T20:21:35.000Z | 2022-01-13T03:27:33.000Z | accounts/urls.py | mishrakeshav/Django-Real-Estate-Website | 4f6146ad8d13003f890677c2c1af82b26c69678b | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Account routes: each path delegates to the matching auth view in
# this app's views module.
urlpatterns = [
    path('login', views.login, name='login'),
    path('register', views.register, name='register'),
    path('logout', views.logout, name='logout'),
    path('dashboard', views.dashboard, name='dashboard'),
]
] | 28.8 | 59 | 0.645833 |
5a0e378937b9fd8ab97a5e345d693d92224ab800 | 4,333 | py | Python | src/past/types/oldstr.py | kianmeng/python-future | 80523f383fbba1c6de0551e19d0277e73e69573c | [
"MIT"
] | 908 | 2015-01-01T21:20:45.000Z | 2022-03-29T20:47:16.000Z | src/past/types/oldstr.py | kianmeng/python-future | 80523f383fbba1c6de0551e19d0277e73e69573c | [
"MIT"
] | 402 | 2015-01-04T01:30:19.000Z | 2022-03-24T11:56:38.000Z | src/past/types/oldstr.py | kianmeng/python-future | 80523f383fbba1c6de0551e19d0277e73e69573c | [
"MIT"
] | 305 | 2015-01-18T19:29:37.000Z | 2022-03-24T09:40:09.000Z | """
Pure-Python implementation of a Python 2-like str object for Python 3.
"""
from numbers import Integral
from past.utils import PY2, with_metaclass
if PY2:
from collections import Iterable
else:
from collections.abc import Iterable
_builtin_bytes = bytes
def unescape(s):
    r"""
    Interpret backslash escape sequences in *s* and return the result.

    Round-trips through the ``unicode_escape`` codec: ``s.encode()``
    produces UTF-8 bytes, which are then decoded so that sequences such
    as ``\\n`` and ``\\\\`` collapse to the characters they denote.

    NOTE(review): ``unicode_escape`` decodes the intermediate bytes as
    Latin-1, so non-ASCII characters in *s* may come back mangled —
    presumably the intended inputs are ASCII-only; confirm before using
    this on arbitrary text.

    Example:

    >>> s = unescape(r'abc\\def')  # i.e. 'abc\\\\def'
    >>> print(s)
    abc\def

    >>> s2 = unescape('abc\\ndef')
    >>> len(s2)
    7
    >>> print(s2)
    abc
    def
    """
    return s.encode().decode('unicode_escape')
__all__ = ['oldstr']
| 31.860294 | 95 | 0.558505 |
5a0e75196f538319c5078d09117599bf367b0df0 | 1,208 | py | Python | app/api/utlis/models.py | jurekpawlikowski/flask-boilerplate | 15b7e6c4e0241a7d59dbca543e023a22b17b9903 | [
"MIT"
] | 3 | 2017-08-05T08:57:37.000Z | 2021-03-03T09:09:03.000Z | app/api/utlis/models.py | jurekpawlikowski/flask-boilerplate | 15b7e6c4e0241a7d59dbca543e023a22b17b9903 | [
"MIT"
] | null | null | null | app/api/utlis/models.py | jurekpawlikowski/flask-boilerplate | 15b7e6c4e0241a7d59dbca543e023a22b17b9903 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.event import listen
from app.factory import db
def set_updated_at(target, value, oldvalue):
    """
    SQLAlchemy event hook: stamp *value* with the current local time.

    Assigns ``datetime.now()`` to ``value.updated_at``. The *target* and
    *oldvalue* arguments exist only to satisfy the listener signature and
    are ignored.
    """
    setattr(value, "updated_at", datetime.now())
listen(BaseModel, "before_update", set_updated_at)
| 23.686275 | 80 | 0.647351 |
5a0fd6978a62253af90bdbf0d79e056e97e5921d | 1,391 | py | Python | source/tweaks/cms_plugins.py | mverleg/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | null | null | null | source/tweaks/cms_plugins.py | mverleg/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | 142 | 2015-06-05T07:53:09.000Z | 2020-03-31T18:37:07.000Z | source/tweaks/cms_plugins.py | mdilli/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | null | null | null |
"""
Raw HTML widget.
Adapted/copied from https://github.com/makukha/cmsplugin-raw-html
"""
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.template import Template
from django.utils.safestring import mark_safe
from .models import RawHtmlModel, CMSMember
from django.utils.translation import ugettext as _
plugin_pool.register_plugin(RawHtmlPlugin)
plugin_pool.register_plugin(MemberPlugin) # register the plugin
| 24.839286 | 79 | 0.757009 |
5a1046d61cc7585c8ffb76dc65a2afa1c14d62a9 | 3,296 | py | Python | tests/test_trackings.py | EugeneLiu/aftership-sdk-python | 37184272869452734d616b31295a4ac883051f5d | [
"MIT"
] | null | null | null | tests/test_trackings.py | EugeneLiu/aftership-sdk-python | 37184272869452734d616b31295a4ac883051f5d | [
"MIT"
] | null | null | null | tests/test_trackings.py | EugeneLiu/aftership-sdk-python | 37184272869452734d616b31295a4ac883051f5d | [
"MIT"
] | null | null | null | from unittest import TestCase, mock
import pytest
from requests import Response
import aftership
| 34.694737 | 116 | 0.681129 |
5a105c110cc6114a77deee02c167af5066ada602 | 1,089 | py | Python | 071_caixaeletronico.py | laissilveira/python-exercises | 906f7e46878b296ecb9b9df9fd39ec1e362ce3a4 | [
"MIT"
] | null | null | null | 071_caixaeletronico.py | laissilveira/python-exercises | 906f7e46878b296ecb9b9df9fd39ec1e362ce3a4 | [
"MIT"
] | null | null | null | 071_caixaeletronico.py | laissilveira/python-exercises | 906f7e46878b296ecb9b9df9fd39ec1e362ce3a4 | [
"MIT"
] | null | null | null | # Calcula a quantidade de notas de cada valor a serem sacadas em uma caixa eletrnico
print('=' * 30)
print('{:^30}'.format('CAIXA ELETRNICO'))
print('=' * 30)
valor = int(input('Valor a ser sacado: R$ '))
# notas de real (R$) existentes
tot200 = valor // 200
tot100 = (valor % 200) // 100
tot50 = ((valor % 200) % 100) // 50
tot20 = (((valor % 200) % 100) % 50) // 20
tot10 = ((((valor % 200) % 100) % 50) % 20) //10
tot5 = (((((valor % 200) % 100) % 50) % 20) % 10) // 5
tot2 = ((((((valor % 200) % 100) % 50) % 20) % 10) % 5) // 2
while True:
if tot200 > 0:
print(f'Total de {tot200} cdula(s) de R$ 200,00.')
if tot100 > 0:
print(f'Total de {tot100} cdula(s) de R$ 100,00.')
if tot50 > 0:
print(f'Total de {tot50} cdula(s) de R$ 50,00.')
if tot20 > 0:
print(f'Total de {tot20} cdula(s) de R$ 20,00.')
if tot10 > 0:
print(f'Total de {tot10} cdula(s) de R$ 10,00.')
if tot5 > 0:
print(f'Total de {tot5} cdula(s) de R$ 5,00.')
if tot2 > 0:
print(f'Total de {tot2} cdula(s) de R$ 2,00.')
break
| 36.3 | 85 | 0.543618 |