hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0bc3737f6d7feadb52b4789ffa0325c0fcb19d3c | 1,766 | py | Python | nmtpytorch/datasets/text.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | 4 | 2019-05-22T00:17:28.000Z | 2020-08-26T02:03:33.000Z | nmtpytorch/datasets/text.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | 3 | 2019-07-14T09:17:11.000Z | 2022-01-13T01:15:10.000Z | nmtpytorch/datasets/text.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | 1 | 2020-07-22T19:25:53.000Z | 2020-07-22T19:25:53.000Z | # -*- coding: utf-8 -*-
import logging
from pathlib import Path
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from ..utils.data import read_sentences
logger = logging.getLogger('nmtpytorch')
class TextDataset(Dataset):
    """A PyTorch dataset over a corpus of tokenized sentences.

    Arguments:
        fname (str or Path): path (possibly a glob pattern) to the corpus.
        vocab (Vocabulary): vocabulary used to map tokens to integer ids.
        bos (bool, optional): when ``True``, the special beginning-of-sentence
            "<bos>" marker is prepended to every sentence.
    """
    def __init__(self, fname, vocab, bos=False, **kwargs):
        self.path = Path(fname)
        self.vocab = vocab
        self.bos = bos

        # The given path may be a glob pattern; expand it within its parent.
        self.fnames = sorted(self.path.parent.glob(self.path.name))
        if not self.fnames:
            raise RuntimeError('{} does not exist.'.format(self.path))
        elif len(self.fnames) > 1:
            logger.info('Multiple files found, using first: {}'.format(self.fnames[0]))

        # Map every sentence to vocabulary ids up-front.
        self.data, self.lengths = read_sentences(
            self.fnames[0], self.vocab, bos=self.bos)

        # Number of sentences in the corpus.
        self.size = len(self.data)

    @staticmethod
    def to_torch(batch):
        # Pad variable-length id sequences into a (time, batch) LongTensor.
        tensors = [torch.tensor(seq, dtype=torch.long) for seq in batch]
        return pad_sequence(tensors, batch_first=False)

    def __getitem__(self, idx):
        return self.data[idx]

    def __len__(self):
        return self.size

    def __repr__(self):
        return "{} '{}' ({} sentences)\n".format(
            self.__class__.__name__, self.fnames[0].name, self.__len__())
| 28.95082 | 87 | 0.620045 | 1,520 | 0.860702 | 0 | 0 | 150 | 0.084938 | 0 | 0 | 569 | 0.322197 |
0bc4245d09646c8c52b1a3d0b366dc4f5a5be7e6 | 9,194 | py | Python | ChorusNGSfilter.py | zhangtaolab/Chorus2 | faf6250901eefc9390bf6cd7ea185454c1c2712e | [
"MIT"
] | 11 | 2019-05-23T13:28:39.000Z | 2022-02-10T00:24:54.000Z | ChorusNGSfilter.py | zhangtaolab/Chorus2 | faf6250901eefc9390bf6cd7ea185454c1c2712e | [
"MIT"
] | 3 | 2019-03-15T01:47:53.000Z | 2021-12-16T07:41:08.000Z | ChorusNGSfilter.py | zhangtaolab/Chorus2 | faf6250901eefc9390bf6cd7ea185454c1c2712e | [
"MIT"
] | 11 | 2019-02-28T08:17:02.000Z | 2021-12-13T11:44:02.000Z | import argparse
import sys
from Choruslib import jellyfish
import os
from multiprocessing import Pool, Process
from pyfasta import Fasta
import pyBigWig
import math
def main():
    """Score Oligo FISH probes with NGS k-mer counts.

    Pipeline: (1) count k-mers from the input reads with Jellyfish (or use a
    prebuilt index), (2) compute per-base k-mer scores over the genome and
    store them in a BigWig track, (3) sum the per-base scores under each
    probe and write a scored BED file.
    """
    args = check_options(get_options())
    # jfgeneratorscount(jfpath, mer, output, generators,threads=1, size='100M'):
    # make generators
    print(args.input)
    if args.jfile:
        # A prebuilt Jellyfish index was supplied; skip counting.
        # args.jfile
        jfkmerfile = args.jfile
    else:
        # Build read generators from the (possibly gzipped) fastq files and
        # count k-mers into <output>.jf.
        jellyfish.makegenerator(filenames=args.input.split(','), type=args.gzip, generators='generators')
        jfkmerfile = args.output+'.jf'
        jellyfish.jfgeneratorscount(jfpath=args.jellyfish, mer=args.kmer, output=jfkmerfile,
                                    generators='generators', threads=args.threads, size='100M')
    bwfile = args.output + '.bw'
    outfilename = args.output
    # Sequences longer than this are scored in ~10 Mb chunks.
    spsize = 10000000
    fastain = Fasta(args.genome)
    bw = pyBigWig.open(bwfile, "w")
    # Collect sequence lengths; chromosome names are the first whitespace
    # token of each fasta header.
    seqlenth = dict()
    seqname = dict()
    genomesize = 0
    for chrom in sorted(fastain.keys()):
        infor = chrom.split()
        seqlenth[infor[0]] = len(fastain[chrom])
        seqname[infor[0]] = chrom
        genomesize += seqlenth[infor[0]]
    print("Genome Size: %s" % genomesize)
    bw.addHeader(list(seqlenth.items()))
    # Build one scoring job per sequence chunk.
    jfscoerlist = list()
    for seqfullname in sorted(fastain.keys()):
        infor = seqfullname.split()
        chrlen = len(fastain[seqfullname])
        if chrlen < spsize:
            # Short sequence: a single job covers it entirely.
            start = 0
            end = chrlen - 1
            jfscoer = jellyfish.JFNGSScoer(jfpath=args.jellyfish, jfkmerfile=jfkmerfile, mer=args.kmer,
                                           start=start, end=end, seqfullname=seqfullname, pyfasta=fastain)
            jfscoerlist.append(jfscoer)
        else:
            # Long sequence: split into overlapping chunks; the kmer-1 overlap
            # keeps k-mers spanning a chunk boundary scoreable.
            chrblock = int(chrlen / spsize) + 1
            for i in range(chrblock):
                start = i * spsize
                end = start + spsize - 1
                if i > 0:
                    start = start - args.kmer + 1
                if end >= chrlen:
                    end = chrlen - 1
                jfscoer = jellyfish.JFNGSScoer(jfpath=args.jellyfish, jfkmerfile=jfkmerfile, mer=args.kmer,
                                               start=start, end=end, seqfullname=seqfullname, pyfasta=fastain)
                jfscoerlist.append(jfscoer)
    tmppath = os.path.dirname(args.output)
    # Run the jobs in batches of args.threads processes.
    jfsllength = int(len(jfscoerlist) / args.threads + 1)
    for jt in range(jfsllength):
        # NOTE(review): jt comes from range(jfsllength), so it can never equal
        # jfsllength — this branch is unreachable; the else always runs.
        if jt == jfsllength:
            nowlist = jfscoerlist[jt * args.threads:]
        else:
            nowlist = jfscoerlist[(jt * args.threads):((jt + 1) * args.threads)]
        processes = list()
        for jfscoer in nowlist:
            p = Process(target=jellyfish.jfngsscoerlargegenome, args=(jfscoer, tmppath))
            processes.append(p)
        for p in processes:
            p.start()
        for p in processes:
            p.join()
        # jfngsscoerlargegenome
        # Each finished job left a temp file of whitespace-separated per-base
        # scores; load it into the BigWig track, then delete it.
        for jfscoer in nowlist:
            tmpfile = jfscoer.seqname + '_' + str(jfscoer.start) + "_" + str(jfscoer.end)
            tmpfilename = os.path.join(tmppath, tmpfile)
            score = list()
            with open(tmpfilename) as inio:
                # presumably the temp file holds one line of scores; the last
                # line read wins — TODO confirm against jfngsscoerlargegenome
                for i in inio:
                    score = i.rstrip().split()
                bw.addEntries(jfscoer.seqname, jfscoer.start, values=list(map(float, score)), span=1, step=1)
                print(jfscoer.seqname, jfscoer.start, 'OK')
                inio.close()
            os.remove(tmpfilename)
    bw.close()
    # Re-open the finished track read-only and score every probe.
    bwforcount = pyBigWig.open(bwfile)
    outio = open(outfilename, 'w')
    with open(args.probe) as inio:
        for i in inio:
            #(chrom, start, end, seq) = i.rstrip().split()
            probeloc = i.rstrip().split()
            chrom = probeloc[0]
            start = probeloc[1]
            end = probeloc[2]
            seq = probeloc[3]
            # Sum the k-mer scores of all k-mers starting within the probe
            # (BED start is 1-based here, hence the -1).
            score = sum(bwforcount.values(chrom, int(start) - 1, int(end) - args.kmer))
            if math.isnan(score):
                # Regions with no data yield NaN; treat them as zero.
                score = 0
            print(chrom, start, end, seq, int(score), '+', sep='\t', file=outio)
    outio.close()
    print("finished!")
def check_options(parser):
    """Parse and validate command-line options.

    Locates the jellyfish binary (from --jellyfish or $PATH), verifies that
    the genome, probe and input/index files exist, and exits with usage help
    on any problem.
    """
    args = parser.parse_args()

    def fail(message):
        # Print the error, show usage, and abort with a non-zero status.
        print(message)
        parser.print_help()
        sys.exit(1)

    if args.jellyfish:
        # An explicit jellyfish path was given; it must exist and respond
        # to a version query.
        if not os.path.exists(args.jellyfish):
            fail("Can not locate jellyfish, please input full path of jellyfish\n")
        if jellyfish.jfversion(args.jellyfish) == 'None':
            fail("Can not locate jellyfish, please input full path of jellyfish\n")
    else:
        # No path given: look for jellyfish on $PATH.
        found = which('jellyfish')
        if not found:
            fail("Can not locate jellyfish, please input full path of jellyfish\n")
        if jellyfish.jfversion(found[0]) == 'None':
            fail("Can not locate jellyfish, please input full path of jellyfish\n")
        args.jellyfish = found[0]
    # End check jellyfish

    if not os.path.exists(args.genome):
        fail("Can not locate genome file, please input genome file.\n")

    if not os.path.exists(args.probe):
        fail("Can not locate probe file, please input genome file.\n")

    if args.input:
        # Every comma-separated fastq file must exist.
        for inputfile in args.input.split(','):
            if not os.path.exists(inputfile):
                fail("Can not locate %s file.\n" % inputfile)
    elif args.jfile:
        if not os.path.exists(args.jfile):
            fail("Can not locate %s file.\n" % args.jfile)
    else:
        fail("Can not locate input or jellyfish index file, please input jellyfish index or input file.\n")

    return args
def which(filename):
    """Return every location on $PATH where *filename* exists as a file."""
    search_dirs = os.environ.get("PATH").split(os.pathsep)
    return [os.path.join(directory, filename)
            for directory in search_dirs
            if os.path.isfile(os.path.join(directory, filename))]
def get_options():
    """Build and return the ChorusNGSfilter command-line argument parser."""
    p = argparse.ArgumentParser(
        description="ChorusNGSfilter for counting Oligo FISH probe k-mer score using NGS data",
        prog='ChorusNGSfilter',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="Example:\n"
               " ChorusNGSfilter -i 1.fq.gz,2.fq.gz -z gz -t 4 -g TAIR10_chr_all.fas \\ \n"
               " -j /opt/software/jellyfish/bin/jellyfish -p probe.bed -o output.bed"
    )
    p.add_argument('--version', action='version', version='%(prog)s 1.0')
    p.add_argument('-j', '--jellyfish', dest='jellyfish',
                   help='The path where Jellyfish software installed')
    p.add_argument('-g', '--genome', dest='genome',
                   help='Fasta format genome file, should include all sequences from genome',
                   required=True)
    p.add_argument('-i', '--input', dest='input',
                   help='Fastq format input files contain reads from whole genome shotgun sequencing, files can be gzipped.'
                        ' Multiple files separate with \",\". For example: 1.fq.gz,2.fq.gz ',
                   type=str)
    p.add_argument('-jfile', '--jellyfishfile', dest='jfile',
                   help='prebuild jellyfish index file, conflict with input argument.', type=str)
    p.add_argument('-z', '--gzipped', dest='gzip',
                   help='Input fastq file is gzipped(gz) or uncompressed(text). (Default: gz)',
                   choices=('gz', 'text'), default='gz')
    # parser.add_argument('-s', '--save', dest='saved', help='result saved folder', default='probes')
    p.add_argument('-t', '--threads', dest='threads',
                   help='Number of threads or CPUs to use. (Default: 1)',
                   default=1, type=int)
    p.add_argument('-k', '--kmer', dest='kmer',
                   help='Length of k-mer used for counting k-mers in input fastq files. (Default: 17)',
                   default=17, type=int)
    p.add_argument('-p', '--probe', dest='probe',
                   help='The bed format probe file generated by Chorus')
    p.add_argument('-o', '--output', dest='output',
                   help='Output bed format probe file with k-mer score. (Default: output.bed)',
                   default='output.bed')
    return p
# Script entry point: run the pipeline and exit quietly on Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # A user interrupt is treated as a clean shutdown, not an error.
        sys.stderr.write("User interrupt\n")
        sys.exit(0)
| 26.045326 | 170 | 0.570698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,210 | 0.240374 |
0bc43b9539ad1613a8c75e0fca099de18da296b5 | 224 | py | Python | nnunet/utilities/nii2niigz.py | kvpratama/nnunet | f4868fc96a7c3e5faca064e3b78b283d004da40b | [
"Apache-2.0"
] | null | null | null | nnunet/utilities/nii2niigz.py | kvpratama/nnunet | f4868fc96a7c3e5faca064e3b78b283d004da40b | [
"Apache-2.0"
] | null | null | null | nnunet/utilities/nii2niigz.py | kvpratama/nnunet | f4868fc96a7c3e5faca064e3b78b283d004da40b | [
"Apache-2.0"
] | null | null | null | import glob
import nibabel as nib
import pdb
# Convert every .nii volume under ./train3d to a gzipped copy with the
# '_0000' channel suffix.
nii_files = glob.glob('./train3d/*.nii')
for nii_file in nii_files:
    nii = nib.load(nii_file)
    out_file = nii_file[:-4] + '_0000.nii.gz'
    nib.save(nii, out_file)
    # Report the file that was actually written. (Previously the printed
    # name omitted the '_0000' suffix and did not match the saved file.)
    print(out_file)
| 17.230769 | 46 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.178571 |
0bc45b8494e9a9362ddc0e4af12b5d4c8995da50 | 5,259 | py | Python | docs/source/conf.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | NASA-PDS/naif-pds4-bundler | bd7207d157ec9cae60f42cb9ea387ac194b1671c | [
"Apache-2.0"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'NAIF PDS4 Bundler'
copyright = '2021 California Institute of Technology'
author = 'Marc Costa Sitja'
# NOTE(review): project/copyright/author are re-assigned with different
# values further down in this file; those later assignments take effect.
# Obtain version from NPB
version_file = "../../src/pds/naif_pds4_bundler/VERSION.txt"
with open(version_file, 'r') as v:
    for line in v:
        # Strip the trailing newline/whitespace; previously `release`
        # kept the raw line (including '\n'), and the truthiness check
        # was always satisfied by the newline character.
        line = line.strip()
        if line:
            # The full version, including alpha/beta/rc tags,
            # taken from the last non-empty line.
            release = line
# The short X.Y version: drop the last version component.
version = '.'.join(release.split('.')[0:-1])
# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.autosectionlabel',
    'sphinx_rtd_theme'
]

# Make sure the target is unique
autosectionlabel_prefix_document = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE(review): these override the earlier project/copyright/author
# assignments near the top of this file.
project = u'NAIF PDS4 Bundler'
copyright = u'2021, Caltech/JPL/NASA'
author = u'M. Costa Sitja'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'NAIFPDS4BundlerDoc'

#html_logo = '_static/images/PDS_Planets.png'
#
#html_context = {
#    'css_files': [
#        '_static/theme_overrides.css',  # override wide tables in RTD theme
#        ],
#     }

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'naif_pds4_bundler.tex', u'NAIF PDS4 Bundler Documentation',
     u'M. Costa Sitja', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'naifpds4bundler',
     u'NAIF PDS4 Bundler Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'NAIFPDS4Bundler',
     u'NAIF PDS4 Bundler Documentation',
     author, 'NAIFPDS4Bundler', 'Generates a PDS4 SPICE kernel archive.',
     'Miscellaneous'),
]
| 29.880682 | 79 | 0.659441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,291 | 0.815935 |
0bc7213c09c10b1bc55de9468af8657cffd677c7 | 1,020 | py | Python | Tests/testApi.py | garnachod/ConcursoPolicia | f123595afc697ddfa862114a228d7351e2f8fd73 | [
"Apache-2.0"
] | null | null | null | Tests/testApi.py | garnachod/ConcursoPolicia | f123595afc697ddfa862114a228d7351e2f8fd73 | [
"Apache-2.0"
] | null | null | null | Tests/testApi.py | garnachod/ConcursoPolicia | f123595afc697ddfa862114a228d7351e2f8fd73 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
from API.APITextos import APITextos
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements); requires the local
    # API.APITextos module, so it cannot run standalone.
    # The triple-quoted strings below are previously-run queries kept as
    # inert string literals (a poor man's block comment).
    """
    users = APITextos.getUsersSimilar_user_all_topic("Taxigate", "ar", 100, 1)
    for user in users:
        print user.screen_name + "\t"+ user.location
    users = APITextos.getUsersSimilar_user_all_topic("abosofyan7", "ar", 100, 1)
    for user in users:
        print user.screen_name + "\t"+ user.location
    print ""
    print ""
    users = APITextos.getUsersSimilar_user_all_topic("nightwalker_109", "ar", 100, 1)
    for user in users:
        print user.screen_name + "\t"+ user.location
    """
    # Query up to 100 users similar to "@nvidia" (language "en", topic 897)
    # and print their screen name and location.
    users = APITextos.getUsersSimilar_user_all_topic("@nvidia", "en", 100, 897)
    if users != False:
        for user in users:
            print user.screen_name + "\t"+ user.location
    """
    users = APITextos.getUsersSimilar_text_all_topic("hola mundo", "es", 100, 714)
    if users != False:
        for user in users:
            print user.screen_name + "\t"+ user.location
    """
0bc74e8bc8d0d296ee7ed7304833b2fbef90e20a | 1,333 | py | Python | src/visualize.py | skvis/Multi_Class_Image_Classification | 3d0a85a9cf4de66522653389c3672982084e6533 | [
"MIT"
] | null | null | null | src/visualize.py | skvis/Multi_Class_Image_Classification | 3d0a85a9cf4de66522653389c3672982084e6533 | [
"MIT"
] | null | null | null | src/visualize.py | skvis/Multi_Class_Image_Classification | 3d0a85a9cf4de66522653389c3672982084e6533 | [
"MIT"
] | null | null | null | import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import config
def view_data():
    """Display a 3x3 grid with two sample images from each class
    (rock, paper, scissors) of the training directory.
    """
    nrows, ncols = 3, 3
    fig = plt.gcf()
    fig.set_size_inches(nrows * 4, ncols * 4)
    # Take the first two files from each class directory.
    # (The earlier dead `pic_index = 0` assignment was removed; this value
    # was always immediately overwritten with 2.)
    pic_index = 2
    next_rock = [os.path.join(config.TRAIN_DIR, 'rock', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'rock'))[pic_index-2:pic_index]]
    next_paper = [os.path.join(config.TRAIN_DIR, 'paper', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'paper'))[pic_index-2:pic_index]]
    next_scissors = [os.path.join(config.TRAIN_DIR, 'scissors', fname) for fname in os.listdir(os.path.join(config.TRAIN_DIR, 'scissors'))[pic_index-2:pic_index]]
    for i, img_path in enumerate(next_rock+next_paper+next_scissors):
        # One subplot per sample image, axes hidden.
        plt.subplot(nrows, ncols, i+1)
        img = mpimg.imread(img_path)
        plt.imshow(img)
        plt.axis('off')
    plt.show()
def plot_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs.

    Arguments:
        history: mapping of metric name to a sequence of per-epoch values.
        string: metric name; the validation series is 'val_' + string.
    """
    train_curve = history[string]
    val_curve = history['val_'+string]
    plt.plot(train_curve)
    plt.plot(val_curve)
    plt.xlabel('Epochs')
    plt.ylabel(string)
    plt.legend([string, 'val_'+string])
    plt.show()
# Script entry point: show sample training images. The commented lines
# below load a saved training history and plot its curves.
if __name__ == '__main__':
    view_data()
    # history = np.load(f'{config.MODEL_PATH}my_history.npy', allow_pickle=True).item()
    # plot_graphs(history, 'accuracy')
    # plot_graphs(history, 'loss')
| 31.738095 | 162 | 0.67817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.171043 |
0bc7dbc7a380ff68f44a21a984691d373a93d819 | 454 | py | Python | tests/__init__.py | coras-io/lint-review | 0df19429a265a79edecb53b4371bf63db7e61617 | [
"MIT"
] | 2 | 2017-11-24T13:27:36.000Z | 2019-08-23T15:02:36.000Z | tests/__init__.py | coras-io/lint-review | 0df19429a265a79edecb53b4371bf63db7e61617 | [
"MIT"
] | null | null | null | tests/__init__.py | coras-io/lint-review | 0df19429a265a79edecb53b4371bf63db7e61617 | [
"MIT"
] | 2 | 2016-09-15T11:56:56.000Z | 2016-10-11T21:27:24.000Z | import os
import json
from github3.pulls import PullFile
from github3.repos.commit import RepoCommit
def load_fixture(filename):
    """Read and return the contents of a fixture file.

    Fixtures live in the ``fixtures`` directory next to this module.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(path, 'fixtures', filename)
    # Use a context manager so the handle is closed deterministically
    # (previously the file object was opened and never closed).
    with open(filename, 'r') as fh:
        return fh.read()
def create_pull_files(data):
    """Build a list of PullFile objects from a JSON array payload."""
    return list(map(PullFile, json.loads(data)))
def create_commits(data):
    """Build a list of RepoCommit objects from a JSON array payload."""
    return list(map(RepoCommit, json.loads(data)))
| 22.7 | 55 | 0.720264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.028634 |
0bca0000f68d1403d736225607fef98baa9c210f | 1,206 | py | Python | pypy/lib/cPickle.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/lib/cPickle.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/lib/cPickle.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | #
# One-liner implementation of cPickle
#
from pickle import *
from pickle import __doc__, __version__, format_version, compatible_formats
# Map cPickle's exception names onto the equivalents raised by pure-Python
# pickle so callers catching them keep working.
BadPickleGet = KeyError
UnpickleableError = PicklingError

# ____________________________________________________________
# XXX some temporary dark magic to produce pickled dumps that are
# closer to the ones produced by cPickle in CPython

from pickle import StringIO

# Keep a handle on the pure-Python Pickler before it is shadowed below.
PythonPickler = Pickler
class Pickler(PythonPickler):
    """Drop-in stand-in for cPickle.Pickler.

    Mimics two cPickle quirks: constructing with a single integer selects
    the protocol and pickles into an internal buffer (read back via
    ``getvalue``), and the memo table starts counting at one.
    """
    def __init__(self, *args, **kw):
        self.__buffer = None
        if len(args) != 1 or not isinstance(args[0], int):
            # Normal construction: delegate everything unchanged.
            PythonPickler.__init__(self, *args, **kw)
        else:
            # Pickler(protocol): write into an internal StringIO buffer.
            self.__buffer = StringIO()
            PythonPickler.__init__(self, self.__buffer, args[0], **kw)

    def memoize(self, obj):
        # cPickle starts counting memo entries at one; burn slot zero.
        self.memo[None] = None
        return PythonPickler.memoize(self, obj)

    def getvalue(self):
        # Only meaningful when the internal buffer form was used.
        return self.__buffer and self.__buffer.getvalue()
def dump(obj, file, protocol=None, bin=None):
    """Serialize *obj* to the already-open *file* object."""
    writer = Pickler(file, protocol, bin)
    writer.dump(obj)
def dumps(obj, protocol=None, bin=None):
    """Serialize *obj* and return the pickle data as a string."""
    buf = StringIO()
    Pickler(buf, protocol, bin).dump(obj)
    return buf.getvalue()
0bcce6782aa31e7dd2aa944b38edc0071b4e58f7 | 847 | py | Python | boardgame/connectfour/connectfourviewer.py | suryaambrose/boardgame | 459f9ae26ce571d34da88c295eb577b835f3ad13 | [
"MIT"
] | null | null | null | boardgame/connectfour/connectfourviewer.py | suryaambrose/boardgame | 459f9ae26ce571d34da88c295eb577b835f3ad13 | [
"MIT"
] | null | null | null | boardgame/connectfour/connectfourviewer.py | suryaambrose/boardgame | 459f9ae26ce571d34da88c295eb577b835f3ad13 | [
"MIT"
] | null | null | null | import os
import sys
from ..gameviewer import GameViewer
class ConnectFourViewer(GameViewer):
    """Console viewer for a 6x7 Connect Four board.

    NOTE(review): Python 2 code (print statement, raw_input,
    "except Exception, e" syntax).
    """
    def __init__(self):
        # Board is 6 rows by 7 columns.
        super(ConnectFourViewer, self).__init__([6,7])

    def showState(self, state):
        """Clear the terminal and draw the board held in *state*."""
        os.system("clear")
        # Header row with the column indices.
        sys.stdout.write("x\y|")
        for k in range(0, self.map_width):
            sys.stdout.write("%d "%(k))
        sys.stdout.write("\n")
        for i in range(0, self.map_height):
            # Row label, then one "|"-separated cell per column.
            sys.stdout.write(" %d "%(i))
            for j in range(0, self.map_width):
                sys.stdout.write("|")
                if state._board[i][j] is not None:
                    # Occupied cell: draw the player's symbol.
                    sys.stdout.write(self.symbol_map[state._board[i][j]])
                else:
                    sys.stdout.write(" ")
            sys.stdout.write("|\n")

    def waitForAMove(self):
        """Prompt until the user types a valid integer column and return it."""
        while True:
            try:
                played_column = raw_input("Type where you wish to play (e.g. 1 for column 1):")
                c = int(played_column)
                break
            except Exception, e:
                # Non-numeric input: show the error and ask again.
                print e
        return c
0bcd3adf32fd21aaedcee456a99e43a34a9cfc38 | 5,806 | py | Python | contrib/kodi/script.service.launchcontrol/launchcontrol.py | funjack/launchcontrol | 1d99f15d3cb56b987c922053c2e7a3369f0b24ac | [
"BSD-3-Clause"
] | 20 | 2017-04-09T20:49:05.000Z | 2020-12-28T21:51:55.000Z | contrib/kodi/script.service.launchcontrol/launchcontrol.py | funjack/launchcontrol | 1d99f15d3cb56b987c922053c2e7a3369f0b24ac | [
"BSD-3-Clause"
] | 16 | 2017-04-26T19:28:18.000Z | 2020-05-25T15:11:50.000Z | contrib/kodi/script.service.launchcontrol/launchcontrol.py | funjack/launchcontrol | 1d99f15d3cb56b987c922053c2e7a3369f0b24ac | [
"BSD-3-Clause"
] | 5 | 2018-11-09T15:46:49.000Z | 2021-04-08T10:38:11.000Z | """Launchcontrol client
The module exposes the Launchcontrol API as a Client object.
Data:
scripttypes -- list of a dictionaries containing supported script formats.
"""
import urllib2
# Supported script formats: each entry maps a short format name to the file
# extensions it is detected by and the MIME media type used when sending the
# script to the Launchcontrol server.
scripttypes = [
    {
        "name" : "funscript",
        "extensions": ["funscript"],
        "mediaType" : "application/prs.funscript+json",
    },
    {
        "name" : "raw",
        "extensions": ["launch"],
        "mediaType" : "application/prs.launchraw+json",
    },
    {
        "name" : "kiiroo",
        "extensions": ["kiiroo"],
        "mediaType" : "text/prs.kiiroo",
    },
    {
        "name" : "feelme",
        "extensions": ["meta"],
        "mediaType" : "application/prs.kiiroo+json",
    },
    {
        "name" : "realtouch",
        "extensions": ["realtouch", "ott"],
        "mediaType" : "text/prs.realtouch",
    },
    {
        "name" : "vorze",
        "extensions": ["vorze"],
        "mediaType" : "text/prs.vorze",
    },
    {
        "name" : "json",
        "extensions": ["json"],
        "mediaType" : "application/json",
    },
    {
        "name" : "text",
        "extensions": ["txt"],
        "mediaType" : "text/plain",
    },
    {
        "name" : "csv",
        "extensions": ["csv"],
        "mediaType" : "text/csv",
    },
]
class NotNowException(Exception):
    """Raised when an operation is not compatible with the current state."""
class NotSupportedException(Exception):
    """Raised when the specified script/media type is not supported."""
class Client(object):
    """Client communicates with a Launchcontrol server.

    Args:
        url: Launchcontrol server url
        latency: Time adjustment in milliseconds
        positionmin: Lowest position in percent the Launch should move to
        positionmax: Highest position in percent the Launch should move to
        speedmin: Slowest speed in percent the Launch should move at
        speedmax: Highest speed in percent the Launch should move to
    """
    def __init__ (self, url="http://127.0.0.1:6969", latency=0,
            positionmin=0, positionmax=100, speedmin=20, speedmax=100):
        self._url = url
        # Numeric settings may arrive as strings; normalize to ints once.
        self.latency = int(latency)
        self.positionMin = int(positionmin)
        self.positionMax = int(positionmax)
        self.speedMin = int(speedmin)
        self.speedMax = int(speedmax)

    def _command(self, path, conflict_message):
        """Issue a body-less command request to the server.

        Args:
            path: URL path (and query) relative to the server base url.
            conflict_message: message for the NotNowException raised when
                the server answers 409 Conflict.

        Raises:
            NotNowException: The player is not in a state that allows
                this command (HTTP 409).
        """
        req = urllib2.Request(self._url + path)
        try:
            urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            # Conflict (409): player state does not allow the command.
            if e.code == 409:
                raise NotNowException(conflict_message)
            else:
                raise e

    def Play(self, data, mediaType):
        """Play by sending data as specified mediatype.

        Args:
            data: Raw script data in bytes
            mediaType: Mimetype of the script in data

        Raises:
            NotSupportedException: The script and or mediaType is not
                supported.
        """
        # An empty media type is silently ignored: nothing is sent.
        if mediaType != "":
            # Tuning parameters are passed as query-string arguments.
            params = ["latency=%d" % self.latency,
                      "positionmin=%d" % self.positionMin,
                      "positionmax=%d" % self.positionMax,
                      "speedmin=%d" % self.speedMin,
                      "speedmax=%d" % self.speedMax]
            req = urllib2.Request(self._url + '/v1/play?%s' % "&".join(params),
                                  data=data, headers={'Content-Type': mediaType})
            try:
                urllib2.urlopen(req)
            except urllib2.HTTPError as e:
                # Unsupported Media Type (415): Can't handle script.
                if e.code == 415:
                    raise NotSupportedException("script is not supported")
                else:
                    raise e

    def Stop(self):
        """Stop playback.

        Raises:
            NotNowException: Stop can not be performed now, eg because there
                is no script loaded.
        """
        self._command('/v1/stop', "cannot stop script now")

    def Pause(self):
        """Pause playback.

        Raises:
            NotNowException: Pause can not be performed now, eg because there
                is no script loaded.
        """
        self._command('/v1/pause', "cannot pause script now")

    def Resume(self):
        """Resume playback.

        Raises:
            NotNowException: Resume can not be performed now, eg because there
                is no script loaded.
        """
        self._command('/v1/resume', "cannot resume now")

    def Skip(self, time):
        """Skip jumps to a timecode given in milliseconds.

        Raises:
            NotNowException: Skip can not be performed now, eg because there
                is no script loaded.
        """
        self._command('/v1/skip?p=%dms' % time, "cannot skip now")
0bcd6aee9769157bc7389908284096f10fb88a81 | 7,614 | py | Python | python/fleetx/applications/trainer.py | ForFishes/FleetX | ffb01eb3513eabce77f389c0a0b10b7a5d5afede | [
"Apache-2.0"
] | 1 | 2021-03-02T09:24:40.000Z | 2021-03-02T09:24:40.000Z | python/fleetx/applications/trainer.py | ForFishes/FleetX | ffb01eb3513eabce77f389c0a0b10b7a5d5afede | [
"Apache-2.0"
] | null | null | null | python/fleetx/applications/trainer.py | ForFishes/FleetX | ffb01eb3513eabce77f389c0a0b10b7a5d5afede | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import paddle
import paddle.fluid as fluid
import numpy as np
import paddle.distributed.fleet as fleet
class Trainer(object):
    """Base class for training drivers.

    Concrete subclasses assign an execution place (CPU or GPU device).
    """
    def __init__(self):
        # No execution place until a subclass picks one.
        self.place = None
class CPUTrainer(Trainer):
    """Distributed (fleet) trainer that executes on CPU."""
    def __init__(self):
        super(CPUTrainer, self).__init__()
        self.place = fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)

    def fit(self, model, dataloader, epoch, start_step=10):
        """Train *model* for *epoch* passes over *dataloader*.

        Timing is only accumulated after *start_step* steps so warm-up
        iterations do not skew the reported speed.
        """
        fleet.init_worker()
        self.exe.run(fluid.default_startup_program())
        for epoch_id in range(epoch):
            total_time = 0
            step = 0
            # NOTE(review): the dataloader is *called* here (dataloader()),
            # unlike MultiGPUTrainer.fit which iterates the object directly.
            for data in dataloader():
                if step > start_step:
                    start_time = time.time()
                loss = self.exe.run(fluid.default_main_program(),
                                    feed=data,
                                    fetch_list=[model.loss.name])
                if step > start_step:
                    end_time = time.time()
                    total_time += (end_time - start_time)
                    print(
                        "worker_index: %d, step%d, train_loss: %f, total time cost = %f, step per second: %f, speed: %f"
                        % (fleet.worker_index(), step, loss[0], total_time,
                           (step - start_step) / total_time,
                           1 / (end_time - start_time)))
                step += 1
        fleet.stop_worker()
class MultiGPUTrainer(Trainer):
    """Collective (multi-GPU) trainer; the device comes from FLAGS_selected_gpus."""

    def __init__(self):
        super(MultiGPUTrainer, self).__init__()
        self.place = fluid.CUDAPlace(
            int(os.environ.get('FLAGS_selected_gpus', 0)))
        self.exe = fluid.Executor(self.place)
        self.exe.run(fluid.default_startup_program())

    def fit(self, model, dataloader, epoch, use_dali=False, start_step=10):
        """Train `model` for `epoch` epochs, printing per-step speed stats.

        Args:
            model: object exposing a loss variable (`model.loss.name`).
            dataloader: iterable of feed dicts (not a callable, unlike CPUTrainer).
            epoch: number of passes over the dataloader.
            use_dali: if True, call `dataloader.reset()` after each epoch.
            start_step: warm-up steps excluded from timing.
        """
        for epoch_id in range(epoch):
            total_time = 0
            step = 0
            for data in dataloader:
                if step > start_step:
                    start_time = time.time()
                loss = self.exe.run(fluid.default_main_program(),
                                    feed=data,
                                    fetch_list=[model.loss.name],
                                    use_program_cache=True)
                if step > start_step:
                    end_time = time.time()
                    total_time += (end_time - start_time)
                    # NOTE(review): divides by zero if a timed step takes
                    # less than timer resolution.
                    print(
                        "epoch id: %d, step%d, train_loss: %f, total time cost = %f, step per second: %f, speed: %f"
                        % (epoch_id, step, loss[0], total_time,
                           (step - start_step) / total_time,
                           1 / (end_time - start_time)))
                step += 1
            if use_dali:
                dataloader.reset()

    def val(self,
            model,
            dataloader,
            target_list,
            current_epoch=-1,
            use_dali=False):
        """Evaluate `model` on `dataloader`, printing the mean of each target.

        Args:
            model: object with `main_prog` and a `target` dict of fetchable vars.
            dataloader: iterable of feed dicts.
            target_list: names of entries in `model.target` to fetch and average.
            current_epoch: if > 0, included in the printed header.
            use_dali: if True, reset the dataloader afterwards.

        Raises:
            Exception: if a requested target is not provided by the model.
        """
        self.test_program = model.main_prog.clone(for_test=True)
        fetch_target = []
        results = {}
        for item in target_list:
            if item in model.target.keys():
                fetch_target.append(model.target[item].name)
                results[item] = []
            else:
                raise Exception("ERROR: Current model only support target: {}".
                                format(model.target.keys()))
        for data in dataloader:
            result = self.exe.run(self.test_program,
                                  feed=data,
                                  fetch_list=fetch_target,
                                  use_program_cache=True)
            for item in target_list:
                # `result` is ordered like `target_list`, so index by position.
                results[item].append(np.mean(result[target_list.index(item)]))
        log_info = ""
        for item in target_list:
            log_info += ", {} = {}".format(item, np.mean(results[item]))
        if current_epoch > 0:
            print("Test Epoch {}{}".format(current_epoch, log_info))
        else:
            print("Test Result {}".format(log_info))
        if use_dali:
            dataloader.reset()

    def quick_benchmark(self,
                        model,
                        dataloader,
                        start_step=20,
                        end_step=200):
        """Measure queries/sec over steps (start_step, end_step] without fetches.

        Returns:
            float: mean steps per second over the timed window.
        """
        step = 0
        total_time = 0
        # NOTE(review): `total_step` and `counting_time` are never used.
        total_step = 0
        counting_time = False
        for data in dataloader:
            if step > start_step and step <= end_step:
                start_time = time.time()
            # Empty fetch_list: run the step without pulling results back.
            loss = self.exe.run(fluid.default_main_program(),
                                feed=data,
                                fetch_list=[],
                                use_program_cache=True)
            if step > start_step and step <= end_step:
                end_time = time.time()
                total_time += (end_time - start_time)
            if step > end_step:
                break
            step += 1
        # NOTE(review): divides by zero if the dataloader yields fewer than
        # start_step + 2 batches (total_time stays 0).
        mean_qps = (end_step - start_step) / total_time
        return mean_qps

    def benchmark(self,
                  model,
                  dataloader,
                  epoch,
                  use_dali=False,
                  start_step=20):
        """Time 100 steps per epoch (after warm-up) and return steps/sec.

        Returns:
            float: average speed of the last epoch's timed window.
        """
        for epoch_id in range(epoch):
            total_time = 0
            step = 0
            for data in dataloader:
                if step > start_step and step <= start_step + 100:
                    start_time = time.time()
                loss = self.exe.run(fluid.default_main_program(),
                                    feed=data,
                                    fetch_list=[model.loss.name],
                                    use_program_cache=True)
                if step > start_step and step <= start_step + 100:
                    end_time = time.time()
                    total_time += (end_time - start_time)
                step += 1
            # NOTE(review): assumes each epoch has > start_step + 1 batches;
            # otherwise total_time is 0 and this divides by zero.
            average_speed = 100 / total_time
            if use_dali:
                dataloader.reset()
        return average_speed

    def benchmark_val(self, model, dataloader, target_list, use_dali=False):
        """Like val(), but returns the per-batch target values instead of printing.

        Returns:
            dict: target name -> list of per-batch means.
        """
        self.test_program = model.main_prog.clone(for_test=True)
        fetch_target = []
        results = {}
        for item in target_list:
            if item in model.target.keys():
                fetch_target.append(model.target[item].name)
                results[item] = []
            else:
                raise Exception("ERROR: Current model only support target: {}".
                                format(model.target.keys()))
        for data in dataloader:
            result = self.exe.run(self.test_program,
                                  feed=data,
                                  fetch_list=fetch_target,
                                  use_program_cache=True)
            for item in target_list:
                results[item].append(np.mean(result[target_list.index(item)]))
        if use_dali:
            dataloader.reset()
        return results
| 37.141463 | 120 | 0.503809 | 6,870 | 0.902285 | 0 | 0 | 0 | 0 | 0 | 0 | 968 | 0.127134 |
0bcd749609107612c02325ec0ccb67862b6e1914 | 2,858 | py | Python | bin/kraken_bin_file.py | CSB5/OPERA-MS | ae46e005322774efc896d7c21ec265ad35748bc0 | [
"MIT"
] | 81 | 2018-03-22T15:01:08.000Z | 2022-01-17T17:52:31.000Z | bin/kraken_bin_file.py | CSB5/OPERA-MS | ae46e005322774efc896d7c21ec265ad35748bc0 | [
"MIT"
] | 68 | 2017-09-14T08:17:53.000Z | 2022-03-09T18:56:12.000Z | bin/kraken_bin_file.py | CSB5/OPERA-MS | ae46e005322774efc896d7c21ec265ad35748bc0 | [
"MIT"
] | 21 | 2017-09-14T06:15:18.000Z | 2021-09-30T03:19:22.000Z | #! /mnt/software/unstowable/anaconda/bin/python
import sys
import os
import argparse
import subprocess
def generating_mapping(kraken_report):
    """Build a taxid -> [species_name] mapping from a kraken report file.

    Only rows whose rank code (column 4) is 'S' (species) or '-' are kept.
    The species name is formed from the first two whitespace-separated
    tokens of the last column joined by '_' (e.g. "Escherichia_coli");
    rows with fewer than two name tokens are reported and skipped.

    Args:
        kraken_report: path to a tab-separated kraken report file.

    Returns:
        dict mapping taxonomy id (column 5, as a string) to a
        single-element list containing the species name.
    """
    kraken_dict = {}
    with open(kraken_report, "r") as fp:
        for line in fp:
            fields = line.split("\t")
            if fields[3] != 'S' and fields[3] != '-':
                continue
            tokens = fields[-1].split()
            try:
                species = tokens[0] + "_" + tokens[1]
            except IndexError:
                # Was a bare `except:` before, which would also swallow
                # KeyboardInterrupt; only a short token list is expected here.
                print("not a species?? : {}".format(tokens))
                continue
            kraken_dict[fields[4]] = [species]
    return kraken_dict
def kraken_binning(kraken_dict, kraken_out, output_file, abund_dict):
    """Assign contigs to species bins using kraken classification output.

    Reads the kraken output file (tab-separated; column 2 is the contig
    name, column 3 the assigned taxonomy id) and, for every contig whose
    taxid appears in `kraken_dict`, writes a "contig<TAB>species" line to
    `output_file`.

    Args:
        kraken_dict: taxid -> [species_name] mapping (see generating_mapping()).
        kraken_out: path to the kraken per-contig output file.
        output_file: path of the binning table to write.
        abund_dict: unused; kept for interface compatibility.

    Returns:
        dict: currently always empty (the per-contig output-file mapping
        below is disabled).
    """
    contigs_bin_dict = {}
    # Read the kraken output directly instead of shelling out to
    # `cat ... | cut -f1-4` with shell=True: this avoids the shell
    # entirely and works on Python 3, where check_output() returns bytes
    # (the old `.split("\n")` on bytes would raise a TypeError).
    with open(output_file, "w") as out_fp:
        with open(kraken_out, "r") as in_fp:
            for line in in_fp:
                line = line.rstrip("\n")
                if not line:
                    continue
                fields = line.split("\t")
                contigs = fields[1]
                tax_id = fields[2]
                if tax_id in kraken_dict:
                    species = kraken_dict[tax_id][0]
                    out_fp.write(contigs + "\t" + species + "\n")
                    #output_file = ["{}/BINNING/{}.fasta".format(output_folder, species), "{}/ABUND/sample_{}_bin_{}.abund1".format(output_folder, sample, species)]
                    #contigs_bin_dict[contigs] = output_file
    return contigs_bin_dict
def main(args):
    """Drive kraken-based binning: build the taxid map, then bin the contigs."""
    taxid_to_species = generating_mapping(args.kraken_report)
    # Abundance loading is currently disabled; pass an empty mapping so
    # kraken_binning keeps its four-argument signature.
    abundance = {}
    kraken_binning(taxid_to_species, args.kraken_out, args.output, abundance)
if __name__ == "__main__":
    # Command-line entry point: parse arguments, then run main().
    parser = argparse.ArgumentParser("kraken binning")
    mandatory = parser.add_argument_group("mandatory arguments")
    mandatory.add_argument("-k", "--kraken_out",
                           required=True,
                           help="kraken out file")
    mandatory.add_argument("-r", "--kraken_report",
                           required=True,
                           help="kraken out report file")
    # NOTE(review): -c and -a sit in the "mandatory arguments" group but
    # are not actually required (required=True is commented out), which
    # makes the --help output misleading.
    mandatory.add_argument("-c", "--contig",
                           #required=True,
                           help="assembled contigs fasta file")
    mandatory.add_argument("-o", "--output",
                           required=True,
                           help="output file")
    mandatory.add_argument("-a", "--abund",
                           #required=True,
                           help="abundance file")
    args=parser.parse_args()
    main(args)
| 31.755556 | 160 | 0.53219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.245976 |
0bcdead7e276e3e5c2f148291206586512252d42 | 19,394 | py | Python | preprocessing/reid_preprocessing.py | Mithilesh1609/assembled-cnn | e0227eecbf3a7fd4fe99a954068c85ffbed94c53 | [
"Apache-2.0"
] | 363 | 2020-01-20T01:58:28.000Z | 2022-03-29T04:17:58.000Z | preprocessing/reid_preprocessing.py | Mithilesh1609/assembled-cnn | e0227eecbf3a7fd4fe99a954068c85ffbed94c53 | [
"Apache-2.0"
] | 9 | 2020-02-01T14:51:22.000Z | 2021-05-27T11:29:29.000Z | preprocessing/reid_preprocessing.py | Mithilesh1609/assembled-cnn | e0227eecbf3a7fd4fe99a954068c85ffbed94c53 | [
"Apache-2.0"
] | 46 | 2020-01-20T04:53:45.000Z | 2021-12-06T09:37:24.000Z | # This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
Training images are sampled using the provided bounding boxes, and subsequently
cropped to the sampled bounding box. Images are additionally flipped randomly,
then resized to the target output size (without aspect-ratio preservation).
Images used during evaluation are resized (with aspect-ratio preservation) and
centrally cropped.
All images undergo mean color subtraction.
Note that these steps are colloquially referred to as "ResNet preprocessing,"
and they differ from "VGG preprocessing," which does not use bounding boxes
and instead does an aspect-preserving resize followed by random crop during
training. (These both differ from "Inception preprocessing," which introduces
color distortion steps.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from preprocessing import autoaugment
# Per-channel means in the 0-255 range (R, G, B order), subtracted from
# images before feeding the network.
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]

# Means/stds in the 0-1 range for _normalization(); presumably the
# ImageNet statistics used by torchvision-style pipelines — TODO confirm.
_MEAN = [0.485, 0.456, 0.406]
_STD = [0.229, 0.224, 0.225]

# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 256
def central_crop(image, crop_height, crop_width):
  """Crops the center region of a single image.

  Args:
    image: a 3-D image tensor [height, width, channels].
    crop_height: height of the crop.
    crop_width: width of the crop.

  Returns:
    A 3-D tensor containing the centered crop_height x crop_width region.
  """
  dims = tf.shape(image)
  top = (dims[0] - crop_height) // 2
  left = (dims[1] - crop_width) // 2
  return tf.slice(image, [top, left, 0], [crop_height, crop_width, -1])
def _mean_image_subtraction(image, means, num_channels):
  """Subtracts per-channel means from an image.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    num_channels: number of color channels in the image.

  Returns:
    the centered image.

  Raises:
    ValueError: if `image` does not have rank 3, or if `means` does not
      have `num_channels` entries.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')
  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # Broadcast the 1-D mean vector to the full image shape explicitly;
  # this is faster than expanding dimensions (b/130245863).
  broadcast_means = tf.broadcast_to(means, tf.shape(image))
  return image - broadcast_means
def _normalization(image, means, stds, num_channels):
  """Normalizes the image per channel: (image - means) / stds.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of per-channel means to subtract.
    stds: a C-vector of per-channel standard deviations to divide by.
    num_channels: number of color channels in the image.

  Returns:
    the normalized image.

  Raises:
    ValueError: if the rank of `image` is unknown, if `image` has a rank
      other than three or if the number of channels in `image` doesn't
      match the number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have 1-D tensors of means/stds; broadcast explicitly to the image
  # shape instead of expanding dimensions, for performance (b/130245863).
  means = tf.broadcast_to(means, tf.shape(image))
  stds = tf.broadcast_to(stds, tf.shape(image))

  return (image - means) / stds
def _smallest_size_at_least(height, width, resize_min):
  """Computes the new shape whose smaller side equals `resize_min`.

  The aspect ratio of the original (height, width) is preserved.

  Args:
    height: an int32 scalar tensor, the current height.
    width: an int32 scalar tensor, the current width.
    resize_min: python int or scalar tensor; target size of the smaller side.

  Returns:
    (new_height, new_width): int32 scalar tensors.
  """
  target = tf.cast(resize_min, tf.float32)

  # Work in floats so the ratio arithmetic is exact enough.
  h = tf.cast(height, tf.float32)
  w = tf.cast(width, tf.float32)
  scale = target / tf.minimum(h, w)

  # Back to ints, as expected by the TF resize ops.
  return tf.cast(h * scale, tf.int32), tf.cast(w * scale, tf.int32)
def _aspect_preserving_resize(image, resize_min):
  """Resizes `image` so its smaller side equals `resize_min`, keeping aspect.

  Args:
    image: a 3-D image tensor.
    resize_min: python int or scalar tensor; target size of the smaller side.

  Returns:
    The resized 3-D image tensor.
  """
  dims = tf.shape(image)
  new_h, new_w = _smallest_size_at_least(dims[0], dims[1], resize_min)
  return _resize_image(image, new_h, new_w)
def _resize_image(image, height, width):
  """Resizes `image` to [height, width] with the project's standard settings.

  Centralizes the choice of ResizeMethod (bilinear, no corner alignment)
  so every call site behaves identically.

  Args:
    image: a 3-D image tensor.
    height: target height.
    width: target width.

  Returns:
    The resized 3-D image tensor of shape [height, width, channels].
  """
  target_size = [height, width]
  return tf.image.resize_images(image, target_size,
                                method=tf.image.ResizeMethod.BILINEAR,
                                align_corners=False)
def _ten_crop(image, crop_h, crop_w):
  """Standard ten-crop: four corner crops + center crop, plus their mirrors.

  Args:
    image: a 3-D image tensor [height, width, channels].
    crop_h: crop height.
    crop_w: crop width.

  Returns:
    A [10, crop_h, crop_w, channels] tensor: five crops (left-upper,
    right-upper, left-lower, right-lower, center) followed by their
    horizontally flipped versions.
  """

  def _crop(img, center_offset):
    # input img shape is [h,w,c]; extract a crop centered at
    # `center_offset` (absolute pixel coordinates).
    # NOTE(review): extract_glimpse's size argument is (height, width);
    # passing [crop_w, crop_h] is only correct for square crops — confirm.
    img = tf.image.extract_glimpse(
      [img], [crop_w, crop_h], offsets=tf.to_float([center_offset]),
      centered=False, normalized=False)
    return tf.squeeze(img, 0)

  def _crop5(img):
    # img shape is [h,w,c]
    im_shape = tf.shape(image)
    height, width = im_shape[0], im_shape[1]
    ch, cw = tf.to_int32(height / 2), tf.to_int32(width / 2)  # center offset
    hh, hw = tf.to_int32(crop_h / 2), tf.to_int32(crop_w / 2)  # half crop size
    ct = _crop(img, [ch, cw])                    # center
    lu = _crop(img, [hh, hw])                    # left-upper
    ld = _crop(img, [height - hh, hw])           # left-lower
    ru = _crop(img, [hh, width - hw])            # right-upper
    rd = _crop(img, [height - hh, width - hw])   # right-lower
    return tf.stack([lu, ru, ld, rd, ct])

  lhs = _crop5(image)
  rhs = tf.image.flip_left_right(lhs)
  return tf.concat([lhs, rhs], axis=0)
def preprocess_image_ten_crop(image_buffer, output_height, output_width, num_channels):
  """Decodes a JPEG and produces the mean-subtracted ten-crop stack.

  Args:
    image_buffer: scalar string tensor holding the raw JPEG bytes.
    output_height: height of each crop.
    output_width: width of each crop.
    num_channels: number of image channels.

  Returns:
    A [10, output_height, output_width, num_channels] float tensor.
  """
  decoded = tf.image.decode_jpeg(image_buffer, channels=num_channels)
  resized = _aspect_preserving_resize(decoded, _RESIZE_MIN)
  crops = _ten_crop(resized, output_height, output_width)
  crops.set_shape([10, output_height, output_width, num_channels])
  # Subtract the channel means from every crop individually.
  return tf.map_fn(
    lambda crop: _mean_image_subtraction(crop, _CHANNEL_MEANS, num_channels),
    crops)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  # Runtime (graph-evaluated) check: the static rank may be unknown.
  rank_assertion = tf.Assert(
    tf.equal(tf.rank(image), 3),
    ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
    tf.logical_and(
      tf.greater_equal(original_shape[0], crop_height),
      tf.greater_equal(original_shape[1], crop_width)),
    ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
def _get_random_crop_coord(image_list, crop_height, crop_width):
  """Samples one random crop window valid for every image in the list.

  All images must share the same height/width; the function verifies this
  at graph-evaluation time. Unlike _random_crop (which shares most of this
  code), it returns the sampled window instead of applying it.

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    An int32 tensor [offset_height, offset_width, crop_height, crop_width]
    describing the sampled crop window.

  Raises:
    ValueError: if there are multiple image inputs provided with different size
      or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')

  # Compute the rank assertions.
  rank_assertions = []
  for i in range(len(image_list)):
    image_rank = tf.rank(image_list[i])
    rank_assert = tf.Assert(
      tf.equal(image_rank, 3),
      ['Wrong rank for tensor %s [expected] [actual]',
       image_list[i].name, 3, image_rank])
    rank_assertions.append(rank_assert)

  image_shape = control_flow_ops.with_dependencies(
    [rank_assertions[0]],
    tf.shape(image_list[0]))
  image_height = image_shape[0]
  image_width = image_shape[1]
  crop_size_assert = tf.Assert(
    tf.logical_and(
      tf.greater_equal(image_height, crop_height),
      tf.greater_equal(image_width, crop_width)),
    ['Crop size greater than the image size.'])

  asserts = [rank_assertions[0], crop_size_assert]

  # Every subsequent image must match the first one's height and width.
  for i in range(1, len(image_list)):
    image = image_list[i]
    asserts.append(rank_assertions[i])
    shape = control_flow_ops.with_dependencies([rank_assertions[i]],
                                               tf.shape(image))
    height = shape[0]
    width = shape[1]

    height_assert = tf.Assert(
      tf.equal(height, image_height),
      ['Wrong height for tensor %s [expected][actual]',
       image.name, height, image_height])
    width_assert = tf.Assert(
      tf.equal(width, image_width),
      ['Wrong width for tensor %s [expected][actual]',
       image.name, width, image_width])
    asserts.extend([height_assert, width_assert])

  # Create a random bounding box.
  #
  # Use tf.random_uniform and not numpy.random.rand as doing the former would
  # generate random numbers at graph eval time, unlike the latter which
  # generates random numbers at graph definition time.
  max_offset_height = control_flow_ops.with_dependencies(
    asserts, tf.reshape(image_height - crop_height + 1, []))
  max_offset_width = control_flow_ops.with_dependencies(
    asserts, tf.reshape(image_width - crop_width + 1, []))
  offset_height = tf.random_uniform(
    [], maxval=max_offset_height, dtype=tf.int32)
  offset_width = tf.random_uniform(
    [], maxval=max_offset_width, dtype=tf.int32)
  return tf.stack([offset_height, offset_width, crop_height, crop_width])
def _random_crop(image_list, crop_height, crop_width):
  """Crops the given list of images.

  The function applies the same crop to each image in the list. This can be
  effectively applied when there are multiple image inputs of the same
  dimension such as:

    image, depths, normals = _random_crop([image, depths, normals], 120, 150)

  NOTE(review): this duplicates almost all of _get_random_crop_coord above;
  consider sharing the validation/sampling logic.

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    the image_list with cropped images.

  Raises:
    ValueError: if there are multiple image inputs provided with different size
      or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')

  # Compute the rank assertions.
  rank_assertions = []
  for i in range(len(image_list)):
    image_rank = tf.rank(image_list[i])
    rank_assert = tf.Assert(
      tf.equal(image_rank, 3),
      ['Wrong rank for tensor %s [expected] [actual]',
       image_list[i].name, 3, image_rank])
    rank_assertions.append(rank_assert)

  image_shape = control_flow_ops.with_dependencies(
    [rank_assertions[0]],
    tf.shape(image_list[0]))
  image_height = image_shape[0]
  image_width = image_shape[1]
  crop_size_assert = tf.Assert(
    tf.logical_and(
      tf.greater_equal(image_height, crop_height),
      tf.greater_equal(image_width, crop_width)),
    ['Crop size greater than the image size.'])

  asserts = [rank_assertions[0], crop_size_assert]

  # Every subsequent image must match the first one's height and width.
  for i in range(1, len(image_list)):
    image = image_list[i]
    asserts.append(rank_assertions[i])
    shape = control_flow_ops.with_dependencies([rank_assertions[i]],
                                               tf.shape(image))
    height = shape[0]
    width = shape[1]

    height_assert = tf.Assert(
      tf.equal(height, image_height),
      ['Wrong height for tensor %s [expected][actual]',
       image.name, height, image_height])
    width_assert = tf.Assert(
      tf.equal(width, image_width),
      ['Wrong width for tensor %s [expected][actual]',
       image.name, width, image_width])
    asserts.extend([height_assert, width_assert])

  # Create a random bounding box.
  #
  # Use tf.random_uniform and not numpy.random.rand as doing the former would
  # generate random numbers at graph eval time, unlike the latter which
  # generates random numbers at graph definition time.
  max_offset_height = control_flow_ops.with_dependencies(
    asserts, tf.reshape(image_height - crop_height + 1, []))
  max_offset_width = control_flow_ops.with_dependencies(
    asserts, tf.reshape(image_width - crop_width + 1, []))
  offset_height = tf.random_uniform(
    [], maxval=max_offset_height, dtype=tf.int32)
  offset_width = tf.random_uniform(
    [], maxval=max_offset_width, dtype=tf.int32)

  # The same sampled offsets are applied to every image in the list.
  return [_crop(image, offset_height, offset_width,
                crop_height, crop_width) for image in image_list]
def pad_shorter(image):
  """Zero-pads a 3-D image to a square whose side is its larger dimension.

  Padding is split evenly between the two sides of the shorter dimension
  (with the extra pixel, if any, on the bottom/right).

  Args:
    image: a 3-D image tensor [height, width, channels].

  Returns:
    The padded square image tensor.
  """
  dims = tf.shape(image)
  height, width = dims[0], dims[1]
  side = tf.maximum(height, width)

  pad_top = (side - height) // 2
  pad_bottom = (side - height) - pad_top
  # tf.maximum is redundant here (side >= width always); kept for parity
  # with the original graph.
  pad_left = tf.maximum((side - width) // 2, 0)
  pad_right = (side - width) - pad_left

  return tf.pad(image, [[pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls: switch() routes x to the
  # branch matching `sel`, and merge() picks whichever branch actually ran.
  return control_flow_ops.merge([
    func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
    for case in range(num_cases)])[0]
def resize_func(image, size, method):
  """Resizes by one of two strategies selected by `method`.

  method == 0: resize to _RESIZE_MIN square, then take a random crop of
  `size`; any other value: resize directly to `size`.

  Args:
    image: a 3-D image tensor.
    size: (height, width) pair.
    method: integer strategy selector.

  Returns:
    The resized 3-D image tensor of shape [size[0], size[1], channels].
  """
  if method == 0:
    enlarged = _resize_image(image, _RESIZE_MIN, _RESIZE_MIN)
    return _random_crop([enlarged], size[0], size[1])[0]
  return _resize_image(image, size[0], size[1])
def preprocess_image(image_buffer,
                     output_height,
                     output_width,
                     num_channels,
                     dct_method='',
                     is_training=False,
                     autoaugment_type=None,
                     eval_large_resolution=True):
  """Decodes and preprocesses a JPEG for training or evaluation.

  Training: random resize strategy (direct resize or resize+random crop),
  random horizontal flip, and optional AutoAugment. Evaluation: plain
  resize (optionally enlarged by 1/0.875), no cropping. Both paths end
  with channel-mean subtraction.

  Args:
    image_buffer: scalar string tensor with the raw JPEG bytes.
    output_height: target image height.
    output_width: target image width.
    num_channels: number of image channels.
    dct_method: decode_jpeg dct_method ('' = decoder default).
    is_training: selects the training vs. evaluation path.
    autoaugment_type: AutoAugment policy name, or None to disable.
    eval_large_resolution: if True, scale the eval size by 1/0.875
      (the usual resize-to-crop ratio used for center-crop evaluation).

  Returns:
    A float32 tensor of shape [output_height, output_width, num_channels]
    (scaled by 1/0.875 in the eval path when eval_large_resolution).
  """
  if is_training:
    image = tf.image.decode_jpeg(image_buffer, channels=num_channels, dct_method=dct_method)
    # Randomly pick one of the two resize strategies in resize_func().
    image = apply_with_random_selector(
      image,
      lambda x, method: resize_func(x, [output_height, output_width], method),
      num_cases=2)
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    image = tf.image.random_flip_left_right(image)

    if autoaugment_type:
      tf.logging.info('Apply AutoAugment policy {}'.format(autoaugment_type))
      # AutoAugment operates on uint8 images; clip, cast in, cast back out.
      image = tf.clip_by_value(image, 0.0, 255.0)
      dtype = image.dtype
      image = tf.cast(image, dtype=tf.uint8)
      image = autoaugment.distort_image_with_autoaugment(
        image, autoaugment_type)
      image = tf.cast(image, dtype=dtype)
    image.set_shape([output_height, output_width, num_channels])
  else:
    if eval_large_resolution:
      output_height = int(output_height * (1.0 / 0.875))
      output_width = int(output_width * (1.0 / 0.875))
    # For validation, we want to decode, resize, then just crop the middle.
    image = tf.image.decode_jpeg(image_buffer, channels=num_channels, dct_method=dct_method)
    image = _resize_image(image, output_height, output_width)
    image = tf.to_float(image)
    image.set_shape([output_height, output_width, num_channels])

  return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
| 35.848429 | 100 | 0.699856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,103 | 0.469324 |
0bcf39723db18d75741e6691c7334c3160163a28 | 16,442 | py | Python | dumbcpm/__main__.py | handicraftsman/dumbcpm | 6fab04f822c6c8de05a8fb20b699b6f4c45ae907 | [
"MIT"
] | null | null | null | dumbcpm/__main__.py | handicraftsman/dumbcpm | 6fab04f822c6c8de05a8fb20b699b6f4c45ae907 | [
"MIT"
] | 2 | 2019-01-04T10:12:02.000Z | 2019-01-04T18:55:37.000Z | dumbcpm/__main__.py | handicraftsman/dumbcpm | 6fab04f822c6c8de05a8fb20b699b6f4c45ae907 | [
"MIT"
] | null | null | null | import argparse
import os
import os.path
import glob
import re
import sys
import subprocess
import shlex
from pathlib import Path
from functools import cmp_to_key
from structlog import get_logger, wrap_logger
from structlog.dev import ConsoleRenderer
import yaml
import semver
import pkgconfig
# Global structlog logger with padded, colored console output.
glog = wrap_logger(get_logger(), processors=[ConsoleRenderer(pad_event=42, colors=True)])

# C / C++ compilers, overridable via the usual environment variables.
cc = os.getenv('CC', 'gcc')
cxx = os.getenv('CXX', 'g++')

# NOTE(review): `syslibs` appears unused in this module — confirm before removing.
syslibs = []

# Library search path: the SEARCH_DIR entries reported by `ld --verbose`
# plus anything listed in LD_LIBRARY_PATH.
libpaths = re.findall(r'SEARCH_DIR\("(.+?)"\);', subprocess.check_output(shlex.split('bash -c "ld --verbose | grep SEARCH_DIR"')).decode('utf-8')) + os.getenv('LD_LIBRARY_PATH', '').split(':')
def find_lib(lib, pth):
    """Return the full path of file `lib` in the first matching dir of `pth`.

    Exits the process with status 1 (after logging) if no directory in
    `pth` contains `lib`.
    """
    glog.info('find_lib()', lib=lib)
    for lp in pth:
        if os.path.isfile(lp + '/' + lib):
            return lp + '/' + lib
    glog.failure('unable to find a library', lib=lib)
    sys.exit(1)
def system(cmd):
    """Run `cmd` through the shell, logging it; exit(1) on non-zero status.

    NOTE(review): commands are built by string concatenation at the call
    sites, so paths containing shell metacharacters will be interpreted
    by the shell (potential injection/breakage).
    """
    glog.info('execute', cmd=cmd)
    c = os.system(cmd)
    if c != 0:
        glog.failure('unable to execute a command', cmd=cmd)
        sys.exit(1)
def has_updated_headers(std, flags, ifile):
    """Return True if any header `ifile` includes is newer than `ifile` itself.

    Uses the compiler's `-M` output (a make-style dependency rule) to list
    every file `ifile` depends on, then compares modification times.

    Args:
        std: language standard passed to -std=.
        flags: extra compiler flags (include paths etc.).
        ifile: path to a .c or .cpp source file.

    Exits with status 1 if the file extension is neither .c nor .cpp.
    """
    glog.info('has_updated_headers()', file=ifile)
    # Pick the C or C++ compiler based on the extension.
    c = ''
    if ifile.endswith('.c'):
        c = cc
    elif ifile.endswith('.cpp'):
        c = cxx
    else:
        glog.failure('invalid file extension', file=ifile)
        sys.exit(1)
    out = subprocess.check_output(shlex.split(c + ' ' + flags + ' -std=' + std + ' -M ' + ifile)).decode('utf-8')
    # -M prints "target.o: src dep1 dep2 \<newline> dep3 ..."; split on
    # whitespace/line continuations, then drop the target and the source.
    sp = re.compile(r'[\\\r\n\s]+').split(out)
    sp.pop(0)
    sp.pop(0)
    for f in sp:
        if not f:
            continue
        if os.path.getmtime(f) > os.path.getmtime(ifile):
            return True
    return False
def has_updated_sources(std, flags, ofile, ifiles):
    """Return True if `ofile` is missing or older than any file in `ifiles`.

    Used to decide whether an output artifact needs to be rebuilt.
    `std` and `flags` are unused here; the signature mirrors
    has_updated_headers() for symmetry.
    """
    glog.info('has_updated_sources()', ofile=ofile)
    if not os.path.isfile(ofile):
        return True
    for f in ifiles:
        if not f:
            continue
        if os.path.getmtime(f) > os.path.getmtime(ofile):
            return True
    return False
class Target:
    """A single build target (executable or library) of a package.

    Parses the target's entry from package.yaml into compiler/linker
    settings. Include directories of dependency packages are rebased
    into ./dumbcpm-packages/<pkg>/.
    """

    def __init__(self, ctx, pm, name, data):
        # Resolve the type up front so a manifest without a 'type' key no
        # longer crashes the log binding below: the rest of the code
        # already defaults it to 'executable' via data.get().
        self.type = data.get('type', 'executable')
        self.log = glog.bind(pkg=pm.name, name=name, type=self.type)
        self.log.info('Target()')
        self.ctx = ctx
        self.pm = pm
        self.name = name
        self.data = data
        # Build-state flags used by the build machinery.
        self.built = False
        self.linked = False
        self.link_info = data.get('link', [])
        self.link = {}

        def upd(d):
            # Paths of the package currently being built stay relative;
            # dependency paths are rebased into the packages directory.
            if ctx.this_package.name == pm.name:
                return d
            else:
                return './dumbcpm-packages/' + pm.name + '/' + d + '/'
        self.include_dirs = set([upd(d) for d in data.get('include-dirs', [])])

        self.sources = data.get('sources', [])
        self.cpp = data.get('cpp', 'c++11')
        self.c = data.get('c', 'c11')
        self.pkg_config = data.get('pkg-config', [])
        self.libs = data.get('libs', [])
        self.libflags = ' '.join(['-l' + lib for lib in self.libs])
        # $ORIGIN rpath lets built binaries find sibling shared libraries.
        self.flags = ' -Wl,-rpath=\'$ORIGIN\' -fPIC -L./dumbcpm-build/ ' + self.libflags + ' ' + data.get('flags', '') + ' '
        self.after_flags = ' ' + data.get('after-flags', '') + ' '
        # Whether any source is C++, which decides the linker driver.
        self.has_cpp_sources = any(s.endswith('.cpp') for s in self.sources)
class PackageManifest:
    """In-memory representation of one package's manifest (package.yaml)."""

    def __init__(self, ctx, data):
        self.log = glog.bind(name=data['name'], version=data['version'])
        self.log.info('PackageManifest()')
        self.ctx = ctx
        self.data = data
        self.name = data['name']
        self.version = data['version']
        self.git_repo = data['git-repo']
        self.git_tag = data['git-tag']
        self.depends = data.get('depends', {})
        # Raw target dicts; Target objects are built lazily in load_targets().
        self.targets_base = data.get('targets', {})
        self.targets = {}
        self.also_build = data.get('also-build', [])
        # State flags used by the dependency/build machinery.
        self.loaded = False
        self.built = False
        self.loaded_targets = False

    def load_targets(self):
        """Instantiate Target objects from the raw manifest data (idempotent)."""
        if self.loaded_targets:
            return
        self.loaded_targets = True
        for k, v in self.targets_base.items():
            self.targets[k] = Target(self.ctx, self, k, v)

    def fetch(self):
        """Clone the package's git repo, or pull and check out the pinned tag."""
        cwd = os.getcwd()
        os.chdir('./dumbcpm-packages')
        if not os.path.isdir('./' + self.name):
            system('git clone ' + self.git_repo + ' -b ' + self.git_tag + ' ' + self.name)
        else:
            os.chdir('./' + self.name)
            system('git pull')
            system('git checkout ' + self.git_tag)
        os.chdir(cwd)
class Repository:
    """A package repository: a directory with repository.yaml and
    per-package manifest files named <name>/<name>-<semver>.yaml."""

    def __init__(self, ctx, path):
        self.log = glog.bind(path=path)
        self.log.info('Repository()')
        self.ctx = ctx
        self.path = path
        with open(self.path + '/repository.yaml', 'r') as f:
            self.data = yaml.load(f)
        self.package_names = self.data['packages']
        # Cache: package name -> list of version strings found on disk.
        self.versions = {}

    def list_versions(self, name):
        """Return (and cache) all semver versions of `name` in this repository."""
        self.log.info('Repository.list_versions()', name=name)
        if name in self.versions:
            return self.versions[name]
        vv = glob.glob(self.path + '/' + name + '/' + name + '-*.*.*.yaml')
        vr = []
        for v in vv:
            # Extract "X.Y.Z" from "<anything>-X.Y.Z.yaml".
            s = re.search(r'^.*?-(\d+\.\d+\.\d+)\.yaml$', v, re.IGNORECASE)
            if s:
                vr.append(s.group(1))
        self.versions[name] = vr
        return self.versions[name]

    def has_package(self, name, version):
        """Return True if this repository has a manifest for name@version."""
        if os.path.isfile(self.path + '/' + name + '/' + name + '-' + version + '.yaml'):
            return True
        return False
class PMContext:
    def __init__(self):
        """Load ~/.dumbcpm.yaml and initialize repositories and caches."""
        self.log = glog
        self.log.info('Hello, World!')
        # NOTE(review): yaml.load without an explicit Loader; fine for a
        # trusted local config, but yaml.safe_load would be safer.
        with open(str(Path.home()) + '/.dumbcpm.yaml') as f:
            self.data = yaml.load(f)
        self.repository_paths = self.data['repositories']
        self.repositories = []
        for path in self.repository_paths:
            self.repositories.append(Repository(self, path))
        # Caches: versions per package name, manifests keyed "name-version",
        # and manifests keyed by name then version.
        self.versions = {}
        self.packages = {}
        self.packages_v = {}
        self.this_package = None
        self.include_dirs = self.data.get('include-dirs', [])
        self.library_dirs = self.data.get('library-dirs', [])
        # Global -I/-L flags appended to every compile/link command.
        self.ctxflags = ' '.join(['-I' + d for d in self.include_dirs] + ['-L' + d for d in self.library_dirs])
    def list_versions(self, name):
        """Return (and cache) the sorted union of `name`'s versions across repos."""
        self.log.info('PMContext.list_versions()', name=name)
        if name in self.versions:
            return self.versions[name]
        s = set()
        for r in self.repositories:
            vv = r.list_versions(name)
            for v in vv:
                s.add(v)
        # Sort by semantic-version order, not lexicographically.
        self.versions[name] = sorted(list(s), key=cmp_to_key(semver.compare))
        return self.versions[name]
    def load(self, name, version='latest', build=False):
        """Load (and cache) the manifest of name@version.

        With build=True the manifest is read from the already-fetched
        checkout under ./dumbcpm-packages/ instead of a repository.
        Exits with status 1 if no repository provides the package.
        """
        self.log.info('PMContext.load()', name=name, version=version, build=build)
        if version == 'latest':
            vv = self.list_versions(name)
            version = vv[-1]
        if (name + '-' + version) in self.packages:
            return self.packages[name + '-' + version]
        for r in self.repositories:
            if build or r.has_package(name, version):
                pt = None
                if build:
                    pt = './dumbcpm-packages/' + name + '/package.yaml'
                else:
                    pt = r.path + '/' + name + '/' + name + '-' + version + '.yaml'
                with open(pt, 'r') as f:
                    p = PackageManifest(self, yaml.load(f))
                    # Populate both caches before returning.
                    self.packages[name + '-' + version] = p
                    if not name in self.packages_v:
                        self.packages_v[name] = {}
                    self.packages_v[name][version] = p
                    return self.packages[name + '-' + version]
        self.log.failure('unable to find a package', name=name, version=version)
        sys.exit(1)
    def load_this(self, build=False):
        """Load ./package.yaml as the current package and resolve its dependencies."""
        self.log.info('PMContext.load_this()', build=build)
        with open('./package.yaml', 'r') as f:
            data = yaml.load(f)
        self.this_package = PackageManifest(self, data)
        # Register the current package in the same caches as dependencies,
        # and pin its known versions to exactly the one on disk.
        self.packages[self.this_package.name + '-' + self.this_package.version] = self.this_package
        if not self.this_package.name in self.packages_v:
            self.packages_v[self.this_package.name] = {}
        self.packages_v[self.this_package.name][self.this_package.version] = self.this_package
        self.versions[self.this_package.name] = [self.this_package.version]
        self.load_dependencies(self.this_package, build)
def limit_package_versions(self, name, spec):
self.log.info('PMContext.limit_package_versions()', pkg=name, spec=spec)
specs = spec.split(',')
def matches(v):
for s in specs:
if not semver.match(v, s):
return False
return True
vv = self.versions[name]
vv = [v for v in vv if matches(v)]
vv = sorted(vv, key=cmp_to_key(semver.compare))
if not vv:
self.log.failure('unable to find at least one matching version', pkg=name, spec=spec)
self.versions[name] = vv
def load_dependencies(self, pkg, build=False):
self.log.info('PMContext.load_dependencies()', pkg=pkg.name, build=build)
if pkg.loaded:
return
pkg.loaded = True
for k, v in pkg.depends.items():
self.list_versions(k)
self.limit_package_versions(k, v)
self.load_dependencies(self.load(k, 'latest', build), build)
def fetch(self):
self.log.info('PMContext.fetch()')
if not os.path.isdir('./dumbcpm-packages/'):
os.mkdir('./dumbcpm-packages/')
for name, vers in self.packages_v.items():
if name == self.this_package.name:
continue
p = vers[self.versions[name][-1]]
p.fetch()
def build_file(self, pkg, target, ifile, ofile):
self.log.info('PMContext.build_file()', pkg=pkg.name, target=target.name, ifile=ifile, ofile=ofile)
c = ''
std = ''
if ifile.endswith('.c'):
c = cc
std = target.c
elif ifile.endswith('.cpp'):
c = cxx
std = target.cpp
else:
self.log.failure('invalid file extension', file=ifile)
sys.exit(1)
if (not os.path.isfile(ofile)) or (os.path.getmtime(ofile) < os.path.getmtime(ifile)) or has_updated_headers(std, target.flags, ifile):
system(c + ' -c ' + target.flags + ' -std=' + std + ' -o ' + ofile + ' ' + ifile + ' ' + target.after_flags)
    def build_target(self, pkg, target):
        """Build *target* once, after first building everything it links to.

        Compiles each source file to a sibling .o, then links an executable
        or shared library into ./dumbcpm-build/.
        """
        if target.built:
            return
        target.built = True
        # build link dependencies first so their libraries exist at link time
        for tn, to in target.link.items():
            self.build_target(to.pm, to)
        self.log.info('PMContext.build_target()', pkg=pkg.name, target=target.name)
        if not target.sources:
            # nothing to compile or link (e.g. header-only target)
            return
        ofiles = []
        for ifile in target.sources:
            if pkg.name != self.this_package.name:
                # dependency sources live in the local fetch directory
                ifile = './dumbcpm-packages/' + pkg.name + '/' + ifile
            ofile = ifile + '.o'
            ofiles.append(ofile)
            self.build_file(pkg, target, ifile, ofile)
        c = ''
        std = ''
        if target.has_cpp_sources:
            # any C++ source forces linking with the C++ driver
            c = cxx
            std = target.cpp
        else:
            c = cc
            std = target.c
        if target.type == 'executable':
            if has_updated_sources(std, target.flags, './dumbcpm-build/' + target.name, ofiles):
                system(c + ' -std=' + std + ' ' + target.flags + ' -o ./dumbcpm-build/' + target.name + ' ' + ' '.join(ofiles) + ' ' + target.after_flags)
        elif target.type == 'library':
            if has_updated_sources(std, target.flags, './dumbcpm-build/lib' + target.name + '.so', ofiles):
                system(c + ' -shared -std=' + std + ' ' + target.flags + ' -o ./dumbcpm-build/lib' + target.name + '.so ' + ' '.join(ofiles) + ' ' + target.after_flags)
        else:
            self.log.failure('invalid target type', target=target.name, type=target.type)
            sys.exit(1)
def build_pkg(self, pkg):
self.log.info('PMContext.build_pkg()', pkg=pkg.name)
if pkg.built:
return
pkg.built = True
for tn, to in pkg.targets.items():
self.build_target(pkg, to)
def link_target_pkgconfig(self, pkg, target, pc):
self.log.info('PMContext.link_target_pkgconfig()', pkg=pkg.name, target=target.name, pc=pc)
if not pkgconfig.exists(pc):
self.log.failure('unable to find a pkg-config', pc=pc)
target.flags += (' ' + pkgconfig.libs(pc) + ' ' + pkgconfig.cflags(pc))
    def link_target(self, pkg, target):
        """Resolve *target*'s pkg-config and package links into compile flags.

        Populates target.link with resolved library targets, merges their
        include directories, and appends -I/-l flags to target.flags.
        """
        self.log.info('PMContext.link_target()', pkg=pkg.name, target=target.name)
        if target.linked:
            return
        target.linked = True
        # context-wide -I/-L flags come first so target flags can follow them
        target.flags = self.ctxflags + ' ' + target.flags
        for pc in target.pkg_config:
            self.link_target_pkgconfig(pkg, target, pc)
        for dn in target.link_info:
            # each link entry is a "package/target" pair
            sp = dn.split('/')
            p = sp[0]
            t = sp[1]
            if (not p in self.packages_v) or (not self.packages_v[p]):
                self.log.failure('unable to find package for target', pkg=p, target=t)
                sys.exit(1)
            # pick the newest allowed version of the dependency package
            po = self.packages_v[p][self.versions[p][-1]]
            po.load_targets()
            if not t in po.targets:
                self.log.failure('unable to find target in package', pkg=p, target=t)
                sys.exit(1)
            to = po.targets[t]
            if to.type != 'library':
                self.log.failure('unable to link to a non-library target', pkg=p, target=t)
                sys.exit(1)
            target.link[dn] = to
            # inherit the dependency's public include directories
            target.include_dirs = target.include_dirs.union(to.include_dirs)
        for d in target.include_dirs:
            target.flags += (' -I' + d)
        for dn, to in target.link.items():
            if not to.sources:
                # header-only dependency: nothing for the linker
                continue
            target.flags += (' -l' + to.name)
def build(self):
self.log.info('PMContext.build()')
for name, vv in self.packages_v.items():
pkg = vv[self.versions[name][-1]]
pkg.load_targets()
for tn, t in pkg.targets.items():
self.link_target(pkg, t)
self.build_pkg(self.this_package)
for dn in self.this_package.also_build:
sp = dn.split('/')
pn = sp[0]
tn = sp[1]
pkg = self.packages_v[pn][self.versions[pn][-1]]
t = pkg.targets[tn]
self.build_target(pkg, t)
def libs_pkg(self, pkg):
self.log.info('PMContext.libs_pkg()', pkg=pkg.name)
if pkg.built:
return
pkg.built = True
for tn, to in pkg.targets.items():
self.libs_target(pkg, to)
    def libs_target(self, pkg, target):
        """Copy the system shared libraries *target* uses into ./dumbcpm-build.

        Recurses into link dependencies first; each copied library gets an
        $ORIGIN rpath so the build directory is self-contained.
        """
        if target.built:
            return
        target.built = True
        for dn, to in target.link.items():
            self.libs_target(to.pm, to)
        self.log.info('PMContext.libs_target()', pkg=pkg.name, target=target.name)
        # libraries come from both pkg-config entries and explicit target.libs
        libs = pkgconfig.parse(' '.join(target.pkg_config))['libraries'] + target.libs
        for lib in libs:
            lp = find_lib('lib' + lib + '.so', libpaths + self.library_dirs)
            # skip the copy when the cached copy is already newer
            if os.path.isfile('./dumbcpm-build/lib' + lib + '.so') and (os.path.getmtime('./dumbcpm-build/lib' + lib + '.so') > os.path.getmtime(lp)):
                continue
            system('cp ' + lp + ' ./dumbcpm-build/lib' + lib + '.so')
            # let the copied library resolve its own deps from the same dir
            system('patchelf --set-rpath \'$ORIGIN\' ./dumbcpm-build/lib' + lib + '.so')
def libs(self):
self.log.info('PMContext.libs()')
for name, vv in self.packages_v.items():
pkg = vv[self.versions[name][-1]]
pkg.load_targets()
for tn, t in pkg.targets.items():
self.link_target(pkg, t)
self.libs_pkg(self.this_package)
for dn in self.this_package.also_build:
sp = dn.split('/')
pn = sp[0]
tn = sp[1]
pkg = self.packages_v[pn][self.versions[pn][-1]]
t = pkg.targets[tn]
self.libs_target(pkg, t)
if __name__ == "__main__":
    # CLI entry point: bootstrap config/repos, then dispatch on subcommand.
    home = str(Path.home())
    # first run: write a default config pointing at the bundled repository
    if not os.path.isfile(home + '/.dumbcpm.yaml'):
        with open(home + '/.dumbcpm.yaml', 'w+') as f:
            f.write('repositories:\n - ' + home + '/dumbcpm-repos/handicraftsman-dumbcpm-packages')
    if not os.path.isdir(home + '/dumbcpm-repos/'):
        os.mkdir(home + '/dumbcpm-repos/')
    # clone the default package repository when it is not present yet
    if not os.path.isdir(home + '/dumbcpm-repos/handicraftsman-dumbcpm-packages'):
        system('bash -c "cd ' + home + '/dumbcpm-repos/; git clone https://github.com/handicraftsman/dumbcpm-packages handicraftsman-dumbcpm-packages"')
    parser = argparse.ArgumentParser(prog='dumbcpm')
    subparsers = parser.add_subparsers(help='available subcommands', dest='which')
    parser_fetch = subparsers.add_parser('fetch', help='fetches package dependencies')
    parser_fetch.set_defaults(which='fetch')
    parser_build = subparsers.add_parser('build', help='builds current package and its dependencies')
    parser_build.set_defaults(which = 'build')
    parser_libs = subparsers.add_parser('libs', help='copies system libraries into the build directory')
    parser_libs.set_defaults(which = 'libs')
    res = parser.parse_args()
    if res.which == 'fetch':
        ctx = PMContext()
        ctx.load_this()
        ctx.fetch()
    elif res.which == 'build':
        if not os.path.isdir('./dumbcpm-build'):
            os.mkdir('./dumbcpm-build')
        ctx = PMContext()
        # build=True: read manifests from the local ./dumbcpm-packages checkout
        ctx.load_this(True)
        ctx.build()
    elif res.which == 'libs':
        if not os.path.isdir('./dumbcpm-build'):
            os.mkdir('./dumbcpm-build')
        ctx = PMContext()
        ctx.load_this(True)
        ctx.libs()
    else:
        print('Invalid command')
        sys.exit(2)
| 34.182952 | 192 | 0.61501 | 12,878 | 0.783238 | 0 | 0 | 0 | 0 | 0 | 0 | 2,794 | 0.169931 |
0bcf6189d3d5a0a37bc27a9c69ab2228205c0630 | 5,103 | py | Python | test/simple_imputation/test_logistic_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | 6 | 2020-04-27T21:21:47.000Z | 2022-03-30T03:02:54.000Z | test/simple_imputation/test_logistic_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | 1 | 2021-07-01T18:49:27.000Z | 2021-07-01T18:49:27.000Z | test/simple_imputation/test_logistic_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | null | null | null | import unittest
from imputena import logistic_regression
from test.example_data import *
class TestLogisticRegression(unittest.TestCase):
    """Unit tests for imputena.logistic_regression.

    Positive tests assert how many of the 15 NA values in the
    breast-cancer example data frame get imputed; negative tests assert
    the TypeError/ValueError raised for invalid arguments.
    """
    # Positive tests ----------------------------------------------------------
    def test_logistic_regression_returning(self):
        """
        Positive test
        data: Correct data frame (df_breast_cancer)
        The data frame (df_breast_cancer) contains 15 NA values.
        logistic_regression() should impute 7 of them.
        Checks that the original series remains unmodified and that the
        returned series contains 8 NA values.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act
        df2 = logistic_regression(df, 'class', ['thickness', 'uniformity'])
        # 3. Assert
        self.assertEqual(df.isna().sum().sum(), 15)
        self.assertEqual(df2.isna().sum().sum(), 8)
    def test_logistic_regression_inplace(self):
        """
        Positive test
        data: Correct data frame (df_breast_cancer)
        The data frame (df_breast_cancer) contains 15 NA values.
        logistic_regression() should impute 7 of them.
        Checks that the data frame contains 8 NA values after the operation.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act
        logistic_regression(
            df, 'class', ['thickness', 'uniformity'], inplace=True)
        # 3. Assert
        self.assertEqual(df.isna().sum().sum(), 8)
    def test_logistic_regression_implicit_predictors(self):
        """
        Positive test
        data: Correct data frame (df_breast_cancer)
        predictors: None
        The data frame (df_breast_cancer) contains 15 NA values.
        logistic_regression() should impute 7 of them.
        Checks that the original series remains unmodified and that the
        returned series contains 8 NA values.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act
        df2 = logistic_regression(df, 'class')
        # 3. Assert
        self.assertEqual(df.isna().sum().sum(), 15)
        self.assertEqual(df2.isna().sum().sum(), 8)
    def test_logistic_regression_complete(self):
        """
        Positive test
        data: Correct data frame (df_breast_cancer)
        regressions: 'complete'
        The data frame (df_breast_cancer) contains 15 NA values.
        logistic_regression() should impute 3 of them.
        Checks that the original series remains unmodified and that the
        returned series contains 12 NA values.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act
        df2 = logistic_regression(
            df, 'class', ['thickness', 'uniformity'], regressions='complete')
        # 3. Assert
        self.assertEqual(df.isna().sum().sum(), 15)
        self.assertEqual(df2.isna().sum().sum(), 12)
    # Negative tests ----------------------------------------------------------
    def test_logistic_regression_wrong_type(self):
        """
        Negative test
        data: array (unsupported type)
        Checks that the function raises a TypeError if the data is passed as
        an array.
        """
        # 1. Arrange
        data = [2, 4, np.nan, 1]
        # 2. Act & 3. Assert
        with self.assertRaises(TypeError):
            logistic_regression(data)
    def test_logistic_regression_wrong_dependent(self):
        """
        Negative test
        data: Correct data frame (df_breast_cancer)
        dependent: 'z' (not a column of df_breast_cancer)
        Checks that the function raises a ValueError if the column specified as
        the dependent variable doesn't exist in the data.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act & 3. Assert
        with self.assertRaises(ValueError):
            logistic_regression(df, 'z', ['thickness', 'uniformity'])
    def test_logistic_regression_wrong_predictor(self):
        """
        Negative test
        data: Correct data frame (df_breast_cancer)
        predictors: ['thickness', 'z'] ('z' is not a column of
        df_breast_cancer)
        Checks that the function raises a ValueError if one of the column s
        specified as the predictor variables doesn't exist in the data.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act & 3. Assert
        with self.assertRaises(ValueError):
            logistic_regression(df, 'class', ['thickness', 'z'])
    def test_logistic_regression_wrong_regressions(self):
        """
        Negative test
        data: Correct data frame (df_breast_cancer)
        regressions: 'z' (not a valid value)
        Checks that the function raises a ValueError if the value passed for
        the parameter regressions is not valid.
        """
        # 1. Arrange
        df = generate_df_breast_cancer()
        # 2. Act & 3. Assert
        with self.assertRaises(ValueError):
            logistic_regression(
                df, 'class', ['thickness', 'uniformity'], regressions='z')
| 31.89375 | 79 | 0.600039 | 5,009 | 0.981579 | 0 | 0 | 0 | 0 | 0 | 0 | 2,975 | 0.58299 |
0bcf8429b193b3275e3163004c7c46be310b26a2 | 3,349 | py | Python | src/robot/libdocpkg/jsonbuilder.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | src/robot/libdocpkg/jsonbuilder.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | src/robot/libdocpkg/jsonbuilder.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from robot.running import ArgInfo, ArgumentSpec
from robot.errors import DataError
from .model import LibraryDoc, KeywordDoc
class JsonDocBuilder:
    """Builds a LibraryDoc model from a Libdoc JSON spec file or dict."""
    def build(self, path):
        """Read the JSON spec at *path* and return the built LibraryDoc."""
        spec = self._parse_spec_json(path)
        return self.build_from_dict(spec)
    def build_from_dict(self, spec):
        """Build a LibraryDoc from an already-parsed spec dictionary."""
        libdoc = LibraryDoc(name=spec['name'],
                            doc=spec['doc'],
                            version=spec['version'],
                            type=spec['type'],
                            scope=spec['scope'],
                            doc_format=spec['docFormat'],
                            source=spec['source'],
                            lineno=int(spec.get('lineno', -1)))
        # both enum and TypedDict entries feed the same data_types collection
        libdoc.data_types.update(spec['dataTypes'].get('enums', []))
        libdoc.data_types.update(spec['dataTypes'].get('typedDicts', []))
        libdoc.inits = [self._create_keyword(kw) for kw in spec['inits']]
        libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
        return libdoc
    def _parse_spec_json(self, path):
        # Raises DataError (not a bare IOError) for a missing file so callers
        # get a uniform error type.
        if not os.path.isfile(path):
            raise DataError("Spec file '%s' does not exist." % path)
        with open(path) as json_source:
            libdoc_dict = json.load(json_source)
        return libdoc_dict
    def _create_keyword(self, kw):
        """Deserialize one keyword dictionary into a KeywordDoc."""
        return KeywordDoc(name=kw.get('name'),
                          args=self._create_arguments(kw['args']),
                          doc=kw['doc'],
                          shortdoc=kw['shortdoc'],
                          tags=kw['tags'],
                          source=kw['source'],
                          lineno=int(kw.get('lineno', -1)))
    def _create_arguments(self, arguments):
        """Rebuild an ArgumentSpec from the serialized argument list."""
        spec = ArgumentSpec()
        # map each argument kind to the spec attribute it populates;
        # the marker kinds ('/', '*') carry no value and are ignored
        setters = {
            ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
            ArgInfo.POSITIONAL_ONLY_MARKER: lambda value: None,
            ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
            ArgInfo.VAR_POSITIONAL: lambda value: setattr(spec, 'var_positional', value),
            ArgInfo.NAMED_ONLY_MARKER: lambda value: None,
            ArgInfo.NAMED_ONLY: spec.named_only.append,
            ArgInfo.VAR_NAMED: lambda value: setattr(spec, 'var_named', value),
        }
        for arg in arguments:
            name = arg['name']
            setters[arg['kind']](name)
            default = arg.get('defaultValue')
            if default is not None:
                spec.defaults[name] = default
            arg_types = arg['types']
            # spec.types defaults to None; create the dict lazily
            if not spec.types:
                spec.types = {}
            spec.types[name] = tuple(arg_types)
        return spec
| 39.869048 | 89 | 0.589131 | 2,548 | 0.760824 | 0 | 0 | 0 | 0 | 0 | 0 | 888 | 0.265154 |
0bcf92384a83dc5131845482a249092051ba39f4 | 24,131 | py | Python | webapp/tests/test_storage.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | [
"Apache-2.0"
] | 1 | 2021-01-16T20:10:45.000Z | 2021-01-16T20:10:45.000Z | webapp/tests/test_storage.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | [
"Apache-2.0"
] | null | null | null | webapp/tests/test_storage.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | [
"Apache-2.0"
] | null | null | null | import os
import random
import shutil
import time
import whisper
from django.conf import settings
from django.test import override_settings
from mock import patch, Mock
from .base import TestCase
from graphite.finders.utils import BaseFinder
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.readers.utils import BaseReader
from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index
from graphite.tags.localdatabase import LocalDatabaseTagDB
from graphite.worker_pool.pool import PoolTimeoutError
from graphite.render.datalib import TimeSeries
from graphite.render.evaluator import evaluateTarget
from graphite.util import epoch_to_dt
class StorageTest(TestCase):
    def test_fetch(self):
        """Store.fetch() queries enabled finders only and merges results."""
        disabled_finder = get_finders('tests.test_storage.DisabledFinder')[0]
        legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
        test_finder = get_finders('tests.test_storage.TestFinder')[0]
        remote_finder = get_finders('tests.test_storage.RemoteFinder')[0]
        store = Store(
            finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
            tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
        )
        # tagdb is properly initialized
        self.assertIsInstance(store.tagdb, LocalDatabaseTagDB)
        # get all enabled finders
        finders = store.get_finders()
        self.assertEqual(list(finders), [legacy_finder, test_finder, remote_finder])
        # get only local finders
        finders = store.get_finders(local=True)
        self.assertEqual(list(finders), [legacy_finder, test_finder])
        # fetch with empty patterns
        result = store.fetch([], 1, 2, 3, {})
        self.assertEqual(result, [])
        # fetch
        result = store.fetch(['a.**'], 1, 2, 3, {})
        self.assertEqual(len(result), 3)
        result.sort(key=lambda node: node['name'])
        self.assertEqual(result[0]['name'], 'a.b.c.d')
        self.assertEqual(result[0]['pathExpression'], 'a.**')
        self.assertEqual(result[1]['name'], 'a.b.c.d')
        self.assertEqual(result[1]['pathExpression'], 'a.**')
        self.assertEqual(result[2]['name'], 'a.b.c.e')
        self.assertEqual(result[2]['pathExpression'], 'a.**')
    def test_fetch_pool_timeout(self):
        """fetch() raises and logs once when the worker pool times out."""
        # pool timeout
        store = Store(
            finders=[RemoteFinder()]
        )
        def mock_pool_exec(pool, jobs, timeout):
            raise PoolTimeoutError()
        message = 'Timed out after [-.e0-9]+s for fetch for \[\'a\'\]'
        with patch('graphite.storage.pool_exec', mock_pool_exec):
            with patch('graphite.storage.log.info') as log_info:
                with self.assertRaisesRegexp(Exception, message):
                    list(store.fetch(['a'], 1, 2, 3, {}))
                self.assertEqual(log_info.call_count, 1)
                self.assertRegexpMatches(log_info.call_args[0][0], message)
    def test_fetch_all_failed(self):
        """fetch() raises when every finder fails, logging each failure."""
        # all finds failed
        store = Store(
            finders=[TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(1\)'):
                list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 1)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
        # same with two failing finders: one log entry per finder
        store = Store(
            finders=[TestFinder(), TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
                list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 2)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
    def test_fetch_some_failed(self):
        """fetch() succeeds when only some finders fail (soft-fail default)."""
        # some finders failed
        store = Store(
            finders=[TestFinder(), RemoteFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 1)
        # NOTE(review): below, both finders fail, which duplicates
        # test_fetch_all_failed; presumably a mixed finder list was intended
        # here -- confirm.
        store = Store(
            finders=[TestFinder(), TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
                list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 2)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
    @override_settings(STORE_FAIL_ON_ERROR=True)
    def test_fetch_some_failed_hard_fail_enabled(self):
        """With STORE_FAIL_ON_ERROR, even a partial finder failure raises."""
        # one of two finders fails -> hard failure
        store = Store(
            finders=[TestFinder(), RemoteFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, '1 request\(s\) failed for fetch for \[\'a\'\] \(2\)'):
                list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 1)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
        # both finders fail -> all-failed message
        store = Store(
            finders=[TestFinder(), TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
                list(store.fetch(['a'], 1, 2, 3, {}))
            self.assertEqual(log_info.call_count, 2)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
    def test_find(self):
        """Store.find() yields branch and leaf nodes and enforces thresholds."""
        disabled_finder = DisabledFinder()
        legacy_finder = LegacyFinder()
        test_finder = TestFinder()
        remote_finder = RemoteFinder()
        store = Store(
            finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
            tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
        )
        # find nodes
        result = list(store.find('a'))
        self.assertEqual(len(result), 5)
        for node in result:
            if node.path in ['a.b.c.d', 'a.b.c.e']:
                self.assertIsInstance(node, LeafNode)
            else:
                self.assertIsInstance(node, BranchNode)
                self.assertTrue(node.path in ['a', 'a.b', 'a.b.c'])
        # find leaves only
        result = list(store.find('a', leaves_only=True))
        self.assertEqual(len(result), 2)
        for node in result:
            self.assertIsInstance(node, LeafNode)
            self.assertTrue(node.path in ['a.b.c.d', 'a.b.c.e'])
        # failure threshold
        with self.settings(METRICS_FIND_FAILURE_THRESHOLD=1):
            with self.assertRaisesRegexp(Exception, 'Query a yields too many results and failed \(failure threshold is 1\)'):
                list(store.find('a'))
        # warning threshold
        with self.settings(METRICS_FIND_WARNING_THRESHOLD=1):
            with patch('graphite.storage.log.warning') as log_warning:
                list(store.find('a'))
                self.assertEqual(log_warning.call_count, 1)
                self.assertEqual(
                    log_warning.call_args[0][0],
                    'Query a yields large number of results up to 2 (warning threshold is 1)'
                )
    def test_find_pool_timeout(self):
        """find() raises and logs once when the worker pool times out."""
        # pool timeout
        store = Store(
            finders=[RemoteFinder()]
        )
        def mock_pool_exec(pool, jobs, timeout):
            raise PoolTimeoutError()
        message = 'Timed out after [-.e0-9]+s for find <FindQuery: a from \* until \*>'
        with patch('graphite.storage.pool_exec', mock_pool_exec):
            with patch('graphite.storage.log.info') as log_info:
                with self.assertRaisesRegexp(Exception, message):
                    list(store.find('a'))
                self.assertEqual(log_info.call_count, 1)
                self.assertRegexpMatches(log_info.call_args[0][0], message)
    def test_find_all_failed(self):
        """find() raises when every finder fails, logging each failure."""
        # all finds failed
        store = Store(
            finders=[TestFinder()]
        )
        message = 'All requests failed for find <FindQuery: a from \* until \*>'
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, message):
                list(store.find('a'))
            self.assertEqual(log_info.call_count, 1)
            self.assertRegexpMatches(
                log_info.call_args[0][0],
                'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
            )
        # same with two failing finders: one log entry per finder
        store = Store(
            finders=[TestFinder(), TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, message):
                list(store.find('a'))
            self.assertEqual(log_info.call_count, 2)
            self.assertRegexpMatches(
                log_info.call_args[0][0],
                'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
            )
@override_settings(REMOTE_STORE_FORWARD_HEADERS=['X-Test1', 'X-Test2'])
def test_extractForwardHeaders(self):
class DummyRequest(object):
META = {
'HTTP_X_TEST1': 'test',
}
headers = extractForwardHeaders(DummyRequest())
self.assertEqual(headers, {'X-Test1': 'test'})
    def test_get_index(self):
        """get_index() merges finder indexes; localOnly excludes remote finders."""
        disabled_finder = DisabledFinder()
        # use get_finders so legacy_finder is patched with get_index
        legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
        test_finder = TestFinder()
        remote_finder = RemoteFinder()
        store = Store(
            finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
            tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
        )
        # get index
        result = store.get_index()
        self.assertEqual(result, ['a.b.c.d', 'a.b.c.e'])
        # get local index
        result = store.get_index({'localOnly': True})
        self.assertEqual(result, ['a.b.c.d'])
    def test_get_index_pool_timeout(self):
        """get_index() raises and logs once when the worker pool times out."""
        # pool timeout
        store = Store(
            finders=[RemoteFinder()]
        )
        def mock_pool_exec(pool, jobs, timeout):
            raise PoolTimeoutError()
        with patch('graphite.storage.pool_exec', mock_pool_exec):
            with patch('graphite.storage.log.info') as log_info:
                with self.assertRaisesRegexp(Exception, 'Timed out after .*'):
                    store.get_index()
                self.assertEqual(log_info.call_count, 1)
                self.assertRegexpMatches(log_info.call_args[0][0], 'Timed out after [-.e0-9]+s')
    def test_get_index_all_failed(self):
        """get_index() raises when every finder fails, logging each failure."""
        # all finders failed
        store = Store(
            finders=[TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for get_index'):
                store.get_index()
            self.assertEqual(log_info.call_count, 1)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes')
        # same with two failing finders: one log entry per finder
        store = Store(
            finders=[TestFinder(), TestFinder()]
        )
        with patch('graphite.storage.log.info') as log_info:
            with self.assertRaisesRegexp(Exception, 'All requests failed for get_index \(2\)'):
                store.get_index()
            self.assertEqual(log_info.call_count, 2)
            self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes')
    @override_settings(USE_WORKER_POOL=False)
    def test_fetch_tag_support(self):
        """Tag-aware finders receive seriesByTag() patterns unexpanded."""
        class TestFinderTags(BaseFinder):
            # tags = True: the finder resolves tag expressions itself, so the
            # Store must pass seriesByTag() patterns through verbatim
            tags = True
            def find_nodes(self, query):
                pass
            def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
                if patterns != ['seriesByTag("hello=tiger")', 'seriesByTag("name=notags")', 'seriesByTag("name=testtags")', 'testtags;hello=tiger']:
                    raise Exception('Unexpected patterns %s' % str(patterns))
                return [
                    {
                        'pathExpression': 'testtags;hello=tiger',
                        'name': 'testtags;hello=tiger',
                        'time_info': (0, 60, 1),
                        'values': [],
                    },
                    {
                        'pathExpression': 'seriesByTag("hello=tiger")',
                        'name': 'testtags;hello=tiger',
                        'time_info': (0, 60, 1),
                        'values': [],
                    },
                    {
                        'pathExpression': 'seriesByTag("name=testtags")',
                        'name': 'testtags;hello=tiger',
                        'time_info': (0, 60, 1),
                        'values': [],
                    },
                ]
        tagdb = Mock()
        store = Store(
            finders=[TestFinderTags()],
            tagdb=tagdb
        )
        request_context = {
            'startTime': epoch_to_dt(0),
            'endTime': epoch_to_dt(60),
            'now': epoch_to_dt(60),
        }
        with patch('graphite.render.datalib.STORE', store):
            results = evaluateTarget(request_context, ['testtags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
            # the "name=notags" expression matched nothing, so only 3 series
            self.assertEqual(results, [
                TimeSeries('testtags;hello=tiger', 0, 60, 1, []),
                TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
                TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=testtags")'),
            ])
    @override_settings(USE_WORKER_POOL=True)
    def test_fetch_no_tag_support(self):
        """Without finder tag support, the Store expands seriesByTag() via the tagdb."""
        class TestFinderNoTags(BaseFinder):
            # tags = False: the Store must resolve seriesByTag() expressions
            # through the tagdb before calling fetch()
            tags = False
            def find_nodes(self, query):
                pass
            def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
                if patterns != ['notags;hello=tiger']:
                    raise Exception('Unexpected patterns %s' % str(patterns))
                return [
                    {
                        'pathExpression': 'notags;hello=tiger',
                        'name': 'notags;hello=tiger',
                        'time_info': (0, 60, 1),
                        'values': [],
                    }
                ]
        tagdb = Mock()
        def mockFindSeries(exprs, requestContext=None):
            self.assertEqual(requestContext, request_context)
            if exprs == ('hello=tiger',) or exprs == ('name=notags',):
                return ['notags;hello=tiger']
            if exprs == ('name=testtags',):
                return []
            raise Exception('Unexpected exprs %s' % str(exprs))
        tagdb.find_series.side_effect = mockFindSeries
        store = Store(
            finders=[TestFinderNoTags()],
            tagdb=tagdb
        )
        with patch('graphite.render.datalib.STORE', store):
            request_context = {
                'startTime': epoch_to_dt(0),
                'endTime': epoch_to_dt(60),
                'now': epoch_to_dt(60),
            }
            results = evaluateTarget(request_context, ['notags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
            # one tagdb lookup per seriesByTag() expression
            self.assertEqual(tagdb.find_series.call_count, 3)
            self.assertEqual(results, [
                TimeSeries('notags;hello=tiger', 0, 60, 1, []),
                TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
                TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=notags")'),
            ])
    def test_autocomplete(self):
        """Exercise Store.tagdb_auto_complete_tags/_values across finder mixes.

        Covers: tag-enabled + non-tag finders, explicit vs. default
        limit/requestContext, tag-only finders, non-tag-only finders, no
        finders, finder exceptions (one and several), and worker-pool
        timeouts.
        """
        # Alias so the nested fixture classes can assert on the test case.
        test = self
        class TestFinderTags(BaseFinder):
            tags = True
            def __init__(self, request_limit=100, request_context=None):
                self.limit = request_limit
                self.context = request_context or {}
            def find_nodes(self, query):
                pass
            def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
                test.assertEqual(exprs, ['tag1=value1'])
                test.assertEqual(tagPrefix, 'test')
                test.assertEqual(limit, self.limit)
                test.assertEqual(requestContext, self.context)
                return ['testtags']
            def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
                test.assertEqual(exprs, ['tag1=value1'])
                test.assertEqual(tag, 'tag2')
                test.assertEqual(valuePrefix, 'test')
                test.assertEqual(limit, self.limit)
                test.assertEqual(requestContext, self.context)
                return ['testtags']
        class TestFinderNoTags(BaseFinder):
            tags = False
            def find_nodes(self, query):
                pass
        class TestFinderTagsException(TestFinderTags):
            def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
                raise Exception('TestFinderTagsException.auto_complete_tags')
            def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
                raise Exception('TestFinderTagsException.auto_complete_values')
        class TestFinderTagsTimeout(TestFinderTags):
            def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
                time.sleep(0.1)
                return ['testtags']
            def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
                time.sleep(0.1)
                return ['testtags']
        # Builds a Store whose tagdb is a Mock; non-tag finders fall back to
        # this tagdb, which returns 'testnotags' after asserting its args.
        def mockStore(finders, request_limit=100, request_context=None):
            tagdb = Mock()
            def mockAutoCompleteTags(exprs, tagPrefix=None, limit=None, requestContext=None):
                self.assertEqual(exprs, ['tag1=value1'])
                self.assertEqual(tagPrefix, 'test')
                self.assertEqual(limit, request_limit)
                self.assertEqual(requestContext, request_context or {})
                return ['testnotags']
            tagdb.auto_complete_tags.side_effect = mockAutoCompleteTags
            def mockAutoCompleteValues(exprs, tag, valuePrefix=None, limit=None, requestContext=None):
                self.assertEqual(exprs, ['tag1=value1'])
                self.assertEqual(tag, 'tag2')
                self.assertEqual(valuePrefix, 'test')
                self.assertEqual(limit, request_limit)
                self.assertEqual(requestContext, request_context or {})
                return ['testnotags']
            tagdb.auto_complete_values.side_effect = mockAutoCompleteValues
            return Store(
                finders=finders,
                tagdb=tagdb,
            )
        request_context = {}
        # test with both tag-enabled and non-tag-enabled finders
        store = mockStore([TestFinderTags(), TestFinderNoTags()])
        result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
        self.assertEqual(result, ['testnotags', 'testtags'])
        result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
        self.assertEqual(result, ['testnotags', 'testtags'])
        # test with no limit & no requestContext
        store = mockStore([TestFinderTags(None, {}), TestFinderNoTags()], None, {})
        result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test')
        self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
        self.assertEqual(result, ['testnotags', 'testtags'])
        result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test')
        self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
        self.assertEqual(result, ['testnotags', 'testtags'])
        # test with only tag-enabled finder: the mock tagdb must not be hit
        store = mockStore([TestFinderTags()])
        result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
        self.assertEqual(result, ['testtags'])
        result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
        self.assertEqual(result, ['testtags'])
        # test with only non-tag-enabled finder
        store = mockStore([TestFinderNoTags()])
        result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
        self.assertEqual(result, ['testnotags'])
        result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
        self.assertEqual(result, ['testnotags'])
        # test with no finders
        store = mockStore([])
        result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
        self.assertEqual(result, [])
        result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
        self.assertEqual(result, [])
        # test exception handling with one finder
        store = mockStore([TestFinderTagsException()])
        with self.assertRaisesRegexp(Exception, 'All requests failed for tags for \[\'tag1=value1\'\] test.*'):
            store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        with self.assertRaisesRegexp(Exception, 'All requests failed for values for \[\'tag1=value1\'\] tag2 test.*'):
            store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        # test exception handling with more than one finder
        store = mockStore([TestFinderTagsException(), TestFinderTagsException()])
        with self.assertRaisesRegexp(Exception, 'All requests failed for tags for \[\'tag1=value1\'\] test'):
            store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
        with self.assertRaisesRegexp(Exception, 'All requests failed for values for \[\'tag1=value1\'\] tag2 test'):
            store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
        # test pool timeout handling (FIND_TIMEOUT=0 makes the sleep exceed it)
        store = mockStore([TestFinderTagsTimeout()])
        with self.settings(USE_WORKER_POOL=True, FIND_TIMEOUT=0):
            with self.assertRaisesRegexp(Exception, 'Timed out after [-.e0-9]+s for tags for \[\'tag1=value1\'\]'):
                store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
            with self.assertRaisesRegexp(Exception, 'Timed out after [-.e0-9]+s for values for \[\'tag1=value1\'\] tag2 test'):
                store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# test write_index
hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
def create_whisper_hosts(self):
worker1 = self.hostcpu.replace('hostname', 'worker1')
worker2 = self.hostcpu.replace('hostname', 'worker2')
bogus_file = os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt')
try:
os.makedirs(worker1.replace('cpu.wsp', ''))
os.makedirs(worker2.replace('cpu.wsp', ''))
os.makedirs(bogus_file.replace('bogus_file.txt', ''))
except OSError:
pass
open(bogus_file, 'a').close()
whisper.create(worker1, [(1, 60)])
whisper.create(worker2, [(1, 60)])
ts = int(time.time())
whisper.update(worker1, 1, ts)
whisper.update(worker2, 2, ts)
def wipe_whisper_hosts(self):
try:
os.remove(self.hostcpu.replace('hostname', 'worker1'))
os.remove(self.hostcpu.replace('hostname', 'worker2'))
os.remove(os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt'))
shutil.rmtree(self.hostcpu.replace('hostname/cpu.wsp', ''))
shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'a'))
except OSError:
pass
    def test_write_index(self):
        """write_index() should succeed (returning None) for both the default
        index file and an explicitly specified one."""
        self.create_whisper_hosts()
        self.addCleanup(self.wipe_whisper_hosts)
        self.assertEqual(None, write_index() )
        self.assertEqual(None, write_index(settings.INDEX_FILE) )
class DisabledFinder(object):
    """Finder fixture flagged as disabled; the store should never query it."""
    disabled = True
    def find_nodes(self, query):
        pass
class LegacyFinder(object):
    """Finder fixture using the legacy (non-BaseFinder) interface; yields the
    branch chain a -> a.b -> a.b.c plus one leaf a.b.c.d."""
    def find_nodes(self, query):
        yield BranchNode('a')
        yield BranchNode('a.b')
        yield BranchNode('a.b.c')
        yield LeafNode('a.b.c.d', DummyReader('a.b.c.d'))
class DummyReader(BaseReader):
    """Reader fixture returning random datapoints at a fixed 10s step."""
    __slots__ = ('path',)
    def __init__(self, path):
        self.path = path
    def fetch(self, startTime, endTime, now=None, requestContext=None):
        # One random value (or None, simulating a gap) per 10-second interval.
        npoints = (endTime - startTime) // 10
        return (startTime, endTime, 10), [
            random.choice([None, 1, 2, 3]) for i in range(npoints)
        ]
    def get_intervals(self):
        # Pretend data exists for the last hour.
        return IntervalSet([Interval(time.time() - 3600, time.time())])
class RemoteFinder(BaseFinder):
    """Finder fixture marked non-local (remote); yields one branch and two
    leaves under a.b.c."""
    local = False
    def find_nodes(self, query):
        yield BranchNode('a.b.c')
        yield LeafNode('a.b.c.d', DummyReader('a.b.c.d'))
        yield LeafNode('a.b.c.e', DummyReader('a.b.c.e'))
class TestFinder(BaseFinder):
    """Finder fixture whose find_nodes always raises, for error-path tests."""
    def find_nodes(self, query):
        raise Exception('TestFinder.find_nodes')
| 36.451662 | 165 | 0.668062 | 23,376 | 0.968712 | 332 | 0.013758 | 4,972 | 0.206042 | 0 | 0 | 5,844 | 0.242178 |
0bd0f2dad56c9dc7b13a05354bfcba835f52effa | 804 | py | Python | sfzgen_test.py | tavasti/sfzscripts | c8b68343fd8eb62cdd7eed04c4d449672b4ed55f | [
"MIT"
] | null | null | null | sfzgen_test.py | tavasti/sfzscripts | c8b68343fd8eb62cdd7eed04c4d449672b4ed55f | [
"MIT"
] | null | null | null | sfzgen_test.py | tavasti/sfzscripts | c8b68343fd8eb62cdd7eed04c4d449672b4ed55f | [
"MIT"
] | 1 | 2020-02-07T01:04:13.000Z | 2020-02-07T01:04:13.000Z | import sys
from sfzgen import GetNote
# Verify that GetNote gives us back the notes that we expect.
midi_val = 24 # Starting midi value.
for scale in range(1, 8):
    for note in 'cdefgab':
        for sharp in ('', 's', '#'):
            name = note + sharp + str(scale)
            # E and B have no sharp, so skip e#/es and b#/bs names.
            if name[:2] in ('es', 'e#', 'bs', 'b#'):
                continue
            assert GetNote(name, 'unused_filename') == midi_val
            # For sharps, we want to test the same note value for 's' and '#', so
            # don't increment for 's'.
            if sharp != 's':
                midi_val += 1
# Verify that we get the correct error (exit code 1) for unrecognized patterns.
err_code = 0
def ExitFake(code):
    # Stand-in for sys.exit that records the exit code instead of exiting.
    global err_code
    err_code = code
org_exit = sys.exit
sys.exit = ExitFake
GetNote('blech', 'unused_filename')
assert(err_code == 1)
sys.exit = org_exit
print('ok')
| 24.363636 | 75 | 0.633085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.39801 |
0bd12a0dfa02e72aa6f565510969aa9408cd87e2 | 338 | py | Python | hardhat/recipes/python/py.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/py.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/py.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null |
from .base import PipBaseRecipe
class PyRecipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(PyRecipe, self).__init__(*args, **kwargs)
self.sha256 = '1f9a981438f2acc20470b301a07a4963' \
'75641f902320f70e31916fe3377385a9'
self.name = 'py'
self.version = '1.4.33'
| 24.142857 | 58 | 0.627219 | 300 | 0.887574 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.236686 |
0bd3fe284b31a90b04c8a385e5ac021eadf08bd7 | 577 | py | Python | tests/test_article.py | JohnKarima/news-hub | 261969fe949bf7efbdc6dabb502b7b9b9eecabac | [
"MIT"
] | null | null | null | tests/test_article.py | JohnKarima/news-hub | 261969fe949bf7efbdc6dabb502b7b9b9eecabac | [
"MIT"
] | null | null | null | tests/test_article.py | JohnKarima/news-hub | 261969fe949bf7efbdc6dabb502b7b9b9eecabac | [
"MIT"
] | null | null | null | import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
    '''
    Test Class to test the behaviour of the Article class
    '''
    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        # Fixture arguments appear to be (source id, source name, author,
        # title, description, url, image url, published date, content) --
        # confirm against Article.__init__.
        self.new_article = Article('NewsDaily', 'NewsDailyTrue','Larry Madowo', 'Hummus...thoughts?','Literally talking about hummus sir','www.newsdaily.net','www.newsdaily.net/picOfHummus6', '2020/2/3', 'lorem gang et all')
    def test_instance(self):
        '''
        Verify the fixture is an Article instance
        '''
        self.assertTrue(isinstance(self.new_article,Article))
0bd443d5f3e142040b4314347094c6eb56ae8072 | 4,979 | py | Python | flatten.py | FauxFaux/jdk9-repotools | b578c1465ca63946f1240fe3281d2fd1eec34513 | [
"MIT"
] | null | null | null | flatten.py | FauxFaux/jdk9-repotools | b578c1465ca63946f1240fe3281d2fd1eec34513 | [
"MIT"
] | null | null | null | flatten.py | FauxFaux/jdk9-repotools | b578c1465ca63946f1240fe3281d2fd1eec34513 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import collections
import datetime
import subprocess
import dateutil.parser
import os
from typing import Dict, Iterator, List, TypeVar, Tuple, Optional
# Generic type parameter used by helpers below.
T = TypeVar('T')
# One parsed `git log` entry: commit sha, tree sha, committer date.
RawLog = collections.namedtuple('RawLog', ['commit', 'tree', 'date'])
# One `git ls-tree` entry: file mode, object type (blob/tree), object sha.
TreeInfo = collections.namedtuple('TreeInfo', ['mode', 'object_type', 'sha'])
def commits(path: str) -> Iterator[RawLog]:
    """Yield RawLog entries for the first-parent history of the repository
    checked out at *path*; yields nothing for a repo with no branch heads."""
    git_dir = path + '/.git'
    # An initialized repo without any branch heads has no commits to list.
    if os.path.isdir(git_dir) and not os.listdir(git_dir + '/refs/heads'):
        return
    log_output = subprocess.check_output(
        ['/usr/bin/git', '--git-dir=' + git_dir, 'log', '--first-parent',
         '--format=%H %T %cd', '--date=iso'])
    for raw_line in log_output.split(b'\n'):
        if not raw_line:
            break
        # Fields: commit sha, tree sha, then the (space-containing) date.
        fields = raw_line.decode('utf-8').split(' ', 2)
        yield RawLog(fields[0], fields[1], dateutil.parser.parse(fields[2]))
def last(of: List[T]) -> T:
    """Return the final element of *of* (raises IndexError when empty)."""
    # Negative indexing is the idiomatic form of of[len(of) - 1].
    return of[-1]
class Repo:
    """A git repository (plus its submodules) driven through the `git` CLI.

    Reads first-parent history for the repo and each entry under
    .git/modules, and can write trees/commits/refs into the repo.
    """
    def __init__(self, path: str):
        self.path = path
        # First-parent history as produced by `git log` (newest first).
        self.commits = list(commits(path))
        self.sub = {} # type: Dict[str, Repo]
        try:
            submodules = set(os.listdir(path + '/.git/modules'))
        except IOError:
            # No .git/modules directory: repository has no submodules.
            return
        for mod in submodules:
            self.sub[mod] = Repo('{}/{}'.format(path, mod))
    def read_tree(self, tree: str) -> Dict[str, TreeInfo]:
        """Return the entries of tree object *tree* keyed by entry name."""
        the_tree = {} # type: Dict[str, TreeInfo]
        for tree_line in subprocess.check_output(
            ['git', '--git-dir={}/.git'.format(self.path), 'ls-tree', '-z', tree]
        ).split(b'\0'):
            if not tree_line:
                break
            # `ls-tree -z` format: "<mode> <type> <sha>\t<name>\0"
            (mode, object_type, sha_name) = tree_line.decode('utf-8').split(' ', 2)
            (sha, name) = sha_name.split('\t', 1)
            the_tree[name] = TreeInfo(mode, object_type, sha)
        return the_tree
    def set_alternates(self, src: 'Repo') -> None:
        """Point this repo's object store at *src*'s objects (and those of
        src's submodules) so objects can be shared instead of copied."""
        with open(self.path + '/.git/objects/info/alternates', 'w') as f:
            f.write('../../../{}/.git/objects\n'.format(src.path))
            for sub in src.sub.keys():
                f.write('../../../{}/.git/modules/{}/objects\n'.format(src.path, sub))
    def earliest_commit(self) -> Tuple[Optional[str], Optional[RawLog]]:
        """Pop and return the oldest pending commit across this repo and its
        submodules as (submodule_name_or_None, RawLog); (None, None) when
        everything is exhausted.  The last list entry is the oldest because
        `git log` emits newest first."""
        commit = None
        if self.commits:
            commit = last(self.commits)
        src = None
        for name, repo in sorted(self.sub.items()):
            if not repo.commits:
                continue
            candidate = last(repo.commits)
            if not commit or candidate.date < commit.date:
                commit = candidate
                src = name
        if not commit:
            return None, None
        if src:
            self.sub[src].commits.pop()
        else:
            self.commits.pop()
        return src, commit
    def write_tree(self, tree: Dict[str, TreeInfo]) -> str:
        """Create a tree object from *tree* via `git mktree -z`; returns its sha."""
        ret = ''
        for name, info in tree.items():
            ret += '{} {} {}\t{}\0'.format(info.mode, info.object_type, info.sha, name)
        return subprocess.check_output(
            ['git', '--git-dir=' + self.path + '/.git', 'mktree', '-z'],
            input=ret.encode('utf-8')).decode('utf-8').strip()
    def load_commit(self, sha: str) -> List[str]:
        """Return the raw commit object *sha* as a list of text lines."""
        return subprocess.check_output(
            ['git', '--git-dir=' + self.path + '/.git', 'cat-file', 'commit', sha]
        ).decode('utf-8').split('\n')
    def commit_tree(self, tree: str, ref: str, parents: Iterator[str]) -> str:
        """Write a new commit object with *tree* and *parents*, reusing the
        author/committer/message of commit *ref*; returns the new sha."""
        new_lines = ['tree ' + tree]
        for parent in parents:
            new_lines.append('parent ' + parent)
        # Drop the original tree header (and its single first-parent header,
        # if present) before splicing in ours.
        original_lines = self.load_commit(ref)
        original_lines.pop(0) # tree
        if original_lines[0].startswith('parent '):
            original_lines.pop(0)
        new_lines.extend(original_lines)
        return subprocess.check_output(
            ['git', '--git-dir=' + self.path + '/.git', 'hash-object', '-w', '--stdin', '-t', 'commit'],
            input='\n'.join(new_lines).encode('utf-8')
        ).decode('utf-8').strip()
    def update_ref(self, ref: str, to: str):
        """Point *ref* (e.g. refs/heads/master) at commit *to*."""
        subprocess.check_call(
            ['git', '--git-dir=' + self.path + '/.git', 'update-ref', ref, to]
        )
def main():
    """Replay the 'jdk9' repo and its submodules into 'flattened' as a single
    linear history, taking commits in chronological (committer-date) order."""
    new = Repo('flattened')
    orig = Repo('jdk9')
    new.set_alternates(orig)
    # overlay maps submodule name -> its latest tree, merged over the root tree.
    overlay = {} # type: Dict[str, TreeInfo]
    tree = {}
    head = None
    while True:
        src, found_commit = orig.earliest_commit()
        if not found_commit:
            break
        if src:
            # update to a child repo
            overlay[src] = TreeInfo('040000', 'tree', found_commit.tree)
        else:
            # update to the root
            tree = orig.read_tree(found_commit.tree)
        tree.update(overlay)
        head = new.commit_tree(new.write_tree(tree), found_commit.commit, [head] if head else [])
    new.update_ref('refs/heads/master', head)
if '__main__' == __name__:
    main()
| 31.713376 | 113 | 0.55011 | 3,283 | 0.659369 | 561 | 0.112673 | 0 | 0 | 0 | 0 | 860 | 0.172725 |
0bd4eab1a0789e3a62397b13129e735e19c46a8a | 8,651 | py | Python | src/ros/rosmake/test/test_parallel_build.py | jungleni/ros_code_reading | 499e98c0b0d309da78060b19b55c420c22110d65 | [
"Apache-2.0"
] | 742 | 2017-07-05T02:49:36.000Z | 2022-03-30T12:55:43.000Z | src/ros/rosmake/test/test_parallel_build.py | jungleni/ros_code_reading | 499e98c0b0d309da78060b19b55c420c22110d65 | [
"Apache-2.0"
] | 73 | 2017-07-06T12:50:51.000Z | 2022-03-07T08:07:07.000Z | src/ros/rosmake/test/test_parallel_build.py | jungleni/ros_code_reading | 499e98c0b0d309da78060b19b55c420c22110d65 | [
"Apache-2.0"
] | 425 | 2017-07-04T22:03:29.000Z | 2022-03-29T06:59:06.000Z | #!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
from rosmake import parallel_build
class TestDependencyTracker(unittest.TestCase):
    """Tests for parallel_build.DependencyTracker using injected fake maps
    (deps = full dependency lists, deps1 = first-order dependencies).

    Uses assertEqual throughout: assertEquals is a deprecated alias and the
    rest of this file already uses assertEqual.
    """
    def setUp(self):
        self.deps = {}
        self.deps1 = {}
        self.deps["a"] = ["b", "c", "d", "e"]
        self.deps1["a"] = ["b"]
        self.deps["b"] = ["c"]
        self.deps1["b"] = ["c"]
        self.deps["d"] = ["c", "e"]
        self.deps1["d"] = ["c", "e"]
        self.dt = parallel_build.DependencyTracker()
        self.dt.load_fake_deps(self.deps, self.deps1)

    def test_deps_1(self):
        # First-order dependencies come straight from the deps1 map.
        self.assertEqual(self.deps1["a"], self.dt.get_deps_1("a"))
        self.assertEqual(self.deps1["b"], self.dt.get_deps_1("b"))
        self.assertEqual(self.deps1["d"], self.dt.get_deps_1("d"))

    def test_deps(self):
        # Full dependency lists come from the deps map.
        self.assertEqual(self.deps["a"], self.dt.get_deps("a"))
        self.assertEqual(self.deps["b"], self.dt.get_deps("b"))
        self.assertEqual(self.deps["d"], self.dt.get_deps("d"))

    def test_not_package(self):
        # Unknown package names yield empty lists rather than raising.
        self.assertEqual([], self.dt.get_deps("This is not a valid package name"))
        self.assertEqual([], self.dt.get_deps_1("This is not a valid package name"))
class TestBuildQueue(unittest.TestCase):
    """Tests for parallel_build.BuildQueue ordering, completion, abort, and
    parallel-dispatch behavior, using two fake dependency graphs:
    serial_tracker (a -> b -> c -> d -> e -> f chain) and parallel_tracker
    (a depends on b..f, which are all independent)."""
    def setUp(self):
        deps = {}
        deps1 = {}
        deps1["a"] = ["b"]
        deps["a"] = ["b", "c", "d", "e", "f"]
        deps1["b"] = ["c"]
        deps["b"] = ["c", "d", "e", "f"]
        deps1["c"] = ["d"]
        deps["c"] = ["d", "e", "f"]
        deps1["d"] = ["e"]
        deps["d"] = ["e", "f"]
        deps["e"] = ["f"]
        deps1["e"] = ["f"]
        deps["f"] = []
        deps1["f"] = []
        self.serial_tracker = parallel_build.DependencyTracker()
        self.serial_tracker.load_fake_deps(deps, deps1)
        # Second graph: b..f independent, a depends on all of them.
        deps = {}
        deps1 = {}
        deps["a"] = ["b", "c", "d", "e", "f"]
        deps1["a"] = ["b", "c", "d", "e", "f"]
        deps["b"] = []
        deps1["b"] = []
        deps["c"] = []
        deps1["c"] = []
        deps["d"] = []
        deps1["d"] = []
        deps["e"] = []
        deps1["e"] = []
        deps["f"] = []
        deps1["f"] = []
        self.parallel_tracker = parallel_build.DependencyTracker()
        self.parallel_tracker.load_fake_deps(deps, deps1)
    # full queue: packages must come out in reverse dependency order f..a
    def test_full_build(self):
        bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.serial_tracker)
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("f", bq.get_valid_package())
        self.assertEqual(0, len(bq.built))
        bq.return_built("f")
        self.assertEqual(1, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("e", bq.get_valid_package())
        bq.return_built("e")
        self.assertEqual(2, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("d", bq.get_valid_package())
        bq.return_built("d")
        self.assertEqual(3, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("c", bq.get_valid_package())
        bq.return_built("c")
        self.assertEqual(4, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("b", bq.get_valid_package())
        bq.return_built("b")
        self.assertEqual(5, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("a", bq.get_valid_package())
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        bq.return_built("a")
        self.assertEqual(6, len(bq.built))
        self.assertTrue (bq.is_done())
        self.assertTrue (bq.succeeded())
    # partial build: queue containing only a suffix of the chain
    def test_partial_build(self):
        bq = parallel_build.BuildQueue(["d", "e", "f"], self.serial_tracker)
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("f", bq.get_valid_package())
        self.assertEqual(0, len(bq.built))
        bq.return_built("f")
        self.assertEqual(1, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("e", bq.get_valid_package())
        bq.return_built("e")
        self.assertEqual(2, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("d", bq.get_valid_package())
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        bq.return_built("d")
        self.assertEqual(3, len(bq.built))
        self.assertTrue(bq.is_done())
        self.assertTrue(bq.succeeded())
    # abort early: stop() finishes the queue without success
    def test_abort_early(self):
        bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.serial_tracker)
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual(0, len(bq.built))
        self.assertEqual("f", bq.get_valid_package())
        bq.return_built("f")
        self.assertEqual(1, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("e", bq.get_valid_package())
        bq.return_built("e")
        self.assertEqual(2, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("d", bq.get_valid_package())
        bq.return_built("d")
        self.assertEqual(3, len(bq.built))
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        bq.stop()
        self.assertTrue(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual(None, bq.get_valid_package())
    # many parallel: b..f may come out in any order before a is released
    def test_parallel_build(self):
        bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.parallel_tracker)
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        dependents = ["b", "c", "d", "e", "f"]
        count = 0
        total = 6
        while len(dependents) > 0:
            result= bq.get_valid_package()
            done = len(bq.built)
            pkgs = bq._total_pkgs
            self.assertTrue(result in dependents)
            #print result, done, pkgs
            dependents.remove(result)
            self.assertEqual(count, done)
            self.assertEqual(total, pkgs)
            self.assertFalse(bq.is_done())
            self.assertFalse(bq.succeeded())
            bq.return_built(result)
            count = count + 1
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        self.assertEqual("a", bq.get_valid_package())
        self.assertFalse(bq.is_done())
        self.assertFalse(bq.succeeded())
        bq.return_built("a")
        self.assertTrue (bq.is_done())
        self.assertTrue (bq.succeeded())
    # stalled(future)
| 35.747934 | 93 | 0.596116 | 6,971 | 0.805803 | 0 | 0 | 0 | 0 | 0 | 0 | 2,146 | 0.248064 |
0bd5c6b04b7d3a4ccf8589e0b2129df29191d0f5 | 737 | py | Python | miqa/core/models/image.py | davidshq/miqa-1 | aeb5fbf40a65a6fdb82b6e3d3aff8fe47474792f | [
"Apache-2.0"
] | null | null | null | miqa/core/models/image.py | davidshq/miqa-1 | aeb5fbf40a65a6fdb82b6e3d3aff8fe47474792f | [
"Apache-2.0"
] | null | null | null | miqa/core/models/image.py | davidshq/miqa-1 | aeb5fbf40a65a6fdb82b6e3d3aff8fe47474792f | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from uuid import uuid4
from django.db import models
from django_extensions.db.models import TimeStampedModel
class Image(TimeStampedModel, models.Model):
    """A single image file belonging to a Scan, stored by filesystem path."""
    class Meta:
        # Composite index speeds lookups by (scan, name); default ordering
        # is alphabetical by name.
        indexes = [models.Index(fields=['scan', 'name'])]
        ordering = ['name']
    # UUID primary key generated client-side via uuid4.
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    scan = models.ForeignKey('Scan', related_name='images', on_delete=models.CASCADE)
    # Filesystem path to the image; path/size below stat it directly, so it
    # presumably holds an absolute path -- confirm against ingest code.
    raw_path = models.CharField(max_length=500, blank=False, unique=True)
    name = models.CharField(max_length=255, blank=False)
    @property
    def path(self) -> Path:
        """raw_path as a pathlib.Path."""
        return Path(self.raw_path)
    @property
    def size(self) -> int:
        """File size in bytes (stats the file on every access)."""
        return self.path.stat().st_size
| 29.48 | 85 | 0.700136 | 599 | 0.812754 | 0 | 0 | 148 | 0.200814 | 0 | 0 | 32 | 0.043419 |
0bd684f449fb29bb6b0b014c1fddbe47cd12fbe1 | 302 | py | Python | src/utils/libraries/index.py | Shellyda/Algorithms-Sorting-Project | 205f76b5127a53829056889e46cf240e0d75cbb5 | [
"MIT"
] | null | null | null | src/utils/libraries/index.py | Shellyda/Algorithms-Sorting-Project | 205f76b5127a53829056889e46cf240e0d75cbb5 | [
"MIT"
] | null | null | null | src/utils/libraries/index.py | Shellyda/Algorithms-Sorting-Project | 205f76b5127a53829056889e46cf240e0d75cbb5 | [
"MIT"
] | null | null | null | from utils.libraries.Get_duration_execution_time import Get_duration_execution_time
from utils.libraries.Bubble_sort import Bubble_sort
from utils.libraries.Insertion_sort import Insertion_sort
from utils.libraries.Merge_sort import Merge_sort
from utils.libraries.Selection_sort import Selection_sort
| 50.333333 | 83 | 0.900662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0bd90dcd08f531dd13aaeed5fdef14dcf683d7de | 2,897 | py | Python | Python_practice.py | ftercero/Election_Analysis | 52ae67ffa1e4e552653d96b42fd388bacfcd2227 | [
"Apache-2.0"
] | null | null | null | Python_practice.py | ftercero/Election_Analysis | 52ae67ffa1e4e552653d96b42fd388bacfcd2227 | [
"Apache-2.0"
] | null | null | null | Python_practice.py | ftercero/Election_Analysis | 52ae67ffa1e4e552653d96b42fd388bacfcd2227 | [
"Apache-2.0"
] | null | null | null | #print ("Hello World")
#counties=["Arapahoes","Denver","Jefferson"]
#if counties[1]=='Denver':
# print(counties[1])
#counties = ["Arapahoe","Denver","Jefferson"]
#if "El Paso" in counties:
# print("El Paso is in the list of counties.")
#else:
# print("El Paso is not the list of counties.")
#if "Arapahoe" in counties and "El Paso" in counties:
# print("Arapahoe and El Paso are in the list of counties.")
#else:
# print("Arapahoe or El Paso is not in the list of counties.")
#if "Arapahoe" in counties or "El Paso" in counties:
# print("Arapahoe or El Paso is in the list of counties.")
#else:
# print("Arapahoe and El Paso are not in the list of counties.")
#counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
#for county in counties:
# print(county)
#for county in counties_dict.keys():
# print(county)
#for voters in counties_dict.values():
# print(voters)
#for county in counties_dict:
# print(counties_dict[county])
#for county, voters in counties_dict.items():
#print(f"{county} county has {voters} registered voters.")
# Sample voter-registration data: one dict per county.
voting_data = [
    {"county": "Arapahoe", "registered_voters": 422829},
    {"county": "Denver", "registered_voters": 463353},
    {"county": "Jefferson", "registered_voters": 432438},
]

# Print one summary line per county.
#
# Fixed two bugs in the previous version:
#   1. `for county, voters in voting_data:` tuple-unpacked each DICT, which
#      yields its keys, so both variables received the literal strings
#      'county' and 'registered_voters' instead of the values.
#   2. The f-string used quoted names ({'county'}) which are string literals,
#      so it printed the words themselves rather than the data.
for county_dict in voting_data:
    print(
        f"{county_dict['county']} county has "
        f"{county_dict['registered_voters']:,} registered voters."
    )
| 34.082353 | 87 | 0.70038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,618 | 0.903693 |
0bdc5b9ff9f1b0f990c0c865e7cf1275a25cef14 | 1,600 | py | Python | compss/programming_model/bindings/python/src/pycompss/matlib/algebra/mean.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | [
"Apache-2.0"
] | 3 | 2018-03-05T14:52:22.000Z | 2019-02-08T09:58:24.000Z | compss/programming_model/bindings/python/src/pycompss/matlib/algebra/mean.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | [
"Apache-2.0"
] | null | null | null | compss/programming_model/bindings/python/src/pycompss/matlib/algebra/mean.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2002.2.rc1710017 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCOMPSs Mathematical Library: Algebra: Mean
============================================
This file contains the arithmetic mean algorithm.
"""
from pycompss.api.task import task
from pycompss.functions.reduce import mergeReduce
def _list_lenght(l):
"""
Recursive function to get the size of any list
"""
if l:
if not isinstance(l[0], list):
return 1 + _list_lenght(l[1:])
else:
return _list_lenght(l[0]) + _list_lenght(l[1:])
return 0
@task(returns=float)
def _mean(X, n):
    """PyCOMPSs task: partial mean contribution of one chunk.

    :param X: one chunk (list) of the data
    :param n: total number of elements across all chunks
    :return: sum(X)/n, so that summing all chunk results gives the global mean
    """
    return sum(X)/float(n)
@task(returns=float)
def _reduce_add(x, y):
    """PyCOMPSs task that adds two partial results (merge step for mean)."""
    return x + y


def mean(X, wait=False):
    """
    Arithmetic mean

    :param X: chunked data
    :param wait: if we want to wait for result. Default False
    :return: mean of X.
    """
    n = _list_lenght(X)
    # Each chunk contributes sum(chunk)/n; merging the partials by addition
    # yields the overall mean.  The previous code passed the undefined name
    # ``reduce_add`` here, which raised a NameError at call time.
    result = mergeReduce(_reduce_add, [_mean(x, n) for x in X])
    if wait:
        from pycompss.api.api import compss_wait_on
        result = compss_wait_on(result)
    return result
| 26.229508 | 75 | 0.660625 | 0 | 0 | 0 | 0 | 64 | 0.04 | 0 | 0 | 977 | 0.610625 |
0be08126819855add6dce623e8e5ab3393911667 | 1,407 | py | Python | account/models/addon.py | avwx-rest/account-backend | 4d2a8e47736cfe3421b3e55b47e6770490564149 | [
"MIT"
] | null | null | null | account/models/addon.py | avwx-rest/account-backend | 4d2a8e47736cfe3421b3e55b47e6770490564149 | [
"MIT"
] | null | null | null | account/models/addon.py | avwx-rest/account-backend | 4d2a8e47736cfe3421b3e55b47e6770490564149 | [
"MIT"
] | null | null | null | """
Plan add-on models
"""
# pylint: disable=too-few-public-methods
from typing import Optional
from beanie import Document
from pydantic import BaseModel
class AddonOut(BaseModel):
    """Addon fields returned to the user"""
    # Internal lookup key (see Addon.by_key).
    key: str
    # Human-readable add-on name.
    name: str
    # Human-readable description.
    description: str
class UserAddon(AddonOut):
    """Addon fields stored in the user model"""
    # Stripe price ID resolved for the user's specific plan.
    price_id: str
class Addon(Document, AddonOut):
    """Plan add-on entitlement"""
    # Stripe product identifier backing this add-on.
    product_id: str
    # Maps a plan key to its Stripe price ID; "monthly"/"yearly" entries are
    # used as fallbacks for plans without an exact entry (see to_user).
    price_ids: Optional[dict[str, str]]
    class Collection:
        """DB collection name"""
        name = "addon"
    @classmethod
    async def by_key(cls, key: str) -> "Addon":
        """Get an add-on by internal key"""
        return await cls.find_one(cls.key == key)
    @classmethod
    async def by_product_id(cls, key: str) -> "Addon":
        """Get an add-on by Stripe product ID"""
        return await cls.find_one(cls.product_id == key)
    def to_user(self, plan: str) -> UserAddon:
        """Return a user-specific version of the addon"""
        try:
            price = self.price_ids[plan]
        except (AttributeError, KeyError, TypeError):
            # No exact entry for this plan: fall back to the generic
            # monthly/yearly price based on the plan-name suffix.
            # NOTE(review): if price_ids is None this second lookup raises
            # TypeError uncaught -- confirm callers guarantee price_ids here.
            key = "yearly" if plan.endswith("-year") else "monthly"
            price = self.price_ids[key]
        return UserAddon(
            key=self.key,
            name=self.name,
            description=self.description,
            price_id=price,
        )
| 23.065574 | 67 | 0.608387 | 1,240 | 0.881308 | 0 | 0 | 327 | 0.232409 | 293 | 0.208244 | 370 | 0.262971 |
0be2db2efc5096fbb77f69461080e7c7bd259e7d | 4,434 | py | Python | src/tools/cc-frame-viewer/ccfv.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 9 | 2018-09-21T05:36:12.000Z | 2021-11-15T15:14:36.000Z | tools/cc-frame-viewer/ccfv.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/cc-frame-viewer/ccfv.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2018-11-28T14:54:13.000Z | 2020-07-02T07:36:07.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
def Init():
    """Make the bundled py-chrome-app package importable.

    Appends third_party/py-chrome-app (relative to this file) to sys.path
    so the `import chromeapp` below resolves.
    """
    chromeapp_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     'third_party', 'py-chrome-app'))
    assert os.path.isdir(chromeapp_path)
    sys.path.append(chromeapp_path)


# Must run before the `import chromeapp` statement that follows.
Init()
import chromeapp
from build import parse_deps
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/* WARNING: This file is generated by ccfv.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
/* WARNING: This file is generated by ccfv.py
*
* Do not edit directly.
*/
"""
py_warning_message = """#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. */
#
# WARNING: This file is generated by ccfv.py
#
# Do not edit directly.
#
"""
def _sopen(filename, mode):
if filename != '-':
return open(filename, mode)
return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
def generate_css(input_filenames):
    """Flatten every stylesheet reachable from *input_filenames* into a
    single CSS string, prefixed with the generated-file banner."""
    modules = parse_deps.calc_load_sequence(input_filenames, srcdir)
    chunks = [css_warning_message, '\n']
    chunks.extend("""%s\n""" % sheet.contents
                  for module in modules
                  for sheet in module.style_sheets)
    return ''.join(chunks)
def generate_js(input_filenames):
    """Flatten all modules (and their dependent raw scripts) reachable
    from *input_filenames* into a single JS string.

    Emits the banner, then FLATTENED/FLATTENED_RAW_SCRIPTS registries,
    then each raw script exactly once followed by its module's contents.
    """
    modules = parse_deps.calc_load_sequence(input_filenames, srcdir)

    chunks = [js_warning_message, '\n']
    chunks.append("window.FLATTENED = {};\n")
    chunks.append("window.FLATTENED_RAW_SCRIPTS = {};\n")
    for module in modules:
        chunks.append("window.FLATTENED['%s'] = true;\n" % module.name)
        for raw_script in module.dependent_raw_scripts:
            chunks.append("window.FLATTENED_RAW_SCRIPTS['%s'] = true;\n" %
                          raw_script.name)

    emitted_raw_scripts = set()
    for module in modules:
        for raw_script in module.dependent_raw_scripts:
            # A raw script may be shared by several modules; emit it once.
            if raw_script.name not in emitted_raw_scripts:
                emitted_raw_scripts.add(raw_script.name)
                chunks.append(raw_script.contents)
                chunks.append("\n")
        chunks.append(module.contents)
        chunks.append("\n")
    return ''.join(chunks)
def Main(args):
    """Flatten the viewer's JS/CSS into app/, run the chrome app serving
    the given trace file, and remove the generated files afterwards.

    args: argv without the program name; exactly one positional argument
    (the trace file to load) is required.
    """
    parser = optparse.OptionParser('%prog <filename>')
    parser.add_option('--debug', dest='debug_mode', action='store_true',
                      default=False, help='Enables debugging features')
    options, args = parser.parse_args(args)
    if len(args) != 1:
        parser.error("argument required")
    if not os.path.exists(args[0]):
        parser.error("%s does not exist" % args[0])
    manifest_file = os.path.join(os.path.dirname(__file__),
                                 'app', 'manifest.json')
    app = chromeapp.App('cc-frame-viewer',
                        manifest_file,
                        debug_mode=options.debug_mode)
    def OnLoad(req):
        # Serve the trace file's contents in response to the app's
        # 'load' event.
        with open(args[0], 'r') as f:
            return f.read()
    input_filenames = [os.path.join(srcdir, f)
                       for f in ['base.js', 'model_view.js']]
    view_js_file = os.path.join(os.path.dirname(__file__),
                                'app', 'model_view.js')
    view_css_file = os.path.join(os.path.dirname(__file__),
                                 'app', 'model_view.css')
    # Generate the flattened bundle the app loads.
    with open(view_js_file, 'w') as f:
        f.write(generate_js(input_filenames))
    with open(view_css_file, 'w') as f:
        f.write(generate_css(input_filenames))
    with chromeapp.AppInstance(app, []) as app_instance:
        app_instance.AddListener('load', OnLoad)
        try:
            return app_instance.Run()
        finally:
            # Always remove the generated files, even on error.
            if os.path.exists(view_js_file):
                os.unlink(view_js_file)
            if os.path.exists(view_css_file):
                os.unlink(view_css_file)
if __name__ == "__main__":
    # Forward CLI args (minus the program name) and propagate the exit code.
    sys.exit(Main(sys.argv[1:]))
| 31.671429 | 73 | 0.675011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,434 | 0.32341 |
0be480abda6ccdd7fb50e59653bf3b03a118a7b9 | 3,247 | py | Python | main.py | MarkPopovK/nhl-graph | 9cd19f70d30193bd5aa509ca421c90f9484d8c34 | [
"MIT"
] | null | null | null | main.py | MarkPopovK/nhl-graph | 9cd19f70d30193bd5aa509ca421c90f9484d8c34 | [
"MIT"
] | null | null | null | main.py | MarkPopovK/nhl-graph | 9cd19f70d30193bd5aa509ca421c90f9484d8c34 | [
"MIT"
] | null | null | null | import json
import requests
import dash
import dash_core_components as dcc
import dash_html_components as html
def server_setup(results):
    """Launch a Dash app plotting the given per-team traces.

    results: iterable of plotly trace dicts (one per team), as built by
    main(). Runs the dev server in debug mode (blocking).
    """
    layout = {
        'title': 'NHL Data Visualization',
        'xaxis': {
            'title': 'Games Played',
        },
        'yaxis': {
            'title': 'Points',
        },
        'height': 800,
        'width': 1000,
    }
    figure = {
        'data': list(results),
        'layout': layout,
    }

    app = dash.Dash(__name__)
    app.layout = html.Div(children=[
        html.H1(children='NHL 2018/2019 cumulative points stats'),
        html.Div(children='''
    '''),
        dcc.Graph(
            id='example-graph',
            figure=figure,
        ),
    ])
    app.run_server(debug=True)
def main():
    """Download the 2018/19 NHL schedule, accumulate per-team points per
    games-played, and launch the Dash visualization."""
    # setup the time span
    start_date = '2018-01-01'
    end_date = '2019-01-01'
    nhl_url = f'https://statsapi.web.nhl.com/api/v1/schedule?startDate={start_date}&endDate={end_date}'
    s = requests.Session()
    games = []
    # download the games
    raw_data = s.get(nhl_url).text
    dates = json.loads(raw_data)['dates']
    for date in dates:
        games.extend(date['games'])
    # keep only regular-season games of the 2018/19 season
    games = [game for game in games if all([
        game['season'] == '20182019',
        game['gameType'] == 'R',
    ])]
    # build results = {team_name: {games_played: points}}
    results = {}
    for game in games:
        for team in game['teams']:
            name = game['teams'][team]['team']['name']
            record = game['teams'][team]['leagueRecord']
            # 2 points per win, 1 per overtime loss
            points = int(record['wins']) * 2 + int(record['ot'])
            # NOTE(review): this rebinds the outer `games` list inside the
            # loop; iteration is unaffected (the iterator is already bound)
            # but the name now holds a games-played count.
            games = int(record['wins']) + int(record['losses']) + int(record['ot'])
            results[name] = results.get(name, {})  # creating empty dict for storing results if doesn't exist yet
            results[name][games] = points
    # reformat for plotly into teams = [{'x': [...], 'y': [...], 'name': ...}, ...]
    teams = []
    # points_matrix = {games_played: sorted list of every team's points at
    # that games-played count} -- used below to rank teams per game number
    points_matrix = {}
    for result in results:
        stats = results[result]
        for game_n in stats:
            points_matrix[game_n] = points_matrix.get(game_n, [])
            points_matrix[game_n].append(stats[game_n])
            points_matrix[game_n] = sorted(points_matrix[game_n])
    print(len(points_matrix))
    # emit traces ordered by final points, descending
    for result in sorted(results,
                         key=lambda result: list(results[result].values())[-1],
                         reverse=True):
        game_ns = list(results[result].keys())
        points = list(results[result].values())
        # NOTE(review): `place` (negative standings rank per game) is
        # computed but never used below.
        place = [-(sorted(points_matrix[i] + [point], reverse=True).index(point) + 1) for i, point in
                 enumerate(points, 1)]
        # points = [point-i for i, point in enumerate(points)]
        teams.append({
            'x': game_ns,
            'y': points,
            'name': f'{result} ({points[-1]}/{game_ns[-1]*2} pts = {round(points[-1]/game_ns[-1]/2*100,1)}%)',
            # hidden by default; user enables traces via the legend
            'visible': 'legendonly',
        })
    server_setup(teams)
if __name__ == '__main__':
    # Script entry point: fetch data and start the Dash server.
    main()
| 29.788991 | 113 | 0.513397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 905 | 0.278719 |
0be53f53aa8ef004a05d693483b9d9e80f01a479 | 4,584 | py | Python | python_libs/train_lib.py | rubenIzquierdo/lda_wsd | b4ef1c2276b0eba48efda411ee67dcda25b481b1 | [
"Apache-2.0"
] | 1 | 2020-09-20T09:38:05.000Z | 2020-09-20T09:38:05.000Z | python_libs/train_lib.py | rubenIzquierdo/lda_wsd | b4ef1c2276b0eba48efda411ee67dcda25b481b1 | [
"Apache-2.0"
] | null | null | null | python_libs/train_lib.py | rubenIzquierdo/lda_wsd | b4ef1c2276b0eba48efda411ee67dcda25b481b1 | [
"Apache-2.0"
] | null | null | null | ##############################################
# Author: Ruben Izquierdo Bevia #
# VU University of Amsterdam #
# Mail: ruben.izquierdobevia@vu.nl #
# rubensanvi@gmail.com #
# Webpage: http://rubenizquierdobevia.com #
# Version: 1.0 #
# Modified: 23-mar-2015 #
##############################################
try:
import cPickle as pickler
except:
import pickle as pickler
import time
import os
import glob
from variables import *
from collections import defaultdict
from generate_lda_model import generate_lda_model
def train_sense(list_train_examples, name_fold, sense, options):
    """Train the LDA model for one sense within one fold.

    Delegates to generate_lda_model and returns its exit code.
    """
    return generate_lda_model(list_train_examples, sense, options, name_fold)
def clean_models(this_folder, options):
    """Remove previously generated models: the saved options file plus
    every per-fold file matching the configured model prefix."""
    prefix = options['prefix_models']
    options_path = os.path.join(this_folder, prefix + '#' + OPTIONS_FILENAME)
    if os.path.exists(options_path):
        os.remove(options_path)
    for fold_dir in glob.glob(os.path.join(this_folder, 'fold_*')):
        for model_file in glob.glob(fold_dir + '/' + prefix + '*'):
            os.remove(model_file)
def train_folder_lemma(this_folder,options):
    """Train per-sense LDA models for one lemma folder.

    Reads possible_senses and sense_distribution.txt from this_folder,
    optionally skips training when any sense has fewer than
    options['min_occs'] occurrences, then trains each sense per fold.
    Returns 0 on success, -1 when skipped. (Python 2 source.)
    """
    # Load the possible senses (one whitespace-separated line)
    fd_senses = open(os.path.join(this_folder,'possible_senses'),'r')
    possible_senses = fd_senses.readline().strip().split()
    fd_senses.close()
    start_time = time.time()
    print '%s Training models for %s List of senses: %s' % (time.strftime('%Y-%m-%dT%H:%M:%S%Z'), this_folder, str(possible_senses))
    print '\tOptions: %s' % str(options)
    # Load sense -> frequency pairs
    sense_distribution = {}
    fd_d = open(os.path.join(this_folder,'sense_distribution.txt'),'r')
    for line in fd_d:
        sense, freq = line.strip().split()
        sense_distribution[sense] = int(freq)
    fd_d.close()
    # Skip training entirely if any sense is below the minimum frequency
    process_this = True
    if 'min_occs' in options:
        for sense, freq in sense_distribution.items():
            if freq < options['min_occs']:
                print '\tNot trained because there are only %d occurrences for the sense %s and the minimum was set to %d' % (freq, sense,options['min_occs'] )
                process_this = False
                ret = -1
                break
    if process_this:
        # Save the options next to the models so evaluation can reload them
        fd_opts = open(os.path.join(this_folder,options['prefix_models']+'#'+OPTIONS_FILENAME),'w')
        pickler.dump(options,fd_opts,0)
        fd_opts.close()
        print '\tOption saved to %s' % fd_opts.name
        if len(possible_senses) == 1:
            print '\tIt is a monosemous lemma, nothing to train'
            ret = 0
        else:
            # Train one model per sense in every fold directory
            for name_fold in glob.glob(os.path.join(this_folder,'fold_*')):
                fd_train = open(os.path.join(name_fold,'train_occurences.bin'),'rb')
                train_instances = pickler.load(fd_train)
                fd_train.close()
                print '\tFold %s with %d examples' % (name_fold,len(train_instances))
                # Group the fold's instances by sense
                instances_for_sense = defaultdict(list)
                for naf_filename, term_id, num_sentence, sense in train_instances:
                    instances_for_sense[sense].append((naf_filename, term_id, num_sentence))
                for sense, list_train_examples in instances_for_sense.items():
                    print '\t\t==> Sense %s with %d training examples' % (sense.encode('utf-8'),len(list_train_examples))
                    # NOTE(review): ret_code is ignored; failures in
                    # train_sense are not reflected in the return value.
                    ret_code = train_sense(list_train_examples, name_fold, sense, options)
            ret = 0
    end_time = time.time()
    total_secs = int(end_time - start_time)
    num_min = total_secs/60
    num_secs = total_secs - (num_min*60)
    print '\tTotal time: %d min and %d seconds' % (num_min, num_secs)
    return ret
| 39.86087 | 160 | 0.603839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,633 | 0.356239 |
0be8b81ee3c1560dffccec8e0d9a6d2ccd6cd6c8 | 1,620 | py | Python | pincer/utils/snowflake.py | MithicSpirit/Pincer | 3e5aee5bc228a77caac59e07299d54e558b7f39d | [
"MIT"
] | 1 | 2021-11-16T05:19:26.000Z | 2021-11-16T05:19:26.000Z | pincer/utils/snowflake.py | Seanpm2001-Discord/Pincer | a2c045f85f44712f3257e5cc50b3acacbd1302f9 | [
"MIT"
] | null | null | null | pincer/utils/snowflake.py | Seanpm2001-Discord/Pincer | a2c045f85f44712f3257e5cc50b3acacbd1302f9 | [
"MIT"
] | null | null | null | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
class Snowflake(int):
    """Discord utilizes Twitter's snowflake format for uniquely
    identifiable descriptors (IDs).

    These IDs are guaranteed to be unique across all of Discord,
    except in some unique scenarios in which child objects
    share their parent's ID.

    Because Snowflake IDs are up to 64 bits in size (e.g. a uint64),
    they are always returned as strings in the HTTP API
    to prevent integer overflows in some languages.

    Bit layout (per the Discord API documentation):
    timestamp 63..22, worker ID 21..17, process ID 16..12,
    increment 11..0.
    """

    @classmethod
    def __factory__(cls, string: str) -> Snowflake:
        # Generic factory hook: delegate to the named constructor.
        return cls.from_string(string)

    @classmethod
    def from_string(cls, string: str):
        """Initialize a new Snowflake from a string.

        Parameters
        ----------
        string: :class:`str`
            The snowflake as a string.
        """
        return Snowflake(int(string))

    @property
    def timestamp(self) -> int:
        """:class:`int`: Milliseconds since Discord Epoch,
        the first second of 2015 or 1420070400000
        """
        return self >> 22

    @property
    def worker_id(self) -> int:
        """:class:`int`: Internal worker ID"""
        # 5-bit field (bits 21..17); the previous `% 16` mask dropped
        # the top bit.
        return (self >> 17) % 32

    @property
    def process_id(self) -> int:
        """:class:`int`: Internal process ID"""
        # 5-bit field (bits 16..12); same off-by-one-bit fix as worker_id.
        return (self >> 12) % 32

    @property
    def increment(self) -> int:
        """:class:`int`: For every ID that is generated on that process,
        this number is incremented.
        """
        # 12-bit field (bits 11..0); was `% 2048` (11 bits).
        return self % 4096
| 27.931034 | 72 | 0.621605 | 1,483 | 0.915432 | 0 | 0 | 986 | 0.608642 | 0 | 0 | 996 | 0.614815 |
0bea5b3c06ed6536b18d4ffc6060eb3851bdfaff | 784 | py | Python | examples/squeeze.py | coherentgraphics/python-libcpdf | 4e47a45936443c863b0e34ce2b21d1623a244694 | [
"BSD-3-Clause"
] | 1 | 2021-08-19T06:55:59.000Z | 2021-08-19T06:55:59.000Z | examples/squeeze.py | coherentgraphics/python-libcpdf | 4e47a45936443c863b0e34ce2b21d1623a244694 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T12:53:12.000Z | 2022-03-30T08:34:20.000Z | examples/squeeze.py | coherentgraphics/python-libcpdf | 4e47a45936443c863b0e34ce2b21d1623a244694 | [
"BSD-3-Clause"
] | null | null | null | #Squeeze example
import sys
sys.path.insert(0,'..')
import pycpdflib
#DLL loading depends on your own platform. These are the author's settings.
if sys.platform.startswith('darwin'):
pycpdflib.loadDLL("/Users/john/repos/python-libcpdf/libpycpdf.so")
elif sys.platform.startswith('linux'):
pycpdflib.loadDLL("../libpycpdf.so")
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
os.add_dll_directory("C:\\\\OCaml64/home/JohnWhitington/python-libcpdf/")
pycpdflib.loadDLL("libpycpdf.dll")
#Load file
pdf = pycpdflib.fromFile('../pycpdflibmanual.pdf', '')
#Squeeze it
pycpdflib.squeezeInMemory(pdf)
#Write output. We make sure to use toFileExt, and make object streams.
pycpdflib.toFileExt(pdf, 'squeezed.pdf', False, False, True, True, True)
| 34.086957 | 77 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.492347 |
0bebb327020997966bf6dfa476e0cae9c0df6a36 | 858 | py | Python | src/watcher.py | RHEAGROUP/OCRE-Importing | c1aaf6b0be9beaad171b49b4c8ef1c056476f940 | [
"Apache-2.0"
] | null | null | null | src/watcher.py | RHEAGROUP/OCRE-Importing | c1aaf6b0be9beaad171b49b4c8ef1c056476f940 | [
"Apache-2.0"
] | null | null | null | src/watcher.py | RHEAGROUP/OCRE-Importing | c1aaf6b0be9beaad171b49b4c8ef1c056476f940 | [
"Apache-2.0"
] | null | null | null | import imaplib
import email
from create_orders_from_email import get_email_contents
import time
import sys
# Poll a Gmail inbox every 30s for OCRE order mails and forward each
# text/plain part to the order-creation handler.
# argv[1]/argv[2]: IMAP username and password; argv[3]/argv[4]: API tokens.
with imaplib.IMAP4_SSL(host="imap.gmail.com", port=imaplib.IMAP4_SSL_PORT) as imap_ssl:
    resp_code, response = imap_ssl.login(sys.argv[1], sys.argv[2])
    while True:
        # NOTE(review): readonly=True means the \Seen flag is never set,
        # so the same "UnSeen" matches may be reprocessed every pass --
        # confirm this is intended.
        resp_code, mail_count = imap_ssl.select(mailbox="INBOX", readonly=True)
        resp_code, mails = imap_ssl.search(None, "UnSeen (SUBJECT 'OCRE')")
        # Only process the 10 most recent matching message IDs
        for mail_id in mails[0].decode().split()[-10:]:
            resp_code, mail_data = imap_ssl.fetch(mail_id, '(RFC822)')
            message = email.message_from_bytes(mail_data[0][1])
            for part in message.walk():
                if part.get_content_type() == "text/plain":
                    get_email_contents(email=part.get_payload(), token=sys.argv[3], secret_token=sys.argv[4])
        time.sleep(30)
0beceb8a8dc74b4005985e4a4eb8fb3eac336b2d | 4,314 | py | Python | pytlite/__init__.py | panakuma/pytlite | cbac045fdde0ef1a15d4dabfc12281c1f5be529c | [
"MIT"
] | null | null | null | pytlite/__init__.py | panakuma/pytlite | cbac045fdde0ef1a15d4dabfc12281c1f5be529c | [
"MIT"
] | null | null | null | pytlite/__init__.py | panakuma/pytlite | cbac045fdde0ef1a15d4dabfc12281c1f5be529c | [
"MIT"
] | null | null | null | import socket
import struct
class Patlite(object):
    """Client for a Patlite signal tower reachable over TCP or UDP.

    LED and buzzer states are exposed as properties (red/yellow/green/
    buzzer); assigning to one sends the new state to the device
    immediately while ``auto_update`` is True.

    NOTE(review): the wire I/O compares received bytes against str
    literals ("ACK", "R"), which only works on Python 2 -- confirm the
    target interpreter before porting.
    """
    # When True, property assignments call send_status() immediately.
    auto_update = True

    # Protocol state codes.
    OFF = 0
    BLINK = 0x20
    ON = 0x01
    SHORT = 0x08
    LONG = 0x10
    STATUS_STRING = {
        OFF: "Off", BLINK: "Blink", ON: "On",
        SHORT: "Short", LONG: "Long"
    }

    # Indices into the LED state list.
    RED = 0
    YELLOW = 1
    GREEN = 2
    LED_STRING = ["Red", "Yellow", "Green"]

    # Class-level defaults (instances get their own copies in __init__).
    _led = [0, 0, 0]
    _buzzer = 0
    send = None

    class NAKError(Exception):
        """Raised when the device does not acknowledge a command."""
        pass

    def __init__(self, host, port=10000, proto="TCP", timeout=2):
        """Connect to Patlite Signal Tower"""
        self.host = host
        self.port = port
        self.timeout = timeout
        # Per-instance state. Fix: the class-level `_led` list used to be
        # mutated in place, so every instance shared the same LED state.
        self._led = [0, 0, 0]
        self._buzzer = 0
        if proto.upper() == "TCP":
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((host, port))
            self.send = self._send_tcp
        elif proto.upper() == "UDP":
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.send = self._send_udp
        else:
            # Fix: was `not NotImplementedError(...)`, which evaluated to
            # False and did nothing, leading to an AttributeError on
            # self.sock below instead of a clear error.
            raise NotImplementedError("Protocol '%s' is not supported." % proto)
        self.sock.settimeout(timeout)
        # Get current status
        self.get_status()

    # Implementation of Send
    def _send_tcp(self, data):
        """Send implementation for TCP"""
        self.sock.sendall(data)

    def _send_udp(self, data):
        """Send implementation for UDP"""
        self.sock.sendto(data, (self.host, self.port))

    def close(self):
        """Close Socket"""
        self.sock.close()

    def send_status(self):
        """Send change state command."""
        # Pack the three LED states (one bit-shift per LED) plus the
        # buzzer bits into a single status byte after the 0x57 opcode.
        data = 0
        for i, status in enumerate(self._led):
            data |= (status << i)
        data |= self._buzzer
        self.send(struct.pack("2B", 0x57, data))
        # Recv ACK
        data, addr = self.sock.recvfrom(10)
        if not data[:3] == "ACK":
            raise self.NAKError()

    def get_status(self):
        """Get current status from Patlite"""
        self.send("\x52")
        data, addr = self.sock.recvfrom(10)
        if not data[0] == "R":
            raise self.NAKError()
        data = struct.unpack("B", data[1])[0]
        # Parse LED statuses.
        for i in range(3):
            led = self.OFF
            if (data & (self.ON << i)):
                led = self.ON
            elif (data & (self.BLINK << i)):
                led = self.BLINK
            self._led[i] = led
        # Parse the buzzer status.
        buzzer = self.OFF
        if (data & (self.LONG)):
            buzzer = self.LONG
        elif (data & (self.SHORT)):
            # Fix: this branch previously re-tested self.LONG (copy-paste
            # bug), so a SHORT buzzer state was never detected.
            buzzer = self.SHORT
        self._buzzer = buzzer

    def print_status(self):
        """Print current status."""
        for i, status in enumerate(self._led):
            print ("%7s : %s" % (self.LED_STRING[i], self.STATUS_STRING[status]))
        print ("%7s : %s" % ("Buzzer", self.STATUS_STRING[self.buzzer]))

    def set_led(self, led, value):
        """Change a LED state."""
        self._led[led] = value
        if self.auto_update:
            self.send_status()

    # LED properties
    red = property(lambda self:self._led[self.RED],
                   lambda self, value:self.set_led(self.RED, value))
    green = property(lambda self:self._led[self.GREEN],
                     lambda self, value:self.set_led(self.GREEN, value))
    yellow = property(lambda self:self._led[self.YELLOW],
                      lambda self, value:self.set_led(self.YELLOW, value))

    def set_buzzer(self, value):
        """Change the buzzer state."""
        self._buzzer = value
        if self.auto_update:
            self.send_status()

    # Buzzer property
    buzzer = property(lambda self:self._buzzer,
                      lambda self, value:self.set_buzzer(value))
if __name__ == "__main__":
    # For testing: connect to the tower given on the command line and drop
    # into an interactive console with `p` bound to the client.
    import sys
    host = sys.argv[1]
    if len(sys.argv) >= 3:
        port = int(sys.argv[2])
    else:
        port = 10000
    if len(sys.argv) >= 4:
        proto = sys.argv[3].upper()
    else:
        proto = "TCP"
    p = Patlite(host, port, proto)
    print ("""For examples.
    p.red = p.ON
    p.yellow = p.BLINK
    p.green = p.OFF
    p.buzzer = p.SHORT
    """)
    import code
    code.InteractiveConsole(globals()).interact()
| 25.526627 | 81 | 0.534539 | 3,784 | 0.877144 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.159713 |
0bee95548e6fcafe6a5b00d7403872593616c0ae | 11,892 | py | Python | src/app/todos/routes.py | alexzanderr/metro.digital | 71a58b417f6498808224a6de96578bde76f89c60 | [
"MIT"
] | null | null | null | src/app/todos/routes.py | alexzanderr/metro.digital | 71a58b417f6498808224a6de96578bde76f89c60 | [
"MIT"
] | 1 | 2021-12-16T22:11:25.000Z | 2021-12-16T22:11:25.000Z | src/app/todos/routes.py | alexzanderr/flask_web_app | 71a58b417f6498808224a6de96578bde76f89c60 | [
"MIT"
] | null | null | null | """
# type: ignore
type ignore is to tell LSP-pyright to ignore the line
because something it thinks that there are errors, but actually at runtime there are not
"""
from .validation import validate_password_check
from .validation import validate_email
from .validation import validate_password
from .validation import validate_username
from json import dumps
from flask import render_template
from flask import Blueprint
from flask import request
from flask import url_for
from flask import redirect
# mongo db client stuff
from ..mongodb_client import mongodb
from ..mongodb_client import CollectionInvalid
from ..mongodb_client import ObjectId
from ..mongodb_client import collection_exists
from ..mongodb_client import get_db_name
from ..mongodb_client import collection_create
from ..mongodb_client import get_collection
from ..mongodb_client import create_or_get_collection
from ..routes_utils import json_response
from string import ascii_letters, digits
from random import choice, randint
from datetime import datetime, timedelta
import hashlib
# Blueprint for the HTML todo views, mounted under /todos.
todos = Blueprint(
    "todos",
    __name__,
    url_prefix="/todos",
    # not working
    # template_folder="templates/todos"
)

# document template
# todo = {
#     text: 'yeaaah',
#     timestamp: 1639492801.10111,
#     datetime: '14.12.2021-16:40:01',
#     completed: false
# }
todos_collection_name = "todos"
todos_collection = create_or_get_collection(todos_collection_name)

# document template
# user = {
#     "username": "alexzander",
#     "password": "37djw7dh237dh2yudhja1721hg2",  # hashed
#     "email": "alexxander18360@gmail.com",
#     "creation_timestamp": datetime.timestamp(datetime.now()),
#     "creation_datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
# }
users_collection_name = "users"
users_collection = create_or_get_collection(users_collection_name)

# Unique-index bookkeeping for the users collection: usernames must be
# unique. index_information() returns entries shaped like:
# {'_id_': {'v': 2, 'key': [('_id', 1)]},
#  'username_1': {'v': 2, 'key': [('username', 1)], 'unique': True}}
users_unique_keys = [{
    "name": "username",
    "exists": False
}]
# Mark which of the wanted unique indexes already exist...
for _, value in users_collection.index_information().items():
    for unique_key in value["key"]:
        for users_unique_key in users_unique_keys:
            if unique_key[0] == users_unique_key["name"]:
                users_unique_key["exists"] = True

# ...and create the missing ones.
for users_unique_key in users_unique_keys:
    if not users_unique_key["exists"]:
        users_collection.create_index([
            (users_unique_key["name"], 1)
        ], unique=True)
register_tokens_collection_name = "register_tokens"
register_tokens_collection = create_or_get_collection(register_tokens_collection_name)

# Unique-index bookkeeping for the register-tokens collection: every
# token must be unique.
tokens_unique_keys = [{
    "name": "token",
    "exists": False
}]
# Fix: this block previously inspected users_collection and iterated
# users_unique_keys (copy-paste from the users setup above), so the
# unique "token" index was never actually created.
for _, value in register_tokens_collection.index_information().items():
    for unique_key in value["key"]:
        for tokens_unique_key in tokens_unique_keys:
            if unique_key[0] == tokens_unique_key["name"]:
                tokens_unique_key["exists"] = True

for tokens_unique_key in tokens_unique_keys:
    if not tokens_unique_key["exists"]:
        register_tokens_collection.create_index([
            (tokens_unique_key["name"], 1)
        ], unique=True)
@todos.route("/")
def todos_root():
    """Render the todo list page with every stored todo."""
    # TODO: add authentication with accounts
    collection = get_collection(todos_collection_name)
    return render_template("todos/index.html", todo_list=collection.find())
def hash_password(password: str) -> str:
    """Return the hex-encoded SHA-256 digest of *password*."""
    # sha256 operates on bytes, so encode the text first.
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
def check_hash_of_password(username: str, password: str):
    """Return True when *password* hashes to the stored hash for *username*."""
    stored_user = users_collection.find_one({"username": username})
    return stored_user["password"] == hash_password(password)  # type: ignore
@todos.route("/login", methods=["GET", "POST"])
def todos_login():
    """Serve the login page (GET); the POST branch is not implemented yet."""
    if request.method != "POST":
        # GET
        # TODO: if the user is already authenticated, redirect to /todos
        # instead of re-showing the form.
        return render_template("todos/login.html")
    # POST: TODO authenticate the user against the database and redirect
    # to /todos based on their content. Currently falls through and
    # implicitly returns None, matching the previous behavior.
@todos.route("/mongo/add", methods=["POST"])
def mongo_add():
    """Insert a new (uncompleted) todo from the submitted form, then
    return to the list page."""
    now = datetime.now()
    new_todo = {
        "text": request.form["text"],
        "timestamp": datetime.timestamp(now),
        "datetime": now.strftime("%d.%m.%Y-%H:%M:%S"),
        "completed": False,
    }
    todos_collection.insert_one(new_todo)
    return redirect("/todos")
@todos.route("/mongo/complete/<oid>")
def mongo_complete(oid):
    """Toggle the completed flag of the todo identified by *oid*."""
    todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    toggled = not todo["completed"]  # type: ignore
    todos_collection.update_one(todo, {"$set": {"completed": toggled}})
    return redirect("/todos")
@todos.route("/mongo/delete/<oid>")
def mongo_delete(oid):
    """Delete the todo identified by *oid*, then return to the list page."""
    requested_todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    todos_collection.delete_one(requested_todo)
    # Fix: url_for("todos") referenced an endpoint this blueprint does not
    # define (its endpoints are named "todos.<view>"), which raises
    # BuildError. Redirect by path, consistent with mongo_add/mongo_complete.
    return redirect("/todos")
@todos.route('/mongo/delete/all')
def mongo_delete_all():
    """Delete every todo document, then return to the list page."""
    todos_collection.delete_many({})
    # Fix: url_for('todos') referenced a nonexistent endpoint (blueprint
    # endpoints are "todos.<view>"); redirect by path like the sibling views.
    return redirect('/todos')
# @todos.route("/", methods=['POST'])
# @todos.route("/<component_name>", methods=['POST'])
# def graphql_query(component_name="app"):
# return str(component_name)
# JSON API counterpart of the HTML views above, mounted under /todos/api.
todos_api = Blueprint(
    "todos_api",
    __name__,
    url_prefix="/todos/api")
@todos_api.route("/")
def todos_api_root():
    """Health-check style greeting endpoint."""
    payload = {"message": "salutare"}
    return payload, 200
@todos_api.route("/mongo/add", methods=["POST"])
def todos_api_mongo_add():
    """Create a todo from the request JSON and echo it back (with "oid")."""
    payload = request.get_json()
    now = datetime.now()
    todo = {
        "text": payload["text"],  # type: ignore
        "timestamp": datetime.timestamp(now),
        "datetime": now.strftime("%d.%m.%Y-%H:%M:%S"),
        "completed": False,
    }
    todos_collection.insert_one(todo)
    # insert_one mutates `todo`, adding the generated "_id"; expose it to
    # the client as a plain string under "oid" instead.
    todo["oid"] = str(todo.pop("_id"))
    return json_response(todo, 200)
# PATCH applies a partial modification to a resource -- here, flipping
# the todo's `completed` flag.
@todos_api.route("/mongo/complete/<oid>", methods=["PATCH"])
def todos_api_mongo_complete(oid):
    """Toggle a todo's completed flag and return the updated document."""
    todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    toggled = not todo["completed"]  # type: ignore
    todos_collection.update_one(todo, {"$set": {"completed": toggled}})
    # Shape the response: string "oid" instead of the raw ObjectId "_id".
    todo["oid"] = str(todo.pop("_id"))  # type: ignore
    todo["completed"] = toggled  # type: ignore
    return json_response(todo, 200)  # type: ignore
# TODO: move the oid into the request body instead of the URL so it is
# not exposed.
@todos_api.route("/mongo/delete/<oid>", methods=["DELETE"])
def todos_api_mongo_delete(oid):
    """Delete a todo by its ObjectId and return the deleted document."""
    todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    todos_collection.delete_one(todo)
    todo["oid"] = str(todo.pop("_id"))  # type: ignore
    return json_response(todo, 200)  # type: ignore
def generate_random_register_token():
    """Return a random 30-character alphanumeric registration token."""
    # Use `secrets` rather than `random`: these tokens gate account
    # registration, so they must be unpredictable.
    from secrets import choice as secure_choice
    alphabet = ascii_letters + digits
    return "".join(secure_choice(alphabet) for _ in range(30))
def get_new_register_token():
    """
    Function: get_new_token()
    Summary: gets new token based on whats in the db
    Returns: a freshly generated token that does not yet exist in the
    register-tokens collection
    """
    while True:
        candidate = generate_random_register_token()
        if not register_tokens_collection.find_one({"token": candidate}):
            return candidate
@todos.route("/register", methods=["GET", "POST"])
def todos_register():
    """Registration page (GET) and account-creation endpoint (POST)."""
    if request.method != "POST":
        # GET
        # TODO: if the user is already authenticated, redirect to /todos.
        return render_template("todos/register.html")

    # POST: validate the register token, create the user, and tell the
    # AJAX caller where to navigate next (the redirect happens client-side).
    body: dict = request.get_json()  # type: ignore
    username = body["username"]
    email = body["email"]
    password = body["password"]
    # Read the remaining expected keys so a malformed payload fails loudly
    # with KeyError, as before.
    password_check = body["password_check"]
    remember_me = body["remember_me"]
    register_token = body["register_token"]

    if not register_tokens_collection.find_one({"token": register_token}):
        return {
            "message": "cannot register, register token is not database"
        }, 403

    users_collection.insert_one({
        "username": username,
        "password": hash_password(password),  # hashed
        "email": email,
        "creation_timestamp": datetime.timestamp(datetime.now()),
        "creation_datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
    })
    # TODO: alternatively redirect to the login page, or log the user in
    # automatically after registration.
    return {"message": "success", "redirectTo": "/todos"}, 200
@todos_api.post("/register/validation")
def todos_api_register():
    """
    Function: todos_api_register
    Returns: json with validated input; when everything passes, also
    mints a short-lived register token.
    """
    body: dict = request.get_json()  # type: ignore
    username = body["username"]
    email = body["email"]
    password = body["password"]
    password_check = body["password_check"]
    # Read like the other keys so a malformed payload raises KeyError,
    # though remember_me does not influence validation.
    remember_me = body["remember_me"]

    results = {
        "username": validate_username(username),
        "password": validate_password(password),
        "email": validate_email(email),
        "password_check": validate_password_check(password, password_check),
        "register_token": None,
    }

    all_passed = all(
        outcome["passed"]
        for field, outcome in results.items()
        if field != "register_token"
    )
    if all_passed:
        new_token = get_new_register_token()
        results["register_token"] = new_token
        # Tokens expire two minutes after issuance.
        register_tokens_collection.insert_one({
            "token": new_token,
            "expiration_timestamp": datetime.timestamp(datetime.now() + timedelta(minutes=2))
        })
    # TODO add check for username in database
    return json_response(results, 200)
# return {
# "username": username,
# "email": email,
# "password": password,
# "password_check": password_check,
# "remember_me": remember_me
# }, 200
| 30.414322 | 93 | 0.670114 | 0 | 0 | 0 | 0 | 7,197 | 0.605197 | 0 | 0 | 4,485 | 0.377144 |
0bf068e6001d65484a727f0f24e8ba43b05c99ef | 4,075 | py | Python | src/arcam/fmj/server.py | evanugarte/arcam_fmj | bde06a15f9bb3e612d406198ccc87ca0aa1fff93 | [
"MIT"
] | 5 | 2020-10-26T06:43:02.000Z | 2021-10-01T20:52:25.000Z | src/arcam/fmj/server.py | evanugarte/arcam_fmj | bde06a15f9bb3e612d406198ccc87ca0aa1fff93 | [
"MIT"
] | 5 | 2020-08-28T09:23:17.000Z | 2021-08-21T19:41:50.000Z | src/arcam/fmj/server.py | evanugarte/arcam_fmj | bde06a15f9bb3e612d406198ccc87ca0aa1fff93 | [
"MIT"
] | 3 | 2020-10-05T05:54:05.000Z | 2021-08-18T22:10:15.000Z | """Fake server"""
import asyncio
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
from . import (
AmxDuetRequest,
AmxDuetResponse,
AnswerCodes,
CommandNotRecognised,
CommandPacket,
ResponseException,
ResponsePacket,
read_command,
write_packet
)
_LOGGER = logging.getLogger(__name__)
class Server():
    """Fake Arcam FMJ device: an asyncio TCP server that answers command
    packets via registered handlers and AMX Duet discovery requests with
    a canned device descriptor."""

    def __init__(self, host: str, port: int, model: str) -> None:
        self._server: Optional[asyncio.AbstractServer] = None
        self._host = host
        self._port = port
        # Handlers keyed by (zone, command) or (zone, command, data);
        # the more specific key wins in process_request.
        self._handlers: Dict[Union[Tuple[int, int], Tuple[int, int, bytes]], Callable] = dict()
        # Per-client tasks, tracked so stop() can cancel them.
        self._tasks: List[asyncio.Task] = list()
        # Canned AMX Duet discovery response describing this device.
        self._amxduet = AmxDuetResponse({
            "Device-SDKClass": "Receiver",
            "Device-Make": "ARCAM",
            "Device-Model": model,
            "Device-Revision": "x.y.z"
        })

    async def process(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        """Per-connection entry point: track the task, run the request
        loop, and untrack on disconnect."""
        _LOGGER.debug("Client connected")
        task = asyncio.current_task()
        assert task
        self._tasks.append(task)
        try:
            await self.process_runner(reader, writer)
        finally:
            _LOGGER.debug("Client disconnected")
            self._tasks.remove(task)

    async def process_runner(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        """Read commands until EOF, writing every generated response."""
        while True:
            request = await read_command(reader)
            if request is None:
                # read_command returns None at end of stream.
                _LOGGER.debug("Client disconnected")
                return
            responses = await self.process_request(request)
            _LOGGER.debug("Client command %s -> %s", request, responses)
            for response in responses:
                await write_packet(writer, response)

    async def process_request(self, request: Union[CommandPacket, AmxDuetRequest]):
        """Produce the list of response packets for one request.

        Dispatch order: AMX discovery -> (zn, cc, data) handler ->
        (zn, cc) handler -> CommandNotRecognised. A handler may return
        raw bytes (wrapped in a STATUS_UPDATE response) or a list of
        response packets; ResponseException maps to an error response.
        """
        if isinstance(request, AmxDuetRequest):
            return [self._amxduet]

        handler = self._handlers.get((request.zn, request.cc, request.data))
        if handler is None:
            handler = self._handlers.get((request.zn, request.cc))

        try:
            if handler:
                data = handler(
                    zn=request.zn,
                    cc=request.cc,
                    data=request.data)
                if isinstance(data, bytes):
                    response = [
                        ResponsePacket(
                            request.zn,
                            request.cc,
                            AnswerCodes.STATUS_UPDATE,
                            data)
                    ]
                else:
                    response = data
            else:
                raise CommandNotRecognised()
        except ResponseException as e:
            # Turn the exception's answer code and payload into a reply.
            response = [
                ResponsePacket(
                    request.zn,
                    request.cc,
                    e.ac,
                    e.data or bytes()
                )
            ]
        return response

    def register_handler(self, zn, cc, data, fun):
        """Register *fun* for zone/command, optionally bound to an exact
        data payload (data may be falsy for a catch-all handler)."""
        if data:
            self._handlers[(zn, cc, data)] = fun
        else:
            self._handlers[(zn, cc)] = fun

    async def start(self):
        """Start listening; returns self for chaining."""
        _LOGGER.debug("Starting server")
        self._server = await asyncio.start_server(
            self.process,
            self._host,
            self._port)
        return self

    async def stop(self):
        """Stop listening and cancel any in-flight client tasks."""
        if self._server:
            _LOGGER.debug("Stopping server")
            self._server.close()
            await self._server.wait_closed()
            self._server = None

        if self._tasks:
            _LOGGER.debug("Cancelling clients %s", self._tasks)
            for task in self._tasks:
                task.cancel()
            await asyncio.wait(self._tasks)
class ServerContext():
    """Async context manager that runs a :class:`Server` for the duration
    of an ``async with`` block: starts it on entry, stops it on exit."""
    def __init__(self, server: Server):
        self._server = server
    async def __aenter__(self):
        # Note: yields None rather than the server; keep your own reference.
        await self._server.start()
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self._server.stop()
| 30.639098 | 95 | 0.543804 | 3,719 | 0.912638 | 0 | 0 | 0 | 0 | 2,864 | 0.702822 | 244 | 0.059877 |
0bf08ab02b380ff1463f83075b0e9055842f3b57 | 13,810 | py | Python | qiskit_optimization/converters/linear_inequality_to_penalty.py | X-Libor/qiskit-optimization | f2a92538d883ebc0f78c156ab6f31710e9b1d14f | [
"Apache-2.0"
] | 109 | 2021-01-20T09:37:26.000Z | 2022-03-30T08:13:53.000Z | qiskit_optimization/converters/linear_inequality_to_penalty.py | X-Libor/qiskit-optimization | f2a92538d883ebc0f78c156ab6f31710e9b1d14f | [
"Apache-2.0"
] | 216 | 2021-01-20T10:31:37.000Z | 2022-03-31T15:16:13.000Z | qiskit_optimization/converters/linear_inequality_to_penalty.py | X-Libor/qiskit-optimization | f2a92538d883ebc0f78c156ab6f31710e9b1d14f | [
"Apache-2.0"
] | 69 | 2021-01-19T21:43:18.000Z | 2022-03-24T22:13:01.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Converter to convert a problem with inequality constraints to unconstrained with penalty terms."""
import logging
from typing import Optional, Union, Tuple, List, Dict
import numpy as np
from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.constraint import Constraint, ConstraintSense
from ..problems.quadratic_objective import QuadraticObjective
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable
logger = logging.getLogger(__name__)
class LinearInequalityToPenalty(QuadraticProgramConverter):
    r"""Convert linear inequality constraints to penalty terms of the objective function.

    There are some linear constraints which do not require slack variables to
    construct penalty terms [1]. This class supports the following inequality constraints.

    .. math::

        \begin{array}{}
        \text { Inequality constraint } & & \text { Penalty term } \\
        x \leq y & \rightarrow  & P(x-x y) \\
        x \geq y & \rightarrow  & P(y-x y) \\
        \sum_{i=1}^n x_i \leq 1, n \geq 2  & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
        \sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow &  P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
        \end{array}

    Note that x, y, z and :math:`x_i` are binary variables, and P is a penalty factor,
    where the value of P is automatically determined or supplied by users.

    If constraints match with any of the patterns, they are converted into penalty terms and added
    to the objective function. Otherwise, constraints are kept as is.

    References:
        [1]: Fred Glover, et al. (2019),
             A Tutorial on Formulating and Using QUBO Models,
             `arXiv:1811.11538 <https://arxiv.org/abs/1811.11538>`_.
    """

    def __init__(self, penalty: Optional[float] = None) -> None:
        """
        Args:
            penalty: Penalty factor to scale equality constraints that are added to objective.
                If None is passed, a penalty factor will be automatically calculated on
                every conversion.
        """
        self._src_num_vars: Optional[int] = None
        self._dst: Optional[QuadraticProgram] = None
        self._penalty: Optional[float] = penalty
        # Remember whether the user supplied a penalty; if not, one is
        # recomputed on every convert() call.
        self._should_define_penalty: bool = penalty is None

    def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
        r"""Convert inequality constraints into penalty terms of the objective function.

        This method converts the following patterns where x, y, and :math:`x_i` are binary
        variables and P is a penalty factor.

        .. math::

            \begin{array}{}
            \text { Inequality constraint } & & \text { Penalty term } \\
            x \leq y & \rightarrow  & P(x-x y) \\
            x \geq y & \rightarrow  & P(y-x y) \\
            \sum_{i=1}^n x_i \leq 1, n \geq 2  & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
            \sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow &  P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
            \end{array}

        Args:
            problem: The problem to be solved.

        Returns:
            The converted problem

        Raises:
            QiskitOptimizationError: If an unsupported-type variable exists.
        """
        # create empty QuadraticProgram model
        self._src_num_vars = problem.get_num_vars()
        self._dst = QuadraticProgram(name=problem.name)
        # If no penalty was given, set the penalty coefficient by _auto_define_penalty()
        if self._should_define_penalty:
            penalty = self._auto_define_penalty(problem)
        else:
            penalty = self._penalty
        # Set variables
        for x in problem.variables:
            if x.vartype == Variable.Type.CONTINUOUS:
                self._dst.continuous_var(x.lowerbound, x.upperbound, x.name)
            elif x.vartype == Variable.Type.BINARY:
                self._dst.binary_var(x.name)
            elif x.vartype == Variable.Type.INTEGER:
                self._dst.integer_var(x.lowerbound, x.upperbound, x.name)
            else:
                raise QiskitOptimizationError(f"Unsupported vartype: {x.vartype}")
        # get original objective terms
        offset = problem.objective.constant
        linear = problem.objective.linear.to_dict()
        quadratic = problem.objective.quadratic.to_dict()
        sense = problem.objective.sense.value
        # convert linear constraints into penalty terms
        for constraint in problem.linear_constraints:
            # special constraint check function here
            if not self._is_matched_constraint(problem, constraint):
                # Constraint does not match any supported pattern: keep it as-is.
                self._dst.linear_constraint(
                    constraint.linear.coefficients,
                    constraint.sense,
                    constraint.rhs,
                    constraint.name,
                )
                continue
            # varmap maps the local 0-based indices used in the conversion
            # arrays back to the problem's variable indices.
            conv_offset, conv_linear, conv_quadratic, varmap = self._conversion_table(constraint)
            # constant part
            offset += sense * penalty * conv_offset
            # linear parts of penalty
            for j, j_2 in varmap.items():
                # if j already exists in the linear terms dic, add a penalty term
                # into existing value else create new key and value in the linear_term dict
                if conv_linear[j] != 0:
                    linear[j_2] = linear.get(j_2, 0.0) + sense * penalty * conv_linear[j]
            # quadratic parts of penalty
            for j, j_2 in varmap.items():
                for k in range(j, len(varmap)):
                    # if j and k already exist in the quadratic terms dict,
                    # add a penalty term into existing value
                    # else create new key and value in the quadratic term dict
                    if conv_quadratic[j][k] != 0:
                        tup = (j_2, varmap[k])
                        quadratic[tup] = (
                            quadratic.get(tup, 0.0) + sense * penalty * conv_quadratic[j][k]
                        )
        # Copy quadratic_constraints
        for quadratic_constraint in problem.quadratic_constraints:
            self._dst.quadratic_constraint(
                quadratic_constraint.linear.coefficients,
                quadratic_constraint.quadratic.coefficients,
                quadratic_constraint.sense,
                quadratic_constraint.rhs,
                quadratic_constraint.name,
            )
        if problem.objective.sense == QuadraticObjective.Sense.MINIMIZE:
            self._dst.minimize(offset, linear, quadratic)
        else:
            self._dst.maximize(offset, linear, quadratic)
        # Update the penalty to the one just used
        self._penalty = penalty
        return self._dst

    @staticmethod
    def _conversion_table(
        constraint,
    ) -> Tuple[int, np.ndarray, np.ndarray, Dict[int, int]]:
        """Construct conversion matrix for special constraint.

        Returns:
            A tuple (offset, linear, quadratic, varmap) used by convert() to
            build the penalty term: a constant offset, per-variable linear
            coefficients, an upper-triangular quadratic coefficient matrix,
            and a map from local 0-based indices to problem variable indices.

        Raises:
            QiskitOptimizationError: if the constraint is invalid.
        """
        vars_dict = constraint.linear.to_dict()
        coeffs = list(vars_dict.values())
        varmap = dict(enumerate(vars_dict.keys()))
        rhs = constraint.rhs
        sense = constraint.sense
        num_vars = len(vars_dict)
        # initialize return values, these are used for converted offset, linear
        # and quadratic terms
        offset = 0
        linear = np.zeros(num_vars, dtype=int)
        quadratic = np.zeros((num_vars, num_vars), dtype=int)
        # rhs = num_vars - 1 correspond to multiple variable with >= n - 1 case.
        if sense == ConstraintSense.GE and rhs == num_vars - 1:
            # x_1 + ... + x_n >= n - 1
            # The number of offset is combination ( nC2 )
            offset = num_vars * (num_vars - 1) // 2
            linear = np.full(num_vars, 1 - num_vars, dtype=int)
            quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
        elif sense == ConstraintSense.LE and rhs == 1:
            # x_1 + ... + x_n <= 1
            quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
        elif rhs == 0:
            # Pairwise case (x <= y or x >= y): exactly two variables expected.
            if num_vars != 2:
                raise QiskitOptimizationError(
                    f"Internal error: invalid number of variables {num_vars} {constraint.name}"
                )
            quadratic = np.array([[0, -1], [0, 0]])
            if sense == ConstraintSense.GE:
                # x >= y case
                if coeffs[0] < 0.0:
                    linear[0] = 1
                else:
                    linear[1] = 1
            elif sense == ConstraintSense.LE:
                # x <= y case
                if coeffs[0] > 0.0:
                    linear[0] = 1
                else:
                    linear[1] = 1
        else:
            raise QiskitOptimizationError(f"Internal error: invalid constraint {constraint.name}")
        return offset, linear, quadratic, varmap

    @staticmethod
    def _is_matched_constraint(problem, constraint) -> bool:
        """Determine if constraint is special or not.

        Returns:
            True: when constraint is special
            False: when constraint is not special
        """
        params = constraint.linear.to_dict()
        num_vars = len(params)
        rhs = constraint.rhs
        sense = constraint.sense
        coeff_array = np.array(list(params.values()))
        # Binary parameter? Only all-binary constraints qualify.
        if any(problem.variables[i].vartype != Variable.Type.BINARY for i in params.keys()):
            return False
        if num_vars == 2 and rhs == 0:
            if sense in (Constraint.Sense.LE, Constraint.Sense.GE):
                # x-y<=0
                # x-y>=0
                return coeff_array.min() == -1.0 and coeff_array.max() == 1.0
        elif num_vars >= 2:
            if sense == Constraint.Sense.LE and rhs == 1:
                if all(i == 1 for i in params.values()):
                    # x1+x2+...<=1
                    return True
            elif sense == Constraint.Sense.GE and rhs == num_vars - 1:
                if all(i == 1 for i in params.values()):
                    # x1+x2+...>=n-1
                    return True
        return False

    @staticmethod
    def _auto_define_penalty(problem) -> float:
        """Automatically define the penalty coefficient.

        Returns:
            Return the minimum valid penalty factor calculated
            from the upper bound and the lower bound of the objective function.
            If a constraint has a float coefficient,
            return the default value for the penalty factor.
        """
        default_penalty = 1e5
        # Check coefficients of constraints.
        # If a constraint has a float coefficient, return the default value for the penalty factor.
        terms = []
        for constraint in problem.linear_constraints:
            terms.append(constraint.rhs)
            terms.extend(constraint.linear.to_array().tolist())
        if any(isinstance(term, float) and not term.is_integer() for term in terms):
            logger.warning(
                "Warning: Using %f for the penalty coefficient because "
                "a float coefficient exists in constraints. \n"
                "The value could be too small. "
                "If so, set the penalty coefficient manually.",
                default_penalty,
            )
            return default_penalty
        # Penalty strictly larger than the objective's possible variation.
        lin_b = problem.objective.linear.bounds
        quad_b = problem.objective.quadratic.bounds
        return 1.0 + (lin_b.upperbound - lin_b.lowerbound) + (quad_b.upperbound - quad_b.lowerbound)

    def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
        """Convert the result of the converted problem back to that of the original problem

        Args:
            x: The result of the converted problem or the given result in case of FAILURE.

        Returns:
            The result of the original problem.

        Raises:
            QiskitOptimizationError: if the number of variables in the result differs from
                                     that of the original problem.
        """
        if len(x) != self._src_num_vars:
            raise QiskitOptimizationError(
                f"The number of variables in the passed result ({len(x)}) differs from "
                f"that of the original problem ({self._src_num_vars})."
            )
        return np.asarray(x)

    @property
    def penalty(self) -> Optional[float]:
        """Returns the penalty factor used in conversion.

        Returns:
            The penalty factor used in conversion.
        """
        return self._penalty

    @penalty.setter
    def penalty(self, penalty: Optional[float]) -> None:
        """Set a new penalty factor.

        Args:
            penalty: The new penalty factor.
                     If None is passed, a penalty factor will be automatically calculated
                     on every conversion.
        """
        self._penalty = penalty
        self._should_define_penalty = penalty is None
| 38.683473 | 105 | 0.590949 | 12,753 | 0.923461 | 0 | 0 | 5,605 | 0.405865 | 0 | 0 | 6,123 | 0.443374 |
0bf0a581d088ce393c1930059e7c765bc7897ac7 | 581 | py | Python | PHASE_2/Application_SourceCode/backend/prediction_page.py | vicinx3/disease-outbreak | 035e78875c374e2cdbd4720a4f2ed1370f63a88c | [
"MIT"
] | null | null | null | PHASE_2/Application_SourceCode/backend/prediction_page.py | vicinx3/disease-outbreak | 035e78875c374e2cdbd4720a4f2ed1370f63a88c | [
"MIT"
] | null | null | null | PHASE_2/Application_SourceCode/backend/prediction_page.py | vicinx3/disease-outbreak | 035e78875c374e2cdbd4720a4f2ed1370f63a88c | [
"MIT"
] | null | null | null | from flask import Blueprint, abort, request, jsonify
from prediction_utils import *
prediction_page = Blueprint('prediction_page', __name__)


@prediction_page.route('/map', methods=['GET'])
def get_map_data():
    """Return outbreak data for the map view.

    Reads ``offset`` (required, integer) plus optional ``country`` and
    ``disease`` filters from the query string and delegates to
    get_outbreaks_by_country (see prediction_utils).
    """
    offset = int(request.args['offset'])
    # MultiDict.get with a default is equivalent to the membership-test
    # ternary: missing keys fall back to the empty string.
    country = request.args.get('country', '')
    disease = request.args.get('disease', '')
    return jsonify(get_outbreaks_by_country(offset, country, disease))
@prediction_page.route('/table', methods=['GET'])
def get_table_data():
    """Return all outbreak records for the table view (see prediction_utils)."""
    outbreaks = get_outbreaks()
    return jsonify(outbreaks)
0bf1df4821a7ee776dde7f0d63b87186e9b1ce95 | 209 | py | Python | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/draugiem/urls.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 6,342 | 2015-01-01T07:40:30.000Z | 2022-03-31T04:18:30.000Z | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/draugiem/urls.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 2,198 | 2015-01-02T15:17:45.000Z | 2022-03-28T10:20:43.000Z | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/draugiem/urls.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 2,928 | 2015-01-01T10:44:13.000Z | 2022-03-31T03:20:16.000Z | from django.urls import path
from . import views
# URL routes for the Draugiem social-account provider: the login redirect
# and the post-authentication callback, both handled in views.py.
urlpatterns = [
    path("draugiem/login/", views.login, name="draugiem_login"),
    path("draugiem/callback/", views.callback, name="draugiem_callback"),
]
| 20.9 | 73 | 0.712919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.344498 |
0bf22a138d39df7cd40996f49fda96cf2eac2037 | 26,174 | py | Python | Python/venv/lib/python3.7/site-packages/IPython/core/inputtransformer2.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | Python/venv/lib/python3.7/site-packages/IPython/core/inputtransformer2.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | Python/venv/lib/python3.7/site-packages/IPython/core/inputtransformer2.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | """Input transformer machinery to support IPython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
deprecated in 7.0.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from codeop import compile_command
import re
import tokenize
from typing import List, Tuple, Union
import warnings
_indent_re = re.compile(r'^[ \t]+')

def leading_indent(lines):
    """Remove leading indentation.

    If the first line starts with spaces or tabs, the same whitespace is
    removed from every following line that shares it; other lines are left
    untouched.
    """
    if not lines:
        return lines
    match = _indent_re.match(lines[0])
    if match is None:
        return lines
    prefix = match.group(0)
    width = len(prefix)
    dedented = []
    for line in lines:
        dedented.append(line[width:] if line.startswith(prefix) else line)
    return dedented
class PromptStripper:
    """Remove matching input prompts from a block of input.

    Parameters
    ----------
    prompt_re : regular expression
        Matches any input prompt, including continuation prompts
        (e.g. ``...``).
    initial_re : regular expression, optional
        Matches only the initial prompt, not continuations. Defaults to
        *prompt_re*. Used mainly for plain Python prompts (``>>>``), where
        the continuation ``...`` is itself valid Python and must not be
        stripped from the first line.

    Notes
    -----
    Only the first line is tested against *initial_re*. If a prompt is
    found on either of the first two lines, prompts are stripped from the
    whole block; otherwise the input is returned unchanged.
    """
    def __init__(self, prompt_re, initial_re=None):
        self.prompt_re = prompt_re
        self.initial_re = initial_re if initial_re is not None else prompt_re

    def _strip(self, lines):
        stripped = []
        for line in lines:
            stripped.append(self.prompt_re.sub('', line, count=1))
        return stripped

    def __call__(self, lines):
        if not lines:
            return lines
        first_has_prompt = bool(self.initial_re.match(lines[0]))
        second_has_prompt = len(lines) > 1 and bool(self.prompt_re.match(lines[1]))
        if first_has_prompt or second_has_prompt:
            return self._strip(lines)
        return lines
# Strips standard Python REPL prompts: ">>> " on the first line,
# "... " continuations elsewhere.
classic_prompt = PromptStripper(
    prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
    initial_re=re.compile(r'^>>>( |$)')
)

# Strips IPython prompts: "In [N]: " and "   ...: " continuations.
ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
def cell_magic(lines):
    """Convert a cell starting with ``%%magic`` into a ``run_cell_magic`` call.

    The first line supplies the magic name and its argument line; all
    following lines form the cell body. Cells not starting with ``%%``, and
    ``%%magic?`` help requests (handled by help_end), pass through unchanged.
    """
    if not lines or not lines[0].startswith('%%'):
        return lines
    if re.match(r'%%\w+\?', lines[0]):
        # This case will be handled by help_end
        return lines
    # Use rstrip() rather than slicing off the last character: the first line
    # normally ends with '\n', but the final line of a cell may not, in which
    # case [2:-1] would silently eat the last character of the arguments.
    magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
    body = ''.join(lines[1:])
    return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
            % (magic_name, first_line, body)]
def _find_assign_op(token_line) -> Union[int, None]:
    """Get the index of the first assignment in the line ('=' not inside brackets)

    Note: We don't try to support multiple special assignment (a = b = %foo)
    """
    depth = 0
    for index, tok in enumerate(token_line):
        text = tok.string
        if text == '=':
            if depth == 0:
                return index
        elif text in {'(', '[', '{'}:
            depth += 1
        elif text in {')', ']', '}'}:
            if depth > 0:
                depth -= 1
    return None
def find_end_of_continued_line(lines, start_line: int):
    """Find the last line of a line explicitly extended using backslashes.

    Uses 0-indexed line numbers. Note that if the very last line ends in a
    backslash the returned index is ``len(lines)`` (one past the end), which
    matches how callers slice with it.
    """
    current = start_line
    n_lines = len(lines)
    while lines[current].endswith('\\\n'):
        current += 1
        if current == n_lines:
            break
    return current
def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
    r"""Assemble a single line from multiple continued line pieces.

    Continued lines are lines ending in ``\``, plus the line after the last
    ``\``. The pieces from *start* (a 0-indexed (lineno, colno) pair) up to
    and including *end_line* are joined with single spaces, stripping the
    ``\``+newline from each piece and the newline from the last.

    Used to allow ``%magic`` and ``!system`` commands to be continued over
    multiple lines.
    """
    first_row, first_col = start
    pieces = [lines[first_row][first_col:]]
    pieces.extend(lines[first_row + 1:end_line + 1])
    # Every piece but the last ends in backslash+newline; the last in newline.
    assembled = [piece[:-2] for piece in pieces[:-1]]
    assembled.append(pieces[-1][:-1])
    return ' '.join(assembled)
class TokenTransformBase:
    """Base class for transformations which examine tokens.

    Special syntax must not be transformed inside strings or comments, which
    is hard to do reliably with regexes. Instead the code is tokenised as
    Python and the special syntax recognised in the tokens. Because IPython
    special syntax is not valid Python, tokenising may go wrong after it
    starts — so subclasses find and transform *one* instance of special
    syntax at a time, and the tokens are regenerated afterwards to find the
    next one.

    Subclasses implement one class method (find) and one regular method
    (transform). The priority attribute selects which transformation wins
    when several match at the same place; lower numbers win. This lets
    "%magic?" become a help call rather than a magic call.
    """
    # Lower numbers -> higher priority (for matches in the same location)
    priority = 10

    def sortby(self):
        return self.start_line, self.start_col, self.priority

    def __init__(self, start):
        line_no, col_no = start
        self.start_line = line_no - 1  # Shift from 1-index to 0-index
        self.start_col = col_no

    @classmethod
    def find(cls, tokens_by_line):
        """Find one instance of special syntax in the provided tokens.

        Tokens are grouped into logical lines for convenience,
        so it is easy to e.g. look at the first token of each line.

        *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.

        This should return an instance of its class, pointing to the start
        position it has found, or None if it found no match.
        """
        raise NotImplementedError

    def transform(self, lines: List[str]):
        """Transform one instance of special syntax found by ``find()``

        Takes a list of strings representing physical lines,
        returns a similar list of transformed lines.
        """
        raise NotImplementedError
class MagicAssign(TokenTransformBase):
    """Transformer for assignments from magics (a = %foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first magic assignment (a = %foo) in the cell.
        """
        for line in tokens_by_line:
            ix = _find_assign_op(line)
            if ix is None or len(line) < ix + 2:
                continue
            if line[ix + 1].string == '%' and line[ix + 2].type == tokenize.NAME:
                return cls(line[ix + 1].start)

    def transform(self, lines: List[str]):
        """Transform a magic assignment found by the ``find()`` classmethod.
        """
        row, col = self.start_line, self.start_col
        end_row = find_end_of_continued_line(lines, row)
        rhs = assemble_continued_line(lines, (row, col), end_row)
        assert rhs.startswith('%'), rhs
        magic_name, _, args = rhs[1:].partition(' ')
        # Rebuild the assignment line with a run_line_magic call on the RHS.
        call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
        new_line = lines[row][:col] + call + '\n'
        return lines[:row] + [new_line] + lines[end_row + 1:]
class SystemAssign(TokenTransformBase):
    """Transformer for assignments from system commands (a = !foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first system assignment (a = !foo) in the cell.
        """
        for line in tokens_by_line:
            ix = _find_assign_op(line)
            if ix is None or len(line) < ix + 2:
                continue
            if line[ix].line.strip().startswith('='):
                continue
            if line[ix + 1].type != tokenize.ERRORTOKEN:
                continue
            # Scan the run of error tokens after '=' for the '!', skipping
            # over whitespace tokens.
            pos = ix + 1
            while pos < len(line) and line[pos].type == tokenize.ERRORTOKEN:
                tok_str = line[pos].string
                if tok_str == '!':
                    return cls(line[pos].start)
                if not tok_str.isspace():
                    break
                pos += 1

    def transform(self, lines: List[str]):
        """Transform a system assignment found by the ``find()`` classmethod.
        """
        row, col = self.start_line, self.start_col
        end_row = find_end_of_continued_line(lines, row)
        rhs = assemble_continued_line(lines, (row, col), end_row)
        assert rhs.startswith('!'), rhs
        # Rebuild the assignment line with a getoutput call on the RHS.
        call = "get_ipython().getoutput({!r})".format(rhs[1:])
        new_line = lines[row][:col] + call + '\n'
        return lines[:row] + [new_line] + lines[end_row + 1:]
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.

ESC_SHELL = '!'  # Send line to underlying system shell
ESC_SH_CAP = '!!'  # Send line to system shell and capture output
ESC_HELP = '?'  # Find information about object
ESC_HELP2 = '??'  # Find extra-detailed information about object
ESC_MAGIC = '%'  # Call magic function
ESC_MAGIC2 = '%%'  # Call cell-magic function
ESC_QUOTE = ','  # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';'  # Quote all args as a single string, call
ESC_PAREN = '/'  # Call first argument with rest of line as arguments

ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'}  # %% (cell magic) is handled separately

def _make_help_call(target, esc, next_input=None):
    """Prepares a pinfo(2)/psearch call from a target name and the escape
    (i.e. ? or ??)"""
    if esc == '??':
        method = 'pinfo2'
    elif '*' in target:
        method = 'psearch'
    else:
        method = 'pinfo'
    arg = " ".join([method, target])
    # Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
    t_magic_name, _, t_magic_arg_s = arg.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    if next_input is None:
        return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
    return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
        (next_input, t_magic_name, t_magic_arg_s)

def _tr_help(content):
    """Translate lines escaped with: ?

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if content:
        return _make_help_call(content, '?')
    return 'get_ipython().show_usage()'

def _tr_help2(content):
    """Translate lines escaped with: ??

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if content:
        return _make_help_call(content, '??')
    return 'get_ipython().show_usage()'

def _tr_magic(content):
    "Translate lines escaped with a percent sign: %"
    magic_name, _, magic_args = content.partition(' ')
    return 'get_ipython().run_line_magic(%r, %r)' % (magic_name, magic_args)

def _tr_quote(content):
    "Translate lines escaped with a comma: ,"
    fn, _, rest = content.partition(' ')
    quoted_args = '", "'.join(rest.split())
    return '%s("%s")' % (fn, quoted_args)

def _tr_quote2(content):
    "Translate lines escaped with a semicolon: ;"
    fn, _, rest = content.partition(' ')
    return '%s("%s")' % (fn, rest)

def _tr_paren(content):
    "Translate lines escaped with a slash: /"
    fn, _, rest = content.partition(' ')
    return '%s(%s)' % (fn, ", ".join(rest.split()))

# Dispatch table: escape sequence -> translator producing Python source.
tr = {
    ESC_SHELL: 'get_ipython().system({!r})'.format,
    ESC_SH_CAP: 'get_ipython().getoutput({!r})'.format,
    ESC_HELP: _tr_help,
    ESC_HELP2: _tr_help2,
    ESC_MAGIC: _tr_magic,
    ESC_QUOTE: _tr_quote,
    ESC_QUOTE2: _tr_quote2,
    ESC_PAREN: _tr_paren,
}
class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell.
        """
        for line in tokens_by_line:
            if not line:
                continue
            # Skip leading INDENT/DEDENT tokens so escapes work inside
            # indented blocks.
            ix = 0
            ll = len(line)
            while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)

    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        # Preserve the original indentation in front of the generated call.
        indent = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        line = assemble_continued_line(lines, (start_line, start_col), end_line)
        # Two-character escapes (!!, ??) take precedence over single ones.
        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]
        # Translate via the module-level tr dispatch table; unknown escapes
        # produce an empty line.
        if escape in tr:
            call = tr[escape](content)
        else:
            call = ''
        lines_before = lines[:start_line]
        new_line = indent + call + '\n'
        lines_after = lines[end_line + 1:]
        return lines_before + [new_line] + lines_after
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5

    def __init__(self, start, q_locn):
        # *start* is where the statement begins; *q_locn* is where the '?'
        # token sits (both 1-indexed (line, col) pairs from the tokenizer).
        super().__init__(start)
        self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)

    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.
        """
        # Join the physical lines from the statement start through the line
        # holding the '?', then split off the leading indentation.
        piece = ''.join(lines[self.start_line:self.q_line+1])
        indent, content = piece[:self.start_col], piece[self.start_col:]
        lines_before = lines[:self.start_line]
        lines_after = lines[self.q_line + 1:]
        m = _help_end_re.search(content)
        if not m:
            raise SyntaxError(content)
        # NOTE(review): this assert is unreachable — the raise above already
        # covers m being None.
        assert m is not None, content
        target = m.group(1)
        esc = m.group(3)
        # If we're mid-command, put it back on the next prompt for the user.
        next_input = None
        if (not lines_before) and (not lines_after) \
                and content.strip() != m.group(0):
            next_input = content.rstrip('?\n')
        call = _make_help_call(target, esc, next_input=next_input)
        new_line = indent + call + '\n'
        return lines_before + [new_line] + lines_after
def make_tokens_by_line(lines: List[str]) -> List[List[tokenize.TokenInfo]]:
    """Tokenize a series of lines and group tokens by logical line.

    The tokens for a multiline Python string or expression are grouped as one
    line. All lines except the last should keep their line ending ('\\n',
    '\\r\\n') for this to work properly. Use ``.splitlines(keepends=True)``
    when passing a block of text to this function.
    """
    # NL tokens are used inside multiline expressions, but also after blank
    # lines or comments. This is intentional - see https://bugs.python.org/issue17061
    # We want to group the former case together but split the latter, so we
    # track parentheses level, similar to the internals of tokenize.
    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
    tokens_by_line = [[]]
    if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
        warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
    parenlev = 0
    try:
        for token in tokenize.generate_tokens(iter(lines).__next__):
            tokens_by_line[-1].append(token)
            # A NEWLINE ends a logical line; an NL only does so outside
            # brackets (i.e. after blank lines/comments, not continuations).
            if (token.type == NEWLINE) \
                    or ((token.type == NL) and (parenlev <= 0)):
                tokens_by_line.append([])
            elif token.string in {'(', '[', '{'}:
                parenlev += 1
            elif token.string in {')', ']', '}'}:
                if parenlev > 0:
                    parenlev -= 1
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass
    # Drop a trailing empty group left by the final NEWLINE/NL.
    if not tokens_by_line[-1]:
        tokens_by_line.pop()
    return tokens_by_line
def show_linewise_tokens(s: str):
    """Debug helper: print the grouped tokens for *s*, one group per line."""
    # A trailing newline keeps tokenization consistent with real input.
    if not s.endswith('\n'):
        s = s + '\n'
    for group in make_tokens_by_line(s.splitlines(keepends=True)):
        print("Line -------")
        for tok in group:
            print("    ", tok)
# Safety valve for do_token_transforms(): arbitrary cap on how many times the
# token transforms may be re-applied before we assume an infinite loop.
TRANSFORM_LOOP_LIMIT = 500
class TransformerManager:
    """Applies various transformations to a cell or code block.

    The key methods for external use are ``transform_cell()``
    and ``check_complete()``.
    """
    def __init__(self):
        # Transforms that strip prompts/indentation copied from elsewhere.
        self.cleanup_transforms = [
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        # Whole-cell, line-based transforms (cell magics).
        self.line_transforms = [
            cell_magic,
        ]
        # Token-based transforms for IPython special syntax; each class has a
        # .find() classmethod that locates the first occurrence in a cell.
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]
    def do_one_token_transform(self, lines):
        """Find and run the transform earliest in the code.
        Returns (changed, lines).
        This method is called repeatedly until changed is False, indicating
        that all available transformations are complete.
        The tokens following IPython special syntax might not be valid, so
        the transformed code is retokenised every time to identify the next
        piece of special syntax. Hopefully long code cells are mostly valid
        Python, not using lots of IPython special syntax, so this shouldn't be
        a performance issue.
        """
        tokens_by_line = make_tokens_by_line(lines)
        candidates = []
        for transformer_cls in self.token_transformers:
            transformer = transformer_cls.find(tokens_by_line)
            if transformer:
                candidates.append(transformer)
        if not candidates:
            # Nothing to transform
            return False, lines
        # Apply the transform that occurs earliest in the cell (sortby is the
        # (line, column) position); fall through to the next candidate if a
        # transform raises SyntaxError on this input.
        ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
        for transformer in ordered_transformers:
            try:
                return True, transformer.transform(lines)
            except SyntaxError:
                pass
        return False, lines
    def do_token_transforms(self, lines):
        """Re-apply token transforms until a fixed point (bounded by TRANSFORM_LOOP_LIMIT)."""
        for _ in range(TRANSFORM_LOOP_LIMIT):
            changed, lines = self.do_one_token_transform(lines)
            if not changed:
                return lines
        raise RuntimeError("Input transformation still changing after "
                           "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
    def transform_cell(self, cell: str) -> str:
        """Transforms a cell of input code"""
        if not cell.endswith('\n'):
            cell += '\n'  # Ensure the cell has a trailing newline
        lines = cell.splitlines(keepends=True)
        for transform in self.cleanup_transforms + self.line_transforms:
            lines = transform(lines)
        lines = self.do_token_transforms(lines)
        return ''.join(lines)
    def check_complete(self, cell: str):
        """Return whether a block of code is ready to execute, or should be continued

        Parameters
        ----------
        source : string
          Python input code, which can be multiline.

        Returns
        -------
        status : str
          One of 'complete', 'incomplete', or 'invalid' if source is not a
          prefix of valid code.
        indent_spaces : int or None
          The number of spaces by which to indent the next line of code. If
          status is not 'incomplete', this is None.
        """
        # Remember if the lines ends in a new line.
        ends_with_newline = False
        for character in reversed(cell):
            if character == '\n':
                ends_with_newline = True
                break
            elif character.strip():
                break
            else:
                continue
        if not ends_with_newline:
            # Append an newline for consistent tokenization
            # See https://bugs.python.org/issue33899
            cell += '\n'
        lines = cell.splitlines(keepends=True)
        if not lines:
            return 'complete', None
        if lines[-1].endswith('\\'):
            # Explicit backslash continuation
            return 'incomplete', find_last_indent(lines)
        try:
            for transform in self.cleanup_transforms:
                lines = transform(lines)
        except SyntaxError:
            return 'invalid', None
        if lines[0].startswith('%%'):
            # Special case for cell magics - completion marked by blank line
            if lines[-1].strip():
                return 'incomplete', find_last_indent(lines)
            else:
                return 'complete', None
        try:
            for transform in self.line_transforms:
                lines = transform(lines)
            lines = self.do_token_transforms(lines)
        except SyntaxError:
            return 'invalid', None
        tokens_by_line = make_tokens_by_line(lines)
        if not tokens_by_line:
            return 'incomplete', find_last_indent(lines)
        if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
            # We're in a multiline string or expression
            return 'incomplete', find_last_indent(lines)
        newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}
        # Pop the last line which only contains DEDENTs and ENDMARKER
        last_token_line = None
        if {t.type for t in tokens_by_line[-1]} in [
            {tokenize.DEDENT, tokenize.ENDMARKER},
            {tokenize.ENDMARKER}
        ] and len(tokens_by_line) > 1:
            last_token_line = tokens_by_line.pop()
        # Strip trailing NEWLINE/COMMENT/ENDMARKER tokens from the last group
        # so the test below sees the last *significant* token.
        while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
            tokens_by_line[-1].pop()
        if len(tokens_by_line) == 1 and not tokens_by_line[-1]:
            return 'incomplete', 0
        # NOTE(review): if more than one group remains and the last one is
        # emptied by the loop above, the next subscript raises IndexError —
        # confirm whether that input shape is reachable.
        if tokens_by_line[-1][-1].string == ':':
            # The last line starts a block (e.g. 'if foo:')
            ix = 0
            while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            indent = tokens_by_line[-1][ix].start[1]
            return 'incomplete', indent + 4
        if tokens_by_line[-1][0].line.endswith('\\'):
            return 'incomplete', None
        # At this point, our checks think the code is complete (or invalid).
        # We'll use codeop.compile_command to check this with the real parser
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('error', SyntaxWarning)
                res = compile_command(''.join(lines), symbol='exec')
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError, SyntaxWarning):
            return 'invalid', None
        else:
            if res is None:
                return 'incomplete', find_last_indent(lines)
        if last_token_line and last_token_line[0].type == tokenize.DEDENT:
            if ends_with_newline:
                return 'complete', None
            return 'incomplete', find_last_indent(lines)
        # If there's a blank line at the end, assume we're ready to execute
        if not lines[-1].strip():
            return 'complete', None
        return 'complete', None
def find_last_indent(lines):
    """Return the indent width of the last line, counting a tab as 4 spaces."""
    match = _indent_re.match(lines[-1])
    if match:
        return len(match.group(0).replace('\t', ' ' * 4))
    return 0
| 36.968927 | 181 | 0.604302 | 16,676 | 0.637121 | 0 | 0 | 2,806 | 0.107206 | 0 | 0 | 10,963 | 0.418851 |
0bf2a45e6eafd2d17de3e49288033fffcc462838 | 6,806 | py | Python | run_metrics.py | bradyneal/realcause | 7411cc26f10d750aa011b25f2a46cb572d8b8540 | [
"MIT"
] | 35 | 2021-03-30T12:30:53.000Z | 2022-03-05T10:07:24.000Z | run_metrics.py | bradyneal/realcause | 7411cc26f10d750aa011b25f2a46cb572d8b8540 | [
"MIT"
] | null | null | null | run_metrics.py | bradyneal/realcause | 7411cc26f10d750aa011b25f2a46cb572d8b8540 | [
"MIT"
] | 1 | 2021-04-01T06:31:14.000Z | 2021-04-01T06:31:14.000Z | from pathlib import Path
import os
import zipfile
import json
from addict import Dict
from train_generator import get_data
from loading import load_gen
import numpy as np
from collections import OrderedDict
from tqdm import tqdm
def get_univariate_results(model, num_tests=100, verbose=False, n=None):
    """Average the univariate two-sample-test p-values over repeated runs.

    Parameters
    ----------
    model : object exposing ``get_univariate_quant_metrics(dataset, verbose, n)``
    num_tests : int
        Number of repetitions to average over.
    verbose : bool
        Forwarded to the model's metric computation.
    n : int or None
        Sample size forwarded to the model (None = model default).

    Returns
    -------
    OrderedDict
        Mean KS and Epps-Singleton p-values for the treatment (t) and
        outcome (y) marginals.
    """
    # FIX: the original also accumulated every metrics dict in an `all_runs`
    # list that was never read — dead code removed.
    t_ks_pvals = []
    y_ks_pvals = []
    y_es_pvals = []
    t_es_pvals = []
    for _ in tqdm(range(num_tests)):
        uni_metrics = model.get_univariate_quant_metrics(
            dataset="test", verbose=verbose, n=n
        )
        t_ks_pvals.append(uni_metrics["t_ks_pval"])
        y_ks_pvals.append(uni_metrics["y_ks_pval"])
        y_es_pvals.append(uni_metrics["y_es_pval"])
        t_es_pvals.append(uni_metrics["t_es_pval"])
    summary = OrderedDict()
    summary.update(avg_t_ks_pval=sum(t_ks_pvals) / num_tests)
    summary.update(avg_y_ks_pval=sum(y_ks_pvals) / num_tests)
    summary.update(avg_t_es_pval=sum(t_es_pvals) / num_tests)
    summary.update(avg_y_es_pval=sum(y_es_pvals) / num_tests)
    return summary
def get_multivariate_results(model, include_w, num_tests=100, n=1000):
    """Average the multivariate two-sample-test p-values over repeated runs.

    Collects the 'wasserstein1 pval', 'wasserstein2 pval',
    'Friedman-Rafsky pval', 'kNN pval' and 'Energy pval' entries from the
    model's metrics and returns their means in an OrderedDict.
    """
    # One collected-values list per metric key, in reporting order.
    collected = OrderedDict([
        ("wasserstein1 pval", []),
        ("wasserstein2 pval", []),
        ("Friedman-Rafsky pval", []),
        ("kNN pval", []),
        ("Energy pval", []),
    ])
    for _ in tqdm(range(num_tests)):
        multi_metrics = model.get_multivariate_quant_metrics(
            dataset="test", n=n, include_w=include_w
        )
        for key, values in collected.items():
            values.append(multi_metrics[key])
    summary = OrderedDict()
    summary.update(avg_w1_pval=sum(collected["wasserstein1 pval"]) / num_tests)
    summary.update(avg_w2_pval=sum(collected["wasserstein2 pval"]) / num_tests)
    summary.update(avg_fr_pval=sum(collected["Friedman-Rafsky pval"]) / num_tests)
    summary.update(avg_knn_pval=sum(collected["kNN pval"]) / num_tests)
    summary.update(avg_energy_pval=sum(collected["Energy pval"]) / num_tests)
    return summary
def evaluate_directory(
    checkpoint_dir="./GenModelCkpts",
    # checkpoint_dir="./LinearModelCkpts",
    data_filter=None,
    num_tests=100,
    n_uni=None,
    n_multi=1000,
    include_w=True,
    results_dir="./results",
):
    """Evaluate every model checkpoint under *checkpoint_dir* and dump JSON results.

    Walks checkpoint_dir/<dataset>/<subdataset>, unzips checkpoints if
    needed, loads each generator, computes ATE/PEHE (when ground truth is
    available) plus univariate and multivariate quantitative metrics, and
    writes everything to <results_dir>/results.json (or
    <data_filter>_results.json when a filter is given).

    NOTE(review): the ``include_w`` parameter is never used below — both
    multivariate calls hard-code include_w=True/False; confirm intent.
    """
    checkpoint_dir = Path(checkpoint_dir).resolve()
    results_dir = Path(results_dir)
    results_dir.mkdir(exist_ok=True, parents=True)
    dataset_roots = [Path(i) for i in os.listdir(checkpoint_dir)]
    results = {}
    # For each overall dataset (LBIDD, lalonde, etc.)
    for root in dataset_roots:
        subdatasets = os.listdir(checkpoint_dir / root)
        if data_filter is not None:
            if data_filter not in str(root):
                continue
        # "1k" variants are skipped unconditionally.
        if "1k" in str(root):
            continue
        # For each subdataset (psid1, cps1, etc.)
        for subdata in subdatasets:
            subdata_path = checkpoint_dir / root / subdata
            # Check if unzipping is necessary
            if (
                len(os.listdir(subdata_path)) == 1
                and ".zip" in os.listdir(subdata_path)[0]
            ):
                zip_name = os.listdir(subdata_path)[0]
                zip_path = subdata_path / zip_name
                with zipfile.ZipFile(zip_path, "r") as zip_ref:
                    zip_ref.extractall(subdata_path)
            # Exactly one extracted model folder is expected per subdataset.
            subfolders = [f.path for f in os.scandir(subdata_path) if f.is_dir()]
            assert len(subfolders) == 1
            model_folder = subdata_path / Path(subfolders[0])
            # Training arguments saved alongside the checkpoint (addict.Dict
            # gives attribute-style access).
            with open(model_folder / "args.txt") as f:
                args = Dict(json.load(f))
            args.saveroot = model_folder
            args.dataroot = "./datasets/"
            args.comet = False
            ites, ate, w, t, y = get_data(args)
            # Now load model
            model, args = load_gen(saveroot=str(args.saveroot), dataroot="./datasets")
            # TODO: compare the pipeline of noisy_ate() to ite() too see what's different
            if ate is not None:
                t0 = np.zeros((t.shape[0], 1))
                t1 = np.ones((t.shape[0], 1))
                print("computing ate...", end="\r", flush=True)
                noisy_ate = model.noisy_ate(w=w, t1=t1, t0=t0, transform_w=True)
            else:
                noisy_ate = None
            if ites is not None:
                print("computing ite estimate...", end="\r", flush=True)
                ite_est = model.ite(w=w, noisy=True)
                # NOTE(review): this is the *median* squared error under the
                # sqrt, not the usual mean — confirm this is intentional.
                pehe = np.sqrt(np.median(np.square(ites - ite_est)))
            else:
                ite_est = None
                pehe = None
            print("computing uni metrics...", end="\r", flush=True)
            uni_summary = get_univariate_results(model, num_tests=num_tests, n=n_uni)
            print("computing multi metrics include_w=True...", end="\r", flush=True)
            multi_summary_w = get_multivariate_results(
                model, num_tests=num_tests, n=n_multi, include_w=True
            )
            print("computing multi metrics include_w=False...", end="\r", flush=True)
            multi_summary_no_w = get_multivariate_results(
                model, num_tests=num_tests, n=n_multi, include_w=False
            )
            # Recover the test-split size from the train/val/test proportions
            # when an explicit test_size was not recorded.
            if args.test_size is None:
                total = args.train_prop + args.val_prop + args.test_prop
                n_total = y.shape[0]
                n_train = round(n_total * args.train_prop / total)
                n_val = round(n_total * args.val_prop / total)
                n_test = n_total - n_train - n_val
            else:
                n_test = args.test_size
            subdict = {}
            subdict["univariate_test_size"] = n_uni if n_uni is not None else n_test
            subdict["multivariate_test_size"] = n_multi
            subdict["pehe"] = pehe
            subdict["ate"] = ate
            subdict["ate_est"] = noisy_ate
            subdict["univariate_metrics"] = uni_summary
            subdict["multivariate_metrics_w"] = multi_summary_w
            subdict["multivariate_metrics_no_w"] = multi_summary_no_w
            results[str(root) + "_" + str(subdata)] = subdict
    if data_filter is not None:
        with open(
            results_dir / (data_filter + "_results.json"), "w"
        ) as fp:
            json.dump(results, fp, indent=4)
    else:
        with open(results_dir / "results.json", "w") as fp:
            json.dump(results, fp, indent=4)
# Script entry point: evaluate only the lalonde checkpoints with a single
# repetition per metric (quick run with a small multivariate sample).
if __name__ == "__main__":
    evaluate_directory(data_filter='lalonde', num_tests=1, n_uni=None, n_multi=200)
| 35.821053 | 96 | 0.603438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.137085 |
0bf32d2a4cd637f3e9b811ce4815b37e0ae0c3d5 | 619 | py | Python | Getting-Started-with-Python/pay_calculator.py | michellevrp/Python_Course | ac2a353fa828290d9923665fbdb1e742bea9931e | [
"CC0-1.0"
] | null | null | null | Getting-Started-with-Python/pay_calculator.py | michellevrp/Python_Course | ac2a353fa828290d9923665fbdb1e742bea9931e | [
"CC0-1.0"
] | null | null | null | Getting-Started-with-Python/pay_calculator.py | michellevrp/Python_Course | ac2a353fa828290d9923665fbdb1e742bea9931e | [
"CC0-1.0"
] | null | null | null | #first exercise
#This code asks the user for hours and rate for hour, calculate total pay and print it.
hrs = input("Enter Hours:")
rate = input("Enter Rate:")
pay = float(hrs) * float(rate)
print("Pay:", pay)
#second exercise
#This code asks the user for hours and rate for hour, calculate total pay and print it.
#If more than 40 hours, the rate is 1.5 the initial rate.
hrs = input("Enter Hours:")
rate = input("Enter Rate:")
# FIX: the original printed "Insert numbers" on bad input but then fell
# through and crashed with NameError because h/r were never bound.  The
# computation now only runs when both conversions succeed, and we catch the
# specific ValueError instead of every exception.
try:
    h = float(hrs)
    r = float(rate)
except ValueError:
    print("Insert numbers")
else:
    if h > 40:
        # Overtime: hours beyond 40 are paid at 1.5x the base rate.
        p = 40 * r + (((h - 40) * 1.5) * r)
    else:
        p = h * r
    p = float(p)
    print(p)
| 25.791667 | 89 | 0.628433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.557351 |
0bf3c2faad0821a872957b34e95b418c7cf6cb30 | 5,419 | py | Python | vcx/wrappers/python3/vcx/api/disclosed_proof.py | esplinr/evernym-sdk | f7d99da59fb436860c8469f0685e0d4e27139855 | [
"Apache-2.0"
] | null | null | null | vcx/wrappers/python3/vcx/api/disclosed_proof.py | esplinr/evernym-sdk | f7d99da59fb436860c8469f0685e0d4e27139855 | [
"Apache-2.0"
] | null | null | null | vcx/wrappers/python3/vcx/api/disclosed_proof.py | esplinr/evernym-sdk | f7d99da59fb436860c8469f0685e0d4e27139855 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from ctypes import *
from vcx.common import do_call, create_cb
from vcx.api.connection import Connection
from vcx.api.vcx_stateful import VcxStateful
import json
class DisclosedProof(VcxStateful):
def __init__(self, source_id: str):
VcxStateful.__init__(self, source_id)
self._name = source_id
self._proof_req = None
def __del__(self):
self.release()
self.logger.debug("Deleted {} obj: {}".format(DisclosedProof, self.handle))
@property
def proof_request(self):
return self._proof_req
@proof_request.setter
def proof_request(self, x):
self._proof_req = x
@staticmethod
async def create(source_id: str, proof_request: str):
constructor_params = (source_id,)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_proof_request = c_char_p(json.dumps(proof_request).encode('utf-8'))
c_params = (c_source_id, c_proof_request, )
return await DisclosedProof._create("vcx_disclosed_proof_create_with_request",
constructor_params,
c_params)
@staticmethod
async def create_with_msgid(source_id: str, connection: Connection, msg_id: str):
proof = DisclosedProof(source_id)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_msg_id = c_char_p(json.dumps(msg_id).encode('utf-8'))
c_connection_handle = c_uint32(connection.handle)
if not hasattr(DisclosedProof.create_with_msgid, "cb"):
DisclosedProof.create_with_msgid.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
proof.handle, proof_req = await do_call('vcx_disclosed_proof_create_with_msgid',
c_source_id,
c_connection_handle,
c_msg_id,
DisclosedProof.create_with_msgid.cb)
proof.proof_request = json.loads(proof_req.decode())
return proof
@staticmethod
async def deserialize(data: dict):
disclosed_proof = await DisclosedProof._deserialize("vcx_disclosed_proof_deserialize",
json.dumps(data),
data.get('source_id'))
return disclosed_proof
@staticmethod
async def get_requests(connection: Connection) -> dict:
if not hasattr(DisclosedProof.get_requests, "cb"):
DisclosedProof.get_requests.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_connection_handle = c_uint32(connection.handle)
data = await do_call('vcx_disclosed_proof_get_requests',
c_connection_handle,
DisclosedProof.get_requests.cb)
return json.loads(data.decode())
async def serialize(self) -> dict:
return await self._serialize(DisclosedProof, 'vcx_disclosed_proof_serialize')
async def update_state(self) -> int:
return await self._update_state(DisclosedProof, 'vcx_disclosed_proof_update_state')
async def get_state(self) -> int:
return await self._get_state(DisclosedProof, 'vcx_disclosed_proof_get_state')
def release(self) -> None:
self._release(DisclosedProof, 'vcx_disclosed_proof_release')
async def get_creds(self) -> dict:
if not hasattr(DisclosedProof.get_creds, "cb"):
self.logger.debug("vcx_disclosed_proof_retrieve_credentials: Creating callback")
DisclosedProof.send_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_disclosed_proof_handle = c_uint32(self.handle)
data = await do_call('vcx_disclosed_proof_retrieve_credentials',
c_disclosed_proof_handle,
DisclosedProof.send_proof.cb)
return json.loads(data.decode())
async def send_proof(self, connection: Connection):
if not hasattr(DisclosedProof.send_proof, "cb"):
self.logger.debug("vcx_disclosed_proof_send_proof: Creating callback")
DisclosedProof.send_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_disclosed_proof_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
await do_call('vcx_disclosed_proof_send_proof',
c_disclosed_proof_handle,
c_connection_handle,
DisclosedProof.send_proof.cb)
async def generate_proof(self, selected_creds: dict, self_attested_attrs: dict):
if not hasattr(DisclosedProof.send_proof, "cb"):
self.logger.debug("vcx_disclosed_proof_generate_proof: Creating callback")
DisclosedProof.send_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_disclosed_proof_handle = c_uint32(self.handle)
c_selected_creds = c_char_p(json.dumps(selected_creds).encode('utf-8'))
c_self_attested_attrs = c_char_p(json.dumps(self_attested_attrs).encode('utf-8'))
await do_call('vcx_disclosed_proof_generate_proof',
c_disclosed_proof_handle,
c_selected_creds,
c_self_attested_attrs,
DisclosedProof.send_proof.cb)
| 40.744361 | 116 | 0.644768 | 5,225 | 0.9642 | 0 | 0 | 2,416 | 0.445839 | 4,516 | 0.833364 | 642 | 0.118472 |
0bf431d2785411629685e804d55562f39393e330 | 3,442 | py | Python | pingdom/pingdom.py | srinivas-kandula/integrations | 7c9857fadacc845b3261ba5c1244faccd6511c80 | [
"Apache-2.0"
] | 10 | 2016-09-23T21:09:55.000Z | 2022-02-05T08:12:43.000Z | pingdom/pingdom.py | srinivas-kandula/integrations | 7c9857fadacc845b3261ba5c1244faccd6511c80 | [
"Apache-2.0"
] | 20 | 2017-06-29T00:20:56.000Z | 2022-03-24T07:32:15.000Z | pingdom/pingdom.py | srinivas-kandula/integrations | 7c9857fadacc845b3261ba5c1244faccd6511c80 | [
"Apache-2.0"
] | 39 | 2016-11-22T17:19:44.000Z | 2022-03-24T07:09:42.000Z | import sys
import requests
import json
import argparse
import time
parser = argparse.ArgumentParser(description='Collects monitoring data from Pingdom.')
parser.add_argument('-u', '--pingdom-user-name', help='The Pingdom User Name', required=True)
parser.add_argument('-p', '--pingdom-password', help='The Pingdom Password', required=True)
parser.add_argument('-a', '--pingdom-api-key', help='The Pingdom API-KEY', required=True)
class Pingdom:
    """Minimal Pingdom 2.0 REST API client.

    Results from the individual ``get_*`` calls are accumulated in
    ``self.jsonData`` as a list of dicts, ready for ``json.dumps``.
    """

    def __init__(self, api_key, user_name, password):
        # NOTE: the trailing commas wrap each credential in a 1-tuple; the
        # rest of the class consistently unwraps them with [0].  Kept as-is
        # so any external code reading these attributes keeps working.
        self.api_key = api_key,
        self.user_name = user_name,
        self.password = password,
        self.jsonData = []

    def handle_error(self, error_message):
        """Write *error_message* to stderr and exit with status 1."""
        sys.stderr.write("ERROR:|Pingdom| " + error_message)
        sys.exit(1)

    def call_api(self, api):
        """GET https://api.pingdom.com/api/2.0/<api> and return the parsed JSON.

        Terminates the process via handle_error() on any non-200 response.
        """
        headers = {'App-Key': self.api_key[0]}
        base_api = 'https://api.pingdom.com/api/2.0/' + api
        response = requests.get(base_api, headers=headers, auth=requests.auth.HTTPBasicAuth(self.user_name[0], self.password[0]))
        if response.status_code == 200:
            return response.json()
        else:
            self.handle_error("API [" + base_api + "] failed to execute with error code [" + str(response.status_code) + "].")

    @staticmethod
    def _count_statuses(checks):
        """Tally how many checks are in each known Pingdom status.

        Checks with an unrecognised (or missing) status are ignored,
        matching the original per-status counters.
        """
        counts = {"up": 0, "down": 0, "unconfirmed_down": 0, "unknown": 0, "paused": 0}
        for check in checks:
            status = check.get("status")
            if status in counts:
                counts[status] += 1
        return counts

    def get_checks(self):
        """Fetch all checks and store them (plus per-status totals) in jsonData.

        BUGFIX: the original tallied "down" checks with
        'down_count == down_count + 1' — a comparison, not an assignment —
        so the reported "down" count was always 0.
        """
        response = self.call_api('checks')
        data = response.get("checks")
        counts = response.get("counts")
        counts.update(self._count_statuses(data))
        data.append(counts)
        self.jsonData = data

    def get_credits(self):
        """Fetch account credit information and append it to jsonData."""
        response = self.call_api('credits')
        self.jsonData.append(response)

    def get_maintenance(self):
        """Fetch maintenance windows, flatten them, and append to jsonData."""
        response = self.call_api('maintenance')
        if response.get('maintenance'):
            for mw in response.get('maintenance'):
                window = {}
                window["description"] = mw.get("description")
                window["recurrencetype"] = mw.get("recurrencetype")
                window["repeatevery"] = mw.get("repeatevery")
                # Epoch seconds -> local human-readable timestamps.
                window["from"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("from")))
                window["to"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("to")))
                window["window"] = 1
                self.jsonData.append(window)
# Script entry point: parse credentials, collect checks/credits/maintenance
# and print the accumulated data as JSON.
if __name__ == "__main__":
    pingdom = None
    try:
        args = parser.parse_args()
        pingdom = Pingdom(args.pingdom_api_key, args.pingdom_user_name, args.pingdom_password)
        pingdom.get_checks()
        pingdom.get_credits()
        pingdom.get_maintenance()
        print(json.dumps(pingdom.jsonData))
    except Exception as e:
        # BUGFIX: the original called pingdom.handle_error(e.message);
        # 'e.message' does not exist on Python 3 exceptions, and 'pingdom'
        # is unbound if construction itself failed.
        if pingdom is not None:
            pingdom.handle_error(str(e))
        else:
            sys.stderr.write("ERROR:|Pingdom| " + str(e))
            sys.exit(1)
| 35.484536 | 129 | 0.597908 | 2,637 | 0.766124 | 0 | 0 | 0 | 0 | 0 | 0 | 619 | 0.179837 |
0bf55cef9243ecacb2a401a9c42afa57b35ecfc2 | 10,224 | py | Python | OpenData/Upsilon/Upsilon.py | tylern4/tylern4.github.io | 77d8b5fecd91d2b884ae54e2fb3d59c521e02b8b | [
"MIT"
] | null | null | null | OpenData/Upsilon/Upsilon.py | tylern4/tylern4.github.io | 77d8b5fecd91d2b884ae54e2fb3d59c521e02b8b | [
"MIT"
] | null | null | null | OpenData/Upsilon/Upsilon.py | tylern4/tylern4.github.io | 77d8b5fecd91d2b884ae54e2fb3d59c521e02b8b | [
"MIT"
] | 1 | 2018-07-29T15:46:03.000Z | 2018-07-29T15:46:03.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import stats
from pandas.tools.plotting import scatter_matrix
from scipy.optimize import curve_fit
from matplotlib.colors import LogNorm
# Load the CMS open-data dimuon sample.
df = pd.read_csv('/Users/tylern/Homework/PHYS723/project/LHC/CMS_data/MuRun.csv')
#Make sure events are neutral
#if first event is positive and the second is negative
#or the second is positive and the first is negative
df1 = df[df.Q1 == 1]
df1 = df1[df1.Q2 == -1]
df2 = df[df.Q1 == -1]
df2 = df2[df2.Q2 == 1]
frames = [df1, df2]
df = pd.concat(frames)
# Keep only events where both muons are type 'G' — presumably "global"
# muons in the CMS data format; TODO confirm against the dataset docs.
df = df[df.Type1 == 'G']
df = df[df.Type2 == 'G']
#df = df[np.sqrt(df.px1**2 + df.py1**2) + np.sqrt(df.px2**2 + df.py2**2) < 50]
# Initial guess for the Upsilon mass (GeV), used later as a fit seed.
mass_Up = 9.45
def poly(x, c1, c2, c3, c4):
    """Cubic background model: c1*x^3 + c2*x^2 + c3*x + c4."""
    cubic = c1 * x * x * x
    quadratic = c2 * x * x
    return cubic + quadratic + c3 * x + c4
def big_poly(x, c1, c2, c3, c4, c5, c6, c7, c8):
    """Degree-7 polynomial where c_k is the coefficient of x**(k-1)."""
    terms = (c8 * x**7, c7 * x**6, c6 * x**5, c5 * x**4,
             c4 * x**3, c3 * x**2, c2 * x, c1)
    total = terms[0]
    for term in terms[1:]:
        total = total + term
    return total
def gaussian(x, mu, sig, const):
    """Normalized Gaussian peak of area `const`, mean `mu`, std dev `sig`.

    BUGFIX: the original exponent read '-(x - mu)**2 / 2*sig**2', which by
    Python operator precedence *multiplies* by sig**2 instead of dividing
    by (2*sig**2).  With that form the fitted 'sig' is not a standard
    deviation, making the FWHM = 2*sqrt(2*ln 2)*sig labels used elsewhere
    in this script inconsistent.
    """
    norm = const / (sig * np.sqrt(2 * np.pi))
    return norm * np.exp(-(x - mu)**2 / (2 * sig**2))
def gaus_poly(x, mu, sig, cont, c1, c2, c3, c4):
    """Cubic background plus a Gaussian signal peak."""
    background = poly(x, c1, c2, c3, c4)
    peak = gaussian(x, mu, sig, cont)
    return background + peak
def big_poly_gaus(x, mu, sig, cont, c1, c2, c3, c4, c5, c6, c7, c8):
    """Degree-7 polynomial background plus a Gaussian signal peak."""
    peak = gaussian(x, mu, sig, cont)
    background = big_poly(x, c1, c2, c3, c4, c5, c6, c7, c8)
    return peak + background
def chi_2(ys, yknown):
    """Average chi-square-like discrepancy between model values and data.

    Each bin contributes (model - data)^2 / data; bins where the data value
    is zero contribute 1 instead of dividing by zero.  The sum is divided
    by the number of data bins.

    FIX: the original used the Python-2-only 'xrange' and indexed both
    sequences; iterating with zip() behaves the same and also runs on
    Python 3 (pairs beyond the shorter sequence are ignored).
    """
    total = 0
    for y_model, y_data in zip(ys, yknown):
        if y_data == 0:
            total += 1
        else:
            total += (y_model - y_data)**2.0 / y_data
    return total / len(yknown)
# ---- Figure 1: dimuon mass histogram in the Upsilon region with a
# cubic-background + Gaussian-peak fit. ----
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
upsilon = df[df.M < 14]
upsilon = upsilon[upsilon.M > 6]
mass = upsilon.M
num_bins = 400
hist, bin_edges = np.histogram(mass,bins=num_bins)
# Bin centers for the fit.
xdata = 0.5*(bin_edges[1:]+bin_edges[:-1])
ydata = hist
plt.hist(mass, num_bins, histtype=u'stepfilled',facecolor='g' , alpha=0.45)
# Fit the background alone first, then use it to seed the combined fit.
popt_1, pcov_1 = curve_fit(poly, xdata, ydata)
x0 = np.array([9.45,10.7,1,popt_1[0],popt_1[1],popt_1[2],popt_1[3]])
popt_1, pcov_1 = curve_fit(gaus_poly, xdata, ydata,p0=x0)
c2 = chi_2(gaus_poly(xdata, *popt_1),ydata)
plt.plot(xdata,gaus_poly(xdata,*popt_1),'b--', lw=4,
         label=r'$\mathrm{Poly\ bkg\ gaus\ peak\ : \ \chi^{2} = %.4f}$' %(c2))
plt.plot(xdata,poly(xdata,*popt_1[3:]),'g--', lw=4)
# NOTE(review): the first 'signal' loop below is dead code — its result is
# immediately overwritten by the second loop (data minus background only).
signal_line = lambda x : gaus_poly(x,*popt_1) - poly(x, *popt_1[3:])
signal = []
for i in xrange(num_bins):
    temp = ydata[i] - signal_line(xdata[i])
    signal.append(temp)
signal = []
for i in xrange(num_bins):
    temp = ydata[i] - poly(xdata[i],*popt_1[3:])
    signal.append(temp)
plt.xlim((np.min(xdata),np.max(xdata)))
plt.legend(loc=0)
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.savefig('U_hist.pdf')
# ---- Figure 2: background-subtracted spectrum, Gaussian fit to the
# Upsilon(1S) peak; the +-3 sigma window is marked with vertical lines. ----
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
ydata = signal
plt.scatter(xdata,ydata,marker='o',color='g')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata,p0=[9.45,12,1])
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'g-', lw=4,
         label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f \ GeV}$'
         %(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
# Hard-coded window width (0.20 GeV / 3) used instead of the fitted width.
sigma = 0.20/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
plt.axvline(x=(mean - 3.0*sigma),color='g')
plt.axvline(x=(mean + 3.0*sigma),color='g')
mean_U = mean
sigma_U = sigma
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('U_peak.pdf')
# ---- Subtract the fitted 1S peak and fit the Upsilon(2S) residual. ----
signal1 = []
for i in xrange(num_bins):
    temp = ydata[i] - gaussian(xdata[i],*popt_1)
    signal1.append(temp)
ydata = signal1
plt.scatter(xdata, signal1,marker='o', color='b')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata, p0=[10,10.7,1],maxfev=8000)
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'b', lw=4,
         label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f}$'
         %(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
# Hard-coded window width (0.30 GeV / 3) instead of the fitted width.
sigma = 0.30/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
mean_Up = mean
sigma_Up = sigma
plt.axvline(x=(mean - 3.0*sigma),color='b')
plt.axvline(x=(mean + 3.0*sigma),color='b')
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('Up_peak.pdf')
# NOTE(review): the triple-quoted block below is dead code (a near-duplicate
# of the section above with maxfev=80000); kept verbatim for reference.
'''
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
signal1 = []
for i in xrange(num_bins):
    temp = ydata[i] - gaussian(xdata[i],*popt_1)
    signal1.append(temp)
ydata = signal1
plt.scatter(xdata, signal1,marker='o', color='b')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata, p0=[10,10.7,1],maxfev=80000)
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'b', lw=4,
         label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f}$'
         %(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
sigma = 0.30/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
mean_Up = mean
sigma_Up = sigma
plt.axvline(x=(mean - 3.0*sigma),color='b')
plt.axvline(x=(mean + 3.0*sigma),color='b')
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('Up_peak.pdf')
'''
# ---- Select events inside the 2S mass window and build the candidate's
# summed-kinematics columns (momentum components, pt, energy). ----
Up = df[df.M > (mean_Up - 3.0*sigma_Up)]
Up = Up[Up.M < (mean_Up + 3.0*sigma_Up)]
Up['Upx'] = Up.px1+Up.px2
Up['Upy'] = Up.py1+Up.py2
Up['Upz'] = Up.pz1+Up.pz2
Up['Upt'] = np.sqrt(np.square(Up.Upx) + np.square(Up.Upy))
Up['UE'] = Up.E1+Up.E2
#########################################
# Energy vs transverse momentum, log color scale.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt_log.pdf')
#########################################
#########################################
# Same plot zoomed to the low-energy region.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 30]
temp = temp[temp.UE < 30]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt_log_2.pdf')
#########################################
#########################################
# Linear color scale version.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis')#,norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt.pdf')
#########################################
#########################################
#fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
#temp = Up.drop(['Event','Run','Type1','Type2'],axis=1)
#temp = temp.drop(['E1','px1','py1','pz1','pt1','eta1','phi1','Q1'],axis=1)
#temp = temp.drop(['E2','px2','py2','pz2','pt2','eta2','phi2','Q2'],axis=1)
#scatter_matrix(temp, alpha=0.1, figsize=(20, 15),diagonal='kde')
#plt.savefig('scatter_matrix.jpg')
#########################################
#########################################
# Energy vs longitudinal (z) momentum.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upz < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upz,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Z Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UE_Upz.pdf')
#########################################
# ---- Same selection and derived columns for the 1S mass window. ----
UPp = df[df.M > (mean_U - 3.0*sigma_U)]
UPp = UPp[UPp.M < (mean_U + 3.0*sigma_U)]
UPp['UPpx'] = UPp.px1+UPp.px2
UPp['UPpy'] = UPp.py1+UPp.py2
UPp['UPpz'] = UPp.pz1+UPp.pz2
UPp['UPpt'] = np.sqrt(np.square(UPp.UPpx) + np.square(UPp.UPpy))
UPp['UpE'] = UPp.E1+UPp.E2
#########################################
# 1S-window candidates: energy vs transverse momentum, log color scale.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpt < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UpE_UPpt_log.pdf')
#########################################
#########################################
# Linear color scale version.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpt < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpt,bins=200,cmap='viridis')#,norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UpE_UPpt.pdf')
#########################################
#########################################
# Energy vs longitudinal momentum for the 1S window.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpz < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpz,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Z Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UE_UPpz.pdf')
#########################################
#########################################
# 1D z-momentum distribution for the 2S-window candidates.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[np.abs(Up.Upz) < 200]
plt.hist(temp.Upz, 100, histtype=u'stepfilled',facecolor='b' , alpha=0.45)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.xlabel(r'Z Momentum (GeV)', fontsize=20)
#plt.colorbar()
plt.savefig('Upz.pdf')
#########################################
#########################################
# 1D transverse-momentum distribution for the 2S-window candidates.
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[np.abs(Up.Upt) < 20]
plt.hist(temp.Upt, 100, histtype=u'stepfilled',facecolor='b' , alpha=0.45)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.xlabel(r'Transverse Momentum (GeV)', fontsize=20)
#plt.colorbar()
plt.savefig('Upt.pdf')
#########################################
0bf5dadbd56e25b757cc7da476655a19d6ca5294 | 4,187 | py | Python | lib/JumpScale/lib/perftesttools/NodeBase.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | null | null | null | lib/JumpScale/lib/perftesttools/NodeBase.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | lib/JumpScale/lib/perftesttools/NodeBase.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | from JumpScale import j
# import sys
# import time
# import json
# import os
# import psutil
from MonitorTools import *
# from pssh import ParallelSSHClient
from gevent import monkey
monkey.patch_socket()
class NodeBase(MonitorTools):
    # Base class for a remote node in a performance-test setup (Python 2 code).
    # Wraps an SSH connection to the node plus helpers for running commands
    # inside tmux sessions, and reports to a shared monitor redis.
    def __init__(self,ipaddr,sshport=22,role=None,name=""):
        """
        Connect to the node over SSH (as root, via the local ssh-agent) and
        to the shared monitor redis on port 9999.

        existing roles
        - vnas
        - monitor
        - host
        """
        # monitorNodeIp is populated by j.tools.perftesttools.init(); fail early otherwise
        if j.tools.perftesttools.monitorNodeIp==None:
            raise RuntimeError("please do j.tools.perftesttools.init() before calling this")
        print "connect redis: %s:%s"%(j.tools.perftesttools.monitorNodeIp, 9999)
        self.redis=j.clients.redis.getGeventRedisClient(j.tools.perftesttools.monitorNodeIp, 9999)
        self.key=j.tools.perftesttools.sshkey
        self.name=name
        self.ipaddr=ipaddr
        self.sshport = sshport
        self.debug=False
        print "ssh init %s"%self
        # root SSH client; 10 second connect timeout, gevent-friendly
        self.ssh=j.remote.ssh.getSSHClientUsingSSHAgent(host=ipaddr, username='root', port=sshport, timeout=10,gevent=True)
        print "OK"
        # self.ssh=ParallelSSHClient([ipaddr],port=sshport)
        #user=None, password=None, port=None, pkey=None, forward_ssh_agent=True, num_retries=3, timeout=10, pool_size=10, proxy_host=None, proxy_port=22
        self.role=role
    def startMonitor(self,cpu=1,disks=[],net=1):
        # Launch the monitoring agent on this node inside a dedicated tmux
        # session ("mon<role>"); it pushes cpu/disk/net stats to the monitor redis.
        # NOTE(review): mutable default argument `disks` -- shared across calls
        # if a callee ever mutates it.
        disks = [str(disk) for disk in disks]
        self.prepareTmux("mon%s"%self.role,["monitor"])
        env={}
        if j.tools.perftesttools.monitorNodeIp==None:
            raise RuntimeError("please do j.tools.perftesttools.init() before calling this")
        # environment consumed by j.tools.perftesttools.monitor() on the node
        env["redishost"]=j.tools.perftesttools.monitorNodeIp
        env["redisport"]=9999
        env["cpu"]=cpu
        env["disks"]=",".join(disks)
        env["net"]=net
        env["nodename"]=self.name
        self.executeInScreen("monitor","js 'j.tools.perftesttools.monitor()'",env=env)
    def execute(self,cmd, env={},dieOnError=True,report=True):
        # Run a command over SSH; `report` echoes the command locally first.
        # NOTE(review): `env` is accepted but never forwarded to ssh.execute --
        # callers wanting env vars must go through executeInScreen instead.
        if report:
            print cmd
        return self.ssh.execute(cmd, dieOnError=dieOnError)
        # if dieOnError:
        # self.fabric.env['warn_only'] = True
        # res= self.ssh.run(cmd, dieOnError=dieOnError,env=env)
        # if dieOnError:
        # self.fabric.env['warn_only'] = False
        # return res
    def prepareTmux(self,session,screens=["default"],kill=True):
        # Create (optionally re-creating) a tmux session with one window per
        # entry in `screens`; the first screen becomes the session's initial window.
        print "prepare tmux:%s %s %s"%(session,screens,kill)
        if len(screens)<1:
            raise RuntimeError("there needs to be at least 1 screen specified")
        if kill:
            self.execute("tmux kill-session -t %s"%session, dieOnError=False)
        self.execute("tmux new-session -d -s %s -n %s"%(session,screens[0]), dieOnError=True)
        # NOTE(review): pop(0) mutates the caller's list (and the shared
        # default list when the default argument is used).
        screens.pop(0)
        for screen in screens:
            print "init tmux screen:%s"%screen
            self.execute("tmux new-window -t '%s' -n '%s'" %(session,screen))
    def executeInScreen(self,screenname,cmd,env={},session=""):
        """
        Send `cmd` as keystrokes to the named tmux window/screen on the node,
        prefixed with `cd /tmp` and `export`s for every entry in `env`.
        """
        # build a single "export k=v;..." prefix so the command sees the env vars
        envstr="export "
        if env!={}:
            #prepare export arguments
            for key,val in env.iteritems():
                envstr+="export %s=%s;"%(key,val)
            envstr=envstr.strip(";")
        cmd1="cd /tmp;%s;%s"%(envstr,cmd)
        # single quotes would terminate the tmux send-keys argument below
        cmd1=cmd1.replace("'","\"")
        windowcmd=""
        if session!="":
            windowcmd="tmux select-window -t \"%s\";"%session
        cmd2="%stmux send-keys -t '%s' '%s\n'"%(windowcmd,screenname,cmd1)
        # print cmd2
        print "execute:'%s' on %s in screen:%s/%s"%(cmd1,self,session,screenname)
        self.execute(cmd2,report=False)
    def _initFabriclient(self):
        # Configure a fabric/cuisine connection as an alternative transport to self.ssh.
        c = j.remote.cuisine
        self.fabric = c.fabric
        if self.key:
            self.fabric.env["key"] = self.key
        # else:
        # self.fabric.env["key_filename"] = '/root/.ssh/id_rsa.pub'
        # self.fabric.env['use_ssh_config'] = True
        self.fabric.env['user'] = 'root'
        self.cuisine = c.connect(self.ipaddr, self.sshport)
    def __str__(self):
        return "node:%s"%self.ipaddr
    def __repr__(self):
        return self.__str__()
| 32.968504 | 152 | 0.602579 | 3,977 | 0.949845 | 0 | 0 | 0 | 0 | 0 | 0 | 1,433 | 0.34225 |
0bf660edadd12f37c5d93c674b3c544660d4a84c | 2,467 | py | Python | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 2 | 2018-01-20T13:35:54.000Z | 2021-11-24T16:11:20.000Z | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 10 | 2017-05-09T22:26:31.000Z | 2018-06-15T00:05:07.000Z | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 1 | 2018-01-20T13:40:40.000Z | 2018-01-20T13:40:40.000Z | import os
import unittest
import tempfile
from abvdget import ABVDatabase, Record
TESTDATA = os.path.join(os.path.dirname(__file__), 'nengone.json')
EXPECTED = {
99: {
"LID": 99,
"Annotation": "arm and hand",
"Cognacy": '1',
"Item": "nin",
"Loan": None,
"Word": "hand",
"WID": 1
},
93340: {
"LID": 99,
"Annotation": None,
"Cognacy": '13',
"Item": "iñtërnâtiônàlizætiøn",
"Loan": None,
"Word": "leg/foot",
"WID": 4,
},
90697: {
"LID": 99,
"Annotation": None,
"Cognacy": None,
"Item": "kaka",
"Loan": None,
"Word": "to eat",
"WID": 37
},
70785: {
"LID": 99,
"Annotation": None,
"Cognacy": '1',
"Item": "tini",
"Loan": None,
"Word": "Three",
"WID": 199
}
}
class TestABVD(unittest.TestCase):
    """Exercises ABVDatabase against the bundled Nengone fixture."""

    @classmethod
    def setUpClass(cls):
        cls.abvd = ABVDatabase(files=[TESTDATA])

    def test_load(self):
        self.assertIn(TESTDATA, self.abvd.files)

    def test_get_details(self):
        details = self.abvd.get_details(TESTDATA)
        self.assertEqual(details['id'], '99')
        self.assertEqual(details['language'], 'Nengone')
        self.assertEqual(details['silcode'], 'nen')
        self.assertEqual(details['glottocode'], 'neng1238')

    def test_get_details_injects_filename(self):
        details = self.abvd.get_details(TESTDATA)
        self.assertEqual(details.get('filename'), TESTDATA)

    def test_get_location(self):
        location = self.abvd.get_location(TESTDATA)
        self.assertEqual(location['latitude'], "-21.53484700204878876661")
        self.assertEqual(location['longitude'], "167.98095703125000000000")

    def test_get_nlexemes(self):
        self.assertEqual(self.abvd.get_nlexemes(TESTDATA), 4)

    def test_get_ncognates(self):
        self.assertEqual(self.abvd.get_ncognates(TESTDATA), 3)

    def test_process(self):
        for record in self.abvd.process():
            self.assertIn(record.ID, EXPECTED)
            for field in EXPECTED[record.ID]:
                self.assertEqual(EXPECTED[record.ID][field], getattr(record, field))

    def test_get_slug_for(self):
        self.assertEqual(self.abvd.get_slug_for('Nengone', '99'), 'Nengone_99')

    def test_save_details(self):
        with tempfile.NamedTemporaryFile() as handle:
            self.abvd.save_details(handle.name)
            handle.seek(0)
            lines = handle.readlines()
            # two lines: header row plus the single Nengone row
            self.assertEqual(len(lines), 2)
0bf672dcc2a270e726f2eccda64b6330ea2402ec | 662 | py | Python | PythonAPI/scripts/analyze.py | ZhuLingfeng1993/coco | da9a7245581b1ac91a925eef64b1beb9a1cd7df5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | PythonAPI/scripts/analyze.py | ZhuLingfeng1993/coco | da9a7245581b1ac91a925eef64b1beb9a1cd7df5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | PythonAPI/scripts/analyze.py | ZhuLingfeng1993/coco | da9a7245581b1ac91a925eef64b1beb9a1cd7df5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | #!/usr/bin/env python
# -----------------------------------------------------
# Written by Zhu Lingfeng on 2021/3/28.
# -----------------------------------------------------
import sys
from pycocotools.coco_analyze import COCOAnalyze, my_plot
annFile = sys.argv[1]
coco = COCOAnalyze(annFile)
# get all cats
catNms = 'all'
catIds = coco.getCatIds()
# get specific cats
# catNms = ['yin_hua_qing_xie']
# catIds = coco.getCatIds(catNms=catNms)
widths = coco.getBBoxWidths(catIds=catIds)
my_plot(data=widths, label=catNms, name='widths')
aspect_ratios = coco.getBBoxAspectRatios(catIds=catIds)
my_plot(data=aspect_ratios, label=catNms, name='aspect_ratios')
| 25.461538 | 63 | 0.634441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.456193 |
0bf78cd69208917995cc6b701d1673bbdd16907c | 1,821 | py | Python | custom_components/ge_home/entities/ac/fan_mode_options.py | olds/ha_gehome | 5cb24deab64bcade45861da0497a84631845922c | [
"MIT"
] | 41 | 2021-08-02T02:15:54.000Z | 2022-03-30T11:11:42.000Z | custom_components/ge_home/entities/ac/fan_mode_options.py | olds/ha_gehome | 5cb24deab64bcade45861da0497a84631845922c | [
"MIT"
] | 46 | 2021-08-03T02:20:59.000Z | 2022-03-30T11:17:15.000Z | custom_components/ge_home/entities/ac/fan_mode_options.py | olds/ha_gehome | 5cb24deab64bcade45861da0497a84631845922c | [
"MIT"
] | 15 | 2021-08-31T00:21:33.000Z | 2022-03-30T12:53:21.000Z | import logging
from typing import Any, List, Optional
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
)
from gehomesdk import ErdAcFanSetting
from ..common import OptionsConverter
_LOGGER = logging.getLogger(__name__)
class AcFanModeOptionsConverter(OptionsConverter):
    """Maps between Home Assistant fan-mode option strings and ErdAcFanSetting values.

    ``default_option`` is used whenever a value cannot be converted.
    """

    def __init__(self, default_option: ErdAcFanSetting = ErdAcFanSetting.AUTO):
        self._default = default_option

    @property
    def options(self) -> List[str]:
        """Option strings exposed to Home Assistant (auto/low/med/high)."""
        return [i.stringify() for i in [ErdAcFanSetting.AUTO, ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH]]

    def from_option_string(self, value: str) -> Any:
        """Convert an option string back into an ErdAcFanSetting member.

        Falls back to the default setting for unknown (or non-string) values.
        """
        try:
            return ErdAcFanSetting[value.upper().replace(" ","_")]
        except (KeyError, AttributeError):
            # unknown option or non-string value; keep the appliance in a sane state
            _LOGGER.warning(f"Could not set fan mode to {value}")
            return self._default

    def to_option_string(self, value: Any) -> Optional[str]:
        """Return the option string for a setting, collapsing the *_AUTO
        variants onto their base option; unknown values map to the default.
        """
        # was: a bare `except: pass` relying on `None.stringify()` raising --
        # .get() with an explicit default expresses the same fallback directly
        mapped = {
            ErdAcFanSetting.AUTO: ErdAcFanSetting.AUTO,
            ErdAcFanSetting.LOW: ErdAcFanSetting.LOW,
            ErdAcFanSetting.LOW_AUTO: ErdAcFanSetting.AUTO,
            ErdAcFanSetting.MED: ErdAcFanSetting.MED,
            ErdAcFanSetting.MED_AUTO: ErdAcFanSetting.AUTO,
            ErdAcFanSetting.HIGH: ErdAcFanSetting.HIGH,
            ErdAcFanSetting.HIGH_AUTO: ErdAcFanSetting.HIGH,
        }.get(value, self._default)
        return mapped.stringify()
class AcFanOnlyFanModeOptionsConverter(AcFanModeOptionsConverter):
    """Fan-only variant: no AUTO option, LOW is the fallback setting."""

    def __init__(self):
        super().__init__(ErdAcFanSetting.LOW)

    @property
    def options(self) -> List[str]:
        fan_speeds = (ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH)
        return [speed.stringify() for speed in fan_speeds]
0bf9cab5ada34a8d93c85264fbdb2e3064f7cf91 | 13,975 | py | Python | tests/test_basic.py | WSULib/combine | 8e01ac83ca742ab792ba00e7e5c08bcfe8b48ef5 | [
"MIT"
] | 24 | 2017-10-18T17:24:43.000Z | 2019-04-26T17:40:41.000Z | tests/test_basic.py | WSULib/combine | 8e01ac83ca742ab792ba00e7e5c08bcfe8b48ef5 | [
"MIT"
] | 355 | 2017-08-31T20:27:30.000Z | 2019-05-30T18:06:27.000Z | tests/test_basic.py | tulibraries/combine | eb100ea17193d65485aa6c4a7f05a41b4cab7515 | [
"MIT"
] | 4 | 2018-02-28T18:36:54.000Z | 2019-04-22T18:33:43.000Z | import django
import os
import pytest
import shutil
import sys
import time
import uuid
# logging
import logging
# module-level logger for this test run
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# init django settings file to retrieve settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'combine.settings'
sys.path.append('/opt/combine')  # make the combine package importable
django.setup()  # bootstrap Django before importing core.models below
# import core
from core.models import Organization, RecordGroup, CombineJob, HarvestStaticXMLJob, QueryDict, Transformation,\
TransformJob, ValidationScenario, MergeJob
#############################################################################
# Tests Setup
#############################################################################
@pytest.mark.run(order=1)
def test_organization_create(VO):

    '''
    Create a test Organization and pin it to the shared Variable Object (VO).
    '''

    org_name = 'test_org_%s' % uuid.uuid4().hex
    VO.org = Organization(name=org_name, description='')
    VO.org.save()

    # a saved model receives an integer primary key
    assert type(VO.org.id) == int
@pytest.mark.run(order=2)
def test_record_group_create(VO):

    '''
    Create a test RecordGroup under the Organization from the previous test.
    '''

    rg_name = 'test_record_group_%s' % uuid.uuid4().hex
    VO.rg = RecordGroup(organization=VO.org, name=rg_name, description='')
    VO.rg.save()

    # a saved model receives an integer primary key
    assert type(VO.rg.id) == int
#############################################################################
# Test Harvest
#############################################################################
@pytest.mark.run(order=3)
def test_static_harvest(VO):

    '''
    Static harvest of XML records from disk.

    Copies the bundled static-harvest fixtures to a unique /tmp directory,
    launches a HarvestStaticXMLJob, polls until the job reports itself
    available, then asserts on record count and indexing failures.
    '''

    # copy test data to /tmp
    payload_dir = '/tmp/%s' % uuid.uuid4().hex
    shutil.copytree('/opt/combine/tests/data/static_harvest_data', payload_dir)

    # emulate request.POST
    request_dict = {
        'dbdd': '',
        'job_note': '',
        'xpath_record_id': '',
        'static_filepath': payload_dir,
        'fm_config_json': '{"add_literals":{},"capture_attribute_values":[],"concat_values_on_all_fields":false,"concat_values_on_fields":{},"copy_to":{},"copy_to_regex":{},"copy_value_to_regex":{},"error_on_delims_collision":false,"exclude_attributes":[],"exclude_elements":[],"include_all_attributes":false,"include_attributes":[],"include_sibling_id":false,"multivalue_delim":"|","node_delim":"_","ns_prefix_delim":"|","remove_copied_key":true,"remove_copied_value":false,"remove_ns_prefix":true,"repeating_element_suffix_count":false,"self_describing":false,"skip_attribute_ns_declarations":true,"skip_repeating_values":true,"skip_root":false,"split_values_on_all_fields":false,"split_values_on_fields":{}}',
        'static_payload': '',
        'job_name': '',
        'field_mapper': 'default',
        'rits': '',
        'additional_namespace_decs': 'xmlns:mods="http://www.loc.gov/mods/v3"',
        'document_element_root': 'mods:mods'
    }
    query_dict = QueryDict('', mutable=True)
    query_dict.update(request_dict)

    # init job, using Variable Object (VO)
    cjob = CombineJob.init_combine_job(
        user=VO.user,
        record_group=VO.rg,
        job_type_class=HarvestStaticXMLJob,
        job_params=query_dict,
        files={},
        hash_payload_filename=False
    )

    # start job; if it failed to launch, mark it failed
    # (was `job_status == False` -- PEP8 E712, compare singletons with `is`)
    job_status = cjob.start_job()
    if job_status is False:
        cjob.job.status = 'failed'
        cjob.job.save()

    # poll for up to ~480 seconds until the job becomes available
    for _ in range(480):
        time.sleep(1)
        cjob.job.update_status()
        if cjob.job.status == 'available':
            break

    # save static harvest job to VO for downstream tests
    VO.static_harvest_cjob = cjob

    # remove payload_dir
    shutil.rmtree(payload_dir)

    # assert job is done and available via livy
    assert VO.static_harvest_cjob.job.status == 'available'

    # assert record count is 250
    assert VO.static_harvest_cjob.job.record_count == 250

    # assert no indexing failures
    assert len(VO.static_harvest_cjob.get_indexing_failures()) == 0
# #############################################################################
# # Test Transform
# #############################################################################
def prepare_transform():

    '''
    Build a temporary XSLT Transformation scenario from
    tests/data/mods_transform.xsl and return the saved model instance.
    '''

    with open('tests/data/mods_transform.xsl', 'r') as handle:
        xsl_payload = handle.read()

    scenario = Transformation(
        name='temp_mods_transformation',
        payload=xsl_payload,
        transformation_type='xslt',
        filepath='will_be_updated'
    )
    scenario.save()

    return scenario
@pytest.mark.run(order=4)
def test_static_transform(VO):

    '''
    Transform the records harvested in test_static_harvest with a temporary
    XSLT transformation scenario, then assert on record count and indexing.
    '''

    # prepare and capture temporary transformation scenario
    VO.transformation_scenario = prepare_transform()

    # emulate request.POST
    request_dict = {
        'dbdd': '',
        'field_mapper': 'default',
        'filter_dupe_record_ids': 'true',
        'fm_config_json': '{"add_literals":{},"capture_attribute_values":[],"concat_values_on_all_fields":false,"concat_values_on_fields":{},"copy_to":{},"copy_to_regex":{},"copy_value_to_regex":{},"error_on_delims_collision":false,"exclude_attributes":[],"exclude_elements":[],"include_all_attributes":false,"include_attributes":[],"include_sibling_id":false,"multivalue_delim":"|","node_delim":"_","ns_prefix_delim":"|","remove_copied_key":true,"remove_copied_value":false,"remove_ns_prefix":true,"repeating_element_suffix_count":false,"self_describing":false,"skip_attribute_ns_declarations":true,"skip_repeating_values":true,"skip_root":false,"split_values_on_all_fields":false,"split_values_on_fields":{}}',
        'input_es_query_valve': '',
        'input_job_id': VO.static_harvest_cjob.job.id,
        'input_numerical_valve': '',
        'input_validity_valve': 'all',
        'job_name': '',
        'job_note': '',
        'rits': '',
        'sel_trans_json': '[{"index":0,"trans_id":%s}]' % VO.transformation_scenario.id
    }
    query_dict = QueryDict('', mutable=True)
    query_dict.update(request_dict)

    # init job
    cjob = CombineJob.init_combine_job(
        user=VO.user,
        record_group=VO.rg,
        job_type_class=TransformJob,
        job_params=query_dict)

    # start job; if it failed to launch, mark it failed
    # (was `job_status == False` -- PEP8 E712, compare singletons with `is`)
    job_status = cjob.start_job()
    if job_status is False:
        cjob.job.status = 'failed'
        cjob.job.save()

    # poll for up to ~480 seconds until the job becomes available
    for _ in range(480):
        time.sleep(1)
        cjob.job.update_status()
        if cjob.job.status == 'available':
            break

    # save transform job to VO for downstream tests
    VO.static_transform_cjob = cjob

    # assert job is done and available via livy
    assert VO.static_transform_cjob.job.status == 'available'

    # assert record count is 250
    assert VO.static_transform_cjob.job.record_count == 250

    # assert no indexing failures
    assert len(VO.static_transform_cjob.get_indexing_failures()) == 0

    # remove transformation scenario now that the job has consumed it
    assert VO.transformation_scenario.delete()[0] > 0
# #############################################################################
# # Test Validation Scenarios
# #############################################################################
@pytest.mark.run(order=5)
def test_add_schematron_validation_scenario(VO):

    '''
    Register a temporary schematron ValidationScenario from test data.
    '''

    with open('tests/data/schematron_validation.sch', 'r') as handle:
        sch_payload = handle.read()

    scenario = ValidationScenario(
        name='temp_vs_%s' % str(uuid.uuid4()),
        payload=sch_payload,
        validation_type='sch',
        default_run=False
    )
    scenario.save()

    # pin to VO for the validation and teardown tests
    VO.schematron_validation_scenario = scenario

    # assert creation
    assert type(VO.schematron_validation_scenario.id) == int
@pytest.mark.run(order=6)
def test_add_python_validation_scenario(VO):

    '''
    Register a temporary python-snippet ValidationScenario from test data.
    '''

    with open('tests/data/python_validation.py', 'r') as handle:
        py_payload = handle.read()

    scenario = ValidationScenario(
        name='temp_vs_%s' % str(uuid.uuid4()),
        payload=py_payload,
        validation_type='python',
        default_run=False
    )
    scenario.save()

    # pin to VO for the validation and teardown tests
    VO.python_validation_scenario = scenario

    # assert creation
    assert type(VO.python_validation_scenario.id) == int
@pytest.mark.run(order=7)
def test_schematron_validation(VO):

    '''
    Validate one harvested and one transformed record against the
    schematron scenario; expect 2 and 1 failures respectively.
    '''

    # pin first records from each job for this and the following test
    VO.harvest_record = VO.static_harvest_cjob.job.get_records().first()
    VO.transform_record = VO.static_transform_cjob.job.get_records().first()

    # harvest record: expect a failure count of 2
    results = VO.schematron_validation_scenario.validate_record(VO.harvest_record)
    assert results['parsed']['fail_count'] == 2

    # transform record: expect a failure count of 1
    results = VO.schematron_validation_scenario.validate_record(VO.transform_record)
    assert results['parsed']['fail_count'] == 1
@pytest.mark.run(order=8)
def test_python_validation(VO):

    '''
    Validate the same two records with the python-snippet scenario;
    expect a failure count of 1 for each.
    '''

    for record in (VO.harvest_record, VO.transform_record):
        results = VO.python_validation_scenario.validate_record(record)
        print(results)
        assert results['parsed']['fail_count'] == 1
# #############################################################################
# # Test Duplicate/Merge Job
# #############################################################################
@pytest.mark.run(order=9)
def test_merge_duplicate(VO):

    '''
    Merge the harvest and transform jobs into a single MergeJob, applying
    both newly created validation scenarios, then assert on record count
    and validation failures.
    '''

    # emulate request.POST
    request_dict = {
        'dbdd': '',
        'field_mapper': 'default',
        'filter_dupe_record_ids': 'true',
        'fm_config_json': '{"add_literals":{},"capture_attribute_values":[],"concat_values_on_all_fields":false,"concat_values_on_fields":{},"copy_to":{},"copy_to_regex":{},"copy_value_to_regex":{},"error_on_delims_collision":false,"exclude_attributes":[],"exclude_elements":[],"include_all_attributes":false,"include_attributes":[],"include_sibling_id":false,"multivalue_delim":"|","node_delim":"_","ns_prefix_delim":"|","remove_copied_key":true,"remove_copied_value":false,"remove_ns_prefix":true,"repeating_element_suffix_count":false,"self_describing":false,"skip_attribute_ns_declarations":true,"skip_repeating_values":true,"skip_root":false,"split_values_on_all_fields":false,"split_values_on_fields":{}}',
        'input_es_query_valve': '',
        'input_numerical_valve': '',
        'input_validity_valve': 'all',
        'job_name': '',
        'job_note': '',
        'rits': ''
    }
    query_dict = QueryDict('', mutable=True)
    query_dict.update(request_dict)

    # set input jobs with QueryDict.setlist
    query_dict.setlist('input_job_id', [
        VO.static_harvest_cjob.job.id,
        VO.static_transform_cjob.job.id
    ])

    # set validation scenarios with QueryDict.setlist
    query_dict.setlist('validation_scenario', [
        VO.schematron_validation_scenario.id,
        VO.python_validation_scenario.id
    ])

    # init job
    cjob = CombineJob.init_combine_job(
        user=VO.user,
        record_group=VO.rg,
        job_type_class=MergeJob,
        job_params=query_dict)

    # start job; if it failed to launch, mark it failed
    # (was `job_status == False` -- PEP8 E712, compare singletons with `is`)
    job_status = cjob.start_job()
    if job_status is False:
        cjob.job.status = 'failed'
        cjob.job.save()

    # poll for up to ~480 seconds until the job becomes available
    for _ in range(480):
        time.sleep(1)
        cjob.job.update_status()
        if cjob.job.status == 'available':
            break

    # save merge job to VO
    VO.merge_cjob = cjob

    # assert job is done and available via livy
    assert VO.merge_cjob.job.status == 'available'

    # assert record count is 250 (duplicate record ids were filtered)
    assert VO.merge_cjob.job.record_count == 250

    # assert both validation scenarios were applied
    job_validation_scenarios = VO.merge_cjob.job.jobvalidation_set.all()
    assert job_validation_scenarios.count() == 2

    # each scenario flags 232 of the 250 records
    # (the old comment claimed "250 failures", contradicting the assertion)
    for jv in job_validation_scenarios:
        assert jv.get_record_validation_failures().count() == 232

    # assert no indexing failures
    assert len(VO.merge_cjob.get_indexing_failures()) == 0
#############################################################################
# Tests Teardown
#############################################################################
@pytest.mark.last
def test_teardown(keep_records, VO):

    '''
    Delete the test Organization (unless --keep_records was passed) and the
    two temporary validation scenarios.
    '''

    if keep_records:
        # records intentionally left behind for manual inspection
        assert True
    else:
        # deleting the org cascades to its record groups and jobs
        assert VO.org.delete()[0] > 0

    assert VO.schematron_validation_scenario.delete()[0] > 0
    assert VO.python_validation_scenario.delete()[0] > 0
| 31.906393 | 712 | 0.63127 | 0 | 0 | 0 | 0 | 11,903 | 0.851735 | 0 | 0 | 6,828 | 0.488587 |
0bfa78dfdaf19508cfab604315689fd88cd82b24 | 5,188 | py | Python | tree.py | rainer85ah/SupervisedLearning | 7d25d139434c852a6d14b98c4f089bd42814612d | [
"MIT"
] | null | null | null | tree.py | rainer85ah/SupervisedLearning | 7d25d139434c852a6d14b98c4f089bd42814612d | [
"MIT"
] | null | null | null | tree.py | rainer85ah/SupervisedLearning | 7d25d139434c852a6d14b98c4f089bd42814612d | [
"MIT"
] | null | null | null | """ Decision Trees - Supervised learning: 1-Classification*, 2-Regression.
D.T.s are a non-parametric supervised learning method used for classification and regression. The goal is to create a
model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
Some advantages of decision trees are:
1- Simple to understand and to interpret. Trees can be visualised.
2- Requires little data preparation. Other techniques often require data normalisation, dummy variables need to be
created and blank values to be removed. Note however that this module does not support missing values.
3- The cost of using the tree (i.e., predicting data) is logarithmic in the number of data points used to train the tree.
4- Able to handle both numerical and categorical data. Other techniques are usually specialised in analysing datasets
that have only one type of variable. See algorithms for more information.
5- Able to handle multi-output problems.
6- Uses a white box model. If a given situation is observable in a model, the explanation for the condition is easily
explained by boolean logic. By contrast, in a black box model (e.g., in an artificial neural network), results may be
more difficult to interpret.
7- Possible to validate a model using statistical tests. That makes it possible to account for the reliability
of the model.
8- Performs well even if its assumptions are somewhat violated by the true model from which the data were generated.
The disadvantages of decision trees include:
1- Decision-tree learners can create over-complex trees that do not generalise the data well.This is called overfitting.
Mechanisms such as pruning (not currently supported), setting the minimum number of samples required at a leaf node or
setting the maximum depth of the tree are necessary to avoid this problem.
2- Decision trees can be unstable because small variations in the data might result in a completely different tree
being generated. This problem is mitigated by using decision trees within an ensemble.
3- The problem of learning an optimal decision tree is known to be NP-complete under several aspects of optimality and
even for simple concepts. Consequently, practical decision-tree learning algorithms are based on heuristic algorithms
such as the greedy algorithm where locally optimal decisions are made at each node. Such algorithms cannot guarantee to
return the globally optimal decision tree. This can be mitigated by training multiple trees in an ensemble learner,
where the features and samples are randomly sampled with replacement.
4- There are concepts that are hard to learn because decision trees do not express them easily, such as XOR, parity or
multiplexer problems.
5- Decision tree learners create biased trees if some classes dominate. It is therefore recommended to balance the
dataset prior to fitting with the decision tree.
ID3 (Iterative Dichotomiser 3) was developed in 1986 by Ross Quinlan. The algorithm creates a multiway tree, finding
for each node (i.e. in a greedy manner) the categorical feature that will yield the largest information gain for
categorical targets. Trees are grown to their maximum size and then a pruning step is usually applied to improve the
ability of the tree to generalise to unseen data.
C4.5 is the successor to ID3 and removed the restriction that features must be categorical by dynamically defining a
discrete attribute (based on numerical variables) that partitions the continuous attribute value into a discrete set
of intervals. C4.5 converts the trained trees (i.e. the output of the ID3 algorithm) into sets of if-then rules.
These accuracy of each rule is then evaluated to determine the order in which they should be applied.
Pruning is done by removing a rule's precondition if the accuracy of the rule improves without it.
"""
import numpy as np
from sklearn.externals import joblib
from sklearn import datasets, metrics, tree
from sklearn.cross_validation import train_test_split
# Load the bundled breast-cancer dataset and cast to the dtypes used throughout.
cancer = datasets.load_breast_cancer()
features = np.asarray(cancer.data, dtype='float32')
targets = np.asarray(cancer.target, dtype='int32')
# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    features, targets, train_size=0.8, test_size=0.2)
print('Tree Learning... Fitting... ')
# Hyperparameters are spelled out explicitly; these are the library defaults
# except for the entropy split criterion.
tree_clf = tree.DecisionTreeClassifier(
    criterion='entropy', splitter='best', max_depth=None, min_samples_split=2,
    min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
    random_state=None, max_leaf_nodes=None, class_weight=None, presort=False)
tree_clf.fit(X=X_train, y=y_train)
print('Tree Predicting... ')
predicted = tree_clf.predict(X=X_test)
print("Results: \n %s" % metrics.classification_report(y_test, predicted))
print("Confusion Matrix: \n %s" % metrics.confusion_matrix(y_test, predicted))
print("\nMean Accuracy: %.4f " % tree_clf.score(X=X_test, y=y_test))
# Persist the fitted model for later reuse.
print("Tree Saving in ... /Output/Tree_model.pkl")
joblib.dump(tree_clf, '/home/rainer85ah/PycharmProjects/DiagnosticCancerSolution/Output/Tree_model.pkl')
0bfbd279db6c90816fbfdf80c181a91b1e674dce | 8,984 | py | Python | src/main.py | ckbjimmy/clneg | 7d74a31394ee946a8683a2361c7580071e65562b | [
"MIT"
] | 22 | 2018-10-24T02:19:13.000Z | 2022-01-04T23:13:16.000Z | src/main.py | ckbjimmy/clneg | 7d74a31394ee946a8683a2361c7580071e65562b | [
"MIT"
] | 2 | 2020-04-01T11:20:51.000Z | 2021-05-21T02:15:49.000Z | src/main.py | ckbjimmy/clneg | 7d74a31394ee946a8683a2361c7580071e65562b | [
"MIT"
] | 8 | 2019-12-04T13:25:46.000Z | 2021-02-23T04:30:17.000Z | import os
import sys
import re
import numpy as np
#from create_neglist import *
import pandas as pd
pd.set_option('display.max_rows', None)  # never truncate printed DataFrames
from create_tokenization import *
from pycorenlp import StanfordCoreNLP
from concept_extraction import *
from syntactic_parsing import *
from tree_rules import *
from nltk.corpus import stopwords
from difflib import SequenceMatcher
def print_out_result(df):
    """Print extracted concepts grouped by report section.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'section', 'preferred' and 'negation' columns, where
        ``negation`` is 1 for a negated concept.

    Each non-empty section is printed as a '--- <section> ---' header line
    followed by a comma-separated list of preferred terms, suffixed with
    '(-)' for negated concepts and '(+)' otherwise.

    Fixes: no longer assigns into a DataFrame slice (SettingWithCopyWarning)
    and iterates sections in sorted order instead of arbitrary set order.
    """
    for section in sorted(set(df['section'].values)):
        if section == '':
            continue
        subset = df[df['section'] == section]
        labels = [
            preferred + ('(-)' if negated == 1 else '(+)')
            for preferred, negated in zip(subset['preferred'], subset['negation'])
        ]
        print('--- ' + section + ' ---\n' + ', '.join(labels))
if __name__ == '__main__':
    # Pipeline: tokenize note -> cTAKES concept extraction -> constituency
    # parse -> tregex/tsurgeon negation-scope extraction -> flag negated
    # concepts -> print report. Requires a local CoreNLP server, cTAKES
    # output, and the Stanford tregex toolkit on disk.
    # can be extended to batch processing if needed (feed a list of filenames)
    #filenames = ['dev.txt']
    #filenames = ['3.txt']
    #filenames = ['test_ready.txt']
    filenames = [sys.argv[1]]  # single input note file passed on the command line
    data_dir = '../data/'
    ctakes_folder = './ctakes/'
    tregex_dir = './stanford-tregex-2018-02-27/'
    # negated term list (use the human annotated version)
    neg_list = pd.read_csv(data_dir + 'neg_list_complete.txt', sep='\t', header=0)
    neg = neg_list['ITEM'].values
    # Two variants per trigger: fully space-padded, and bare-prefix forms.
    neg_term = [' ' + item + ' ' for item in neg]
    neg_term.extend(item + ' ' for item in neg)
    # Assumes a Stanford CoreNLP server is already running on port 9000.
    nlp = StanfordCoreNLP('http://localhost:9000')
    hard_section_list = mimic_tokenize(data_dir, filenames, nlp, neg_term)
    df = ctakes_concept_extraction(data_dir, ctakes_folder, hard_section_list)
    # presumably sent_id != 0 marks sentences containing a negation trigger
    # — TODO confirm against ctakes_concept_extraction.
    df1 = df[df.sent_id != 0]
    df0 = df[df.sent_id == 0]
    openNLP = OpenNLP()
    sl, tree_list = synparse(data_dir, neg_list, openNLP)
    # NOTE(review): this rebinding shadows the imported nltk `stopwords`
    # module with a plain list for the rest of the script.
    stopwords = stopwords.words('english')
    RM_POS = ['NN', 'NNS', 'RB', 'NP', 'ADVP', 'IN']
    RM_CP = ['however', 'although', 'but']
    print("\n--- Constituency tree parsing ---\n")
    for i, t in enumerate(tree_list):
        print('sent: ' + str(i))
        print('original: ' + sl[i])
        # get negated part of the sentence
        with open(data_dir + 'ntree_tmp', 'w') as fw:
            fw.write(t)
        # Strip parse-tree markup (POS labels, parentheses) to recover the
        # raw token string. NOTE(review): non-raw regex string — the escapes
        # happen to survive, but r'...' would be safer.
        s = re.sub('\([A-Z]*\$? |\(-[A-Z]+- |\)|\)|\(, |\(. ', '', t)
        print('neg part: ' + s)
        # find what neg term is matched and use its neg type
        try:
            m = ''
            # Longest triggers first so a longer phrase beats its substrings.
            for neg in [x for x in sorted(neg_list['ITEM'].tolist(), key=len, reverse=True)]:
                #for neg in ['negative for']:
                match = SequenceMatcher(None, s, neg).find_longest_match(0, len(s), 0, len(neg))
                matched_string = s[match.a: match.a + match.size]
                try:  # if next char might be different, means partial match
                    if s[match.a + match.size + 1] == neg[match.b + match.size + 1] and \
                            s[match.a + match.size + 2] == neg[match.b + match.size + 2]:
                        if (len(matched_string) > len(m)) and \
                                ((matched_string[0] == s[0] and matched_string[1] == s[1]) or \
                                (matched_string[len(matched_string)-1] == s[len(s)-1] and matched_string[len(matched_string)-2] == s[len(s)-2])): # either match from the beginning or laast
                            m = matched_string
                            matched_neg_item = neg[match.b: match.b + match.size]
                            # trim a trailing space left by padded triggers
                            if matched_neg_item[len(matched_neg_item)-1] == ' ':
                                matched_neg_item = matched_neg_item[0:len(matched_neg_item)-1]
                    else:
                        continue
                except:  # if no next char, means full match
                    try:
                        if (len(matched_string) > len(m)) and \
                                ((matched_string[0] == s[0] and matched_string[1] == s[1]) or \
                                (matched_string[len(matched_string)-1] == s[len(s)-1] and matched_string[len(matched_string)-2] == s[len(s)-2])): # either match from the beginning or laast
                            m = matched_string
                            matched_neg_item = neg[match.b: match.b + match.size]
                            if matched_neg_item[len(matched_neg_item)-1] == ' ':
                                matched_neg_item = matched_neg_item[0:len(matched_neg_item)-1]
                    except:  # match only one char!? rare case
                        if (len(matched_string) > len(m)) and \
                                (matched_string[0] == s[0]): # either match from the beginning or laast
                            m = matched_string
                            matched_neg_item = neg[match.b: match.b + match.size]
                            if matched_neg_item[len(matched_neg_item)-1] == ' ':
                                matched_neg_item = matched_neg_item[0:len(matched_neg_item)-1]
            # NOTE(review): if no trigger ever matched, matched_neg_item is
            # unbound here — the outer bare except silently skips such cases.
            print('negated term: ' + matched_neg_item)
            neg_type = neg_list[neg_list.ITEM == matched_neg_item]['TYPE'].values[0]
            print('--- tregex/tsurgeon with negated type: ' + neg_type)
            # run tregex/tsurgeon based on the selected neg type
            ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', neg_type)
            # deal with corner cases
            if neg_type == 'NP' and ('that' in ts_out):
                print('--- NP with that')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'NP-denies')
            if neg_type == 'NP' and s == ts_out:
                print('--- NP without S node')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'NP-nS')
            if neg_type == 'PP' and sum([item in neg_list['ITEM'].tolist() for item in ts_out.split()]) > 0:
                print('--- NP without S node')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'NP-nS')
            if neg_type == 'VP-A' and s == ts_out:
                print('--- VP-A remove denies')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'NP-denies')
            if neg_type == 'ADVP-A' and s == ts_out:
                print('--- ADVP-A type 2')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'ADVP-A2')
            if neg_type == 'ADVP-A' and s == ts_out:
                print('--- ADVP-A remove SBAR')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'ADVP-sbar')
            if neg_type == 'ADVP-A' and s == ts_out: # no longer
                print('--- ADVP-A remove ADVP')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'ADVP-advp')
            if neg_type == 'ADVP-A' and s == ts_out:
                print('--- ADVP-A remove RB')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'ADVP-RB')
            if 'SBAR' in tree:
                print('--- forced remove SBAR')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'forced-sbar')
            # if sum([item in neg_list['ITEM'].tolist() for item in ts_out.split()]) > 0:
            #     print('--- remove neg terms if exists')
            #     ts_out = ' '.join(ts_out.split()[1:])
            if sum([item in RM_POS for item in ts_out.split()]) > 0:
                print('--- remove POS')
                ts_out = ' '.join(ts_out.split()[1:])
            if sum([item in RM_CP for item in ts_out.split()]) > 0:
                print('--- remove CP')
                # truncate the scope at the first coordinating conjunction
                for cp in RM_CP:
                    try:
                        cp_loc = ts_out.split().index(cp)
                    except:
                        continue
                    ts_out = ' '.join(ts_out.split()[:cp_loc])
            if ts_out.split()[0] in neg_list['ITEM'].tolist() + stopwords:
                print('--- remove first token f if f in negated list or stopword list')
                ts_out = ' '.join(ts_out.split()[1:])
            if neg_type == 'VP-A' and len(ts_out) < 2:
                print('--- VP-A CC')
                ts_out, tree = tregex_tsurgeon(data_dir + 'ntree_tmp', 'VP-CC')
            print('>> ' + ts_out)
            try:
                neg_range = (sl[i].index(ts_out) + 1, sl[i].index(ts_out) + len(ts_out)) # negated place
            except:
                # NOTE(review): fallback uses len(sl) (sentence count), not
                # len(sl[i]) (sentence length) — looks like a bug; confirm.
                neg_range = (0, len(sl))
            print('>> negated span: ' + str(neg_range) + '\n')
            # Flag every concept whose offset falls inside the negated span.
            # NOTE(review): chained assignment df1['negation'][idx] may hit
            # a copy, not df1, under pandas (SettingWithCopy) — verify.
            for idx in df1.index:
                if df1['sent_id'][idx] == i+1 and df1['sent_loc'][idx] in range(neg_range[0], neg_range[1]+1):
                    df1['negation'][idx] = 1
        except: # need to debug why very few cases don't work
            continue
    os.system('rm ../data/ntree_tmp')
    # preserve the longest strings/concepts
    df_s = df1  # NOTE(review): alias, not a copy — mutations hit df1 too
    df_s['start'] = df_s['start'].astype(int)
    df_s['len'] = df_s['original'].str.len()
    df_s = df_s.sort_values('len', ascending=False)
    df_s = df_s.drop_duplicates(['sent_id', 'start'], keep='first')
    df_s = df_s.drop_duplicates(['sent_id', 'end'], keep='first')
    df_s = df_s.sort_values('start', ascending=True)
    df_s.to_csv('../data/final_output', sep='\t', index=False)
    # NOTE(review): next line computes a filter and discards it (no effect).
    df_s[(df_s.sent_id != 0) & (df_s.section != '')]
    df_ss = df_s[(df_s.sent_id != 0) & (df_s.section != '')]
    print("\n--- Final output ---\n")
    print_out_result(df_ss)
| 45.145729 | 182 | 0.547863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,346 | 0.261131 |
0bfc78ffcaa52c031df7862987e09dbeaa8057b9 | 61,939 | py | Python | sdk/python/pulumi_azure_native/edgeorder/v20201201preview/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/edgeorder/v20201201preview/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/edgeorder/v20201201preview/outputs.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AdditionalErrorInfoResponse',
'AddressDetailsResponse',
'AddressPropertiesResponse',
'AvailabilityInformationResponseResult',
'BillingModelResponseResult',
'CloudErrorResponse',
'ConfigurationResponseResult',
'ContactDetailsResponse',
'CostInformationResponseResult',
'DescriptionResponseResult',
'DeviceDetailsResponse',
'FilterablePropertyResponseResult',
'HierarchyInformationResponse',
'ImageInformationResponseResult',
'LinkResponseResult',
'MeterDetailsResponseResult',
'NotificationPreferenceResponse',
'OrderDetailsResponse',
'OrderStatusDetailsResponse',
'PreferencesResponse',
'ProductDetailsResponse',
'ProductFamilyResponseResult',
'ProductLineResponseResult',
'ProductResponseResult',
'ShippingAddressResponse',
'ShippingDetailsResponse',
'SpecificationResponseResult',
'SystemDataResponse',
'TransportPreferencesResponse',
]
@pulumi.output_type
class AdditionalErrorInfoResponse(dict):
    """Additional info attached to a cloud error.

    NOTE(review): the generator emitted no docstrings for this type; field
    semantics are inferred from the names — confirm against the Azure
    EdgeOrder API spec.
    """
    def __init__(__self__, *,
                 info: Optional[Any] = None,
                 type: Optional[str] = None):
        # Both fields are optional; only provided values are stored.
        if info is not None:
            pulumi.set(__self__, "info", info)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def info(self) -> Optional[Any]:
        return pulumi.get(self, "info")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AddressDetailsResponse(dict):
    """
    Address details for an order.
    """
    def __init__(__self__, *,
                 return_address: 'outputs.AddressPropertiesResponse',
                 shipping_address: 'outputs.AddressPropertiesResponse'):
        """
        Address details for an order.
        :param 'AddressPropertiesResponseArgs' return_address: Return shipping address
        :param 'AddressPropertiesResponseArgs' shipping_address: Customer address and contact details. It should be address resource
        """
        # Both addresses are required on this output type.
        pulumi.set(__self__, "return_address", return_address)
        pulumi.set(__self__, "shipping_address", shipping_address)

    @property
    @pulumi.getter(name="returnAddress")
    def return_address(self) -> 'outputs.AddressPropertiesResponse':
        """
        Return shipping address
        """
        return pulumi.get(self, "return_address")

    @property
    @pulumi.getter(name="shippingAddress")
    def shipping_address(self) -> 'outputs.AddressPropertiesResponse':
        """
        Customer address and contact details. It should be address resource
        """
        return pulumi.get(self, "shipping_address")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AddressPropertiesResponse(dict):
    """
    Address Properties
    """
    def __init__(__self__, *,
                 contact_details: 'outputs.ContactDetailsResponse',
                 shipping_address: Optional['outputs.ShippingAddressResponse'] = None):
        """
        Address Properties
        :param 'ContactDetailsResponseArgs' contact_details: Contact details for the address
        :param 'ShippingAddressResponseArgs' shipping_address: Shipping details for the address
        """
        pulumi.set(__self__, "contact_details", contact_details)
        # shipping_address is optional; only stored when provided.
        if shipping_address is not None:
            pulumi.set(__self__, "shipping_address", shipping_address)

    @property
    @pulumi.getter(name="contactDetails")
    def contact_details(self) -> 'outputs.ContactDetailsResponse':
        """
        Contact details for the address
        """
        return pulumi.get(self, "contact_details")

    @property
    @pulumi.getter(name="shippingAddress")
    def shipping_address(self) -> Optional['outputs.ShippingAddressResponse']:
        """
        Shipping details for the address
        """
        return pulumi.get(self, "shipping_address")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AvailabilityInformationResponseResult(dict):
    """
    Availability information of a product system.
    """
    def __init__(__self__, *,
                 availability_stage: str,
                 disabled_reason: str,
                 disabled_reason_message: str):
        """
        Availability information of a product system.
        :param str availability_stage: Current availability stage of the product. Availability stage
        :param str disabled_reason: Reason why the product is disabled.
        :param str disabled_reason_message: Message for why the product is disabled.
        """
        # All three fields are required on this output type.
        pulumi.set(__self__, "availability_stage", availability_stage)
        pulumi.set(__self__, "disabled_reason", disabled_reason)
        pulumi.set(__self__, "disabled_reason_message", disabled_reason_message)

    @property
    @pulumi.getter(name="availabilityStage")
    def availability_stage(self) -> str:
        """
        Current availability stage of the product. Availability stage
        """
        return pulumi.get(self, "availability_stage")

    @property
    @pulumi.getter(name="disabledReason")
    def disabled_reason(self) -> str:
        """
        Reason why the product is disabled.
        """
        return pulumi.get(self, "disabled_reason")

    @property
    @pulumi.getter(name="disabledReasonMessage")
    def disabled_reason_message(self) -> str:
        """
        Message for why the product is disabled.
        """
        return pulumi.get(self, "disabled_reason_message")
@pulumi.output_type
class BillingModelResponseResult(dict):
    """
    Model to represent the billing cycle
    """
    def __init__(__self__, *,
                 model: str):
        """
        Model to represent the billing cycle
        :param str model: String to represent the billing model
        """
        # Single required field.
        pulumi.set(__self__, "model", model)

    @property
    @pulumi.getter
    def model(self) -> str:
        """
        String to represent the billing model
        """
        return pulumi.get(self, "model")
@pulumi.output_type
class CloudErrorResponse(dict):
    """ARM-style error payload: code/message/target plus nested details.

    NOTE(review): the generator emitted no docstrings for this type; field
    semantics are inferred from the names — confirm against the Azure
    EdgeOrder API spec.
    """
    def __init__(__self__, *,
                 additional_info: Sequence['outputs.AdditionalErrorInfoResponse'],
                 details: Sequence['outputs.CloudErrorResponse'],
                 code: Optional[str] = None,
                 message: Optional[str] = None,
                 target: Optional[str] = None):
        # additional_info and details are required (possibly empty) lists;
        # details recursively nests further CloudErrorResponse entries.
        pulumi.set(__self__, "additional_info", additional_info)
        pulumi.set(__self__, "details", details)
        if code is not None:
            pulumi.set(__self__, "code", code)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if target is not None:
            pulumi.set(__self__, "target", target)

    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Sequence['outputs.AdditionalErrorInfoResponse']:
        return pulumi.get(self, "additional_info")

    @property
    @pulumi.getter
    def details(self) -> Sequence['outputs.CloudErrorResponse']:
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def code(self) -> Optional[str]:
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def target(self) -> Optional[str]:
        return pulumi.get(self, "target")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationResponseResult(dict):
    """
    Configuration object.
    """
    def __init__(__self__, *,
                 availability_information: 'outputs.AvailabilityInformationResponseResult',
                 cost_information: 'outputs.CostInformationResponseResult',
                 description: 'outputs.DescriptionResponseResult',
                 display_name: str,
                 filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
                 hierarchy_information: 'outputs.HierarchyInformationResponse',
                 image_information: Sequence['outputs.ImageInformationResponseResult'],
                 specifications: Sequence['outputs.SpecificationResponseResult']):
        """
        Configuration object.
        :param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
        :param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
        :param 'DescriptionResponseArgs' description: Description related to the product system.
        :param str display_name: Display Name for the product system.
        :param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
        :param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
        :param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
        :param Sequence['SpecificationResponseArgs'] specifications: Specifications of the configuration
        """
        # All fields are required on this output type.
        pulumi.set(__self__, "availability_information", availability_information)
        pulumi.set(__self__, "cost_information", cost_information)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "filterable_properties", filterable_properties)
        pulumi.set(__self__, "hierarchy_information", hierarchy_information)
        pulumi.set(__self__, "image_information", image_information)
        pulumi.set(__self__, "specifications", specifications)

    @property
    @pulumi.getter(name="availabilityInformation")
    def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
        """
        Availability information of the product system.
        """
        return pulumi.get(self, "availability_information")

    @property
    @pulumi.getter(name="costInformation")
    def cost_information(self) -> 'outputs.CostInformationResponseResult':
        """
        Cost information for the product system.
        """
        return pulumi.get(self, "cost_information")

    @property
    @pulumi.getter
    def description(self) -> 'outputs.DescriptionResponseResult':
        """
        Description related to the product system.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Display Name for the product system.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="filterableProperties")
    def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
        """
        list of filters supported for a product
        """
        return pulumi.get(self, "filterable_properties")

    @property
    @pulumi.getter(name="hierarchyInformation")
    def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
        """
        Hierarchy information of the product system.
        """
        return pulumi.get(self, "hierarchy_information")

    @property
    @pulumi.getter(name="imageInformation")
    def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
        """
        Image information for the product system.
        """
        return pulumi.get(self, "image_information")

    @property
    @pulumi.getter
    def specifications(self) -> Sequence['outputs.SpecificationResponseResult']:
        """
        Specifications of the configuration
        """
        return pulumi.get(self, "specifications")
@pulumi.output_type
class ContactDetailsResponse(dict):
    """
    Contact Details.
    """
    def __init__(__self__, *,
                 contact_name: str,
                 phone: str,
                 mobile: Optional[str] = None,
                 phone_extension: Optional[str] = None):
        """
        Contact Details.
        :param str contact_name: Contact name of the person.
        :param str phone: Phone number of the contact person.
        :param str mobile: Mobile number of the contact person.
        :param str phone_extension: Phone extension number of the contact person.
        """
        pulumi.set(__self__, "contact_name", contact_name)
        pulumi.set(__self__, "phone", phone)
        # mobile and phone_extension are optional; stored only when provided.
        if mobile is not None:
            pulumi.set(__self__, "mobile", mobile)
        if phone_extension is not None:
            pulumi.set(__self__, "phone_extension", phone_extension)

    @property
    @pulumi.getter(name="contactName")
    def contact_name(self) -> str:
        """
        Contact name of the person.
        """
        return pulumi.get(self, "contact_name")

    @property
    @pulumi.getter
    def phone(self) -> str:
        """
        Phone number of the contact person.
        """
        return pulumi.get(self, "phone")

    @property
    @pulumi.getter
    def mobile(self) -> Optional[str]:
        """
        Mobile number of the contact person.
        """
        return pulumi.get(self, "mobile")

    @property
    @pulumi.getter(name="phoneExtension")
    def phone_extension(self) -> Optional[str]:
        """
        Phone extension number of the contact person.
        """
        return pulumi.get(self, "phone_extension")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CostInformationResponseResult(dict):
    """
    Cost information for the product system
    """
    def __init__(__self__, *,
                 meter_details: Sequence['outputs.MeterDetailsResponseResult'],
                 primary_meter_type: str):
        """
        Cost information for the product system
        :param Sequence['MeterDetailsResponseArgs'] meter_details: Details on the various billing aspects for the product system.
        :param str primary_meter_type: Primary meter i.e. basic billing type for the product system.
        """
        # Both fields are required on this output type.
        pulumi.set(__self__, "meter_details", meter_details)
        pulumi.set(__self__, "primary_meter_type", primary_meter_type)

    @property
    @pulumi.getter(name="meterDetails")
    def meter_details(self) -> Sequence['outputs.MeterDetailsResponseResult']:
        """
        Details on the various billing aspects for the product system.
        """
        return pulumi.get(self, "meter_details")

    @property
    @pulumi.getter(name="primaryMeterType")
    def primary_meter_type(self) -> str:
        """
        Primary meter i.e. basic billing type for the product system.
        """
        return pulumi.get(self, "primary_meter_type")
@pulumi.output_type
class DescriptionResponseResult(dict):
    """
    Description related properties of a product system.
    """
    def __init__(__self__, *,
                 attributes: Sequence[str],
                 description_type: str,
                 keywords: Sequence[str],
                 links: Sequence['outputs.LinkResponseResult'],
                 long_description: str,
                 short_description: str):
        """
        Description related properties of a product system.
        :param Sequence[str] attributes: Attributes for the product system.
        :param str description_type: Type of description.
        :param Sequence[str] keywords: Keywords for the product system.
        :param Sequence['LinkResponseArgs'] links: Links for the product system.
        :param str long_description: Long description of the product system.
        :param str short_description: Short description of the product system.
        """
        # All fields are required on this output type.
        pulumi.set(__self__, "attributes", attributes)
        pulumi.set(__self__, "description_type", description_type)
        pulumi.set(__self__, "keywords", keywords)
        pulumi.set(__self__, "links", links)
        pulumi.set(__self__, "long_description", long_description)
        pulumi.set(__self__, "short_description", short_description)

    @property
    @pulumi.getter
    def attributes(self) -> Sequence[str]:
        """
        Attributes for the product system.
        """
        return pulumi.get(self, "attributes")

    @property
    @pulumi.getter(name="descriptionType")
    def description_type(self) -> str:
        """
        Type of description.
        """
        return pulumi.get(self, "description_type")

    @property
    @pulumi.getter
    def keywords(self) -> Sequence[str]:
        """
        Keywords for the product system.
        """
        return pulumi.get(self, "keywords")

    @property
    @pulumi.getter
    def links(self) -> Sequence['outputs.LinkResponseResult']:
        """
        Links for the product system.
        """
        return pulumi.get(self, "links")

    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> str:
        """
        Long description of the product system.
        """
        return pulumi.get(self, "long_description")

    @property
    @pulumi.getter(name="shortDescription")
    def short_description(self) -> str:
        """
        Short description of the product system.
        """
        return pulumi.get(self, "short_description")
@pulumi.output_type
class DeviceDetailsResponse(dict):
    """
    Device details.
    """
    def __init__(__self__, *,
                 device_history: Sequence[str],
                 serial_number: str):
        """
        Device details.
        :param Sequence[str] device_history: Package Shipping details
        :param str serial_number: device serial number
        """
        # NOTE(review): the generated doc for device_history says "Package
        # Shipping details", which does not match the field name — confirm
        # against the API spec.
        pulumi.set(__self__, "device_history", device_history)
        pulumi.set(__self__, "serial_number", serial_number)

    @property
    @pulumi.getter(name="deviceHistory")
    def device_history(self) -> Sequence[str]:
        """
        Package Shipping details
        """
        return pulumi.get(self, "device_history")

    @property
    @pulumi.getter(name="serialNumber")
    def serial_number(self) -> str:
        """
        device serial number
        """
        return pulumi.get(self, "serial_number")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class FilterablePropertyResponseResult(dict):
    """
    Class defining the list of filter values on a filter type as part of configuration request.
    """
    def __init__(__self__, *,
                 type: str,
                 supported_values: Optional[Sequence[str]] = None):
        """
        Class defining the list of filter values on a filter type as part of configuration request.
        :param str type: Type of product filter.
        :param Sequence[str] supported_values: Values to be filtered.
        """
        pulumi.set(__self__, "type", type)
        # supported_values is optional; stored only when provided.
        if supported_values is not None:
            pulumi.set(__self__, "supported_values", supported_values)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of product filter.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="supportedValues")
    def supported_values(self) -> Optional[Sequence[str]]:
        """
        Values to be filtered.
        """
        return pulumi.get(self, "supported_values")
@pulumi.output_type
class HierarchyInformationResponse(dict):
    """
    Holds details about product hierarchy information
    """
    def __init__(__self__, *,
                 configuration_name: Optional[str] = None,
                 product_family_name: Optional[str] = None,
                 product_line_name: Optional[str] = None,
                 product_name: Optional[str] = None):
        """
        Holds details about product hierarchy information
        :param str configuration_name: Represents configuration name that uniquely identifies configuration
        :param str product_family_name: Represents product family name that uniquely identifies product family
        :param str product_line_name: Represents product line name that uniquely identifies product line
        :param str product_name: Represents product name that uniquely identifies product
        """
        # Every level of the hierarchy is optional; only provided levels
        # are stored.
        if configuration_name is not None:
            pulumi.set(__self__, "configuration_name", configuration_name)
        if product_family_name is not None:
            pulumi.set(__self__, "product_family_name", product_family_name)
        if product_line_name is not None:
            pulumi.set(__self__, "product_line_name", product_line_name)
        if product_name is not None:
            pulumi.set(__self__, "product_name", product_name)

    @property
    @pulumi.getter(name="configurationName")
    def configuration_name(self) -> Optional[str]:
        """
        Represents configuration name that uniquely identifies configuration
        """
        return pulumi.get(self, "configuration_name")

    @property
    @pulumi.getter(name="productFamilyName")
    def product_family_name(self) -> Optional[str]:
        """
        Represents product family name that uniquely identifies product family
        """
        return pulumi.get(self, "product_family_name")

    @property
    @pulumi.getter(name="productLineName")
    def product_line_name(self) -> Optional[str]:
        """
        Represents product line name that uniquely identifies product line
        """
        return pulumi.get(self, "product_line_name")

    @property
    @pulumi.getter(name="productName")
    def product_name(self) -> Optional[str]:
        """
        Represents product name that uniquely identifies product
        """
        return pulumi.get(self, "product_name")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ImageInformationResponseResult(dict):
    """
    Image for the product
    """
    def __init__(__self__, *,
                 image_type: str,
                 image_url: str):
        """
        Image for the product
        :param str image_type: Type of the image
        :param str image_url: Url of the image
        """
        # Both fields are required on this output type.
        pulumi.set(__self__, "image_type", image_type)
        pulumi.set(__self__, "image_url", image_url)

    @property
    @pulumi.getter(name="imageType")
    def image_type(self) -> str:
        """
        Type of the image
        """
        return pulumi.get(self, "image_type")

    @property
    @pulumi.getter(name="imageUrl")
    def image_url(self) -> str:
        """
        Url of the image
        """
        return pulumi.get(self, "image_url")
@pulumi.output_type
class LinkResponseResult(dict):
    """
    Returns link related to the product
    """
    def __init__(__self__, *,
                 link_type: str,
                 link_url: str):
        """
        Returns link related to the product
        :param str link_type: Type of link
        :param str link_url: Url of the link
        """
        # Both fields are required on this output type.
        pulumi.set(__self__, "link_type", link_type)
        pulumi.set(__self__, "link_url", link_url)

    @property
    @pulumi.getter(name="linkType")
    def link_type(self) -> str:
        """
        Type of link
        """
        return pulumi.get(self, "link_type")

    @property
    @pulumi.getter(name="linkUrl")
    def link_url(self) -> str:
        """
        Url of the link
        """
        return pulumi.get(self, "link_url")
@pulumi.output_type
class MeterDetailsResponseResult(dict):
    """
    Billing details for each meter.
    """
    def __init__(__self__, *,
                 billing_model: 'outputs.BillingModelResponseResult',
                 meter_id: str,
                 meter_type: str):
        """
        Billing details for each meter.
        :param 'BillingModelResponseArgs' billing_model: Billing model to represent billing cycle, i.e. Monthly, biweekly, daily, hourly etc.
        :param str meter_id: MeterId/ Billing Guid against which the product system will be charged
        :param str meter_type: Category of the billing meter.
        """
        # All three fields are required on this output type.
        pulumi.set(__self__, "billing_model", billing_model)
        pulumi.set(__self__, "meter_id", meter_id)
        pulumi.set(__self__, "meter_type", meter_type)

    @property
    @pulumi.getter(name="billingModel")
    def billing_model(self) -> 'outputs.BillingModelResponseResult':
        """
        Billing model to represent billing cycle, i.e. Monthly, biweekly, daily, hourly etc.
        """
        return pulumi.get(self, "billing_model")

    @property
    @pulumi.getter(name="meterId")
    def meter_id(self) -> str:
        """
        MeterId/ Billing Guid against which the product system will be charged
        """
        return pulumi.get(self, "meter_id")

    @property
    @pulumi.getter(name="meterType")
    def meter_type(self) -> str:
        """
        Category of the billing meter.
        """
        return pulumi.get(self, "meter_type")
@pulumi.output_type
class NotificationPreferenceResponse(dict):
    """
    Notification preference for a job stage.
    """
    def __init__(__self__, *,
                 send_notification: bool,
                 stage_name: str):
        """
        Notification preference for a job stage.
        :param bool send_notification: Notification is required or not.
        :param str stage_name: Name of the stage.
        """
        # Both fields are required on this output type.
        pulumi.set(__self__, "send_notification", send_notification)
        pulumi.set(__self__, "stage_name", stage_name)

    @property
    @pulumi.getter(name="sendNotification")
    def send_notification(self) -> bool:
        """
        Notification is required or not.
        """
        return pulumi.get(self, "send_notification")

    @property
    @pulumi.getter(name="stageName")
    def stage_name(self) -> str:
        """
        Name of the stage.
        """
        return pulumi.get(self, "stage_name")

    def _translate_property(self, prop):
        # Map camelCase wire property names to snake_case Python names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class OrderDetailsResponse(dict):
"""
Order details
"""
def __init__(__self__, *,
cancellation_reason: str,
cancellation_status: str,
current_status: 'outputs.OrderStatusDetailsResponse',
deletion_status: str,
error: 'outputs.CloudErrorResponse',
forward_shipping_details: 'outputs.ShippingDetailsResponse',
management_rp_details: Any,
order_status_history: Sequence['outputs.OrderStatusDetailsResponse'],
order_type: str,
product_details: 'outputs.ProductDetailsResponse',
return_reason: str,
return_status: str,
reverse_shipping_details: 'outputs.ShippingDetailsResponse',
notification_email_list: Optional[Sequence[str]] = None,
preferences: Optional['outputs.PreferencesResponse'] = None):
"""
Order details
:param str cancellation_reason: Cancellation reason.
:param str cancellation_status: Describes whether the order is cancellable or not.
:param 'OrderStatusDetailsResponseArgs' current_status: Current Order Status
:param str deletion_status: Describes whether the order is deletable or not.
:param 'CloudErrorResponseArgs' error: Top level error for the job.
:param 'ShippingDetailsResponseArgs' forward_shipping_details: Forward Package Shipping details
:param Any management_rp_details: parent RP details
:param Sequence['OrderStatusDetailsResponseArgs'] order_status_history: Order history
:param str order_type: Order type.
:param 'ProductDetailsResponseArgs' product_details: Unique identifier for configuration.
:param str return_reason: Return reason.
:param str return_status: Describes whether the order is returnable or not.
:param 'ShippingDetailsResponseArgs' reverse_shipping_details: Reverse Package Shipping details
:param Sequence[str] notification_email_list: Package Shipping details
:param 'PreferencesResponseArgs' preferences: Customer notification Preferences
"""
pulumi.set(__self__, "cancellation_reason", cancellation_reason)
pulumi.set(__self__, "cancellation_status", cancellation_status)
pulumi.set(__self__, "current_status", current_status)
pulumi.set(__self__, "deletion_status", deletion_status)
pulumi.set(__self__, "error", error)
pulumi.set(__self__, "forward_shipping_details", forward_shipping_details)
pulumi.set(__self__, "management_rp_details", management_rp_details)
pulumi.set(__self__, "order_status_history", order_status_history)
pulumi.set(__self__, "order_type", order_type)
pulumi.set(__self__, "product_details", product_details)
pulumi.set(__self__, "return_reason", return_reason)
pulumi.set(__self__, "return_status", return_status)
pulumi.set(__self__, "reverse_shipping_details", reverse_shipping_details)
if notification_email_list is not None:
pulumi.set(__self__, "notification_email_list", notification_email_list)
if preferences is not None:
pulumi.set(__self__, "preferences", preferences)
@property
@pulumi.getter(name="cancellationReason")
def cancellation_reason(self) -> str:
"""
Cancellation reason.
"""
return pulumi.get(self, "cancellation_reason")
@property
@pulumi.getter(name="cancellationStatus")
def cancellation_status(self) -> str:
"""
Describes whether the order is cancellable or not.
"""
return pulumi.get(self, "cancellation_status")
@property
@pulumi.getter(name="currentStatus")
def current_status(self) -> 'outputs.OrderStatusDetailsResponse':
"""
Current Order Status
"""
return pulumi.get(self, "current_status")
@property
@pulumi.getter(name="deletionStatus")
def deletion_status(self) -> str:
"""
Describes whether the order is deletable or not.
"""
return pulumi.get(self, "deletion_status")
@property
@pulumi.getter
def error(self) -> 'outputs.CloudErrorResponse':
"""
Top level error for the job.
"""
return pulumi.get(self, "error")
@property
@pulumi.getter(name="forwardShippingDetails")
def forward_shipping_details(self) -> 'outputs.ShippingDetailsResponse':
"""
Forward Package Shipping details
"""
return pulumi.get(self, "forward_shipping_details")
@property
@pulumi.getter(name="managementRpDetails")
def management_rp_details(self) -> Any:
"""
parent RP details
"""
return pulumi.get(self, "management_rp_details")
    @property
    @pulumi.getter(name="orderStatusHistory")
    def order_status_history(self) -> Sequence['outputs.OrderStatusDetailsResponse']:
        """
        Order history
        """
        # Sequence of nested status snapshots (read-only).
        return pulumi.get(self, "order_status_history")
    @property
    @pulumi.getter(name="orderType")
    def order_type(self) -> str:
        """
        Order type.
        """
        # Read-only lookup of the stored "order_type" value.
        return pulumi.get(self, "order_type")
    @property
    @pulumi.getter(name="productDetails")
    def product_details(self) -> 'outputs.ProductDetailsResponse':
        """
        Unique identifier for configuration.
        """
        # Returns the nested ProductDetailsResponse output type.
        return pulumi.get(self, "product_details")
    @property
    @pulumi.getter(name="returnReason")
    def return_reason(self) -> str:
        """
        Return reason.
        """
        # Read-only lookup of the stored "return_reason" value.
        return pulumi.get(self, "return_reason")
    @property
    @pulumi.getter(name="returnStatus")
    def return_status(self) -> str:
        """
        Describes whether the order is returnable or not.
        """
        # Read-only lookup of the stored "return_status" value.
        return pulumi.get(self, "return_status")
    @property
    @pulumi.getter(name="reverseShippingDetails")
    def reverse_shipping_details(self) -> 'outputs.ShippingDetailsResponse':
        """
        Reverse Package Shipping details
        """
        # Counterpart of forward_shipping_details for the return leg.
        return pulumi.get(self, "reverse_shipping_details")
    @property
    @pulumi.getter(name="notificationEmailList")
    def notification_email_list(self) -> Optional[Sequence[str]]:
        """
        Package Shipping details
        """
        # Optional field: __init__ only stores it when a value was provided, hence Optional return.
        return pulumi.get(self, "notification_email_list")
    @property
    @pulumi.getter
    def preferences(self) -> Optional['outputs.PreferencesResponse']:
        """
        Customer notification Preferences
        """
        # Optional field: __init__ only stores it when a value was provided.
        return pulumi.get(self, "preferences")
    def _translate_property(self, prop):
        # Map camelCase wire names to this class's snake_case attribute names,
        # falling back to the original name when no translation is registered.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type: field values live in the backing dict (written with
# pulumi.set in __init__, read back with pulumi.get in the property getters).
@pulumi.output_type
class OrderStatusDetailsResponse(dict):
    """
    Order status CurrentStatus
    """
    def __init__(__self__, *,
                 order_status: str,
                 last_updated_time: Optional[str] = None):
        """
        Order status CurrentStatus
        :param str order_status: Order status
        :param str last_updated_time: last time order was updated
        """
        pulumi.set(__self__, "order_status", order_status)
        # Optional field: stored only when the provider supplied a value.
        if last_updated_time is not None:
            pulumi.set(__self__, "last_updated_time", last_updated_time)
    @property
    @pulumi.getter(name="orderStatus")
    def order_status(self) -> str:
        """
        Order status
        """
        return pulumi.get(self, "order_status")
    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[str]:
        """
        last time order was updated
        """
        return pulumi.get(self, "last_updated_time")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type; values are kept in the backing dict via pulumi.set/pulumi.get.
@pulumi.output_type
class PreferencesResponse(dict):
    """
    Preferences related to the order
    """
    def __init__(__self__, *,
                 notification_preferences: Optional[Sequence['outputs.NotificationPreferenceResponse']] = None,
                 transport_preferences: Optional['outputs.TransportPreferencesResponse'] = None):
        """
        Preferences related to the order
        :param Sequence['NotificationPreferenceResponseArgs'] notification_preferences: Notification preferences.
        :param 'TransportPreferencesResponseArgs' transport_preferences: Preferences related to the shipment logistics of the order.
        """
        # Both fields are optional and stored only when provided.
        if notification_preferences is not None:
            pulumi.set(__self__, "notification_preferences", notification_preferences)
        if transport_preferences is not None:
            pulumi.set(__self__, "transport_preferences", transport_preferences)
    @property
    @pulumi.getter(name="notificationPreferences")
    def notification_preferences(self) -> Optional[Sequence['outputs.NotificationPreferenceResponse']]:
        """
        Notification preferences.
        """
        return pulumi.get(self, "notification_preferences")
    @property
    @pulumi.getter(name="transportPreferences")
    def transport_preferences(self) -> Optional['outputs.TransportPreferencesResponse']:
        """
        Preferences related to the shipment logistics of the order.
        """
        return pulumi.get(self, "transport_preferences")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type; values are kept in the backing dict via pulumi.set/pulumi.get.
@pulumi.output_type
class ProductDetailsResponse(dict):
    """
    Represents product details
    """
    def __init__(__self__, *,
                 device_details: Sequence['outputs.DeviceDetailsResponse'],
                 hierarchy_information: 'outputs.HierarchyInformationResponse',
                 count: Optional[int] = None):
        """
        Represents product details
        :param Sequence['DeviceDetailsResponseArgs'] device_details: list of device details
        :param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy of the product which uniquely identifies the product
        :param int count: Quantity of the product
        """
        pulumi.set(__self__, "device_details", device_details)
        pulumi.set(__self__, "hierarchy_information", hierarchy_information)
        # Optional field: stored only when provided.
        if count is not None:
            pulumi.set(__self__, "count", count)
    @property
    @pulumi.getter(name="deviceDetails")
    def device_details(self) -> Sequence['outputs.DeviceDetailsResponse']:
        """
        list of device details
        """
        return pulumi.get(self, "device_details")
    @property
    @pulumi.getter(name="hierarchyInformation")
    def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
        """
        Hierarchy of the product which uniquely identifies the product
        """
        return pulumi.get(self, "hierarchy_information")
    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Quantity of the product
        """
        return pulumi.get(self, "count")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type; all fields are required and stored in the backing dict
# via pulumi.set, read back with pulumi.get.
@pulumi.output_type
class ProductFamilyResponseResult(dict):
    """
    Product Family
    """
    def __init__(__self__, *,
                 availability_information: 'outputs.AvailabilityInformationResponseResult',
                 cost_information: 'outputs.CostInformationResponseResult',
                 description: 'outputs.DescriptionResponseResult',
                 display_name: str,
                 filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
                 hierarchy_information: 'outputs.HierarchyInformationResponse',
                 image_information: Sequence['outputs.ImageInformationResponseResult'],
                 product_lines: Sequence['outputs.ProductLineResponseResult']):
        """
        Product Family
        :param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
        :param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
        :param 'DescriptionResponseArgs' description: Description related to the product system.
        :param str display_name: Display Name for the product system.
        :param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
        :param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
        :param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
        :param Sequence['ProductLineResponseArgs'] product_lines: List of product lines supported in the product family
        """
        pulumi.set(__self__, "availability_information", availability_information)
        pulumi.set(__self__, "cost_information", cost_information)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "filterable_properties", filterable_properties)
        pulumi.set(__self__, "hierarchy_information", hierarchy_information)
        pulumi.set(__self__, "image_information", image_information)
        pulumi.set(__self__, "product_lines", product_lines)
    @property
    @pulumi.getter(name="availabilityInformation")
    def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
        """
        Availability information of the product system.
        """
        return pulumi.get(self, "availability_information")
    @property
    @pulumi.getter(name="costInformation")
    def cost_information(self) -> 'outputs.CostInformationResponseResult':
        """
        Cost information for the product system.
        """
        return pulumi.get(self, "cost_information")
    @property
    @pulumi.getter
    def description(self) -> 'outputs.DescriptionResponseResult':
        """
        Description related to the product system.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Display Name for the product system.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="filterableProperties")
    def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
        """
        list of filters supported for a product
        """
        return pulumi.get(self, "filterable_properties")
    @property
    @pulumi.getter(name="hierarchyInformation")
    def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
        """
        Hierarchy information of the product system.
        """
        return pulumi.get(self, "hierarchy_information")
    @property
    @pulumi.getter(name="imageInformation")
    def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
        """
        Image information for the product system.
        """
        return pulumi.get(self, "image_information")
    @property
    @pulumi.getter(name="productLines")
    def product_lines(self) -> Sequence['outputs.ProductLineResponseResult']:
        """
        List of product lines supported in the product family
        """
        return pulumi.get(self, "product_lines")
# Pulumi output type; mirrors ProductFamilyResponseResult but holds `products`
# instead of `product_lines`. Values stored/read via pulumi.set/pulumi.get.
@pulumi.output_type
class ProductLineResponseResult(dict):
    """
    Product line
    """
    def __init__(__self__, *,
                 availability_information: 'outputs.AvailabilityInformationResponseResult',
                 cost_information: 'outputs.CostInformationResponseResult',
                 description: 'outputs.DescriptionResponseResult',
                 display_name: str,
                 filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
                 hierarchy_information: 'outputs.HierarchyInformationResponse',
                 image_information: Sequence['outputs.ImageInformationResponseResult'],
                 products: Sequence['outputs.ProductResponseResult']):
        """
        Product line
        :param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
        :param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
        :param 'DescriptionResponseArgs' description: Description related to the product system.
        :param str display_name: Display Name for the product system.
        :param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
        :param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
        :param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
        :param Sequence['ProductResponseArgs'] products: List of products in the product line
        """
        pulumi.set(__self__, "availability_information", availability_information)
        pulumi.set(__self__, "cost_information", cost_information)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "filterable_properties", filterable_properties)
        pulumi.set(__self__, "hierarchy_information", hierarchy_information)
        pulumi.set(__self__, "image_information", image_information)
        pulumi.set(__self__, "products", products)
    @property
    @pulumi.getter(name="availabilityInformation")
    def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
        """
        Availability information of the product system.
        """
        return pulumi.get(self, "availability_information")
    @property
    @pulumi.getter(name="costInformation")
    def cost_information(self) -> 'outputs.CostInformationResponseResult':
        """
        Cost information for the product system.
        """
        return pulumi.get(self, "cost_information")
    @property
    @pulumi.getter
    def description(self) -> 'outputs.DescriptionResponseResult':
        """
        Description related to the product system.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Display Name for the product system.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="filterableProperties")
    def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
        """
        list of filters supported for a product
        """
        return pulumi.get(self, "filterable_properties")
    @property
    @pulumi.getter(name="hierarchyInformation")
    def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
        """
        Hierarchy information of the product system.
        """
        return pulumi.get(self, "hierarchy_information")
    @property
    @pulumi.getter(name="imageInformation")
    def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
        """
        Image information for the product system.
        """
        return pulumi.get(self, "image_information")
    @property
    @pulumi.getter
    def products(self) -> Sequence['outputs.ProductResponseResult']:
        """
        List of products in the product line
        """
        return pulumi.get(self, "products")
# Pulumi output type; same shape family as ProductLineResponseResult but with
# per-product `configurations`. Values stored/read via pulumi.set/pulumi.get.
@pulumi.output_type
class ProductResponseResult(dict):
    """
    List of Products
    """
    def __init__(__self__, *,
                 availability_information: 'outputs.AvailabilityInformationResponseResult',
                 configurations: Sequence['outputs.ConfigurationResponseResult'],
                 cost_information: 'outputs.CostInformationResponseResult',
                 description: 'outputs.DescriptionResponseResult',
                 display_name: str,
                 filterable_properties: Sequence['outputs.FilterablePropertyResponseResult'],
                 hierarchy_information: 'outputs.HierarchyInformationResponse',
                 image_information: Sequence['outputs.ImageInformationResponseResult']):
        """
        List of Products
        :param 'AvailabilityInformationResponseArgs' availability_information: Availability information of the product system.
        :param Sequence['ConfigurationResponseArgs'] configurations: List of configurations for the product
        :param 'CostInformationResponseArgs' cost_information: Cost information for the product system.
        :param 'DescriptionResponseArgs' description: Description related to the product system.
        :param str display_name: Display Name for the product system.
        :param Sequence['FilterablePropertyResponseArgs'] filterable_properties: list of filters supported for a product
        :param 'HierarchyInformationResponseArgs' hierarchy_information: Hierarchy information of the product system.
        :param Sequence['ImageInformationResponseArgs'] image_information: Image information for the product system.
        """
        pulumi.set(__self__, "availability_information", availability_information)
        pulumi.set(__self__, "configurations", configurations)
        pulumi.set(__self__, "cost_information", cost_information)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "filterable_properties", filterable_properties)
        pulumi.set(__self__, "hierarchy_information", hierarchy_information)
        pulumi.set(__self__, "image_information", image_information)
    @property
    @pulumi.getter(name="availabilityInformation")
    def availability_information(self) -> 'outputs.AvailabilityInformationResponseResult':
        """
        Availability information of the product system.
        """
        return pulumi.get(self, "availability_information")
    @property
    @pulumi.getter
    def configurations(self) -> Sequence['outputs.ConfigurationResponseResult']:
        """
        List of configurations for the product
        """
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="costInformation")
    def cost_information(self) -> 'outputs.CostInformationResponseResult':
        """
        Cost information for the product system.
        """
        return pulumi.get(self, "cost_information")
    @property
    @pulumi.getter
    def description(self) -> 'outputs.DescriptionResponseResult':
        """
        Description related to the product system.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Display Name for the product system.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="filterableProperties")
    def filterable_properties(self) -> Sequence['outputs.FilterablePropertyResponseResult']:
        """
        list of filters supported for a product
        """
        return pulumi.get(self, "filterable_properties")
    @property
    @pulumi.getter(name="hierarchyInformation")
    def hierarchy_information(self) -> 'outputs.HierarchyInformationResponse':
        """
        Hierarchy information of the product system.
        """
        return pulumi.get(self, "hierarchy_information")
    @property
    @pulumi.getter(name="imageInformation")
    def image_information(self) -> Sequence['outputs.ImageInformationResponseResult']:
        """
        Image information for the product system.
        """
        return pulumi.get(self, "image_information")
# Pulumi output type; `country` and `street_address1` are the only required
# fields, all other address parts are optional and stored only when provided.
@pulumi.output_type
class ShippingAddressResponse(dict):
    """
    Shipping address where customer wishes to receive the device.
    """
    def __init__(__self__, *,
                 country: str,
                 street_address1: str,
                 address_type: Optional[str] = None,
                 city: Optional[str] = None,
                 company_name: Optional[str] = None,
                 postal_code: Optional[str] = None,
                 state_or_province: Optional[str] = None,
                 street_address2: Optional[str] = None,
                 street_address3: Optional[str] = None,
                 zip_extended_code: Optional[str] = None):
        """
        Shipping address where customer wishes to receive the device.
        :param str country: Name of the Country.
        :param str street_address1: Street Address line 1.
        :param str address_type: Type of address.
        :param str city: Name of the City.
        :param str company_name: Name of the company.
        :param str postal_code: Postal code.
        :param str state_or_province: Name of the State or Province.
        :param str street_address2: Street Address line 2.
        :param str street_address3: Street Address line 3.
        :param str zip_extended_code: Extended Zip Code.
        """
        pulumi.set(__self__, "country", country)
        pulumi.set(__self__, "street_address1", street_address1)
        # Optional fields below are stored only when a value was provided.
        if address_type is not None:
            pulumi.set(__self__, "address_type", address_type)
        if city is not None:
            pulumi.set(__self__, "city", city)
        if company_name is not None:
            pulumi.set(__self__, "company_name", company_name)
        if postal_code is not None:
            pulumi.set(__self__, "postal_code", postal_code)
        if state_or_province is not None:
            pulumi.set(__self__, "state_or_province", state_or_province)
        if street_address2 is not None:
            pulumi.set(__self__, "street_address2", street_address2)
        if street_address3 is not None:
            pulumi.set(__self__, "street_address3", street_address3)
        if zip_extended_code is not None:
            pulumi.set(__self__, "zip_extended_code", zip_extended_code)
    @property
    @pulumi.getter
    def country(self) -> str:
        """
        Name of the Country.
        """
        return pulumi.get(self, "country")
    @property
    @pulumi.getter(name="streetAddress1")
    def street_address1(self) -> str:
        """
        Street Address line 1.
        """
        return pulumi.get(self, "street_address1")
    @property
    @pulumi.getter(name="addressType")
    def address_type(self) -> Optional[str]:
        """
        Type of address.
        """
        return pulumi.get(self, "address_type")
    @property
    @pulumi.getter
    def city(self) -> Optional[str]:
        """
        Name of the City.
        """
        return pulumi.get(self, "city")
    @property
    @pulumi.getter(name="companyName")
    def company_name(self) -> Optional[str]:
        """
        Name of the company.
        """
        return pulumi.get(self, "company_name")
    @property
    @pulumi.getter(name="postalCode")
    def postal_code(self) -> Optional[str]:
        """
        Postal code.
        """
        return pulumi.get(self, "postal_code")
    @property
    @pulumi.getter(name="stateOrProvince")
    def state_or_province(self) -> Optional[str]:
        """
        Name of the State or Province.
        """
        return pulumi.get(self, "state_or_province")
    @property
    @pulumi.getter(name="streetAddress2")
    def street_address2(self) -> Optional[str]:
        """
        Street Address line 2.
        """
        return pulumi.get(self, "street_address2")
    @property
    @pulumi.getter(name="streetAddress3")
    def street_address3(self) -> Optional[str]:
        """
        Street Address line 3.
        """
        return pulumi.get(self, "street_address3")
    @property
    @pulumi.getter(name="zipExtendedCode")
    def zip_extended_code(self) -> Optional[str]:
        """
        Extended Zip Code.
        """
        return pulumi.get(self, "zip_extended_code")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type; all four carrier/tracking fields are required.
@pulumi.output_type
class ShippingDetailsResponse(dict):
    """
    Package shipping details
    """
    def __init__(__self__, *,
                 carrier_display_name: str,
                 carrier_name: str,
                 tracking_id: str,
                 tracking_url: str):
        """
        Package shipping details
        :param str carrier_display_name: Carrier Name for display purpose. Not to be used for any processing.
        :param str carrier_name: Name of the carrier.
        :param str tracking_id: TrackingId of the package
        :param str tracking_url: TrackingUrl of the package.
        """
        pulumi.set(__self__, "carrier_display_name", carrier_display_name)
        pulumi.set(__self__, "carrier_name", carrier_name)
        pulumi.set(__self__, "tracking_id", tracking_id)
        pulumi.set(__self__, "tracking_url", tracking_url)
    @property
    @pulumi.getter(name="carrierDisplayName")
    def carrier_display_name(self) -> str:
        """
        Carrier Name for display purpose. Not to be used for any processing.
        """
        return pulumi.get(self, "carrier_display_name")
    @property
    @pulumi.getter(name="carrierName")
    def carrier_name(self) -> str:
        """
        Name of the carrier.
        """
        return pulumi.get(self, "carrier_name")
    @property
    @pulumi.getter(name="trackingId")
    def tracking_id(self) -> str:
        """
        TrackingId of the package
        """
        return pulumi.get(self, "tracking_id")
    @property
    @pulumi.getter(name="trackingUrl")
    def tracking_url(self) -> str:
        """
        TrackingUrl of the package.
        """
        return pulumi.get(self, "tracking_url")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type: a simple required name/value pair.
@pulumi.output_type
class SpecificationResponseResult(dict):
    """
    Specifications of the configurations
    """
    def __init__(__self__, *,
                 name: str,
                 value: str):
        """
        Specifications of the configurations
        :param str name: Name of the specification
        :param str value: Value of the specification
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the specification
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Value of the specification
        """
        return pulumi.get(self, "value")
# Pulumi output type; every field is optional and only stored when provided.
@pulumi.output_type
class SystemDataResponse(dict):
    """
    Metadata pertaining to creation and last modification of the resource.
    """
    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 last_modified_by: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        """
        Metadata pertaining to creation and last modification of the resource.
        :param str created_at: The timestamp of resource creation (UTC).
        :param str created_by: The identity that created the resource.
        :param str created_by_type: The type of identity that created the resource.
        :param str last_modified_at: The timestamp of resource last modification (UTC)
        :param str last_modified_by: The identity that last modified the resource.
        :param str last_modified_by_type: The type of identity that last modified the resource.
        """
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if created_by_type is not None:
            pulumi.set(__self__, "created_by_type", created_by_type)
        if last_modified_at is not None:
            pulumi.set(__self__, "last_modified_at", last_modified_at)
        if last_modified_by is not None:
            pulumi.set(__self__, "last_modified_by", last_modified_by)
        if last_modified_by_type is not None:
            pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")
    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")
    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")
    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")
    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# Pulumi output type with a single required field.
@pulumi.output_type
class TransportPreferencesResponse(dict):
    """
    Preferences related to the shipment logistics of the sku
    """
    def __init__(__self__, *,
                 preferred_shipment_type: str):
        """
        Preferences related to the shipment logistics of the sku
        :param str preferred_shipment_type: Indicates Shipment Logistics type that the customer preferred.
        """
        pulumi.set(__self__, "preferred_shipment_type", preferred_shipment_type)
    @property
    @pulumi.getter(name="preferredShipmentType")
    def preferred_shipment_type(self) -> str:
        """
        Indicates Shipment Logistics type that the customer preferred.
        """
        return pulumi.get(self, "preferred_shipment_type")
    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 35.679147 | 141 | 0.651883 | 59,942 | 0.967759 | 0 | 0 | 60,522 | 0.977123 | 0 | 0 | 29,555 | 0.477163 |
0bfd287e39c3f7db093ee81af20b674fef10f6e4 | 754 | py | Python | open-hackathon/src/hackathon/__init__.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | open-hackathon/src/hackathon/__init__.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | open-hackathon/src/hackathon/__init__.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | __author__ = 'Junbo Wang'
__version__ = '2.0'
from flask import Flask
from hackathon.functions import safe_get_config
from flask_restful import Api
from flask_cors import CORS
# flask
app = Flask(__name__)
# SECURITY NOTE(review): the secret key is hard-coded in source control; it
# should be loaded from configuration (the imported safe_get_config appears
# unused) or an environment variable — TODO confirm with owners.
app.config['SECRET_KEY'] = '*K&ep_me^se(ret_!@#$'
# flask restful
api = Api(app)
# CORS
# Allow the custom 'token' header (plus Content-Type) on cross-origin requests.
app.config['CORS_HEADERS'] = 'Content-Type, token'
cors = CORS(app)
# Imported after `app` exists so the views module can reference it on import
# (presumably to register routes — verify against hackathon.views).
from . import views
### example of scheduler
# from scheduler import scheduler
# from datetime import datetime, timedelta
#
# def alarm(time):
# print('Alarm! This alarm was scheduled at %s.' % time)
# return {
# "key": "val"
# }
#
# alarm_time = datetime.now() + timedelta(seconds=10)
# scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
| 21.542857 | 78 | 0.696286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.619363 |
0bff94692e4bbe3d3e04ebd669ece2fd2be0847b | 489 | py | Python | nrm_django/nrm_site/settings/test.py | 18F/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 5 | 2020-11-18T20:00:02.000Z | 2021-04-16T23:50:07.000Z | nrm_django/nrm_site/settings/test.py | USDAForestService/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 210 | 2021-04-28T16:26:34.000Z | 2022-03-14T16:31:21.000Z | nrm_django/nrm_site/settings/test.py | USDAForestService/NRM-Grants-Agreements | 7b9016e034b75a2237f7c70ba539b542108c335e | [
"CC0-1.0"
] | 2 | 2021-07-06T20:57:27.000Z | 2021-07-07T13:06:46.000Z | import os
from .base import * # noqa
import dj_database_url
# Django settings used when running the test suite.
SECRET_KEY = "test mode"

# Prefer an externally supplied DATABASE_URL (e.g. set by CI); otherwise fall
# back to the default Postgres service settings.
database_url = os.getenv("DATABASE_URL")
if not database_url:
    DATABASES = {
        "default": {
            "ENGINE": "django.db.backends.postgresql",
            "NAME": "nrm_test",
            "HOST": "postgres",
            "PORT": "5432",
            "USER": "postgres",
            "PASSWORD": "postgres",
        }
    }
else:
    DATABASES = {"default": dj_database_url.parse(database_url)}
| 21.26087 | 64 | 0.554192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.343558 |
0bfffd029eb8a857f5995b9732496cb660d8fb9b | 1,213 | py | Python | exampleTree_simpleS3.py | evilrovot/attackTrees | e228037e7a4705bf405f48b582f8d0fda4c981d8 | [
"Apache-2.0"
] | null | null | null | exampleTree_simpleS3.py | evilrovot/attackTrees | e228037e7a4705bf405f48b582f8d0fda4c981d8 | [
"Apache-2.0"
] | null | null | null | exampleTree_simpleS3.py | evilrovot/attackTrees | e228037e7a4705bf405f48b582f8d0fda4c981d8 | [
"Apache-2.0"
] | null | null | null | from models import Action, Block, Detect, Discovery, Edge, Node
import renderer
if __name__ == "__main__":
    # Build a small attack tree: reality -> recon -> discovery -> exfiltration -> goal.
    root = Node(label="Reality")
    goal = Node(label="Attacker gets data from bucket")
    # Recon step: zero-cost search of public API caches for bucket paths.
    apiCache = Action(
        label="Search API Caches",
        chain="recon",
        cost=0,
        time=3,
        objective="Discover bucket paths",
        pSuccess=1.0
    )
    root.createEdge(apiCache,label="#Yolosec")
    # Discovery produced by the recon step: the S3 bucket URLs themselves.
    s3urls = Discovery(
        label="S3 Urls",
        description="The URL paths to various S3 buckets",
        sensitivity=3,
        value=0
    )
    apiCache.createEdge(s3urls, label="#Yolosec")
    # Exfiltration step; listed detections are controls that could observe it.
    downloadFiles = Action(
        chain="exfiltration",
        label="Download files from all buckets",
        cost=0,
        time=1,
        objective="Access confidential information stored in S3",
        pSuccess=1.0,
        detections=["CloudWatch","DLP"]
    )
    s3urls.createEdge(downloadFiles, label="#Yolosec")
    downloadFiles.createEdge(goal, label="#Yolosec")
    # Render the tree to a PNG using the style sheet on disk.
    style = renderer.loadStyle('style.json')
    renderer.render(
        node=root,
        renderUnimplemented=True,
        style=style,
        fname="example_simpleS3",
        fout="png"
) | 26.955556 | 65 | 0.610058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.272877 |
0400e97086bae64fd628eddd3e280d7f1e6fb57b | 14,853 | py | Python | python_scripts/geometry_factory.py | rwilliams01/isogeometric_application | e505061603b56b4f426220946da5ec551dc6c142 | [
"MIT"
] | null | null | null | python_scripts/geometry_factory.py | rwilliams01/isogeometric_application | e505061603b56b4f426220946da5ec551dc6c142 | [
"MIT"
] | null | null | null | python_scripts/geometry_factory.py | rwilliams01/isogeometric_application | e505061603b56b4f426220946da5ec551dc6c142 | [
"MIT"
] | null | null | null | import math
from KratosMultiphysics import *
from KratosMultiphysics.BRepApplication import *
from KratosMultiphysics.IsogeometricApplication import *
###
### This module is a factory to generate typical geometries for isogeometric analysis, e.g. circle, l-shape, ...
###
nurbs_fespace_library = BSplinesFESpaceLibrary()
grid_lib = ControlGridLibrary()
multipatch_util = MultiPatchUtility()
multipatch_refine_util = MultiPatchRefinementUtility()
bsplines_patch_util = BSplinesPatchUtility()
### Compute cross product
def cross(c, a, b):
    """Store the cross product a x b into c (in place) and return c.

    All three components are computed into temporaries before being written
    back, so the result is correct even when ``c`` aliases ``a`` or ``b``
    (the previous version wrote ``c[0]`` first and then re-read ``a``/``b``,
    corrupting the result for aliased arguments).
    """
    x = a[1]*b[2] - a[2]*b[1]
    y = a[2]*b[0] - a[0]*b[2]
    z = a[0]*b[1] - a[1]*b[0]
    c[0] = x
    c[1] = y
    c[2] = z
    return c
### Compute dot product
def dot(a, b):
    """Return the scalar (dot) product of the first three components of a and b."""
    return sum(a[i] * b[i] for i in range(3))
### Normalize a vector
def normalize(a):
    """Scale the 3-component vector a to unit length in place and return it.

    Note: a zero vector raises ZeroDivisionError, same as before.
    """
    length = math.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2])
    for i in range(3):
        a[i] = a[i] / length
    return a
### Compute Gaussian function
def gaussian(mu, sigma, x):
    """Evaluate the normal probability density with mean mu and std sigma at x."""
    z = (x - mu) / sigma
    return math.exp(-0.5 * z ** 2) / sigma / math.sqrt(2.0 * math.pi)
### Compute inverse Gaussian function
def inv_gaussian1(mu, sigma, g):
    """Return the left-hand (x <= mu) preimage of the pdf value g for gaussian(mu, sigma, .)."""
    spread = sigma * math.sqrt(-2.0 * math.log(sigma * math.sqrt(2 * math.pi) * g))
    return mu - spread
### Compute inverse Gaussian function
def inv_gaussian2(mu, sigma, g):
    """Return the right-hand (x >= mu) preimage of the pdf value g for gaussian(mu, sigma, .)."""
    spread = sigma * math.sqrt(-2.0 * math.log(sigma * math.sqrt(2 * math.pi) * g))
    return mu + spread
# ### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
# def GenerateGaussianArray(half_n, min_k, max_k, sigma):
# mu = 0.5*(min_k + max_k)
# max_g = gaussian(mu, sigma, mu)
# min_g = gaussian(mu, sigma, 0.0)
# print("min_g:", min_g)
# print("max_g:", max_g)
# print("mu:", inv_gaussian1(mu, sigma, max_g))
# print("mu:", inv_gaussian2(mu, sigma, max_g))
# k_list = []
# for i in range(0, half_n+1):
# t = float(i+1)/(half_n+1)
# g = t*(max_g-min_g) + min_g
# # print("g:", g)
# k = inv_gaussian1(mu, sigma, g)
# # print("k:", k)
# k_list.append(k)
# for i in range(0, half_n):
# t = float(half_n-i)/(half_n+1)
# g = t*(max_g-min_g) + min_g
# k = inv_gaussian2(mu, sigma, g)
# k_list.append(k)
# return k_list
# ### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
# def GenerateGaussianArray(n, min_k, max_k):
# mu = 0.0
# sigma = 1.0
# k_list = []
# g_list = []
# min_g = 0.0
# max_g = gaussian(mu, sigma, mu)
# sum_g = 0.0
# for i in range(0, n):
# t = 6.0*float(i)/(n-1) - 3.0;
# g = gaussian(mu, sigma, t)
# g_list.append(g)
# sum_g = sum_g + g
# t = 0.0
# for g in g_list:
# t = t + g
# k_list.append(t/sum_g)
# return k_list
### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
def GenerateGaussianArray(num_span, max_elem_in_span, sigma, min_k, max_k):
    """Build a knot list in [min_k, max_k] whose point density follows a Gaussian bell.

    The interval is split into `num_span` equal spans mapped onto [-3, 3];
    each span receives a number of evenly spaced knots proportional to the
    pdf value at its midpoint (at most `max_elem_in_span`).
    """
    knots = []
    for span in range(num_span):
        # span boundaries mapped into the reference interval [-3, 3]
        lo = -3.0 + float(span) / num_span * 6.0
        hi = -3.0 + float(span + 1) / num_span * 6.0
        # sample the pdf at the span midpoint to decide how many knots this span gets
        mid = 0.5 * (lo + hi)
        density = gaussian(0.0, sigma, mid * sigma)
        count = int(max_elem_in_span * density)
        for j in range(count):
            t = lo + float(j + 0.5) / count * (hi - lo)
            scaled = (t + 3.0) / 6.0
            knots.append(scaled * (max_k - min_k) + min_k)
    return knots
### Create a line from start_point to end_point with knot vector [0 0 0 ... 1 1 1]
### On output the pointer to the patch will be returned
def CreateLine(start_point, end_point, order = 1):
    """Build a single-patch B-spline line of the given order between two 3D points."""
    patch_id = 0
    fespace = nurbs_fespace_library.CreatePrimitiveFESpace(order)
    grid = grid_lib.CreateLinearControlPointGrid(
        start_point[0], start_point[1], start_point[2],
        fespace.Number(0),
        end_point[0], end_point[1], end_point[2])
    line_ptr = multipatch_util.CreatePatchPointer(patch_id, fespace)
    line_ptr.GetReference().CreateControlPointGridFunction(grid)
    return line_ptr
### Create a curve from the control point list, given as [ [x0, y0, z0], ... ]
### All the weight is assumed 1
def CreateCurve(points, order):
    """Build a single-patch B-spline curve from the given control points (unit weights)."""
    patch_id = 0
    num_points = len(points)
    fespace = nurbs_fespace_library.CreateUniformFESpace(num_points, order)
    grid = StructuredControlPointGrid1D(num_points)
    for idx in range(num_points):
        p = points[idx]
        grid.SetValue(idx, ControlPoint(p[0], p[1], p[2], 1.0))
    curve_ptr = multipatch_util.CreatePatchPointer(patch_id, fespace)
    curve_ptr.GetReference().CreateControlPointGridFunction(grid)
    return curve_ptr
### Create an arc at center on the surface perpendicular to the given axis. By default, the quadratic arc is generated. The knot vector will be [0 0 0 1 1 1]
### On output the pointer to the patch will be returned. Small arc means that the open angle is less than 90 degrees.
def CreateSmallArc(center, axis, radius, start_angle, end_angle):
    """Quadratic rational Bezier arc built in the xy plane, then rotated/translated into place."""
    ## build the canonical arc at the origin first
    patch_id = 0
    fespace = nurbs_fespace_library.CreatePrimitiveFESpace(2)
    grid = grid_lib.CreateLinearControlPointGrid(0.0, 0.0, 0.0, fespace.Number(0), radius, 0.0, 0.0)
    sweep = end_angle - start_angle
    half_sweep = 0.5 * sweep / 180.0 * math.pi
    wm = math.cos(half_sweep)
    x = radius * wm
    y = radius * math.sin(half_sweep)
    xm = x + y * math.tan(half_sweep)
    # transformation that orients the arc perpendicular to `axis` and moves it to `center`
    if axis == 'z':
        trans = RotationZ(start_angle + 0.5 * sweep)
    elif axis == 'y':
        trans = RotationZ(start_angle + 0.5 * sweep)
        trans.AppendTransformation(RotationX(90.0))
    elif axis == 'x':
        trans = RotationZ(start_angle + 0.5 * sweep + 90.0)
        trans.AppendTransformation(RotationY(90.0))
    trans.AppendTransformation(Translation(center[0], center[1], center[2]))
    # weighted (homogeneous) coordinates of the three quadratic control points:
    # endpoints have weight 1, the middle point carries weight cos(half_sweep)
    for idx, (wx, wy, w) in enumerate([(x, -y, 1.0), (wm * xm, 0.0, wm), (x, y, 1.0)]):
        pt = grid[idx]
        pt.WX = wx
        pt.WY = wy
        pt.WZ = 0.0
        pt.W = w
        pt.ApplyTransformation(trans)
        grid[idx] = pt
    arc_ptr = multipatch_util.CreatePatchPointer(patch_id, fespace)
    arc_ptr.GetReference().CreateControlPointGridFunction(grid)
    return arc_ptr
### Create a 2D ring at center on the surface perpendicular to the axis. By default, the quadratic arc is generated. The knot vector will be [0 0 0 1 1 1]
### On output the pointer to the patch will be returned. Small ring means that the open angle is less than 90 degrees.
def CreateSmallRing(center, axis, rin, rout, start_angle, end_angle):
    """Loft between an inner (rin) and an outer (rout) arc to make an annular sector."""
    inner_ptr = CreateSmallArc(center, axis, rin, start_angle, end_angle)
    outer_ptr = CreateSmallArc(center, axis, rout, start_angle, end_angle)
    # keep both pointers alive until the loft is built
    return bsplines_patch_util.CreateLoftPatch(inner_ptr.GetReference(), outer_ptr.GetReference())
### Create the 2D rectangle aligned with Cartesian axes
def CreateRectangle(start_point, end_point):
    """Loft two parallel x-direction edges into a rectangle at z = start_point[2]."""
    bottom_edge = CreateLine(start_point, [end_point[0], start_point[1], start_point[2]])
    top_edge = CreateLine([start_point[0], end_point[1], start_point[2]], [end_point[0], end_point[1], start_point[2]])
    return bsplines_patch_util.CreateLoftPatch(bottom_edge, top_edge)
### Create the 2D parallelogram
### P4---P3
### | |
### P1---P2
def CreateParallelogram(P1, P2, P3, P4):
    """Loft edge P1-P2 onto edge P4-P3 to form the parallelogram patch."""
    bottom_edge = CreateLine(P1, P2)
    top_edge = CreateLine(P4, P3)
    return bsplines_patch_util.CreateLoftPatch(bottom_edge, top_edge)
### Create the 3D slab aligned with Cartesian axes
def CreateSlab(start_point, end_point):
    """Build an axis-aligned box by lofting the bottom rectangle onto the top one."""
    def _face(z):
        # rectangle at height z spanning the slab's x/y extents
        edge_a = CreateLine([start_point[0], start_point[1], z], [end_point[0], start_point[1], z])
        edge_b = CreateLine([start_point[0], end_point[1], z], [end_point[0], end_point[1], z])
        return bsplines_patch_util.CreateLoftPatch(edge_a, edge_b)
    face_bottom_ptr = _face(start_point[2])
    face_top_ptr = _face(end_point[2])
    # keep both face pointers alive until the volume loft is built
    return bsplines_patch_util.CreateLoftPatch(face_bottom_ptr.GetReference(), face_top_ptr.GetReference())
### Create a half circle with 4 patches configuration
def CreateHalfCircle4(center, axis, radius, rotation_angle, params=None):
    """Build a half disc out of 4 NURBS patches: three boundary-arc patches lofted
    toward an inner square, plus the inner square patch itself.

    params (optional dict):
      'make_interface' (default True): stitch neighbouring patch interfaces.
      'square_control' (default 1/3): inner square half-size as a fraction of radius.
    Returns [patch1_ptr, patch2_ptr, patch3_ptr, patch4_ptr].
    """
    # Mutable default argument ({}) replaced by the None sentinel idiom.
    if params is None:
        params = {}
    make_interface = params.get('make_interface', True)
    square_control = params.get('square_control', 1.0/3)
    ### create arcs
    arc1_ptr = CreateSmallArc(center, axis, radius, 0.0, 45.0)
    arc1 = arc1_ptr.GetReference()
    arc2_ptr = CreateSmallArc(center, axis, radius, 45.0, 135.0)
    arc2 = arc2_ptr.GetReference()
    arc3_ptr = CreateSmallArc(center, axis, radius, 135.0, 180.0)
    arc3 = arc3_ptr.GetReference()
    square_size = square_control*radius
    ### create lines (corners of the inner square; layout depends on the axis)
    if axis == 'x':
        p1 = [center[0], center[1] + square_size, center[2]]
        p2 = [center[0], center[1] + square_size, center[2] + square_size]
        p3 = [center[0], center[1] - square_size, center[2]]
        p4 = [center[0], center[1] - square_size, center[2] + square_size]
    elif axis == 'y':
        p1 = [center[0], center[1], center[2] + square_size]
        p2 = [center[0] + square_size, center[1], center[2] + square_size]
        p3 = [center[0], center[1], center[2] - square_size]
        p4 = [center[0] + square_size, center[1], center[2] - square_size]
    elif axis == 'z':
        p1 = [center[0] + square_size, center[1], center[2]]
        p2 = [center[0] + square_size, center[1] + square_size, center[2]]
        p3 = [center[0] - square_size, center[1], center[2]]
        p4 = [center[0] - square_size, center[1] + square_size, center[2]]
    # match the arcs' u-order so the lofts are conforming
    u_order = arc1.Order(0)
    line1_ptr = CreateLine(p1, p2, u_order)
    line1 = line1_ptr.GetReference()
    line2_ptr = CreateLine(p2, p4, u_order)
    line2 = line2_ptr.GetReference()
    line3_ptr = CreateLine(p4, p3, u_order)
    line3 = line3_ptr.GetReference()
    line4_ptr = CreateLine(p1, p3, u_order)
    line4 = line4_ptr.GetReference()
    patch1_ptr = bsplines_patch_util.CreateLoftPatch(arc1, line1)
    patch2_ptr = bsplines_patch_util.CreateLoftPatch(arc2, line2)
    patch3_ptr = bsplines_patch_util.CreateLoftPatch(arc3, line3)
    patch4_ptr = bsplines_patch_util.CreateLoftPatchFromList2D([line2, line4], 1)
    # raise the inner square's second direction to match the surrounding patches
    multipatch_refine_util.DegreeElevate(patch4_ptr, [0, u_order-1])
    patch1 = patch1_ptr.GetReference()
    patch1.Id = 1
    patch2 = patch2_ptr.GetReference()
    patch2.Id = 2
    patch3 = patch3_ptr.GetReference()
    patch3.Id = 3
    patch4 = patch4_ptr.GetReference()
    patch4.Id = 4
    if rotation_angle != 0.0:
        # rotate about `axis` through `center`: translate to origin, rotate, translate back
        trans = Transformation()
        trans.AppendTransformation(Translation(-center[0], -center[1], -center[2]))
        if axis == 'z':
            trans.AppendTransformation(RotationZ(rotation_angle))
        elif axis == 'y':
            trans.AppendTransformation(RotationY(rotation_angle))
        elif axis == 'x':
            trans.AppendTransformation(RotationX(rotation_angle))
        trans.AppendTransformation(Translation(center[0], center[1], center[2]))
        patch1.ApplyTransformation(trans)
        patch2.ApplyTransformation(trans)
        patch3.ApplyTransformation(trans)
        patch4.ApplyTransformation(trans)
    if make_interface:
        # stitch neighbouring patch boundaries; directions follow the loft orientations
        bsplines_patch_util.MakeInterface(patch1, BoundarySide2D.U1, patch2, BoundarySide2D.U0, BoundaryDirection.Forward)
        bsplines_patch_util.MakeInterface(patch2, BoundarySide2D.U1, patch3, BoundarySide2D.U0, BoundaryDirection.Forward)
        bsplines_patch_util.MakeInterface(patch1, BoundarySide2D.V1, patch4, BoundarySide2D.U0, BoundaryDirection.Reversed)
        bsplines_patch_util.MakeInterface(patch2, BoundarySide2D.V1, patch4, BoundarySide2D.V0, BoundaryDirection.Forward)
        bsplines_patch_util.MakeInterface(patch3, BoundarySide2D.V1, patch4, BoundarySide2D.U1, BoundaryDirection.Forward)
    return [patch1_ptr, patch2_ptr, patch3_ptr, patch4_ptr]
### Create a list of Frenet frame along a curve. The Frenet frame is stored as a transformation matrix.
### zvec is a reference vector to compute B at the first sampling point. It shall not be parallel with the tangent vector of the first sampling point.
def GenerateLocalFrenetFrame(curve, num_sampling_points, zvec = [1.0, 0.0, 0.0]):
    """Sample `num_sampling_points` Frenet-like frames along `curve`.

    Returns a list of Transformation objects built from (B, T, P) per sample.
    The binormal B is propagated between samples by projecting out the tangent
    component, rather than recomputed, which keeps its orientation continuous.
    """
    trans_list = []
    B = Array3()
    ctrl_pnt_grid_func = curve.GridFunction(CONTROL_POINT_COORDINATES)
    print(ctrl_pnt_grid_func)  # NOTE(review): debug output left in place
    for i in range(0, num_sampling_points):
        # uniform parameter samples in [0, 1] along the curve direction
        xi = float(i) / (num_sampling_points-1)
        pnt = [xi, 0.0, 0.0]
        P = ctrl_pnt_grid_func.GetValue(pnt)
        T = ctrl_pnt_grid_func.GetDerivative(pnt)
        T = normalize(T[0])
        if i == 0:
            # seed the binormal from the user-supplied reference vector
            cross(B, zvec, T)
            B = normalize(B)
        else:
            # remove the tangent component so B stays perpendicular to T
            B = B - dot(B, T)*T
            B = normalize(B)
        trans = Transformation(B, T, P)
        trans_list.append(trans)
    return trans_list
def ExportLocalFrenetFrameToMatlab(trans_list, fn, s = 1.0):
    """Write a Matlab script to `fn` that plots every Frenet frame in `trans_list`.

    Each frame contributes cell entries C (origin), T (tangent), B (binormal)
    and N (normal), plus a `plot_frame` call; `s` is the arrow scale factor.
    """
    # `with` guarantees the file is closed even if a frame accessor raises
    # (the original left the handle open on exceptions).
    with open(fn, "w") as out:
        out.write("s = " + str(s) + ";\n")
        out.write("C = {}; B = {}; T = {}; N = {};\n")
        for cnt, trans in enumerate(trans_list, start=1):
            P = trans.P()
            B = trans.V1()
            N = trans.V2()
            T = trans.V3()
            out.write("C{" + str(cnt) + "} = [" + str(P[0]) + " " + str(P[1]) + " " + str(P[2]) + "];\n")
            out.write("T{" + str(cnt) + "} = [" + str(T[0]) + " " + str(T[1]) + " " + str(T[2]) + "];\n")
            out.write("B{" + str(cnt) + "} = [" + str(B[0]) + " " + str(B[1]) + " " + str(B[2]) + "];\n")
            out.write("N{" + str(cnt) + "} = [" + str(N[0]) + " " + str(N[1]) + " " + str(N[2]) + "];\n")
            out.write("hold on; plot_frame(C{" + str(cnt) + "}, B{" + str(cnt) + "}, N{" + str(cnt) + "}, T{" + str(cnt) + "}, s);\n")
| 38.280928 | 158 | 0.642698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,809 | 0.256447 |
04013197d6bb7d88a6cd252419bfeecd25b47689 | 6,945 | py | Python | tutorials/tutorial_imc.py | softbear/squidpy_notebooks | 28a5989105e705f06070fbe52b3dfb4d5741e3bd | [
"MIT"
] | null | null | null | tutorials/tutorial_imc.py | softbear/squidpy_notebooks | 28a5989105e705f06070fbe52b3dfb4d5741e3bd | [
"MIT"
] | null | null | null | tutorials/tutorial_imc.py | softbear/squidpy_notebooks | 28a5989105e705f06070fbe52b3dfb4d5741e3bd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Analyze Imaging Mass Cytometry data
===================================
This tutorial shows how to apply Squidpy to Imaging Mass Cytometry data.
The data used here comes from a recent paper from :cite:`jackson2020single`.
We provide a pre-processed subset of the data, in :class:`anndata.AnnData` format.
For details on how it was pre-processed, please refer to the original paper.
.. seealso::
See :ref:`sphx_glr_auto_tutorials_tutorial_seqfish.py` for additional analysis examples.
Import packages & data
----------------------
To run the notebook locally, create a conda environment as *conda create -f environment.yml* using this
`environment.yml <https://github.com/theislab/squidpy_notebooks/blob/master/environment.yml>`_
"""
import scanpy as sc
import squidpy as sq
# Log package versions so tutorial runs are reproducible.
sc.logging.print_header()
print(f"squidpy=={sq.__version__}")
# load the pre-processed dataset
adata = sq.datasets.imc()
###############################################################################
# First, let's visualize the cluster annotation in spatial context
# with :func:`scanpy.pl.spatial`.
# `spot_size` sets the drawn marker size, since IMC data has no spot diameter.
sc.pl.spatial(adata, color="cell type", spot_size=10)
###############################################################################
# We can appreciate how the majority of the tissue seems
# to consist of *apoptotic tumor cells*. There also seem to be other
# cell types scattered across the tissue, annotated as *T cells*,
# *Macrophages* and different types of *Stromal cells*. We can also
# appreciate how a subset of tumor cell, *basal CK tumor cells* seems
# to be located in the lower part of the tissue.
###############################################################################
# Co-occurrence across spatial dimensions
# +++++++++++++++++++++++++++++++++++++++
#
# We can visualize cluster co-occurrence in spatial dimensions using the original
# spatial coordinates.
# The co-occurrence score is defined as:
#
# .. math::
#
# \frac{p(exp|cond)}{p(exp)}
#
# where :math:`p(exp|cond)` is the conditional probability of observing a
# cluster :math:`exp` conditioned on the presence of a cluster :math:`cond`, whereas
# :math:`p(exp)` is the probability of observing :math:`exp` in the radius size
# of interest. The score is computed across increasing radii size
# around each cell in the tissue.
#
# We can compute this score with :func:`squidpy.gr.co_occurrence`
# and set the cluster annotation for the conditional probability with
# the argument ``clusters``. Then, we visualize the results with
# :func:`squidpy.pl.co_occurrence`.
# We visualize the result for two conditional groups, namely
# *basal CK tumor cell* and *T cells*.
# Compute the conditional co-occurrence score over increasing radii, then plot
# it for the two conditioning clusters of interest.
sq.gr.co_occurrence(adata, cluster_key="cell type")
sq.pl.co_occurrence(
    adata,
    cluster_key="cell type",
    clusters=["basal CK tumor cell", "T cells"],
    figsize=(15, 4),
)
###############################################################################
# We can observe that *T cells* seems to co-occur
# with *endothelial* and *vimentin hi stromal cells*,
# whereas *basal CK tumor cell* seem to largely cluster
# together, except for the presence of a type of stromal
# cells (*small elongated stromal cell*) at close distance.
###############################################################################
# Neighborhood enrichment
# +++++++++++++++++++++++
# A similar analysis that can inform on the neighbor structure of
# the tissue is the *neighborhood enrichment test*.
# You can compute such score with the following function: :func:`squidpy.gr.nhood_enrichment`.
# In short, it's an enrichment score on spatial proximity of clusters:
# if spots belonging to two different clusters are often close to each other,
# then they will have a high score and can be defined as being *enriched*.
# On the other hand, if they are far apart, the score will be low
# and they can be defined as *depleted*.
# This score is based on a permutation-based test, and you can set
# the number of permutations with the ``n_perms`` argument (default is 1000).
#
# Since the function works on a connectivity matrix, we need to compute that as well.
# This can be done with :func:`squidpy.gr.spatial_neighbors`.
# Please see :ref:`sphx_glr_auto_examples_graph_compute_spatial_neighbors.py` for more details
# of how this function works.
#
# Finally, we visualize the results with :func:`squidpy.pl.nhood_enrichment`.
# Build the spatial connectivity graph, then run the permutation-based
# neighborhood enrichment test (default 1000 permutations) and plot the result.
sq.gr.spatial_neighbors(adata)
sq.gr.nhood_enrichment(adata, cluster_key="cell type")
sq.pl.nhood_enrichment(adata, cluster_key="cell type")
###############################################################################
# Interestingly, *T cells* shows an enrichment with *stromal* and
# *endothelial cells*, as well as *macrophages*. Another interesting
# result is that *apoptotic tumor cells*, being uniformly spread across
# the tissue area, show a neighbor depletion against any other cluster
# (but a strong enrichment for itself). This is a correct interpretation
# from a permutation based approach, because the cluster annotation,
# being uniformly spread across the tissue, and in high number, it's
# more likely to be enriched with cell types from the same class,
# rather than different one.
###############################################################################
# Interaction matrix and network centralities
# +++++++++++++++++++++++++++++++++++++++++++
# Squidpy provides other descriptive statistics of the spatial graph.
# For instance, the interaction matrix, which counts the number of edges
# that each cluster share with all the others.
# This score can be computed with the function :func:`squidpy.gr.interaction_matrix`.
# We can visualize the results with :func:`squidpy.pl.interaction_matrix`.
# Count graph edges shared between each pair of clusters and plot the matrix.
sq.gr.interaction_matrix(adata, cluster_key="cell type")
sq.pl.interaction_matrix(adata, cluster_key="cell type")
###############################################################################
# Finally, similar to the previous analysis,
# we can investigate properties of the spatial graph by
# computing different network centralities:
#
# - degree_centrality
# - average_clustering
# - closeness_centrality
#
# Squidpy provides a convenient function for all of them:
# :func:`squidpy.gr.centrality_scores` and
# :func:`squidpy.pl.centrality_scores` for visualization.
# Degree centrality, average clustering and closeness centrality per cluster.
sq.gr.centrality_scores(
    adata,
    cluster_key="cell type",
)
sq.pl.centrality_scores(adata, cluster_key="cell type", figsize=(20, 5), s=500)
###############################################################################
# You can familiarize yourself with network centralities from the
# excellent networkx
# `documentation <https://networkx.org/documentation/stable/reference/algorithms/centrality>`_ .
# For the purpose of this analysis, we can appreciate that the *apoptotic tumor cell*
# clusters shows high closeness centrality, indicating that nodes belonging to that group
# are often close to each other in the spatial graph.
| 42.87037 | 103 | 0.667819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,212 | 0.894456 |
0403256dd8688242a04b96465856edb3da68c15d | 604 | py | Python | music_site/mongoServices/services.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | [
"MIT"
] | null | null | null | music_site/mongoServices/services.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | [
"MIT"
] | null | null | null | music_site/mongoServices/services.py | UVG-Teams/music-space | 8f464b6b1cbe59afea3be3ab1b9ed4e25ab0b424 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
client = MongoClient()
db = client.music_space
def print_collection(collection):
    """Dump every document of `collection` to stdout, framed by separator rules."""
    separator = "/" * 75
    print(separator)
    for document in db[collection].find():
        print(document)
    print(separator)
def save_sales_on_mongo(collection, data):
    """Coerce each sale's `total` to float and bulk-insert the sales into `collection`.

    Mutates `data['sales']` in place. Nothing is inserted when the list is
    empty, since `insert_many` rejects an empty document list.
    """
    for sale in data['sales']:
        sale['total'] = float(sale['total'])
    if data['sales']:
        db[collection].insert_many(data['sales'])
def save_recommendations_on_mongo(collection, data):
    """Store `data` as a single document under the 'rec' key in `collection`."""
    document = {'rec': data}
    db[collection].insert_one(document)
| 27.454545 | 69 | 0.634106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.190397 |
0403e4d350613ff44a670bf3891877f93092864d | 324 | py | Python | cryptodoge/cmds/units.py | grayfallstown-cryptodoge/cryptodoge | ffeb5218ce184a56073a5dc0ac5acddba3728bd4 | [
"Apache-2.0"
] | 10 | 2021-08-21T17:41:51.000Z | 2022-02-09T04:28:12.000Z | cryptodoge/cmds/units.py | grayfallstown-cryptodoge/cryptodoge | ffeb5218ce184a56073a5dc0ac5acddba3728bd4 | [
"Apache-2.0"
] | 1 | 2021-12-15T21:23:38.000Z | 2021-12-15T21:23:38.000Z | cryptodoge/cmds/units.py | grayfallstown-cryptodoge/cryptodoge | ffeb5218ce184a56073a5dc0ac5acddba3728bd4 | [
"Apache-2.0"
] | 2 | 2021-08-21T18:22:59.000Z | 2021-12-10T07:12:18.000Z | from typing import Dict
# The rest of the codebase uses mojos everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
    "cryptodoge": 10 ** 6,  # 1 cryptodoge (XCD) is 1,000,000 mojo (1 million)
    # "mojo:" (with the stray trailing colon) was a typo; the misspelled key is
    # kept so existing lookups keep working, and "mojo" is the corrected name.
    "mojo:": 1,
    "mojo": 1,
    "colouredcoin": 10 ** 3,  # 1 coloured coin is 1000 colouredcoin mojos
}
| 32.4 | 78 | 0.67284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.697531 |
04054ea3903ecb397e1a8c9f3bd80ebf9b23b358 | 3,203 | py | Python | scripts/build/submit_metrics.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | 11 | 2021-07-08T09:58:26.000Z | 2022-03-17T17:59:26.000Z | scripts/build/submit_metrics.py | RoddieKieley/o3de | e804fd2a4241b039a42d9fa54eaae17dc94a7a92 | [
"Apache-2.0",
"MIT"
] | 29 | 2021-07-06T19:33:52.000Z | 2022-03-22T10:27:49.000Z | scripts/build/submit_metrics.py | RoddieKieley/o3de | e804fd2a4241b039a42d9fa54eaae17dc94a7a92 | [
"Apache-2.0",
"MIT"
] | 4 | 2021-07-06T19:24:43.000Z | 2022-03-31T12:42:27.000Z | #
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import logging
import json
import socket
from datetime import datetime
# Give up on Filebeat socket operations after this many seconds.
SOCKET_TIMEOUT = 60
# Timestamp layout expected in incoming payloads and emitted in events.
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
# Ingest pipeline name the event is routed to.
FILEBEAT_PIPELINE = "filebeat"
# Default address of the local Filebeat listener.
FILEBEAT_DEFAULT_IP = "127.0.0.1"
FILEBEAT_DEFAULT_PORT = 9000
def parse_args(argv=None):
    """Parse command line options.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; defaults to ``sys.argv[1:]`` (argparse default).

    Returns
    -------
    argparse.Namespace
        Parsed options: ``file`` (loaded JSON content), ``index``,
        ``filebeat_ip`` and ``filebeat_port``.
    """
    parser = argparse.ArgumentParser(
        prog="submit_metrics.py",
        description="Pushes a JSON document via Filebeat.",
        add_help=False
    )

    def file_arg(arg):
        # Validate up front that the argument is a readable JSON file.
        try:
            with open(arg) as json_file:
                return json.load(json_file)
        except ValueError:
            raise argparse.ArgumentTypeError("Invalid json file '%s'" % arg)

    parser.add_argument("-f", "--file", default=None, type=file_arg, help="File containing JSON data to upload.")
    parser.add_argument("-i", "--index", default=None, help="Index to use when sending the data")
    parser.add_argument("-ip", "--filebeat_ip", default=FILEBEAT_DEFAULT_IP, help="IP address where filebeat service is listening")
    # type=int fix: without it, a CLI-supplied port remained a string and
    # socket.connect((ip, "9000")) in submit() would fail.
    parser.add_argument("-port", "--filebeat_port", default=FILEBEAT_DEFAULT_PORT, type=int, help="Port where filebeat service is listening")
    return parser.parse_args(argv)
def submit(index, payload, filebeat_ip = FILEBEAT_DEFAULT_IP, filebeat_port = FILEBEAT_DEFAULT_PORT):
    """Send `payload` (a dict containing a 'timestamp' key) to Filebeat as one JSON event.

    Returns True on success, False when the connection fails, times out, or
    the pipe breaks mid-send.
    """
    filebeat_address = filebeat_ip, filebeat_port
    logging.debug(f"Connecting to Filebeat on '{filebeat_address[0]}:{filebeat_address[1]}'")
    fb_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fb_socket.settimeout(SOCKET_TIMEOUT)
    try:
        fb_socket.connect(filebeat_address)
        event = {
            "index": index,
            # Round-tripping through strptime validates the payload timestamp format.
            "timestamp": datetime.strptime(payload['timestamp'], DATE_FORMAT).strftime(DATE_FORMAT),
            "pipeline": FILEBEAT_PIPELINE,
            "payload": json.dumps(payload),
        }
        # Serialise event, add new line and encode as UTF-8 before sending to Filebeat.
        data = (json.dumps(event) + "\n").encode()
        total_sent = 0
        logging.debug("Sending JSON data")
        while total_sent < len(data):
            try:
                total_sent += fb_socket.send(data[total_sent:])
            except BrokenPipeError:
                # The old handler closed the socket, reset total_sent and kept
                # looping on a dead connection; a broken pipe is unrecoverable.
                logging.error("Broken pipe while sending data to Filebeat")
                return False
        logging.debug("JSON data sent")
    except (ConnectionError, socket.timeout):
        logging.error("Failed to connect to Filebeat")
        return False
    finally:
        # Close the socket on every path (success, failure, unexpected error);
        # the original leaked it when an exception was raised.
        fb_socket.close()
        logging.debug(f"Disconnected from Filebeat on '{filebeat_address[0]}:{filebeat_address[1]}'")
    return True
if __name__ == "__main__":
    # Parse CLI arguments.
    args = parse_args()
    if not args.index:
        logging.error("Index not specified")
        exit(1)
    # Pass the parsed dict itself: submit() reads payload['timestamp'] and
    # performs its own json.dumps, so pre-serialising with json.dumps(args.file)
    # (as the original did) made both of those steps fail.
    if not submit(args.index, args.file, args.filebeat_ip, args.filebeat_port):
        exit(1)
| 35.588889 | 131 | 0.650952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,006 | 0.314081 |
040553ee254257bcd7d020b647af86d296e5b39b | 4,167 | py | Python | pylearn2/models/local_coordinate_coding.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | 3 | 2016-01-23T10:18:39.000Z | 2019-02-28T06:22:45.000Z | pylearn2/models/local_coordinate_coding.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | null | null | null | pylearn2/models/local_coordinate_coding.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | null | null | null | """
.. todo::
WRITEME
"""
import logging
from theano import function, shared
from pylearn2.optimization import linear_cg as cg
from pylearn2.optimization.feature_sign import feature_sign_search
import numpy as N
import theano.tensor as T
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
class LocalCoordinateCoding(object):
    """
    Local coordinate coding model: learns a dictionary W whose rows act as
    anchor points, and codes each example as a sparse combination of nearby
    dictionary elements, penalised by locality-weighted L1.

    Parameters
    ----------
    nvis : int
        Dimensionality of the visible (input) vectors.
    nhid : int
        Number of dictionary elements (anchor points).
    coeff : float
        Weight of the locality/sparsity penalty.
    """

    def __init__(self, nvis, nhid, coeff):
        self.nvis = nvis
        self.nhid = nhid
        self.coeff = float(coeff)
        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")
        self.redo_everything()

    def get_output_channels(self):
        """Return the number of output channels (the dictionary size)."""
        return self.nhid

    def redo_everything(self):
        """(Re)initialize the dictionary W with random Gaussian entries."""
        self.W = shared(self.rng.randn(self.nhid, self.nvis), name='W')
        self.W.T.name = 'W.T'

    def weights_format(self):
        """Return the axis ordering of the weight matrix: hidden x visible."""
        return ['h', 'v']

    def optimize_gamma(self, example):
        """
        Solve for the coding coefficients of a single example via
        feature-sign search on the locality-weighted dictionary.
        """
        # variable names chosen to follow the arguments to l1ls_featuresign
        Y = N.zeros((self.nvis,))
        Y[:] = example
        # squared distances from each dictionary element to the example;
        # the 1e-10 floor avoids division by zero for exact matches
        c = (1e-10 + N.square(self.W.get_value(borrow=True) -
                              example).sum(axis=1))
        A = self.W.get_value(borrow=True).T / c
        x = feature_sign_search(A, Y, self.coeff)
        g = x / c
        return g

    def train_batch(self, dataset, batch_size):
        """
        Run one training step on a random minibatch: solve per-example codes
        (gamma) with feature-sign search, then take conjugate-gradient steps
        on the dictionary W. Returns True so callers keep training.
        """
        # TODO-- this results in compilation happening every time learn is
        # called should cache the compilation results, including those
        # inside cg
        X = dataset.get_design_matrix()
        m = X.shape[0]
        assert X.shape[1] == self.nvis
        gamma = N.zeros((batch_size, self.nhid))
        cur_gamma = T.vector(name='cur_gamma')
        cur_v = T.vector(name='cur_v')
        recons = T.dot(cur_gamma, self.W)
        recons.name = 'recons'
        recons_diffs = cur_v - recons
        recons_diffs.name = 'recons_diffs'
        recons_diff_sq = T.sqr(recons_diffs)
        recons_diff_sq.name = 'recons_diff'
        recons_error = T.sum(recons_diff_sq)
        recons_error.name = 'recons_error'
        dict_dists = T.sum(T.sqr(self.W - cur_v), axis=1)
        dict_dists.name = 'dict_dists'
        abs_gamma = abs(cur_gamma)
        abs_gamma.name = 'abs_gamma'
        weighted_dists = T.dot(abs_gamma, dict_dists)
        weighted_dists.name = 'weighted_dists'
        penalty = self.coeff * weighted_dists
        penalty.name = 'penalty'
        # prevent directions of absolute flatness in the hessian
        debug = 1e-10 * T.sum(dict_dists)
        debug.name = 'debug'
        J = recons_error + penalty + debug
        J.name = 'J'
        Jf = function([cur_v, cur_gamma], J)
        start = self.rng.randint(m - batch_size + 1)
        batch_X = X[start:start + batch_size, :]
        logger.info('optimizing gamma')
        # range, not xrange: xrange does not exist on Python 3
        for i in range(batch_size):
            gamma[i, :] = self.optimize_gamma(batch_X[i, :])
        logger.info('max min')
        logger.info(N.abs(gamma).min(axis=0).max())
        logger.info('min max')
        logger.info(N.abs(gamma).max(axis=0).max())
        # Optimize W
        logger.info('optimizing W')
        logger.warning("not tested since switching to Razvan's all-theano "
                       "implementation of linear cg")
        cg.linear_cg(J, [self.W], max_iters=3)
        err = 0.
        for i in range(batch_size):
            err += Jf(batch_X[i, :], gamma[i, :])
        assert not N.isnan(err)
        assert not N.isinf(err)
        logger.info('err: {0}'.format(err))
        return True
| 25.564417 | 75 | 0.556036 | 3,839 | 0.921286 | 0 | 0 | 0 | 0 | 0 | 0 | 1,116 | 0.267819 |
0405d2a62685ff4d6a8f6ab13f719af2aff5fa9b | 7,239 | py | Python | components/mpas-source/testing_and_setup/compass/landice/hydro-radial/plot_hydro-radial_profile.py | meng630/GMD_E3SM_SCM | 990f84598b79f9b4763c3a825a7d25f4e0f5a565 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | components/mpas-source/testing_and_setup/compass/landice/hydro-radial/plot_hydro-radial_profile.py | meng630/GMD_E3SM_SCM | 990f84598b79f9b4763c3a825a7d25f4e0f5a565 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | components/mpas-source/testing_and_setup/compass/landice/hydro-radial/plot_hydro-radial_profile.py | meng630/GMD_E3SM_SCM | 990f84598b79f9b4763c3a825a7d25f4e0f5a565 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
'''
Plots profiles for hydro-margin test case
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import netCDF4
from optparse import OptionParser
import matplotlib.pyplot as plt
from matplotlib import cm
secInYr = 3600.0 * 24.0 * 365.0 # Note: this may be slightly wrong for some calendar types!
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="file to visualize", metavar="FILE")
parser.add_option("-t", "--time", dest="time", help="time step to visualize (0 based)", metavar="TIME")
parser.add_option("-s", "--save", action="store_true", dest="saveimages", help="include this flag to save plots as files")
parser.add_option("-n", "--nodisp", action="store_true", dest="hidefigs", help="include this flag to not display plots (usually used with -s)")
options, args = parser.parse_args()
# Fall back to sensible defaults when options are omitted.
if not options.filename:
    print("No filename provided. Using output.nc.")
    options.filename = "output.nc"
if not options.time:
    print("No time provided. Using time -1.")
    time_slice = -1
else:
    # -t must be parseable as an integer index into the time dimension
    time_slice = int(options.time)
# Open the model output and pull the fields for the chosen time slice.
f = netCDF4.Dataset(options.filename, 'r')
xCell = f.variables['xCell'][:]
yCell = f.variables['yCell'][:]
xEdge = f.variables['xEdge'][:]
yEdge = f.variables['yEdge'][:]
h = f.variables['waterThickness'][time_slice, :]
u = f.variables['waterVelocityCellX'][time_slice, :]
P = f.variables['waterPressure'][time_slice, :]
N = f.variables['effectivePressure'][time_slice, :]
div = f.variables['divergence'][time_slice, :]
opening = f.variables['openingRate'][time_slice, :]
closing = f.variables['closingRate'][time_slice, :]
melt = f.variables['basalMeltInput'][time_slice, :]
sliding = f.variables['basalSpeed'][time_slice, :]
days = f.variables['daysSinceStart'][:]
xtime = f.variables['xtime'][:]
# Fixed: the original line was missing the closing parenthesis on print(),
# which made the whole script a SyntaxError.
print("Total number of time levels={}".format(len(days)))
print("Using time slice {}, which is year {}".format(time_slice, days[time_slice]/365.0))
# NOTE(review): ''.join over a netCDF char array assumes str elements; on
# Python 3 this may need bytes decoding — verify against the file contents.
print("xtime=" + ''.join(xtime[time_slice, :]))
print("Attempting to read thickness field from landice_grid.nc.")
fin = netCDF4.Dataset("landice_grid.nc", 'r')
H = fin.variables['thickness'][0, :]
# Find center row - currently files are set up to have central row at y=0
unique_ys=np.unique(yCell[:])
# pick the middle of the sorted unique y values as the profile row
centerY=unique_ys[len(unique_ys)//2]
print("number of ys={}, center y index={}, center Y value={}".format(len(unique_ys), len(unique_ys)//2, centerY))
ind = np.nonzero(yCell[:] == centerY)
# x coordinates of the profile cells, converted from m to km
x = xCell[ind]/1000.0
print("start plotting.")
# Figure 1: water depth and water pressure along the center profile.
fig = plt.figure(1, facecolor='w')
# water thickness
ax1 = fig.add_subplot(121)
#plt.plot(x, H[ind]*917.0*9.81/1.0e5, '.-')
plt.plot(x, h[ind], '.-')
plt.xlabel('X-position (km)')
plt.ylabel('water depth (m)')
plt.grid(True)
# water pressure (overburden from ice thickness vs modeled water pressure, in bar)
ax = fig.add_subplot(122, sharex=ax1)
plt.plot(x, H[ind]*910.0*9.80616 / 1.0e5, '.-')
plt.plot(x, P[ind] / 1.0e5, '.--')
plt.xlabel('X-position (km)')
plt.ylabel('water pressure (bar)')
plt.grid(True)
# Figure 2: time series per profile cell, to judge how close to steady state we are
fig = plt.figure(2, facecolor='w')
ax1 = fig.add_subplot(211)
for i in ind:
    plt.plot(days/365.0, f.variables['waterThickness'][:,i])
plt.xlabel('Years since start')
plt.ylabel('water thickness (m)')
plt.grid(True)
ax = fig.add_subplot(212, sharex=ax1)
for i in ind:
    plt.plot(days/365.0, f.variables['effectivePressure'][:,i]/1.0e6)
plt.xlabel('Years since start')
plt.ylabel('effective pressure (MPa)')
plt.grid(True)
# Figure 3: opening/closing rates and derived diagnostics along the profile
fig = plt.figure(3, facecolor='w')
nplt=5
ax = fig.add_subplot(nplt,1,1)
plt.plot(x, opening[ind], 'r', label='opening')
plt.plot(x, closing[ind], 'b', label='closing')
plt.plot(x, melt[ind] / 1000.0, 'g', label='melt')
plt.xlabel('X-position (km)')
plt.ylabel('rate (m/s)')
plt.legend()
plt.grid(True)
# Steady-state N = f(h) comparison from the cavity evolution equation
ax = fig.add_subplot(nplt,1,2)
plt.plot(x, N[ind]/1.0e6, '.-', label='modeled transient to SS')
plt.plot(x, (opening[ind]/(0.04*3.1709792e-24*h[ind]))**0.3333333 / 1.0e6, '.--r', label='SS N=f(h)') # steady state N=f(h) from the cavity evolution eqn
plt.xlabel('X-position (km)')
plt.ylabel('effective pressure (MPa)')
#plt.ylim((0.0, 0.1))
plt.grid(True)
plt.legend()
ax = fig.add_subplot(nplt,1,3)
plt.plot(x, u[ind])
plt.ylabel('water velocity (m/s)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,4)
plt.plot(x, u[ind]*h[ind])
plt.ylabel('water flux (m2/s)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,5)
plt.plot(x, div[ind])
plt.plot(x, melt[ind] / 1000.0, 'g', label='melt')
plt.ylabel('divergence (m/s)')
plt.grid(True)
# optional - check velo field correctness
#fig = plt.figure(4, facecolor='w')
#plt.plot(x, sliding[ind])
#plt.grid(True)
# plot some edge quantities
inde = np.nonzero(yEdge[:] == centerY)
xe = xEdge[inde]/1000.0
ve = f.variables['waterVelocity'][time_slice,:]
#k = f.variables['effectiveConducEdge'][time_slice,:]
dphie = f.variables['hydropotentialBaseSlopeNormal'][time_slice,:]
he = f.variables['waterThicknessEdgeUpwind'][time_slice,:]
fluxe = f.variables['waterFluxAdvec'][time_slice,:]
fig = plt.figure(5, facecolor='w')
nplt=5
ax1 = fig.add_subplot(nplt,1,1)
plt.plot(xe, dphie[inde],'.')
plt.ylabel('dphidx edge)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,2, sharex=ax1)
plt.plot(x, P[ind],'x')
plt.ylabel('dphidx edge)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,3, sharex=ax1)
plt.plot(xe, ve[inde],'.')
plt.ylabel('vel edge)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,4, sharex=ax1)
plt.plot(xe, he[inde],'.')
plt.plot(x, h[ind],'x')
plt.ylabel('h edge)')
plt.grid(True)
ax = fig.add_subplot(nplt,1,5, sharex=ax1)
plt.plot(xe, fluxe[inde],'.')
plt.ylabel('flux edge)')
plt.grid(True)
# ==========
# Make plot similar to Bueler and van Pelt Fig. 5
# get thickness/pressure at time 0 - this should be the nearly-exact solution interpolated onto the MPAS mesh
h0 = f.variables['waterThickness'][0,:]
P0 = f.variables['waterPressure'][0,:]
hasice = (sliding>0.0) # assuming sliding has been zeroed where there is no ice, so we don't need to get the thickness field
Werr = np.absolute(h - h0)
Perr = np.absolute(P - P0)
dcEdge= f.variables['dcEdge'][:]
dx = dcEdge.mean() # ideally should restrict this to edges with ice
fig = plt.figure(6, facecolor='w')
ax = fig.add_subplot(2,1,1)
plt.plot(dx, Werr[hasice].mean(), 's', label='avg W err')
plt.plot(dx, Werr[hasice].max(), 'x', label='max W err')
ax.set_yscale('log')
plt.grid(True)
plt.legend()
plt.xlabel('delta x (m)')
plt.ylabel('error in W (m)')
print("avg W err={}".format(Werr[hasice].mean()))
print("max W err={}".format(Werr[hasice].max()))
ax = fig.add_subplot(2,1,2)
plt.plot(dx, Perr[hasice].mean()/1.0e5, 's', label='avg P err')
plt.plot(dx, Perr[hasice].max()/1.0e5, 'x', label='max P err')
ax.set_yscale('log')
plt.grid(True)
plt.legend()
plt.xlabel('delta x (m)')
plt.ylabel('error in P (bar)')
print("avg P err={}".format(Perr[hasice].mean()/1.0e5))
print("max P err={}".format(Perr[hasice].max()/1.0e5))
print("plotting complete")
plt.draw()
if options.saveimages:
print("Saving figures to files.")
plt.savefig('GL-position.png')
if options.hidefigs:
print("Plot display disabled with -n argument.")
else:
plt.show()
| 29.426829 | 154 | 0.683105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,739 | 0.378367 |
0405f2b3fc38c8bd5678c1eebbbb0e7b2bd3a132 | 3,301 | py | Python | abovl/app.py | romanchyla/arxiv_biboverlay | 2847a564b55fd96d461798c535377c679bc829e8 | [
"MIT"
] | 2 | 2019-03-17T01:50:46.000Z | 2020-10-02T07:57:21.000Z | abovl/app.py | mattbierbaum/arxiv_biboverlay | 2847a564b55fd96d461798c535377c679bc829e8 | [
"MIT"
] | 1 | 2019-10-24T12:16:30.000Z | 2019-10-24T12:19:13.000Z | abovl/app.py | arXiv/arxiv-biboverlay-ads-tokens | 2847a564b55fd96d461798c535377c679bc829e8 | [
"MIT"
] | 2 | 2020-12-06T16:29:19.000Z | 2021-11-05T12:30:41.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from adsmutils import ADSFlask, get_date
from views import bp
from abovl.models import OAuthClient
from flask.ext.session import Session
def create_app(**config):
    """Build and configure the arxiv_biboverlay Flask application.

    :param config: keyword overrides passed through as the local config
    :return: flask.Flask application instance
    """
    application = AbovlADSFlask('arxiv_biboverlay', local_config=config)
    application.url_map.strict_slashes = False
    application.register_blueprint(bp)
    session_extension = Session()
    session_extension.init_app(application)
    return application
class AbovlADSFlask(ADSFlask):
    """ADS Flask application that manages OAuth client records.

    Extends ADSFlask with helpers to bootstrap, load, verify and delete
    OAuth clients persisted through the application's SQL session.
    """

    def __init__(self, *args, **kwargs):
        ADSFlask.__init__(self, *args, **kwargs)
        # HTTP client is provided by requests module; it handles connection pooling
        # here we just set some headers we always want to use while sending a request
        self.client.headers.update({'Authorization': 'Bearer {}'.format(self.config.get("API_TOKEN", ''))})

    def load_client(self, token):
        """Loads client entry from the database.

        :param token: OAuth access token identifying the client
        :return: dict representation of the client, or None when not found
        """
        with self.session_scope() as session:
            t = session.query(OAuthClient).filter_by(token=token).first()
            if t:
                return t.toJSON()

    def delete_client(self, cid):
        """Deletes the OAuth client row with the given primary key."""
        with self.session_scope() as session:
            session.query(OAuthClient).filter_by(id=cid).delete()
            session.commit()

    def verify_token(self, token):
        """Checks *token* against the ADS protected endpoint.

        :return: True when the API accepts the token (HTTP 200)
        """
        url = '{}/{}'.format(self.config.get('API_URL'), self.config.get('PROTECTED_ENDPOINT', 'v1/accounts/protected'))
        r = self.client.get(url, headers={'Authorization': 'Bearer {}'.format(token)})
        return r.status_code == 200  # TODO: we could also handle refresh in the future

    def create_client(self):
        """Calls ADS api and gets a new OAuth application registered.

        On success the new client is persisted locally and returned as a
        dict; on failure the error is logged and None is returned.
        """
        url = '{}/{}'.format(self.config.get('API_URL'), self.config.get('BOOTSTRAP_ENDPOINT', 'v1/accounts/bootstrap'))
        counter = 0
        with self.session_scope() as session:
            counter = session.query(OAuthClient).count()  # or we could simply use UUID
        kwargs = {
            'name': '{}:{}'.format(self.config.get('CLIENT_NAME_PREFIX', 'OAuth application'), counter+1),
            'scopes': ' '.join(self.config.get('CLIENT_SCOPES', []) or []),
            'redirect_uri': self.config.get('CLIENT_REDIRECT_URI', None),
            'create_new': True,
            'ratelimit': self.config.get('CLIENT_RATELIMIT', 1.0)
        }
        r = self.client.get(url, params=kwargs)
        if r.status_code == 200:
            j = r.json()
            with self.session_scope() as session:
                c = OAuthClient(client_id=j['client_id'], client_secret=j['client_secret'],
                                token=j['access_token'], refresh_token=j['refresh_token'],
                                expire_in=j['expire_in'], scopes=' '.join(j['scopes'] or []),
                                username=j['username'], ratelimit=j['ratelimit'])
                session.add(c)
                session.commit()
                return c.toJSON()
        else:
            # Fixed: stray dataset-metadata text that had been fused onto this
            # line was removed; behavior (log and fall through to None) unchanged.
            self.logger.error('Unexpected response for %s (%s): %s', url, kwargs, r.text)
0405fb654ca1caa004613619d41e36289a568124 | 853 | bzl | Python | integration/bazel_workspace_tests/bazel_ngtsc_plugin/packages.bzl | John-Cassidy/angular | 6375fa79875c0fe7b815efc45940a6e6f5c9c9eb | [
"MIT"
] | 95,154 | 2015-01-01T04:52:07.000Z | 2022-03-31T21:33:49.000Z | integration/bazel_workspace_tests/bazel_ngtsc_plugin/packages.bzl | John-Cassidy/angular | 6375fa79875c0fe7b815efc45940a6e6f5c9c9eb | [
"MIT"
] | 45,008 | 2015-01-03T23:36:09.000Z | 2022-03-31T23:57:28.000Z | integration/bazel_workspace_tests/bazel_ngtsc_plugin/packages.bzl | John-Cassidy/angular | 6375fa79875c0fe7b815efc45940a6e6f5c9c9eb | [
"MIT"
# Configuration of the Angular framework packages consumed by this workspace.
# Each entry maps an npm package name to the secondary entry points to build.
# (Fixed: dataset-metadata garbage that had been fused before this assignment
# was removed; the package data itself is unchanged.)
ANGULAR_PACKAGES_CONFIG = [
    ("@angular/animations", struct(entry_points = ["browser"])),
    ("@angular/common", struct(entry_points = ["http/testing", "http", "testing"])),
    ("@angular/compiler", struct(entry_points = ["testing"])),
    ("@angular/core", struct(entry_points = ["testing"])),
    ("@angular/forms", struct(entry_points = [])),
    ("@angular/platform-browser", struct(entry_points = ["testing", "animations"])),
    ("@angular/platform-browser-dynamic", struct(entry_points = ["testing"])),
    ("@angular/router", struct(entry_points = [])),
]

# Derived list of package descriptors. "platform" defaults to "browser" when a
# config entry does not specify one; "name" strips the "@angular/" scope prefix.
ANGULAR_PACKAGES = [
    struct(
        name = name[len("@angular/"):],
        entry_points = config.entry_points,
        platform = config.platform if hasattr(config, "platform") else "browser",
        module_name = name,
    )
    for name, config in ANGULAR_PACKAGES_CONFIG
]
| 40.619048 | 84 | 0.638921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.33177 |
0406964288c8043fef5666ff547d7e3e0ba8ec71 | 792 | py | Python | 2017-05-13-concurrent-and-parallel-programming-in-python-part-1/threading_no_inheritance.py | funweb/blog | 3544121ba70522d5ffced1a0d7f8ff3fa0afdeca | [
"BSD-3-Clause"
] | 4 | 2016-07-23T14:15:56.000Z | 2019-05-22T10:05:27.000Z | 2017-05-13-concurrent-and-parallel-programming-in-python-part-1/threading_no_inheritance.py | funweb/blog | 3544121ba70522d5ffced1a0d7f8ff3fa0afdeca | [
"BSD-3-Clause"
] | null | null | null | 2017-05-13-concurrent-and-parallel-programming-in-python-part-1/threading_no_inheritance.py | funweb/blog | 3544121ba70522d5ffced1a0d7f8ff3fa0afdeca | [
"BSD-3-Clause"
] | 3 | 2016-08-30T15:42:32.000Z | 2019-11-24T09:58:21.000Z | #!/usr/bin/env python
#
# Author: Daniela Duricekova <daniela.duricekova@gmail.com>
#
import requests
import threading
import queue
# XKCD comic pages fetched concurrently; each worker thread reports the
# length of the page it downloaded.
URLS = [
    'https://xkcd.com/138/',
    'https://xkcd.com/149/',
    'https://xkcd.com/285/',
    'https://xkcd.com/303/',
    'https://xkcd.com/327/',
    'https://xkcd.com/387/',
    'https://xkcd.com/612/',
    'https://xkcd.com/648/'
]
def get_content_len(url, results):
    """Fetch *url* and put a ``(url, content_length)`` tuple on *results*.

    Designed to run in a worker thread; *results* must be a thread-safe
    ``queue.Queue`` shared with the main thread.
    """
    # A timeout keeps a stalled server from blocking the worker thread forever.
    r = requests.get(url, timeout=30)
    results.put((url, len(r.text)))
if __name__ == '__main__':
    # Fan out one worker thread per URL; workers push (url, length) pairs
    # onto a shared queue.
    result_queue = queue.Queue()
    workers = [
        threading.Thread(target=get_content_len, args=(url, result_queue))
        for url in URLS
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # All workers have finished; drain and print everything they produced.
    while not result_queue.empty():
        print(result_queue.get())
| 19.317073 | 73 | 0.592172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.35101 |
040a17322fbf5848fa187c564957601556a8278d | 593 | py | Python | frontend/felask/__init__.py | pdonorio/restangulask | 8b2f59223f04e4857d131042a1b0d2666232ed8a | [
"MIT"
] | 4 | 2016-05-14T09:23:21.000Z | 2017-05-18T11:37:47.000Z | frontend/felask/__init__.py | pdonorio/restangulask | 8b2f59223f04e4857d131042a1b0d2666232ed8a | [
"MIT"
] | 28 | 2015-12-18T10:34:39.000Z | 2021-07-13T17:23:09.000Z | frontend/felask/__init__.py | pdonorio/restangulask | 8b2f59223f04e4857d131042a1b0d2666232ed8a | [
"MIT"
] | 2 | 2016-05-14T11:55:54.000Z | 2016-07-28T06:17:39.000Z | # -*- coding: utf-8 -*-
""" Factory and blueprints patterns """
import os
from commons.logs import get_logger
from commons.meta import Meta as m
logger = get_logger(__name__)
# --- configuration selection -------------------------------------------------
# Map of FLASK_CONFIGURATION environment values to configuration module paths.
config = {
    "default": "config.devel",
    "development": "config.devel",
    "production": "config.prod",
    # "testing": "bookshelf.config.TestingConfig",
}
# Pick the configuration module from the environment (falls back to devel).
config_name = os.getenv('FLASK_CONFIGURATION', 'default')
CONFIG_MODULE = config[config_name]
# NOTE(review): m().get_module_from_string presumably imports the module by its
# dotted path -- confirm against commons.meta.Meta.
configuration_module = m().get_module_from_string(CONFIG_MODULE)
logger.debug("Configuration:\t%s in [%s]" % (config_name, CONFIG_MODULE))
| 25.782609 | 73 | 0.698145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.43339 |
040a73dce7da16a79d50d9fa98ac26dd027253c4 | 3,235 | py | Python | pickups/irc.py | mtomwing/pickups | b25967eab1ec6316995b1420b2e9d9b862094bf8 | [
"MIT"
] | 75 | 2015-01-07T15:32:23.000Z | 2022-02-09T23:09:24.000Z | pickups/irc.py | okurz/pickups | b25967eab1ec6316995b1420b2e9d9b862094bf8 | [
"MIT"
] | 18 | 2015-02-17T17:31:46.000Z | 2017-04-22T05:36:46.000Z | pickups/irc.py | okurz/pickups | b25967eab1ec6316995b1420b2e9d9b862094bf8 | [
"MIT"
] | 28 | 2015-01-21T16:23:30.000Z | 2022-02-09T23:09:36.000Z | import logging
# IRC numeric reply codes (RFC 2812 names).
RPL_WELCOME = 1
RPL_WHOISUSER = 311
RPL_ENDOFWHO = 315
RPL_LISTSTART = 321
RPL_LIST = 322
RPL_LISTEND = 323
RPL_TOPIC = 332
RPL_WHOREPLY = 352
RPL_NAMREPLY = 353
RPL_ENDOFNAMES = 366
RPL_MOTD = 372
RPL_MOTDSTART = 375
RPL_ENDOFMOTD = 376
ERR_NOSUCHCHANNEL = 403

logger = logging.getLogger(__name__)


class Client(object):
    """Wraps one connected IRC client's stream pair and speaks the wire format."""

    def __init__(self, reader, writer):
        self.reader = reader
        self.writer = writer
        self.nickname = None
        self.sent_messages = []

    def readline(self):
        """Read one raw line from the client's input stream."""
        return self.reader.readline()

    def write(self, sender, command, *args):
        """Send one IRC line to the client, prefixed with *sender*."""
        verb = command if isinstance(command, str) else '{:03}'.format(command)
        trailing = ' '.join('{}'.format(piece) for piece in args)
        message = ':{} {} {}\r\n'.format(sender, verb, trailing)
        logger.info('Sent: %r', message)
        self.writer.write(message.encode('utf-8'))

    def swrite(self, command, *args):
        """Send a message that originates from the server itself."""
        self.write('pickups', command, self.nickname, *args)

    def uwrite(self, command, *args):
        """Send a message that originates from this client."""
        self.write(self.nickname, command, *args)

    # IRC Stuff

    def welcome(self):
        """Greet the client after registration."""
        self.swrite(RPL_WELCOME, self.nickname, ':Welcome to pickups!')

    def list_channels(self, info):
        """Send the LIST reply for the given (channel, user_count, topic) rows."""
        self.swrite(RPL_LISTSTART)
        for name, user_count, channel_topic in info:
            self.swrite(RPL_LIST, name, user_count, ':{}'.format(channel_topic))
        self.swrite(RPL_LISTEND, ':End of /LIST')

    def join(self, channel):
        """Announce that this client joined *channel*."""
        self.uwrite('JOIN', ':{}'.format(channel))

    def list_nicks(self, channel, nicks):
        """Send the NAMES reply for *channel*."""
        self.swrite(RPL_NAMREPLY, '=', channel, ':{}'.format(' '.join(nicks)))
        self.swrite(RPL_ENDOFNAMES, channel, ':End of NAMES list')

    def who(self, query, responses):
        """Send one WHO reply per entry in *responses*, then the end marker."""
        for entry in responses:
            self.swrite(
                RPL_WHOREPLY, entry['channel'],
                '~{}'.format(entry['user']), 'localhost', 'pickups',
                entry['nick'], 'H', ':0', entry['real_name']
            )
        self.swrite(RPL_ENDOFWHO, query, ':End of WHO list')

    def topic(self, channel, topic):
        """Send the channel topic to the client."""
        self.swrite(RPL_TOPIC, channel, ':{}'.format(topic))

    def privmsg(self, hostmask, target, message):
        """Deliver *message* from *hostmask*, one PRIVMSG per non-empty line."""
        for text in message.splitlines():
            if text:
                self.write(hostmask, 'PRIVMSG', target, ':{}'.format(text))

    def tell_nick(self, nickname):
        """Acknowledge and record the client's nickname."""
        # The NICK line is deliberately emitted with the *old* nickname as sender.
        self.uwrite('NICK', nickname)
        self.nickname = nickname

    def pong(self):
        """Reply to a server ping."""
        self.swrite('PONG', 'localhost')
| 31.715686 | 78 | 0.605873 | 2,914 | 0.900773 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.260587 |
040f11281f6c105b03b8346650ea2ba027a765e1 | 14,733 | py | Python | ForumMediaAnalyzer/MediaAnalyzer.py | jesseVDwolf/ForumMediaAnalyzer | 63d98920380073ab72528ac7f58a53f5f1adc9a3 | [
"MIT"
] | null | null | null | ForumMediaAnalyzer/MediaAnalyzer.py | jesseVDwolf/ForumMediaAnalyzer | 63d98920380073ab72528ac7f58a53f5f1adc9a3 | [
"MIT"
] | null | null | null | ForumMediaAnalyzer/MediaAnalyzer.py | jesseVDwolf/ForumMediaAnalyzer | 63d98920380073ab72528ac7f58a53f5f1adc9a3 | [
"MIT"
] | null | null | null | import re
import cv2
import json
import base64
import logging
import requests
import numpy as np
from datetime import datetime
import pytz
import gridfs
import pymongo
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError as MongoServerSelectionTimeoutError
import imagehash
from PIL import Image
from skimage.metrics import structural_similarity
class AnalyzeConditionsNotMetException(Exception):
    """
    Raised when an error is encountered during execution of the run() function
    (e.g. the scraper REST host or the MongoDB server is unreachable, or a
    response payload cannot be decoded as JSON).
    """
    pass
class MediaAnalyzer(object):
    """
    This class is used to analyze data generated by a MediaScraper object:
    https://github.com/jesseVDwolf/ForumMediaScraper
    It will retrieve data in batches using the MediaScraper's REST interface:
    https://github.com/jesseVDwolf/ForumMediaScraperREST
    """
    # taken from https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
    URL_VALIDATION_REGEX = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    # Fallback MongoDB connection string used when no mongo_uri is passed.
    MONGO_DEFAULT_URI = "mongodb://localhost:27017"
    def __init__(self, scraper_rest_host: str="http://localhost:5000", log_level: int=logging.DEBUG,
                 document_retrieval_batch_size: int=5, mongo_uri: str=MONGO_DEFAULT_URI):
        # Fail fast on an obviously malformed scraper URL.
        if re.match(MediaAnalyzer.URL_VALIDATION_REGEX, scraper_rest_host) is None:
            raise ValueError('Invalid scraper_rest_host url: %s' % scraper_rest_host)
        self.scraper_rest_host = scraper_rest_host
        self.document_retrieval_batch_size = document_retrieval_batch_size
        self.timezone = pytz.timezone('Europe/Berlin')
        # create database related objects
        self._mongo_client = MongoClient(mongo_uri)
        self._mongo_database = self._mongo_client['9GagMedia']
        self.gridfs = gridfs.GridFS(self._mongo_database)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(log_level)
        logging_args = {
            "format": '%(asctime)s %(levelname)-8s %(message)s',
            "level": logging.INFO,
            "datefmt": '%Y-%m-%d %H:%M:%S'
        }
        logging.basicConfig(**logging_args)
        # Seed the Counter collection on first use.
        if not self._mongo_database['Counter'].find_one():
            self._mongo_database['Counter'].insert_one({'_id': 'OrderNum', 'val': 1})
    def _get_tz_date(self):
        # Current UTC time localized to the configured timezone (Europe/Berlin).
        return datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(self.timezone)
    @staticmethod
    def _scale_images(image_one: np.ndarray, image_two: np.ndarray, scale_percent_dif: float=0.02):
        # Scale the images so that they have the same
        # dimensions. The bigger image will always be scaled down;
        # It is considered bigger if contains more pixels i.e width x height
        # Returns (None, None) when the aspect ratios differ by more than
        # scale_percent_dif, i.e. the images cannot sensibly be compared.
        if image_one.shape == image_two.shape:
            return image_one, image_two
        # use aspect ratio to determine if images can be rescaled
        *_, w1, h1 = cv2.boundingRect(image_one)
        *_, w2, h2 = cv2.boundingRect(image_two)
        if abs((float(w1) / h1) - (float(w2) / h2)) >= scale_percent_dif:
            return None, None
        if sum(image_one.shape[:2]) > sum(image_two.shape[:2]):
            image_one = cv2.resize(
                src=image_one,
                dsize=(image_two.shape[1], image_two.shape[0]),
                interpolation=cv2.INTER_CUBIC
            )
        else:
            image_two = cv2.resize(
                src=image_two,
                dsize=(image_one.shape[1], image_one.shape[0]),
                interpolation=cv2.INTER_CUBIC
            )
        return image_one, image_two
    @staticmethod
    def _mse(image_one: np.ndarray, image_two: np.ndarray):
        # the 'Mean Squared Error' between the two images is the
        # sum of the squared difference between the two images;
        # NOTE: the two images must have the same dimension
        err = np.sum((image_one.astype("float") - image_two.astype("float")) ** 2)
        err /= float(image_one.shape[0] * image_one.shape[1])
        # return the MSE, the lower the error, the more "similar"
        # the two images are
        return err
    @staticmethod
    def _img_hash(image_one: np.ndarray, image_two: np.ndarray, func=imagehash.average_hash, cutoff: int=10):
        # Use an image hashing algorithm to check for similarity between images
        # Calculate the hashes of both images using one of the functions from
        # the https://github.com/JohannesBuchner/imagehash project and subtract
        # them from each other. A cutoff can be specified to account for
        # little discrepancies
        h1 = func(Image.fromarray(image_one))
        h2 = func(Image.fromarray(image_two))
        s = (h1 - h2) - cutoff
        # return the similarity between images where the closer to 0 the better.
        # taking into account the specified cutoff where s can not be a negative number
        return int((abs(s)+s)/2)
    def run(self):
        """Fetch scraped posts in batches and mark reposts in MongoDB.

        Raises AnalyzeConditionsNotMetException when the scraper REST host,
        the MongoDB server or a JSON payload is unusable.
        """
        try:
            """
            Pre-run validation of resources on scraper rest interface and
            the locally configured mongodb server
            """
            r = requests.get(
                url="%s/query" % self.scraper_rest_host,
                params={'limit': 1, 'offset': 0}
            )
            r.raise_for_status()
            self._mongo_client.server_info()
            """
            Start processing. If posts have already been processed, use the ArticleId of the
            last processed article to determine when to stop retrieving more data. Then use
            different methods to determine similairity between images:
            - image hashes
            - mean squared error
            - structural similarity measure
            """
            # NOTE(review): sorted ascending yet treated as the most recently
            # processed article -- verify the ordering semantics of OrderNum.
            last_article = self._mongo_database['Posts'].find_one(sort=[("OrderNum", pymongo.ASCENDING)])
            run = self._mongo_database['Runs'].insert_one({
                'StartProcessTime': self._get_tz_date(),
                'EndProcessTime': None,
                'PostsProcessed': 0,
                'BatchesProcessed': 0
            })
            request_offset = 0
            final_batch = False
            last_article_found = False
            posts_processed = 0
            batches_processed = 0
            while True:
                # Pull the next batch of scraped documents from the REST host.
                resp = requests.get(url="%s/query" % self.scraper_rest_host, params={
                    'limit': self.document_retrieval_batch_size,
                    'offset': request_offset
                })
                resp.raise_for_status()
                data = resp.json()
                self.logger.debug('%s: Received new batch of data at %s using offset %d and limit %d' % (
                    str(run.inserted_id), self._get_tz_date().strftime("%Y-%m-%d %H:%M:%S"), request_offset, self.document_retrieval_batch_size))
                if len(data['documents']) == 0:
                    self.logger.debug('%s: No more documents returned by %s using offset %d and limit %d' % (
                        str(run.inserted_id), self.scraper_rest_host, request_offset, self.document_retrieval_batch_size))
                    self.logger.info('%s: No more documents found. Finished %d batches' % (str(run.inserted_id), batches_processed))
                    break
                # A short batch means the source is exhausted after this pass.
                if len(data['documents']) < self.document_retrieval_batch_size:
                    self.logger.debug('%s: No more data available from %s. Setting final batch' % (
                        str(run.inserted_id), self.scraper_rest_host))
                    final_batch = True
                # Skip batches in which every document is empty.
                if len([doc for doc in data['documents'] if len(doc['Posts']) == 0]) == len(data['documents']):
                    self.logger.debug('%s: No posts found in documents at offset %d with limit %d' % (
                        str(run.inserted_id), request_offset, self.document_retrieval_batch_size))
                    self.logger.info('%s: No posts found in batch. Retrieving next batch' % str(run.inserted_id))
                    request_offset += self.document_retrieval_batch_size
                    batches_processed += 1
                    continue
                for doc in [doc for doc in data['documents'] if len(doc['Posts']) != 0]:
                    if last_article:
                        # Stop once we reach material processed by a previous run.
                        if last_article['ArticleId'] == doc['StartPostId'] or last_article_found:
                            self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
                                str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
                            final_batch = True
                            break
                    self.logger.info('%s: %d posts found for processing in document %s' % (
                        str(run.inserted_id), len(doc['Posts']), doc['_id']))
                    processed_posts = self._mongo_database['Posts'].find({})
                    for post in doc['Posts']:
                        if last_article:
                            if last_article['ArticleId'] == post['ArticleId']:
                                self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
                                    str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
                                last_article_found = True
                                break
                        # Decode the base64 media payload into a grayscale image
                        # and store the raw bytes in GridFS.
                        im_s = str(post['MediaData'])
                        im_b = base64.b64decode(im_s.encode('utf-8'))
                        im_buff = np.asarray(bytearray(im_b), dtype=np.uint8)
                        im = cv2.imdecode(im_buff, cv2.IMREAD_GRAYSCALE)
                        media_id = self.gridfs.put(im_b)
                        # Metadata document for this post; flipped to a repost
                        # record below when a near-identical original is found.
                        md = {
                            "ArticleId": str(post['ArticleId']),
                            "RunId": run.inserted_id,
                            "PostProcessedTime": self._get_tz_date(),
                            "Dim": im.shape,
                            "MediaId": media_id,
                            "IsOriginal": True,
                            "RepostOff": None,
                            "Reposts": []
                        }
                        for pp in processed_posts:
                            if post['ArticleId'] == pp['ArticleId']:
                                # duplicates will always be exactly the same
                                # solution to a bug in the MediaScraper...
                                continue
                            f = self.gridfs.get(pp['MediaId'])
                            im1_buff = np.asarray(bytearray(f.read(size=-1)), dtype=np.uint8)
                            im1 = cv2.imdecode(im1_buff, cv2.IMREAD_GRAYSCALE)
                            im0, im1 = self._scale_images(im, im1)
                            if not hasattr(im0, "shape"):
                                # images could not be scaled since difference in dimensions
                                # is too big. Must be unique based on this
                                continue
                            mse = self._mse(im0, im1)
                            ss = structural_similarity(im0, im1)
                            hs = self._img_hash(im0, im1)
                            # The hash similarity will determine if an image is even close to being
                            # similar to the processed image. The structural similarity measure will
                            # then decide if this is actually correct. A last check is done to make
                            # sure that its not a meme that is posted with the same background but
                            # with different text using the very sensitive mse measure
                            if hs == 0:
                                if ss >= 0.65:
                                    if not mse >= 2000.00 and pp['IsOriginal']:
                                        # db image seems to be very similar to the processed image
                                        md.update({"IsOriginal": False, "RepostOff": pp['_id'], "Reposts": None})
                                        pp['Reposts'].append({
                                            "ArticleId": md['ArticleId'],
                                            "mse": mse,
                                            "ssim": ss,
                                            "hs": hs,
                                            "certainty": 1
                                        })
                                        self._mongo_database['Posts'].replace_one({"_id": pp['_id']}, pp)
                                    else:
                                        # image background might be the same with different text
                                        continue
                                else:
                                    # structural similarity is too far off must be unique
                                    continue
                            else:
                                # images are not similar at all
                                continue
                        self._mongo_database['Posts'].insert_one(md)
                        posts_processed += 1
                if final_batch:
                    break
                request_offset += self.document_retrieval_batch_size
                batches_processed += 1
            self.logger.info('%s: Finished final batch. %d posts processed' % (str(run.inserted_id), posts_processed))
            # Close out the run record with totals and the end timestamp.
            self._mongo_database['Runs'].update_one({'_id': run.inserted_id}, {
                "$set": {'PostsProcessed': posts_processed, 'EndProcessTime': self._get_tz_date()}
            })
        except requests.exceptions.RequestException as ree:
            raise AnalyzeConditionsNotMetException({'message': ree})
        except MongoServerSelectionTimeoutError as msste:
            raise AnalyzeConditionsNotMetException({'message': msste})
        except json.JSONDecodeError as je:
            raise AnalyzeConditionsNotMetException({'message': je})
| 49.439597 | 146 | 0.527591 | 14,321 | 0.972036 | 0 | 0 | 2,526 | 0.171452 | 0 | 0 | 4,450 | 0.302043 |
041064c5ce5d6e640b83bd91055137139e3baf14 | 11,801 | py | Python | fluid/image_classification/train.py | bingyanghuang/models | 092914524cdfced300cbd5d5dcf1db998f99c7dc | [
"Apache-2.0"
] | null | null | null | fluid/image_classification/train.py | bingyanghuang/models | 092914524cdfced300cbd5d5dcf1db998f99c7dc | [
"Apache-2.0"
] | null | null | null | fluid/image_classification/train.py | bingyanghuang/models | 092914524cdfced300cbd5d5dcf1db998f99c7dc | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import time
import sys
import functools
import math
import paddle
import paddle.fluid as fluid
import paddle.dataset.flowers as flowers
import models
import reader
import argparse
from models.learning_rate import cosine_decay
from utility import add_arguments, print_arguments
# Command-line interface: each add_arg call registers one training option on
# the shared argparse parser (add_arguments comes from the project's utility
# module).
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('total_images', int, 1281167, "Training image number.")
add_arg('num_epochs', int, 120, "number of epochs.")
add_arg('class_dim', int, 1000, "Class number.")
add_arg('image_shape', str, "3,224,224", "input image size")
add_arg('model_save_dir', str, "output", "model save directory")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('checkpoint', str, None, "Whether to resume checkpoint.")
add_arg('lr', float, 0.1, "set learning rate.")
add_arg('lr_strategy', str, "piecewise_decay", "Set the learning rate decay strategy.")
add_arg('model', str, "SE_ResNeXt50_32x4d", "Set the network to use.")
add_arg('enable_ce', bool, False, "If set True, enable continuous evaluation job.")
# yapf: enable
# All network names exported by the models package (used to validate --model).
model_list = [m for m in dir(models) if "__" not in m]
def optimizer_setting(params):
    """Build a Momentum optimizer whose LR schedule matches *params*.

    :param params: dict with keys "lr" and "learning_strategy" (a dict with
        "name", "batch_size" and, for piecewise decay, "epochs"); optionally
        "total_images" (defaults to the ImageNet size 1281167) and
        "num_epochs" (required for cosine decay).
    :return: fluid.optimizer.Momentum with piecewise decay, cosine decay, or
        a constant learning rate for any unrecognized strategy name.
    """
    ls = params["learning_strategy"]
    # Shared defaults (removed the duplicated per-branch handling and the
    # dead `lr = []` assignment of the original).
    total_images = params.get("total_images", 1281167)
    base_lr = params["lr"]

    if ls["name"] == "piecewise_decay":
        batch_size = ls["batch_size"]
        step = int(total_images / batch_size + 1)
        bd = [step * e for e in ls["epochs"]]
        # One LR value per boundary segment: base_lr, base_lr/10, base_lr/100, ...
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        learning_rate = fluid.layers.piecewise_decay(boundaries=bd, values=lr)
    elif ls["name"] == "cosine_decay":
        batch_size = ls["batch_size"]
        step = int(total_images / batch_size + 1)
        num_epochs = params["num_epochs"]
        learning_rate = cosine_decay(
            learning_rate=base_lr, step_each_epoch=step, epochs=num_epochs)
    else:
        # Unknown strategy: fall back to a constant learning rate.
        learning_rate = base_lr

    return fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))
def train(args):
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
checkpoint = args.checkpoint
pretrained_model = args.pretrained_model
with_memory_optimization = args.with_mem_opt
model_save_dir = args.model_save_dir
image_shape = [int(m) for m in args.image_shape.split(",")]
assert model_name in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
model = models.__dict__[model_name]()
if args.enable_ce:
assert model_name == "SE_ResNeXt50_32x4d"
fluid.default_startup_program().random_seed = 1000
model.params["enable_ce"] = True
class_dim = 102
if model_name == "GoogleNet":
out0, out1, out2 = model.net(input=image, class_dim=class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
else:
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
# parameters from model and arguments
params = model.params
params["total_images"] = args.total_images
params["lr"] = args.lr
params["num_epochs"] = args.num_epochs
params["learning_strategy"]["batch_size"] = args.batch_size
params["learning_strategy"]["name"] = args.lr_strategy
# initialize optimizer
optimizer = optimizer_setting(params)
opts = optimizer.minimize(avg_cost)
if with_memory_optimization:
fluid.memory_optimize(fluid.default_main_program())
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if checkpoint is not None:
fluid.io.load_persistables(exe, checkpoint)
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_batch_size = args.batch_size
test_batch_size = 16
if not args.enable_ce:
train_reader = paddle.batch(reader.train(), batch_size=train_batch_size)
test_reader = paddle.batch(reader.val(), batch_size=test_batch_size)
else:
# use flowers dataset for CE and set use_xmap False to avoid disorder data
# but it is time consuming. For faster speed, need another dataset.
import random
random.seed(0)
np.random.seed(0)
train_reader = paddle.batch(
flowers.train(use_xmap=False), batch_size=train_batch_size)
test_reader = paddle.batch(
flowers.test(use_xmap=False), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
train_exe = fluid.ParallelExecutor(
use_cuda=True if args.use_gpu else False, loss_name=avg_cost.name)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
gpu = os.getenv("CUDA_VISIBLE_DEVICES") or ""
gpu_nums = len(gpu.split(","))
for pass_id in range(params["num_epochs"]):
train_info = [[], [], []]
test_info = [[], [], []]
train_time = []
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss, acc1, acc5 = train_exe.run(fetch_list, feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
acc1 = np.mean(np.array(acc1))
acc5 = np.mean(np.array(acc5))
train_info[0].append(loss)
train_info[1].append(acc1)
train_info[2].append(acc5)
train_time.append(period)
if batch_id % 10 == 0:
print("Pass {0}, trainbatch {1}, loss {2}, \
acc1 {3}, acc5 {4} time {5}"
.format(pass_id, \
batch_id, loss, acc1, acc5, \
"%2.2f sec" % period))
sys.stdout.flush()
train_loss = np.array(train_info[0]).mean()
train_acc1 = np.array(train_info[1]).mean()
train_acc5 = np.array(train_info[2]).mean()
train_speed = np.array(train_time).mean() / train_batch_size
cnt = 0
for test_batch_id, data in enumerate(test_reader()):
t1 = time.time()
loss, acc1, acc5 = exe.run(test_program,
fetch_list=fetch_list,
feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(loss)
acc1 = np.mean(acc1)
acc5 = np.mean(acc5)
test_info[0].append(loss * len(data))
test_info[1].append(acc1 * len(data))
test_info[2].append(acc5 * len(data))
cnt += len(data)
if test_batch_id % 10 == 0:
print("Pass {0},testbatch {1},loss {2}, \
acc1 {3},acc5 {4},time {5}"
.format(pass_id, \
test_batch_id, loss, acc1, acc5, \
"%2.2f sec" % period))
sys.stdout.flush()
test_loss = np.sum(test_info[0]) / cnt
test_acc1 = np.sum(test_info[1]) / cnt
test_acc5 = np.sum(test_info[2]) / cnt
print("End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
"test_loss {4}, test_acc1 {5}, test_acc5 {6}".format(pass_id, \
train_loss, train_acc1, train_acc5, test_loss, test_acc1, \
test_acc5))
sys.stdout.flush()
model_path = os.path.join(model_save_dir + '/' + model_name,
str(pass_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(exe, model_path)
# This is for continuous evaluation only
if args.enable_ce and pass_id == args.num_epochs - 1:
if gpu_nums == 1:
# Use the mean cost/acc for training
print("kpis train_cost %s" % train_loss)
print("kpis train_acc_top1 %s" % train_acc1)
print("kpis train_acc_top5 %s" % train_acc5)
# Use the mean cost/acc for testing
print("kpis test_cost %s" % test_loss)
print("kpis test_acc_top1 %s" % test_acc1)
print("kpis test_acc_top5 %s" % test_acc5)
print("kpis train_speed %s" % train_speed)
else:
# Use the mean cost/acc for training
print("kpis train_cost_card%s %s" %
(gpu_nums, train_loss))
print("kpis train_acc_top1_card%s %s" %
(gpu_nums, train_acc1))
print("kpis train_acc_top5_card%s %s" %
(gpu_nums, train_acc5))
# Use the mean cost/acc for testing
print("kpis test_cost_card%s %s" %
(gpu_nums, test_loss))
print("kpis test_acc_top1_card%s %s" %
(gpu_nums, test_acc1))
print("kpis test_acc_top5_card%s %s" %
(gpu_nums, test_acc5))
print("kpis train_speed_card%s %s" %
(gpu_nums, train_speed))
def main():
    """Entry point: parse the command line, echo the settings, run training."""
    cli_args = parser.parse_args()
    print_arguments(cli_args)
    train(cli_args)
# Script entry point.
if __name__ == '__main__':
    main()
| 39.734007 | 106 | 0.585289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,141 | 0.181425 |
041169eb352349150706a4d189fa7c6c01de6c21 | 1,889 | py | Python | Room.py | jberkow713/Adventure_Game | 4208be1d2a1fd71a46ff1ab6e6713a2cbaf1c8f7 | [
"MIT"
] | null | null | null | Room.py | jberkow713/Adventure_Game | 4208be1d2a1fd71a46ff1ab6e6713a2cbaf1c8f7 | [
"MIT"
] | null | null | null | Room.py | jberkow713/Adventure_Game | 4208be1d2a1fd71a46ff1ab6e6713a2cbaf1c8f7 | [
"MIT"
] | null | null | null | # Implement a class to hold room information. This should have name and
# description attributes.
class Room:
    """A single location in the game world.

    Holds descriptive data, the enemies present, any items/companions,
    and exit links (``n_to``/``w_to``/``e_to``/``s_to``/``magic_to``/
    ``fly_to``) that are wired up externally after construction.
    """

    def __init__(self, number, world, name, description, enemies, enemyHP,
                 enemy_diff, companion=None, item=None, enemy_description=None):
        """Create a room.

        The list arguments default to fresh empty lists; previously they
        were mutable default arguments, silently shared between every
        Room instance that used the default.
        """
        self.number = number
        self.name = name
        self.world = world
        self.description = description
        self.item = [] if item is None else item
        self.enemies = enemies
        self.enemyHP = enemyHP
        self.enemy_description = [] if enemy_description is None else enemy_description
        self.enemy_diff = enemy_diff
        self.companion = [] if companion is None else companion
        # Exit links: the connected room in each direction (None = no exit).
        self.n_to = None
        self.w_to = None
        self.e_to = None
        self.s_to = None
        self.magic_to = None
        self.fly_to = None

    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)
class Monster:
    """A named creature with a single special ability."""

    def __init__(self, name, ability):
        self.name = name
        self.ability = ability

    def __str__(self):
        return "{}: {}".format(self.__class__, self.__dict__)
class MagicRoom(Room):
    """A Room variant that always carries a companion.

    The previous implementation forwarded the wrong arguments to
    ``Room.__init__`` (it omitted ``number`` and ``world``), so every
    instantiation raised TypeError. The signature now mirrors ``Room``
    and simply forwards everything; the exit links are set by
    ``Room.__init__`` and no longer duplicated here.
    """

    def __init__(self, number, world, name, description, enemies, enemyHP,
                 enemy_diff, companion, item=None, enemy_description=None):
        super().__init__(number, world, name, description, enemies, enemyHP,
                         enemy_diff, companion=companion,
                         item=[] if item is None else item,
                         enemy_description=[] if enemy_description is None
                         else enemy_description)
# Adjacency table: room number -> six destination room numbers (0 = no exit).
# NOTE(review): slot order presumably matches Room's six exit attributes
# (n, w, e, s, magic/fly) -- confirm against the code that wires the map.
Map = { 1: [2,0,0,0,0,0], 2: [3,1,4,0,0, 0], 3: [0,2,0,0,0, 0], 4: [5,0,0,2,0, 0], 5: [6,4,0,0,0, 0], 6: [0,5,7,0,0, 0], \
7: [0,0,0,6,8, 0], 8: [9,0,10,0,7, 0], 9: [0,8,0,0,0, 11], 10: [0,0,0,8,0, 0], 11: [0,0,12,0,0, 9], 12: [14,0,0,0,13, 0], \
13: [0,0,0,0,0, 0], 14: [15,12,0,0,0, 0], 15: [16,14,0,0,0, 0], 16: [0,15,0,0,0, 0], 17: [18,0,0,0,0, 0], 18: [0,17,0,0,0, 0]\
}
| 33.732143 | 136 | 0.558497 | 1,361 | 0.720487 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.055056 |
0411c865d2a24b1680ac0780bd1e42a63973bbe8 | 4,844 | py | Python | server.py | fdp0525/seam-erasure | fa3aece97b4a4ab0a2bdaf0ea96911253d8a38fc | [
"MIT"
] | 1 | 2022-03-05T13:33:51.000Z | 2022-03-05T13:33:51.000Z | server.py | fdp0525/seam-erasure | fa3aece97b4a4ab0a2bdaf0ea96911253d8a38fc | [
"MIT"
] | null | null | null | server.py | fdp0525/seam-erasure | fa3aece97b4a4ab0a2bdaf0ea96911253d8a38fc | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import base64
import cStringIO
import time
import numpy
from PIL import Image
from flask import (Flask, request, render_template, url_for, flash, redirect,
send_file)
from SeamErasure import seam_erasure, obj_reader, util
from SeamErasure.lib import weight_data
app = Flask(__name__)
ALLOWED_EXTENSIONS = set(['png', 'tga', 'jpg', 'jpeg', 'gif', 'tif', 'tiff'])
def allowed_file(filename):
    """Return True if *filename* carries an image extension we accept."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def is_data_file(filename):
    """Return True if the upload should be treated as raw weight data.

    True when the current Flask request's form carries a "data" field,
    or when *filename* has a ".data" extension.
    """
    if "data" in request.form:
        return True
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() == "data"
def upload_file(fileID):
    """Return the uploaded file object for form field *fileID*.

    Returns None when the request is not a POST, the field is absent,
    or the submitted file has an empty filename.
    """
    if request.method != 'POST':
        return None
    if fileID not in request.files:
        return None
    candidate = request.files[fileID]
    if candidate.filename == '':
        return None
    if candidate:
        return candidate
    return None
@app.route('/')
def index():
    """Serve the texture-upload form."""
    return render_template('min-form.html')
@app.route('/erased', methods=['GET', 'POST'])
def erase():
    """Handle a seam-erasure request.

    On POST: expects an OBJ model ("obj-input") and a texture
    ("tex-input", image or raw .data weights), runs seam erasure and
    returns the result as a download or an inline results page.
    On GET (or any failure): renders the upload form / an error page.
    NOTE(review): this module is Python-2 style (cStringIO, e.message).
    """
    if request.method == 'POST':
        try:
            startTime = time.time()
            # Check the uploaded files
            obj_file = upload_file("obj-input")
            if not obj_file or ('.' in obj_file.filename and
                    obj_file.filename.rsplit('.', 1)[1].lower() != "obj"):
                return render_template('min-error.html',
                    error_msg="No OBJ model provided.")
            tex_file = upload_file("tex-input")
            if not tex_file:
                return render_template('min-error.html',
                    error_msg="No texture image provided.")
            mesh = obj_reader.quads_to_triangles(
                obj_reader.parse_obj(obj_file))
            isFloatTexture = isDataFile = False
            if(is_data_file(tex_file.filename)):
                # Raw per-texel weight data instead of an image.
                textureData = weight_data.read_tex_from_file(tex_file)[0]
                isFloatTexture, isDataFile = True, True
            else:
                # Images are stored bottom-up for the solver.
                textureData = numpy.array(Image.open(tex_file).transpose(
                    Image.FLIP_TOP_BOTTOM))
                isFloatTexture = not issubclass(textureData.dtype.type,
                    numpy.integer)
                if(not isFloatTexture):
                    textureData = textureData / 255.0
            # Pad shape with 1 so grayscale textures have a depth too.
            height, width, depth = (textureData.shape + (1,))[:3]
            sv_methods = {"none": seam_erasure.SeamValueMethod.NONE,
                "texture": seam_erasure.SeamValueMethod.TEXTURE,
                "lerp": seam_erasure.SeamValueMethod.LERP}
            sv_method = sv_methods[request.form["sv"]]
            do_global = "global" in request.form
            out = seam_erasure.erase_seam(mesh, textureData,
                do_global=do_global, sv_method=sv_method,
                display_energy_file=None)
            out = out.reshape((height, width, -1))
            if(out.shape[2] < 2):
                out = numpy.squeeze(out, axis=2)
            if(not isFloatTexture):
                out = util.to_uint8(out)
            base, ext = os.path.splitext(os.path.basename(tex_file.filename))
            out_filename = base + "-erased" + ext
            if isDataFile:
                img_io = cStringIO.StringIO()
                # NOTE(review): this writes the *input* textureData, not the
                # erased result `out` -- looks like a bug; verify intent.
                weight_data.write_tex_to_file(img_io, textureData)
                img_io.seek(0)
                return send_file(img_io, as_attachment=True,
                    attachment_filename=out_filename)
            else:
                texture = Image.fromarray(out).transpose(Image.FLIP_TOP_BOTTOM)
                img_io = cStringIO.StringIO()
                texture.save(img_io, format=Image.EXTENSION[ext])
                img_io.seek(0)
                if isFloatTexture:
                    return send_file(img_io, as_attachment=True,
                        attachment_filename=out_filename)
                # Embed the result inline as a base64 data URI; fall back
                # to a plain download if the results page fails to render.
                data_uri = base64.b64encode(img_io.getvalue())
                try:
                    return render_template('min-results.html',
                        min_tex=data_uri, runtime=("%.2f" %
                            (time.time() - startTime)),
                        mime_type=Image.MIME[Image.EXTENSION[ext]])
                except Exception:
                    return send_file(img_io, as_attachment=True,
                        attachment_filename=out_filename)
        except Exception as e:
            return render_template('min-error.html',
                error_msg=("Unable to erase the texture (%s)." % e.message))
    return render_template('min-form.html')
# Run the Flask development server when invoked directly.
if __name__ == '__main__':
    app.run(debug=True)
| 35.357664 | 79 | 0.570809 | 0 | 0 | 0 | 0 | 3,653 | 0.754129 | 0 | 0 | 503 | 0.10384 |
041275e33d9dfd21ee2a3f3c2fcabed44c088a34 | 1,203 | py | Python | summarizer/server.py | mhsong95/ai-moderator | df971f609b6b211aeee39009a3fb289bf8596475 | [
"Apache-2.0"
] | 4 | 2022-01-01T11:27:49.000Z | 2022-01-11T07:32:25.000Z | summarizer/server.py | mhsong95/ai-moderator | df971f609b6b211aeee39009a3fb289bf8596475 | [
"Apache-2.0"
] | null | null | null | summarizer/server.py | mhsong95/ai-moderator | df971f609b6b211aeee39009a3fb289bf8596475 | [
"Apache-2.0"
] | null | null | null | from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
from IPython.display import display
from summarizer import Summarizer
bert_model = Summarizer()
from transformers import pipeline
bart_summarizer = pipeline("summarization")
def bert_summarizing_model(input_txt, sent, ratio):
    """Summarize *input_txt* with the extractive BERT model.

    If *sent* is non-zero, request that many sentences; otherwise if
    *ratio* is non-zero, request that length ratio. If both are zero the
    model's own default is used (previously this path crashed with a
    NameError on an unbound variable).
    """
    if sent != 0:
        sentences = bert_model(input_txt, num_sentences=sent)
    elif ratio != 0:
        sentences = bert_model(input_txt, ratio=ratio)
    else:
        sentences = bert_model(input_txt)
    return ''.join(sentences)
class echoHandler(BaseHTTPRequestHandler):
    """HTTP handler that returns a one-sentence summary of the POSTed
    "content" form field."""

    def do_POST(self):
        print(self.client_address)
        body_length = int(self.headers.get('Content-Length'))
        raw_body = self.rfile.read(body_length).decode('utf-8')
        form = parse_qs(raw_body)
        # Summarize to a single sentence (sent=1, ratio unused).
        summary = bert_summarizing_model(form['content'][0], 1, 0)
        print(summary)
        self.send_response(200)
        self.send_header('content-type', 'text/html')
        self.end_headers()
        self.wfile.write(summary.encode())
def main():
    """Start the summarization HTTP server and serve until interrupted."""
    port = 5050
    httpd = HTTPServer(('', port), echoHandler)
    print('Server running on port %s' % port)
    httpd.serve_forever()
# Script entry point.
if __name__ == '__main__':
    main()
| 29.341463 | 64 | 0.680798 | 501 | 0.416459 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.093932 |
0412bd04f86d75afe26c0f4fb4bf3b33f3366b14 | 3,481 | py | Python | otter/html.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | otter/html.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | otter/html.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null |
import matplotlib, numpy
from . import plot
import markdown
import tabulate
md_extensions = [
'markdown.extensions.tables',
'markdown.extensions.extra'
]
class HTMLElement(object):
    """Base HTML container element.

    Keyword arguments become tag attributes (``cl`` is translated to
    ``class``, which is a Python keyword). Items added to the element are
    converted through the module-level ``handlers`` registry when their
    type is registered, otherwise appended as-is. Rendering via ``repr``/
    ``str`` wraps each child in ``childtag`` if one is set.
    """
    tag = None
    childtag = None

    def __init__(self, content=None, **kwargs):
        self.content = []
        self.meta = kwargs
        if content:
            self.__add__(content)

    def __repr__(self):
        output = ""
        # Separate multiple attributes with spaces; previously they were
        # concatenated directly, producing e.g. class='x'id='y'.
        attributes = " ".join(
            "{}='{}'".format("class" if attr == "cl" else attr, val)
            for attr, val in self.meta.items())
        if self.tag:
            output += "<{} {}>".format(self.tag, attributes)
        for item in self.content:
            if self.childtag:
                output += "<{0}>{1}</{0}>".format(self.childtag, str(item))
            else:
                output += str(item)
        if self.tag:
            output += "</{}>".format(self.tag)
        return output

    def __str__(self):
        return self.__repr__()

    def __iadd__(self, item):
        self.__add__(item)
        # Must return the element itself: the old code returned
        # ``self.content``, which rebound the caller's variable (after
        # ``elem += x``) to the plain content list.
        return self

    def __add__(self, item):
        # NOTE: mutates in place and returns None; dict_to_table relies
        # on the ``table + Row(...)`` mutation idiom.
        if type(item) in handlers.keys():
            self.content.append(handlers[type(item)](item))
        else:
            self.content.append(item)
class OrderedList(HTMLElement):
    """An ``<ol>`` element whose children render wrapped in ``<li>`` tags.

    Adding appends *each element* of the given iterable, converting
    registered types through ``handlers``.
    """
    tag = "ol"
    childtag = "li"

    def __add__(self, items):
        for element in items:
            handler = handlers.get(type(element))
            self.content.append(element if handler is None else handler(element))
class Table(HTMLElement):
    """An HTML ``<table>`` element; rows are added as Row children."""
    tag = "table"
class Row(HTMLElement):
    """A ``<tr>`` element whose children render wrapped in ``<td>`` cells.

    Adding appends *each element* of the given iterable, converting
    registered types through ``handlers``.
    """
    tag = "tr"
    childtag = "td"

    def __add__(self, items):
        for element in items:
            handler = handlers.get(type(element))
            self.content.append(element if handler is None else handler(element))
def dict_to_table(dictionary):
    """Render *dictionary* as a two-column, Bootstrap-styled HTML table."""
    table = Table(cl="table table-sm table-striped table-bordered")
    for entry in dictionary.items():
        # Row.__add__ mutates the table in place; the result of + is unused.
        table + Row(list(entry))
    return table
# Registry mapping a Python type to the converter applied when a value of
# that type is added to an HTMLElement: strings go through markdown,
# matplotlib figures are wrapped, lists become ordered lists, dicts become
# tables, and numpy arrays are rendered with tabulate.
handlers = {
    str: lambda x: markdown.markdown(str(x), output_format='xhtml5', extensions=md_extensions),
    matplotlib.figure.Figure: plot.Figure,
    list: OrderedList,
    dict: dict_to_table,
    numpy.ndarray: lambda x: tabulate.tabulate(x, tablefmt=MyHTMLFormat)
}
from functools import partial
def my_html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
    """Render one table row for tabulate, tagging every cell with the
    ``my-cell`` class and the row with ``my-row``.

    *colwidths* is accepted only to satisfy tabulate's row-formatter
    signature and is unused. Unknown alignments get no style attribute.
    """
    style_for = {"left": '',
                 "right": ' style="text-align: right;"',
                 "center": ' style="text-align: center;"',
                 "decimal": ' style="text-align: right;"'}
    cells = []
    for value, align in zip(cell_values, colaligns):
        cells.append("<{0}{1} class=\"my-cell\">{2}</{0}>"
                     .format(celltag, style_for.get(align, ''), value))
    return "<tr class=\"my-row\">" + "".join(cells).rstrip() + "</tr>"
# Custom tabulate table format: a Bootstrap-styled <table> using the
# class-tagged row renderer above, with no separator lines.
MyHTMLFormat = tabulate.TableFormat(
    lineabove=tabulate.Line("<table class=\"table table-sm\">", "", "", ""),
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=tabulate.Line("</table>", "", "", ""),
    headerrow=partial(my_html_row_with_attrs, "th"),
    datarow=partial(my_html_row_with_attrs, "td"),
    padding=0, with_header_hide=None)
| 30.008621 | 95 | 0.568228 | 1,836 | 0.527435 | 0 | 0 | 0 | 0 | 0 | 0 | 530 | 0.152255 |
04134b9c96f43ad672f3c3d788080e76c5ac0aed | 20,472 | py | Python | pymodaq_plugins/hardware/PI/PIPython/pipython/datarectools.py | SofyMeu/pymodaq_plugins | 42aff578063de9f0e15467eda6d4fb4225524e76 | [
"CECILL-B"
] | 1 | 2019-08-08T01:46:00.000Z | 2019-08-08T01:46:00.000Z | pymodaq_plugins/hardware/PI/PIPython/pipython/datarectools.py | SofyMeu/pymodaq_plugins | 42aff578063de9f0e15467eda6d4fb4225524e76 | [
"CECILL-B"
] | 2 | 2020-11-18T04:03:51.000Z | 2021-02-09T18:03:35.000Z | pymodaq_plugins/hardware/PI/PIPython/pipython/datarectools.py | SofyMeu/pymodaq_plugins | 42aff578063de9f0e15467eda6d4fb4225524e76 | [
"CECILL-B"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tools for setting up and using the data recorder of a PI device."""
from logging import debug, warning
from time import sleep, time
from pipython.pitools import FrozenClass
# Known servo cycle times per controller model, in seconds. Used by
# getservotime() as a fallback when the SERVO_UPDATE_TIME parameter
# cannot be queried from the device.
SERVOTIMES = {
    'C-413K011': 0.00003333333,
    'C-663.11': 50E-6,
    'C-702.00': 100E-6,
    'C-843': 410E-6,
    'C-863.11': 50E-6,
    'C-867.160': 50E-6,  # verified
    'C-867.260': 50E-6,  # verified
    'C-867.262': 50E-6,  # verified
    'C-867K016': 50E-6,  # verified
    'C-867.B0017': 100E-6,
    'C-867.B0019': 100E-6,
    'C-867.B024': 100E-6,
    'C-867.K0020': 100E-6,
    'C-867.OE': 50E-6,
    'C-867K021': 100E-6,
    'C-877': 100E-6,
    'C-880': 4096E-6,
    'C-884.4D': 50E-6,
    'C-884.4DB': 50E-6,
    'C-887': 20E-6,
    'E-710': 200E-6,
    'E-755': 200E-6,
    'E-861': 50E-6,
    'E-861.11C885': 50E-6,
    'E-871.1A1': 50E-6,
    'E-873': 50E-6,
    'E-873.1A1': 50E-6,
    'E-873.3QTU': 50E-6,
}
# Known maximum number of data recorder values per controller model. Used
# by getmaxnumvalues() as a fallback when the limit cannot be derived from
# device parameters.
MAXNUMVALUES = {
    'C-413K011': 4096,
    'C-663.11': 1024,
    'C-702.00': 262144,
    'C-863.11': 1024,
    'C-863.12': 1024,
    'C-867.160': 8192,  # verified
    'C-867.1U': 8192,  # verified
    'C-867.260': 8192,  # verified
    'C-867.262': 8192,  # verified
    'C-867K016': 8192,  # verified
    'C-867K028': 1024,  # verified
    'C-867K031': 8192,  # verified
    'C-867K036 APP': 1024,  # verified
    'C-867.2U': 8192,  # verified
    'C-867.2U2': 8192,  # verified
    'C-867.B0017': 8192,
    'C-867.B0019': 8192,
    'C-867.B024': 8192,
    'C-867.K0020': 8192,
    'C-867.OE': 1024,
    'C-867K021': 8192,
    'C-877': 1024,
    'C-877.1U11': 1024,  # verified
    'C-877.2U12': 1024,  # verified
    'C-884.4D': 8192,
    'C-884.4DB': 8192,
    'E-761': 8192,
    'E-861': 1024,
    'E-861.11C885': 1024,
    'E-871.1A1': 1024,
    'E-873': 1024,
    'E-873.1A1': 1024,
    'E-873.3QTU': 1024,
}
class RecordOptions(object):  # Too few public methods pylint: disable=R0903
    """Defines for the kind of data to be recorded.

    These are the record-option IDs passed to the DRC command (see
    Datarecorder.arm). Each name carries its numeric ID as suffix.
    """
    NOTHING_0 = 0
    COMMANDED_POSITION_1 = 1
    ACTUAL_POSITION_2 = 2
    POSITION_ERROR_3 = 3
    PIO_VALUE_4 = 4
    DIO_VALUE_5 = 5
    COMEDI_VALUE_6 = 6
    PIEZO_VOLTAGE_7 = 7
    TIMESTAMP_8 = 8
    INDEX_9 = 9
    TICKS_10 = 10
    DDL_OUTPUT_13 = 13
    OPENLOOP_INPUT_14 = 14
    PID_OUTPUT_15 = 15
    ANALOG_OUTPUT_16 = 16
    SENSOR_NORMALIZED_17 = 17
    SENSOR_FILTERED_18 = 18
    SENSOR_ELEC_LIN_19 = 19
    SENSOR_MECH_LIN_20 = 20
    TARGET_SLEWRATE_LIM_22 = 22
    TARGET_VELOCITY_23 = 23
    TARGET_ACCELERATION_24 = 24
    TARGET_JERK_25 = 25
    DI_VALUE_26 = 26
    DO_VALUE_27 = 27
    CTV_TARGET_VALUE_28 = 28
    CCV_CONTROL_VALUE_29 = 29
    CAV_ACTUAL_VALUE_30 = 30
    CCV_CURRENT_VALUE_31 = 31
    DRIFT_COMP_OFFSET_32 = 32
    HYBRID_MOTOR_VOLTAGE_33 = 33
    HYBRID_PIEZO_VOLTAGE_34 = 34
    SYSTEM_TIME_44 = 44
    COMMANDED_VELOCITY_70 = 70
    COMMANDED_ACCELERATION_71 = 71
    ACTUAL_VELOCITY_72 = 72
    MOTOR_OUTPUT_73 = 73
    KP_OF_AXIS_74 = 74
    KI_OF_AXIS_75 = 75
    KD_OF_AXIS_76 = 76
    SIGNAL_STATUS_REGISTER_80 = 80
    ANALOG_INPUT_81 = 81
    ACTIVE_PARAMETERSET_90 = 90
    ACTUAL_FREQUENCY_91 = 91
    P0_92 = 92
    DIA_93 = 93
class TriggerSources(object):  # Too few public methods pylint: disable=R0903
    """Defines for sources that can trigger data recording.

    These are the trigger-source IDs passed to the DRT command (see
    Datarecorder.arm). Each name carries its numeric ID as suffix.
    """
    DEFAULT_0 = 0
    POSITION_CHANGING_COMMAND_1 = 1
    NEXT_COMMAND_WITH_RESET_2 = 2
    EXTERNAL_TRIGGER_3 = 3
    TRIGGER_IMMEDIATELY_4 = 4
    DIO_CHANNEL_5 = 5
    POS_CHANGING_WITH_RESET_6 = 6
    SMO_COMMAND_WITH_RESET_7 = 7
    COMEDI_CHANNEL_8 = 8
    WAVE_GENERATOR_9 = 9
def __getopt(name, enumclass):
    """Return item of 'enumclass' which name parts start with 'name'.
    @param name : Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
    @param enumclass : Class name that contains enums.
    @return : According enum value as integer.
    """
    nameparts = name.split('_')
    for attrname in dir(enumclass):
        matched = True
        for position, candidate in enumerate(attrname.split('_')):
            # Numeric suffixes (the ID part of the enum name) are ignored.
            if candidate.isdigit():
                continue
            # Parts of the attribute beyond the query are not required.
            if position >= len(nameparts):
                continue
            if not __isabbreviation(nameparts[position].upper(), candidate.upper()):
                matched = False
        if matched:
            return getattr(enumclass, attrname)
def __isabbreviation(abbrev, item):
    """Return True if first char of 'abbrev' and 'item' match and all chars of 'abbrev' occur in 'item' in this order.
    @param abbrev : Case sensitive string.
    @param item : Case sensitive string.
    @return : True if 'abbrev' is an abbreviation of 'item'.
    """
    if not abbrev:
        return True
    if not item or abbrev[0] != item[0]:
        return False
    remainder = abbrev[1:]
    # Recurse over every possible position for the next character.
    for offset in range(1, len(item) + 1):
        if __isabbreviation(remainder, item[offset:]):
            return True
    return False
def getrecopt(name):
    """Return record option value according to 'name'.

    Convenience wrapper around the abbreviation matcher for RecordOptions.
    @param name: Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
    @return : According enum value as integer.
    """
    return __getopt(name, RecordOptions)
def gettrigsources(name):
    """Return trigger option value according to 'name'.

    Convenience wrapper around the abbreviation matcher for TriggerSources.
    @param name: Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
    @return : According enum value as integer.
    """
    return __getopt(name, TriggerSources)
def getservotime(gcs):
    """Return current servo cycle time in seconds as float.
    @type gcs : pipython.gcscommands.GCSCommands
    @return : Current servo cycle time in seconds as float.
    """
    # C-702.00 reports a wrong/unqueryable parameter, so use the table first.
    cycletime = SERVOTIMES[gcs.devname] if gcs.devname in ['C-702.00'] else None
    if cycletime is None:
        cycletime = gcs.getparam(0x0E000200)  # SERVO_UPDATE_TIME
    if cycletime is None:
        cycletime = SERVOTIMES.get(gcs.devname)
    if cycletime is None:
        raise NotImplementedError('servo cycle time for %r is unknown' % gcs.devname)
    return float(cycletime)
def getmaxnumvalues(gcs):  # 'getmaxnumvalues' is too complex (11) pylint: disable=C0901
    """Return maximum possible number of data recorder values as integer.

    Tries several device-parameter combinations in order (different
    controller families expose the limit differently) and finally falls
    back to the MAXNUMVALUES table.
    @type gcs : pipython.gcscommands.GCSCommands
    @return : Maximum possible number of data recorder values as integer.
    """
    maxnumvalues = None
    if gcs.devname in ['C-702.00']:
        maxnumvalues = MAXNUMVALUES[gcs.devname]
    if not maxnumvalues:
        # E-517, E-518, E-852
        maxnumvalues = gcs.getparam(0x16000201)  # DATA REC SET POINTS
    if not maxnumvalues:
        # E-709, E-712, E-725, E-753.1CD, E-727, E-723K001
        maxpoints = gcs.getparam(0x16000200)  # DATA_REC_MAX_POINTS
        numtables = gcs.getparam(0x16000300)  # DATA_REC_CHAN_NUMBER
        if maxpoints and numtables:
            maxnumvalues = int(maxpoints / numtables)
    if not maxnumvalues:
        # C-843
        maxpoints = gcs.getparam(0x16000200)  # DATA_REC_MAX_POINTS
        if maxpoints:
            maxnumvalues = int(maxpoints / gcs.qTNR())
    if not maxnumvalues:
        # Mercury, etc.
        maxnumvalues = gcs.getparam(0x16000001)  # RECORDCYCLES_PER_TRIGGER
    if not maxnumvalues:
        if gcs.devname in MAXNUMVALUES:
            maxnumvalues = MAXNUMVALUES[gcs.devname]
    if not maxnumvalues:
        raise NotImplementedError('maximum number of data recorder values for %r is unknown' % gcs.devname)
    return maxnumvalues
class Datarecorder(FrozenClass):
    """Set up and use the data recorder of a PI device.

    Configuration values are cached lazily in an internal dict and only
    queried from the device on first access.
    """

    def __init__(self, gcs):
        """Set up and use the data recorder of a PI device connected via 'gcs'.
        @type gcs : pipython.gcscommands.GCSCommands
        """
        super(Datarecorder, self).__init__()
        debug('create an instance of Datarecorder(gcs=%s)', str(gcs))
        self.__gcs = gcs
        # Lazy configuration cache; None means "not yet set/queried".
        self.__cfg = {
            'servotime': None,
            'numvalues': None,
            'offset': None,
            'maxnumvalues': None,
            'samplerate': None,
            'sources': None,
            'options': None,
            'trigsources': None,
            'rectables': [],
        }
        self._freeze()

    @property
    def servotime(self):
        """Return current servo cycle time in seconds as float."""
        if self.__cfg['servotime'] is None:
            self.__cfg['servotime'] = getservotime(self.__gcs)
            debug('Datarecorder.servotime is %g secs', self.__cfg['servotime'])
        return self.__cfg['servotime']

    @servotime.setter
    def servotime(self, value):
        """Set current servo cycle time in seconds as float."""
        value = float(value)
        self.__cfg['servotime'] = value
        debug('Datarecorder.servotime set to %g secs', self.__cfg['servotime'])

    @property
    def numvalues(self):
        """Return number of data recorder values to record as integer."""
        if self.__cfg['numvalues'] is None:
            self.numvalues = self.maxnumvalues
        return self.__cfg['numvalues']

    @numvalues.setter
    def numvalues(self, value):
        """Set number of data recorder values to record to 'value' as integer."""
        value = int(value)
        if value > self.maxnumvalues:
            raise ValueError('%d exceeds the maximum number of data recorder values %d' % (value, self.maxnumvalues))
        self.__cfg['numvalues'] = value
        debug('Datarecorder.numvalues: set to %d', self.__cfg['numvalues'])

    @property
    def offset(self):
        """Return start point in the record table as integer, starts with index 1."""
        if self.__cfg['offset'] is None:
            if self.numvalues:
                return 1
        return self.__cfg['offset']

    @offset.setter
    def offset(self, value):
        """Set start point in the record table as integer, starts with index 1."""
        value = int(value)
        self.__cfg['offset'] = value
        debug('Datarecorder.offset: set to %d', self.__cfg['offset'])

    @property
    def maxnumvalues(self):
        """Return maximum possible number of data recorder values as integer."""
        if self.__cfg['maxnumvalues'] is None:
            self.__cfg['maxnumvalues'] = getmaxnumvalues(self.__gcs)
            debug('Datarecorder.maxnumvalues is %d', self.__cfg['maxnumvalues'])
        return self.__cfg['maxnumvalues']

    @maxnumvalues.setter
    def maxnumvalues(self, value):
        """Set maximum possible number of data recorder values as integer."""
        value = int(value)
        self.__cfg['maxnumvalues'] = value
        debug('Datarecorder.maxnumvalues: set to %d', self.__cfg['maxnumvalues'])

    @property
    def samplerate(self):
        """Return current sampling rate in multiples of servo cycle time as integer."""
        if self.__cfg['samplerate'] is None:
            if self.__gcs.HasqRTR():
                self.__cfg['samplerate'] = self.__gcs.qRTR()
            else:
                warning('device %r does not support the RTR? command', self.__gcs.devname)
                self.__cfg['samplerate'] = 1
        return self.__cfg['samplerate']

    @samplerate.setter
    def samplerate(self, value):
        """Set current sampling rate to 'value' in multiples of servo cycle time as integer."""
        value = max(1, int(value))  # at least every servo cycle
        if self.__gcs.HasRTR():
            self.__gcs.RTR(value)
            self.__cfg['samplerate'] = value
        else:
            warning('device %r does not support the RTR command', self.__gcs.devname)
            self.__cfg['samplerate'] = 1
        debug('Datarecorder.samplerate: set to %d servo cycles', self.__cfg['samplerate'])

    @property
    def sampletime(self):
        """Return current sampling time in seconds as float."""
        return self.samplerate * self.servotime

    @sampletime.setter
    def sampletime(self, value):
        """Set current sampling time to 'value' in seconds as float."""
        self.samplerate = int(float(value) / self.servotime)
        debug('Datarecorder.sampletime: set to %g s', self.sampletime)

    @property
    def samplefreq(self):
        """Return current sampling frequency in Hz as float."""
        return 1. / self.sampletime

    @samplefreq.setter
    def samplefreq(self, value):
        """Set current sampling frequency to 'value' in Hz as float."""
        self.sampletime = 1. / float(value)
        debug('Datarecorder.samplefreq: set to %.2f Hz', self.samplefreq)

    @property
    def rectime(self):
        """Return complete record time in seconds as float."""
        return self.numvalues * self.sampletime

    @rectime.setter
    def rectime(self, value):
        """Set number of values to record according to 'value' as complete record time in seconds as float."""
        # numvalues setter converts the float quotient to int.
        self.numvalues = float(value) / self.sampletime
        # NOTE(review): debug label says 'frequency' but this sets rectime.
        debug('Datarecorder.frequency: set to %.2f Hz', self.samplefreq)

    @property
    def rectimemax(self):
        """Return complete record time in seconds as float."""
        return self.maxnumvalues * self.sampletime

    @rectimemax.setter
    def rectimemax(self, value):
        """Set sample time to record for 'value' seconds (float) with max. number of points."""
        self.numvalues = self.maxnumvalues
        self.sampletime = float(value) / self.numvalues
        debug('Datarecorder.rectimemax: %d values with sampling %g s', self.numvalues, self.sampletime)

    @property
    def sources(self):
        """Return current record source IDs as list of strings, defaults to first axis."""
        self.__cfg['sources'] = self.__cfg['sources'] or self.__gcs.axes[0]
        if isinstance(self.__cfg['sources'], (list, tuple)):
            return self.__cfg['sources']
        # Single source: replicate it for every record table.
        return [self.__cfg['sources']] * len(self.rectables)

    @sources.setter
    def sources(self, value):
        """Set record source IDs as string convertible or list of them."""
        self.__cfg['sources'] = value
        debug('Datarecorder.sources: set to %r', self.__cfg['sources'])

    @sources.deleter
    def sources(self):
        """Reset record source IDs."""
        self.__cfg['sources'] = None
        debug('Datarecorder.sources: reset')

    @property
    def options(self):
        """Return current record source IDs as list of integers, defaults to RecordOptions.ACTUAL_POSITION_2."""
        self.__cfg['options'] = self.__cfg['options'] or RecordOptions.ACTUAL_POSITION_2
        if isinstance(self.__cfg['options'], (list, tuple)):
            return self.__cfg['options']
        # Single option: replicate it for every record table.
        return [self.__cfg['options']] * len(self.rectables)

    @options.setter
    def options(self, value):
        """Set record source IDs as integer convertible or list of them."""
        self.__cfg['options'] = value
        debug('Datarecorder.options: set to %r', self.__cfg['options'])

    @options.deleter
    def options(self):
        """Reset record source IDs."""
        self.__cfg['options'] = None
        debug('Datarecorder.options: reset')

    @property
    def trigsources(self):
        """Return current trigger source as int or list, defaults to TriggerSources.NEXT_COMMAND_WITH_RESET_2."""
        self.__cfg['trigsources'] = self.__cfg['trigsources'] or TriggerSources.NEXT_COMMAND_WITH_RESET_2
        return self.__cfg['trigsources']

    @trigsources.setter
    def trigsources(self, value):
        """Set trigger source IDs. If single integer then "DRT 0" is used. If list
        of integers then list size can be 1 or must match the length of self.rectables.
        """
        if isinstance(value, tuple):
            value = list(value)
        self.__cfg['trigsources'] = value
        debug('Datarecorder.trigsources: set to %r', self.__cfg['trigsources'])

    @trigsources.deleter
    def trigsources(self):
        """Reset trigger source IDs."""
        self.__cfg['trigsources'] = None
        debug('Datarecorder.trigsources: reset')

    @property
    def rectables(self):
        """Return the record tables as list of integers."""
        # Number of tables follows whichever of sources/options/trigsources
        # was configured as a list; a single value means one table.
        if isinstance(self.__cfg['sources'], (list, tuple)):
            numtables = len(self.__cfg['sources'])
        elif isinstance(self.__cfg['options'], (list, tuple)):
            numtables = len(self.__cfg['options'])
        elif isinstance(self.__cfg['trigsources'], (list, tuple)):
            numtables = len(self.__cfg['trigsources'])
        else:
            numtables = 1
        self.__cfg['rectables'] = list(range(1, numtables + 1))
        return self.__cfg['rectables']

    def wait(self, timeout=0):
        """Wait for end of data recording.
        @param timeout : Timeout in seconds, is disabled by default.
        """
        assert self.rectables, 'rectables are not set'
        numvalues = self.numvalues or self.maxnumvalues
        if self.__gcs.HasqDRL():
            # Poll the recorded-length query until all tables are full.
            maxtime = time() + timeout
            while min([self.__gcs.qDRL(table)[table] for table in self.rectables]) < numvalues:
                if timeout and time() > maxtime:
                    raise SystemError('timeout after %.1f secs while waiting on data recorder' % timeout)
        else:
            # No length query available: just sleep a bit longer than the
            # expected record time.
            waittime = 1.2 * self.rectime
            debug('Datarecorder.wait: wait %.2f secs for data recording', waittime)
            sleep(waittime)

    def read(self, offset=None, numvalues=None, verbose=False):
        """Read out the data and return it.
        @param offset : Start point in the table as integer, starts with index 1, overwrites self.offset.
        @param numvalues : Number of points to be read per table as integer, overwrites self.numvalues.
        @param verbose : If True print a line that shows how many values have been read out already.
        @return : Tuple of (header, data), see qDRR command.
        """
        assert self.rectables, 'rectables are not set'
        header = self.__gcs.qDRR(self.rectables, offset or self.offset, numvalues or self.numvalues)
        # qDRR transfers asynchronously; bufstate reports progress (a float
        # in [0, 1]) until the buffer is complete (True).
        while self.__gcs.bufstate is not True:
            if verbose:
                # NOTE: Python-2 style trailing-comma print statements.
                print('\rread data {:.1f}%...'.format(self.__gcs.bufstate * 100)),
            sleep(0.05)
        if verbose:
            print('\r%s\r' % (' ' * 20)),
        data = self.__gcs.bufdata
        return header, data

    def getdata(self, timeout=0, offset=None, numvalues=None):
        """Wait for end of data recording, start reading out the data and return the data.
        @param timeout : Timeout in seconds, is disabled by default.
        @param offset : Start point in the table as integer, starts with index 1, overwrites self.offset.
        @param numvalues : Number of points to be read per table as integer, overwrites self.numvalues.
        @return : Tuple of (header, data), see qDRR command.
        """
        self.wait(timeout)
        return self.read(offset, numvalues)

    def arm(self):
        """Ready the data recorder with given options and activate the trigger.
        If TriggerSources.NEXT_COMMAND_WITH_RESET_2 is used then the error check will be disabled.
        """
        if self.__gcs.HasDRC():
            for i in range(len(self.rectables)):
                self.__gcs.DRC(self.rectables[i], self.sources[i], self.options[i])
        else:
            warning('device %r does not support the DRC command', self.__gcs.devname)
        if self.__gcs.HasDRT():
            errcheck = None
            if isinstance(self.trigsources, (list, tuple)):
                # Per-table trigger: disable error check while the "next
                # command with reset" trigger is active, restore afterwards.
                if TriggerSources.NEXT_COMMAND_WITH_RESET_2 in self.trigsources:
                    errcheck = self.__gcs.errcheck
                    self.__gcs.errcheck = False
                if len(self.trigsources) == 1:
                    self.trigsources = [self.trigsources[0]] * len(self.rectables)
                for i in range(len(self.rectables)):
                    self.__gcs.DRT(self.rectables[i], self.trigsources[i])
            else:
                # Single trigger source applies to all tables ("DRT 0").
                if TriggerSources.NEXT_COMMAND_WITH_RESET_2 == self.trigsources:
                    errcheck = self.__gcs.errcheck
                    self.__gcs.errcheck = False
                self.__gcs.DRT(0, self.trigsources)
            if errcheck is not None:
                self.__gcs.errcheck = errcheck
        else:
            warning('device %r does not support the DRT command', self.__gcs.devname)

    @property
    def timescale(self):
        """Return list of values for time scale of recorded data."""
        return [1. / self.samplerate * x for x in range(self.numvalues)]
| 37.221818 | 118 | 0.621434 | 14,715 | 0.718787 | 0 | 0 | 8,301 | 0.405481 | 0 | 0 | 8,013 | 0.391413 |
0413c12c2eb58e98cad7f0b71a3fe624a6287c07 | 4,181 | py | Python | stemel/stemel_foxdot.py | reneghosh/stemel | 3a3d2635f57418f78c0b317a6125727668382469 | [
"MIT"
] | null | null | null | stemel/stemel_foxdot.py | reneghosh/stemel | 3a3d2635f57418f78c0b317a6125727668382469 | [
"MIT"
] | 1 | 2019-01-25T11:40:50.000Z | 2019-01-25T11:40:50.000Z | stemel/stemel_foxdot.py | satelliteray/stemel | 3a3d2635f57418f78c0b317a6125727668382469 | [
"MIT"
] | null | null | null | from FoxDot import *
from stemel.stemel import *
def foxdotidy(pitches, durations, sustains):
    """
    transform a stemel note matrix
    into a foxdot-compatible one.

    Mutates ``pitches`` and ``sustains`` in place (entries aligned with rest
    positions are dropped), then returns new lists where inner lists become
    tuples, rest dicts become ``rest(...)`` objects (``rest`` presumably from
    the FoxDot star import -- verify) and one-element tuples are unwrapped.

    :param pitches: list of per-step pitch lists.
    :param durations: list of per-step duration lists; rests are dicts.
    :param sustains: list of per-step sustain lists.
    :return: tuple ``(new_pitches, new_durations, new_sustains)``.
    """
    # clean up values that have rests but other notes too
    # replace mixed durations with the first non-rest
    counter = 0
    for line in durations:
        if len(line)>0:
            (found, val, rest_list) = first_non_rest(durations[counter])
            # Collapse the duration list to a single value.
            durations[counter]= val
            if (found):
                # Drop the pitch/sustain entries that sat on rest positions.
                pitches_to_remove = []
                sustains_to_remove = []
                for index in rest_list:
                    pitches_to_remove.append(pitches[counter][index])
                    sustains_to_remove.append(sustains[counter][index])
                for pitch in pitches_to_remove:
                    pitches[counter].remove(pitch)
                for sustain in sustains_to_remove:
                    sustains[counter].remove(sustain)
        counter += 1
    # replace lists with tuples
    new_pitches = []
    new_durations = []
    new_sustains = []
    for li in pitches:
        new_pitches.append(tuple(li))
    for du in durations:
        if type(du)==type({}):
            # A duration that is still a dict is a pure rest.
            new_durations.append(rest(du['rest']))
        else:
            new_durations.append(du)
    for li in sustains:
        new_sustains.append(tuple(li))
    # clean up single element tuples
    counter = 0
    for line in new_pitches:
        if (type(line)==type((tuple([])))) and (len(line)==1):
            new_pitches[counter]=new_pitches[counter][0]
        counter += 1
    counter = 0
    for line in new_sustains:
        if (type(line)==type((tuple([])))) and (len(line)==1):
            new_sustains[counter]=new_sustains[counter][0]
        counter += 1
    return (new_pitches, new_durations, new_sustains)
def first_non_rest(line):
    """
    Find the first non-rest in an array of durations.

    A rest is encoded as a plain dict (e.g. ``{'rest': n}``); any other value
    is a playable duration.

    :param line: non-empty list of durations (numbers and/or rest dicts).
    :return: tuple ``(found, value, rest_indices)`` where ``found`` is True
        when at least one non-rest exists, ``value`` is the *first* non-rest
        duration (or ``line[0]`` when the line is all rests), and
        ``rest_indices`` lists the positions of every rest entry.
    """
    found = False
    non_rest = None
    rest_list = []
    for counter, val in enumerate(line):
        if type(val) is not dict:  # found a non-rest
            # BUGFIX: only keep the *first* non-rest. The previous version
            # kept overwriting ``non_rest`` and therefore returned the last
            # one, contradicting the function name and its caller's intent.
            if not found:
                non_rest = val
            found = True
        else:
            rest_list.append(counter)
    if found is False:
        non_rest = line[0]
    return (found, non_rest, rest_list)
def foxdotidy_pattern(pattern):
    """
    Convert a stemel pattern into FoxDot-ready sequences.

    Runs :func:`foxdotidy` over the main pitch/duration/sustain matrices and
    over each optional sub-pattern (keeping only its cleaned values), so that
    one-element tuples are unwrapped and hash rests become FoxDot rests.

    :param pattern: stemel pattern object exposing ``patterns()``.
    :return: tuple ``(pitches, durations, sustains, optionals)``.
    """
    raw_pitches, raw_durations, raw_sustains, optionals = pattern.patterns()
    pitches, durations, sustains = foxdotidy(raw_pitches, raw_durations, raw_sustains)
    for optional in optionals:
        sub = optional.pattern
        cleaned_values, _, _ = foxdotidy(sub[0], sub[1], sub[2])
        optional.pattern = cleaned_values
    return (pitches, durations, sustains, optionals)
def stemel_player(player, pattern, **args):
    """
    Play a stemel pattern through a FoxDot synthdef player.

    Builds the player's keyword arguments from the pattern's optional tracks,
    overlays any caller-supplied keywords on top, and defaults ``dur``/``sus``
    to the durations and sustains extracted from the pattern.

    :param player: FoxDot player object.
    :param pattern: stemel pattern object.
    :param args: extra keyword arguments forwarded to the player; ``dur`` and
        ``sus`` override the pattern's own timing.
    :return: the configured FoxDot player.
    """
    pitches, durations, sustains, optionals = foxdotidy_pattern(pattern)
    opts = {optional.name: optional.pattern for optional in optionals}
    # Caller keywords win over pattern optionals.
    opts.update(args)
    # Fall back to the pattern's own timing when the caller did not override
    # it (replaces the redundant "if 'dur' in args" branches: those values
    # were already copied by the update above).
    opts.setdefault('dur', durations)
    opts.setdefault('sus', sustains)
    return player(pitches, **opts)
def smls(player, pattern, step=1, **args):
    """
    Play a pattern given as a *string*.

    Parses the string into a Stemel object, stretches it by ``step`` and
    forwards everything to :func:`stemel_player`.
    """
    stretched = Stemel(pattern).stretch(step)
    return stemel_player(player, stretched, **args)
def smlp(player, pattern, step=1, **args):
    """
    Play a pattern given as a *Stemel object*.

    Stretches the pattern by ``step`` and forwards everything to
    :func:`stemel_player`.
    """
    return stemel_player(player, pattern.stretch(step), **args)
if __name__ == '__main__':
    # Demo: a bass line, a lead line and a drum loop on a chromatic scale.
    Scale.default = "chromatic"
    bass_pattern = "0-0-0-0-0-<5 5 7 10-0 /> * 12 * 12 * 12 * 12 * 12 10 22 22 22 22 24 | amp 0.4 0.6 0.7"
    lead_pattern = ">>7 7 7 7 7 7 7 7 10 10 7 0 7 2 0 7 0 12 | amp 0.4 0.6 0.7"
    # NOTE(review): ``stplay``, ``jbass``, ``sitar``, ``b1``/``p1``/``d1`` are
    # not defined in this module; presumably they come from the FoxDot star
    # import or an older API (``smls``?) -- verify before running.
    b1 >> stplay(jbass, bass_pattern, 0.5, oct=4, lpf=240, room=0.7, mix=0.3, shape=0.1, amp=1.5)
    p1 >> stplay(sitar, lead_pattern, 0.25, oct=5, hpf=320, room=0.7, mix=0.3, amp=0.7)
    d1 >> play("x-t-")
    # Keep the process alive so the scheduled players keep running.
    while 1:
        sleep(100)
| 31.201493 | 104 | 0.666826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,113 | 0.266204 |
0413d003a791010c53460cd58a2f4cb6b4826cdf | 503 | py | Python | msgpack_stream/_io.py | gesslerpd/msgpack-stream | 4c5a189f1359ef788a9f5c5ae75eb2b0ddddb9f9 | [
"MIT"
] | null | null | null | msgpack_stream/_io.py | gesslerpd/msgpack-stream | 4c5a189f1359ef788a9f5c5ae75eb2b0ddddb9f9 | [
"MIT"
] | null | null | null | msgpack_stream/_io.py | gesslerpd/msgpack-stream | 4c5a189f1359ef788a9f5c5ae75eb2b0ddddb9f9 | [
"MIT"
] | null | null | null | import io
class container(dict):
    """Dict whose entries are also reachable as attributes.

    ``c.foo`` reads, writes and deletes ``c['foo']``.  Because lookups are
    forwarded to the dict, a missing attribute raises ``KeyError`` rather
    than ``AttributeError``.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        return dict.__getitem__(self, name)

    def __setattr__(self, name, value):
        dict.__setitem__(self, name, value)

    def __delattr__(self, name):
        dict.__delitem__(self, name)
def pack(typ, obj):
    """Serialize *obj* with the msgpack type *typ* and return the raw bytes.

    :param typ: type object exposing ``pack(stream, obj)``.
    :param obj: value to serialize.
    :return: the serialized payload as bytes.
    """
    with io.BytesIO() as buffer:
        typ.pack(buffer, obj)
        return buffer.getvalue()
def unpack(typ, data):
    """Deserialize exactly one value of msgpack type *typ* from *data*.

    :param typ: type object exposing ``unpack(stream)``.
    :param data: bytes containing exactly one serialized value.
    :return: the deserialized object.
    :raises RuntimeError: if bytes remain after the value was consumed.
    """
    # ``with`` guarantees the stream is closed even on the error path
    # (the original leaked the stream when RuntimeError was raised).
    with io.BytesIO(data) as stream:
        obj = typ.unpack(stream)
        extra_data = stream.read()
        if extra_data:
            raise RuntimeError('too much data', extra_data)
        return obj
| 18.62963 | 55 | 0.656064 | 128 | 0.254473 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.029821 |
041460ce850652eb21366eb704e9c3078b556bb6 | 12,448 | py | Python | code/segmentation.py | alejandropages/KernelPheno | 5a8d1fc8892b5cbc1309d42261596d12402c545c | [
"MIT"
] | null | null | null | code/segmentation.py | alejandropages/KernelPheno | 5a8d1fc8892b5cbc1309d42261596d12402c545c | [
"MIT"
] | 1 | 2020-12-04T02:34:43.000Z | 2020-12-04T02:34:43.000Z | code/segmentation.py | alejandropages/KernelPheno | 5a8d1fc8892b5cbc1309d42261596d12402c545c | [
"MIT"
] | null | null | null | from skimage.segmentation import slic
from skimage.io import imread, imsave
from skimage.color import rgb2gray
from skimage.measure import label, regionprops
from skimage.filters import threshold_otsu, gaussian
from skimage.color import label2rgb
from skimage.morphology import binary_closing, binary_opening, square
from skimage.util import invert
from skimage import img_as_float, img_as_int
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import os.path as osp
import os
import argparse
import logging
from logger import get_logger
from utils import create_name_from_path, show_image, is_gray
log = get_logger(level=logging.DEBUG)
def normalize_images(img_paths, out_dir=False, plot=False, cmap=None):
    '''
    Normalize the images with respect to the background pixels for all provided
    images

    For each image the mean background intensity is compared to the mean
    background over the whole batch (from _get_bg_avg) and the difference is
    added to every pixel, then the result is written next to the original
    (or into out_dir) with a '.norm' / cmap name component.

    :param img_paths: iterable of image file paths.
    :param out_dir: output directory for results (False keeps source dir).
    :param plot: also save a before/after comparison figure.
    :param cmap: 'gray' to process as grayscale, None for color.
    '''
    print('Normalizing images')
    if plot:
        fig, ax = plt.subplots(nrows=1, ncols=2)
    bg_avg = _get_bg_avg(img_paths, cmap=cmap)
    if bg_avg is None and not cmap == 'gray':
        print("There were no rgb images in the directory")
        return
    for img_file in img_paths:
        print('Normalizing ' + img_file)
        try:
            img = img_as_float(imread(img_file, as_gray=(True if cmap == 'gray' else False)))
            if len(img.shape) == 2 and not cmap == 'gray':
                print("Expected rgb image, got grayscale")
                print("Skipping ...")
                continue
        except FileNotFoundError as fnfe:
            # NOTE(review): execution continues after this except, so ``img``
            # below may be undefined (or stale from the previous iteration)
            # when the read fails -- confirm intended behavior.
            print(fnfe)
        # Mask out the foreground so the mean is computed over background only.
        filter = get_filter(img)
        masked = img.copy()
        if cmap == 'gray':
            masked[filter] = 0
        else:
            masked[filter] = [0,0,0]
        diff = bg_avg - np.mean(masked)
        print('Background diff: ' + str(diff))
        # Shift the whole image towards the batch-wide background mean.
        normed = img + diff
        if cmap == 'gray':
            # Clamp to the valid float-image range.
            normed[normed > 1.0] = 1.0
            normed[normed < -1.0] = -1.0
        if plot:
            print('Plotting')
            ax[0].set_title('Original')
            ax[1].set_title('Normalized')
            exts = ['fig']
            if cmap is not None:
                exts.append(cmap)
            ax[0].imshow(img, cmap=cmap)
            ax[1].imshow(normed, cmap=cmap)
            fig_name = create_name_from_path(img_file, exts, out_dir=out_dir)
            print('Saving figure: ' + fig_name)
            plt.savefig(fig_name)
        exts = ['norm']
        if cmap is not None:
            exts = [cmap]
        try:
            out_name = create_name_from_path(img_file, exts, out_dir=out_dir)
            print('Saving file: ' + out_name)
            imsave(out_name, normed)
        except OSError as ose:
            print(ose)
        except ValueError as ve:
            print(ve)
        finally:
            print(img_file)
def draw_bounding_boxes(img_paths, out_dir=False, cmap=None):
    '''
    Draw the sorted kernel bounding boxes on each image and save the figure.

    Output file names get a '.bbxs' (and optionally cmap) name component via
    create_name_from_path.

    :param img_paths: iterable of image file paths.
    :param out_dir: output directory for the figures (False keeps source dir).
    :param cmap: 'gray' to process as grayscale, None for color.
    '''
    print('Drawing Bounding Boxes')
    for img_path in img_paths:
        print('Processing ' + img_path)
        try:
            exts = []
            if cmap is not None:
                exts = [cmap]
            exts += ['bbxs']
            out_name = create_name_from_path(img_path, exts, out_dir)
            # NOTE(review): as_gray expects a bool; passing the 'gray' string
            # is truthy and works, but as_gray=(cmap == 'gray') would be clearer.
            img = imread(img_path, as_gray=cmap)
            regions = get_sorted_bboxes(img)
            fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
            # BUGFIX: was ``plot_bbxs`` (undefined) -> NameError at runtime;
            # the helper defined in this module is ``plot_bbx``.
            plot_bbx(img, regions, ax, cmap=cmap, out=out_name)
        except FileNotFoundError as fnfe:
            print('image file does not exist')
            print(fnfe)
        except OSError as ose:
            print('output directory likely does not exist')
            print(ose)
    return
def plot_bbx(img, regions, ax, cmap=None, out=None):
    """Draw numbered red bounding boxes on *img* with matplotlib.

    :param img: image array to display.
    :param regions: iterable of (minr, minc, maxr, maxc) boxes.
    :param ax: matplotlib axes to draw on.
    :param cmap: colormap passed to imshow (e.g. 'gray').
    :param out: if given, save the figure to this path; otherwise show it.
    """
    ax.imshow(img, cmap=cmap)
    for i, (minr, minc, maxr, maxc) in enumerate(regions):
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
        # Label each box with its index in sorted (reading) order.
        ax.text(minc - 20, minr - 20, str(i))
    if out is not None:
        plt.savefig(out)
    else:
        plt.show()
    return
def segment_images(img_paths, out_dir=False):
    """Mask the background of each image, display it and save a '.seg' copy.

    :param img_paths: iterable of image file paths.
    :param out_dir: output directory for results (False keeps source dir).
    """
    for path in img_paths:
        image = imread(path)
        background = get_filter(image)
        segmented = image.copy()
        # Zero out background pixels; RGB needs a per-channel fill value.
        fill = [0, 0, 0] if len(image.shape) == 3 else 0
        segmented[background] = fill
        out_name = create_name_from_path(path, 'seg', out_dir=out_dir)
        show_image(segmented)
        imsave(out_name, segmented)
    return
def get_sorted_bboxes(img):
    """Detect kernel bounding boxes and return them in reading order.

    Labels the background mask, drops connected components that are too small
    or too large to be a kernel, and sorts the remaining boxes left-to-right,
    top-to-bottom via sort_bbxs.

    :param img: image array (color or grayscale).
    :return: list of (minr, minc, maxr, maxc) tuples.
    """
    filter = get_filter(img)
    label_image = label(filter)
    coords = []
    for region in regionprops(label_image):
        # Size gate: skip speckles and oversized blobs.
        if region.area < 1000 or region.area > 100000:
            continue
        coords.append(region.bbox) # minr, minc, maxr, maxc
    sorted_bbxs = sort_bbxs(coords, img.shape[0])
    return sorted_bbxs
def test_get_sorted_bboxes():
    """Smoke test: sorting bounding boxes on a sample image must not raise."""
    img = imread("/home/apages/pysrc/KernelPheno/data/sample_images/DSC05389.jpeg")
    # BUGFIX: get_sorted_bboxes(img) takes no ``plot``/``fig_location``
    # keyword arguments; the old call raised TypeError.
    get_sorted_bboxes(img)
def sort_bbxs(regions, num_rows):
    '''
    Sort bboxes left to right, top to bottom

    Boxes are grouped into rows by vertical overlap with the topmost
    remaining box (or the last box added to the current row), then each row
    is ordered by column.  ``regions`` is consumed (emptied) in the process.

    :param regions: list of (minr, minc, maxr, maxc) tuples.
    :param num_rows: unused; kept for interface compatibility.
    :return: flat list of boxes in reading order.
    '''
    def same_row(box_a, box_b):
        # Two boxes share a row when their vertical extents intersect.
        top = max(box_a[0], box_b[0])
        bottom = min(box_a[2], box_b[2])
        return top < bottom

    grouped = []
    while regions:
        anchor = min(regions, key=lambda box: box[0])
        current = [anchor]
        regions.remove(anchor)
        for box in sorted(regions, key=lambda box: box[0]):
            if same_row(box, anchor) or same_row(box, current[-1]):
                current.append(box)
                regions.remove(box)
        grouped.append(current)

    ordered = []
    for row in grouped:
        ordered += sorted(row, key=lambda box: box[1])
    return ordered
def get_thumbnails(img, out_dir=False):
    '''
    Crop one thumbnail per detected kernel, in reading order.

    :param img: image array to crop from.
    :param out_dir: unused; kept for interface compatibility.
    :return: list of sub-images, one per sorted bounding box.
    '''
    return [
        img[minr:maxr, minc:maxc]
        for minr, minc, maxr, maxc in get_sorted_bboxes(img)
    ]
def test_get_thumbnails():
    """Smoke test: cropping thumbnails from a sample image must not raise."""
    img = imread("/home/apages/pysrc/KernelPheno/data/sample_images/DSC05389.jpeg")
    get_thumbnails(img)
    return
def segment_image(image):
    """Zero out the background pixels of *image* in place and return it.

    :param image: color or grayscale image array (mutated).
    :return: the same array with background pixels set to black.
    """
    log.info('Segmenting image')
    filter = get_filter(image)
    if is_gray(image):
        image[filter] = 0
    else:
        image[filter] = [0,0,0]
    return image
def get_filter(image):
    """Compute a boolean mask of pixels above the Otsu threshold.

    Callers treat the True region as background (they zero it out). The mask
    is cleaned with morphological closing then opening to remove speckles in
    both phases.

    :param image: color or grayscale image array.
    :return: boolean mask, True where intensity exceeds the Otsu threshold.
    """
    # TODO: test this function
    if not is_gray(image):
        image = rgb2gray(image)
    thresh = threshold_otsu(image)
    # clear speckled black pixels in the image
    filter = binary_closing(image > thresh, selem=square(10))
    # clear speckled white pixels in the image
    filter = binary_opening(filter, selem=square(15))
    return filter
def _get_bg_avg(img_paths, cmap=None):
    '''
    Get mean of background pixels for all images in img_paths

    Averages, over all readable images, the per-image mean of the
    background-masked pixels (foreground zeroed via get_filter).

    :param img_paths: iterable of image file paths.
    :param cmap: 'gray' to process as grayscale, None for color.
    :return: scalar mean (gray) or 3-element channel mean (color);
        0 when no grayscale image contributed.
    '''
    print('Getting background average')
    if cmap == 'gray':
        sum = 0
    else:
        sum = np.array([0,0,0], dtype=float)
    img_count = 0
    for img_file in img_paths:
        try:
            img = img_as_float(imread(img_file, as_gray=(True if cmap=='gray' else False)))
            if ((len(img.shape) == 2) and cmap != 'gray'):
                print("Ignoring: " + img_file)
                print("> the gray argument is False but this image is grayscale")
                continue
        except FileNotFoundError as fnfe:
            # NOTE(review): execution continues after this except, so ``img``
            # below may be undefined/stale when the read fails -- verify.
            print(fnfe)
            print('File: ' + img_file)
        filter = get_filter(img)
        masked = img.copy()
        if cmap == 'gray':
            masked[filter] = 0
        else:
            masked[filter] = [0,0,0]
        mean = np.mean(masked, axis=(0,1))
        print(mean)
        sum += mean
        img_count += 1
    try:
        mean = sum / float(img_count)
    except ZeroDivisionError as zde:
        # NOTE(review): only the grayscale (int) accumulator raises here;
        # dividing the numpy color accumulator by zero does not raise -- so
        # the color path with zero images returns inf/nan, not 0. Verify.
        return 0
    print('Mean: ' + str(mean))
    return mean
def test_gray_normal():
    """Exercise normalize_images on sample data in grayscale and color modes."""
    sample_img_dir = '/home/apages/pysrc/KernelPheno/data/sample_images'
    test_files = [osp.join(sample_img_dir, img_file) for img_file in os.listdir(sample_img_dir)]
    print("###################################################################")
    print(" GRAYSCALE")
    print("###################################################################")
    # BUGFIX: normalize_images has no ``gray`` parameter; the grayscale mode
    # is selected with ``cmap='gray'`` (the old call raised TypeError).
    normalize_images(test_files, out_dir='/home/apages/pysrc/KernelPheno/data/tests', cmap='gray')
    print("###################################################################")
    print(" COLOR")
    print("###################################################################")
    normalize_images(test_files, out_dir='/home/apages/pysrc/KernelPheno/data/tests')
def __test_all():
    """Run bounding-box drawing and normalization over the sample images,
    then re-draw boxes on the normalized outputs (color and grayscale)."""
    sample_img_dir = '/home/apages/pysrc/KernelPheno/data/sample_images'
    test_files = [osp.join(sample_img_dir, img_file) for img_file in os.listdir(sample_img_dir)]
    test_out = '/home/apages/pysrc/KernelPheno/data/tests'
    # Previously produced outputs, excluding bbox overlays and figures.
    gray_normed = [osp.join(test_out, file) for file in os.listdir(test_out) if ('bbxs' not in file.split(".")) and ('fig' not in file.split("."))]
    print('##################################################################')
    print(' TESTING SEGMENTATION')
    print('##################################################################')
    print('\n\n')
    print('DRAWING BOUNDING BOXES')
    draw_bounding_boxes(test_files,
                        out_dir='/home/apages/pysrc/KernelPheno/data/tests')
    draw_bounding_boxes(test_files,
                        out_dir='/home/apages/pysrc/KernelPheno/data/tests',
                        cmap='gray')
    print('##################################################################')
    print(' NORMALIZING IMAGES')
    print('##################################################################')
    print('COLOR:')
    normalize_images(test_files,
                     out_dir='/home/apages/pysrc/KernelPheno/data/tests',
                     plot=True)
    print('GRAY:')
    normalize_images(test_files,
                     out_dir='/home/apages/pysrc/KernelPheno/data/tests',
                     plot=True,
                     cmap='gray')
    print('##################################################################')
    print('\n\nDRAWING BBOXES AROUND NORMALIZED')
    print('##################################################################')
    draw_bounding_boxes(gray_normed,
                        out_dir='/home/apages/pysrc/KernelPheno/data/tests')
    draw_bounding_boxes(gray_normed,
                        out_dir='/home/apages/pysrc/KernelPheno/data/tests',
                        cmap='gray')
if __name__ == '__main__':
    # CLI entry point: pick one of the test/utility routines via flags.
    parser = argparse.ArgumentParser()
    parser.add_argument('--all', action='store_true', help='Run all functions on color and gray')
    parser.add_argument('--thumb', action='store_true', help='Test thumbnails script')
    parser.add_argument('--seg', action='store_true', help='Ouput Segmented images')
    parser.add_argument('--bbx', action='store_true', help='Test Get bboxes')
    args = parser.parse_args()
    if args.all:
        __test_all()
    elif args.thumb:
        # NOTE(review): ``create_thumbnails`` is not defined in this module
        # (only ``get_thumbnails(img)`` exists, and it takes an image, not
        # paths) -- this branch raises NameError; verify intended helper.
        create_thumbnails(['/home/apages/pysrc/KernelPheno/data/DSC05377.jpeg',
                           '/home/apages/pysrc/KernelPheno/data/DSC05389.jpeg',
                           '/home/apages/pysrc/KernelPheno/data/DSC05384.jpeg'],
                          out_dir='/home/apages/pysrc/KernelPheno/data/tests')
    elif args.seg:
        segment_images(['/home/apages/pysrc/KernelPheno/data/DSC05377.jpeg',
                        '/home/apages/pysrc/KernelPheno/data/DSC05389.jpeg',
                        '/home/apages/pysrc/KernelPheno/data/DSC05384.jpeg'],
                       out_dir='/home/apages/pysrc/KernelPheno/data/tests')
    elif args.bbx:
        test_get_sorted_bboxes()
| 32.671916 | 147 | 0.566436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,376 | 0.271208 |
04152059b68354f1293334f8c6d0586fb6c11889 | 5,271 | py | Python | examples/logan.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | null | null | null | examples/logan.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | 1 | 2020-08-18T21:22:15.000Z | 2021-02-01T22:39:16.000Z | examples/logan.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
#
# Drain usage example, a simple HTTP log aggregation stats monitor.
#
# Monitor an ever-growing HTTP log in Common Log Format by tailing it (let's a
# tail subprocess handle all the hassles like rename, re-creation during
# log-rotation etc.) and aggregating some stats on a time-basis.
#
# LogStats class uses an event loop to schedule some callbacks that just calculate
# and print some stats like top hits by section or check for the log flow size
# printing an alert based on a threshold.
import os
import sys
import asyncio
import subprocess
from datetime import datetime as dt
from statistics import mean
from collections import defaultdict
from dataclasses import dataclass
import drain
# Path of the log to watch
LOG_PATH = "../../../scratch/logan/access.log"
@dataclass
class LogRecord(drain.Record):
"""
Define a simple log record dataclass that apply some simple string
manipulation based on Common Log Format
"""
row: str
@property
def section(self) -> str:
tokens = self.row.split(" ")
try:
section = "/".join([tokens[0], tokens[6].split("/")[1]])
except IndexError:
section = ""
return section
@property
def method(self) -> str:
tokens = self.row.split(" ")
try:
method = tokens[5][1:]
except IndexError:
method = ""
return method
# Create a simple async generator based source
async def source(path):
"""
Async generator data source, subprocess a tail command on a POSIX shell
yielding rows as they're read.
"""
log = subprocess.Popen(
["tail", "-f", path],
encoding="utf-8",
errors="ignore",
universal_newlines=True,
bufsize=1,
stdout=subprocess.PIPE,
).stdout
try:
while True:
line = log.readline()
yield LogRecord(line).dumps()
except KeyboardInterrupt:
pass
finally:
log.close()
# Create a streaming app
app = drain.App()
log_stream = app.stream(
source(LOG_PATH), record_class=LogRecord, concurrency=10, name="test"
)
class LogStats:
"""
Just a simple stats aggregator for the log stream, compute some basic
calculations and print results on a time-basis using an asyncio event-loop
to schedule callbacks.
"""
class Window:
"""Deadsimple window-list inner class, trim size automatically on insert"""
def __init__(self, size):
self.size = size
self.items = []
def put(self, item):
if len(self.items) == self.size:
self.items.pop(0)
self.items.append(item)
def mean(self):
return mean(self.items)
def __init__(
self,
window_size=120,
alert_threshold=30,
stats_every=10,
mean_hits_every=1,
):
self.window = self.Window(window_size)
self.alert = False
self.alert_threshold = alert_threshold
self.methods = defaultdict(int)
self.hits = defaultdict(int)
self.hits_per_second = 0
self.loop = asyncio.get_running_loop()
self.stats_every = stats_every
self.mean_hits_every = mean_hits_every
self.loop.call_later(stats_every, self.print_hits)
self.loop.call_later(mean_hits_every, self.mean_hits)
def hit(self, record):
self.methods[record.method] += 1
self.hits[record.section] += 1
self.hits_per_second += 1
def print_hits(self):
max_hits = max(self.hits, key=self.hits.get)
min_hits = min(self.hits, key=self.hits.get)
methods = ""
total_calls = sum(self.methods.values())
methods = " ".join(
[f"{k}={v/total_calls}" for k, v in self.methods.items()]
)
print(f"Methods (%): {methods}")
print(f"Max hits by section: {max_hits} {self.hits[max_hits]}",)
print(f"Min hits by section: {min_hits} {self.hits[min_hits]}",)
self.loop.call_later(self.stats_every, self.print_hits)
def mean_hits(self):
self.window.put(self.hits_per_second)
self.hits_per_second = 0
avg = self.window.mean()
if not self.alert and avg > self.alert_threshold:
self.alert = True
print(
f"High traffic generated an alert - hits={avg}, "
f" triggered at {dt.now().strftime('%d/%m/%Y %H:%M:%S')}",
)
elif self.alert and avg < self.alert_threshold:
self.alert = False
print(
f"High traffic alert recovered - hits={avg}, "
f" recovered at {dt.now().strftime('%d/%m/%Y %H:%M:%S')}",
)
self.loop.call_later(self.mean_hits_every, self.mean_hits)
@app.sink(log_stream)
async def log_consumer(stream):
print(f"Watching file: {LOG_PATH}")
log_stats = LogStats()
async for record in stream:
log_stats.hit(record)
if __name__ == "__main__":
# Check file exists
if not os.path.isfile(LOG_PATH):
print(f"Cannot open file {LOG_PATH}: No such file or directory")
sys.exit(0)
try:
app.run()
except KeyboardInterrupt:
pass
| 28.646739 | 83 | 0.611649 | 3,287 | 0.623601 | 538 | 0.102068 | 809 | 0.153481 | 698 | 0.132423 | 1,685 | 0.319674 |
0416d53aab351e719d74b901b85b83af2eaaf30a | 3,854 | py | Python | Instructions/app.py | RHARO-DATA/sqlalchemy_Challenge | f1be50e3a26f3de71f89cc9d0d21be80bdaec968 | [
"ADSL"
] | null | null | null | Instructions/app.py | RHARO-DATA/sqlalchemy_Challenge | f1be50e3a26f3de71f89cc9d0d21be80bdaec968 | [
"ADSL"
] | null | null | null | Instructions/app.py | RHARO-DATA/sqlalchemy_Challenge | f1be50e3a26f3de71f89cc9d0d21be80bdaec968 | [
"ADSL"
] | null | null | null | from flask import Flask, jsonify
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
import pandas as pd
import numpy as np
import datetime as dt
import sqlalchemy
# Reflect the SQLite climate database and expose its tables as ORM classes.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# NOTE(review): this call discards its result -- presumably left over from
# interactive exploration; it has no effect here.
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
#Flask
app = Flask(__name__)
#Home Route
@app.route("/")
def home():
    """List the available API routes as an HTML snippet."""
    return "Possible routes:<br>/api/v1.0/precipitation<br>/api/v1.0/stations<br>/api/v1.0/tobs<br>/api/v1.0/STARTDATE<br>/api/v1.0/STARTDATE/ENDDATE<br><br>Format dates as YYYY-MM-DD"
#Precipitation Route
@app.route("/api/v1.0/precipitation")
def prcp():
    """Return the last 12 months of precipitation readings as JSON,
    keyed by date (ISO string)."""
    # Most recent date in the data, then look back one year.
    latest = dt.date.fromisoformat(session.query(Measurement).order_by(desc(Measurement.date)).first().date)
    earliest = latest-dt.timedelta(days=365)
    prcp_data = session.query(Measurement.date, Measurement.prcp).filter(earliest<Measurement.date)
    prcp_df = pd.read_sql(prcp_data.statement, session.bind)
    prcp_df['date']=pd.to_datetime(prcp_df.date)
    prcp_df.set_index('date', inplace=True)
    prcp_df.sort_index(inplace=True)
    #Convert to jsonify
    prcp_df.index = prcp_df.index.astype('str')
    return jsonify(prcp_df.to_dict())
#station route
@app.route("/api/v1.0/stations")
def stations():
    """Return the distinct station ids found in the measurement table as JSON."""
    stations = session.query(Measurement.station).group_by(Measurement.station)
    # FIX: dropped the stray chained assignment (``stations_df = prcp_df = ...``)
    # that created a dead, misleading alias.
    stations_df = pd.read_sql(stations.statement, session.bind)
    return jsonify(stations_df.to_dict())
# Tobs Route
@app.route("/api/v1.0/tobs")
def tobs():
    """Return the last 12 months of temperature observations as JSON,
    keyed by date (ISO string)."""
    # Most recent date in the data, then look back one year.
    latest = dt.date.fromisoformat(session.query(Measurement).order_by(desc(Measurement.date)).first().date)
    earliest = latest-dt.timedelta(days=365)
    tobs_data = session.query(Measurement.date, Measurement.tobs).filter(earliest<Measurement.date)
    tobs_df = pd.read_sql(tobs_data.statement, session.bind)
    tobs_df['date']=pd.to_datetime(tobs_df.date)
    tobs_df.set_index('date', inplace=True)
    tobs_df.sort_index(inplace=True)
    tobs_df.index = tobs_df.index.astype('str')
    return jsonify(tobs_df.to_dict())
#start Route
@app.route("/api/v1.0/<start>")
def start_metrics(start):
    """Return [min, avg, max] temperature from *start* onward as JSON;
    404 when *start* is not a date present in the data."""
    # Validate that the requested date actually occurs in the table.
    date_list = session.query(Measurement.date)
    date_df = pd.read_sql(date_list.statement, session.bind)
    date_dict = date_df.to_dict()
    for date in date_dict["date"].values():
        if date == start:
            return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
                filter(Measurement.date >= start).all())
    return jsonify({"error":"date not found"}),404
# End Route
@app.route("/api/v1.0/<start>/<end>")
def start_end_metrics(start,end):
    """Return [min, avg, max] temperature between *start* and *end*
    (inclusive) as JSON; 404 when either date is absent from the data."""
    found_start = False
    found_end = False
    # Lexicographic compare works because dates are YYYY-MM-DD strings.
    if start > end:
        return " Wrong order"
    date_list = session.query(Measurement.date)
    date_df = pd.read_sql(date_list.statement, session.bind)
    date_dict = date_df.to_dict()
    # NOTE(review): the two branches below duplicate the same query/return;
    # the aggregate runs once both endpoints have been seen in the data.
    for date in date_dict["date"].values():
        if date == start:
            found_start = True
            if (found_start == True) and (found_end == True):
                return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
                    filter(Measurement.date >= start).filter(Measurement.date <= end).all())
        if date == end:
            found_end = True
            if (found_start == True) and (found_end == True):
                return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
                    filter(Measurement.date >= start).filter(Measurement.date <= end).all())
    return jsonify({"error":"date not found"}),404
if __name__ == "__main__":
    # Development server only; disable debug for production deployments.
    app.run(debug=True)
041736fd2e4a9549c96880f461f2c706cb2ed8e3 | 1,189 | py | Python | cogdl/layers/gin_layer.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 1,072 | 2019-08-02T05:46:21.000Z | 2022-03-31T07:51:53.000Z | cogdl/layers/gin_layer.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 96 | 2019-08-05T17:27:22.000Z | 2022-03-03T08:36:57.000Z | cogdl/layers/gin_layer.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 299 | 2019-08-08T07:33:10.000Z | 2022-03-31T09:30:07.000Z | import torch
import torch.nn as nn
from cogdl.utils import spmm
class GINLayer(nn.Module):
r"""Graph Isomorphism Network layer from paper `"How Powerful are Graph
Neural Networks?" <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{sum}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
apply_func : callable layer function)
layer or function applied to update node feature
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINLayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
out = (1 + self.eps) * x + spmm(graph, x)
if self.apply_func is not None:
out = self.apply_func(out)
return out
| 30.487179 | 75 | 0.606392 | 1,121 | 0.942809 | 0 | 0 | 0 | 0 | 0 | 0 | 600 | 0.504626 |
04173ab291a28dec9be5ec4dc8e4e4e26a03aeda | 20 | py | Python | purviewcli/model/__init__.py | pblocz/purviewcli | 4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b | [
"MIT"
] | null | null | null | purviewcli/model/__init__.py | pblocz/purviewcli | 4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b | [
"MIT"
] | null | null | null | purviewcli/model/__init__.py | pblocz/purviewcli | 4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b | [
"MIT"
] | null | null | null | from .atlas import * | 20 | 20 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
04188d5b55bb17b85704edde7d4e46723975338c | 67 | py | Python | src/backend/database/__init__.py | aimanow/sft | dce87ffe395ae4bd08b47f28e07594e1889da819 | [
"Apache-2.0"
] | null | null | null | src/backend/database/__init__.py | aimanow/sft | dce87ffe395ae4bd08b47f28e07594e1889da819 | [
"Apache-2.0"
] | null | null | null | src/backend/database/__init__.py | aimanow/sft | dce87ffe395ae4bd08b47f28e07594e1889da819 | [
"Apache-2.0"
] | null | null | null | from app.extensions import db, bcrypt
__all__ = ['db', 'bcrypt']
| 13.4 | 37 | 0.686567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.179104 |
0418998a91da3e3d7abd865cd4fdccc83d2e5270 | 325 | py | Python | arboreto/utils.py | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
] | 20 | 2018-06-28T07:00:47.000Z | 2020-10-08T08:58:22.000Z | arboreto/utils.py | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
] | 23 | 2018-06-06T13:11:20.000Z | 2021-01-08T03:37:43.000Z | arboreto/utils.py | redst4r/arboreto | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | [
"BSD-3-Clause"
] | 15 | 2018-11-21T08:21:46.000Z | 2020-11-25T06:28:32.000Z | """
Utility functions.
"""
def load_tf_names(path):
    """
    Load transcription factor names from a text file.

    :param path: the path of the transcription factor list file
        (one name per line; surrounding whitespace is stripped).
    :return: a list of transcription factor names read from the file.
    """
    with open(path) as tf_file:
        return [line.strip() for line in tf_file]
| 20.3125 | 69 | 0.655385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.529231 |
0419d09c8576ea0ee5a5a7dda0668b4eb489ea24 | 315 | py | Python | twitter_auth.py | lagolucas/twitter-mimic | 1d2a26f56f0796d8dcbfb2d2175dddd706235606 | [
"MIT"
] | 1 | 2020-01-16T18:45:51.000Z | 2020-01-16T18:45:51.000Z | twitter_auth.py | lagolucas/twitter-mimic | 1d2a26f56f0796d8dcbfb2d2175dddd706235606 | [
"MIT"
] | null | null | null | twitter_auth.py | lagolucas/twitter-mimic | 1d2a26f56f0796d8dcbfb2d2175dddd706235606 | [
"MIT"
] | null | null | null | import tweepy
def autentica():
    """Authenticate against the Twitter API and return a tweepy API handle
    configured with rate-limit waiting and retries.

    NOTE(review): ``consumer_token``, ``consumer_secret``, ``key`` and
    ``secret`` are not defined in this module -- calling this as-is raises
    NameError; presumably the credentials were stripped before publishing.
    """
    auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
    auth.set_access_token(key, secret)
    # Retry transient failures (e.g. HTTP 503) and wait out rate limits.
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3, retry_delay=60,
                     retry_errors=set([503]))
    return api
| 24.230769 | 114 | 0.707937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
041b02a9d732f392f197e950c0af95abe44f914e | 678 | py | Python | user.py | kevin3708/PythonIP1 | e0909ec45ae653a3b81eaa44ce2679d401c5480a | [
"Unlicense"
] | null | null | null | user.py | kevin3708/PythonIP1 | e0909ec45ae653a3b81eaa44ce2679d401c5480a | [
"Unlicense"
] | null | null | null | user.py | kevin3708/PythonIP1 | e0909ec45ae653a3b81eaa44ce2679d401c5480a | [
"Unlicense"
] | null | null | null | class User:
"""
Class that generates new instances of users.
"""
user_list = []
def __init__(self,tUsername,iUsername,email,sUsername):
self.tUsername=tUsername
self.iUsername=iUsername
self.email=email
self.sUsername=sUsername
def save_user(self):
User.user_list.append(self)
def delete_user(self):
User.user_list.remove(self)
@classmethod
def from_input(cls):
return cls(
input('Twitter username: '),
input('Instagram username: '),
input('Email address: '),
input('Snapchat username: '),
)
user=User.from_input()
| 23.37931 | 59 | 0.584071 | 649 | 0.957227 | 0 | 0 | 231 | 0.340708 | 0 | 0 | 140 | 0.20649 |
041d25b3caf16615ffe44105dc3916c9eb43a246 | 10,048 | py | Python | tf_DDPG.py | laket/DDPG_Eager | e0b6f960acc193838189b714e67cd1f7da60f440 | [
"Apache-2.0"
] | 2 | 2019-05-13T18:43:17.000Z | 2019-08-22T08:08:03.000Z | tf_DDPG.py | DeanLeeFumu/DDPG_Eager | e0b6f960acc193838189b714e67cd1f7da60f440 | [
"Apache-2.0"
] | null | null | null | tf_DDPG.py | DeanLeeFumu/DDPG_Eager | e0b6f960acc193838189b714e67cd1f7da60f440 | [
"Apache-2.0"
] | 1 | 2019-08-23T02:45:16.000Z | 2019-08-23T02:45:16.000Z | # Copyright 2018 Oiki Tomoaki. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
DDPG implementation in Tensorflow Eager Execution
"""
import numpy as np
import tensorflow as tf
from utils import PytorchInitializer
layers = tf.keras.layers
regularizers = tf.keras.regularizers
losses = tf.keras.losses
class Actor(tf.keras.Model):
def __init__(self, state_dim, action_dim, max_action, name="Actor"):
super().__init__(name=name)
self.l1 = layers.Dense(400, kernel_initializer=PytorchInitializer(),
name="L1")
self.l2 = layers.Dense(300, kernel_initializer=PytorchInitializer(),
name="L2")
self.l3 = layers.Dense(action_dim, kernel_initializer=PytorchInitializer(),
name="L3")
self.max_action = max_action
# 後段の処理のために早めにshapeを確定させる
dummy_state = tf.constant(np.zeros(shape=[1, state_dim], dtype=np.float32))
self(dummy_state)
def call(self, inputs):
with tf.device("/gpu:0"):
features = tf.nn.relu(self.l1(inputs))
features = tf.nn.relu(self.l2(features))
features = self.l3(features)
action = self.max_action * tf.nn.tanh(features)
return action
class Critic(tf.keras.Model):
def __init__(self, state_dim, action_dim, wd=1e-2, name="Critic"):
super().__init__(name=name)
self.l1 = layers.Dense(400, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L1")
self.l2 = layers.Dense(300, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L2")
self.l3 = layers.Dense(1, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L3")
dummy_state = tf.constant(np.zeros(shape=[1, state_dim], dtype=np.float32))
dummy_action = tf.constant(np.zeros(shape=[1, action_dim], dtype=np.float32))
self([dummy_state, dummy_action])
def call(self, inputs):
with tf.device("/gpu:0"):
x, u = inputs
x = tf.nn.relu(self.l1(x))
inner_feat = tf.concat([x, u], axis=1)
x = tf.nn.relu(self.l2(inner_feat))
x = self.l3(x)
return x
class DDPG(tf.contrib.checkpoint.Checkpointable):
def __init__(self, state_dim, action_dim, max_action):
self.actor = Actor(state_dim, action_dim, max_action)
self.actor_target = Actor(state_dim, action_dim, max_action)
# initialize target network
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(param)
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
self.critic = Critic(state_dim, action_dim)
self.critic_target = Critic(state_dim, action_dim)
# initialize target network
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(param)
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
def select_action(self, state):
"""
:param np.ndarray state:
:return:
"""
assert isinstance(state, np.ndarray)
assert len(state.shape) == 1
state = np.expand_dims(state, axis=0).astype(np.float32)
action = self.actor(state).numpy()
return action[0]
def train(self, replay_buffer, iterations, batch_size=64, discount=0.99, tau=0.001):
for it in range(iterations):
state, next_state, action, reward, done = replay_buffer.sample(batch_size)
state = np.array(state, dtype=np.float32)
next_state = np.array(next_state, dtype=np.float32)
action = np.array(action, dtype=np.float32)
reward = np.array(reward, dtype=np.float32)
done = np.array(done, dtype=np.float32)
not_done = 1 - done
with tf.device("/gpu:0"):
with tf.GradientTape() as tape:
target_Q = self.critic_target([next_state, self.actor_target(next_state)])
target_Q = reward + (not_done * discount * target_Q)
# detach => stop_gradient
target_Q = tf.stop_gradient(target_Q)
current_Q = self.critic([state, action])
# Compute critic loss + L2 loss
critic_loss = tf.reduce_mean(losses.MSE(current_Q, target_Q)) + 0.5*tf.add_n(self.critic.losses)
critic_grad = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic.trainable_variables))
with tf.GradientTape() as tape:
next_action = self.actor(state)
actor_loss = -tf.reduce_mean(self.critic([state, next_action]))
actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))
# Update target networks
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
class DDPG_fast(tf.contrib.checkpoint.Checkpointable):
def __init__(self, state_dim, action_dim, max_action):
self.actor = Actor(state_dim, action_dim, max_action)
self.actor_target = Actor(state_dim, action_dim, max_action)
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
# initialize target network
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(param)
self.critic = Critic(state_dim, action_dim)
self.critic_target = Critic(state_dim, action_dim)
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
# initialize target network
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(param)
def select_action(self, state):
"""
:param np.ndarray state:
:return:
"""
assert isinstance(state, np.ndarray)
assert len(state.shape) == 1
state = np.expand_dims(state, axis=0).astype(np.float32)
action = self._select_action_body(tf.constant(state))
return action.numpy()[0]
@tf.contrib.eager.defun
def _select_action_body(self, state):
"""
:param np.ndarray state:
:return:
"""
action = self.actor(state)
return action
def train(self, replay_buffer, iterations, batch_size=64, discount=0.99, tau=0.001):
for it in range(iterations):
state, next_state, action, reward, done = replay_buffer.sample(batch_size)
state = np.array(state, dtype=np.float32)
next_state = np.array(next_state, dtype=np.float32)
action = np.array(action, dtype=np.float32)
reward = np.array(reward, dtype=np.float32)
done = np.array(done, dtype=np.float32)
not_done = 1 - done
self._train_body(state, next_state, action, reward, not_done, discount, tau)
@tf.contrib.eager.defun
def _train_body(self, state, next_state, action, reward, not_done, discount, tau):
with tf.device("/gpu:0"):
with tf.GradientTape() as tape:
target_Q = self.critic_target([next_state, self.actor_target(next_state)])
target_Q = reward + (not_done * discount * target_Q)
# detach => stop_gradient
target_Q = tf.stop_gradient(target_Q)
current_Q = self.critic([state, action])
# Compute critic loss + L2 loss
critic_loss = tf.reduce_mean(losses.MSE(current_Q, target_Q)) + 0.5*tf.add_n(self.critic.losses)
critic_grad = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic.trainable_variables))
with tf.GradientTape() as tape:
next_action = self.actor(state)
actor_loss = -tf.reduce_mean(self.critic([state, next_action]))
actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))
# Update target networks
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
| 40.192 | 116 | 0.624104 | 9,164 | 0.908766 | 0 | 0 | 1,780 | 0.176517 | 0 | 0 | 1,320 | 0.1309 |
041d51bd7b7973f10ca83696af8bf2fcc94f43e1 | 2,776 | py | Python | apps/restaccount/resources.py | solatis/microservices | ee80292fe94cca93e3e2be6c5bbe5c46f34a9969 | [
"MIT"
] | 38 | 2017-09-08T20:04:31.000Z | 2022-01-20T12:53:44.000Z | apps/restaccount/resources.py | solatis/microservices | ee80292fe94cca93e3e2be6c5bbe5c46f34a9969 | [
"MIT"
] | 6 | 2020-02-11T23:09:25.000Z | 2022-03-02T14:53:40.000Z | apps/restaccount/resources.py | solatis/microservices | ee80292fe94cca93e3e2be6c5bbe5c46f34a9969 | [
"MIT"
] | 8 | 2018-04-26T03:42:56.000Z | 2021-07-30T19:54:04.000Z | import json
import falcon
import time
import uuid
import requests
from apps.database import init_db, db_session
from apps.models import Account
from apps.restaccount.logging import logging
logger = logging.getLogger(__name__)
from decouple import config
ES_HOST = config('EVENTSTORE_HOST', default='eventstore')
ES_PORT = config('EVENTSTORE_PORT', default=2113, cast=int)
stream_url = 'http://{}:{}/streams/accounts'.format(ES_HOST, ES_PORT)
content_header = { 'Content-Type': 'application/vnd.eventstore.events+json' }
logger.info('stream_url: {}'.format(stream_url))
def get_account(account_id):
return Account.query.get(account_id)
class BalanceResource(object):
def on_get(self, req, resp, account_id):
init_db()
doc = db_session.query(Account).get(account_id)
db_session.close()
if doc is None:
raise falcon.HTTPBadRequest('Balance missing', 'Deposit money to start using an account')
else:
# Create a JSON representation of the resource
resp.body = json.dumps(doc.as_dict(), ensure_ascii=False)
# The following line can be omitted because 200 is the default
# status returned by the framework, but it is included here to
# illustrate how this may be overridden as needed.
resp.status = falcon.HTTP_200
class DepositResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
logger.info('doc: {}'.format(doc))
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-deposit",
"data": doc
}
]
logger.info("payload: {}".format(payload))
r = requests.post(stream_url, data=str(payload), headers=content_header)
resp.status = falcon.HTTP_200
class TransferResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
acc = get_account(doc['account_id'])
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-transfer",
"data": doc
}
]
if acc is None:
raise falcon.HTTPBadRequest('Account missing', 'You must deposit into an account before transfering')
if acc.balance < doc['amount']:
raise falcon.HTTPBadRequest('Insufficient funds', 'Account balance {} less than transfer amount {}'.format(acc.balance, doc['amount']))
else:
logger.info("payload: {}".format(payload))
r = requests.post(stream_url, data=str(payload), headers=content_header)
resp.status = falcon.HTTP_200 | 31.908046 | 147 | 0.627522 | 2,126 | 0.76585 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.262248 |
0420a40477dce2cde3d6888f93745bcc9d79ef0d | 4,123 | py | Python | mmselfsup/models/algorithms/cae.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 355 | 2021-12-16T04:32:49.000Z | 2022-03-31T22:15:23.000Z | mmselfsup/models/algorithms/cae.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 89 | 2021-12-16T05:15:42.000Z | 2022-03-31T10:57:39.000Z | mmselfsup/models/algorithms/cae.py | mitming/mmselfsup | 5b5cb474776291cfcb9a1140afd11b696e11fcab | [
"Apache-2.0"
] | 74 | 2021-12-16T04:40:02.000Z | 2022-03-31T08:40:32.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
import torch
from torchvision.transforms import Normalize
from ..builder import ALGORITHMS, build_backbone, build_head, build_neck
from .base import BaseModel
@ALGORITHMS.register_module()
class CAE(BaseModel):
"""CAE.
Implementation of `Context Autoencoder for Self-Supervised Representation
Learning <https://arxiv.org/abs/2202.03026>`_.
Args:
backbone (dict, optional): Config dict for module of backbone.
neck (dict, optional): Config dict for module of deep features to
compact feature vectors. Defaults to None.
head (dict, optional): Config dict for module of loss functions.
Defaults to None.
base_momentum (float): The base momentum coefficient for the target
network. Defaults to 0.0.
init_cfg (dict, optional): the config to control the initialization.
"""
def __init__(self,
backbone: dict = None,
neck: dict = None,
head: dict = None,
base_momentum: float = 0.0,
init_cfg: dict = None,
**kwargs) -> None:
super(CAE, self).__init__(init_cfg)
assert backbone is not None
self.backbone = build_backbone(backbone)
self.teacher = build_backbone(backbone)
assert neck is not None
self.neck = build_neck(neck)
assert head is not None
self.head = build_head(head)
self.momentum = base_momentum
self.img_norm = Normalize(
mean=torch.tensor((0.485, 0.456, 0.406)),
std=torch.tensor((0.229, 0.224, 0.225)))
def init_weights(self) -> None:
super().init_weights()
self._init_teacher()
def _init_teacher(self) -> None:
# init the weights of teacher with those of backbone
for param_backbone, param_teacher in zip(self.backbone.parameters(),
self.teacher.parameters()):
param_teacher.detach()
param_teacher.data.copy_(param_backbone.data)
param_teacher.requires_grad = False
def momentum_update(self) -> None:
"""Momentum update of the teacher network."""
for param_bacbone, param_teacher in zip(self.backbone.parameters(),
self.teacher.parameters()):
param_teacher.data = param_teacher.data * self.momentum + \
param_bacbone.data * (1. - self.momentum)
def extract_feat(self, img: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
x = self.backbone(img, mask)
return x
def forward_train(self, samples: Sequence, **kwargs) -> dict:
img, img_target, mask = samples
# normalize images and the images to get the target
img_list = [self.img_norm(x).unsqueeze(0) for x in img]
img = torch.cat(img_list)
img_target = 0.8 * img_target + 0.1
mask = mask.flatten(1).to(torch.bool)
unmasked = self.backbone(img, mask)
# get the latent prediction for the masked patches
with torch.no_grad():
latent_target = self.teacher(img, ~mask)
latent_target = latent_target[:, 1:, :]
self.momentum_update()
pos_embed = self.backbone.pos_embed.expand(img.shape[0], -1, -1)
pos_embed_masked = pos_embed[:,
1:][mask].reshape(img.shape[0], -1,
pos_embed.shape[-1])
pos_embed_unmasked = pos_embed[:, 1:][~mask].reshape(
img.shape[0], -1, pos_embed.shape[-1])
# input the unmasked tokens and masked tokens to the decoder
logits, latent_pred = self.neck(unmasked[:, 1:], pos_embed_masked,
pos_embed_unmasked)
logits = logits.view(-1, logits.shape[-1])
losses = self.head(img_target, logits, latent_pred, latent_target,
mask)
return losses
| 37.481818 | 77 | 0.589619 | 3,853 | 0.934514 | 0 | 0 | 3,883 | 0.94179 | 0 | 0 | 955 | 0.231627 |
0420d2d243d0288c4e945481bf301146269df94c | 3,776 | py | Python | plncpro/predstoseq.py | nicolasDelhomme/PLncPRO | af11592e779170ff351f841b4b2754577e068999 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2019-03-30T02:33:36.000Z | 2021-06-23T02:50:27.000Z | plncpro/predstoseq.py | nicolasDelhomme/PLncPRO | af11592e779170ff351f841b4b2754577e068999 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2021-12-25T04:30:19.000Z | 2022-02-27T00:18:12.000Z | plncpro/predstoseq.py | nicolasDelhomme/PLncPRO | af11592e779170ff351f841b4b2754577e068999 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4 | 2019-03-30T02:33:43.000Z | 2022-01-11T10:07:55.000Z | '''
PLNCPRO
This file takes as input
and it extracts dasta seq by label after reading prediction file
Author : Urminder Singh
email: urmind13_sit@jnu.ac.in
UrMi 3/5/16
'''
import sys
import getopt
#import math
#import re
from Bio import SeqIO
from Bio.Seq import Seq
def main(args = sys.argv,home=None):
#set defaults
cutoff=0
min_len=0
max_len=float('Inf')
label='0'
try:
opts, args = getopt.getopt(sys.argv[2:],"hf:o:p:l:s:r:",["ifile=","ofile=","min=","max="])
#print opts
except getopt.GetoptError:
print('predstoseq.py -f <input fastafile> -o <outputfile> -p <predictionfile> -l <required label default:0> -s <class_prob_cutoff, default:0> -m <min_length, default:0> <max_length, default:inf>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('Use this to extract lncRNA or mRNA sequences, as predicted by PLNCPRO, from the input fasta file')
print('Usage:')
print('predstoseq.py -f <input fastafile> -o <outputfile> -p <predictionfile> -l <required label default:0> -s <class_prob_cutoff[range 0-1], default:0> -m <min_length, default:0> <max_length, default:inf>')
sys.exit()
elif opt in ("-f", "--ifile"):
#print 'infile found'
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
#print outputfile
elif opt in ("-p"):
predsfile= arg
#print predsfile
elif opt in ("-l"):
label=arg
#print label
elif opt in ("-s"):
#print 's: '+arg
cutoff=float(arg)
#print cutoff
elif opt in ("-r"):
min_len=int(arg)
elif opt in ("--min"):
min_len=int(arg)
#print 'max found'
elif opt in ("--max"):
max_len=int(arg)
#print 'max found'
if cutoff>1 or cutoff<0:
print('please enter probability value in range [0,1]')
sys.exit()
if max_len<=min_len:
print('Error: check min and max len')
sys.exit()
if label != '0' and label != '1':
print('Please check lablel is 0 or 1')
sys.exit()
print('**********Extracting Sequences***************')
print(('class prob cutoff='+ str(cutoff)))
print(('min length cutoff='+ str(min_len)))
print(('max length cutoff='+ str(max_len)))
#label=sys.argv[4]
#open preds file
idlist=[]
with open(predsfile) as f:
content=f.readlines()
for l in content:
if label=='1':
tocheck=float(l.split('\t')[2])
elif label=='0':
tocheck=float(l.split('\t')[3])
else:
print('check label\nError')
sys.exit(0)
if l.split('\t')[1]==label:
if cutoff <= tocheck :
idlist.append(l.split('\t')[0])
print(('Total sequences in prediction file with label '+label+' and class prob >= '+str(cutoff)+', were: '+str(len(idlist))))
ctr=0
min_len_filter=0
max_len_filter=0
#extract seq from fasta
output_handle = open(outputfile, "w")
for record in SeqIO.parse(inputfile, "fasta"):
found=0
seqid=record.id
#print record.seq
if seqid in idlist :
#write to file
#print 'found'
if len(record.seq)>max_len:
max_len_filter=max_len_filter+1
elif len(record.seq)<min_len:
min_len_filter=min_len_filter+1
else:
#print len(record.seq)
ctr=ctr+1
SeqIO.write(record, output_handle, "fasta")
found =1
print(('Total filtered due to length < '+str(min_len)+', were: '+str(min_len_filter)))
print(('Total filtered due to length > '+str(max_len)+', were: '+str(max_len_filter)))
print(('Sequences not found in the fasta file were: '+str(len(idlist)-ctr-min_len_filter-max_len_filter)))
if len(idlist)-ctr-min_len_filter-max_len_filter>0:
print('WARNING:')
print(('Please check input fasta as '+str(len(idlist)-ctr-min_len_filter-max_len_filter)+ ' sequences in the prediction file did not match to any sequences in fasta file'))
print(('Total sequences written: '+str(ctr)))
print(('File '+outputfile+' saved!'))
if __name__ == "__main__":
main()
| 28.390977 | 210 | 0.662606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,742 | 0.461335 |
0421fc55c0cd9357dc4b4cf552bac0572d28cf55 | 1,609 | py | Python | KnowledgeQuizTool/SummitMeeting/TitleBaidu.py | JianmingXia/StudyTest | 66d688ad41bbce619f44359ea126ff07a923f97b | [
"MIT"
] | null | null | null | KnowledgeQuizTool/SummitMeeting/TitleBaidu.py | JianmingXia/StudyTest | 66d688ad41bbce619f44359ea126ff07a923f97b | [
"MIT"
] | 68 | 2020-09-05T04:22:49.000Z | 2022-03-25T18:47:08.000Z | KnowledgeQuizTool/SummitMeeting/TitleBaidu.py | JianmingXia/StudyTest | 66d688ad41bbce619f44359ea126ff07a923f97b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author : Skye
# @Time : 2018/1/8 20:38
# @desc : python 3 , 答题闯关辅助,截屏 ,OCR 识别,百度搜索
import io
import urllib.parse
import webbrowser
import requests
import base64
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
def pull_screenshot():
os.system('adb shell screencap -p /sdcard/screenshot.png')
os.system('adb pull /sdcard/screenshot.png .')
pull_screenshot()
img = Image.open("./screenshot.png")
# 用 matplot 查看测试分辨率,切割
region = img.crop((50, 350, 1000, 560)) # 坚果 pro1
region.save('./crop.png')
#region = img.crop((75, 315, 1167, 789)) # iPhone 7P
#im = plt.imshow(img, animated=True)
#im2 = plt.imshow(region, animated=True)
#plt.show()
# 百度OCR API ,在 https://cloud.baidu.com/product/ocr 上注册新建应用即可
api_key = 'oZokCbcX3unqb4CpGvD873Co'
api_secret = '2bNzvBQ4l4HkXAGFc3azMeinQ02ntdf2'
# 获取token
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+api_key+'&client_secret='+api_secret
headers = {
'Content-Type':'application/json;charset=UTF-8'
}
res = requests.get(url=host,headers=headers).json()
token = res['access_token']
imgByteArr = io.BytesIO()
region.save(imgByteArr, format='PNG')
image_data = imgByteArr.getvalue()
base64_data = base64.b64encode(image_data)
r = requests.post('https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic',
params={'access_token': token}, data={'image': base64_data})
result = ''
for i in r.json()['words_result']:
result += i['words']
result = urllib.parse.quote(result)
webbrowser.open('https://baidu.com/s?wd='+result)
| 26.377049 | 128 | 0.709758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 905 | 0.532666 |
04236a7fa8c2bd2c2904277f4b28429731483ed2 | 1,084 | py | Python | tools/tianchi_xray/mixup_test.py | vivym/maskrcnn-benchmark | 4e1763ae09dab1ceebafad54412df657790ec9dc | [
"MIT"
] | null | null | null | tools/tianchi_xray/mixup_test.py | vivym/maskrcnn-benchmark | 4e1763ae09dab1ceebafad54412df657790ec9dc | [
"MIT"
] | null | null | null | tools/tianchi_xray/mixup_test.py | vivym/maskrcnn-benchmark | 4e1763ae09dab1ceebafad54412df657790ec9dc | [
"MIT"
] | null | null | null | import os
import random
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
def main():
names = list(os.listdir('datasets/tianchi_xray/restricted/'))
img1 = Image.open(os.path.join('datasets/tianchi_xray/restricted', names[random.randint(0, len(names))]))
img2 = Image.open(os.path.join('datasets/tianchi_xray/restricted', names[random.randint(0, len(names))]))
plt.subplot(2, 2, 1)
plt.imshow(img1)
plt.subplot(2, 2, 2)
plt.imshow(img2)
lambd = 0.5
img1 = np.array(img1, dtype='float32')
img2 = np.array(img2, dtype='float32')
height = max(img1.shape[0], img2.shape[0])
width = max(img1.shape[1], img2.shape[1])
mixed_img = np.zeros(shape=(height, width, 3), dtype='float32')
mixed_img[:img1.shape[0], :img1.shape[1], :] = img1 * lambd
mixed_img[:img2.shape[0], :img2.shape[1], :] += img2 * (1. - lambd)
mixed_img = mixed_img.astype('uint8')
mixed_img = Image.fromarray(mixed_img)
plt.subplot(2, 2, 3)
plt.imshow(mixed_img)
plt.show()
if __name__ == '__main__':
main()
| 28.526316 | 109 | 0.653137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.135609 |
0423b04563faed43dcb0948218e34b03759c29d8 | 650 | py | Python | mockito_util/write_readme.py | edgeware/mockito-python | 7cd7bfb75dfbc94414cca770d9676d798058788a | [
"MIT"
] | 4 | 2015-05-17T08:01:25.000Z | 2016-08-14T15:23:10.000Z | mockito_util/write_readme.py | edgeware/mockito-python | 7cd7bfb75dfbc94414cca770d9676d798058788a | [
"MIT"
] | 1 | 2015-08-03T09:14:27.000Z | 2015-08-03T09:34:09.000Z | mockito_util/write_readme.py | edgeware/mockito-python | 7cd7bfb75dfbc94414cca770d9676d798058788a | [
"MIT"
] | 2 | 2015-04-23T20:16:46.000Z | 2021-04-07T11:03:22.000Z | from __future__ import print_function
import os
import re
def openFile(f, m='r'):
if (os.path.exists(f)):
return open(f, m)
else:
return open('../' + f, m)
demo_test = ' '.join(openFile('mockito_test/demo_test.py').readlines())
demo_test = demo_test.split('#DELIMINATOR')[1]
readme_before = ''.join(openFile('README').readlines())
token = 'Basic usage:'
readme_after = re.compile(token + '.*', re.S).sub(token + '\n' + demo_test, readme_before)
if (readme_before != readme_after):
readme_file = openFile('README', 'w')
readme_file.write(readme_after)
print("README updated")
else:
print("README update not required")
| 26 | 90 | 0.678462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.215385 |
0423ce64c5f4c6cd0750ca2bcda5bdd83017475b | 5,887 | py | Python | pyperformance/run.py | brandtbucher/pyperformance | f81d9f41f43418f1914c9a791c13c4591e23f399 | [
"MIT"
] | 255 | 2016-08-27T05:38:56.000Z | 2019-05-27T16:15:38.000Z | pyperformance/run.py | brandtbucher/pyperformance | f81d9f41f43418f1914c9a791c13c4591e23f399 | [
"MIT"
] | 43 | 2016-08-30T15:22:01.000Z | 2019-05-24T10:59:54.000Z | pyperformance/run.py | brandtbucher/pyperformance | f81d9f41f43418f1914c9a791c13c4591e23f399 | [
"MIT"
] | 55 | 2016-08-30T15:19:45.000Z | 2019-05-20T12:41:02.000Z | from collections import namedtuple
import hashlib
import sys
import time
import traceback
import pyperformance
from . import _utils, _python, _pythoninfo
from .venv import VenvForBenchmarks, REQUIREMENTS_FILE
from . import _venv
class BenchmarkException(Exception):
pass
class RunID(namedtuple('RunID', 'python compat bench timestamp')):
def __new__(cls, python, compat, bench, timestamp):
self = super().__new__(
cls,
python,
compat,
bench or None,
int(timestamp) if timestamp else None,
)
return self
def __str__(self):
if not self.timestamp:
return self.name
return f'{self.name}-{self.timestamp}'
@property
def name(self):
try:
return self._name
except AttributeError:
name = f'{self.python}-compat-{self.compat}'
if self.bench:
name = f'{name}-bm-{self.bench.name}'
self._name = name
return self._name
def get_run_id(python, bench=None):
py_id = _python.get_id(python, prefix=True)
compat_id = get_compatibility_id(bench)
ts = time.time()
return RunID(py_id, compat_id, bench, ts)
def run_benchmarks(should_run, python, options):
to_run = sorted(should_run)
info = _pythoninfo.get_info(python)
runid = get_run_id(info)
unique = getattr(options, 'unique_venvs', False)
if not unique:
common = VenvForBenchmarks.ensure(
_venv.get_venv_root(runid.name, python=info),
info,
upgrade='oncreate',
inherit_environ=options.inherit_environ,
)
benchmarks = {}
venvs = set()
for i, bench in enumerate(to_run):
bench_runid = runid._replace(bench=bench)
assert bench_runid.name, (bench, bench_runid)
name = bench_runid.name
venv_root = _venv.get_venv_root(name, python=info)
print()
print('='*50)
print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})')
print()
if not unique:
print('(trying common venv first)')
# Try the common venv first.
try:
common.ensure_reqs(bench)
except _venv.RequirementsInstallationFailedError:
print('(falling back to unique venv)')
else:
benchmarks[bench] = (common, bench_runid)
continue
venv = VenvForBenchmarks.ensure(
venv_root,
info,
upgrade='oncreate',
inherit_environ=options.inherit_environ,
)
try:
# XXX Do not override when there is a requirements collision.
venv.ensure_reqs(bench)
except _venv.RequirementsInstallationFailedError:
print('(benchmark will be skipped)')
print()
venv = None
venvs.add(venv_root)
benchmarks[bench] = (venv, bench_runid)
print()
suite = None
run_count = str(len(to_run))
errors = []
pyperf_opts = get_pyperf_opts(options)
import pyperf
for index, bench in enumerate(to_run):
name = bench.name
print("[%s/%s] %s..." %
(str(index + 1).rjust(len(run_count)), run_count, name))
sys.stdout.flush()
def add_bench(dest_suite, obj):
if isinstance(obj, pyperf.BenchmarkSuite):
results = obj
else:
results = (obj,)
version = pyperformance.__version__
for res in results:
res.update_metadata({'performance_version': version})
if dest_suite is not None:
dest_suite.add_benchmark(res)
else:
dest_suite = pyperf.BenchmarkSuite([res])
return dest_suite
bench_venv, bench_runid = benchmarks.get(bench)
if bench_venv is None:
print("ERROR: Benchmark %s failed: could not install requirements" % name)
errors.append(name)
continue
try:
result = bench.run(
bench_venv.python,
bench_runid,
pyperf_opts,
venv=bench_venv,
verbose=options.verbose,
)
except Exception as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append(name)
else:
suite = add_bench(suite, result)
print()
return (suite, errors)
# Utility functions
def get_compatibility_id(bench=None):
# XXX Do not include the pyperformance reqs if a benchmark was provided?
reqs = sorted(_utils.iter_clean_lines(REQUIREMENTS_FILE))
if bench:
lockfile = bench.requirements_lockfile
if lockfile and os.path.exists(lockfile):
reqs += sorted(_utils.iter_clean_lines(lockfile))
data = [
# XXX Favor pyperf.__version__ instead?
pyperformance.__version__,
'\n'.join(reqs),
]
h = hashlib.sha256()
for value in data:
h.update(value.encode('utf-8'))
compat_id = h.hexdigest()
# XXX Return the whole string?
compat_id = compat_id[:12]
return compat_id
def get_pyperf_opts(options):
opts = []
if options.debug_single_value:
opts.append('--debug-single-value')
elif options.rigorous:
opts.append('--rigorous')
elif options.fast:
opts.append('--fast')
if options.verbose:
opts.append('--verbose')
if options.affinity:
opts.append('--affinity=%s' % options.affinity)
if options.track_memory:
opts.append('--track-memory')
if options.inherit_environ:
opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
return opts
| 28.302885 | 86 | 0.584678 | 803 | 0.136402 | 0 | 0 | 301 | 0.05113 | 0 | 0 | 828 | 0.140649 |
0423e7262923e363b55991428757fd12dccb993b | 2,147 | py | Python | src/mqc/qc_filters.py | stephenkraemer/bistro | c9f63e948d20f8f1e59163f6267ad83cb70caa9d | [
"BSD-3-Clause"
] | 1 | 2020-11-09T13:41:46.000Z | 2020-11-09T13:41:46.000Z | src/mqc/qc_filters.py | stephenkraemer/bistro | c9f63e948d20f8f1e59163f6267ad83cb70caa9d | [
"BSD-3-Clause"
] | null | null | null | src/mqc/qc_filters.py | stephenkraemer/bistro | c9f63e948d20f8f1e59163f6267ad83cb70caa9d | [
"BSD-3-Clause"
] | null | null | null | """MotifPileupProcessors for QC filtering"""
# TODO: combine PhredFilter and MapqFilter to avoid cost for double
# iteration
from mqc.visitors import Visitor
from mqc.pileup.pileup import MotifPileup
import mqc.flag_and_index_values as mfl
qflag = mfl.qc_fail_flags
mflag = mfl.methylation_status_flags
from typing import Dict, Any
ConfigDict = Dict[str, Any]
class PhredFilter(Visitor):
"""Tag BSSeqPileupRead with low phred score qcfailflag bit
Reads are tagged as qcfail with the appropriate flag value if
BSSeqPileupRead.baseq_at_pos < min_phred_score.
Notes:
- phred filtering is usually done after overlap handling,
because overlap handling can be used to adjust phred scores
"""
def __init__(self, config: ConfigDict) -> None:
self.min_phred_score = config['basic_quality_filtering']['min_phred_score']
def process(self, motif_pileup: MotifPileup) -> None:
"""Check all reads and flag if appropriate
The qcfail flag is set independent of the presence of other
failure flags: overlap_flag, trimming flag and other qcfail
flags are not considered. This is by design. In different
scenarios, overlap flags are not considered (mate-stratified
methylation calling, M-bias stats), so reads must have
phred score fail informations independent of the overlap
flag. Similar situations arise for the other fail flags.
"""
for curr_read in motif_pileup.reads:
if curr_read.baseq_at_pos < self.min_phred_score:
curr_read.phred_fail_flag = 1
class MapqFilter(Visitor):
"""Tag BSSeqPileupRead with mapq fail flag
Adds mapq_fail flag to BSSeqPileupRead.qflag if
BSSeqPileupRead mapping quality is < min_mapq parameter
"""
def __init__(self, config: ConfigDict) -> None:
self.min_mapq = config['basic_quality_filtering']['min_mapq']
def process(self, motif_pileup: MotifPileup) -> None:
for curr_read in motif_pileup.reads:
if curr_read.alignment.mapping_quality < self.min_mapq:
curr_read.qc_fail_flag |= qflag.mapq_fail
| 37.666667 | 83 | 0.72054 | 1,777 | 0.827667 | 0 | 0 | 0 | 0 | 0 | 0 | 1,206 | 0.561714 |
04252521ad95d6d680ccffe8c6e5cf43ba96bcec | 1,110 | py | Python | akilib/raspberrypi/AKI_I2C_LPS25H.py | nonNoise/akilib | f111514f544ef765205faebd925d19c810121dad | [
"MIT"
] | 29 | 2015-05-28T11:20:36.000Z | 2018-09-07T07:35:08.000Z | examples/raspberrypi/akilib/raspberrypi/AKI_I2C_LPS25H.py | nonNoise/akilib | f111514f544ef765205faebd925d19c810121dad | [
"MIT"
] | null | null | null | examples/raspberrypi/akilib/raspberrypi/AKI_I2C_LPS25H.py | nonNoise/akilib | f111514f544ef765205faebd925d19c810121dad | [
"MIT"
] | 4 | 2015-07-03T08:41:19.000Z | 2018-09-07T07:35:51.000Z | #import mraa
import smbus
import time
I2C_ADDR = 0x5C
class AKI_I2C_LPS25H:
def __init__(self):
print "AKI_I2C_LPS25H"
self.i2c = smbus.SMBus(1)
def i2cReg(self,wr,addr=0x00,data=0x00):
try:
if(wr == "w"):
return self.i2c.write_byte_data(I2C_ADDR,addr,data)
elif(wr == "r"):
return self.i2c.read_byte_data(I2C_ADDR,addr)
else :
return -1
except IOError, err:
print "No ACK!"
time.sleep(0.1)
self.i2cReg(wr,addr,data)
def WHO_AM_I(self):
# "WHO_AM_I"
return self.i2cReg("r",0x0F)
def Init(self):
return self.i2cReg("w",0x20,0x90)
def Press(self):
p =0
p = p | self.i2cReg("r",0x28) <<0
p = p | self.i2cReg("r",0x29) <<8
p = p | self.i2cReg("r",0x2A) <<16
mbar = p/4096
return mbar
def Temp (self):
t = 0
t = t | self.i2cReg("r",0x2B) <<0
t = t | self.i2cReg("r",0x2C) <<8
t = -((t-1)^0xffff)
return (42.5+(t/480.0)) | 25.227273 | 67 | 0.496396 | 1,053 | 0.948649 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.068468 |
0427d648c870d7a9d365daf12a45f9e8f97edf96 | 7,326 | py | Python | figure/Figure2C_general_size_effect.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | figure/Figure2C_general_size_effect.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | figure/Figure2C_general_size_effect.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 09:58:57 2020
@author: gao
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib as mpl
import os
from matplotlib.colors import LinearSegmentedColormap
import pickle
import seaborn as sns
import pandas as pd
import ptitprince as pt
#----read lc list--------------------------------------------------
# Load the list of life cycles produced by the simulation step.
# NOTE(review): eval() on file contents -- assumes a trusted local file.
with open("../simulation/LC.txt", "r") as file:
    lcs = eval(file.readline())  # read lc list
##============================================
"""Due to the long time for abstract the data for 3+1, so we run only one times and then save the data local.
The following gray code are used for the data abstracting for 3+1 or any other data."""
##----10000 line data -------------
#with open('%s/size_tsn_line10000_v0.txt'%data_pathway, 'rb') as fp:
# t_data0 = pickle.load(fp)
#
#test_number=10000 # test the results under 1000 lines
#t_data=t_data0[0:test_number]
#
#binary_lc=np.array([1,3,6,7,12,13,20,22,23,34,36,37,52,56,57,58])-1
#
##-------------------------------------------------------------------------------
#
##---read data-----------------------
#'''proved that all optimal lc are binary splitting lcs '''
#"collect the line and its corresponding optimal lcs"
#
#end_lc=58 # 13[5], 23[6], 37[7]; 58[8]; 87[9]; 128[10] # how many lcs we care
#
#line_opLCs=[] # collect the lines and its optimal lcs
#
#for T_cluster in range(0,test_number): # how many figures or cell divisions
# all_list=[]
# for i_th in range(0,end_lc): # read all lcs data = growth rate
#
# with open('./data/data_general_size_effect/%d_%d.txt'%(T_cluster,i_th), "r") as file:
# nan=float(np.nan)
# inf=np.inf
# grate = eval(file.readline()) # read growth rate
# all_list.append(np.array([i_th, grate]))
#
# all_list_f = np.array(all_list, dtype=np.float128)
# max_value=np.amax(all_list_f[:,1])
# lc_ith=int(all_list_f[np.where(all_list_f[:,1]==max_value)][0][0])
# "check if all optimal lcs are the binary splitting lcs"
## if lc_ith not in binary_lc:
## print('Wrong!!!! \n')
## print('The line is %s'%T_cluster)
# "save the line and optimal lcs"
# line_opLCs.append(np.array([T_cluster,lc_ith]))
#
#line_opLCs=np.array(line_opLCs) # each line-id and its optimal lc
#oplc_list=set(line_opLCs[:,1])
#
##--------np.array_t-data-------------------------------------------------
###%%
#neutral_list=np.array([np.log((i+1)/(i)) for i in range(1,8)]) #neutral cases
#
#t_data_arr=np.array([np.array(i) for i in t_data]) # 10000,7
#t_data_bac=t_data_arr*neutral_list # sample dots
#
## sample to panda.data---
#
#
#
#df_list=[]
#
#for i in range(7):
# data_sample={}
# df=pd.DataFrame(data_sample)
#
# posi=[str(i+1) for j in range(test_number)]
# dots=t_data_bac[:,i]
# chi_ratio=t_data_arr[:,i]
#
# df['posi']=posi
# df['dots']=dots
# df['chi_ratio']=chi_ratio
# df_list.append(df)
#
#frames=[df_list[0],df_list[1],df_list[2],df_list[3],df_list[4], df_list[5],df_list[6]]
#sample=pd.concat(frames)
#
#line_prop=[str('Sample') for i in range(7*test_number)]
#sample['Lines']=line_prop
#
###------target lc--========-------------
#blc=5
#target_lc=line_opLCs[np.where(line_opLCs[:,1]==blc)]
#target_lines=[]
#target_lines_ratio=[]
#for i in target_lc[:,0]:
# target_lines.append(t_data_bac[i]) # original t_sn data
# target_lines_ratio.append(t_data_arr[i]) # ratio t_sn data
#target_lines=np.array(target_lines) # target dots
#target_lines_ratio=np.array(target_lines_ratio) # ratio t_sn data
#
## target to panda.data---
#df_list0=[]
#
#for i in range(7):
# data_sample={}
# df=pd.DataFrame(data_sample)
#
# posi=[str(i+1) for j in range(np.shape(target_lines)[0])]
# dots=target_lines[:,i]
# chi_ratio=target_lines_ratio[:,i]
#
# df['posi']=posi
# df['dots']=dots
# df['chi_ratio']=chi_ratio
# df_list0.append(df)
#
#frames0=[df_list0[0],df_list0[1],df_list0[2],df_list0[3],df_list0[4], df_list0[5],df_list0[6]]
#target=pd.concat(frames0)
#
#line_prop0=[str('Promoted') for i in range(7*(np.shape(target_lines)[0]))]
#target['Lines']=line_prop0
#
##======who data-===============
#combined_data=pd.concat([sample,target])
#
#sample.to_pickle('sample_data.pkl')
#combined_data.to_pickle('origin_combined_data.pkl') # dave data
#target.to_pickle('origin_target_13.pkl') # blue dots
##============================================
"""These are data that we saved which can be generated by runing the above codes."""
blc = 5
# Load the pre-computed pickles (generated by the commented-out block above).
# NOTE(review): two of these are absolute, machine-specific paths and will
# only work on the original author's machine; the relative
# 'origin_combined_data.pkl' load suggests relative paths were intended.
sample_data=pd.read_pickle('/Users/gao/Desktop/life-cycles-with-multiplayer-game/SimulationCode/v6/v2_VD_V0/v12_sizeti/code_general_size_effect/test_lines10000/sample_data.pkl') # read data
combined_data=pd.read_pickle('origin_combined_data.pkl') # read data
target=pd.read_pickle('/Users/gao/Desktop/life-cycles-with-multiplayer-game/SimulationCode/v6/v2_VD_V0/v12_sizeti/code_general_size_effect/test_lines10000/origin_target_13.pkl')
#-----------------draw figures--------------------------
#------raincloud plot----------
f,ax = plt.subplots( figsize=(12, 4))
df0=sample_data  # grey background: all sampled lines
df=target        # blue highlight: lines that promote the 3+1 life cycle
dy="chi_ratio"; dx="posi"; ort="v"; pal={""}
pal = sns.color_palette(n_colors=1)
pal0 = sns.color_palette(n_colors=2)
dodge_value=1
jitter_value=0.12
# Grey jittered strip of all sampled lines (background layer, zorder 0).
ax=sns.stripplot( x = dx, y = dy, data = df0,
                 palette={ "darkgrey"},dodge=dodge_value,
                 edgecolor = "darkgrey",size = 1, jitter = jitter_value, zorder = 0,
                 orient = ort,alpha=0.5)
#-- blue color
# Half-violin density plus jittered points for the promoted lines.
ax=pt.half_violinplot( x = dx, y = dy, data = df,
                      palette = { "#377eb8"},
                      linewidth=0.5,dodge=dodge_value,
                      bw = .2, cut = 0.,scale = "area", width = 1., inner = None,
                      orient = ort,alpha=0.8)
ax=sns.stripplot( x = dx, y = dy, data = df, palette = { "#377eb8"},
                 linewidth=0.5,dodge=dodge_value,
                 edgecolor = "#377eb8",size = 2, jitter = jitter_value, zorder = 0,
                 orient = ort,alpha=0.8)
ax.set_xlabel(r"Organism size $n$",fontsize=16)
ax.set_ylabel("Normalised cell increment" "\n" r"component $\chi_{n}$",fontsize=16)
#------remove ticks and top and right frames
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#------remove ticks and top and right frames
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('none')
plt.xlim(-.7,6.3)
#---artifical legend--------------
import matplotlib.patches as mpatches
legend_dict = { 'Sample' : 'silver', r'Promoting $3+1$' : '#377eb8' }
patchList = []
for key in legend_dict:
    data_key = mpatches.Patch(color=legend_dict[key], label=key)
    patchList.append(data_key)
ax.legend(handles=patchList,frameon=False,loc='upper center', bbox_to_anchor=(0.45, 1.13),
          shadow=None, ncol=1)
plt.ylim(0.35,1.6)
plt.show()
#f.savefig('./figure/figure_2C.pdf' % ,bbox_inches='tight' ) # save figures
| 34.556604 | 194 | 0.616025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,316 | 0.725635 |
042994e79327daa22dad9686c292c61076c1cb9d | 4,029 | py | Python | lib/jnpr/eznc/facts/swver.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/jnpr/eznc/facts/swver.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | lib/jnpr/eznc/facts/swver.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import re
class version_info(object):
    """Parsed representation of a Junos software version string.

    Splits a string such as "12.1R3.4" or "12.1X45-D10.2" into:
      major -- tuple of ints, e.g. (12, 1)
      type  -- release-type letter: one of R, B, I, X, S
      minor -- string; for X releases a (num, letter, num) tuple, e.g. (45, 'D', 10)
      build -- int build/spin number (string for I builds), or None when absent

    Instances compare against 2-tuples (major only) or 4-tuples (full
    version); see _cmp_tuple.
    """

    def __init__(self, verstr):
        """verstr - version string"""
        m1 = re.match(r'(.*)([RBIXS])(.*)', verstr)
        self.type = m1.group(2)
        self.major = tuple(map(int, m1.group(1).split('.')))  # e.g. (12, 1)
        after_type = m1.group(3).split('.')
        self.minor = after_type[0]
        if 'X' == self.type:
            # X releases look like "45-D10": split into (45, 'D', 10).
            xm = re.match(r"(\d+)-(\w)(.*)", self.minor)
            self.minor = (int(xm.group(1)), xm.group(2), int(xm.group(3)))
            self.build = int(after_type[1]) if len(after_type) > 1 else None
        elif 'I' == self.type:
            try:
                # Internal builds keep the build/spin as-is (may be non-numeric).
                self.build = after_type[1]
            except IndexError:  # narrowed from a bare except: only the missing-part case
                self.build = None
        else:
            try:
                self.build = int(after_type[1])
            except IndexError:
                # Robustness fix: "12.1R3" (no build component) used to raise.
                self.build = None
        self.as_tuple = self.major + (self.minor, self.build)

    def __repr__(self):
        # Typo fix: previously printed "junos.versino_info(...)".
        return "junos.version_info(major={major}, type={type}, minor={minor}, build={build})".format(
            major=self.major,
            type=self.type,
            minor=self.minor,
            build=self.build,
        )

    def _cmp_tuple(self, other):
        """Return the slice of as_tuple whose length matches other (2 or 4)."""
        if self.type == 'I':
            raise RuntimeError("Internal Build")
        bylen = {
            2: self.as_tuple[0:2],
            4: self.as_tuple,
        }
        return bylen[len(other)]

    def __lt__(self, other): return self._cmp_tuple(other) < other
    def __le__(self, other): return self._cmp_tuple(other) <= other
    def __gt__(self, other): return self._cmp_tuple(other) > other
    def __ge__(self, other): return self._cmp_tuple(other) >= other
    def __eq__(self, other): return self._cmp_tuple(other) == other
    def __ne__(self, other): return self._cmp_tuple(other) != other
def software_version(junos, facts):
    """Populate version facts from the device's 'show version' output.

    junos -- device object exposing .cli() / .rpc.cli() RPC helpers
    facts -- dict of previously gathered facts; reads 'personality' and
             'master', writes 'version', per-RE 'version_RE<n>' keys,
             'vc_capable', '2RE' and 'version_info'.
    """
    f_persona = facts.get('personality')
    f_master = facts.get('master')

    # ---------------------------------------------------------------------------
    # run the right RPC to get the software information
    # ---------------------------------------------------------------------------

    if f_persona == 'MX':
        # NOTE(review): this branch calls junos.cli(...) while the others use
        # junos.rpc.cli(...) -- confirm both entry points are equivalent.
        x_swver = junos.cli("show version invoke-on all-routing-engines", format='xml')
    elif f_persona == 'SWITCH':
        ## most EX switches support the virtual-chassis feature, so the
        ## 'all-members' option would be valid.  In some products this option
        ## is not valid (i.e. not vc-capable), so we try the VC form first and
        ## fall back to non-VC if it throws an exception.
        try:
            x_swver = junos.rpc.cli("show version all-members", format='xml')
        except:
            facts['vc_capable'] = False
            x_swver = junos.rpc.cli("show version", format='xml')
        else:
            facts['vc_capable'] = True
    else:
        x_swver = junos.rpc.cli("show version", format='xml')

    # ---------------------------------------------------------------------------
    # extract the version information out of the RPC response
    # ---------------------------------------------------------------------------

    if x_swver.tag == 'multi-routing-engine-results':
        # Dual-RE reply: record one version fact per routing engine.
        facts['2RE'] = True

        for re_sw in x_swver.xpath('.//software-information'):
            re_name = re_sw.xpath('preceding-sibling::re-name')[0].text
            # Keep just the RE's digit (e.g. "re0" -> "0").
            m = re.search('(\d)', re_name)
            re_name = m.group(0)
            # Version string is the bracketed part of the first package comment.
            pkginfo = re_sw.xpath('package-information[1]/comment')[0].text
            facts['version_RE'+re_name] = re.findall(r'\[(.*)\]', pkginfo)[0]

        # The overall version is the master RE's version.
        master = f_master[0] if isinstance(f_master,list) else f_master
        facts['version'] = facts['version_'+master]
    else:
        # Single-RE reply: take the 'junos' package's bracketed version.
        pkginfo = x_swver.xpath('.//package-information[name = "junos"]/comment')[0].text
        facts['version'] = re.findall(r'\[(.*)\]', pkginfo)[0]

    # ---------------------------------------------------------------------------
    # create a 'version_info' object based on the master version
    # ---------------------------------------------------------------------------

    facts['version_info'] = version_info(facts['version'])
| 39.116505 | 114 | 0.561181 | 1,789 | 0.444031 | 0 | 0 | 0 | 0 | 0 | 0 | 1,651 | 0.409779 |
042a1dff477ec006dda477d8738dfe23bcc7b467 | 9,358 | py | Python | modules.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | null | null | null | modules.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | 1 | 2021-07-07T13:36:18.000Z | 2021-07-07T13:36:18.000Z | modules.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, layers, Input
from collections import namedtuple
def abs_criterion(pred, target):
    """Mean absolute (L1) error between pred and target."""
    diff = pred - target
    return tf.reduce_mean(tf.abs(diff))
def mae_criterion(pred, target):
    """Mean of the squared difference between pred and target.

    Note: despite the name, this is mean *squared* error, not MAE.
    """
    err = pred - target
    return tf.reduce_mean(err ** 2)
def sce_criterion(logits, labels):
    """Mean sigmoid cross-entropy between logits and labels."""
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
    return tf.reduce_mean(per_element)
def softmax_criterion(logits, labels):
    """Mean softmax cross-entropy between logits and labels."""
    per_element = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    return tf.reduce_mean(per_element)
def padding(x, p=3):
    """Reflect-pad dimensions 1 and 2 of a 4-D tensor by p on each side.

    Batch (dim 0) and channel (dim 3) axes are left untouched.
    """
    pad_spec = [[0, 0], [p, p], [p, p], [0, 0]]
    return tf.pad(x, pad_spec, "REFLECT")
class InstanceNorm(layers.Layer):
    """Instance normalization with learnable scale and offset.

    Each sample is normalized over axes (1, 2), then an affine transform
    is applied: scale initialized ~ N(1.0, 0.02), offset initialized at 0.
    """

    def __init__(self, input_shape):
        super(InstanceNorm, self).__init__()
        # Learnable affine parameters, one per trailing-shape element.
        self.scale = tf.Variable(
            initial_value=np.random.normal(1.0, 0.02, input_shape),
            trainable=True,
            name="SCALE",
            dtype=tf.float32,
        )
        self.offset = tf.Variable(
            initial_value=np.zeros(input_shape),
            trainable=True,
            name="OFFSET",
            dtype=tf.float32,
        )

    def call(self, x, epsilon=1e-5):
        # Per-instance moments over the two middle axes.
        mu, var = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        x_hat = (x - mu) * tf.math.rsqrt(var + epsilon)
        return self.scale * x_hat + self.offset
class Padding(layers.Layer):
    """Keras-layer wrapper around reflect padding of dimensions 1 and 2."""

    def __init__(self):
        super(Padding, self).__init__()

    def call(self, x, p=3):
        pad_spec = [[0, 0], [p, p], [p, p], [0, 0]]
        return tf.pad(x, pad_spec, "REFLECT")
class ResNetBlock(layers.Layer):
    """Residual block: two reflect-padded 'valid' convolutions plus a skip.

    Each convolution is preceded by a reflect pad of (ks-1)//2 so the
    spatial size is preserved, and followed by instance normalization.
    The input is added back before the final ReLU.
    """

    def __init__(self):
        super(ResNetBlock, self).__init__()

    def call(self, x, dim, k_init, ks=3, s=1):
        # Pad so the following valid convolution keeps the spatial size.
        pad = (ks - 1) // 2

        def pad_conv_norm(tensor, pad_name):
            # reflect pad -> valid conv -> instance norm (no activation here)
            out = layers.Lambda(padding, arguments={"p": pad}, name=pad_name)(tensor)
            out = layers.Conv2D(
                filters=dim,
                kernel_size=ks,
                strides=s,
                padding="valid",
                kernel_initializer=k_init,
                use_bias=False,
            )(out)
            return InstanceNorm(out.shape[-1:])(out)

        h = pad_conv_norm(x, "PADDING_1")
        h = layers.ReLU()(h)
        h = pad_conv_norm(h, "PADDING_2")
        # Residual connection, then the final activation.
        return layers.ReLU()(h + x)
# def instance_norm(x, epsilon=1e-5):
# scale = tf.Variable(
# initial_value=np.random.normal(1.0, 0.02, x.shape[-1:]),
# trainable=True,
# name="SCALE",
# dtype=tf.float32,
# )
# offset = tf.Variable(
# initial_value=np.zeros(x.shape[-1:]),
# trainable=True,
# name="OFFSET",
# dtype=tf.float32,
# )
# mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
# inv = tf.math.rsqrt(variance + epsilon)
# normalized = (x - mean) * inv
# return scale * normalized + offset
def build_discriminator(options, name="Discriminator"):
    """Build the discriminator network.

    Maps a (time_step, pitch_range, output_nc) input to a spatial map of
    unnormalized scores: two stride-2 convolutions with LeakyReLU
    (instance-normalized after the second), then a 1-filter projection.
    """
    k_init = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(shape=(options.time_step, options.pitch_range, options.output_nc))

    # (batch, 64, 84, 1) -> (batch, 32, 42, df_dim)
    h = layers.Conv2D(
        filters=options.df_dim, kernel_size=7, strides=2, padding="same",
        kernel_initializer=k_init, use_bias=False, name="CONV2D_1",
    )(inputs)
    h = layers.LeakyReLU(alpha=0.2)(h)

    # (batch, 32, 42, df_dim) -> (batch, 16, 21, df_dim * 4)
    h = layers.Conv2D(
        filters=options.df_dim * 4, kernel_size=7, strides=2, padding="same",
        kernel_initializer=k_init, use_bias=False, name="CONV2D_2",
    )(h)
    h = InstanceNorm(h.shape[-1:])(h)
    h = layers.LeakyReLU(alpha=0.2)(h)

    # (batch, 16, 21, df_dim * 4) -> (batch, 16, 21, 1) score map
    h = layers.Conv2D(
        filters=1, kernel_size=7, strides=1, padding="same",
        kernel_initializer=k_init, use_bias=False, name="CONV2D_3",
    )(h)

    return Model(inputs=inputs, outputs=h, name=name)
def build_generator(options, name="Generator"):
    """Build the generator network: encoder -> 10 residual blocks -> decoder.

    Input and output share the shape (time_step, pitch_range, output_nc);
    the final activation is a sigmoid, so outputs lie in [0, 1].
    """
    initializer = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(shape=(options.time_step, options.pitch_range, options.output_nc))
    x = inputs

    # (batch * 64 * 84 * 1)
    # Reflect-pad so the first 7x7 'valid' conv keeps the spatial size.
    x = layers.Lambda(padding, name="PADDING_1")(x)
    # (batch * 70 * 90 * 1)

    x = layers.Conv2D(
        filters=options.gf_dim,
        kernel_size=7,
        strides=1,
        padding="valid",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_1",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 64 * 84 * 64)

    # Two stride-2 downsampling convolutions.
    x = layers.Conv2D(
        filters=options.gf_dim * 2,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 32 * 42 * 128)

    x = layers.Conv2D(
        filters=options.gf_dim * 4,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_3",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 16 * 21 * 256)

    # Bottleneck: 10 residual blocks at constant resolution.
    for i in range(10):
        # x = resnet_block(x, options.gf_dim * 4)
        x = ResNetBlock()(x, options.gf_dim * 4, initializer)
    # (batch * 16 * 21 * 256)

    # Two stride-2 transposed convolutions mirror the downsampling path.
    x = layers.Conv2DTranspose(
        filters=options.gf_dim * 2,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="DECONV2D_1",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 32 * 42 * 128)

    x = layers.Conv2DTranspose(
        filters=options.gf_dim,
        kernel_size=3,
        strides=2,
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="DECONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.ReLU()(x)
    # (batch * 64 * 84 * 64)

    x = layers.Lambda(padding, name="PADDING_2")(x)
    # After padding, (batch * 70 * 90 * 64)

    # Final 7x7 'valid' conv projects back to output_nc channels with sigmoid.
    x = layers.Conv2D(
        filters=options.output_nc,
        kernel_size=7,
        strides=1,
        padding="valid",
        kernel_initializer=initializer,
        activation="sigmoid",
        use_bias=False,
        name="CONV2D_4",
    )(x)
    # (batch * 64 * 84 * 1)

    outputs = x
    return Model(inputs=inputs, outputs=outputs, name=name)
def build_discriminator_classifier(options, name="Discriminator_Classifier"):
    """Build a two-class classifier over the (time_step, pitch_range) input.

    A stack of rectangular-kernel convolutions progressively collapses the
    pitch axis (kernel [1,12]) and then the time axis ([4,1], [2,1], [8,1]),
    ending in a (batch, 2) logit tensor.
    """
    initializer = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(shape=(options.time_step, options.pitch_range, options.output_nc))
    x = inputs

    # (batch * 64, 84, 1)
    # Collapse the pitch axis 84 -> 7 (kernel/stride [1, 12]).
    x = layers.Conv2D(
        filters=options.df_dim,
        kernel_size=[1, 12],
        strides=[1, 12],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_1",
    )(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 64 * 7 * 64)

    # Collapse the time axis 64 -> 16.
    x = layers.Conv2D(
        filters=options.df_dim * 2,
        kernel_size=[4, 1],
        strides=[4, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_2",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 16 * 7 * 128)

    # Time axis 16 -> 8.
    x = layers.Conv2D(
        filters=options.df_dim * 4,
        kernel_size=[2, 1],
        strides=[2, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_3",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 8 * 7 * 256)

    # Time axis 8 -> 1.
    x = layers.Conv2D(
        filters=options.df_dim * 8,
        kernel_size=[8, 1],
        strides=[8, 1],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_4",
    )(x)
    x = InstanceNorm(x.shape[-1:])(x)
    x = layers.LeakyReLU(alpha=0.2)(x)
    # (batch * 1 * 7 * 512)

    # Remaining axis 7 -> 1 with 2 output channels (the class logits).
    x = layers.Conv2D(
        filters=2,
        kernel_size=[1, 7],
        strides=[1, 7],
        padding="same",
        kernel_initializer=initializer,
        use_bias=False,
        name="CONV2D_5",
    )(x)
    # (batch * 1 * 1 * 2)

    # Flatten to a plain (batch, 2) logit tensor.
    x = tf.reshape(x, [-1, 2])
    # (batch * 2)

    outputs = x
    return Model(inputs=inputs, outputs=outputs, name=name)
if __name__ == "__main__":
    # Smoke test: build the generator with default hyperparameters and
    # print its layer summary.
    OPTIONS = namedtuple(
        "OPTIONS",
        "batch_size "
        "time_step "
        "input_nc "
        "output_nc "
        "pitch_range "
        "gf_dim "
        "df_dim ",
    )
    # batch=128, 64 time steps, 1 channel in/out, 84 pitches, 64 base filters
    # for both generator (gf_dim) and discriminator (df_dim).
    options = OPTIONS._make((128, 64, 1, 1, 84, 64, 64))

    model = build_generator(options)
    print(model.summary())
| 25.498638 | 85 | 0.556315 | 2,093 | 0.223659 | 0 | 0 | 0 | 0 | 0 | 0 | 1,762 | 0.188288 |
042bf56fcdd938bcd7ece04e0e049f1f49f9d9ac | 4,910 | py | Python | app.py | chaipi-chaya/Boston-Crime-Analysis | ffb5065d594f372c73eb830117bbff78ffd91c06 | [
"MIT"
] | 2 | 2020-01-02T15:28:17.000Z | 2020-02-04T23:35:58.000Z | app.py | chaipi-chaya/Boston-Crime-Analysis | ffb5065d594f372c73eb830117bbff78ffd91c06 | [
"MIT"
] | null | null | null | app.py | chaipi-chaya/Boston-Crime-Analysis | ffb5065d594f372c73eb830117bbff78ffd91c06 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import render_template
from flask import request,session, redirect, url_for, escape,send_from_directory
import requests
import json
app = Flask(__name__, static_url_path='')
def predictor(tavg, model, degree):
    """Evaluate a fitted polynomial regression model at temperature `tavg`.

    tavg   -- average temperature (float)
    model  -- dict with 'coef' ([[c0, c1, ..., c_degree]]; c0 is unused,
              the constant term comes from 'intercept' ([b]) instead)
    degree -- polynomial degree; the original code special-cased 3 and 4,
              this generalization handles any degree >= 1 identically.

    Returns the prediction rounded to 0 decimal places (a float).
    """
    coefs = model['coef'][0]
    # intercept + sum of c_i * tavg**i for i = 1..degree -- exactly the
    # expansion the old per-degree branches spelled out by hand.
    y = model['intercept'][0] + sum(coefs[i] * tavg ** i for i in range(1, degree + 1))
    return round(y, 0)
# --- simple page routes: each handler just renders its static template ---

@app.route("/", methods=['GET','POST'])
def start():
    """Landing page."""
    return render_template('index.html')

@app.route("/code_explanation", methods=['GET','POST'])
def explanation():
    """Code explanation page."""
    return render_template('code_explanation.html')

@app.route("/web_scraping", methods=['GET','POST'])
def scraping():
    """Web scraping page."""
    return render_template('web_scraping.html')

@app.route("/preprocessing", methods=['GET','POST'])
def preprocessing():
    """Data preprocessing page."""
    return render_template('data_preprocess.html')

@app.route("/analyzing", methods=['GET','POST'])
def analyzing():
    """Overall data analysis page."""
    return render_template('data_analysis_overall.html')

@app.route("/regression", methods=['GET','POST'])
def regression():
    """Regression analysis page."""
    return render_template('regression_analysis.html')

@app.route("/pattern", methods=['GET','POST'])
def pattern():
    """Crime pattern analysis page."""
    return render_template('crime_pattern_analysis.html')

@app.route("/short_patrol", methods=['GET','POST'])
def short_patrol():
    """Shortest patrol route page."""
    return render_template('shortest_patrol_route.html')
@app.route("/markov_chain_demo", methods=['GET','POST'])
def mcdemo():
    """Render the Markov-chain demo page with the serialized MC model.

    Fix: the model file is now opened with a context manager so the handle
    is closed after reading (the old code leaked it on every request).
    """
    with open("model/MC_and_lamda.txt", "r") as fh:
        data = json.loads(fh.read())
    return render_template('markov_chain_demo.html', data=data)
@app.route("/regression_api", methods=['GET','POST'])
def regression_api():
    """Regression API page."""
    return render_template('regression_api.html')
@app.route("/predict_crime_by_temp", methods=['GET','POST'])
def perdict():
    """Predict the crime count for the 'temp' query parameter.

    Picks the degree-4 polynomial model for temperatures >= 25 and the
    degree-3 model below that, then returns the prediction as a string
    (or "error: no input" when the parameter is missing).

    NOTE(review): the function name 'perdict' is a typo; it is kept because
    Flask derives the endpoint name (used by url_for) from it.
    """
    # load model
    temp = request.args.get('temp')
    if temp is None:
        return "error: no input"
    # Fix: context manager closes the model file (old code leaked the handle).
    with open("model/regression.txt", "r") as fh:
        models = json.loads(fh.read())
    temp_value = float(temp)  # hoisted: was converted twice before
    if temp_value >= 25:
        degree = 4
        model = models['regressorpoly4']
    else:
        degree = 3
        model = models['regressorpoly325']
    return str(predictor(temp_value, model, degree))
@app.route("/crime_forecast", methods=['GET','POST'])
def forecast():
    """Scrape the 10-day Boston forecast and predict crimes per day.

    Parses weather.com's ten-day page for hi/lo temperatures, averages
    each day's pair, runs the mean through the regression model and
    renders the forecast template.

    Fix: the model file is now opened with a context manager (the old
    code leaked the handle on every request).
    NOTE(review): the HTML parsing is tightly coupled to weather.com's
    markup and will raise IndexError if the page layout changes.
    """
    r = requests.get('https://weather.com/weather/tenday/l/Boston+MA?canonicalCityId=6320cadd3d539b434b5a45c094becf3edbe8ea88958185a2287a801115c9ae30')
    lines = r.text.split('\n')
    conditions = []
    n = 0
    for line in lines:
        if '<td class="temp" headers="hi-lo"' in line:
            n += 1
            if n == 1:
                # The first matching row is skipped (NOTE(review): presumably
                # today's row with a different layout -- confirm).
                continue
            condition = {}
            if len(line.split("</tr>")) == 2:
                # Single table row on this line: pull day name and hi/lo temps.
                condition['day'] = line.split('<span class="day-detail')[1].split("</span>")[0].split('>')[1]
                hi = line.split('<td class="temp" headers="hi-lo"')[1].split("</sup>")[0].split('<span class="">')[1].split('<sup>')[0]
                low = line.split('<td class="temp" headers="hi-lo"')[1].split("</sup>")[1].split('<span class="">')[1].split('<sup>')[0]
                condition['temp'] = (float(hi)+float(low))/2
                conditions.append(condition)
            else:
                # Several table rows collapsed onto one line: split and parse each.
                tr = line.split("</tr>")
                for i in range(len(tr)):
                    condition = {}
                    if i < len(tr) - 1:
                        condition['day'] = tr[i].split('<span class="day-detail')[1].split("</span>")[0].split('>')[1]
                        td = tr[i].split("hi-lo")
                        hi = td[1].split("</sup>")[0].split('<span class="">')[1].split('<sup>')[0]
                        low = td[1].split("</sup>")[1].split('<span class="">')[1].split('<sup>')[0]
                        condition['temp'] = (float(hi)+float(low))/2
                        conditions.append(condition)
    crimes = []
    temps = []
    days = []
    with open("model/regression.txt", "r") as fh:
        models = json.loads(fh.read())
    for cond in conditions:
        # Same model-selection rule as /predict_crime_by_temp.
        if float(cond['temp']) >= 25:
            degree = 4
            model = models['regressorpoly4']
        else:
            degree = 3
            model = models['regressorpoly325']
        days.append(cond['day'])
        temps.append(cond['temp'])
        crimes.append(float(predictor(float(cond['temp']), model, degree)))
    return render_template('forecast.html', days = days, temps = temps, crimes = crimes)
# NOTE(review): these two lines run at import time, so merely importing
# this module opens a browser tab -- consider moving them under the
# __main__ guard below.
import webbrowser
webbrowser.open_new_tab("http://localhost:5000/")

if __name__ == "__main__":
    # Serve on localhost only, debug disabled.
    app.run(host= '127.0.0.1',debug=False)
| 35.323741 | 151 | 0.563951 | 0 | 0 | 0 | 0 | 3,997 | 0.814053 | 0 | 0 | 1,354 | 0.275764 |