| hexsha (string, length 40) | size (int64, 5 to 2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 3 to 248) | max_stars_repo_name (string, length 5 to 125) | max_stars_repo_head_hexsha (string, length 40 to 78) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 3 to 248) | max_issues_repo_name (string, length 5 to 125) | max_issues_repo_head_hexsha (string, length 40 to 78) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 3 to 248) | max_forks_repo_name (string, length 5 to 125) | max_forks_repo_head_hexsha (string, length 40 to 78) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 5 to 2.06M) | avg_line_length (float64, 1 to 1.02M) | max_line_length (int64, 3 to 1.03M) | alphanum_fraction (float64, 0 to 1) | count_classes (int64, 0 to 1.6M) | score_classes (float64, 0 to 1) | count_generators (int64, 0 to 651k) | score_generators (float64, 0 to 1) | count_decorators (int64, 0 to 990k) | score_decorators (float64, 0 to 1) | count_async_functions (int64, 0 to 235k) | score_async_functions (float64, 0 to 1) | count_documentation (int64, 0 to 1.04M) | score_documentation (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fe2e12189f5c7bd5c301d8cd6a29b000ff6951
| 4,352
|
py
|
Python
|
origin_check.py
|
mikispag/OriginCheck
|
b3bda26c382cdbfd78bddc11d99d6e8723255599
|
[
"MIT"
] | 1
|
2020-08-19T06:53:24.000Z
|
2020-08-19T06:53:24.000Z
|
origin_check.py
|
mikispag/OriginCheck
|
b3bda26c382cdbfd78bddc11d99d6e8723255599
|
[
"MIT"
] | null | null | null |
origin_check.py
|
mikispag/OriginCheck
|
b3bda26c382cdbfd78bddc11d99d6e8723255599
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import concurrent.futures
import logging
import requests
from sys import argv, exit
from urllib.parse import urlparse
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
HEADERS = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.30 Safari/537.36'
}
MIN_RESPONSE_LENGTH = 100
NUM_WORKERS = 50
urls = []
if len(argv) < 2:
exit("Please specify a URLs file.")
with open(argv[1]) as f:
urls = [line.rstrip() for line in f]
def check(url):
# Issue a GET request
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
response_size = len(r.text)
if r.status_code != 200 or response_size < MIN_RESPONSE_LENGTH:
logging.debug("Ignoring %s: response %d, response size %d.",
url, r.status_code, response_size)
return None
# Issue a second request to check for stability (200 + same response size)
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
if r.status_code != 200 or response_size != len(r.text):
logging.debug("URL %s is unstable.", url)
return None
logging.info("URL %s is stable.", url)
# If the URL is stable, try adding a same-origin Origin header
parsed_url = urlparse(r.url)
origin = parsed_url.scheme + '://' + parsed_url.netloc
logging.debug('Sending same-origin Origin %s for %s...', origin, url)
result = {
'url': url,
'SAMEORIGIN_OK': False,
'CROSSORIGIN_OK': False,
'SAMEORIGIN_KO_STATUS': False,
'SAMEORIGIN_KO_RESPONSE': False,
'CROSSORIGIN_KO_STATUS': False,
'CROSSORIGIN_KO_RESPONSE': False
}
r = requests.get(url, timeout=5, allow_redirects=False,
headers={**HEADERS, **{'Origin': origin}})
if r.status_code != 200:
logging.info(
"[SAME ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['SAMEORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[SAME ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['SAMEORIGIN_KO_RESPONSE'] = True
return result
result['SAMEORIGIN_OK'] = True
# If same-origin Origin header is OK, try a cross-origin one.
logging.debug('Sending cross-origin Origin for URL %s.', url)
r = requests.get(url, timeout=5, allow_redirects=False, headers={
**HEADERS, **{'Origin': 'https://example.org'}})
if r.status_code != 200:
logging.info(
"[CROSS ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['CROSSORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[CROSS ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['CROSSORIGIN_KO_RESPONSE'] = True
return result
result['CROSSORIGIN_OK'] = True
return result
with open('results.csv', 'w') as w:
print('url,SAMEORIGIN_OK,CROSSORIGIN_OK,SAMEORIGIN_KO_STATUS,SAMEORIGIN_KO_RESPONSE,CROSSORIGIN_KO_STATUS,CROSSORIGIN_KO_RESPONSE', file=w)
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_result = {executor.submit(check, url): url for url in urls}
for future in concurrent.futures.as_completed(future_to_result):
try:
result = future.result()
except Exception:
continue
else:
if result:
print('{},{},{},{},{},{},{}'.format(result['url'],
int(result['SAMEORIGIN_OK']),
int(result['CROSSORIGIN_OK']),
int(result['SAMEORIGIN_KO_STATUS']),
int(result['SAMEORIGIN_KO_RESPONSE']),
int(result['CROSSORIGIN_KO_STATUS']),
int(result['CROSSORIGIN_KO_RESPONSE'])
), file=w)
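# --- Editorial usage sketch; the input file name "urls.txt" is hypothetical ---
# Run the script with a newline-separated list of URLs:
#   $ python3 origin_check.py urls.txt
# Each URL that returns a stable 200 response is probed again with a same-origin
# Origin header and then a cross-origin one; results.csv records, per URL,
# whether either probe changed the status code or the response size.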
| 39.563636
| 143
| 0.584789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,361
| 0.31273
|
f7ff07662b3e96ced8491b8279428f96107213e1
| 743
|
py
|
Python
|
orange3/Orange/preprocess/setup.py
|
rgschmitz1/BioDepot-workflow-builder
|
f74d904eeaf91ec52ec9b703d9fb38e9064e5a66
|
[
"MIT"
] | 54
|
2017-01-08T17:21:49.000Z
|
2021-11-02T08:46:07.000Z
|
orange3/Orange/preprocess/setup.py
|
Synthia-3/BioDepot-workflow-builder
|
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
|
[
"MIT"
] | 22
|
2017-03-28T06:03:14.000Z
|
2021-07-28T05:43:55.000Z
|
orange3/Orange/preprocess/setup.py
|
Synthia-3/BioDepot-workflow-builder
|
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
|
[
"MIT"
] | 21
|
2017-01-26T21:12:09.000Z
|
2022-01-31T21:34:59.000Z
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
import os
import numpy
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
libraries = []
if os.name == "posix":
libraries.append("m")
config = Configuration("preprocess", parent_package, top_path)
for source in ("_discretize.c", "_relieff.cpp"):
config.add_extension(
source.rsplit(".", 1)[0],
sources=[source],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
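# --- Editorial note: numpy.distutils (used above) is deprecated upstream. A
# rough setuptools-based sketch of the same two extensions is shown below;
# the bare module names are an assumption, not taken from this file. ---
#   from setuptools import Extension, setup
#   import numpy
#   extensions = [
#       Extension("_discretize", sources=["_discretize.c"],
#                 include_dirs=[numpy.get_include()]),
#       Extension("_relieff", sources=["_relieff.cpp"],
#                 include_dirs=[numpy.get_include()]),
#   ]
#   setup(ext_modules=extensions)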
| 24.766667
| 66
| 0.644684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.197847
|
f7ff646590489831f35fa9fe7ca9c0fe9f2f76be
| 592
|
py
|
Python
|
ProjectEuler_plus/euler_042.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_042.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
ProjectEuler_plus/euler_042.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
from math import sqrt
# (n * (n + 1)) / 2 -> n ** 2 + n - (2 * x)
# Solved with quadratic equation
# https://en.wikipedia.org/wiki/Quadratic_equation
for _ in range(int(input().strip())):
t = int(input().strip())
d = (sqrt(4 * 2 * t + 1) - 1)
if d.is_integer():
print(int(d) // 2)
else:
print(-1)
def e42():
for _ in range(int(input().strip())):
n = int(input().strip())
root = int(sqrt(2 * n))
if (root * (root + 1)) // 2 == n:
print(root)
else:
print(-1)
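# --- Editorial worked example; the helper name below is hypothetical and not
# part of the original solution. A triangle number satisfies T = n*(n+1)/2,
# i.e. n**2 + n - 2*T = 0, whose positive root is n = (sqrt(8*T + 1) - 1) / 2.
def _triangle_index(t):
    """Return n if t is the n-th triangle number, else -1."""
    d = sqrt(8 * t + 1) - 1  # equals 2*n; even whenever it is an integer
    return int(d) // 2 if d.is_integer() else -1
# Example: _triangle_index(55) == 10 because 10 * 11 / 2 == 55,
# while _triangle_index(54) == -1.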
| 21.925926
| 52
| 0.489865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.255068
|
7900515320c3b3319c03f61841dc3f24a082e7f3
| 12,476
|
py
|
Python
|
src/lpb.py
|
RobbinBouwmeester/LIT
|
0516a69fbf1b8e9976524e0c243f82de041df544
|
[
"Apache-2.0"
] | null | null | null |
src/lpb.py
|
RobbinBouwmeester/LIT
|
0516a69fbf1b8e9976524e0c243f82de041df544
|
[
"Apache-2.0"
] | null | null | null |
src/lpb.py
|
RobbinBouwmeester/LIT
|
0516a69fbf1b8e9976524e0c243f82de041df544
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2017 Robbin Bouwmeester
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
__author__ = "Robbin Bouwmeester"
__copyright__ = "Copyright 2017"
__credits__ = ["Robbin Bouwmeester"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Robbin Bouwmeester"
__email__ = "Robbin.bouwmeester@ugent.be"
__status__ = "nightly funzies"
import pandas as pd
from itertools import groupby
import logging
class LipidBLAST_entry():
def __init__(self,
name="",
ion="",
mw=0.0,
chem_form="",
num_ms2_peaks=0,
f_acyl_lengths=[],
unsats=[],
ms2=[]):
self.name = name
self.ion = ion
self.mw = mw
self.chem_form = chem_form
self.num_ms2_peaks = num_ms2_peaks
self.ms2 = ms2
self.f_acyl_lengths = f_acyl_lengths
self.unsats = unsats
def __str__(self):
ret_string = []
ret_string.append("================")
ret_string.append("")
ret_string.append("Lipid: %s" % (self.name))
ret_string.append("MW: %s" % (self.mw))
ret_string.append("Formula: %s" % (self.chem_form))
ret_string.append ("")
for f in self.ms2:
ret_string.append("%s\t%s\t%s" % (f[0],f[1],f[2]))
ret_string.append("")
ret_string.append("================")
return("\n".join(ret_string))
class LipidBLAST():
def __init__(self,
f_names=["LipidBlast-pos.msp","LipidBlast-neg.msp"],
min_acyl_length=10,
exclude_lyso=False,
include_ions=["[M-H]-"], #,"[M+]","[M+H]+","[M+NH4]+","[M-H]-","[M-2H](2-)","[M-Ac-H]-","[M+Na2-H]+","[M+]","[M+NH4]+","[M+Na]+","[M-2H](2-)","[M-Ac-H]-" "[M+]","[M+H]+","[M+NH4]+","[M-H]-","[M-2H](2-)","[M-Ac-H]-","[M+Na2-H]+","[M+]","[M+NH4]+","[M+Na]+","[M-2H](2-)","[M-Ac-H]-"
include_class=["PE","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro"], #,"SM","TG","CL", #,"SM","TG","CL","GPSer","GPCho","PC","GPA","PE","GPIns","GPEtn","GPGro
aggregate_acyls=False,
use_simplified_names=True,
dalt_diff_lookup_bin=1):
self.f_names = f_names
self.min_acyl_length = min_acyl_length
self.exclude_lyso = exclude_lyso
self.include_ions = include_ions
self.include_class = include_class
self.use_simplified_names = use_simplified_names
self.dalt_diff_lookup_bin = dalt_diff_lookup_bin
self.aggregate_acyls = aggregate_acyls
self.lpb_dict = {}
self.ms1_dict = {}
self.ms1_dict_lookup = {}
self.tot_entr_read = 0
if len(self.f_names) > 0:
for f_name in f_names:
self.read_lpb(f_name)
def __str__(self):
ret_string = []
ret_string.append("Filenames: %s" % (self.f_names))
ret_string.append("Min acyl length: %s" % (self.min_acyl_length))
ret_string.append("Exclude lyso: %s" % (self.exclude_lyso))
ret_string.append("Include ions: %s" % (self.include_ions))
ret_string.append("Include lipid classes: %s" % (self.include_class))
ret_string.append("Use simplified names: %s" % (self.use_simplified_names))
ret_string.append("Lookup diff: %s Da" % (self.dalt_diff_lookup_bin))
ret_string.append("Total entries read: %s" % (self.tot_entr_read))
return("\n".join(ret_string))
def read_lpb(self,f_name):
def _get_general_info(name):
# Currently limited to max 9 unsats
unsats = [n[0] for n in name.split(":")[1:]]
class_name = name.split("(")[0]
if "-" in class_name:
name_split = name.split("(")
name_split[0] = name.split("(")[0].replace("-","")
name = "(".join(name_split)
acyl_lengths = name.split(":")
acyl_lengths.pop()
f_acyl_lengths = []
for acl in acyl_lengths:
try:
if "/" in acl:
f_acyl_lengths.append(acl.split("/")[1].replace("d","").replace("methyl-",""))
elif "-" in acl:
f_acyl_lengths.append(acl.split("-")[1].replace("d","").replace("methyl-",""))
else:
f_acyl_lengths.append(acl.split("(")[1].replace("d","").replace("methyl-",""))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
try:
f_acyl_lengths = list(map(int,f_acyl_lengths))
unsats = list(map(int,unsats))
except:
logging.warning("Could not format to get acyl lengths: %s" % (name))
return([0],[0],"")
return(f_acyl_lengths,unsats,class_name)
def _simplify_name(class_name,acyls,unsats):
simplified_name = ""
simplified_name += class_name
simplified_name += "("
if not self.aggregate_acyls:
for f,u in zip(f_acyl_lengths,unsats):
simplified_name += str(f)
simplified_name += ":"
simplified_name += str(u)
simplified_name += "/"
simplified_name = simplified_name[:-1]
else:
simplified_name += str(sum(f_acyl_lengths))
simplified_name += ":"
simplified_name += str(sum(unsats))
simplified_name += ")"
return(simplified_name)
def _get_chem_form(chem_form_native,ion):
chem_form_ion = ""
for i,c in enumerate(chem_form_native):
if i+1 >= len(chem_form_native):
if c.isdigit(): chem_form_ion += c
else:
chem_form_ion += c
chem_form_ion += "1"
elif c.isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isdigit(): chem_form_ion += c
elif c.isupper() and chem_form_native[i+1].isupper():
chem_form_ion += c
chem_form_ion += "1"
elif chem_form_native[i+1].isdigit(): chem_form_ion += c
list_chem= [''.join(g) for _, g in groupby(chem_form_ion, str.isalpha)]
chem_form_ion = dict(zip(list_chem[::2],map(int,list_chem[1::2])))
if "+" not in ion:
if "[M-H]-" in ion:
try: chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-2H](2-)" in ion:
try: chem_form_ion["H"] -= 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
if "[M-Ac-H]-" in ion:
try:
chem_form_ion["C"] += 2
chem_form_ion["H"] += 3
chem_form_ion["O"] += 2
except KeyError: logging.critical("ERROR: could not subtract atom when getting the ionized form from the molecule")
else:
if "[M+H]+" in ion:
try: chem_form_ion["H"] += 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+NH4]+" in ion:
try:
if chem_form_ion.has_key("N"): chem_form_ion["N"] += 1
else: chem_form_ion["N"] = 1
chem_form_ion["H"] += 4
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na]+" in ion:
try:
if chem_form_ion.has_key("Na"): chem_form_ion["Na"] += 1
else: chem_form_ion["Na"] = 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
if "[M+Na2-H]+" in ion:
try:
if chem_form_ion.has_key("Na"): chem_form_ion["Na"] += 2
else: chem_form_ion["Na"] = 2
chem_form_ion["H"] -= 1
except KeyError: logging.critical("ERROR: could not add atom when getting the ionized form from the molecule")
return("".join([atom+str(num_atom) for atom,num_atom in sorted(chem_form_ion.items())]))
with open(f_name) as infile:
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
for line in infile:
line = line.strip()
#print(line)
if len(line) == 0:
f_acyl_lengths,unsats,class_name = _get_general_info(name)
f_acyl_lengths_error = [a for a in f_acyl_lengths if a < self.min_acyl_length and a != 0]
if (len(class_name) == 0) or \
(ion_type not in self.include_ions) or \
(len([c for c in self.include_class if c in name]) == 0) or \
(self.exclude_lyso and "/0:0" in name) or \
(len(f_acyl_lengths_error) > 0):
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
continue
simplified_name = _simplify_name(class_name,f_acyl_lengths,unsats)
new_entry = LipidBLAST_entry(name=name,
ion=ion_type,
mw=pre_c_mass,
chem_form=chem_form_ion,
num_ms2_peaks=num_peaks,
ms2=fragments,
f_acyl_lengths=f_acyl_lengths,
unsats=unsats)
self.lpb_dict["%s|%s" % (simplified_name,ion_type)] = new_entry
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.dalt_diff_lookup_bin
if loc_dict in self.ms1_dict_lookup.keys():
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
else:
self.ms1_dict_lookup[loc_dict] = {}
self.ms1_dict_lookup[loc_dict]["%s|%s" % (simplified_name,ion_type)] = new_entry
self.tot_entr_read += 1
fragments = []
pre_c_mass = 0.0
name = ""
ion_type = ""
elif ":" in line:
if line.startswith("PRECURSORMZ"):
pre_c_mass = float(line.split(": ")[1])
if line.startswith("Name: "):
name = line.split("; ")[-1]
ion_type = line.split("; ")[1]
if line.startswith("Comment: "):
# Some of the chemical formulas contain a ";" at the end; remove
chem_form_native = line.split("; ")[-1].replace(";","")
#print(chem_form_native)
chem_form_ion = _get_chem_form(chem_form_native,ion_type)
if line.startswith("Num Peaks:"):
num_peaks = int(line.split(": ")[-1])
else:
if line=="\x1a": #EOF
continue
fragments.append([float(line.split(" ")[0]),float(line.split(" ")[1]),line.split(" ")[2].replace("\"","")])
class PrecursorFilter():
def __init__(self,db,ppm=10):
self.db = db
self.ppm = ppm
def retrieve_entry_pre_c_mass(self,pre_c_mass):
mass_error_threshold = (pre_c_mass*self.ppm)/1000000
ret_entries = []
loc_dict = int(pre_c_mass) - int(pre_c_mass) % self.db.dalt_diff_lookup_bin
loc_dict_lower = (int(pre_c_mass-mass_error_threshold)) - (int(pre_c_mass-mass_error_threshold)) % self.db.dalt_diff_lookup_bin
loc_dict_upper = (int(pre_c_mass+mass_error_threshold)) - (int(pre_c_mass+mass_error_threshold)) % self.db.dalt_diff_lookup_bin
# TODO set does not have to be list
locs_to_search = list(set([loc_dict,loc_dict_lower,loc_dict_upper]))
for loc in locs_to_search:
try:
for name,entr in self.db.ms1_dict_lookup[loc].items():
mass_error = abs(entr.mw-pre_c_mass)
if mass_error < mass_error_threshold:
ret_entries.append([name,mass_error,entr])
except KeyError:
logging.warning("Could not find an entry in the database for prec mass: %s" % (pre_c_mass))
continue
return(ret_entries)
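# Editorial note: with the default 10 ppm, a hypothetical precursor mass of
# 760.585 Da gives mass_error_threshold = 760.585 * 10 / 1e6, roughly 0.0076 Da,
# and up to three adjacent 1 Da lookup bins are searched so that candidates just
# across a bin boundary are not missed.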
if __name__ == "__main__":
logging.basicConfig(filename="prec_filter.log",
level=logging.DEBUG,
filemode="w",
format="%(levelname)s:%(created)f:%(asctime)s:%(message)s")
logging.info("Reading the LPB database ...")
lpb = LipidBLAST()
logging.info("Done reading the LPB database ...")
logging.info(lpb)
step_three_df = pd.read_csv("stepone_new.csv")
precf = Precursor_filter(lpb)
prec_filt_result = []
for index,row in step_three_df.iterrows():
if (index % 10000==0):
logging.info("Analyzing row number and m/z: %s - %s" % (index,row["mz"]))
prec_hits = precf.retrieve_entry_pre_c_mass(row["mz"])
for hit in prec_hits:
prec_filt_result.append([row["mz"],hit[2].mw,hit[1],hit[0].split("|")[0],hit[2].chem_form,hit[0].split("|")[1]])
prec_filt_result = pd.DataFrame(prec_filt_result)
prec_filt_result.columns = ["Input Mass","Matched Mass","Delta","Abbreviation","Formula","Ion"]
prec_filt_result.to_excel("batch_results.xlsx",index=False)
| 36.162319
| 303
| 0.655579
| 10,074
| 0.80747
| 0
| 0
| 0
| 0
| 0
| 0
| 3,499
| 0.280458
|
79016946767147d0fbaeddece8c5f2511d1e6b1d
| 178
|
py
|
Python
|
floris/tools/optimization/scipy/__init__.py
|
eirikur16/flrs
|
c98604593753def05086b54ce82f5551f01d2529
|
[
"Apache-2.0"
] | 91
|
2019-06-04T08:56:29.000Z
|
2022-03-13T17:39:22.000Z
|
floris/tools/optimization/scipy/__init__.py
|
eirikur16/flrs
|
c98604593753def05086b54ce82f5551f01d2529
|
[
"Apache-2.0"
] | 224
|
2019-04-08T22:03:45.000Z
|
2022-03-31T17:56:09.000Z
|
floris/tools/optimization/scipy/__init__.py
|
eirikur16/flrs
|
c98604593753def05086b54ce82f5551f01d2529
|
[
"Apache-2.0"
] | 97
|
2019-04-23T20:48:20.000Z
|
2022-03-29T08:17:02.000Z
|
from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
| 14.833333
| 27
| 0.651685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
790266e9a7bcf554bd70851b9a13216ab9f797e3
| 11,530
|
py
|
Python
|
src/gdata/spreadsheets/data.py
|
Cloudlock/gdata-python3
|
a6481a13590bfa225f91a97b2185cca9aacd1403
|
[
"Apache-2.0"
] | 19
|
2017-06-09T13:38:03.000Z
|
2020-12-12T07:45:48.000Z
|
src/gdata/spreadsheets/data.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 11
|
2017-07-22T07:09:54.000Z
|
2020-12-02T15:08:48.000Z
|
src/gdata/spreadsheets/data.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 25
|
2017-07-03T11:30:39.000Z
|
2020-10-01T02:21:13.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
"""The desired column header had no value for the row in the list feed."""
class Cell(atom.core.XmlElement):
"""The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
"""
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
"""The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
"""
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
"""The gs:field element.
A single field (cell) within a record. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
"""The gs:column element."""
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
"""The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
"""
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
"""The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
"""The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
"""
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
"""The gs:worksheet element.
The worksheet where the table lives. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
"""An Atom entry which represents a Google Spreadsheet."""
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
"""Extracts the spreadsheet key unique to this spreadsheet."""
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
"""An Atom feed listing a user's Google Spreadsheets."""
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
"""An Atom entry representing a single worksheet in a spreadsheet."""
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
"""The worksheet ID identifies this worksheet in its spreadsheet."""
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
"""A feed containing the worksheets in a single spreadsheet."""
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
"""An Atom entry that represents a subsection of a worksheet.
A table allows you to treat part or all of a worksheet somewhat like a
table in a database, that is, as a set of structured data items. Tables
don't exist until you explicitly create them; before you can use a table
feed, you have to explicitly define where the table data comes from.
"""
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
"""An Atom feed containing the tables defined within a worksheet."""
entry = [Table]
class Record(gdata.data.GDEntry):
"""An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
"""
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
"""An Atom feed containing the individuals records in a table."""
entry = [Record]
class ListRow(atom.core.XmlElement):
"""A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
"""
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
"""An Atom entry representing a worksheet row in the list feed.
The values for a particular column can be retrieved and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
"""
def get_value(self, column_name):
"""Returns the displayed text for the desired column in this row.
The formula or input which generated the displayed value is not accessible
through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
"""
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
"""Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
No client-side checking is performed on the column_name; you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
"""
# Try to find the column in this row to change an existing value.
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
# There is no value in this row for the desired column, so add a new
# gsx:column_name element.
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
"""Converts this row to a mapping of column names to their values."""
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
"""Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
"""
for column, value in values.items():
self.set_value(column, value)
class ListsFeed(gdata.data.GDFeed):
"""An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
"""
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
"""An Atom entry representing a single cell in a worksheet."""
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
"""An Atom feed contains one entry per cell in a worksheet.
The cell feed supports batch operations; you can send multiple cell
operations in one HTTP request.
"""
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
"""Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
"""
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
"""Creates an empty cells feed for adding batch cell updates to.
Call batch_set_cell on the resulting CellsFeed instance then send the batch
request TODO: fill in
Args:
spreadsheet_key: The ID of the spreadsheet
worksheet_id:
"""
feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
return CellsFeed(
id=atom.data.Id(text=feed_id_text),
link=[atom.data.Link(
rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))])
BuildBatchCellsUpdate = build_batch_cells_update
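# --- Editorial usage sketch; the key, worksheet id, and cell values below are
# hypothetical ---
# batch = build_batch_cells_update('spreadsheet_key', 'worksheet_id')
# batch.add_set_cell(row=1, col=1, input_value='=SUM(B1:B10)')
# batch.add_set_cell(row=1, col=2, input_value='hello')
# The populated CellsFeed carries its own batch edit link, so all queued cell
# updates can then be sent to the server in a single batch request.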
| 31.162162
| 82
| 0.674761
| 9,881
| 0.856982
| 0
| 0
| 0
| 0
| 0
| 0
| 6,827
| 0.592108
|
79028a174225260b671df8c8ac4560369e16c2c8
| 710
|
py
|
Python
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | 3
|
2017-07-23T11:11:23.000Z
|
2020-11-30T15:36:51.000Z
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | 15
|
2018-01-05T17:18:34.000Z
|
2021-12-13T17:40:25.000Z
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | null | null | null |
import unittest
from pyjsg.validate_json import JSGPython
class MemberExampleTestCase(unittest.TestCase):
def test1(self):
x = JSGPython('''doc {
last_name : @string, # exactly one last name of type string
first_name : @string+ # array of one or more first names
age : @int?, # optional age of type int
weight : @number* # array of zero or more weights
}
''')
rslts = x.conforms('''
{ "last_name" : "snooter",
"first_name" : ["grunt", "peter"],
"weight" : []
}''')
self.assertTrue(rslts.success)
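# Editorial sketch of a negative case (assumed behaviour: a string-valued "age"
# should violate the optional @int? member declared above):
#   rslts = x.conforms('{"last_name": "snooter", "first_name": ["grunt"], "age": "old"}')
#   self.assertFalse(rslts.success)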
if __name__ == '__main__':
unittest.main()
| 28.4
| 77
| 0.533803
| 599
| 0.843662
| 0
| 0
| 0
| 0
| 0
| 0
| 449
| 0.632394
|
7902cca06e3a841cee96255c053ca834cc5022f5
| 7,223
|
py
|
Python
|
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | 1
|
2022-01-08T09:33:09.000Z
|
2022-01-08T09:33:09.000Z
|
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | null | null | null |
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | null | null | null |
"""Define abstract base classes to construct FileFinder classes."""
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
@dataclass
class FileFinder(ABC):
"""Basic representation of class for finding and filtering files."""
hemispheres: Union[dict, None] = field(default_factory=dict)
directory: Union[Path, str] = field(init=False)
files: list = field(init=False, default_factory=list)
def __str__(self):
if not self.files:
return "No corresponding files found."
headers = ["Index", "Filename"]
col_width = max(len(os.path.basename(file)) for file in self.files)
format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}"
terminal_size = "\u2500" * shutil.get_terminal_size().columns
return "\n".join(
(
"Corresponding files found:",
"".join(
f"{{:>{len(header) + 2}}}".format(header)
for header in headers
),
terminal_size,
*(
format_row.format(idx, os.path.basename(file))
for idx, file in enumerate(self.files)
),
)
)
def __len__(self) -> int:
if not self.files:
return 0
return len(self.files)
@abstractmethod
def find_files(
self,
directory: Union[str, Path],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list, str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Find files in directory with optional
keywords and extensions."""
@abstractmethod
def filter_files(
self,
keywords: Optional[Union[str, list]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Filter list of filepaths for given parameters."""
@staticmethod
def _keyword_search(
files: list[str], keywords: Optional[Union[str, list]]
) -> list:
if not keywords:
return files
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = [
file for file in files if any(key in file for key in keywords)
]
return filtered_files
def _find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[list, str]] = None,
) -> None:
"""Find files in directory with optional extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional)
extensions (list): e.g. [".json" or "tsv"] (optional)
verbose (bool): verbosity level (optional, default=True)
"""
files = []
for root, _, fnames in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files
def _filter_files(
self,
keywords: Optional[Union[str, list[str]]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list[str]]] = None,
) -> None:
"""Filter filepaths for given parameters."""
filtered_files = self.files
if exclude:
if not isinstance(exclude, list):
exclude = [exclude]
filtered_files = [
file
for file in filtered_files
if not any(item in file for item in exclude)
]
if keywords:
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if stimulation.lower() in "stimon":
stim = "StimOn"
elif stimulation.lower() in "stimoff":
stim = "StimOff"
else:
raise ValueError("Keyword for stimulation not valid.")
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if medication.lower() in "medon":
med = "MedOn"
elif medication.lower() in "medoff":
med = "MedOff"
else:
raise ValueError("Keyword for medication not valid.")
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)["subject"]
if (
subject not in self.hemispheres
or self.hemispheres[subject] is None
):
raise HemisphereNotSpecifiedError(
subject, self.hemispheres
)
hem = self.hemispheres[subject] + "_"
if hemisphere.lower() in "ipsilateral" and hem in file:
matching_files.append(file)
if hemisphere.lower() in "contralateral" and hem not in file:
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files
class DirectoryNotFoundError(Exception):
"""Exception raised when invalid Reader is passed.
Attributes:
directory -- input directory which caused the error
"""
def __init__(
self,
directory: Union[Path, str],
message="Input directory was not found.",
):
self.directory = directory
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} Got: {self.directory}."
class HemisphereNotSpecifiedError(Exception):
"""Exception raised when electrode hemisphere is not specified in settings.
Attributes:
subject -- input subject which caused the error
hemisphere -- specified hemispheres
message -- explanation of the error
"""
def __init__(
self,
subject,
hemispheres,
message=(
"Input ECOG hemisphere is not specified in"
" `filefinder_settings.py` for given subject."
),
) -> None:
self.subject = subject
self.hemispheres = hemispheres
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{self.message} Unspecified subject: {self.subject}."
f" Specified hemispheres: {self.hemispheres}."
)
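# --- Editorial sketch: a minimal concrete FileFinder showing how the abstract
# interface above is meant to be filled in; the class name and the chaining of
# the protected helpers are assumptions, not part of this module. ---
class BasicFileFinder(FileFinder):
    """Concrete FileFinder that simply chains the provided helper methods."""

    def find_files(self, directory, extensions=None, keywords=None,
                   hemisphere=None, stimulation=None, medication=None,
                   exclude=None, verbose=False) -> None:
        # Collect candidate paths, then delegate to the shared filter logic.
        self.directory = directory
        self._find_files(directory, extensions)
        self.filter_files(keywords, hemisphere, stimulation,
                          medication, exclude, verbose)

    def filter_files(self, keywords=None, hemisphere=None, stimulation=None,
                     medication=None, exclude=None, verbose=False) -> None:
        self._filter_files(keywords, hemisphere, stimulation,
                           medication, exclude)
        if verbose:
            print(self)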
| 33.439815
| 79
| 0.56417
| 6,946
| 0.96165
| 0
| 0
| 5,624
| 0.778624
| 0
| 0
| 1,611
| 0.223038
|
790323f724e852cdcf7d4d9d3e4d89703473f768
| 3,725
|
py
|
Python
|
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
import glob
import time
from os import path
from flask import Blueprint, jsonify, current_app, request, Response, json
from flask_login import login_required
from .. import pz_server_state
from ..services.power_actions_service import is_valid_power_action, execute_action
from ..services.server_options_service import read_config, save_config, prepared_config_to_view, formatted_config_lines
from ..services.server_status_service import get_server_status
from ..utils.resources_functions import server_resources
server_blueprint = Blueprint('server', __name__, url_prefix='/server')
@server_blueprint.route('/status')
@login_required
def status():
rcon_host = current_app.config['RCON_HOST']
rcon_password = current_app.config['RCON_PASSWORD']
server_state, players = get_server_status(rcon_host, rcon_password)
return jsonify(
server_state=server_state,
online_players=players,
server_resources=server_resources()
)
@server_blueprint.route('/power-actions', methods=['POST'])
@login_required
def power_actions():
request_data = request.get_json()
pz_user_home = current_app.config["PZ_USER_HOME"]
power_action = request_data.get("power_action", None)
if not is_valid_power_action(power_action):
return jsonify(error="Unknown action"), 400
if not execute_action(power_action, pz_user_home):
return '', 500
return jsonify(server_state=pz_server_state.state)
def get_config(pz_server_config):
config = read_config(pz_server_config)
return {
"WorkshopItems": config["WorkshopItems"],
"Mods": config["Mods"]
}
@server_blueprint.route('/options')
@login_required
def list_workshop_items():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return jsonify(
WorkshopItems=prepared_config_to_view(export_config["WorkshopItems"]),
Mods=prepared_config_to_view(export_config["Mods"])
)
@server_blueprint.route('/options/export')
@login_required
def export_server_config():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return current_app.response_class(
formatted_config_lines(export_config),
mimetype='text/event-stream',
headers={"Content-Disposition": "attachment;filename=server_config.ini"}
)
@server_blueprint.route('/options', methods=['POST'])
@login_required
def save_items():
request_data = request.get_json()
config = save_config(current_app.config['PZ_SERVER_CONFIG'], request_data)
export_config = {
"WorkshopItems": prepared_config_to_view(config["WorkshopItems"]),
"Mods": prepared_config_to_view(config["Mods"])
}
return jsonify(export_config)
@server_blueprint.route('/log')
@login_required
def listen_log():
def followLog(serverLogsDir):
logFilePattern = "*_DebugLog-server.txt"
logFiles = glob.glob(path.join(serverLogsDir, logFilePattern))
if not logFiles:
yield 'data: {}\n\n'.format(
json.dumps({"error": True, "errorMessage": "No log file found"})
)
return
logFiles.sort(reverse=True)
with open(logFiles[0]) as serverLogFile:
try:
while True:
line = serverLogFile.readline()
if not line:
    # no new log line yet; sleep briefly to avoid a busy wait
    time.sleep(0.01)
    continue
time.sleep(0.01)
yield 'data: {}\n\n'.format(
json.dumps({"log": line.strip()})
)
finally:
pass
serverLogsDir = current_app.config['PZ_SERVER_LOGS_DIR']
return Response(followLog(serverLogsDir), mimetype='text/event-stream')
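# --- Editorial usage note; the host and port are hypothetical ---
# The /server/log route streams Server-Sent Events: each "data:" payload is a
# JSON object such as {"log": "..."} or {"error": true, "errorMessage": "..."}.
# After authenticating, it can be watched from a shell with unbuffered curl:
#   curl -N http://localhost:5000/server/log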
| 29.8
| 119
| 0.68698
| 0
| 0
| 948
| 0.254497
| 2,941
| 0.78953
| 0
| 0
| 542
| 0.145503
|
7903777a50ff41a94bed60837d113e3a3fca6cc0
| 23,095
|
py
|
Python
|
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
sub_models.py
|
tmartin2/EnsembleSplice-Inactive
|
a161ff007b47ceadd3a21376f2eac2971bb81d90
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) 2021 Trevor P. Martin. All rights reserved.
# Distributed under the MIT License.
# -----------------------------------------------------------------------------
from Data import encode_data
# from utils import cross_validation
from Models import utils
from Models import build_models
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
import tensorflow as tf
import copy
class CNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=128,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Conv1D(
filters=64,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN04(tf.keras.Model):
@staticmethod
def build(rows, columns, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv1D(
filters=32,
kernel_size=3,
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class CNN05(tf.keras.Model):
@staticmethod
def build(rows, columns, channels, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns, channels)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(
filters=32,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Conv2D(
filters=64,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(
filters=128,
kernel_size=(3,3),
activation="relu",
padding="same"
)
)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN01(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.15))
model.add(tf.keras.layers.Dense(units=units//4, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN02(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(units=units//2, kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class DNN03(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=units*2, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)))
model.add(tf.keras.layers.Dropout(rate=0.50))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
class RNN(tf.keras.Model):
@staticmethod
def build(rows, columns, units, classes):
model = tf.keras.Sequential()
input_shape = (rows, columns)
model.add(tf.keras.layers.InputLayer(input_shape=input_shape))
model.add(tf.keras.layers.LSTM(
units=units,
activation='tanh',
return_sequences=True,
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.LSTM(
units=units//2,
activation='tanh',
)
)
model.add(tf.keras.layers.Dropout(rate=0.20))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(classes, activation="softmax"))
return model
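# --- Editorial usage sketch; the 90x4 one-hot input shape and the 2 output
# classes are illustrative assumptions, not values taken from this file ---
# model = CNN02.build(rows=90, columns=4, classes=2)
# model.compile(optimizer="adam", loss="categorical_crossentropy",
#               metrics=["accuracy"])
# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           batch_size=32, epochs=15)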
def run(datasets,
splice_sites,
sub_models,
save,
vis,
iter,
metrics,
summary,
config,
num_folds,
bal,
imbal,
imbal_t,
imbal_f,
batch_size,
epochs
):
"""
Parameters
----------
dataset: a string {nn269, ce, hs3d} indicating which dataset to use
splice_site_type: a string {acceptor, donor} indicating which splice
site to train on
model_architecture: a string {cnn, dnn, rnn} indicating which model
architecture to use for training
save_model: boolean, whether to save the current model
bal: boolean, whether to balance the dataset
summary: boolean, whether to print out the model architecture summary
config: boolean, whether to print out the model's configuration
visualize: boolean, whether to save a performance graph of the model
metrics: boolean, whether to print out the evaluation metrics for the model
num_folds: int (default 10), the number of folds for k-fold cross validation
epochs: int (default 15), the number of epochs for the chosen model
batch_size: int (default 32), the model batch size
model_iter: integer, the iteration of the current model architecture (e.g.
if this is the third cnn architecture you are testing, use 3)
"""
# (acceptor row len, donor row len) by dataset
network_rows = {
'acceptor':{
'nn269':90, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
'donor':{
'nn269':15, 'ce':141,
'hs3d':140, 'hs2':602,
'ce2':602, 'dm':602,
'ar':602, 'or':602,
},
}
# initialize selected sub models
to_run = dict(
[
(sub_model,{
'nn269':'', 'ce':'',
'hs3d':'', 'hs2':'',
'ce2':'', 'dm':'',
'ar':'', 'or':''
}) for sub_model in sub_models
]
)
# results dictionary
results = copy.deepcopy(to_run)
# populate sub models with encoded data
for sub_model in sub_models:
for dataset in datasets:
# encode datasets -> return (acc_x, acc_y, don_x, don_y)
to_run[sub_model][dataset] = encode_data.encode(dataset, sub_model, bal)
# get a metrics dictionary
evals = dict(
[
(sub_model, {
'f1':'', 'precision':'',
'sensitivity':'', 'specificity':'',
'recall':'', 'mcc':'',
'err_rate':''
}) for sub_model in sub_models
]
)
# accumulate results from running cross validation
for sub_model in sub_models:
for dataset in datasets:
if to_run[sub_model][dataset] == '':
pass
else:
results[sub_model][dataset] = utils.cross_validation(
num_folds,
sub_model,
splice_sites,
dataset,
to_run[sub_model][dataset],# encoded data for dataset (ds)
network_rows, # donor, acceptor rows for ds
evals,
summary,
config,
batch_size,
epochs,
save,
)
# if vis:
print(results)
return results
# plot results
# loss_acc_sub_models(
# results,
# datasets,
# sub_models,
# epochs,
# num_folds,
# bal
# )
# # different by splice site type
# if splice_site_type == 'acceptor':
# cnn_X_train, cnn_y_train = cnn_acc_x, acc_y
# # same name to preserve for loop structure
# X_train, y_train = rd_acc_x, acc_y
# dataset_row_num = network_rows[dataset][0]
# if splice_site_type == 'donor':
# cnn_X_train, cnn_y_train = cnn_don_x, don_y
# X_train, y_train = rd_don_x, don_y
# dataset_row_num = network_rows[dataset][1]
#
#
# # if tune_rnn:
# # tune_rnn()
#
# # perform cross validation
# # general
# trn_fold_accs, trn_fold_losses = [], []
# val_fold_accs, val_fold_losses = [], []
# # esplice
# rnn_va, rnn_vl, cnn_vl, cnn_va, dnn_vl, dnn_va = [],[],[],[],[],[]
# rnn_ta, rnn_tl, cnn_tl, cnn_ta, dnn_tl, dnn_ta = [],[],[],[],[],[]
#
# # this loop inspired by https://www.machinecurve.com/
# #index.php/2020/02/18/how-to-use-k-fold-cross-validation-with-keras/
# k_fold = KFold(n_splits=num_folds, shuffle=False)
# fold = 1
# for train, test in k_fold.split(X_train, y_train):
# if model_architecture != 'esplice':
# X_trn, y_trn = X_train[train], y_train[train]
# X_val, y_val = X_train[test], y_train[test]
# if model_architecture=='cnn':
# history, model = build_cnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='dnn':
# history, model = build_dnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# if model_architecture=='rnn':
# history, model = build_rnn(
# dataset_row_num,
# summary,
# X_trn,
# y_trn,
# batch_size,
# epochs,
# X_val,#becomes X_val
# y_val,#becomes y_val
# fold,
# num_folds
# )
# # model.predict(X_trn)
# val_fold_accs.append(history.history['val_accuracy'])
# val_fold_losses.append(history.history['val_loss'])
# trn_fold_accs.append(history.history['accuracy'])
# trn_fold_losses.append(history.history['loss'])
# fold += 1
# else:
# # set up submodel datasets
# cnn_X_trn, cnn_y_trn = cnn_X_train[train], cnn_y_train[train]
# cnn_X_val, cnn_y_val = cnn_X_train[test], cnn_y_train[test]
# rd_X_trn, rd_y_trn = X_train[train], y_train[train]
# rd_X_val, rd_y_val = X_train[test], y_train[test]
# # build each submodel
# hist01, submodel_01 = build_cnn(
# dataset_row_num,
# summary,
# cnn_X_trn,
# cnn_y_trn,
# batch_size,
# epochs,
# cnn_X_val,
# cnn_y_val,
# fold,
# num_folds
# )
# hist02, submodel_02 = build_dnn(
# dataset_row_num,
# summary,
# rd_X_trn,
# rd_y_trn,
# batch_size,
# epochs,
# rd_X_val,
# rd_y_val,
# fold,
# num_folds
# )
# # hist03, submodel_03 = build_rnn(
# # dataset_row_num,
# # summary,
# # rd_X_trn,
# # rd_y_trn,
# # batch_size,
# # epochs,
# # rd_X_val,
# # rd_y_val,
# # fold,
# # num_folds
# # )
# models = [submodel_01, submodel_02]#, submodel_03]
# trn_scores, val_scores = EnsembleSplice.build(
# models,
# batch_size,
# cnn_X_trn,
# cnn_y_trn,
# cnn_X_val,
# cnn_y_val,
# rd_X_trn,
# rd_y_trn,
# rd_X_val,
# rd_y_val,
# )
# # get final epoch accuracy
# trn_fold_accs.append(trn_scores)
# val_fold_accs.append(val_scores)
# # rnn_va.append(hist03.history['val_accuracy'])
# # rnn_vl.append(hist03.history['val_loss'])
# # rnn_ta.append(hist03.history['accuracy'])
# # rnn_tl.append(hist03.history['loss'])
# # cnn_vl.append(hist01.history['val_loss'])
# # cnn_va.append(hist01.history['val_accuracy'])
# # cnn_tl.append(hist01.history['loss'])
# # cnn_ta.append(hist01.history['accuracy'])
# # dnn_vl.append(hist02.history['val_loss'])
# # dnn_va.append(hist02.history['val_accuracy'])
# # dnn_tl.append(hist02.history['loss'])
# # dnn_ta.append(hist02.history['accuracy'])
#
# # rnn_va.append(hist03.history['val_accuracy'][-1])
# # rnn_vl.append(hist03.history['val_loss'][-1])
# # rnn_ta.append(hist03.history['accuracy'][-1])
# # rnn_tl.append(hist03.history['loss'][-1])
# cnn_vl.append(hist01.history['val_loss'][-1])
# cnn_va.append(hist01.history['val_accuracy'][-1])
# cnn_tl.append(hist01.history['loss'][-1])
# cnn_ta.append(hist01.history['accuracy'][-1])
# dnn_vl.append(hist02.history['val_loss'][-1])
# dnn_va.append(hist02.history['val_accuracy'][-1])
# dnn_tl.append(hist02.history['loss'][-1])
# dnn_ta.append(hist02.history['accuracy'][-1])
#
# fold += 1
#
# # do something with predicted values and real values to get AUC-ROC scores
# # sklearn.metrics.roc_auc_score
# # also get f-score and other scores here
# # maybe connect tune_rnn and build_rnn -> get tuned parameters and plug them
# # in automatically to RNN
#
# if model_architecture != 'esplice':
#
# val_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_accs).T)
# val_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(val_fold_losses).T)
# trn_acc_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_accs).T)
# trn_loss_by_epoch = np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(trn_fold_losses).T)
#
# std_val_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_accs).T)
# std_val_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(val_fold_losses).T)
# std_trn_acc = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_accs).T)
# std_trn_loss = np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(trn_fold_losses).T)
#
# values = [
# val_acc_by_epoch,
# std_val_acc,
# trn_acc_by_epoch,
# std_trn_acc,
# val_loss_by_epoch,
# std_val_loss,
# trn_loss_by_epoch,
# std_trn_loss
# ]
#
# if model_architecture == 'esplice':
#
# # make a DICTIONARY AREY
# # ES_Val_ACc: (vacc, std_va)
# mean_good = lambda seq: np.apply_along_axis(lambda row: np.mean(row), 1, np.asarray(seq).T)
# std_good = lambda seq: np.apply_along_axis(lambda row: np.std(row), 1, np.asarray(seq).T)
# vacc = val_fold_accs
# tacc = trn_fold_accs
# # std_va = val_fold_accs
# # std_ta = trn_fold_accs
#
# values = [
# val_fold_accs,
# trn_fold_accs,
# #rnn_va,
# # rnn_vl,
# #rnn_ta,
# # rnn_tl,
# # cnn_vl,
# cnn_va,
# # cnn_tl,
# cnn_ta,
# # dnn_vl,
# dnn_va,
# # dnn_tl,
# dnn_ta
# ]
#
# # cnn_mva = mean_good(cnn_va)
# # cnn_mvl = mean_good(cnn_vl)
# # cnn_mta = mean_good(cnn_ta)
# # cnn_mtl = mean_good(cnn_tl)
# # cnn_sva = std_good(cnn_va)
# # cnn_svl = std_good(cnn_vl)
# # cnn_sta = std_good(cnn_ta)
# # cnn_stl = std_good(cnn_tl)
# #
# # dnn_mva = mean_good(dnn_va)
# # dnn_mvl = mean_good(dnn_vl)
# # dnn_mta = mean_good(dnn_ta)
# # dnn_mtl = mean_good(dnn_tl)
# # dnn_sva = std_good(dnn_va)
# # dnn_svl = std_good(dnn_vl)
# # dnn_sta = std_good(dnn_ta)
# # dnn_stl = std_good(dnn_tl)
# #
# # rnn_mva = mean_good(rnn_va)
# # rnn_mvl = mean_good(rnn_vl)
# # rnn_mta = mean_good(rnn_ta)
# # rnn_mtl = mean_good(rnn_tl)
# # rnn_sva = std_good(rnn_va)
# # rnn_svl = std_good(rnn_vl)
# # rnn_sta = std_good(rnn_ta)
# # rnn_stl = std_good(rnn_tl)
#
# # values = [
# # vacc,
# # # std_va,
# # tacc,
# # # std_ta,
# # cnn_mva,
# # cnn_sva,
# # cnn_mvl,
# # cnn_svl,
# # cnn_mta,
# # cnn_sta,
# # cnn_mtl,
# # cnn_stl,
# # dnn_mva,
# # dnn_sva,
# # dnn_mvl,
# # dnn_svl,
# # dnn_mta,
# # dnn_sta,
# # dnn_mtl,
# # dnn_stl,
# # rnn_mva,
# # rnn_sva,
# # rnn_mvl,
# # rnn_svl,
# # rnn_mta,
# # rnn_sta,
# # rnn_mtl,
# # rnn_stl,
# # ]
# if config:
# print(model.get_config())
# if save_model:
# name = input('What would you like to name this model?: ')
# model.save(f'{name}')
# tf.keras.utils.plot_model(model, f'{name}.png', show_shapes=True)
# if visualize:
# loss_acc_esplice(
# values,
# model_architecture,
# dataset,
# splice_site_type,
# num_folds,
# epochs,
# bal,
# )
| 34.781627
| 126
| 0.525352
| 7,716
| 0.334098
| 0
| 0
| 7,421
| 0.321325
| 0
| 0
| 11,801
| 0.510976
|
7903ec9c043049b9e677a2917e22d25071fe1f34
| 3,227
|
py
|
Python
|
tracportalopt/project/notification.py
|
isabella232/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 2
|
2015-01-19T05:53:30.000Z
|
2016-01-08T10:30:02.000Z
|
tracportalopt/project/notification.py
|
iij/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 1
|
2022-01-20T12:47:18.000Z
|
2022-01-20T12:47:18.000Z
|
tracportalopt/project/notification.py
|
isabella232/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 3
|
2016-12-08T02:25:36.000Z
|
2022-01-20T12:10:58.000Z
|
#! -*- coding: utf-8 -*-
#
# (C) 2013 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/05/15
# @author: yosinobu@iij.ad.jp
"""Notify project owner with email when the project created successfully."""
from pkg_resources import resource_filename
from trac.config import Option, ListOption
from trac.core import Component, implements
from trac.notification import Notify, NotifyEmail
from trac.web.chrome import ITemplateProvider
from tracportal.i18n import _
from tracportal.project.api import IProjectCreationInterceptor
class ProjectCreationNotificationSystem(Component):
implements(ITemplateProvider, IProjectCreationInterceptor)
# options
from_name = Option('tracportal', 'notify_email_from_name', doc=_('Sender name to use in notification emails.'))
from_email = Option('tracportal', 'notify_email_from', doc=_('Sender address to use in notification emails.'))
ccrcpts = ListOption('tracportal', 'notify_email_cc',
doc=_('Email address(es) to always send notifications to, '
'addresses can be seen by all recipients (Cc:).'))
subject = Option('tracportal', 'notify_email_subject', default=_("Ready to start Trac project!"),
doc=_('Subject in notification emails.'))
# ITemplateProvider methods
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return []
# IProjectCreationInterceptor methods
def pre_process(self, project_info, owner_info):
pass
def post_process(self, project_info, owner_info, env):
if 'email' in owner_info:
project_info['url'] = env.abs_href()
support = {
'name': self.from_name or self.env.project_name,
'email': self.from_email or self.env.config.get('notification', 'smtp_from'),
}
notify_email = ProjectCreationNotifyEmail(self.env, (owner_info['email'],), tuple(self.ccrcpts),
project_info, owner_info, support)
notify_email.notify('')
class ProjectCreationNotifyEmail(NotifyEmail):
"""Notification of a project creation."""
template_name = 'project_creation_notify_email.txt'
def __init__(self, env, torcpts, ccrcpts, project_info, owner_info, support):
NotifyEmail.__init__(self, env)
self.torcpts = torcpts
self.ccrcpts = ccrcpts
self.project_info = project_info
self.owner_info = owner_info
self.support = support
self.subject = self.subject
def get_recipients(self, resid):
return (self.torcpts, self.ccrcpts,)
def notify(self, resid, subject=None, author=None):
if subject:
self.subject = subject
self.from_name = self.support['name']
self.from_email = self.support['email']
self.replyto_email = self.support['email']
if self.data is None:
self.data = {}
self.data.update({
'owner': self.owner_info,
'project': self.project_info,
'support': self.support,
})
Notify.notify(self, resid)
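# Illustrative sketch (not from the plugin itself): the Option/ListOption declarations
# in ProjectCreationNotificationSystem above are read from the [tracportal] section of
# trac.ini, along the lines of:
#
#   [tracportal]
#   notify_email_from_name = Portal Admin
#   notify_email_from = portal@example.com
#   notify_email_cc = ops@example.com
#   notify_email_subject = Ready to start Trac project!
#
# The values shown are hypothetical; only the option names come from the code above.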
| 37.523256
| 115
| 0.654478
| 2,675
| 0.828943
| 0
| 0
| 0
| 0
| 0
| 0
| 866
| 0.268361
|
790488091f13f4b2ff427e7b9bda7aa18b0d732c
| 1,391
|
py
|
Python
|
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2019-04-23T10:41:35.000Z
|
2019-10-27T05:14:42.000Z
|
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | null | null | null |
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2018-01-16T00:00:22.000Z
|
2019-11-01T23:35:01.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
component_dir = os.path.join(SRC_DIR, component)
header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
glob.glob(os.path.join(component_dir, "*", "*.h")))
assert header_files
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
for line in open(filename):
line = line.rstrip("\n")
if line.startswith("#ifndef"):
if line != expected:
errors.append('%s uses guard "%s" but should use "%s"' %
(filename, line, expected))
break
return errors
def main():
errors = []
errors.extend(check_header_files("preprocess"))
errors.extend(check_header_files("search"))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
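# Illustrative example (not part of the original script): for a header file at
# <component>/landmarks/landmark_factory.h, the path relative to the component
# directory is upper-cased and '.', '/' and '-' are replaced by '_', so the expected
# first guard line would be
#   #ifndef LANDMARKS_LANDMARK_FACTORY_H
# Any header whose first '#ifndef' differs from this pattern is reported.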
| 28.979167
| 90
| 0.591661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.127965
|
7905a7207409a36e542edd41a689eb3240d45b7e
| 432
|
py
|
Python
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 1
|
2022-02-12T05:56:04.000Z
|
2022-02-12T05:56:04.000Z
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 182
|
2020-04-30T00:51:36.000Z
|
2021-09-07T04:15:05.000Z
|
kyu_7/fun_with_lists_length/length.py
|
pedrocodacyorg2/codewars
|
ba3ea81125b6082d867f0ae34c6c9be15e153966
|
[
"Unlicense"
] | 4
|
2020-04-29T22:04:20.000Z
|
2021-07-13T20:04:14.000Z
|
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def length(head) -> int:
"""
    The method length accepts a linked list
    (head) and returns the length of the list.
:param head:
:return:
"""
i = 0
if head is None:
return 0
while head.next is not None:
head = head.next
i += 1
return i + 1
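# Illustrative usage sketch (not part of the kata solution); it assumes a minimal
# node type with a `next` attribute, which is not defined in this file:
#
#     class Node:
#         def __init__(self, data, next=None):
#             self.data = data
#             self.next = next
#
#     assert length(None) == 0
#     assert length(Node(1, Node(2, Node(3)))) == 3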
| 18.782609
| 53
| 0.581019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.592593
|
7907243674e9e866161964f1907b28118b6c5588
| 7,238
|
py
|
Python
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 95
|
2018-08-20T23:10:00.000Z
|
2022-02-17T02:54:32.000Z
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 220
|
2018-08-01T20:56:29.000Z
|
2022-03-28T18:12:35.000Z
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 63
|
2018-08-01T19:37:33.000Z
|
2022-03-20T17:14:15.000Z
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Functional test suite testing decryption of known good test files encrypted using static RawMasterKeyProvider."""
import base64
import json
import logging
import os
import sys
from collections import defaultdict
import attr
import pytest
import six
import aws_encryption_sdk
from aws_encryption_sdk.exceptions import InvalidKeyIdError
from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm
from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey
from aws_encryption_sdk.internal.str_ops import to_bytes
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
pytestmark = [pytest.mark.accept]
# Environment-specific test file locator. May not always exist.
def _file_root():
return "."
try:
from .aws_test_file_finder import file_root
except ImportError:
file_root = _file_root
_LOGGER = logging.getLogger()
_WRAPPING_ALGORITHM_MAP = {
b"AES": {
128: {b"": {b"": WrappingAlgorithm.AES_128_GCM_IV12_TAG16_NO_PADDING}},
192: {b"": {b"": WrappingAlgorithm.AES_192_GCM_IV12_TAG16_NO_PADDING}},
256: {b"": {b"": WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING}},
},
b"RSA": defaultdict(
lambda: {
b"PKCS1": {b"": WrappingAlgorithm.RSA_PKCS1},
b"OAEP-MGF1": {
b"SHA-1": WrappingAlgorithm.RSA_OAEP_SHA1_MGF1,
b"SHA-256": WrappingAlgorithm.RSA_OAEP_SHA256_MGF1,
b"SHA-384": WrappingAlgorithm.RSA_OAEP_SHA384_MGF1,
b"SHA-512": WrappingAlgorithm.RSA_OAEP_SHA512_MGF1,
},
}
),
}
_KEY_TYPES_MAP = {b"AES": EncryptionKeyType.SYMMETRIC, b"RSA": EncryptionKeyType.PRIVATE}
_STATIC_KEYS = defaultdict(dict)
class StaticStoredMasterKeyProvider(RawMasterKeyProvider):
"""Provides static key"""
provider_id = "static-aws-xcompat"
def _get_raw_key(self, key_id):
"""Finds a loaded raw key."""
try:
algorithm, key_bits, padding_algorithm, padding_hash = key_id.upper().split(b".", 3)
key_bits = int(key_bits)
key_type = _KEY_TYPES_MAP[algorithm]
wrapping_algorithm = _WRAPPING_ALGORITHM_MAP[algorithm][key_bits][padding_algorithm][padding_hash]
static_key = _STATIC_KEYS[algorithm][key_bits]
return WrappingKey(
wrapping_algorithm=wrapping_algorithm, wrapping_key=static_key, wrapping_key_type=key_type
)
except KeyError:
_LOGGER.exception("Unknown Key ID: %s", key_id)
raise InvalidKeyIdError("Unknown Key ID: {}".format(key_id))
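# Illustrative note (not part of the original test suite): the key IDs handled by
# _get_raw_key are dot-separated descriptors of the form
#   <algorithm>.<key bits>.<padding algorithm>.<padding hash>
# e.g. b"AES.256.." or b"RSA.4096.OAEP-MGF1.SHA-256", which is why the ID is split
# on b"." into exactly four fields and looked up in _WRAPPING_ALGORITHM_MAP.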
@attr.s
class RawKeyDescription(object):
"""Customer raw key descriptor used by StaticStoredMasterKeyProvider."""
encryption_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_bits = attr.ib(validator=attr.validators.instance_of(int))
padding_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
padding_hash = attr.ib(validator=attr.validators.instance_of(six.string_types))
@property
def key_id(self):
"""Build a key ID from instance parameters."""
return ".".join([self.encryption_algorithm, str(self.key_bits), self.padding_algorithm, self.padding_hash])
@attr.s
class Scenario(object):
"""Scenario details."""
plaintext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
ciphertext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_ids = attr.ib(validator=attr.validators.instance_of(list))
def _generate_test_cases(): # noqa=C901
try:
root_dir = os.path.abspath(file_root())
except Exception: # pylint: disable=broad-except
root_dir = os.getcwd()
if not os.path.isdir(root_dir):
root_dir = os.getcwd()
base_dir = os.path.join(root_dir, "aws_encryption_sdk_resources")
ciphertext_manifest_path = os.path.join(base_dir, "manifests", "ciphertext.manifest")
if not os.path.isfile(ciphertext_manifest_path):
# Make no test cases if the ciphertext file is not found
return []
with open(ciphertext_manifest_path, encoding="utf-8") as f:
ciphertext_manifest = json.load(f)
_test_cases = []
# Collect keys from ciphertext manifest
for algorithm, keys in ciphertext_manifest["test_keys"].items():
algorithm = to_bytes(algorithm.upper())
        for key_bits, key_desc in keys.items():
            key_bits = int(key_bits)
            raw_key = to_bytes(key_desc.get("line_separator", "").join(key_desc["key"]))
if key_desc["encoding"].lower() in ("raw", "pem"):
_STATIC_KEYS[algorithm][key_bits] = raw_key
elif key_desc["encoding"].lower() == "base64":
_STATIC_KEYS[algorithm][key_bits] = base64.b64decode(raw_key)
else:
                raise Exception("Unknown key encoding")
# Collect test cases from ciphertext manifest
for test_case in ciphertext_manifest["test_cases"]:
key_ids = []
algorithm = aws_encryption_sdk.Algorithm.get_by_id(int(test_case["algorithm"], 16))
for key in test_case["master_keys"]:
sys.stderr.write("XC:: " + json.dumps(key) + "\n")
if key["provider_id"] == StaticStoredMasterKeyProvider.provider_id:
key_ids.append(
RawKeyDescription(
key["encryption_algorithm"],
key.get("key_bits", algorithm.data_key_len * 8),
key.get("padding_algorithm", ""),
key.get("padding_hash", ""),
).key_id
)
if key_ids:
_test_cases.append(
Scenario(
os.path.join(base_dir, test_case["plaintext"]["filename"]),
os.path.join(base_dir, test_case["ciphertext"]["filename"]),
key_ids,
)
)
return _test_cases
@pytest.mark.parametrize("scenario", _generate_test_cases())
def test_decrypt_from_file(scenario):
"""Tests decrypt from known good files."""
with open(scenario.ciphertext_filename, "rb") as infile:
ciphertext = infile.read()
with open(scenario.plaintext_filename, "rb") as infile:
plaintext = infile.read()
key_provider = StaticStoredMasterKeyProvider()
key_provider.add_master_keys_from_list(scenario.key_ids)
decrypted_ciphertext, _header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
assert decrypted_ciphertext == plaintext
| 39.336957
| 116
| 0.678088
| 1,830
| 0.252832
| 0
| 0
| 1,567
| 0.216496
| 0
| 0
| 1,679
| 0.23197
|
7907463be0399381dbb251da2399a40b35f47313
| 986
|
py
|
Python
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | 2
|
2019-04-06T05:08:15.000Z
|
2019-04-06T19:23:44.000Z
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | null | null | null |
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py
|
opatua/cookiecutter-flask-api
|
67e5a37ee2b8ca32823ac2f8051bab6653b3b44e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from {{cookiecutter.app_name}}.config import app_config
from {{cookiecutter.app_name}}.models import db, bcrypt
from {{cookiecutter.app_name}}.resources import Login, Register
from {{cookiecutter.app_name}}.schemas import ma
def create_app(env_name):
"""
Create app
"""
    # app initialization
app = Flask(__name__)
CORS(app)
app.config.from_object(app_config[env_name])
# initializing bcrypt and db
bcrypt.init_app(app)
db.init_app(app)
ma.init_app(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
# Route
api = Api(app)
# user endpoint
api.add_resource(Login, '/auth/login')
api.add_resource(Register, '/auth/register')
return app
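# Minimal usage sketch (illustrative, not part of the template); it assumes that
# 'development' is one of the keys defined in app_config:
#
#     app = create_app('development')
#     app.run()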
| 22.930233
| 63
| 0.704868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.140974
|
79085a6c06f94f9781c1a341cbcc3d429b30a260
| 17,381
|
py
|
Python
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 157
|
2019-04-14T20:43:11.000Z
|
2022-03-30T08:30:33.000Z
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 364
|
2019-04-18T15:54:49.000Z
|
2022-03-31T09:50:02.000Z
|
docs/examples/Moving_Platform_Simulation.py
|
Red-Portal/Stone-Soup-1
|
267621c86161a839da9b144c2745d28d9166d903
|
[
"MIT"
] | 86
|
2019-04-20T02:01:18.000Z
|
2022-03-28T01:03:11.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform, and how a defined moving
platform can be exploited as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to set up and configure a simulation environment in order to provide a
# multi-sensor moving platform; as such, the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them, a review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
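# %%
# As an illustrative aside (not part of the original example), for a chosen time step
# ``dt`` the matrix :math:`F_{k}` above can be written out directly with NumPy.
# Stone Soup assembles the equivalent block-diagonal matrix internally from the
# combination of three constant velocity models used below.
dt = 1.0  # assumed time step in seconds, for illustration only
F_k = np.array([[1, dt, 0,  0, 0,  0],
                [0,  1, 0,  0, 0,  0],
                [0,  0, 1, dt, 0,  0],
                [0,  0, 0,  1, 0,  0],
                [0,  0, 0,  0, 1, dt],
                [0,  0, 0,  0, 0,  1]])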
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
# \arctan(\mathcal{y},\mathcal{x}) \ \
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
# \mathbf{z}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
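# %%
# As an illustrative aside (not part of the original example), the noise-free part of
# the measurement function :math:`h` above maps a relative Cartesian position to
# elevation and bearing; a plain NumPy sketch is shown here.
def _elevation_bearing(x, y, z):
    """Illustrative helper: return (elevation, bearing) in radians for a relative position (x, y, z)."""
    elevation = np.arcsin(z / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    bearing = np.arctan2(y, x)
    return elevation, bearing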
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platform's local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example, and creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependent).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32 # specified in radians per seconds...
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
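# %%
# Illustrative check (not part of the original example): a turn rate of
# :math:`\pi/32` rad/s applied for 8 seconds turns the target through
# :math:`\pi/4` rad, i.e. the 45 degrees described above.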
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# feed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 25 seconds.
times = np.arange(0, 24, 1) # 25 seconds
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have set up our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater, this is because each sensor adds their measurement model to
# each detection they generate. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
| 44.452685
| 120
| 0.693976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,924
| 0.570968
|
79093ae44bacb9494b8349f6098239d9b14a8d37
| 567
|
py
|
Python
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 27
|
2015-09-01T00:19:34.000Z
|
2021-12-05T01:59:01.000Z
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 26
|
2016-01-03T09:31:39.000Z
|
2018-06-01T18:05:58.000Z
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 7
|
2016-01-03T07:09:04.000Z
|
2018-04-06T00:24:14.000Z
|
#MenuTitle: Generate lowercase from uppercase
"""
Generate lowercase a-z from uppercase A-Z
TODO (M Foley) Generate all lowercase glyphs, not just a-z
"""
font = Glyphs.font
glyphs = list('abcdefghijklmnopqrstuvwxyz')
masters = font.masters
for glyph_name in glyphs:
glyph = GSGlyph(glyph_name)
glyph.updateGlyphInfo()
font.glyphs.append(glyph)
for idx,layer in enumerate(masters):
comp_name = glyph_name.upper()
component = GSComponent(comp_name, (0,0))
glyph.layers[idx].components.append(component)
Glyphs.redraw()
| 24.652174
| 58
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.320988
|
7909cb31dce0f5d0d244a16c56e9e7a864d3c124
| 2,372
|
py
|
Python
|
src/gui/SubVision.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
src/gui/SubVision.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
src/gui/SubVision.py
|
bochkovoi/AHP
|
b51dc598f8f7a65a2ade039d887dccfa6d070f1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtGui, QtCore
import sys, os.path as op
path1 = op.join( op.abspath(op.dirname(__file__)), '..', 'Structure')
path2 = op.join( op.abspath(op.dirname(__file__)), '..')
sys.path.append(path1)
sys.path.append(path2)
from Structure import *
from VisObject import *
class SubVision( QtWidgets.QWidget ):
    """ Base window class for displaying subordinate objects """
    def __init__( self, main_object, is_change=True, parent=None ):
        super().__init__( parent=parent )
        # Set the main object
        self.__obj = main_object
        # Set whether the elements may be modified (default - yes)
        self.is_change = is_change
        self.initUI()
    def initUI( self ):
        ''' Initialize the window contents '''
        # Add the data list widget and populate it with the subordinate objects
        self.sub_objs = QtWidgets.QListWidget( )
        for obj in self.__obj.sub_objects:
            # Create a list item
            a = QtWidgets.QListWidgetItem()
            # Attach the object subordinate to the base object
            a.sub_obj = obj
            # Set the item text to the subordinate object's name
            a.setText( obj.name )
            # Add it to the list
            self.sub_objs.addItem( a )
        # Declare the form layout and add the list of subordinate objects to it
        self.form = QtWidgets.QFormLayout()
        self.form.addRow(self.sub_objs)
        self.setLayout(self.form)
        # Connect the double-click signal to the handler
        self.sub_objs.itemDoubleClicked.connect( self.isDoubleClicked )
    def isDoubleClicked( self, obj ):
        # If editing is allowed, open the edit window, otherwise the read-only view window
        if self.is_change:
            sub_window = ChangeVisObject( obj.sub_obj, parent=self )
        else:
            sub_window = SimpleVisObject( obj.sub_obj, parent=self )
        sub_window.setWindowTitle( "Editing object: " + obj.sub_obj.name )
        # Disable this window or its parent
        if self.parent() is None:
            self.setEnabled( False )
        else:
            self.parent().setEnabled( False )
        # Enable the child window and show it
        sub_window.setEnabled( True )
        sub_window.show()
| 38.885246
| 85
| 0.643339
| 2,615
| 0.884941
| 0
| 0
| 0
| 0
| 0
| 0
| 1,343
| 0.454484
|
790a31602a2e6231958a1ed23fbe61a5ef5fd6fa
| 23
|
py
|
Python
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 123
|
2015-01-12T06:43:22.000Z
|
2022-03-20T18:06:46.000Z
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 103
|
2015-01-08T18:35:57.000Z
|
2022-01-18T01:44:14.000Z
|
examples/ndfd/ndfd.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 54
|
2015-02-15T17:12:00.000Z
|
2022-03-07T23:02:32.000Z
|
from raw.ndfd import *
| 11.5
| 22
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
790a4f9b1ca5315576470030e7218150601d0818
| 56
|
py
|
Python
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 43
|
2017-12-27T05:57:00.000Z
|
2022-03-18T10:07:28.000Z
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 10
|
2018-02-07T11:20:37.000Z
|
2021-04-22T21:44:19.000Z
|
pandoc_mustache/__init__.py
|
copart/pandoc-mustache
|
f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa
|
[
"CC0-1.0"
] | 8
|
2018-11-05T13:10:35.000Z
|
2021-08-30T18:14:02.000Z
|
from .version import __version__
import pandoc_mustache
| 18.666667
| 32
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
790a863e1b7c7976c78fdf15265431950cd90024
| 5,163
|
py
|
Python
|
espnet2/gan_tts/espnet_model.py
|
actboy/espnet
|
c0ca15e9da6e89ff6df5fe70ed08654deeca2ac0
|
[
"Apache-2.0"
] | null | null | null |
espnet2/gan_tts/espnet_model.py
|
actboy/espnet
|
c0ca15e9da6e89ff6df5fe70ed08654deeca2ac0
|
[
"Apache-2.0"
] | 1
|
2021-08-11T08:35:36.000Z
|
2021-08-13T07:12:47.000Z
|
espnet2/gan_tts/espnet_model.py
|
shirayu/espnet
|
66f0f8382b0e1195bed7c280c29711f8436b3db4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
| 35.363014
| 81
| 0.627929
| 4,289
| 0.830719
| 49
| 0.009491
| 69
| 0.013364
| 0
| 0
| 1,996
| 0.386597
|
790b72b5977bc41bc1fa4f394888d33023e6e512
| 1,309
|
py
|
Python
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | 5
|
2020-11-01T00:29:22.000Z
|
2022-01-24T19:09:47.000Z
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | 1
|
2022-02-09T01:59:47.000Z
|
2022-02-09T01:59:47.000Z
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#
# This program shows how to use MPI_Alltoall. Each processor
# sends/receives a different random number to/from the other processors.
#
# numpy is required
import numpy
from numpy import *
# mpi4py module
from mpi4py import MPI
import sys
def myquit(mes):
MPI.Finalize()
print(mes)
sys.exit()
# Initialize MPI and print out hello
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
# We are going to send/recv a single value to/from
# each processor. Here we allocate arrays
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")
# Fill the send arrays with random numbers
random.seed(myid)
for i in range(0, numprocs):
s_vals[i]=random.randint(1,10)
print("myid=",myid,"s_vals=",s_vals)
# Send/recv to/from all
comm.Alltoall(s_vals, r_vals)
print("myid=",myid,"r_vals=",r_vals)
MPI.Finalize()
# Note, the sent values and the recv values are
# like a transpose of each other
#
# mpiexec -n 4 ./P_ex07.py | grep s_v | sort
# myid= 0 s_vals= [6 1 4 4]
# myid= 1 s_vals= [6 9 6 1]
# myid= 2 s_vals= [9 9 7 3]
# myid= 3 s_vals= [9 4 9 9]
# mpiexec -n 4 ./P_ex07.py | grep r_v | sort
# myid= 0 r_vals= [6 6 9 9]
# myid= 1 r_vals= [1 9 9 4]
# myid= 2 r_vals= [4 6 7 9]
# myid= 3 r_vals= [4 1 3 9]
| 20.453125
| 63
| 0.675325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 815
| 0.622613
|
790c207725e1c54d9a32196cd02ceb7f9a4e7af7
| 18,376
|
py
|
Python
|
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | 1
|
2022-03-14T12:46:38.000Z
|
2022-03-14T12:46:38.000Z
|
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
""" render_fmo.py renders obj file to rgb image with fmo model
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj: wrapper function for render() render
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
modified by rozumden
"""
import sys
import os
import random
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
# import moviepy.editor as mpy
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
## Input: pars is either 2x2 (line) or 2x3 (parabola)
if pars.shape[1] == 2:
pars = np.concatenate( (pars, np.zeros((2,1))),1)
ns = 2
else:
ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
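# Illustrative note (not part of the original renderer): the columns of ``pars`` are
# [start, direction(, acceleration)] in pixel coordinates, so e.g.
#   pars = np.array([[60., 5., 0.], [40., 3., 0.]])
# draws an anti-aliased segment from pixel (row 60, col 40) to (row 65, col 43) into H.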
def open_log(temp_folder = g_temp): # redirect output to log file
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old): # disable output redirection
os.close(1)
os.dup(old)
os.close(old)
def clear_mesh():
""" clear all meshes in the secene
"""
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
        bpy.data.scenes[sce].render.tile_y = g_hilbert_spiral
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
# bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
# image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #
def render(obj_path, viewpoint, temp_folder):
"""render rbg image
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
obj_path: a string variable indicate the obj file path
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
# rot_base = np.array([math.pi/2,0,0])
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
return True, True ## transparent object
los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True) #start rendering
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
return False, True ## sample different starting viewpoint
else:
do_repeat = True ## just sample another motion direction
if do_repeat:
break
close_log(old)
if do_repeat == False:
break
if do_repeat: ## sample different starting viewpoint
return False, True
return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
meanval = np.mean(Diff[MH > 0.05])
print("Contrast {}".format(meanval))
if meanval < 0.2:
return False
if g_skip_small:
sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])
print("Size percentage {}".format(sizeper))
if sizeper < 0.05:
return False
Im = Im[:,:,[2,1,0]]
Ims = Image.fromarray((Im * 255).astype(np.uint8))
Ims.save(path)
Ball = np.zeros(B.shape+(n_im,))
Ball[:,:,:,0] = B
for ki in range(1,n_im):
bgrki_path = seq_images[bgri-ki]
Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
Ball[Ball > 1] = 1
Ball[Ball < 0] = 0
Bmed = np.median(Ball,3)
Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))
Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))
# Ims.save(os.path.join(g_temp,"I.png"))
# Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png"))
# Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png"))
# Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,"M.png"))
# Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png"))
# Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png"))
if False:
Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])
Fwr = (Fwr * 255).astype(np.uint8)
# Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255
out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True)
for ki in range(g_fmo_steps):
out.write(Fwr[:,:,:,ki])
out.release()
return True
def render_obj(obj_path, path, objid, obj_name, temp_folder):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
obj_path: a string variable indicate the obj file path
"""
vps_path = random.sample(g_view_point_file, 1)[0]
vps = list(load_viewpoint(vps_path))
random.shuffle(vps)
save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid))
gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid))
video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid))
if not os.path.exists(gt_path):
os.mkdir(gt_path)
image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = gt_path
for imt in bpy.data.images:
bpy.data.images.remove(imt)
if g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.context.scene.objects.active = bpy.data.objects[oi]
# pdb.set_trace()
# for m in bpy.data.materials:
# bpy.data.materials.remove(m)
# bpy.ops.object.material_slot_remove()
bpy.ops.object.editmode_toggle()
bpy.ops.uv.cube_project()
bpy.ops.object.editmode_toggle()
texture_images = os.listdir(g_texture_path)
texture = random.choice(texture_images)
tex_path = os.path.join(g_texture_path,texture)
# mat = bpy.data.materials.new(texture)
# mat.use_nodes = True
# nt = mat.node_tree
# nodes = nt.nodes
# links = nt.links
# # Image Texture
# textureNode = nodes.new("ShaderNodeTexImage")
# textureNode.image = bpy.data.images.load(tex_path)
# links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# mat.specular_intensity = 0
# bpy.data.objects[oi].active_material = mat
# print(bpy.data.objects[oi].active_material)
for mat in bpy.data.materials:
nodes = mat.node_tree.nodes
links = mat.node_tree.links
textureNode = nodes.new("ShaderNodeTexImage")
textureNode.image = bpy.data.images.load(tex_path)
links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# print(bpy.data.objects[oi].active_material)
tri = 0
while tri <= g_max_trials:
tri += 1
vp = random.sample(vps, 1)[0]
sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder)
if sample_different_vp:
if sample_different_object:
print('Transparent object!')
return False
print('Rendering failed, repeating')
continue
success = make_fmo(save_path, gt_path, video_path)
if success:
return True
print('Making FMO failed, repeating')
return False
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
if g_render_light:
bpy.data.objects['Lamp'].data.energy = 50
bpy.ops.object.lamp_add(type='SUN')
bpy.data.objects['Sun'].data.energy = 5
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
init_all()
argv = sys.argv
argv = argv[argv.index("--") + 1:]
start_index = int(argv[0])
step_index = int(argv[1])
print('Start index {}, step index {}'.format(start_index, step_index))
temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'
for obj_name in g_render_objs[start_index:(start_index+step_index)]:
print("Processing object {}".format(obj_name))
obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
if not os.path.exists(os.path.join(obj_folder,"GT")):
os.mkdir(os.path.join(obj_folder,"GT"))
num = g_shapenet_categlory_pair[obj_name]
search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
pathes = glob.glob(search_path, recursive=True)
random.shuffle(pathes)
objid = 1
tri = 0
while objid <= g_number_per_category:
print(" instance {}".format(objid))
clear_mesh()
path = random.sample(pathes, 1)[0]
old = open_log(temp_folder)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True)
# bpy.ops.import_scene.obj(filepath=path)
close_log(old)
#combine_objects()
#scale_objects(0.5)
result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
if result:
objid += 1
tri = 0
else:
print('Error! Rendering another object from the category!')
tri += 1
if tri > g_max_trials:
                print('No object found in the category!')
break
| 39.181237
| 200
| 0.619286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,743
| 0.20369
|
790ca91d1e267c27a75b0c472c8aadefd871871f
| 11,385
|
py
|
Python
|
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | null | null | null |
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | 1
|
2021-01-11T03:42:43.000Z
|
2021-02-19T17:06:59.000Z
|
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import sys
import argparse
import os
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data, load_data_one
from collections import defaultdict
from argparse import ArgumentParser
from decode_helper import decode_one
import sys
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tf_helper import train, evaluate, decode_data, decode_data_recover
from model1 import construct_graph
def init_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--data_path',
default=os.path.dirname(os.path.abspath(__file__)) + '/data',
type=str,
help='Data path.')
arg_parser.add_argument(
'--load_data', default=False, type=bool, help='Load data.')
arg_parser.add_argument(
'--data',
choices=['wikisql', 'spider', 'overnight', 'overnight_set'],
default='wikisql',
help='data to train & test')
#arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer')
arg_parser.add_argument(
'--subset', choices=['all'], default='all', help='Subset of data.')
arg_parser.add_argument(
'--maxlen', default=60, type=int, help='Data record max length.')
arg_parser.add_argument(
'--annotation_path',
default=os.path.dirname(os.path.abspath(__file__)) +
'/data/DATA/wiki/',
type=str,
help='Data annotation path.')
arg_parser.add_argument(
'--mode',
choices=['train', 'infer', 'transfer','txt'],
default='infer',
help='Run mode')
#### Model configuration ####
arg_parser.add_argument(
'--cell',
choices=['gru'],
default='gru',
help='Type of cell used, currently only standard GRU cell is supported'
)
arg_parser.add_argument(
'--output_vocab_size',
default=20637,
#default=20452,
type=int,
help='Output vocabulary size.')
# Embedding sizes
arg_parser.add_argument(
'--embedding_dim',
default=300,
type=int,
help='Size of word embeddings')
#Hidden sizes
arg_parser.add_argument(
'--dim', default=400, type=int, help='Size of GRU hidden states')
arg_parser.add_argument(
'--hidden_size',
default=256,
type=int,
help='Size of LSTM hidden states')
arg_parser.add_argument(
'--no_copy',
default=False,
action='store_true',
help='Do not use copy mechanism')
#### Training ####
arg_parser.add_argument(
'--vocab', type=str, help='Path of the serialized vocabulary')
arg_parser.add_argument(
'--glove_embed_path',
default=None,
type=str,
        help='Path to pretrained GloVe embedding')
arg_parser.add_argument(
'--batch_size', default=128, type=int, help='Batch size')
arg_parser.add_argument(
'--in_drop', default=0., type=float, help='In dropout rate')
arg_parser.add_argument(
'--out_drop', default=0., type=float, help='Out dropout rate')
# training details
arg_parser.add_argument(
'--valid_epoch_interval',
default=1,
type=int,
help='Perform validation every x epoch')
arg_parser.add_argument(
'--clip_grad', default=5., type=float, help='Clip gradients')
arg_parser.add_argument(
'--total_epochs', default=40, type=int, help='# of training epoches')
arg_parser.add_argument(
'--epochs', default=1, type=int, help='Record per x epoches')
arg_parser.add_argument(
'--lr', default=0.0001, type=float, help='Learning rate')
arg_parser.add_argument(
'--lr_decay',
default=0.5,
type=float,
help='decay learning rate if the validation performance drops')
#### decoding/validation/testing ####
arg_parser.add_argument(
'--load_model', default=False, type=bool, help='Whether to load model')
arg_parser.add_argument(
'--beam_width', default=5, type=int, help='Beam size for beam search')
arg_parser.add_argument(
'--decode_max_time_step',
default=100,
type=int,
help='Maximum number of time steps used '
'in decoding and sampling')
args = arg_parser.parse_args()
return args
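# Example invocations (illustrative; values other than the defaults are assumptions):
#   python main.py --mode train --data wikisql --batch_size 128 --total_epochs 40
#   python main.py --mode infer --subset all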
def model(args, train_env, infer_env):
tf.reset_default_graph()
train_graph = tf.Graph()
infer_graph = tf.Graph()
with train_graph.as_default():
train_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
train_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
train_env.train_op, train_env.loss, train_env.acc, sample_ids, logits = construct_graph(
"train", train_env, args)
train_env.saver = tf.train.Saver()
#[print(n.name) for n in tf.get_default_graph().as_graph_def().node if 'xxxxx' in n.name]
with infer_graph.as_default():
infer_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
infer_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
_, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(
"infer", infer_env, args)
infer_env.infer_saver = tf.train.Saver()
return train_graph, infer_graph
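# Note: model() builds separate train/infer graphs whose variables share names, so a
# checkpoint written by train_env.saver can be restored by infer_env.infer_saver below.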
def inference(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========dev set============')
decode_data(sess, infer_env, X_dev, y_dev)
em = decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')
print('==========test set===========')
decode_data(sess, infer_env, X_test, y_test)
test_em = decode_data_recover(sess, infer_env, X_test, y_test,
'test')
return
def infer_one(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========decode============')
X_one = load_data_one(args.maxlen, 'qs.txt')
decode_one(sess, infer_env, X_one)
return
def train_model(args):
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
train_graph, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
args.load_model = False
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
max_em, global_test_em, best_base = -1, -1, -1
acc = 0
sess1 = tf.InteractiveSession(graph=train_graph)
sess1.run(tf.global_variables_initializer())
sess1.run(tf.local_variables_initializer())
sess2 = tf.InteractiveSession(graph=infer_graph)
sess2.run(tf.global_variables_initializer())
    sess2.run(tf.local_variables_initializer())
    for base in range(args.total_epochs // args.epochs):
print('\nIteration: %d (%d epochs)' % (base, args.epochs))
model2load = train(
sess1,
train_env,
X_train,
y_train,
epochs=args.epochs,
load=args.load_model,
name=args.subset,
batch_size=args.batch_size,
base=base,
model2Bload=model2load)
args.load_model = True
infer_env.infer_saver.restore(sess2, model2load)
print('===========dev set============')
dev_em = decode_data(sess2, infer_env, X_dev, y_dev)
dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,
'dev')
print('==========test set===========')
test_em = decode_data(sess2, infer_env, X_test, y_test)
test_em = decode_data_recover(sess2, infer_env, X_test, y_test,
'test')
if dev_em > max_em:
max_em = dev_em
global_test_em = test_em
best_base = base
print('\n Saving model for best testing')
train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))
print('Max EM acc: %.4f during %d iteration.' % (max_em, best_base))
print('test EM acc: %.4f ' % global_test_em)
return
def transfer(args):
load_model = args.load_model if args.mode == 'train' else True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'overnight'
args.load_data = True
#X_tran, y_tran = load_data(args)
X_tran, y_tran = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight')
args.data = 'overnight_set'
#tran_sets = load_data(args)
tran_sets = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight_set')
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('========subset transfer set========')
subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']
for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):
print('---------' + subset + '---------')
tran_em = decode_data(
sess,
infer_env,
X_tran_subset,
y_tran_subset,
filename=str(subset + '.txt'))
print('===========transfer set============')
tran_em = decode_data(sess, infer_env, X_tran, y_tran)
return
if __name__ == '__main__':
args = init_args()
print(args)
if args.mode == 'train':
print('\nTrain model.')
train_model(args)
elif args.mode == 'infer':
print('\nInference.')
        inference(args)
elif args.mode == 'txt':
print('\nInference from txt.')
infer_one(args)
elif args.mode == 'transfer':
print('\nTransfer.')
transfer(args)
| 33.683432
| 133
| 0.623188
| 100
| 0.008783
| 0
| 0
| 0
| 0
| 0
| 0
| 2,630
| 0.231006
|
790e259abafc3b78efd22c4e49725337604761c5
| 55
|
py
|
Python
|
src/__init__.py
|
codespacedot/CodeSpaceAPI
|
22b457088aa592c4fb9111718810075d2643d9ca
|
[
"Apache-2.0"
] | 3
|
2021-07-05T17:28:14.000Z
|
2021-12-07T10:08:14.000Z
|
src/__init__.py
|
git-vish/CodeSpaceAPI
|
7ad4327e0eef3019098730358c4a23312bc85615
|
[
"Apache-2.0"
] | 2
|
2021-07-29T13:55:15.000Z
|
2021-07-31T16:49:03.000Z
|
src/__init__.py
|
git-vish/CodeSpaceAPI
|
7ad4327e0eef3019098730358c4a23312bc85615
|
[
"Apache-2.0"
] | 3
|
2021-07-01T16:32:20.000Z
|
2021-07-05T04:50:30.000Z
|
"""FastAPI Project for CodeSpace.
https://csdot.ml
"""
| 13.75
| 33
| 0.690909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.981818
|
790e708e4fd42df30662fd05e0fd27cb6d2b56ae
| 1,525
|
py
|
Python
|
gdsfactory/components/cdsem_straight.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 42
|
2020-05-25T09:33:45.000Z
|
2022-03-29T03:41:19.000Z
|
gdsfactory/components/cdsem_straight.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 133
|
2020-05-28T18:29:04.000Z
|
2022-03-31T22:21:42.000Z
|
gdsfactory/components/cdsem_straight.py
|
jorgepadilla19/gdsfactory
|
68e1c18257a75d4418279851baea417c8899a165
|
[
"MIT"
] | 17
|
2020-06-30T07:07:50.000Z
|
2022-03-17T15:45:27.000Z
|
"""CD SEM structures."""
from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.text_rectangular import text_rectangular
from gdsfactory.cross_section import strip
from gdsfactory.grid import grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
text_rectangular_mini = partial(text_rectangular, size=1)
LINE_LENGTH = 420.0
@cell
def cdsem_straight(
widths: Tuple[float, ...] = (0.4, 0.45, 0.5, 0.6, 0.8, 1.0),
length: float = LINE_LENGTH,
cross_section: CrossSectionFactory = strip,
text: Optional[ComponentFactory] = text_rectangular_mini,
spacing: float = 3,
) -> Component:
"""Returns straight waveguide lines width sweep.
Args:
widths: for the sweep
length: for the line
cross_section: for the lines
text: optional text for labels
spacing: edge to edge spacing
"""
lines = []
for width in widths:
cross_section = partial(cross_section, width=width)
line = straight_function(length=length, cross_section=cross_section)
if text:
line = line.copy()
t = line << text(str(int(width * 1e3)))
t.xmin = line.xmax + 5
t.y = 0
lines.append(line)
return grid(lines, spacing=(0, spacing))
if __name__ == "__main__":
c = cdsem_straight()
c.show()
| 28.773585
| 76
| 0.685902
| 0
| 0
| 0
| 0
| 924
| 0.605902
| 0
| 0
| 274
| 0.179672
|
79107ce8bb54a81242a6381a90d895c5d61ecf37
| 10,057
|
py
|
Python
|
Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019-2022 Luca Fedeli, Yinjian Zhao, Hannah Klion
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the reduced particle diagnostics.
# The setup is a uniform plasma with electrons, protons and photons.
# Various particle and field quantities are written to file using the reduced diagnostics
# and compared with the corresponding quantities computed from the data in the plotfiles.
import os
import sys
import numpy as np
import openpmd_api as io
from scipy.constants import c
from scipy.constants import epsilon_0 as eps0
from scipy.constants import m_e, m_p
from scipy.constants import mu_0 as mu0
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
def do_analysis(single_precision = False):
fn = sys.argv[1]
ds = yt.load(fn)
ad = ds.all_data()
ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)
opmd_i = opmd.iterations[200]
#--------------------------------------------------------------------------------------------------
# Part 1: get results from plotfiles (label '_yt')
#--------------------------------------------------------------------------------------------------
# Quantities computed from plotfiles
values_yt = dict()
domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
dx = domain_size / ds.domain_dimensions
# Electrons
x = ad['electrons', 'particle_position_x'].to_ndarray()
y = ad['electrons', 'particle_position_y'].to_ndarray()
z = ad['electrons', 'particle_position_z'].to_ndarray()
uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['electrons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
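    # Accumulate per-cell weighted sums; dividing by the (nonzero-adjusted) cell weight
    # below gives the weighted averages, e.g. zavg = sum(z_i * w_i) / sum(w_i) per cell.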
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['electrons: zavg'] = zavg / wavg_adj
values_yt['electrons: uzavg'] = uzavg / wavg_adj
values_yt['electrons: zuzavg'] = zuzavg / wavg_adj
values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# protons
x = ad['protons', 'particle_position_x'].to_ndarray()
y = ad['protons', 'particle_position_y'].to_ndarray()
z = ad['protons', 'particle_position_z'].to_ndarray()
uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
w = ad['protons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['protons: zavg'] = zavg / wavg_adj
values_yt['protons: uzavg'] = uzavg / wavg_adj
values_yt['protons: zuzavg'] = zuzavg / wavg_adj
values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# Photons (momentum in units of m_e c)
x = ad['photons', 'particle_position_x'].to_ndarray()
y = ad['photons', 'particle_position_y'].to_ndarray()
z = ad['photons', 'particle_position_z'].to_ndarray()
uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['photons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['photons: zavg'] = zavg / wavg_adj
values_yt['photons: uzavg'] = uzavg / wavg_adj
values_yt['photons: zuzavg'] = zuzavg / wavg_adj
values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
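    #--------------------------------------------------------------------------------------------------
    # Part 2: get results from the reduced particle diagnostics (plotfile and openPMD outputs)
    #--------------------------------------------------------------------------------------------------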
values_rd = dict()
# Load reduced particle diagnostic data from plotfiles
values_rd['electrons: zavg'] = ad0[('boxlib','z_electrons')]
values_rd['protons: zavg'] = ad0[('boxlib','z_protons')]
values_rd['photons: zavg'] = ad0[('boxlib','z_photons')]
values_rd['electrons: uzavg'] = ad0[('boxlib','uz_electrons')]
values_rd['protons: uzavg'] = ad0[('boxlib','uz_protons')]
values_rd['photons: uzavg'] = ad0[('boxlib','uz_photons')]
values_rd['electrons: zuzavg'] = ad0[('boxlib','zuz_electrons')]
values_rd['protons: zuzavg'] = ad0[('boxlib','zuz_protons')]
values_rd['photons: zuzavg'] = ad0[('boxlib','zuz_photons')]
values_rd['electrons: uzavg_filt'] = ad0[('boxlib','uz_filt_electrons')]
values_rd['protons: uzavg_filt'] = ad0[('boxlib','uz_filt_protons')]
values_rd['photons: uzavg_filt'] = ad0[('boxlib','uz_filt_photons')]
values_opmd = dict()
# Load reduced particle diagnostic data from OPMD output
values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
opmd.flush()
del opmd
#--------------------------------------------------------------------------------------------------
# Part 3: compare values from plotfiles and diagnostics and print output
#--------------------------------------------------------------------------------------------------
error_plt = dict()
error_opmd = dict()
tolerance = 5e-3 if single_precision else 1e-12
# if single precision, increase tolerance from default value
check_tolerance = 5e-3 if single_precision else 1e-9
for k in values_yt.keys():
# check that the zeros line up, since we'll be ignoring them in the error calculation
assert(np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
error_plt[k] = np.max(abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
print(k, 'relative error plotfile = ', error_plt[k])
assert(error_plt[k] < tolerance)
assert(np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))
error_opmd[k] = np.max(abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
        print(k, 'relative error openPMD = ', error_opmd[k])
        assert(error_opmd[k] < tolerance)
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)
| 46.995327
| 126
| 0.645521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,955
| 0.293825
|
7910b1ce3b116e87579add349dab0e8dadaa95e7
| 1,420
|
py
|
Python
|
predict.py
|
stonebegin/Promise12-3DUNet
|
d48d95ae7f2da98d068f84391dc547abd968981d
|
[
"MIT"
] | 2
|
2020-12-20T12:35:24.000Z
|
2021-01-04T03:21:37.000Z
|
predict.py
|
stonebegin/Promise12-3DUNet
|
d48d95ae7f2da98d068f84391dc547abd968981d
|
[
"MIT"
] | 1
|
2020-12-27T05:08:02.000Z
|
2020-12-27T08:08:50.000Z
|
predict.py
|
stonebegin/Promise12-3DUNet
|
d48d95ae7f2da98d068f84391dc547abd968981d
|
[
"MIT"
] | null | null | null |
import importlib
import os
from datasets.hdf5 import get_test_loaders
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model
logger = utils.get_logger('UNet3DPredictor')
def _get_predictor(model, loader, output_file, config):
predictor_config = config.get('predictor', {})
class_name = predictor_config.get('name', 'StandardPredictor')
m = importlib.import_module('unet3d.predictor')
predictor_class = getattr(m, class_name)
# model: UNet3D, loader: test_loader, output_file: data.h5, config: config.yaml
return predictor_class(model, loader, output_file, config, **predictor_config)
def main():
# Load configuration
config = load_config()
# Create the model
model = get_model(config)
# Load model state
model_path = config['model_path']
logger.info(f'Loading model from {model_path}...')
utils.load_checkpoint(model_path, model)
logger.info(f"Sending the model to '{config['device']}'")
model = model.to(config['device'])
logger.info('Loading HDF5 datasets...')
test_loader = get_test_loaders(config)['test']
for i, data_pair in enumerate(test_loader):
output_file = 'predict_' + str(i) + '.h5'
predictor = _get_predictor(model, data_pair, output_file, config)
predictor.predict()
if __name__ == '__main__':
main()
| 30.869565
| 84
| 0.687324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 368
| 0.259155
|
7910bb4a1911643dedff502020dff254dc351cc8
| 9,248
|
py
|
Python
|
gitScrabber/scrabTasks/file/languageDetector.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/scrabTasks/file/languageDetector.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/scrabTasks/file/languageDetector.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2017 Andreas Poppele
Copyright (c) 2017 Roland Jaeger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import FileTask
import os
name = "LanguageDetector"
version = "1.1.1"
class LanguageDetector(FileTask):
cpp_extensions = ['.cpp', '.c++', '.cc',
'.cxx', '.c', '.h', '.hpp', '.hxx']
c_extensions = ['.c', '.h']
rust_extensions = ['.rs']
ruby_extensions = ['.rb']
java_extensions = ['.java']
go_extensions = ['.go']
php_extensions = ['.php', '.phtml', '.php3', '.php4', '.php5', '.php7',
'.phps']
js_extensions = ['.js']
objective_c_extensions = ['.h', '.m', '.mm', '.C']
swift_extensions = ['.swift']
c_sharp_extensions = ['.cs']
python_extensions = ['.py']
"""
Tries to detect the programming language of a library based on the file
extension
Example:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
:param task_params: Parameter given explicitly for this task, for all
projects, defined in the task.yaml
:param global_args: Arguments that will be passed to all tasks. They
_might_ contain something that is useful for the
task, but the task has to check if it is _there_ as
these are user provided. If they are needed to work
that check should happen in the argHandler.
"""
def __init__(self, parameter, global_args):
super(LanguageDetector, self).__init__(name, version, parameter,
global_args)
# dictionary containing the common file extensions
# for each of the languages
self.__language_extensions = self.__get_language_extensions()
self.__report = self.__get_files_per_language()
def __get_language_extensions(self):
"""
        :returns: A dictionary of the considered language extensions
"""
return {
'C++':
self.cpp_extensions,
'C':
self.c_extensions,
'Rust':
self.rust_extensions,
'Ruby':
self.ruby_extensions,
'Java':
self.java_extensions,
'Go':
self.go_extensions,
'PHP':
self.php_extensions,
'JavaScript':
self.js_extensions,
'Objective-C':
self.objective_c_extensions,
'Swift':
self.swift_extensions,
'C#':
self.c_sharp_extensions,
'Python':
self.python_extensions
}
def __get_files_per_language(self):
"""
        :returns: A dictionary of the considered languages, their
                  extensions and the number of files that have that extension
                  (default=0)
"""
return {
'C++':
{extension: 0 for extension in self.cpp_extensions},
'C':
{extension: 0 for extension in self.c_extensions},
'Rust':
{extension: 0 for extension in self.rust_extensions},
'Ruby':
{extension: 0 for extension in self.ruby_extensions},
'Java':
{extension: 0 for extension in self.java_extensions},
'Go':
{extension: 0 for extension in self.go_extensions},
'PHP':
{extension: 0 for extension in self.php_extensions},
'JavaScript':
{extension: 0 for extension in self.js_extensions},
'Objective-C':
{extension: 0 for extension in self.objective_c_extensions},
'Swift':
{extension: 0 for extension in self.swift_extensions},
'C#':
{extension: 0 for extension in self.c_sharp_extensions},
'Python':
{extension: 0 for extension in self.python_extensions},
}
def __decide_h_extension(self):
"""
Decides which language 'owns' how many .h files
:returns: The report with divided header files
"""
report = self.__report
h_files = report['C']['.h']
if h_files > 0:
c_files = (sum(report['C'].values()) - h_files)
cpp_files = (sum(report['C++'].values())
- h_files
- report['C++']['.c'])
oc_files = (
sum(report['Objective-C'].values()) - h_files)
lang_fiels = c_files + cpp_files + oc_files
# Header only libraries are 'common' in C and C++
# the benefit of doubt goes to C
if lang_fiels == 0:
report['C']['.h'] = 1
report['C++']['.h'] = 0
report['Objective-C']['.h'] = 0
else:
report['C']['.h'] = (h_files *
c_files / lang_fiels)
report['C++']['.h'] = (h_files *
cpp_files / lang_fiels)
report['Objective-C']['.h'] = (h_files *
oc_files / lang_fiels)
return report
def __calculate_main_language(self, report):
"""
Calculates the main language (maximum of files extensions)
:param report: The report
:returns: The main language.
"""
max_files = 0
max_lang = None
for language in report:
lang_fiels = sum(report[language].values())
if max_files < lang_fiels:
max_lang = language
max_files = lang_fiels
return max_lang
def __calculate_used_languages(self, report):
"""
        Calculates the used languages by discarding the per-extension counts and
        collapsing them to the language level. Only languages that match at least
        one file are kept and will appear in the report
:param report: The report
:returns: The used languages.
"""
languages = {}
for language in report:
total_files = sum(report[language].values())
if total_files > 0:
languages[language] = total_files
return sorted(languages, key=languages.get, reverse=True)
def scrab(self, project, filepath, file):
"""
Counts the files that have an extension of one of the languages
:param project: The project that the scrab task shall analyse
:param filepath: The filepath to the file that can be analysed
:param file: The file as string that can be analysed
:returns: Report that contains the scrabbed information of *this* file
- the extensions have either a count of 0 or 1
"""
filename, file_extension = os.path.splitext(filepath)
for language in self.__language_extensions:
if file_extension in self.__language_extensions[language]:
self.__report[language][file_extension] += 1
def report(self):
"""
        Decides which header files (probably) belong to which language,
calculates the main language and removes redundant / unnecessary
detailed information from the report
:param report: The complete report this task created
:returns: Report that contains all scrabbed information
eg.:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
"""
pre_report = self.__decide_h_extension()
main_language = self.__calculate_main_language(pre_report)
# write the result to the report
report = {}
report['main_language'] = main_language
report['languages'] = self.__calculate_used_languages(pre_report)
return report
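# Minimal usage sketch (illustrative; the constructor arguments and file inputs are assumptions):
#   detector = LanguageDetector(parameter={}, global_args={})
#   detector.scrab(project, 'src/main.cpp', file_contents)
#   detector.scrab(project, 'include/main.h', header_contents)
#   detector.report()  # e.g. {'main_language': 'C++', 'languages': ['C++']}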
| 35.706564
| 79
| 0.561635
| 8,031
| 0.868404
| 0
| 0
| 0
| 0
| 0
| 0
| 4,436
| 0.479671
|
7911642cb8be401271e397388edbb0e1b9d4ae27
| 4,667
|
py
|
Python
|
VAE/full_model/model_training.py
|
youngmg1995/NES-Music-Maker
|
aeda10a541cfd439cfa46c45e63411e0d98e41c1
|
[
"MIT"
] | 3
|
2020-06-26T22:02:35.000Z
|
2021-11-20T19:24:33.000Z
|
VAE/full_model/model_training.py
|
youngmg1995/NES-Music-Maker
|
aeda10a541cfd439cfa46c45e63411e0d98e41c1
|
[
"MIT"
] | null | null | null |
VAE/full_model/model_training.py
|
youngmg1995/NES-Music-Maker
|
aeda10a541cfd439cfa46c45e63411e0d98e41c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
model_training.py
~~~~~~~~~~~~~~~~~
This file serves as a script for building and training our VAE model. To do
so we used the VAE and DataSequence classes defined in the file `VAE.py`, as
well as helper functions from the file `dataset_utils` for loading and parsing
our datasets.
The user has the ability to specify several parameters that control the
loading of our data, the structure of our model, as well as the training plan
for our model. After training is complete the script also plots metrics tracked
during training and saves the final model.
"""
# Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from dataset_utils import load_training, load_validation
from VAE import VAE, DataSequence
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, time, json
### Load Data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parameters for shape of dataset (note these are also used for model def. and
# training.)
measures = 8
measure_len = 96
# training
training_foldername = '../../nesmdb24_seprsco/train/'
train_save_filename = 'transformed_dataset.json'
dataset , labels2int_map , int2labels_map = \
load_training(training_foldername, train_save_filename,
measures = measures, measure_len = measure_len)
# validation
validation_foldername = '../../nesmdb24_seprsco/valid/'
val_save_filename = 'transformed_val_dataset.json'
val_dataset = load_validation(validation_foldername,\
labels2int_map, val_save_filename,
measures = measures, measure_len = measure_len)
### Build Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters
latent_dim = 124
input_dims = [mapping.shape[0]-1 for mapping in int2labels_map]
dropout = .1
maxnorm = None
vae_b1 , vae_b2 = .02 , .1
# Build Model
model = VAE(latent_dim, input_dims, measures, measure_len, dropout,
maxnorm, vae_b1 , vae_b2)
model.build([tf.TensorShape([None, measures, measure_len, input_dims[i]])
for i in range(4)])
model.summary()
### Train Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Training Parameters
batch_size = 100
epochs = 10
# Cost Function
cost_function = model.vae_loss
# Learning_rate schedule
lr_0 = .001
decay_rate = .998
lr_decay = lambda t: lr_0 * decay_rate**t
lr_schedule = tf.keras.callbacks.LearningRateScheduler(lr_decay)
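# For example, with lr_0 = .001 and decay_rate = .998 this schedule yields roughly
# 0.000980 at epoch 10 and 0.000905 at epoch 50 (lr_0 * decay_rate**t).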
# Optimizer
optimizer = tf.keras.optimizers.Adam()
# Define callbacks
callbacks = [lr_schedule]
# Keras Sequences for Datasets (need to use since one-hot datasets too
# large for storing in memory)
training_seq = DataSequence(dataset, int2labels_map, batch_size)
validation_seq = DataSequence(val_dataset, int2labels_map, batch_size)
# Compile Model
model.compile(optimizer = optimizer,
loss = cost_function)
# Train model
tic = time.perf_counter()
history = model.fit_generator(generator = training_seq,
epochs = epochs)
toc = time.perf_counter()
print(f"Trained Model in {(toc - tic)/60:0.1f} minutes")
### Plot Training Metrics
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
training_loss = history.history['loss']
# Total Loss
plt.figure(1)
plt.plot(training_loss, 'b', label='Training')
plt.title('Loss vs Time')
plt.xlabel('Training Epoch')
plt.ylabel('Avg. Total Loss')
plt.legend()
plt.show()
### Save Model and History
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save Model Weights
save_model = False
if save_model:
checkpoint_dir = '.\\training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
model.save_weights(checkpoint_prefix)
print('Model weights saved to files: '+checkpoint_prefix+'.*')
# Save Training History
save_history = False
if save_history:
checkpoint_dir = '.\\training_checkpoints'
history_filename = os.path.join(checkpoint_dir, "training_history.json")
with open(history_filename, 'w') as f:
json.dump({
key:[float(value) for value in history.history[key]]
for key in history.history
}, f)
print('Training history saved to file: '+ history_filename)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| 31.748299
| 79
| 0.611099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,270
| 0.486394
|
7911efa6a596e02ff81a8a1e7aa08e6a17b34751
| 721
|
py
|
Python
|
tests/validation/test_is_subnational1.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 9
|
2020-05-16T20:26:33.000Z
|
2021-11-02T06:24:46.000Z
|
tests/validation/test_is_subnational1.py
|
StuartMacKay/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | 17
|
2019-06-22T09:41:22.000Z
|
2020-09-11T06:25:21.000Z
|
tests/validation/test_is_subnational1.py
|
ProjectBabbler/ebird-api
|
14b5c777548416a58abec05e25cd4b9a8e22f210
|
[
"MIT"
] | null | null | null |
import unittest
from ebird.api.validation import is_subnational1
class IsSubnational1Tests(unittest.TestCase):
"""Tests for the is_subnational1 validation function."""
def test_is_subnational1(self):
self.assertTrue(is_subnational1("US-NV"))
def test_invalid_code_is_not_subnational1(self):
self.assertFalse(is_subnational1("U"))
self.assertFalse(is_subnational1("US-"))
def test_country_is_not_subnational1(self):
self.assertFalse(is_subnational1("US"))
def test_subnational2_is_not_subnational1(self):
self.assertFalse(is_subnational1("US-NV-VMT"))
def test_location_is_not_subnational1(self):
self.assertFalse(is_subnational1("L123456"))
| 30.041667
| 60
| 0.744799
| 652
| 0.9043
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.131761
|
7912e94c22f794944d84a76c7ea337e0f1d42d27
| 83
|
py
|
Python
|
maths2.py
|
tavleensasan/Tav
|
7d9d041cf0ed13c2fe581dc8e40c93721ae4de73
|
[
"MIT"
] | null | null | null |
maths2.py
|
tavleensasan/Tav
|
7d9d041cf0ed13c2fe581dc8e40c93721ae4de73
|
[
"MIT"
] | null | null | null |
maths2.py
|
tavleensasan/Tav
|
7d9d041cf0ed13c2fe581dc8e40c93721ae4de73
|
[
"MIT"
] | null | null | null |
def multiple(first, second):
    return first * second
def add(x, y):
    return x + y
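# Example usage (illustrative):
#   multiple(3, 4)  # -> 12
#   add(3, 4)       # -> 7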
| 16.6
| 27
| 0.662651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
79135bcae4aa65725d47cfe68fe799e301d340b1
| 7,172
|
py
|
Python
|
backend/kale/tests/assets/kfp_dsl/simple_data_passing.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 502
|
2019-07-18T16:19:16.000Z
|
2022-03-30T19:45:31.000Z
|
backend/kale/tests/assets/kfp_dsl/simple_data_passing.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 189
|
2019-09-22T10:54:02.000Z
|
2022-03-28T13:46:31.000Z
|
backend/kale/tests/assets/kfp_dsl/simple_data_passing.py
|
brness/kale
|
d90310dbebc765c68915df0cf832a7a5d1ec1551
|
[
"Apache-2.0"
] | 111
|
2019-09-25T20:28:47.000Z
|
2022-03-24T01:31:46.000Z
|
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
| 34.480769
| 88
| 0.725739
| 0
| 0
| 0
| 0
| 4,808
| 0.670385
| 0
| 0
| 847
| 0.118098
|
791468fb9834f8a61e661025dfae37ea17e85be7
| 135
|
py
|
Python
|
note/urls.py
|
StevenYwch/CloudNote
|
c36efba53d83a040f4c9cff861d0df28d9db8f1b
|
[
"MIT"
] | null | null | null |
note/urls.py
|
StevenYwch/CloudNote
|
c36efba53d83a040f4c9cff861d0df28d9db8f1b
|
[
"MIT"
] | null | null | null |
note/urls.py
|
StevenYwch/CloudNote
|
c36efba53d83a040f4c9cff861d0df28d9db8f1b
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('list', views.list_view),
path('add', views.add_view),
]
| 19.285714
| 34
| 0.681481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.081481
|
791572847749537988baaf3cd53a31420b81f7a2
| 3,158
|
py
|
Python
|
roombapy/discovery.py
|
Erelen-Laiquendi/roombapy
|
104908ec040ebb72e16d3763741565eacc585801
|
[
"MIT"
] | 17
|
2018-01-27T19:53:06.000Z
|
2022-03-16T07:29:13.000Z
|
roombapy/discovery.py
|
Erelen-Laiquendi/roombapy
|
104908ec040ebb72e16d3763741565eacc585801
|
[
"MIT"
] | 78
|
2017-09-03T17:37:03.000Z
|
2022-03-30T10:41:55.000Z
|
roombapy/discovery.py
|
bdraco/Roomba980-Python
|
d25583a7b8cd6e65148caeebc6849e73dff645da
|
[
"MIT"
] | 25
|
2017-09-03T13:43:21.000Z
|
2022-03-19T23:41:51.000Z
|
import json
import logging
import socket
from roombapy.roomba_info import RoombaInfo
class RoombaDiscovery:
udp_bind_address = ""
udp_address = "<broadcast>"
udp_port = 5678
roomba_message = "irobotmcs"
amount_of_broadcasted_messages = 5
server_socket = None
log = None
def __init__(self):
"""Init discovery."""
self.server_socket = _get_socket()
self.log = logging.getLogger(__name__)
def find(self, ip=None):
if ip is not None:
return self.get(ip)
return self.get_all()
def get_all(self):
self._start_server()
self._broadcast_message(self.amount_of_broadcasted_messages)
robots = set()
while True:
response = self._get_response()
if response:
robots.add(response)
else:
break
return robots
def get(self, ip):
self._start_server()
self._send_message(ip)
return self._get_response(ip)
def _get_response(self, ip=None):
try:
while True:
raw_response, addr = self.server_socket.recvfrom(1024)
if ip is not None and addr[0] != ip:
continue
self.log.debug(
"Received response: %s, address: %s", raw_response, addr
)
data = raw_response.decode()
if self._is_from_irobot(data):
return _decode_data(data)
except socket.timeout:
self.log.info("Socket timeout")
return None
def _is_from_irobot(self, data):
if data == self.roomba_message:
return False
json_response = json.loads(data)
if (
"Roomba" in json_response["hostname"]
or "iRobot" in json_response["hostname"]
):
return True
return False
def _broadcast_message(self, amount):
for i in range(amount):
self.server_socket.sendto(
self.roomba_message.encode(), (self.udp_address, self.udp_port)
)
self.log.debug("Broadcast message sent: " + str(i))
def _send_message(self, udp_address):
self.server_socket.sendto(
self.roomba_message.encode(), (udp_address, self.udp_port)
)
self.log.debug("Message sent")
def _start_server(self):
self.server_socket.bind((self.udp_bind_address, self.udp_port))
self.log.debug("Socket server started, port %s", self.udp_port)
def _decode_data(data):
json_response = json.loads(data)
return RoombaInfo(
hostname=json_response["hostname"],
robot_name=json_response["robotname"],
ip=json_response["ip"],
mac=json_response["mac"],
firmware=json_response["sw"],
sku=json_response["sku"],
capabilities=json_response["cap"],
)
def _get_socket():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server_socket.settimeout(5)
return server_socket
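# Minimal usage sketch (illustrative):
#   discovery = RoombaDiscovery()
#   robots = discovery.find()               # broadcast and collect every responding robot
#   info = discovery.find("192.168.1.10")   # or query a single robot by IP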
| 28.972477
| 79
| 0.594997
| 2,486
| 0.787207
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.079481
|
7915bd6303c3c35d054564976537a39f4bb990be
| 76
|
py
|
Python
|
nonbonded/cli/projects/__init__.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | 5
|
2020-05-11T18:25:00.000Z
|
2022-01-27T10:55:09.000Z
|
nonbonded/cli/projects/__init__.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | 88
|
2020-06-02T14:40:05.000Z
|
2022-03-02T09:20:39.000Z
|
nonbonded/cli/projects/__init__.py
|
SimonBoothroyd/nonbonded
|
3efbb7d943d936b47248975f9ad0d8a006ea8684
|
[
"MIT"
] | null | null | null |
# from nonbonded.cli.project.project import project
#
# __all__ = [project]
| 19
| 51
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.960526
|
791688dbd138ffb5132f957ed4ac7f6e3567bcff
| 30,666
|
py
|
Python
|
pmaf/biome/essentials/_taxonomy.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-07-02T06:24:17.000Z
|
2021-07-02T06:24:17.000Z
|
pmaf/biome/essentials/_taxonomy.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T12:02:46.000Z
|
2021-06-28T12:02:46.000Z
|
pmaf/biome/essentials/_taxonomy.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
            Passed to :func:`~pandas.read_csv` or the :mod:`biom` loader.
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of :class:`.RepTaxonomy`
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
        Instance of :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`~pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, missing ranks are generated with placeholder prefixes like `s__` or `d__`
        desired_ranks
            List of desired ranks to generate.
            If False, all main ranks are generated.
        drop_ranks
            List of ranks to drop from the desired ranks.
            This parameter is only useful if `missing_rank` is True.
kwargs
Compatibility.
Returns
-------
        :class:`~pandas.Series` with consensus lineages and corresponding IDs
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, remove features where any of `ranks` is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
taxlike
            Target format of the generated taxonomy (currently only `lineage` is supported).
ascending
Sorting
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
            :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
# Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest have the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
            Taxonomy lineage notation style. Can be one of :const:`pmaf.internal._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
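# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# Exercises the public RepTaxonomy API defined above. The file name
# "taxonomy.csv" and its "lineage" column are hypothetical; any CSV whose
# chosen column holds GreenGenes-style lineage strings should behave similarly.
if __name__ == "__main__":
    tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="lineage")
    print(tax.avail_ranks)                                # ranks detected in the data
    lineages = tax.get_lineage_by_id(missing_rank=True)   # reconstructed lineage strings
    tax.drop_features_without_taxa()                      # prune features lacking any taxonomy
    tax.export("taxonomy_clean.csv", sep=",")             # write the cleaned lineages back out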
| 37.081016
| 133
| 0.559903
| 29,908
| 0.975282
| 0
| 0
| 3,909
| 0.12747
| 0
| 0
| 8,318
| 0.271245
|
7916e1c58cd3262cc6b3f5abd2ae3b7c7603279e
| 9,607
|
py
|
Python
|
users_django/users/tests/test_views.py
|
r-o-main/users-exercise
|
ecd6e33308140f72cb6c446e0e7e93f327b57a97
|
[
"MIT"
] | null | null | null |
users_django/users/tests/test_views.py
|
r-o-main/users-exercise
|
ecd6e33308140f72cb6c446e0e7e93f327b57a97
|
[
"MIT"
] | null | null | null |
users_django/users/tests/test_views.py
|
r-o-main/users-exercise
|
ecd6e33308140f72cb6c446e0e7e93f327b57a97
|
[
"MIT"
] | null | null | null |
from rest_framework.test import APIRequestFactory
from rest_framework import status
from django.test import TestCase
from django.urls import reverse
from ..models import User
from ..serializer import UserSerializer
from ..views import UserViewSet
import ipapi
class UsersApiRootTestCase(TestCase):
def test_api_root_should_reply_200(self):
""" GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK.
"""
request = APIRequestFactory().get("/api/v1/")
user_list_view = UserViewSet.as_view({"get": "list"})
response = user_list_view(request)
self.assertEqual(status.HTTP_200_OK, response.status_code)
class UsersApiTestCase(TestCase):
""" Factorize the tests setup to use a pool of existing users. """
def setUp(self):
self.factory = APIRequestFactory()
self.users = [
User.objects.create(
first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy"),
User.objects.create(
first_name="Fifi", last_name="Duck", email="fifi.duck@ricardo.ch", password="dummy"),
User.objects.create(
first_name="Loulou", last_name="Duck", email="loulou.duck@ricardo.ch", password="dummy")
]
class GetAllUsersTest(UsersApiTestCase):
""" Test GET /api/v1/users """
def test_list_all_users_should_retrieve_all_users_and_reply_200(self):
""" GET /api/v1/users should return all the users (or empty if no users found)
and return a successful status 200 OK.
"""
users = User.objects.all().order_by("id")
request = self.factory.get(reverse("v1:user-list"))
serializer = UserSerializer(users, many=True, context={'request': request})
user_list_view = UserViewSet.as_view({"get": "list"})
response = user_list_view(request)
self.assertEqual(len(self.users), len(response.data["results"]))
self.assertEqual(serializer.data, response.data["results"])
self.assertEqual(status.HTTP_200_OK, response.status_code)
class GetSingleUserTest(UsersApiTestCase):
""" Test GET /api/v1/users/:id """
def test_get_user_when_id_valid_should_retrieve_user_and_reply_200(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy")
user = User.objects.get(pk=riri.pk)
request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": riri.pk}))
serializer = UserSerializer(user, context={'request': request})
user_detail_view = UserViewSet.as_view({"get": "retrieve"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(serializer.data, response.data)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_get_user_when_id_invalid_should_reply_404(self):
request = self.factory.get(reverse("v1:user-detail", kwargs={"pk": 100}))
user_detail_view = UserViewSet.as_view({"get": "retrieve"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
class CreateNewUserTest(UsersApiTestCase):
""" Test POST /api/v1/users
    Override 'REMOTE_ADDR' to set the IP address to Switzerland or another country for testing purposes.
"""
def test_post_user_when_from_Switzerland_and_data_valid_should_create_user_and_reply_201(self):
initial_users_count = len(self.users)
valid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "c@sper.com",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count+1, new_users_count)
def test_post_user_when_id_invalid_should_not_create_user_and_reply_400(self):
initial_users_count = len(self.users)
invalid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=invalid_data,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
users_count = User.objects.count()
self.assertEqual(initial_users_count, users_count)
def test_post_user_when_data_valid_but_email_already_used_should_not_create_user_and_reply_400(self):
initial_users_count = len(self.users)
valid_data_with_used_email = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "riri.duck@ricardo.ch",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data_with_used_email,
REMOTE_ADDR='2.16.92.0'
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count, new_users_count)
def test_post_user_when_IP_not_in_Switzerland_should_not_create_user_and_reply_403(self):
initial_users_count = len(self.users)
valid_data = {
"first_name": "Casper",
"last_name": "Canterville",
"email": "c@sper.com",
"password": "dummy",
}
request = self.factory.post(
reverse("v1:user-list"),
data=valid_data,
REMOTE_ADDR='2.16.8.0' # Spain
)
user_detail_view = UserViewSet.as_view({"post": "create"})
response = user_detail_view(request)
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertTrue(len(response.data['detail']) > 0)
users_count = User.objects.count()
self.assertEqual(initial_users_count, users_count)
class UpdateSingleUserTest(UsersApiTestCase):
""" Test PUT|PATCH /api/v1/user/:id """
def test_patch_user_when_id_valid_should_patch_user_and_reply_200(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy")
request = self.factory.patch(
reverse("v1:user-detail", kwargs={"pk": riri.pk}),
data={"email": "riri@ricardo.ch"}
)
user_detail_view = UserViewSet.as_view({"patch": "partial_update"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_patch_user_when_id_invalid_should_not_patch_user_and_reply_404(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy")
request = self.factory.patch(
reverse("v1:user-detail", kwargs={"pk": 100}),
data={"email": "riri@ricardo.ch"}
)
user_detail_view = UserViewSet.as_view({"patch": "partial_update"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_put_when_invalid_data_should_not_update_user_and_reply_400(self):
riri = User.objects.create(
first_name="Riri", last_name="Duck", email="riri.duck@ricardo.ch", password="dummy")
invalid_payload = {
"first_name": "",
"last_name": "Duck",
"email": "riri.duck@ricardo.ch"
}
request = self.factory.put(
reverse("v1:user-detail", kwargs={"pk": riri.pk}),
data=invalid_payload
)
user_detail_view = UserViewSet.as_view({"put": "update"})
response = user_detail_view(request, pk=riri.pk)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
class DeleteSingleUserTest(UsersApiTestCase):
""" Test DELETE /api/v1/user/:id """
def test_delete_user_when_id_valid_should_delete_user_and_reply_204(self):
initial_users_count = len(self.users)
user_to_delete = self.users[0]
request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": user_to_delete.pk}))
user_detail_view = UserViewSet.as_view({"delete": "destroy"})
response = user_detail_view(request, pk=user_to_delete.pk)
self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
new_users_count = User.objects.count()
self.assertEqual(initial_users_count-1, new_users_count)
def test_delete_user_when_id_invalid_should_reply_404(self):
request = self.factory.delete(reverse("v1:user-detail", kwargs={"pk": 100}))
user_detail_view = UserViewSet.as_view({"delete": "destroy"})
response = user_detail_view(request, pk=100)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
| 39.052846
| 108
| 0.65192
| 9,325
| 0.970646
| 0
| 0
| 0
| 0
| 0
| 0
| 1,835
| 0.191007
|
79181888e71b95f21231a74673bce1df5f5dad06
| 1,058
|
py
|
Python
|
jburt/mask.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
jburt/mask.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
jburt/mask.py
|
jbburt/jburt
|
7745491214ef2b665ca8d1fc526bc802a36985ff
|
[
"MIT"
] | null | null | null |
from typing import List
import numpy as np
def mask_nan(arrays: List[np.ndarray]) -> List[np.ndarray]:
"""
Drop indices from equal-sized arrays if the element at that index is NaN in
any of the input arrays.
Parameters
----------
arrays : List[np.ndarray]
list of ndarrays containing NaNs, to be masked
Returns
-------
List[np.ndarray]
masked arrays (free of NaNs)
Notes
-----
    This function finds the indices at which the element is NaN in one or
    more of the input arrays, then drops those indices from all arrays.
For example:
>> a = np.array([0, 1, np.nan, 3])
>> b = np.array([np.nan, 5, np.nan, 7])
>> c = np.array([8, 9, 10, 11])
>> mask_nan([a, b, c])
[array([ 1., 3.]), array([ 5., 7.]), array([ 9, 11])]
"""
n = arrays[0].size
assert all(a.size == n for a in arrays[1:])
mask = np.array([False] * n)
for arr in arrays:
mask = np.logical_or(mask, np.isnan(arr))
return [arr[np.where(~mask)[0]] for arr in arrays]
| 27.128205
| 79
| 0.581285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 715
| 0.675803
|
7918bd9392635ed706771c33b08bee283e79ec85
| 838
|
py
|
Python
|
ExpenseTracker/grocery/migrations/0004_auto_20200908_1918.py
|
lennyAiko/LifeExpenses
|
ec345228bca00742b0b08cf3fc294dba6574b515
|
[
"MIT"
] | null | null | null |
ExpenseTracker/grocery/migrations/0004_auto_20200908_1918.py
|
lennyAiko/LifeExpenses
|
ec345228bca00742b0b08cf3fc294dba6574b515
|
[
"MIT"
] | null | null | null |
ExpenseTracker/grocery/migrations/0004_auto_20200908_1918.py
|
lennyAiko/LifeExpenses
|
ec345228bca00742b0b08cf3fc294dba6574b515
|
[
"MIT"
] | 1
|
2020-09-01T15:38:19.000Z
|
2020-09-01T15:38:19.000Z
|
# Generated by Django 3.1.1 on 2020-09-08 18:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('grocery', '0003_auto_20200908_1417'),
]
operations = [
migrations.AlterField(
model_name='item',
name='list',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item', to='grocery.list'),
),
migrations.AlterField(
model_name='list',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='list', to=settings.AUTH_USER_MODEL),
),
]
| 31.037037
| 142
| 0.656325
| 679
| 0.810263
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.156325
|
7919878f4085d6d12cdcb153170df1fa3bde8e8d
| 1,035
|
py
|
Python
|
my_hello_world_app/web_api/router.py
|
gsjay980/data-science-IP
|
715550d1cbf67e552c0df533619460c0fee15b94
|
[
"MIT"
] | 5
|
2020-05-26T09:33:54.000Z
|
2021-07-01T02:42:30.000Z
|
my_hello_world_app/web_api/router.py
|
gsjay980/data-science-IP
|
715550d1cbf67e552c0df533619460c0fee15b94
|
[
"MIT"
] | 3
|
2019-12-26T17:34:24.000Z
|
2020-02-04T03:16:23.000Z
|
my_hello_world_app/web_api/router.py
|
gsjay980/data-science-IP
|
715550d1cbf67e552c0df533619460c0fee15b94
|
[
"MIT"
] | 2
|
2021-12-17T00:46:03.000Z
|
2022-02-26T11:04:55.000Z
|
from os import getenv
from typing import Optional, Dict
from flask import Flask
TestConfig = Optional[Dict[str, bool]]
def create_app(test_config: TestConfig = None) -> Flask:
""" App factory method to initialize the application with given configuration """
app: Flask = Flask(__name__)
if test_config is not None:
app.config.from_mapping(test_config)
@app.route("/")
def index() -> str: # pylint: disable=unused-variable
return "My Hello World App is working..."
@app.route("/version")
def version() -> str: # pylint: disable=unused-variable
"""
        DOCKER_IMAGE_TAG is passed into the app from the Dockerfile as an ARG.
        It should be set up in the docker build task.
        It is used in .gitlab-ci.yaml to pass the hash of the latest commit as the docker image tag.
E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .
"""
return getenv("DOCKER_IMAGE_TAG") or "DOCKER_IMAGE_TAG haven't been setup"
return app
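# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# Demonstrates driving the factory above locally; the test-config mapping and
# the host/port values are illustrative assumptions, not project conventions.
if __name__ == "__main__":
    app = create_app({"TESTING": True})  # or create_app() for the default configuration
    app.run(host="127.0.0.1", port=5000, debug=True)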
| 32.34375
| 97
| 0.672464
| 0
| 0
| 0
| 0
| 629
| 0.607729
| 0
| 0
| 579
| 0.55942
|
791a179ef2265637a66974e7b35a3ad2c3c5a16a
| 10,666
|
py
|
Python
|
src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py
|
dcf21/4most-4gp
|
0421d76791315aa3ca8ff9e4bd2e37ad36c0141f
|
[
"MIT"
] | null | null | null |
src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py
|
dcf21/4most-4gp
|
0421d76791315aa3ca8ff9e4bd2e37ad36c0141f
|
[
"MIT"
] | null | null | null |
src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py
|
dcf21/4most-4gp
|
0421d76791315aa3ca8ff9e4bd2e37ad36c0141f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Code to take template spectra, used for RV fitting, and pass them through 4FS to resample them to 4MOST's resolution.
It then further resamples each arm onto a fixed logarithmic stride.
"""
import argparse
import hashlib
import logging
import numpy as np
import os
from os import path as os_path
from fourgp_fourfs import FourFS
from fourgp_degrade.resample import SpectrumResampler
from fourgp_degrade import SpectrumProperties
from fourgp_speclib import SpectrumLibrarySqlite
def command_line_interface(root_path):
"""
A simple command-line interface for running a tool to resample a library of template spectra onto fixed
logarithmic rasters representing each of the 4MOST arms.
We use the python argparse module to build the interface, and return the inputs supplied by the user.
:param root_path:
The root path of this 4GP installation; the directory where we can find 4FS.
:return:
An object containing the arguments supplied by the user.
"""
# Read input parameters
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--templates-in',
required=False,
default='turbospec_rv_templates',
dest='templates_in',
help="Library of spectra to use as templates for RV code")
parser.add_argument('--workspace', dest='workspace', default="",
help="Directory where we expect to find spectrum libraries")
parser.add_argument('--templates-out',
required=False,
default="rv_templates_resampled",
dest="templates_out",
help="Library into which to place resampled templates for RV code")
parser.add_argument('--binary-path',
required=False,
default=root_path,
dest="binary_path",
help="Specify a directory where 4FS binary package is installed")
args = parser.parse_args()
# Set up logger
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Resampling template spectra")
return args
def logarithmic_raster(lambda_min, lambda_max, lambda_step):
"""
Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,
and a mean wavelength step.
:param lambda_min:
Smallest wavelength in raster.
:param lambda_max:
Largest wavelength in raster.
:param lambda_step:
The approximate pixel size in the raster.
:return:
A numpy array containing a wavelength raster with fixed logarithmic stride.
"""
return np.exp(np.arange(
np.log(lambda_min),
np.log(lambda_max),
np.log(1 + lambda_step / lambda_min)
))
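# --- Editor's note: a small worked example, not part of the original module. ---
# Illustrates that the raster above has a constant *fractional* step: each pixel
# is (1 + lambda_step / lambda_min) times the previous one. The wavelength
# values below are arbitrary and chosen purely for illustration.
#
# >>> raster = logarithmic_raster(lambda_min=4000, lambda_max=4010, lambda_step=1)
# >>> ratios = raster[1:] / raster[:-1]
# >>> bool(np.allclose(ratios, 1 + 1 / 4000))
# True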
def resample_templates(args, logger):
"""
Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in
turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect
the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster
with fixed logarithmic stride.
:param args:
Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for
input and output. The required fields are defined by the user interface above.
:param logger:
A python logging object.
:return:
None.
"""
# Set path to workspace where we expect to find libraries of spectra
workspace = args.workspace if args.workspace else os_path.join(args.our_path, "../../../workspace")
# Open input template spectra
spectra = SpectrumLibrarySqlite.open_and_search(
library_spec=args.templates_in,
workspace=workspace,
extra_constraints={"continuum_normalised": 0}
)
templates_library, templates_library_items, templates_spectra_constraints = \
[spectra[i] for i in ("library", "items", "constraints")]
# Create new SpectrumLibrary to hold the resampled output templates
library_path = os_path.join(workspace, args.templates_out)
output_library = SpectrumLibrarySqlite(path=library_path, create=True)
# Instantiate 4FS wrapper
etc_wrapper = FourFS(
path_to_4fs=os_path.join(args.binary_path, "OpSys/ETC"),
snr_list=[250.],
magnitude=13,
snr_per_pixel=True
)
for input_spectrum_id in templates_library_items:
logger.info("Working on <{}>".format(input_spectrum_id['filename']))
# Open Spectrum data from disk
input_spectrum_array = templates_library.open(ids=input_spectrum_id['specId'])
# Load template spectrum (flux normalised)
template_flux_normalised = input_spectrum_array.extract_item(0)
# Look up the unique ID of the star we've just loaded
# Newer spectrum libraries have a uid field which is guaranteed unique; for older spectrum libraries use
# Starname instead.
# Work out which field we're using (uid or Starname)
spectrum_matching_field = 'uid' if 'uid' in template_flux_normalised.metadata else 'Starname'
# Look up the unique ID of this object
object_name = template_flux_normalised.metadata[spectrum_matching_field]
# Search for the continuum-normalised version of this same object (which will share the same uid / name)
search_criteria = {
spectrum_matching_field: object_name,
'continuum_normalised': 1
}
continuum_normalised_spectrum_id = templates_library.search(**search_criteria)
# Check that continuum-normalised spectrum exists and is unique
assert len(continuum_normalised_spectrum_id) == 1, "Could not find continuum-normalised spectrum."
# Load the continuum-normalised version
template_continuum_normalised_arr = templates_library.open(
ids=continuum_normalised_spectrum_id[0]['specId']
)
# Turn the SpectrumArray we got back into a single Spectrum
template_continuum_normalised = template_continuum_normalised_arr.extract_item(0)
# Now create a mock observation of this template using 4FS
logger.info("Passing template through 4FS")
mock_observed_template = etc_wrapper.process_spectra(
spectra_list=((template_flux_normalised, template_continuum_normalised),)
)
# Loop over LRS and HRS
for mode in mock_observed_template:
# Loop over the spectra we simulated (there was only one!)
for index in mock_observed_template[mode]:
# Loop over the various SNRs we simulated (there was only one!)
for snr in mock_observed_template[mode][index]:
# Create a unique ID for this arm's data
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
# Import the flux- and continuum-normalised spectra separately, but give them the same ID
for spectrum_type in mock_observed_template[mode][index][snr]:
# Extract continuum-normalised mock observation
logger.info("Resampling {} spectrum".format(mode))
mock_observed = mock_observed_template[mode][index][snr][spectrum_type]
# Replace errors which are nans with a large value
mock_observed.value_errors[np.isnan(mock_observed.value_errors)] = 1000.
# Check for NaN values in spectrum itself
if not np.all(np.isfinite(mock_observed.values)):
print("Warning: NaN values in template <{}>".format(template_flux_normalised.metadata['Starname']))
mock_observed.value_errors[np.isnan(mock_observed.values)] = 1000.
mock_observed.values[np.isnan(mock_observed.values)] = 1.
# Resample template onto a logarithmic raster of fixed step
resampler = SpectrumResampler(mock_observed)
# Construct the raster for each wavelength arm
wavelength_arms = SpectrumProperties(mock_observed.wavelengths).wavelength_arms()
# Resample 4FS output for each arm onto a fixed logarithmic stride
for arm_count, arm in enumerate(wavelength_arms["wavelength_arms"]):
arm_raster, mean_pixel_width = arm
name = "{}_{}".format(mode, arm_count)
arm_info = {
"lambda_min": arm_raster[0],
"lambda_max": arm_raster[-1],
"lambda_step": mean_pixel_width
}
arm_raster = logarithmic_raster(lambda_min=arm_info['lambda_min'],
lambda_max=arm_info['lambda_max'],
lambda_step=arm_info['lambda_step']
)
# Resample 4FS output onto a fixed logarithmic step
mock_observed_arm = resampler.onto_raster(arm_raster)
# Save it into output spectrum library
output_library.insert(spectra=mock_observed_arm,
filenames=input_spectrum_id['filename'],
metadata_list={
"uid": unique_id,
"template_id": object_name,
"mode": mode,
"arm_name": "{}_{}".format(mode,arm_count),
"lambda_min": arm_raster[0],
"lambda_max": arm_raster[-1],
"lambda_step": mean_pixel_width
})
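# --- Editor's note: a minimal driver sketch, not part of the original module. ---
# Shows how the two functions above are intended to be combined. The assumed 4GP
# root path below is illustrative only. Note that, as written, the workspace
# fallback in resample_templates() references an `args.our_path` attribute the
# argument parser above does not define, so `--workspace` should be passed
# explicitly on the command line.
if __name__ == "__main__":
    assumed_root_path = os_path.join(os_path.dirname(os_path.abspath(__file__)), "..", "..", "..")
    cli_args = command_line_interface(root_path=assumed_root_path)
    resample_templates(args=cli_args, logger=logging.getLogger(__name__))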
| 45.387234
| 127
| 0.603975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,394
| 0.411963
|
791a74027f2dc3fbe44b27f9c9f0523352b4d029
| 149
|
py
|
Python
|
datahub/activity_feed/apps.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 6
|
2019-12-02T16:11:24.000Z
|
2022-03-18T10:02:02.000Z
|
datahub/activity_feed/apps.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 1,696
|
2019-10-31T14:08:37.000Z
|
2022-03-29T12:35:57.000Z
|
datahub/activity_feed/apps.py
|
Staberinde/data-hub-api
|
3d0467dbceaf62a47158eea412a3dba827073300
|
[
"MIT"
] | 9
|
2019-11-22T12:42:03.000Z
|
2021-09-03T14:25:05.000Z
|
from django.apps import AppConfig
class ActivityFeedConfig(AppConfig):
"""App config for activity_feed."""
name = 'datahub.activity_feed'
| 18.625
| 39
| 0.738255
| 112
| 0.751678
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.389262
|
791aafa638d97478db2d6a462067e347380d5760
| 117
|
py
|
Python
|
ffmpeg_normalize/__init__.py
|
kostalski/ffmpeg-normalize
|
2c73f47ec4369de08c1e2051af490322084fd17b
|
[
"MIT"
] | 3
|
2019-04-01T11:03:04.000Z
|
2019-12-31T02:17:15.000Z
|
ffmpeg_normalize/__init__.py
|
kostalski/ffmpeg-normalize
|
2c73f47ec4369de08c1e2051af490322084fd17b
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
ffmpeg_normalize/__init__.py
|
kostalski/ffmpeg-normalize
|
2c73f47ec4369de08c1e2051af490322084fd17b
|
[
"MIT"
] | 1
|
2021-09-23T13:43:07.000Z
|
2021-09-23T13:43:07.000Z
|
from ._ffmpeg_normalize import FFmpegNormalize
from ._media_file import MediaFile
from ._version import __version__
| 23.4
| 46
| 0.863248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
791adee85a8db7759e5f3f2e7403b68b0df27e62
| 113
|
py
|
Python
|
__init__.py
|
nuxeo-cps/zope2--PortalTransforms
|
753f67202b016d0b07edd3bc65fd827cb39e50db
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
nuxeo-cps/zope2--PortalTransforms
|
753f67202b016d0b07edd3bc65fd827cb39e50db
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
nuxeo-cps/zope2--PortalTransforms
|
753f67202b016d0b07edd3bc65fd827cb39e50db
|
[
"BSD-3-Clause"
] | null | null | null |
__revision__ = '$Id$'
from utils import HAS_ZOPE
if HAS_ZOPE:
from Products.PortalTransforms.zope import *
| 16.142857
| 48
| 0.752212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.053097
|
791be8749fa60c1fc2eb6569f7089a3ef2f48994
| 11,259
|
py
|
Python
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/call/feedback.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 3
|
2019-11-12T07:55:51.000Z
|
2020-04-01T11:19:18.000Z
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/call/feedback.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 7
|
2020-06-06T01:06:19.000Z
|
2022-02-10T11:15:14.000Z
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/call/feedback.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 2
|
2019-10-20T14:54:47.000Z
|
2020-06-11T07:29:37.000Z
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
def get(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __call__(self):
"""
Constructs a FeedbackContext
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
return FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param call_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackContext(InstanceContext):
def __init__(self, version, account_sid, call_sid):
"""
Initialize the FeedbackContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param call_sid: The call sid that uniquely identifies the call
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
super(FeedbackContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
self._uri = '/Accounts/{account_sid}/Calls/{call_sid}/Feedback.json'.format(**self._solution)
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
data = values.of({
'QualityScore': quality_score,
'Issue': issue,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackContext {}>'.format(context)
class FeedbackInstance(InstanceResource):
class Issues(object):
AUDIO_LATENCY = "audio-latency"
DIGITS_NOT_CAPTURED = "digits-not-captured"
DROPPED_CALL = "dropped-call"
IMPERFECT_AUDIO = "imperfect-audio"
INCORRECT_CALLER_ID = "incorrect-caller-id"
ONE_WAY_AUDIO = "one-way-audio"
POST_DIAL_DELAY = "post-dial-delay"
UNSOLICITED_CALL = "unsolicited-call"
def __init__(self, version, payload, account_sid, call_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'issues': payload['issues'],
'quality_score': deserialize.integer(payload['quality_score']),
'sid': payload['sid'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'call_sid': call_sid,
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FeedbackContext for this FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
"""
if self._context is None:
self._context = FeedbackContext(
self._version,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def issues(self):
"""
:returns: The issues
:rtype: FeedbackInstance.Issues
"""
return self._properties['issues']
@property
def quality_score(self):
"""
:returns: 1 to 5 quality score
:rtype: unicode
"""
return self._properties['quality_score']
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The quality_score
:param FeedbackInstance.Issues issue: The issue
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.create(
quality_score,
issue=issue,
)
def fetch(self):
"""
Fetch a FeedbackInstance
:returns: Fetched FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.fetch()
def update(self, quality_score, issue=values.unset):
"""
Update the FeedbackInstance
:param unicode quality_score: An integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.update(
quality_score,
issue=issue,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.FeedbackInstance {}>'.format(context)
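# --- Editor's note: a usage sketch, not part of the generated module. ---
# Illustrates how the feedback sub-resource above is usually reached from the
# top-level REST client. The credentials and call SID are placeholders, and the
# exact accessor chain may differ slightly between library versions.
if __name__ == "__main__":
    from twilio.rest import Client

    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
    feedback = client.calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").feedback().create(
        quality_score=5,                                # integer from 1 to 5
        issue=FeedbackInstance.Issues.POST_DIAL_DELAY,  # optional issue constant
    )
    print(feedback.quality_score, feedback.issues)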
| 29.551181
| 101
| 0.606981
| 10,859
| 0.964473
| 0
| 0
| 1,607
| 0.14273
| 0
| 0
| 6,033
| 0.535838
|
791c899515ab5ee22d19f49f98ec0553bac2d037
| 144
|
py
|
Python
|
app/donut.py
|
zaphodef/stockpile
|
f682c0d16a07ab281363c8031c611305934da46c
|
[
"Apache-2.0"
] | null | null | null |
app/donut.py
|
zaphodef/stockpile
|
f682c0d16a07ab281363c8031c611305934da46c
|
[
"Apache-2.0"
] | null | null | null |
app/donut.py
|
zaphodef/stockpile
|
f682c0d16a07ab281363c8031c611305934da46c
|
[
"Apache-2.0"
] | null | null | null |
async def donut_handler(args):
print('This is where we would configure things to properly handle a .donut file request.')
return '', ''
| 36
| 94
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.993056
| 87
| 0.604167
|
791fc2d140f54e02c2b4d1000be7565797957857
| 136
|
py
|
Python
|
gtf2bed/__init__.py
|
jvfe/gtf2bed
|
7ac21759498ca9495030982d2a11c2a63149a75c
|
[
"BSD-3-Clause"
] | 1
|
2021-04-22T09:27:35.000Z
|
2021-04-22T09:27:35.000Z
|
gtf2bed/__init__.py
|
jvfe/gtf2bed
|
7ac21759498ca9495030982d2a11c2a63149a75c
|
[
"BSD-3-Clause"
] | null | null | null |
gtf2bed/__init__.py
|
jvfe/gtf2bed
|
7ac21759498ca9495030982d2a11c2a63149a75c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Top-level package for gtf2bed."""
__author__ = """João Vitor F. Cavalcante"""
__email__ = "jvfecav@gmail.com"
__version__ = "0.1.0"
| 22.666667
| 43
| 0.683824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.678832
|
79206dc12be47bbbc702eacb1b5f27bdf824bf1f
| 2,993
|
py
|
Python
|
fastapi_cloudauth/firebase.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | null | null | null |
fastapi_cloudauth/firebase.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | null | null | null |
fastapi_cloudauth/firebase.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | null | null | null |
from calendar import timegm
from datetime import datetime
from typing import Any, Dict
from fastapi import HTTPException
from pydantic import BaseModel, Field
from starlette import status
from .base import UserInfoAuth
from .messages import NOT_VERIFIED
from .verification import JWKS, ExtraVerifier
class FirebaseClaims(BaseModel):
user_id: str = Field(alias="user_id")
email: str = Field(None, alias="email")
class FirebaseCurrentUser(UserInfoAuth):
"""
Verify ID token and get user info of Firebase
"""
user_info = FirebaseClaims
firebase_keys_url = "https://www.googleapis.com/robot/v1/metadata/x509/securetoken@system.gserviceaccount.com"
def __init__(self, project_id: str, *args: Any, **kwargs: Any):
self._key_refresh_locked = False
jwks = JWKS.firebase(self.firebase_keys_url)
super().__init__(
jwks,
*args,
user_info=self.user_info,
audience=project_id,
issuer=f"https://securetoken.google.com/{project_id}",
extra=FirebaseExtraVerifier(project_id=project_id),
**kwargs,
)
async def refresh_keys(self) -> None:
if not self._key_refresh_locked:
# Ensure only one key refresh can happen at once.
# This prevents a dogpile of requests the second the keys expire
# from causing a bunch of refreshes (each one is an http request).
self._key_refresh_locked = True
# Re-query the keys from firebase.
# NOTE: The expires comes from an http header which is supposed to
# be set to a time long before the keys are no longer in use.
# This allows gradual roll-out of the keys and should prevent any
# request from failing.
# The only scenario which will result in failing requests is if
# there are zero requests for the entire duration of the roll-out
# (observed to be around 1 week), followed by a burst of multiple
# requests at once.
jwks = JWKS.firebase(self.firebase_keys_url)
# Reset the keys and the expiry date.
self._verifier._jwks_to_key = jwks.keys
self._keys_expire = jwks.expires
# Remove the lock.
self._key_refresh_locked = False
class FirebaseExtraVerifier(ExtraVerifier):
def __init__(self, project_id: str):
self._pjt_id = project_id
def __call__(self, claims: Dict[str, str], auto_error: bool = True) -> bool:
# auth_time must be past time
if claims.get("auth_time"):
auth_time = int(claims["auth_time"])
now = timegm(datetime.utcnow().utctimetuple())
if now < auth_time:
if auto_error:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=NOT_VERIFIED
)
return False
return True
| 36.060241
| 114
| 0.636151
| 2,682
| 0.896091
| 0
| 0
| 0
| 0
| 1,201
| 0.40127
| 959
| 0.320414
|
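A usage sketch for the FirebaseCurrentUser dependency defined above, assuming the module is importable as fastapi_cloudauth.firebase (per the file path); the project id "my-project" and the /me route are placeholders, not values from the source.

from fastapi import Depends, FastAPI
from fastapi_cloudauth.firebase import FirebaseClaims, FirebaseCurrentUser

app = FastAPI()
get_current_user = FirebaseCurrentUser(project_id="my-project")  # placeholder project id


@app.get("/me")
async def read_me(current_user: FirebaseClaims = Depends(get_current_user)):
    # The dependency verifies the Firebase ID token and maps its claims
    # onto the FirebaseClaims model shown above.
    return {"user_id": current_user.user_id, "email": current_user.email}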
79222572360ae305c1ba2a36f8edf19a01cdcedf
| 2,410
|
py
|
Python
|
tests/instrumentation/sqlite_tests.py
|
dsanders11/opbeat_python
|
4bdfe494ed4dba12550dff86366b4402613bce92
|
[
"BSD-3-Clause"
] | 99
|
2015-02-27T02:21:41.000Z
|
2021-02-09T15:13:25.000Z
|
tests/instrumentation/sqlite_tests.py
|
dsanders11/opbeat_python
|
4bdfe494ed4dba12550dff86366b4402613bce92
|
[
"BSD-3-Clause"
] | 114
|
2015-01-16T15:06:49.000Z
|
2018-04-13T20:29:18.000Z
|
tests/instrumentation/sqlite_tests.py
|
dsanders11/opbeat_python
|
4bdfe494ed4dba12550dff86366b4402613bce92
|
[
"BSD-3-Clause"
] | 51
|
2015-01-07T12:13:56.000Z
|
2019-05-06T14:16:35.000Z
|
import sqlite3
import mock
import opbeat.instrumentation.control
from tests.helpers import get_tempstoreclient
from tests.utils.compat import TestCase
class InstrumentSQLiteTest(TestCase):
def setUp(self):
self.client = get_tempstoreclient()
opbeat.instrumentation.control.instrument()
@mock.patch("opbeat.traces.RequestsStore.should_collect")
def test_connect(self, should_collect):
should_collect.return_value = False
self.client.begin_transaction("transaction.test")
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("""CREATE TABLE testdb (id integer, username text)""")
cursor.execute("""INSERT INTO testdb VALUES (1, "Ron")""")
cursor.execute("""DROP TABLE testdb""")
self.client.end_transaction("MyView")
transactions, traces = self.client.instrumentation_store.get_all()
expected_signatures = ['transaction', 'sqlite3.connect :memory:',
'CREATE TABLE', 'INSERT INTO testdb',
'DROP TABLE']
self.assertEqual(set([t['signature'] for t in traces]),
set(expected_signatures))
# Reorder according to the kinds list so we can just test them
sig_dict = dict([(t['signature'], t) for t in traces])
traces = [sig_dict[k] for k in expected_signatures]
self.assertEqual(traces[0]['signature'], 'transaction')
self.assertEqual(traces[0]['kind'], 'transaction')
self.assertEqual(traces[0]['transaction'], 'MyView')
self.assertEqual(traces[1]['signature'], 'sqlite3.connect :memory:')
self.assertEqual(traces[1]['kind'], 'db.sqlite.connect')
self.assertEqual(traces[1]['transaction'], 'MyView')
self.assertEqual(traces[2]['signature'], 'CREATE TABLE')
self.assertEqual(traces[2]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[2]['transaction'], 'MyView')
self.assertEqual(traces[3]['signature'], 'INSERT INTO testdb')
self.assertEqual(traces[3]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[3]['transaction'], 'MyView')
self.assertEqual(traces[4]['signature'], 'DROP TABLE')
self.assertEqual(traces[4]['kind'], 'db.sqlite.sql')
self.assertEqual(traces[4]['transaction'], 'MyView')
self.assertEqual(len(traces), 5)
| 38.253968
| 77
| 0.641494
| 2,254
| 0.93527
| 0
| 0
| 2,094
| 0.86888
| 0
| 0
| 719
| 0.29834
|
7922c3c12c906f5e3ff236bb30e73bcdb61a9ea9
| 477
|
py
|
Python
|
setup.py
|
samytessier/group_9_mlops
|
774f69354aeb5a9ddb59eb2cf5f8460832ab21b2
|
[
"MIT"
] | 1
|
2022-01-20T02:18:16.000Z
|
2022-01-20T02:18:16.000Z
|
setup.py
|
samytessier/group_9_mlops
|
774f69354aeb5a9ddb59eb2cf5f8460832ab21b2
|
[
"MIT"
] | 1
|
2022-01-08T17:18:04.000Z
|
2022-01-08T17:18:04.000Z
|
setup.py
|
samytessier/group_9_mlops
|
774f69354aeb5a9ddb59eb2cf5f8460832ab21b2
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='This MLOps project aims to use the Transformers framework from Hugging Face in order to tweak a pre-trained NLP model to accurately gauge the sentiment of an Amazon review (being able to guess the whether the rating of a product is positive or negative given only the text in a review).',
author='group9 DTU MLops',
license='MIT',
)
| 43.363636
| 305
| 0.742138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 323
| 0.677149
|
7923b39638368ab2ae741c772b643949cd865155
| 423
|
py
|
Python
|
gaphor/RAAML/stpa/connectors.py
|
Texopolis/gaphor
|
3b190620075fd413258af1e7a007b4b2167a7564
|
[
"Apache-2.0"
] | 867
|
2018-01-09T00:19:09.000Z
|
2022-03-31T02:49:23.000Z
|
gaphor/RAAML/stpa/connectors.py
|
burakozturk16/gaphor
|
86267a5200ac4439626d35d306dbb376c3800107
|
[
"Apache-2.0"
] | 790
|
2018-01-13T23:47:07.000Z
|
2022-03-31T16:04:27.000Z
|
gaphor/RAAML/stpa/connectors.py
|
burakozturk16/gaphor
|
86267a5200ac4439626d35d306dbb376c3800107
|
[
"Apache-2.0"
] | 117
|
2018-01-09T02:24:49.000Z
|
2022-03-23T08:07:42.000Z
|
from gaphor.diagram.connectors import Connector
from gaphor.diagram.presentation import Classified
from gaphor.RAAML.raaml import RelevantTo
from gaphor.RAAML.stpa import RelevantToItem
from gaphor.SysML.requirements.connectors import DirectedRelationshipPropertyPathConnect
@Connector.register(Classified, RelevantToItem)
class RelevantToConnect(DirectedRelationshipPropertyPathConnect):
relation_type = RelevantTo
| 35.25
| 88
| 0.87234
| 97
| 0.229314
| 0
| 0
| 145
| 0.34279
| 0
| 0
| 0
| 0
|
7923c47de0831caf8141bfde82615c01392124f5
| 1,197
|
py
|
Python
|
voltagemetricspublisher/services/extractionService.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
voltagemetricspublisher/services/extractionService.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
voltagemetricspublisher/services/extractionService.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
from gpiozero import MCP3008 # Installed in GAM 13/09/2019.
import time
import gpiozero
from ..models.rawMetricDto import RawMetricDto
class ExtractionService():
def __init__(self):
self.vref = 3.3
def getGpioValues(self):
print("Getting GPIO Values.")
adc0 = MCP3008(channel=0)
adc1 = MCP3008(channel=1)
adc2 = MCP3008(channel=2)
adc3 = MCP3008(channel=3)
adc4 = MCP3008(channel=4)
adc5 = MCP3008(channel=5)
adc6 = MCP3008(channel=6)
adc7 = MCP3008(channel=7)
model = RawMetricDto()
model.voltage0 = self.vref*4.57*adc0.value # Battery-Main
model.voltage1 = self.vref*4.57*adc1.value # Bus
model.voltage2 = self.vref*4.57*adc2.value # Router
model.voltage3 = self.vref*4.57*adc3.value # Battery-Emg.Lamps
model.voltage4 = self.vref*adc4.value # XX3
model.voltage5 = self.vref*adc5.value # XX4
model.voltage6 = self.vref*adc6.value # WTL
model.voltage7 = self.vref*adc7.value # WLL
model.deviceTime = time.asctime(time.localtime(time.time()))
return model
| 32.351351
| 71
| 0.622389
| 1,037
| 0.866332
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.09858
|
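An illustrative polling loop for ExtractionService; it assumes a Raspberry Pi with an MCP3008 wired as the class expects, that the package is importable per the file path, and the 5-second interval is an arbitrary choice.

from time import sleep

from voltagemetricspublisher.services.extractionService import ExtractionService  # assumed import path

service = ExtractionService()
for _ in range(3):
    metric = service.getGpioValues()           # reads all eight ADC channels
    print(metric.deviceTime, metric.voltage0, metric.voltage3)
    sleep(5)                                   # arbitrary sampling interval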
792592d09cfb1da8cbdd06e8e2cb4970a31ce4e6
| 553
|
py
|
Python
|
data_browser/migrations/0002_auto_20200331_1842.py
|
me2d09/django-data-browser
|
1108f714229aab8c30a27d93f264f2f26b8b0aee
|
[
"BSD-3-Clause"
] | null | null | null |
data_browser/migrations/0002_auto_20200331_1842.py
|
me2d09/django-data-browser
|
1108f714229aab8c30a27d93f264f2f26b8b0aee
|
[
"BSD-3-Clause"
] | null | null | null |
data_browser/migrations/0002_auto_20200331_1842.py
|
me2d09/django-data-browser
|
1108f714229aab8c30a27d93f264f2f26b8b0aee
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.0.13 on 2020-03-31 17:42
from django.db import migrations, models
import data_browser.models
class Migration(migrations.Migration):
dependencies = [
("data_browser", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="view",
name="id",
field=models.CharField(
default=data_browser.models.get_id,
max_length=12,
primary_key=True,
serialize=False,
),
),
]
| 21.269231
| 51
| 0.549729
| 431
| 0.779385
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.155515
|
7926794343cfee2a3a93c437f389f8d256dd16f9
| 320
|
py
|
Python
|
Community/get_audit_info/__init__.py
|
spenney-bc/gateway-workflows
|
0311a9224b2d53c01689eb6a9a0a593177abed63
|
[
"Apache-2.0"
] | 43
|
2017-12-04T17:38:24.000Z
|
2021-12-29T09:17:17.000Z
|
Community/get_audit_info/__init__.py
|
spenney-bc/gateway-workflows
|
0311a9224b2d53c01689eb6a9a0a593177abed63
|
[
"Apache-2.0"
] | 49
|
2017-12-07T21:02:29.000Z
|
2022-02-04T22:27:16.000Z
|
Community/get_audit_info/__init__.py
|
spenney-bc/gateway-workflows
|
0311a9224b2d53c01689eb6a9a0a593177abed63
|
[
"Apache-2.0"
] | 82
|
2017-12-04T17:56:00.000Z
|
2021-12-29T09:17:21.000Z
|
# Copyright 2020 BlueCat Networks. All rights reserved.
# -*- coding: utf-8 -*-
type = 'ui'
sub_pages = [
{
'name' : 'get_audit_info_page',
'title' : u'Get Audit Info',
'endpoint' : 'get_audit_info/get_audit_info_endpoint',
'description' : u'get_audit_info'
},
]
| 24.615385
| 65
| 0.56875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.665625
|
79273775aa326888e7143a25472099fb24c7a2cc
| 548
|
py
|
Python
|
ALDS/ALDS1_10_A.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | 1
|
2020-01-08T16:33:46.000Z
|
2020-01-08T16:33:46.000Z
|
ALDS/ALDS1_10_A.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
ALDS/ALDS1_10_A.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
import sys
import io
input_txt = """
44
"""
sys.stdin = io.StringIO(input_txt)
tmp = input()
# copy the below part and paste to the submission form.
# ---------function------------
def fibonacci(n):
if n <= 1:
return 1
    # one slot per Fibonacci term up to n (the sample input above is 44)
    fib_array = [1] * (n + 1)
for i in range(2, n+1):
fib_array[i] = fib_array[i-1] + fib_array[i-2]
return fib_array[n]
def main():
n = int(input())
fib = fibonacci(n)
print(fib)
return
main()
# -----------------------------
sys.stdin = sys.__stdin__
| 17.125
| 56
| 0.509124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.240876
|
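A quick sanity check for the fibonacci helper above (this problem's convention starts the sequence at fib(0) = fib(1) = 1), assuming the function is in scope alongside the snippet.

# Expected values under the fib(0) = fib(1) = 1 convention used above.
assert fibonacci(0) == 1
assert fibonacci(5) == 8            # 1, 1, 2, 3, 5, 8
assert fibonacci(10) == 89
assert fibonacci(44) == 1134903170  # matches the sample input (44) embedded in the script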
7927bbe2f2d0526128722c38428b7bbf96221e46
| 2,389
|
py
|
Python
|
armada/tests/unit/utils/test_lint.py
|
One-Fine-Day/armada
|
9cd71c8b55173a9c9c45bfb939d19277fabd902d
|
[
"Apache-2.0"
] | null | null | null |
armada/tests/unit/utils/test_lint.py
|
One-Fine-Day/armada
|
9cd71c8b55173a9c9c45bfb939d19277fabd902d
|
[
"Apache-2.0"
] | null | null | null |
armada/tests/unit/utils/test_lint.py
|
One-Fine-Day/armada
|
9cd71c8b55173a9c9c45bfb939d19277fabd902d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import yaml
from armada.utils import lint
class LintTestCase(unittest.TestCase):
def test_lint_armada_yaml_pass(self):
config = yaml.load("""
armada:
release_prefix: armada-test
charts:
- chart_group:
- chart:
name: chart
release_name: chart
namespace: chart
""")
resp = lint.valid_manifest(config)
self.assertTrue(resp)
def test_lint_armada_keyword_removed(self):
config = yaml.load("""
armasda:
release_prefix: armada-test
charts:
- chart_group:
- chart:
name: chart
release_name: chart
namespace: chart
""")
with self.assertRaises(Exception):
lint.valid_manifest(config)
def test_lint_prefix_keyword_removed(self):
config = yaml.load("""
armada:
release: armada-test
charts:
- chart_group:
- chart:
name: chart
release_name: chart
namespace: chart
""")
with self.assertRaises(Exception):
lint.valid_manifest(config)
def test_lint_armada_removed(self):
config = yaml.load("""
sarmada:
release_prefix: armada-test
charts:
- chart_group:
- chart:
name: chart
release_name: chart
namespace: chart
""")
with self.assertRaises(Exception):
lint.valid_manifest(config)
| 29.8625
| 74
| 0.541231
| 1,744
| 0.730013
| 0
| 0
| 0
| 0
| 0
| 0
| 1,653
| 0.691921
|
7927d5d5ec363318061f6e9faac288240c333204
| 7,149
|
py
|
Python
|
mliv/dgps.py
|
microsoft/AdversarialGMM
|
7a5cd51353c8a81e16c01220b71f77e4e1102add
|
[
"MIT"
] | 23
|
2020-12-01T22:55:40.000Z
|
2022-01-26T04:11:14.000Z
|
mliv/dgps.py
|
microsoft/AdversarialGMM
|
7a5cd51353c8a81e16c01220b71f77e4e1102add
|
[
"MIT"
] | null | null | null |
mliv/dgps.py
|
microsoft/AdversarialGMM
|
7a5cd51353c8a81e16c01220b71f77e4e1102add
|
[
"MIT"
] | 10
|
2020-12-05T17:12:49.000Z
|
2022-01-10T23:42:37.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
# continuously differentiable
fn_dict_cdiff = {'2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4,
'3dpoly': 7, 'linear': 8}
# continuous but not differentiable
fn_dict_cont = {'abs': 0, 'abs_sqrt': 5, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# discontinuous
fn_dict_disc = {'step': 6, 'band': 12, 'invband': 13,
'steplinear': 14}
# monotone
fn_dict_monotone = {'sigmoid': 2,
'step': 6, 'linear': 8,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# convex
fn_dict_convex = {'abs': 0, '2dpoly': 1, 'linear': 8,
'abspos': 10, 'sqrpos': 11}
# all functions
fn_dict = {'abs': 0, '2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4, 'abs_sqrt': 5,
'step': 6, '3dpoly': 7, 'linear': 8, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'band': 12, 'invband': 13,
'steplinear': 14, 'pwlinear': 15}
def generate_random_pw_linear(lb=-2, ub=2, n_pieces=5):
splits = np.random.choice(np.arange(lb, ub, 0.1),
n_pieces - 1, replace=False)
splits.sort()
slopes = np.random.uniform(-4, 4, size=n_pieces)
start = []
start.append(np.random.uniform(-1, 1))
for t in range(n_pieces - 1):
start.append(start[t] + slopes[t] * (splits[t] -
(lb if t == 0 else splits[t - 1])))
return lambda x: [start[ind] + slopes[ind] * (x - (lb if ind == 0 else splits[ind - 1])) for ind in [np.searchsorted(splits, x)]][0]
def get_tau_fn(func):
def first(x):
return x[:, [0]] if len(x.shape) == 2 else x
# func describes the relation between response and treatment
if func == fn_dict['abs']:
def tau_fn(x): return np.abs(first(x))
elif func == fn_dict['2dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * (first(x)**2)
elif func == fn_dict['sigmoid']:
def tau_fn(x): return 2 / (1 + np.exp(-2 * first(x)))
elif func == fn_dict['sin']:
def tau_fn(x): return np.sin(first(x))
elif func == fn_dict['frequent_sin']:
def tau_fn(x): return np.sin(3 * first(x))
elif func == fn_dict['abs_sqrt']:
def tau_fn(x): return np.sqrt(np.abs(first(x)))
elif func == fn_dict['step']:
def tau_fn(x): return 1. * (first(x) < 0) + 2.5 * (first(x) >= 0)
elif func == fn_dict['3dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * \
(first(x)**2) + first(x)**3
elif func == fn_dict['linear']:
def tau_fn(x): return first(x)
elif func == fn_dict['rand_pw']:
pw_linear = generate_random_pw_linear()
def tau_fn(x):
return np.array([pw_linear(x_i) for x_i in first(x).flatten()]).reshape(-1, 1)
elif func == fn_dict['abspos']:
def tau_fn(x): return np.abs(first(x)) * (first(x) >= 0)
elif func == fn_dict['sqrpos']:
def tau_fn(x): return (first(x)**2) * (first(x) >= 0)
elif func == fn_dict['band']:
def tau_fn(x): return 1.0 * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['invband']:
def tau_fn(x): return 1. - 1. * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['steplinear']:
def tau_fn(x): return 2. * (first(x) >= 0) - first(x)
elif func == fn_dict['pwlinear']:
def tau_fn(x):
q = first(x)
return (q + 1) * (q <= -1) + (q - 1) * (q >= 1)
else:
raise NotImplementedError()
return tau_fn
def standardize(z, p, y, fn):
ym = y.mean()
ystd = y.std()
y = (y - ym) / ystd
def newfn(x): return (fn(x) - ym) / ystd
return z, p, y, newfn
def get_data(n_samples, n_instruments, iv_strength, tau_fn, dgp_num):
# Construct dataset
# z:- instruments (features included here, can be high-dimensional)
# p :- treatments (features included here as well, can be high-dimensional)
# y :- response (is a scalar always)
confounder = np.random.normal(0, 1, size=(n_samples, 1))
z = np.random.normal(0, 1, size=(n_samples, n_instruments))
fn = tau_fn
if dgp_num == 1:
# DGP 1 in the paper
p = 2 * z[:, [0]] * (z[:, [0]] > 0) * iv_strength \
+ 2 * z[:, [1]] * (z[:, [1]] < 0) * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 2:
# DGP 2 in the paper
p = 2 * z[:, [0]] * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 3:
# DeepIV's DGP - has feature variables as well
# z is 3-dimensional: composed of (1) 1D z, (2) t - time unif~(0,10), and (3) s - customer type {1,...,7}
# y is related to p and z in a complex non-linear, non separable manner
# p is related to z again in a non-separable manner, rho is endogeneity parameter
rho = 0.8
psd = 3.7
pmu = 17.779
ysd = 158.
ymu = -292.1
z_1 = np.random.normal(0, 1, size=(n_samples, 1))
v = np.random.normal(0, 1, size=(n_samples, 1))
t = np.random.uniform(0, 10, size=(n_samples, 1))
s = np.random.randint(1, 8, size=(n_samples, 1))
e = rho * v + \
np.random.normal(0, np.sqrt(1 - rho**2), size=(n_samples, 1))
def psi(t): return 2 * (np.power(t - 5, 4) / 600 +
np.exp(-4 * np.power(t - 5, 2)) + t / 10 - 2)
p = 25 + (z_1 + 3) * psi(t) + v
p = (p - pmu) / psd
g = (10 + p) * s * psi(t) - 2 * p + e
y = (g - ymu) / ysd
z = np.hstack((z_1, s, t))
p = np.hstack((p, s, t))
def fn(p): return ((10 + p[:, 0]) * p[:, 1]
* psi(p[:, 2]) - 2 * p[:, 0] - ymu) / ysd
elif dgp_num == 4:
# Many weak Instruments DGP - n_instruments can be very large
z = np.random.normal(0.5, 1, size=(n_samples, n_instruments))
p = np.amin(z, axis=1).reshape(-1, 1) * iv_strength + confounder * \
(1 - iv_strength) + np.random.normal(0, 0.1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, 0.1, size=(n_samples, 1))
else:
# Here we have equal number of treatments and instruments and each
# instrument affects a separate treatment. Only the first treatment
# matters for the outcome.
z = np.random.normal(0, 2, size=(n_samples, n_instruments))
U = np.random.normal(0, 2, size=(n_samples, 1))
delta = np.random.normal(0, .1, size=(n_samples, 1))
zeta = np.random.normal(0, .1, size=(n_samples, 1))
p = iv_strength * z + (1 - iv_strength) * U + delta
y = fn(p) + U + zeta
return standardize(z, p, y, fn)
| 40.619318
| 136
| 0.527207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,494
| 0.20898
|
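A small sketch of how the helpers above fit together: pick a response function from fn_dict, draw a synthetic IV dataset, and inspect the shapes; the sample size, instrument strength and seed are arbitrary choices, not values from the source.

import numpy as np

from mliv.dgps import fn_dict, get_data, get_tau_fn  # assumed import path

np.random.seed(0)                               # arbitrary seed for reproducibility
tau = get_tau_fn(fn_dict['sin'])                # true structural function
z, p, y, tau_std = get_data(n_samples=2000, n_instruments=1,
                            iv_strength=0.6, tau_fn=tau, dgp_num=2)
print(z.shape, p.shape, y.shape)                # (2000, 1) (2000, 1) (2000, 1)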
7928e18542e9bd6bf82dff12dad8c28ca120e4fe
| 16,097
|
py
|
Python
|
tests/test_definitions/test_expectations_cfe.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | 1
|
2021-11-09T05:07:43.000Z
|
2021-11-09T05:07:43.000Z
|
tests/test_definitions/test_expectations_cfe.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | 1
|
2021-12-07T13:06:29.000Z
|
2021-12-07T13:06:29.000Z
|
tests/test_definitions/test_expectations_cfe.py
|
OmriBromberg/great_expectations
|
60eb81ebfb08fef5d37d55c316dc962928beb165
|
[
"Apache-2.0"
] | null | null | null |
import glob
import json
import os
import random
import string
import pandas as pd
import pytest
from great_expectations.execution_engine.pandas_batch_data import PandasBatchData
from great_expectations.execution_engine.sparkdf_batch_data import SparkDFBatchData
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.self_check.util import (
BigQueryDialect,
candidate_test_is_on_temporary_notimplemented_list_cfe,
evaluate_json_test_cfe,
get_test_validator_with_data,
mssqlDialect,
mysqlDialect,
postgresqlDialect,
sqliteDialect,
)
from tests.conftest import build_test_backends_list_cfe
from tests.test_definitions.test_expectations import tmp_dir
def pytest_generate_tests(metafunc):
# Load all the JSON files in the directory
dir_path = os.path.dirname(os.path.realpath(__file__))
expectation_dirs = [
dir_
for dir_ in os.listdir(dir_path)
if os.path.isdir(os.path.join(dir_path, dir_))
]
parametrized_tests = []
ids = []
backends = build_test_backends_list_cfe(metafunc)
for expectation_category in expectation_dirs:
test_configuration_files = glob.glob(
dir_path + "/" + expectation_category + "/*.json"
)
for c in backends:
for filename in test_configuration_files:
                with open(filename) as file:
                    test_configuration = json.load(file)
for d in test_configuration["datasets"]:
datasets = []
if candidate_test_is_on_temporary_notimplemented_list_cfe(
c, test_configuration["expectation_type"]
):
skip_expectation = True
schemas = validator_with_data = None
else:
skip_expectation = False
if isinstance(d["data"], list):
sqlite_db_path = os.path.abspath(
os.path.join(
tmp_dir,
"sqlite_db"
+ "".join(
[
random.choice(
string.ascii_letters + string.digits
)
for _ in range(8)
]
)
+ ".db",
)
)
for dataset in d["data"]:
datasets.append(
get_test_validator_with_data(
c,
dataset["data"],
dataset.get("schemas"),
table_name=dataset.get("dataset_name"),
sqlite_db_path=sqlite_db_path,
)
)
validator_with_data = datasets[0]
else:
schemas = d["schemas"] if "schemas" in d else None
validator_with_data = get_test_validator_with_data(
c, d["data"], schemas=schemas
)
for test in d["tests"]:
generate_test = True
skip_test = False
if "only_for" in test:
# if we're not on the "only_for" list, then never even generate the test
generate_test = False
if not isinstance(test["only_for"], list):
raise ValueError("Invalid test specification.")
if validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
):
# Call out supported dialects
if "sqlalchemy" in test["only_for"]:
generate_test = True
elif (
"sqlite" in test["only_for"]
and sqliteDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
sqliteDialect,
)
):
generate_test = True
elif (
"postgresql" in test["only_for"]
and postgresqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
postgresqlDialect,
)
):
generate_test = True
elif (
"mysql" in test["only_for"]
and mysqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mysqlDialect,
)
):
generate_test = True
elif (
"mssql" in test["only_for"]
and mssqlDialect is not None
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mssqlDialect,
)
):
generate_test = True
elif (
"bigquery" in test["only_for"]
and BigQueryDialect is not None
and hasattr(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
"name",
)
and validator_with_data.execution_engine.active_batch_data.sql_engine_dialect.name
== "bigquery"
):
generate_test = True
elif validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
PandasBatchData,
):
if "pandas" in test["only_for"]:
generate_test = True
if (
"pandas_022" in test["only_for"]
or "pandas_023" in test["only_for"]
) and int(pd.__version__.split(".")[1]) in [22, 23]:
generate_test = True
if ("pandas>=24" in test["only_for"]) and int(
pd.__version__.split(".")[1]
) > 24:
generate_test = True
elif validator_with_data and isinstance(
validator_with_data.execution_engine.active_batch_data,
SparkDFBatchData,
):
if "spark" in test["only_for"]:
generate_test = True
if not generate_test:
continue
if "suppress_test_for" in test and (
(
"sqlalchemy" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
)
or (
"sqlite" in test["suppress_test_for"]
and sqliteDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
sqliteDialect,
)
)
or (
"postgresql" in test["suppress_test_for"]
and postgresqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
postgresqlDialect,
)
)
or (
"mysql" in test["suppress_test_for"]
and mysqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mysqlDialect,
)
)
or (
"mssql" in test["suppress_test_for"]
and mssqlDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and isinstance(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
mssqlDialect,
)
)
or (
"bigquery" in test["suppress_test_for"]
and BigQueryDialect is not None
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
and hasattr(
validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,
"name",
)
and validator_with_data.execution_engine.active_batch_data.sql_engine_dialect.name
== "bigquery"
)
or (
"pandas" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
PandasBatchData,
)
)
or (
"spark" in test["suppress_test_for"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SparkDFBatchData,
)
)
):
skip_test = True
# Known condition: SqlAlchemy does not support allow_cross_type_comparisons
if (
"allow_cross_type_comparisons" in test["in"]
and validator_with_data
and isinstance(
validator_with_data.execution_engine.active_batch_data,
SqlAlchemyBatchData,
)
):
skip_test = True
parametrized_tests.append(
{
"expectation_type": test_configuration[
"expectation_type"
],
"validator_with_data": validator_with_data,
"test": test,
"skip": skip_expectation or skip_test,
}
)
ids.append(
c
+ "/"
+ expectation_category
+ "/"
+ test_configuration["expectation_type"]
+ ":"
+ test["title"]
)
metafunc.parametrize("test_case", parametrized_tests, ids=ids)
@pytest.mark.order(index=0)
def test_case_runner_cfe(test_case):
if test_case["skip"]:
pytest.skip()
# Note: this should never be done in practice, but we are wiping expectations to reuse batches during testing.
# test_case["batch"]._initialize_expectations()
if "parse_strings_as_datetimes" in test_case["test"]["in"]:
with pytest.deprecated_call():
evaluate_json_test_cfe(
validator=test_case["validator_with_data"],
expectation_type=test_case["expectation_type"],
test=test_case["test"],
)
else:
evaluate_json_test_cfe(
validator=test_case["validator_with_data"],
expectation_type=test_case["expectation_type"],
test=test_case["test"],
)
| 48.927052
| 118
| 0.391067
| 0
| 0
| 0
| 0
| 801
| 0.049761
| 0
| 0
| 1,333
| 0.08281
|
792b96690a5711f347a2fe1364e3eef792d1ebea
| 1,393
|
py
|
Python
|
corehq/apps/commtrack/resources/v0_1.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/commtrack/resources/v0_1.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/commtrack/resources/v0_1.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
from tastypie import fields
from corehq.apps.api.resources.v0_1 import CustomResourceMeta, DomainAdminAuthentication
from corehq.apps.products.models import Product
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.api.resources import HqBaseResource
"""
Implementation of the CommCare Supply APIs. For more information see:
https://confluence.dimagi.com/display/lmis/API
"""
class ProductResource(HqBaseResource):
type = "product"
id = fields.CharField(attribute='_id', readonly=True, unique=True)
code = fields.CharField(attribute='code', readonly=True, unique=True)
name = fields.CharField(attribute='name', readonly=True)
unit = fields.CharField(attribute='unit', readonly=True, null=True)
description = fields.CharField(attribute='description', readonly=True, null=True)
category = fields.CharField(attribute='category', readonly=True, null=True)
last_modified = fields.DateTimeField(attribute='last_modified', readonly=True, null=True)
# TODO:
# price?
def obj_get(self, request, **kwargs):
return get_object_or_not_exist(Product, kwargs['pk'], kwargs['domain'])
def obj_get_list(self, request, **kwargs):
return Product.by_domain(kwargs['domain'])
class Meta(CustomResourceMeta):
authentication = DomainAdminAuthentication()
resource_name = 'product'
limit = 0
| 36.657895
| 93
| 0.740847
| 987
| 0.708543
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.171572
|
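A hedged wiring sketch for ProductResource with Tastypie's Api router; the URL prefix and api_name are illustrative, and CommCare HQ's real URLconf (which supplies the domain kwarg used by obj_get) is more involved.

from django.conf.urls import include, url
from tastypie.api import Api

from corehq.apps.commtrack.resources.v0_1 import ProductResource  # module shown above

v0_1_api = Api(api_name='v0_1')
v0_1_api.register(ProductResource())            # resource defined above

urlpatterns = [
    url(r'^api/', include(v0_1_api.urls)),      # illustrative prefix only
]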
792bbfea31e2c47d9dc8a86be0bf40d5cfa67a78
| 7,381
|
py
|
Python
|
spectrl/util/io.py
|
luigiberducci/dirl
|
5f7997aea20dfb7347ebdee66de9bea4e6cd3c62
|
[
"MIT"
] | null | null | null |
spectrl/util/io.py
|
luigiberducci/dirl
|
5f7997aea20dfb7347ebdee66de9bea4e6cd3c62
|
[
"MIT"
] | null | null | null |
spectrl/util/io.py
|
luigiberducci/dirl
|
5f7997aea20dfb7347ebdee66de9bea4e6cd3c62
|
[
"MIT"
] | null | null | null |
import argparse
import os
import pathlib
import cv2
import pickle
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from numpy import genfromtxt
def parse_command_line_options(print_options=False):
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default=-1)
parser.add_argument("-d", type=pathlib.Path)
parser.add_argument("-s", type=int, choices=[0], default=0)
parser.add_argument("-e", type=int, default=0)
parser.add_argument("-a", type=str, default="ars")
parser.add_argument("-i", type=int, default=100)
parser.add_argument("-g", action="store_true")
parser.add_argument("-r", action="store_true")
args = parser.parse_args()
flags = {
"itno": args.n,
"folder": str(args.d),
"spec_num": args.s,
"env_num": args.e,
"alg": args.a,
"num_iter": args.i,
"gpu_flag": args.g,
"render": args.r
}
if print_options:
print('**** Command Line Options ****')
for key in flags:
print('{}: {}'.format(key, flags[key]))
return flags
def open_log_file(itno, folder):
'''
Open a log file to periodically flush data.
Parameters:
itno: int
folder: str
'''
fname = _get_prefix(folder) + 'log' + _get_suffix(itno) + '.txt'
open(fname, 'w').close()
file = open(fname, 'a')
return file
def save_object(name, object, itno, folder):
'''
Save any pickle-able object.
Parameters:
name: str
object: Object
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'wb')
pickle.dump(object, file)
file.close()
def load_object(name, itno, folder):
'''
Load pickled object.
Parameters:
name: str
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'rb')
object = pickle.load(file)
file.close()
return object
def save_log_info(log_info, itno, folder):
np.save(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy', log_info)
def load_log_info(itno, folder, csv=False):
if csv:
return genfromtxt(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.csv', delimiter=',')
else:
return np.load(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy')
def log_to_file(file, iter, num_transitions, reward, prob, additional_data={}):
'''
Log data to file.
Parameters:
file: file_handle
iter: int
num_transitions: int (number of simulation steps in each iter)
reward: float
prob: float (satisfaction probability)
additional_data: dict
'''
file.write('**** Iteration Number {} ****\n'.format(iter))
file.write('Environment Steps Taken: {}\n'.format(num_transitions))
file.write('Reward: {}\n'.format(reward))
file.write('Satisfaction Probability: {}\n'.format(prob))
for key in additional_data:
file.write('{}: {}\n'.format(key, additional_data[key]))
file.write('\n')
file.flush()
def get_image_dir(itno, folder):
image_dir = '{}img{}'.format(_get_prefix(folder), _get_suffix(itno))
if os.path.exists(image_dir) is False:
os.mkdir(image_dir)
return image_dir
def generate_video(env, policy, itno, folder, max_step=10000):
image_dir = get_image_dir(itno, folder)
done = False
state = env.reset()
step = 0
while not done:
img_arr = env.render(mode='rgb_array')
img = Image.fromarray(img_arr)
img.save(image_dir + '/' + str(step) + '.png')
action = policy.get_action(state)
state, _, done, _ = env.step(action)
step += 1
if step > max_step:
done = True
video_name = image_dir + '/' + 'video.avi'
images_temp = [img for img in os.listdir(image_dir)]
images = []
for i in range(len(images_temp)):
for j in images_temp:
directory = str(i) + '.png'
if directory == j:
images.append(j)
frame = cv2.imread(os.path.join(image_dir, images_temp[0]))
height, width, _ = frame.shape
video = cv2.VideoWriter(
video_name, cv2.VideoWriter_fourcc(*'XVID'), 20, (width, height))
for image in images:
video.write(cv2.imread(os.path.join(image_dir, image)))
cv2.destroyAllWindows()
video.release()
def plot_for_threshold(itno, folders, xs, threshold, color):
ys = []
for folder in folders:
val = 0
count = 0
for j in range(itno):
data = load_log_info(j, folder)
for pos in range(len(data)):
if data[pos][-1] >= threshold:
val += data[pos][0]
count += 1
break
ys.append(val / count)
plt.subplots_adjust(bottom=0.145, left=0.13)
plt.rcParams.update({'font.size': 18})
plt.plot(xs, ys, '-ok', label='z = {}'.format(threshold), color=color)
def plot_error_bar(x, data, color, label, points=False):
'''
Plot the error bar from the data.
Parameters:
samples_per_iter: int (number of sample rollouts per iteration of the algorithm)
data: (3+)-tuple of np.array (curve, lower error bar, upper error bar, ...)
color: color of the plot
label: string
'''
plt.subplots_adjust(bottom=0.126)
plt.rcParams.update({'font.size': 18})
if points:
plt.errorbar(x, data[0], data[0] - data[1], fmt='--o', color=color, label=label)
else:
plt.plot(x, data[0], color=color, label=label)
plt.fill_between(x, data[1], data[2], color=color, alpha=0.15)
def extract_plot_data(folder, column_num, low, up, csv=False):
'''
Load and parse log_info to generate error bars
Parameters:
folder: string (name of folder)
column_num: int (column number in log.npy to use)
l: int (lower limit on run number)
u: int (upper limit on run number)
Returns:
4-tuple of numpy arrays (curve, lower error bar, upper error bar, max_over_runs)
'''
log_infos = []
min_length = 1000000
for itno in range(low, up):
log_info = np.transpose(load_log_info(
itno, folder, csv=csv))[column_num]
log_info = np.append([0], log_info)
min_length = min(min_length, len(log_info))
log_infos.append(log_info)
log_infos = [log_info[:min_length] for log_info in log_infos]
data = np.array(log_infos)
curve = np.mean(data, axis=0)
std = np.std(data, axis=0)
max_curve = np.amax(data, axis=0)
return curve, (curve - std), (curve + std), max_curve
# save and render current plot
def save_plot(folder, name, show=True, scientific=True):
plt.rcParams.update({'font.size': 14})
plt.legend()
ax = plt.gca()
ax.xaxis.major.formatter._useMathText = True
if scientific:
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.savefig(_get_prefix(folder) + name + '.pdf', format='pdf')
if show:
plt.show()
# get prefix for file name
def _get_prefix(folder):
if folder == '':
return ''
else:
return folder + '/'
# get suffix from itno
def _get_suffix(itno):
if itno < 0:
return ''
else:
return str(itno)
| 28.608527
| 98
| 0.605067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,863
| 0.252405
|
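A brief sketch of the save, load, and plot round trip the utilities above support; the folder name, run number and fake log rows are placeholders.

import os

import numpy as np

from spectrl.util.io import (extract_plot_data, plot_error_bar,  # assumed import path
                             save_log_info, save_plot)

folder, itno = 'results', 0
os.makedirs(folder, exist_ok=True)
log = np.array([[100, 0.1], [200, 0.4], [300, 0.9]])    # fake (steps, metric) rows
save_log_info(log, itno, folder)

curve, lower, upper, _ = extract_plot_data(folder, column_num=1, low=0, up=1)
plot_error_bar(np.arange(len(curve)), (curve, lower, upper), 'b', label='metric')
save_plot(folder, 'metric_curve', show=False)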
792c98d61321846aacf5f5f89a160ce13339bfd4
| 940
|
py
|
Python
|
pyqt_sql_demo/syntax_highlighter/sql.py
|
nshiell/pyqt-sql-demo
|
9e64ba069de744f69c2ecc2eeddac5b0b9f0968a
|
[
"Unlicense"
] | 18
|
2018-05-14T16:27:24.000Z
|
2022-02-24T06:47:45.000Z
|
pyqt_sql_demo/syntax_highlighter/sql.py
|
nshiell/pyqt-sql-demo
|
9e64ba069de744f69c2ecc2eeddac5b0b9f0968a
|
[
"Unlicense"
] | 2
|
2020-09-11T07:56:05.000Z
|
2021-03-05T14:50:36.000Z
|
pyqt_sql_demo/syntax_highlighter/sql.py
|
nshiell/pyqt-sql-demo
|
9e64ba069de744f69c2ecc2eeddac5b0b9f0968a
|
[
"Unlicense"
] | 9
|
2019-01-16T16:03:51.000Z
|
2021-03-14T01:01:55.000Z
|
from pygments import highlight as _highlight
from pygments.lexers import SqlLexer
from pygments.formatters import HtmlFormatter
def style():
style = HtmlFormatter().get_style_defs()
return style
def highlight(text):
# Generated HTML contains unnecessary newline at the end
# before </pre> closing tag.
# We need to remove that newline because it's screwing up
# QTextEdit formatting and is being displayed
# as a non-editable whitespace.
highlighted_text = _highlight(text, SqlLexer(), HtmlFormatter()).strip()
# Split generated HTML by last newline in it
# argument 1 indicates that we only want to split the string
# by one specified delimiter from the right.
parts = highlighted_text.rsplit("\n", 1)
# Glue back 2 split parts to get the HTML without last
# unnecessary newline
highlighted_text_no_last_newline = "".join(parts)
return highlighted_text_no_last_newline
| 33.571429
| 76
| 0.738298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.474468
|
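A small sketch combining the two helpers above: Pygments CSS from style() plus the cleaned-up HTML from highlight(), e.g. for a QTextEdit; the SQL string is a placeholder.

from pyqt_sql_demo.syntax_highlighter.sql import highlight, style  # assumed import path

sql = "SELECT id, name FROM users WHERE active = 1"
html = "<style>{}</style>{}".format(style(), highlight(sql))
print(html)  # inside a Qt app this would typically feed QTextEdit.setHtml(html)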
792d78778cc9f57f44aeb718a24a94af2accc6bf
| 4,337
|
py
|
Python
|
raytracing.py
|
avigael/raytracing-example
|
e1e9448fdf371c401e9ada642fd0ca8ed2702609
|
[
"MIT"
] | null | null | null |
raytracing.py
|
avigael/raytracing-example
|
e1e9448fdf371c401e9ada642fd0ca8ed2702609
|
[
"MIT"
] | null | null | null |
raytracing.py
|
avigael/raytracing-example
|
e1e9448fdf371c401e9ada642fd0ca8ed2702609
|
[
"MIT"
] | null | null | null |
'''2D Raytracing Example using Pygame'''
import sys
from math import pi, cos, sin
import pygame
# Constants
SIZE = (600, 600)
BORDERS = [[0, 0, SIZE[0], 0], [0, 0, 0, SIZE[1]],
[0, SIZE[1], SIZE[0], SIZE[1]], [SIZE[0], 0, SIZE[0], SIZE[1]]]
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
class Ray:
'''create rays and see their intersections'''
def __init__(self, x_1, y_1, x_2, y_2):
self.x_1 = x_1
self.y_1 = y_1
self.x_2 = x_2
self.y_2 = y_2
def cast(self):
'''see https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection'''
# Checks Window Borders
for border in BORDERS:
x_3 = border[0]
y_3 = border[1]
x_4 = border[2]
y_4 = border[3]
P = (self.x_1 - self.x_2) * (y_3 - y_4) - \
(self.y_1 - self.y_2) * (x_3 - x_4)
if P != 0:
t = ((self.x_1 - x_3) * (y_3 - y_4) -
(self.y_1 - y_3) * (x_3 - x_4))/P
u = -((self.x_1 - self.x_2) * (self.y_1 - y_3) -
(self.y_1 - self.y_2) * (self.x_1 - x_3))/P
if 0 <= t and u >= 0 and u <= 1:
point = ((self.x_2 - self.x_1) * t +
self.x_1, (y_4 - y_3) * u + y_3)
break
# Checks Barriers
for barrier in Barrier.collection:
x_3 = barrier[0]
y_3 = barrier[1]
x_4 = barrier[2]
y_4 = barrier[3]
P = (self.x_1 - self.x_2) * (y_3 - y_4) - \
(self.y_1 - self.y_2) * (x_3 - x_4)
if P != 0:
t = ((self.x_1 - x_3) * (y_3 - y_4) -
(self.y_1 - y_3) * (x_3 - x_4))/P
u = -((self.x_1 - self.x_2) * (self.y_1 - y_3) -
(self.y_1 - self.y_2) * (self.x_1 - x_3))/P
if 0 <= t and u >= 0 and u <= 1:
npoint = ((self.x_2 - self.x_1) * t +
self.x_1, (y_4 - y_3) * u + y_3)
if abs(npoint[0] - self.x_1) < abs(point[0] - self.x_1):
point = npoint
# Draws Ray
pygame.draw.aaline(screen, GREEN, (self.x_1, self.y_1), point)
class Radar:
'''creates rays around a point'''
def __init__(self, x, y, N):
self.rays = []
# N represents number of Rays
for i in range(0, N):
# Formula to create rays around a point
ray = Ray(x, y, x + cos(i/N * 2 * pi), y + sin(i/N * 2 * pi))
self.rays.append(ray)
def radiate(self):
'''emits rays'''
for ray in self.rays:
ray.cast()
class Barrier:
'''create barriers for rays to intersect with'''
collection = []
def __init__(self, x_1, y_1, x_2, y_2):
Barrier.collection.append([x_1, y_1, x_2, y_2])
def draw_barrier():
'''draws Barriers'''
for barrier in Barrier.collection:
p_1 = (barrier[0], barrier[1])
p_2 = (barrier[2], barrier[3])
pygame.draw.aaline(screen, BLACK, p_1, p_2)
def create_map():
'''initializes custom map'''
width = SIZE[0]
height = SIZE[1]
Barrier(width/6, height, width/6, height/2)
Barrier(width/3, height, width/3, height/1.5)
Barrier(width/2, height/2, width/6, height/2)
Barrier(width/2, height/1.5, width/3, height/1.5)
Barrier(width/1.5, height/1.5, width/1.5, height/2)
Barrier(width/1.2, height/2, width/1.5, height/2)
Barrier(width/1.2, height/2, width/1.2, height/1.5)
Barrier(width/1.5, height/1.5, width/1.2, height/1.5)
Barrier(width/3, height/6, width/3, height/3)
Barrier(width/3, height/6, width/2, height/3)
Barrier(width/2, height/6, width/2, height/3)
Barrier(width/2, height/6, width/1.5, height/3)
Barrier(width/1.5, height/6, width/1.5, height/3)
# Initialize Screen
pygame.init()
pygame.display.set_caption("Raytracing Example")
screen = pygame.display.set_mode(SIZE)
create_map()
# Game Loop
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
pygame.display.flip()
mouse = pygame.mouse.get_pos()
radar = Radar(mouse[0], mouse[1], 25)
screen.fill(WHITE)
draw_barrier()
radar.radiate()
| 32.125926
| 78
| 0.512797
| 2,596
| 0.59857
| 0
| 0
| 0
| 0
| 0
| 0
| 480
| 0.110676
|
792f039bf3dcdae9faa7ebde493ddb3c49ba4954
| 3,041
|
py
|
Python
|
preprocessed_data/UCM/Code/global_histogram_stretching.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
preprocessed_data/UCM/Code/global_histogram_stretching.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
preprocessed_data/UCM/Code/global_histogram_stretching.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
import numpy as np
def histogram_r(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_min
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255 - I_min) / (I_max - I_min)))+ I_min
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_g(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255) / (I_max - I_min)) )
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_b(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_max
else:
p_out = int((r_array[i][j] - I_min) * ((I_max) / (I_max - I_min)))
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def stretching(img):
height = len(img)
width = len(img[0])
img[:, :, 2] = histogram_r(img[:, :, 2], height, width)
img[:, :, 1] = histogram_g(img[:, :, 1], height, width)
img[:, :, 0] = histogram_b(img[:, :, 0], height, width)
return img
| 37.54321
| 95
| 0.560671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.02269
|
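A hedged example of applying stretching() to one image; the file paths are placeholders, and the cast to float avoids the stretched values being truncated when written back into a uint8 array.

import cv2
import numpy as np

from global_histogram_stretching import stretching  # assumed import, module shown above

img = cv2.imread('input.jpg').astype(np.float64)     # placeholder path; work in float
out = stretching(img)                                # per-channel global stretch
out = np.clip(out, 0, 255).astype(np.uint8)          # back to a displayable range
cv2.imwrite('stretched.jpg', out)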
792f0b4ef299a46239013a5b5a1e30079b053c00
| 1,854
|
py
|
Python
|
python/test/experimental/test_tb_graph_writer.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 2,792
|
2017-06-26T13:05:44.000Z
|
2022-03-28T07:55:26.000Z
|
python/test/experimental/test_tb_graph_writer.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 138
|
2017-06-27T07:04:44.000Z
|
2022-02-28T01:37:15.000Z
|
python/test/experimental/test_tb_graph_writer.py
|
daniel-falk/nnabla
|
3fe132ea52dc10521cc029a5d6ba8f565cf65ccf
|
[
"Apache-2.0"
] | 380
|
2017-06-26T13:23:52.000Z
|
2022-03-25T16:51:30.000Z
|
# Copyright 2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def test_show_graph():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
nn.clear_parameters()
x = nn.Variable((2, 3, 4, 4))
with nn.parameter_scope('c1'):
h = PF.convolution(x, 8, (3, 3), pad=(1, 1))
h = F.relu(PF.batch_normalization(h))
with nn.parameter_scope('f1'):
y = PF.affine(h, 10)
with TBGraphWriter(log_dir='log_out') as tb:
tb.from_variable(y, output_name="y")
def test_show_curve():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
with TBGraphWriter(log_dir='log_out') as tb:
values = []
for i in range(360):
s = np.sin(i / 180.0 * np.pi)
tb.add_scalar("show_curve/sin", s, i)
values.append(s)
nd_values = np.array(values)
for i in range(10):
tb.add_histogram("histogram", nd_values, i)
nd_values += 0.05
| 30.9
| 74
| 0.662891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 744
| 0.401294
|
792f6c2ed852cae02d142182deaf73d1e0349382
| 1,328
|
py
|
Python
|
pokeman/coatings/resolver_attribute_methods/selective_consumer.py
|
wmarcuse/pokeman
|
5d654c227c456a065b2fea6a0d5827bff424c703
|
[
"BSD-3-Clause"
] | null | null | null |
pokeman/coatings/resolver_attribute_methods/selective_consumer.py
|
wmarcuse/pokeman
|
5d654c227c456a065b2fea6a0d5827bff424c703
|
[
"BSD-3-Clause"
] | null | null | null |
pokeman/coatings/resolver_attribute_methods/selective_consumer.py
|
wmarcuse/pokeman
|
5d654c227c456a065b2fea6a0d5827bff424c703
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import logging
LOGGER = logging.getLogger(__name__)
def start(self):
self.start_consuming()
def on_message(self, channel, method, properties, body):
"""
Invoked by pika when a message is delivered from the AMQP broker. The
channel is passed for convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param channel: The channel object.
:type channel: pika.channel.Channel
    :param method: The basic_deliver method.
    :type method: pika.spec.Basic.Deliver
    :param properties: The message properties.
    :type properties: pika.spec.BasicProperties
:param body: The message body.
:type body: bytes
"""
try:
        LOGGER.debug('Message received (correlation_id=%s)', properties.correlation_id)
        if properties.correlation_id == self.correlation_id_reference:
            LOGGER.debug('Correlation id matched; dispatching to callback')
self.callback_method(json.loads(body), properties)
self.acknowledge_message(method.delivery_tag)
self.channel.stop_consuming()
except Exception:
LOGGER.exception("Synchronous callback method exception:")
| 31.619048
| 73
| 0.707831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 784
| 0.590361
|
79346eb30e63c170afbf3ea69f6c87de3e761345
| 4,100
|
py
|
Python
|
mc-core/mc/data_gen/gnb_status_indication_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mc-core/mc/data_gen/gnb_status_indication_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
mc-core/mc/data_gen/gnb_status_indication_pb2.py
|
copslock/o-ran_ric-app_mc
|
243f8671c28596b1dc70dd295029d6151c9dd778
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-07-07T06:43:16.000Z
|
2021-07-07T06:43:16.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gnb_status_indication.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import x2ap_common_types_pb2 as x2ap__common__types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='gnb_status_indication.proto',
package='streaming_protobufs',
syntax='proto3',
serialized_options=_b('Z1gerrit.o-ran-sc.org/r/ric-plt/streaming-protobufs'),
serialized_pb=_b('\n\x1bgnb_status_indication.proto\x12\x13streaming_protobufs\x1a\x17x2ap_common_types.proto\"W\n\x13GNBStatusIndication\x12@\n\x0bprotocolIEs\x18\x01 \x01(\x0b\x32+.streaming_protobufs.GNBStatusIndicationIEs\"h\n\x16GNBStatusIndicationIEs\x12N\n\x19id_GNBOverloadInformation\x18\x01 \x01(\x0b\x32+.streaming_protobufs.GNBOverloadInformationB3Z1gerrit.o-ran-sc.org/r/ric-plt/streaming-protobufsb\x06proto3')
,
dependencies=[x2ap__common__types__pb2.DESCRIPTOR,])
_GNBSTATUSINDICATION = _descriptor.Descriptor(
name='GNBStatusIndication',
full_name='streaming_protobufs.GNBStatusIndication',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='protocolIEs', full_name='streaming_protobufs.GNBStatusIndication.protocolIEs', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=164,
)
_GNBSTATUSINDICATIONIES = _descriptor.Descriptor(
name='GNBStatusIndicationIEs',
full_name='streaming_protobufs.GNBStatusIndicationIEs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_GNBOverloadInformation', full_name='streaming_protobufs.GNBStatusIndicationIEs.id_GNBOverloadInformation', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=166,
serialized_end=270,
)
_GNBSTATUSINDICATION.fields_by_name['protocolIEs'].message_type = _GNBSTATUSINDICATIONIES
_GNBSTATUSINDICATIONIES.fields_by_name['id_GNBOverloadInformation'].message_type = x2ap__common__types__pb2._GNBOVERLOADINFORMATION
DESCRIPTOR.message_types_by_name['GNBStatusIndication'] = _GNBSTATUSINDICATION
DESCRIPTOR.message_types_by_name['GNBStatusIndicationIEs'] = _GNBSTATUSINDICATIONIES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GNBStatusIndication = _reflection.GeneratedProtocolMessageType('GNBStatusIndication', (_message.Message,), {
'DESCRIPTOR' : _GNBSTATUSINDICATION,
'__module__' : 'gnb_status_indication_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.GNBStatusIndication)
})
_sym_db.RegisterMessage(GNBStatusIndication)
GNBStatusIndicationIEs = _reflection.GeneratedProtocolMessageType('GNBStatusIndicationIEs', (_message.Message,), {
'DESCRIPTOR' : _GNBSTATUSINDICATIONIES,
'__module__' : 'gnb_status_indication_pb2'
# @@protoc_insertion_point(class_scope:streaming_protobufs.GNBStatusIndicationIEs)
})
_sym_db.RegisterMessage(GNBStatusIndicationIEs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 35.652174
| 426
| 0.79439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,420
| 0.346341
|
7934eac98ca8ffa62c49d783f06b030e9d1fdffb
| 902
|
py
|
Python
|
test_tflite_model.py
|
jh88/fbnet
|
5bd12ab8c7f6befc61efd8619d71e710db794c2b
|
[
"MIT"
] | 6
|
2020-01-16T14:38:10.000Z
|
2021-01-24T15:49:11.000Z
|
test_tflite_model.py
|
jh88/fbnet
|
5bd12ab8c7f6befc61efd8619d71e710db794c2b
|
[
"MIT"
] | null | null | null |
test_tflite_model.py
|
jh88/fbnet
|
5bd12ab8c7f6befc61efd8619d71e710db794c2b
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from time import perf_counter as timer
def main():
x = np.load('data/cifar_test_x.npy')
y = np.load('data/cifar_test_y.npy').flatten()
interpreter = tf.lite.Interpreter(model_path='data/fbnet.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
pred = []
t0 = timer()
for i in range(len(x)):
interpreter.set_tensor(input_details[0]['index'], x[i:i+1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
pred.append(output_data.argmax())
t = timer() - t0
print('total time: {:.2f}s, average: {:.2f}ms'.format(t, t * 1000 / len(x)))
print('accuracy: {}/{}'.format(sum(y == pred), len(x)))
return output_data
if __name__ == '__main__':
main()
| 25.771429
| 80
| 0.649667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 146
| 0.161863
|
7935bc304873f87a9dd8551b03972144b4f09bb2
| 582
|
py
|
Python
|
src/pymor/vectorarrays/constructions.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/vectorarrays/constructions.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/vectorarrays/constructions.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
def cat_arrays(vector_arrays):
"""Return a new |VectorArray| which a concatenation of the arrays in `vector_arrays`."""
vector_arrays = list(vector_arrays)
total_length = sum(map(len, vector_arrays))
cated_arrays = vector_arrays[0].empty(reserve=total_length)
for a in vector_arrays:
cated_arrays.append(a)
return cated_arrays
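# --- Editor's note: hedged illustration, not part of pyMOR. `_ListArray` is a hypothetical
# --- stand-in exposing only the empty(reserve=...)/append()/len() interface cat_arrays relies on.
class _ListArray:
    def __init__(self, data=None):
        self.data = list(data) if data is not None else []

    def empty(self, reserve=0):
        return _ListArray()

    def append(self, other):
        self.data.extend(other.data)

    def __len__(self):
        return len(self.data)


if __name__ == '__main__':
    parts = [_ListArray([1, 2]), _ListArray([3]), _ListArray([4, 5])]
    print(len(cat_arrays(parts)))  # 5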
| 41.571429
| 92
| 0.74055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.525773
|
7935f670a579e41f9498d1b3fe1e3afe2409108d
| 407
|
py
|
Python
|
swampytodo/urls.py
|
mrbaboon/swampytodo
|
096c39a57db0d8640e03262550dd1ed07191ecde
|
[
"MIT"
] | null | null | null |
swampytodo/urls.py
|
mrbaboon/swampytodo
|
096c39a57db0d8640e03262550dd1ed07191ecde
|
[
"MIT"
] | 2
|
2015-04-23T00:21:01.000Z
|
2015-04-23T00:29:23.000Z
|
swampytodo/urls.py
|
mrbaboon/swampytodo
|
096c39a57db0d8640e03262550dd1ed07191ecde
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'swampytodo.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^monitor', 'monitor.views.monitor_view', name='monitor'),
url(r'^todo', include('todo.urls', namespace='todo')),
url(r'^admin/', include(admin.site.urls)),
)
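# --- Editor's note: hedged sketch (kept as a comment, not part of the original project) of what
# --- the included, namespaced 'todo.urls' module might look like under the same pre-1.10 Django
# --- API; the view path and name below are hypothetical.
# # todo/urls.py
# from django.conf.urls import patterns, url
#
# urlpatterns = patterns('',
#     url(r'^$', 'todo.views.index', name='index'),  # reversed as 'todo:index'
# )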
| 29.071429
| 67
| 0.643735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.457002
|
79371535785d9b4c1a14c7350dbe3a0fef48e07d
| 3,669
|
py
|
Python
|
src/pymor/playground/progressbar.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/playground/progressbar.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/playground/progressbar.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import sys
############################################################
#
# A progress bar that actually shows progress!
#
# Source:
# http://code.activestate.com/recipes/168639-progress-bar-class/
#
############################################################
class ProgressBar:
""" Creates a text-based progress bar. Call the object with the `print`
command to see the progress bar, which looks something like this::
[=======> 22% ]
You may specify the progress bar's width, min and max values on init.
"""
def __init__(self, minValue=0, maxValue=100, totalWidth=79):
self.progBar = "[]" # This holds the progress bar string
self.min = minValue
self.max = maxValue
self.width = totalWidth
self.amount = minValue # When amount == max, we are 100% done
self.update_amount(self.amount) # Build progress bar string
def update_amount(self, newAmount=0):
""" Update the progress bar with the new amount (with min and max
values set at initialization; if it is over or under, it takes the
min or max value as a default. """
if newAmount < self.min:
newAmount = self.min
if newAmount > self.max:
newAmount = self.max
self.amount = newAmount
# Figure out the new percent done, round to an integer
diffFromMin = float(self.amount - self.min)
percentDone = (diffFromMin / float(self.max - self.min)) * 100.0
percentDone = int(round(percentDone))
# Figure out how many hash bars the percentage should be
allFull = self.width - 2
numHashes = (percentDone / 100.0) * allFull
numHashes = int(round(numHashes))
# Build a progress bar with an arrow of equal signs; special cases for
# empty and full
if numHashes == 0:
self.progBar = "[>%s]" % (' ' * (allFull - 1))
elif numHashes == allFull:
self.progBar = "[%s]" % ('=' * allFull)
else:
self.progBar = "[%s>%s]" % ('=' * (numHashes - 1),
' ' * (allFull - numHashes))
# figure out where to put the percentage, roughly centered
        percentPlace = (len(self.progBar) // 2) - len(str(percentDone))  # integer division so the slice indices below stay ints
percentString = str(percentDone) + "%"
# slice the percentage into the bar
self.progBar = ''.join([self.progBar[0:percentPlace], percentString,
self.progBar[percentPlace + len(percentString):]
])
def __str__(self):
return str(self.progBar)
def __call__(self, value):
""" Increases the amount by value, and writes to stdout. Prints a
carriage return first, so it will overwrite the current line in
stdout."""
if self.amount < self.max:
print('\r', end=' ')
self.update_amount(self.amount + value)
sys.stdout.write(str(self))
sys.stdout.write(self.amount < self.max and "\r" or "\n")
sys.stdout.flush()
def setMaximum(self, value):
self.max = value
def maximum(self):
return self.max
if __name__ == '__main__':
from time import sleep
p = ProgressBar()
for i in range(0, 201):
p(1)
if i == 90:
p.max = 200
sleep(0.02)
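# --- Editor's note: hedged sketch, not in the original recipe, showing the absolute API:
# --- update_amount() jumps to a given position instead of incrementing like __call__ does.
def demo_absolute_updates(total=50):
    bar = ProgressBar(minValue=0, maxValue=total, totalWidth=40)
    for done in range(1, total + 1):
        bar.update_amount(done)          # absolute position, not an increment
        sys.stdout.write('\r' + str(bar))
        sys.stdout.flush()
    sys.stdout.write('\n')
# demo_absolute_updates()  # uncomment to run the sketch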
| 35.970588
| 80
| 0.562006
| 2,978
| 0.811665
| 0
| 0
| 0
| 0
| 0
| 0
| 1,576
| 0.429545
|
79372371d63d8554463e6ea69f517b712b741c97
| 8,184
|
py
|
Python
|
toontown/minigame/TwoDBattleMgr.py
|
SuperM0use24/TT-CL-Edition
|
fdad8394f0656ae122b687d603f72afafd220c65
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/minigame/TwoDBattleMgr.py
|
SuperM0use24/TT-CL-Edition
|
fdad8394f0656ae122b687d603f72afafd220c65
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/minigame/TwoDBattleMgr.py
|
SuperM0use24/TT-CL-Edition
|
fdad8394f0656ae122b687d603f72afafd220c65
|
[
"MIT"
] | 3
|
2021-06-03T05:36:36.000Z
|
2021-06-22T15:07:31.000Z
|
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase.ToonBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from toontown.battle import MovieUtil
import math
class TwoDBattleMgr(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDBattleMgr')
def __init__(self, game, toon):
self.game = game
self.toon = toon
self.waterBulletIval = None
self.shootTrack = None
self.showCollSpheres = False
self.WATER_SPRAY_COLOR = Point4(1, 1, 1, 1)
self.WATER_BULLET_SCALE = 0.2
self.SHOOT_DISTANCE = 10
self.WATER_BULLET_START_POINT = Point3(0, 1, 3)
self.WATER_BULLET_END_POINT = Point3(0, self.WATER_BULLET_START_POINT.getY() + self.SHOOT_DISTANCE, self.WATER_BULLET_START_POINT.getZ())
self.WATER_BULLET_HIDE_POINT = Point3(0, 0, 1.5)
self.sprayProp = self.game.assetMgr.sprayProp.copyTo(self.game.assetMgr.world)
self.setupPistol()
if self.toon == base.localAvatar:
self.createShootCollision()
return
def destroy(self):
if self.toon == base.localAvatar:
if self.waterBulletIval:
self.waterBulletIval.finish()
del self.waterBulletIval
self.waterBulletIval = None
self.ignore('enter' + self.collSphereName)
base.localAvatar.controlManager.currentControls.cTrav.removeCollider(self.waterBullet)
self.waterBullet.removeNode()
del self.waterBullet
self.hand_jointpath0.removeNode()
MovieUtil.removeProp(self.pistol)
if self.shootTrack != None:
self.shootTrack.finish()
self.shootTrack = None
self.game = None
self.toon = None
return
def start(self):
pass
def stop(self):
pass
def setupPistol(self):
self.pistol = globalPropPool.getProp('water-gun')
hands = self.toon.getRightHands()
self.hand_jointpath0 = hands[0].attachNewNode('handJoint0-path')
pistolPos = Point3(0.28, 0.1, 0.08)
pistolHpr = VBase3(85.6, -4.44, 94.43)
MovieUtil.showProp(self.pistol, self.hand_jointpath0, pistolPos, pistolHpr)
def shoot(self):
if not self.shootTrack:
self.shootTrack = Parallel(self.getToonShootTrack(), self.getSprayTrack())
if self.toon == base.localAvatar:
self.shootTrack.append(Func(self.game.assetMgr.playWatergunSound))
self.shootTrack.append(self.getWaterBulletIval())
self.shootTrack.start()
return
elif self.shootTrack.isStopped():
self.shootTrack = Parallel(self.getToonShootTrack(), self.getSprayTrack())
if self.toon == base.localAvatar:
self.shootTrack.append(Func(self.game.assetMgr.playWatergunSound))
self.shootTrack.append(self.getWaterBulletIval())
self.shootTrack.start()
def createShootCollision(self):
self.notify.debug('entering createShootCollision')
collSphere = CollisionSphere(0, 0, 0, 1)
collSphere.setTangible(0)
self.collSphereName = self.game.uniqueName('waterBullet')
collNode = CollisionNode(self.collSphereName)
collNode.setFromCollideMask(ToontownGlobals.WallBitmask)
collNode.addSolid(collSphere)
self.waterBullet = base.localAvatar.attachNewNode(collNode)
self.waterBullet.setPos(self.WATER_BULLET_HIDE_POINT)
self.waterBullet.setScale(self.WATER_BULLET_SCALE)
self.waterBullet.hide()
if self.showCollSpheres:
self.waterBullet.show()
bulletEvent = CollisionHandlerEvent()
bulletEvent.addInPattern('enter%fn')
bulletEvent.addOutPattern('exit%fn')
cTrav = base.localAvatar.controlManager.currentControls.cTrav
cTrav.addCollider(self.waterBullet, bulletEvent)
self.accept('enter' + self.collSphereName, self.handleBulletCollision)
self.waterBulletIval = Sequence(Wait(0.15))
self.waterBulletIval.append(LerpPosInterval(self.waterBullet, 0.25, pos=Point3(self.WATER_BULLET_END_POINT), startPos=Point3(self.WATER_BULLET_START_POINT), name='waterBulletMoveFront'))
self.waterBulletIval.append(Func(self.waterBullet.setPos, self.WATER_BULLET_HIDE_POINT))
def getToonShootTrack(self):
def returnToLastAnim(toon):
if hasattr(toon, 'playingAnim') and toon.playingAnim:
toon.loop(toon.playingAnim)
else:
toon.loop('neutral')
torso = self.toon.getPart('torso', '1000')
toonTrack = Sequence(ActorInterval(self.toon, 'water-gun', startFrame=48, endFrame=58, partName='torso'), ActorInterval(self.toon, 'water-gun', startFrame=107, endFrame=126, playRate=2, partName='torso'), Func(returnToLastAnim, self.toon))
return toonTrack
def calcSprayStartPos(self):
if self.toon:
self.toon.update(0)
joint = self.pistol.find('**/joint_nozzle')
p = joint.getPos(render)
self.origin = p
def calcSprayEndPos(self):
if self.toon:
xDirection = -math.sin(self.toon.getH())
else:
xDirection = -math.sin(-90)
endPos = Point3(self.origin.getX() + self.SHOOT_DISTANCE * xDirection, self.origin.getY(), self.origin.getZ())
self.target = endPos
def getSprayTrack(self):
dSprayScale = 0.15
dSprayHold = 0.035
color = self.WATER_SPRAY_COLOR
parent = render
horizScale = 1.0
vertScale = 1.0
def showSpray(sprayScale, sprayRot, sprayProp, parent):
sprayRot.reparentTo(parent)
sprayRot.clearMat()
sprayScale.reparentTo(sprayRot)
sprayScale.clearMat()
sprayProp.reparentTo(sprayScale)
sprayProp.clearMat()
sprayRot.setPos(self.origin)
sprayRot.lookAt(Point3(self.target))
def calcTargetScale(horizScale = horizScale, vertScale = vertScale):
distance = Vec3(self.target - self.origin).length()
yScale = distance / MovieUtil.SPRAY_LEN
targetScale = Point3(yScale * horizScale, yScale, yScale * vertScale)
return targetScale
def prepareToShrinkSpray(spray, sprayProp):
sprayProp.setPos(Point3(0.0, -MovieUtil.SPRAY_LEN, 0.0))
spray.setPos(self.target)
def hideSpray(spray, sprayScale, sprayRot, sprayProp, propPool):
sprayProp.detachNode()
sprayRot.removeNode()
sprayScale.removeNode()
sprayProp = self.sprayProp
sprayScale = hidden.attachNewNode('spray-parent')
sprayRot = hidden.attachNewNode('spray-rotate')
spray = sprayRot
spray.setColor(color)
if color[3] < 1.0:
spray.setTransparency(1)
track = Sequence(Wait(0.1), Func(self.calcSprayStartPos), Func(self.calcSprayEndPos), Func(showSpray, sprayScale, sprayRot, sprayProp, parent), LerpScaleInterval(sprayScale, dSprayScale, calcTargetScale, startScale=MovieUtil.PNT3_NEARZERO), Wait(dSprayHold), Func(prepareToShrinkSpray, spray, sprayProp), LerpScaleInterval(sprayScale, dSprayScale, MovieUtil.PNT3_NEARZERO), Func(hideSpray, spray, sprayScale, sprayRot, sprayProp, globalPropPool))
return track
def handleBulletCollision(self, cevent):
if cevent.getIntoNodePath().getName()[:5] == 'Enemy':
sectionIndex = int(cevent.getIntoNodePath().getName()[6:8])
enemyIndex = int(cevent.getIntoNodePath().getName()[9:11])
messenger.send('enemyShot', [sectionIndex, enemyIndex])
def clearWaterBulletIval(self):
if self.waterBulletIval:
self.waterBulletIval.finish()
del self.waterBulletIval
self.waterBulletIval = None
return
def getWaterBulletIval(self):
if not self.waterBulletIval.isPlaying():
return self.waterBulletIval
| 43.301587
| 454
| 0.660191
| 7,866
| 0.961144
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.033724
|
7937d7c40eebe24b6b2fbdc5b2fcb247cedd3bed
| 1,211
|
py
|
Python
|
lesson-12/ex1.py
|
alirsamar/intro-ml
|
36450b26b7ea09472ccdd2a0abce51b6c3889a20
|
[
"MIT"
] | null | null | null |
lesson-12/ex1.py
|
alirsamar/intro-ml
|
36450b26b7ea09472ccdd2a0abce51b6c3889a20
|
[
"MIT"
] | null | null | null |
lesson-12/ex1.py
|
alirsamar/intro-ml
|
36450b26b7ea09472ccdd2a0abce51b6c3889a20
|
[
"MIT"
] | null | null | null |
# Explained Variance of Each PC
#### Boilerplate #################################################################
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
X = lfw_people.data
n_features = X.shape[1]
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
n_components = 150
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
#### Exercise code #############################################################
print "Variance ratio:"
print pca.explained_variance_ratio_
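#### Editor's addition (hedged) ################################################
# Not part of the original exercise: the cumulative sum shows how much variance
# the first k principal components capture together.
print "Cumulative explained variance (first 10 PCs):"
print np.cumsum(pca.explained_variance_ratio_)[:10]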
| 26.911111
| 90
| 0.720066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.194055
|
793b2e2631986fe445d59015d41bd730fa29fbfa
| 352
|
py
|
Python
|
qt/__init__.py
|
popupcad/popupcad
|
d3da448260cd5cb9e05417b0a723d7f73ae4e06e
|
[
"MIT"
] | 19
|
2015-08-01T22:13:39.000Z
|
2020-03-07T03:55:46.000Z
|
qt/__init__.py
|
CadQuery/popupcad
|
b0c7b406d4b288c7cb375340323bba0252aedbfb
|
[
"MIT"
] | 106
|
2015-07-23T19:58:01.000Z
|
2019-05-14T03:46:08.000Z
|
qt/__init__.py
|
CadQuery/popupcad
|
b0c7b406d4b288c7cb375340323bba0252aedbfb
|
[
"MIT"
] | 9
|
2015-10-04T23:38:41.000Z
|
2020-07-16T03:50:34.000Z
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import sys
argv = [item.lower() for item in sys.argv]
if 'qt4' in argv:
loaded = 'PyQt4'
elif 'qt5' in argv:
loaded = 'PyQt5'
elif 'pyside' in argv:
loaded = 'PySide'
else:
loaded = 'PyQt5'
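# --- Editor's note: hedged sketch, not in the original module, of how `loaded` might be consumed;
# --- only the top-level module names of the three bindings are assumed.
def _import_qtcore(binding=None):
    binding = binding or loaded
    if binding == 'PyQt5':
        from PyQt5 import QtCore
    elif binding == 'PyQt4':
        from PyQt4 import QtCore
    else:
        from PySide import QtCore
    return QtCore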
| 16
| 43
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.528409
|
793db8fe0005397f1d556c93efea7bcd3624a441
| 3,941
|
py
|
Python
|
fast_carpenter/__main__.py
|
lgray/fast-carpenter
|
c33b83d16031e5ac4b857ac1a644433b6132bb05
|
[
"Apache-2.0"
] | null | null | null |
fast_carpenter/__main__.py
|
lgray/fast-carpenter
|
c33b83d16031e5ac4b857ac1a644433b6132bb05
|
[
"Apache-2.0"
] | null | null | null |
fast_carpenter/__main__.py
|
lgray/fast-carpenter
|
c33b83d16031e5ac4b857ac1a644433b6132bb05
|
[
"Apache-2.0"
] | null | null | null |
"""
Chop up those trees into nice little tables and dataframes
"""
from __future__ import print_function
import sys
from .help import help_stages
import fast_flow.v1 as fast_flow
import fast_curator
import logging
import atuproot.atuproot_main as atup
from .event_builder import EventBuilder
from atsge.build_parallel import build_parallel
from .utils import mkdir_p
from .version import __version__
atup.EventBuilder = EventBuilder
atup.build_parallel = build_parallel
logging.getLogger(__name__).setLevel(logging.INFO)
class DummyCollector():
def collect(self, *args, **kwargs):
pass
def create_parser():
from argparse import ArgumentParser, Action
class StagesHelp(Action):
def __call__(self, parser, namespace, values, option_string=None):
full_output = option_string == "--help-stages-full"
help_stages(values, full_output=full_output)
sys.exit(0)
parser = ArgumentParser(description=__doc__)
parser.add_argument("dataset_cfg", type=str,
help="Dataset config to run over")
parser.add_argument("sequence_cfg", type=str,
help="Config for how to process events")
parser.add_argument("--outdir", default="output", type=str,
help="Where to save the results")
parser.add_argument("--mode", default="multiprocessing", type=str,
help="Which mode to run in (multiprocessing, htcondor, sge)")
parser.add_argument("--ncores", default=1, type=int,
help="Number of cores to run on")
parser.add_argument("--nblocks-per-dataset", default=-1, type=int,
help="Number of blocks per dataset")
parser.add_argument("--nblocks-per-sample", default=-1, type=int,
help="Number of blocks per sample")
parser.add_argument("--blocksize", default=1000000, type=int,
help="Number of events per block")
parser.add_argument("--quiet", default=False, action='store_true',
help="Keep progress report quiet")
parser.add_argument("--profile", default=False, action='store_true',
help="Profile the code")
parser.add_argument("--help-stages", nargs="?", default=None, action=StagesHelp,
metavar="stage-name-regex",
help="Print help specific to the available stages")
parser.add_argument("--help-stages-full", action=StagesHelp, metavar="stage",
help="Print the full help specific to the available stages")
parser.add_argument("-v", "--version", action="version", version='%(prog)s ' + __version__)
return parser
def main(args=None):
args = create_parser().parse_args(args)
if args.ncores < 1:
args.ncores = 1
sequence = fast_flow.read_sequence_yaml(args.sequence_cfg, output_dir=args.outdir, backend="fast_carpenter")
datasets = fast_curator.read.from_yaml(args.dataset_cfg)
mkdir_p(args.outdir)
_, ret_val = run_carpenter(sequence, datasets, args)
print(ret_val)
return 0
def run_carpenter(sequence, datasets, args):
process = atup.AtUproot(args.outdir,
quiet=args.quiet,
parallel_mode=args.mode,
process=args.ncores,
max_blocks_per_dataset=args.nblocks_per_dataset,
max_blocks_per_process=args.nblocks_per_sample,
nevents_per_block=args.blocksize,
profile=args.profile,
profile_out_path="profile.txt",
)
sequence = [(s, s.collector() if hasattr(s, "collector") else DummyCollector()) for s in sequence]
ret_val = process.run(datasets, sequence)
return sequence, ret_val
if __name__ == "__main__":
main()
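# --- Editor's note: hedged example, not in the original module. The YAML paths are hypothetical;
# --- the call only reuses the argparse interface defined in create_parser() above.
def _example_invocation():
    # same as: fast_carpenter datasets.yml sequence.yml --outdir out --ncores 4
    return main(["datasets.yml", "sequence.yml", "--outdir", "out", "--ncores", "4"])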
| 39.019802
| 112
| 0.632073
| 321
| 0.081451
| 0
| 0
| 0
| 0
| 0
| 0
| 819
| 0.207815
|
793dc7d4ffbc96247a33db7d9520735900231242
| 1,283
|
py
|
Python
|
pictures/tests.py
|
FredAtei/Photo-app
|
5f9e72948af6a27b1c6c438fa22652c06fc4f6d4
|
[
"MIT"
] | null | null | null |
pictures/tests.py
|
FredAtei/Photo-app
|
5f9e72948af6a27b1c6c438fa22652c06fc4f6d4
|
[
"MIT"
] | null | null | null |
pictures/tests.py
|
FredAtei/Photo-app
|
5f9e72948af6a27b1c6c438fa22652c06fc4f6d4
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Image,Location,Category
# Create your tests here.
class CategoryTestClass(TestCase):
def setUp(self):
self.travel = Category(name='travel')
def test_instance(self):
self.assertTrue(isinstance(self.travel,Category))
def test_save_method(self):
self.travel.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories)>0)
class LocationTestClass(TestCase):
def setUp(self):
self.Paris = Location(name='Paris')
def test_instance(self):
self.assertTrue(isinstance(self.Paris,Location))
def test_save_method(self):
self.Paris.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations)>0)
class ImageTestClass(TestCase):
    def setUp(self):
        self.travel = Category(name='travel')
        self.travel.save_category()
        self.paris = Location(name='Paris')
        self.paris.save_location()
        self.new_image = Image(image_name='Eot', image_description='Great things', image_category=self.travel, image_location=self.paris)
        self.new_image.save_image()
def tearDown(self):
Category.objects.all().delete()
Location.objects.all().delete()
Image.objects.all().delete()
def test_get_images(self):
all_images = Image.get_images()
self.assertTrue(len(all_images)>0)
| 31.292683
| 136
| 0.683554
| 1,168
| 0.910366
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.045986
|
793dd777651fda3f7f0226048a22a03099d8826c
| 2,110
|
py
|
Python
|
{{ cookiecutter.project_name|replace(' ', '_')|replace('-', '_')|lower }}/project/apps/users/views.py
|
digitalashes/django2.0-template
|
4387c25fb94cbff4f201b279f2eefcb174658eff
|
[
"Apache-2.0"
] | 1
|
2018-03-13T21:16:49.000Z
|
2018-03-13T21:16:49.000Z
|
{{ cookiecutter.project_name|replace(' ', '_')|replace('-', '_')|lower }}/project/apps/users/views.py
|
digitalashes/django2.0-template
|
4387c25fb94cbff4f201b279f2eefcb174658eff
|
[
"Apache-2.0"
] | null | null | null |
{{ cookiecutter.project_name|replace(' ', '_')|replace('-', '_')|lower }}/project/apps/users/views.py
|
digitalashes/django2.0-template
|
4387c25fb94cbff4f201b279f2eefcb174658eff
|
[
"Apache-2.0"
] | null | null | null |
{%- if cookiecutter.use_allauth == "y" and cookiecutter.use_rest == "y" %}
from django.contrib.auth import logout as auth_logout
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from rest_auth.app_settings import create_token
from rest_auth.registration.views import RegisterView as RegisterViewBase
from rest_auth.views import PasswordChangeView as BasePasswordChangeView
from rest_framework import status
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from users.jwt import jwt_response_payload_handler
class RegisterApiView(RegisterViewBase):
"""
Generic user registration.
"""
http_method_names = ('post', 'head', 'options')
permission_classes = (AllowAny,)
def get_response_data(self, user):
token = create_token(user)
data = jwt_response_payload_handler(token, user)
return data
class LogoutApiView(APIView):
"""
Calls Django logout method and delete the Token object
assigned to the current User object.
Accepts/Returns nothing.
"""
http_method_names = ('post', 'head', 'options')
permission_classes = (AllowAny,)
def post(self, request):
return self.logout(request)
def logout(self, request):
try:
request.user.auth_token.delete()
except (AttributeError, ObjectDoesNotExist):
pass
auth_logout(request)
return Response({'detail': _('Successfully logged out.')}, status=status.HTTP_200_OK)
class PasswordChangeApiView(BasePasswordChangeView):
"""
Calls Django Auth SetPasswordForm save method.
Accepts the following POST parameters: old_password, new_password1, new_password2
Returns the success/fail message.
"""
http_method_names = ('post', 'head', 'options')
permission_classes = (IsAuthenticated,)
registration = RegisterApiView.as_view()
logout = LogoutApiView.as_view()
password_change = PasswordChangeApiView.as_view()
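# --- Editor's note: hedged sketch (kept as a comment, not part of the template) of how these
# --- callables might be wired up; the URL prefixes are hypothetical, `path` is the stock
# --- Django 2.0 helper.
#   from django.urls import path
#   from users import views
#
#   urlpatterns = [
#       path('auth/registration/', views.registration),
#       path('auth/logout/', views.logout),
#       path('auth/password/change/', views.password_change),
#   ]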
{%- endif %}
| 27.763158
| 93
| 0.734597
| 1,289
| 0.6109
| 0
| 0
| 0
| 0
| 0
| 0
| 476
| 0.225592
|
793e098cac69c0e739d368a1fa2b5c6d69bbe98f
| 4,519
|
py
|
Python
|
pyperform/tools.py
|
timgates42/pyperform
|
97d87e8b9ddb35bd8f2a6782965fd7735ab0349f
|
[
"MIT"
] | 250
|
2015-01-03T10:15:26.000Z
|
2022-03-31T19:43:37.000Z
|
pyperform/tools.py
|
timgates42/pyperform
|
97d87e8b9ddb35bd8f2a6782965fd7735ab0349f
|
[
"MIT"
] | 4
|
2015-01-23T00:19:45.000Z
|
2015-10-29T17:17:46.000Z
|
pyperform/tools.py
|
timgates42/pyperform
|
97d87e8b9ddb35bd8f2a6782965fd7735ab0349f
|
[
"MIT"
] | 14
|
2015-01-17T16:23:04.000Z
|
2021-07-15T10:59:53.000Z
|
__author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
    if order < -6:  # covers sub-nanosecond times too, so time_units/factor are always bound
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
def globalize_indentation(src):
""" Strip the indentation level so the code runs in the global scope. """
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
def walk_tree(start, attr):
"""
Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation.
"""
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild)
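# --- Editor's note: hedged sketch, not part of the original module, exercising the pure helpers
# --- above; `_Node` is a hypothetical stand-in with a `children` list attribute.
class _Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)


def _example():
    print(convert_time_units(0.00042))                       # -> 420.000 us
    print(generate_call_statement(_example, False, 1, k=2))  # -> _example(1, k=2)
    root = _Node('root', [_Node('a', [_Node('b')]), _Node('c')])
    print([n.name for n in walk_tree(root, 'children')])     # depth-first: root, a, b, c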
| 30.741497
| 107
| 0.548351
| 0
| 0
| 431
| 0.095375
| 0
| 0
| 0
| 0
| 849
| 0.187873
|
793e3ac3dcb05f0d0810a86209b05739d4ea782a
| 7,522
|
py
|
Python
|
dictify.py
|
Dharma-Sagar/dictify
|
c76713feaf45670b245ed7e7feb894c12dffb9cd
|
[
"Apache-2.0"
] | null | null | null |
dictify.py
|
Dharma-Sagar/dictify
|
c76713feaf45670b245ed7e7feb894c12dffb9cd
|
[
"Apache-2.0"
] | null | null | null |
dictify.py
|
Dharma-Sagar/dictify
|
c76713feaf45670b245ed7e7feb894c12dffb9cd
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from pathlib import Path
import re
import yaml
import json
from botok import Text
import pyewts
conv = pyewts.pyewts()
def dictify_text(string, is_split=False, selection_yaml='data/dictionaries/dict_cats.yaml', expandable=True, mode='en_bo'):
"""
takes segmented text and finds entries from dictionaries
:param expandable: will segment definitions into senses if True, not if False
:param selection_yaml: add None or "" to prevent selection
:param string: segmented text to be processed
:return: list of tuples containing the word and a dict containing the definitions(selected or not) and an url
"""
words = []
if is_split:
for w in string:
if w:
words.append((w, {}))
else:
string = string.replace('\n', ' ')
for w in string.split(' '):
if w:
words.append((w, {}))
dicts = load_dicts()
for num, word in enumerate(words):
lemma = word[0].rstrip('་')
defs = dicts[lemma]
# filter
if selection_yaml:
defs = select_defs(defs, yaml_path=selection_yaml, mode=mode)
# split in senses
if expandable:
if defs and 'en' in defs:
entry_en = defs['en'][1]
defs['en'][1] = split_in_senses(entry_en, lang='en')
if defs and 'bo' in defs:
entry_bo = defs['bo'][1]
defs['bo'][1] = split_in_senses(entry_bo, lang='bo')
words[num][1]['defs'] = defs
# url
url = gen_link(lemma)
words[num][1]['url'] = url
return words
def load_dicts():
dicts = defaultdict(dict)
dict_path = Path(__file__).parent / 'data/dictionaries/converted'
dict_other = Path(__file__).parent / 'data/dictionaries/other'
dict_files = sorted(list(dict_path.glob('*.txt')) + list(dict_other.glob('*.txt')))
for f in dict_files:
name = f.stem
if name.startswith('monlam'):
name = name[:-2] # remove file number suffix "_1", "_2" and "_3"
lines = f.read_text().split('\n')
for line in lines:
if '|' not in line:
continue
lemma, entry = line.split('|')
dicts[lemma][name] = f'{dicts[lemma][name]} {entry}' if name in dicts[lemma] else entry
return dicts
def split_in_senses(entry, lang):
header_size = 10 # syllables
tsikchen_dagsar = r' ([༡༢༣༤༥༦༧༨༩༠]+\.)'
tsikchen_dagsar_start = r'(?: |^)([༡༢༣༤༥༦༧༨༩༠]+\.)'
tsikchen = r' ([༡༢༣༤༥༦༧༨༩༠]+༽) '
tsikchen_start = r'(?: |^)([༡༢༣༤༥༦༧༨༩༠]+༽) '
monlam = r' ((?:[^་]+་[^་]+ )?[0-9]+\.) '
ry_start = r'^([0-9]+\)) ' # line must start with this pattern
ry = r'(?: |^)([0-9]+\)) '
senses = []
if lang == 'bo':
if re.findall(monlam, entry):
parts = [e for e in re.split(monlam, entry) if e]
try:
parts = [f'{parts[n]} {parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
elif re.findall(tsikchen_dagsar, entry):
parts = [e for e in re.split(tsikchen_dagsar_start, entry) if e]
if not re.findall(r'^[༡༢༣༤༥༦༧༨༩༠]', parts[0]):
parts = [f'{parts[0]} {parts[1]}'] + parts[2:]
try:
parts = [f'{parts[n]}{parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
elif re.findall(tsikchen, entry):
parts = [e for e in re.split(tsikchen_start, entry) if e]
if parts[0].startswith('༼'):
parts = [f'{parts[0]} {parts[1]}'] + parts[2:]
try:
parts = [f'{parts[n]} {parts[n + 1]}' for n in range(0, len(parts), 2)]
except IndexError as e:
print(entry[:100])
raise SyntaxError(e)
for p in parts:
t = Text(p).tokenize_chunks_plaintext.split(' ')
if len(t) > header_size:
header, body = ''.join(t[:header_size]).replace('_', ' '), ''.join(t[header_size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
else:
return entry
elif lang == 'en' and re.findall(ry_start, entry):
parts = [e for e in re.split(ry, entry) if e]
parts = [f'{parts[n]} {parts[n+1]}' for n in range(0, len(parts), 2)]
for p in parts:
t = p.split(' ')
size = header_size - 4 if header_size - 4 > 0 else 0
if len(t) > size:
header, body = ' '.join(t[:size]).replace('_', ' '), ' '.join(t[size:]).replace('_', ' ')
senses.append((header, body))
else:
senses.append(p)
else:
return entry
return senses
def select_defs(defs, yaml_path, mode):
cats = yaml.safe_load(Path(yaml_path).read_text())
english, tibetan = cats['english']['dictionary'], cats['tibetan']['dictionary']
selected = {}
# selecting the first English definition from the list in dict_cats.yaml
if 'en' in mode:
for full, name in english:
if full in defs:
selected['en'] = (name, defs[full])
break
# selecting the first Tibetan definition from the list in dict_cats.yaml
if 'bo' in mode:
for full, name in tibetan:
if full in defs:
selected['bo'] = (name, defs[full])
break
# format selected
if 'en' in selected and 'bo' in selected:
return {'en': [selected['en'][0], selected['en'][1]], 'bo': [selected['bo'][0], selected['bo'][1]]}
elif 'en' in selected:
return {'en': [selected['en'][0], selected['en'][1]]}
elif 'bo' in selected:
return {'bo': [selected['bo'][0], selected['bo'][1]]}
else:
return None
def gen_link(word):
link_pattern = 'https://dictionary.christian-steinert.de/#%7B%22activeTerm%22%3A%22{word}%22%2C%22' \
'lang%22%3A%22tib%22%2C%22inputLang%22%3A%22tib%22%2C%22currentListTerm%22%3A%22{word}%22%2C%22' \
'forceLeftSideVisible%22%3Atrue%2C%22offset%22%3A0%7D'
wylie = conv.toWylie(word).replace(' ', '%20')
return link_pattern.format(word=wylie)
if __name__ == '__main__':
for f in Path('input').glob('*.txt'):
dump = f.read_text(encoding='utf-8')
out = dictify_text(dump, expandable=True)
out_f = Path('output') / f.name
out_f.write_text(json.dumps(out, ensure_ascii=False, indent=4))
__all__ = ['dictify_text']  # entries in __all__ must be strings
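# --- Editor's note: hedged sketch, not in the original module (and assuming its botok/pyewts
# --- imports are installed), of the English branch of split_in_senses(); the entry text is
# --- made up, only the "N) " sense markers matter.
def _example_senses():
    entry = '1) to go; to walk somewhere far away 2) to depart quickly from a place'
    # each numbered sense comes back whole, or as a (header, body) pair split after
    # roughly header_size - 4 space-separated tokens
    return split_in_senses(entry, lang='en')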
| 37.237624
| 123
| 0.534964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,822
| 0.238607
|
f700e260a7d6b3f4dc9cdfd4df281f246d308a20
| 2,504
|
py
|
Python
|
tests/test_validators.py
|
fakeezz/edipy
|
00c125621201e7290add135240c131c22feb3a72
|
[
"MIT"
] | 1
|
2018-05-15T18:27:31.000Z
|
2018-05-15T18:27:31.000Z
|
tests/test_validators.py
|
fakeezz/edipy
|
00c125621201e7290add135240c131c22feb3a72
|
[
"MIT"
] | null | null | null |
tests/test_validators.py
|
fakeezz/edipy
|
00c125621201e7290add135240c131c22feb3a72
|
[
"MIT"
] | 2
|
2020-12-25T16:37:56.000Z
|
2021-06-22T13:13:18.000Z
|
# coding: utf-8
import pytest
from edipy import fields, validators, exceptions
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '1'),
(fields.Integer(1, validators=[validators.MaxValue(3)]), '2'),
(fields.Integer(1, validators=[validators.MinValue(1)]), '5'),
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), '12345'),
(fields.String(12, validators=[validators.Email()]), 'abc@mail.com'),
])
def test_using_validators(fixed_type, data):
try:
fixed_type.encode(data)
except exceptions.ValidationError:
pytest.fail(u"ValidationError should not be thrown")
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '0'),
(fields.Integer(1, validators=[validators.Range(1, 5)]), '6'),
])
def test_validate_range(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MaxValue(1)]), '2'),
(fields.Integer(1, validators=[validators.MaxValue(5)]), '6'),
])
def test_validate_max_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MinValue(1)]), '0'),
(fields.Integer(1, validators=[validators.MinValue(5)]), '4'),
])
def test_validate_min_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), 'a123f'),
(fields.String(5, validators=[validators.Regex(r"\d")]), 'abcde'),
(fields.String(5, validators=[validators.Regex(r"[A-Z]{6}")]), 'ABCDE'),
])
def test_validate_regex(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
def test_throws_exception_when_regex_is_invalid():
with pytest.raises(ValueError):
field = fields.String(5, validators=[validators.Regex(")")])
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(11, validators=[validators.Email()]), 'edimail.com'),
(fields.String(11, validators=[validators.Email()]), 'edi@mailcom'),
])
def test_validate_email(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
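# --- Editor's note: hedged sketch, not in the original test module, of the same API used directly
# --- outside pytest; it relies only on the encode()/ValidationError behaviour exercised above.
def _example_direct_use():
    field = fields.Integer(1, validators=[validators.Range(1, 5)])
    try:
        field.encode('6')              # out of range, as in the parametrized cases above
    except exceptions.ValidationError:
        return 'rejected'
    return 'accepted'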
| 33.837838
| 76
| 0.69369
| 0
| 0
| 0
| 0
| 2,244
| 0.896166
| 0
| 0
| 294
| 0.117412
|
f7013f89ddf7249cb8c21753c974a4e817c0eaa2
| 45,183
|
py
|
Python
|
archetypal/schedule.py
|
brunomarct/archetypal
|
ce8daf4e18ef3ec92967e5d6837b392199caf83b
|
[
"MIT"
] | null | null | null |
archetypal/schedule.py
|
brunomarct/archetypal
|
ce8daf4e18ef3ec92967e5d6837b392199caf83b
|
[
"MIT"
] | null | null | null |
archetypal/schedule.py
|
brunomarct/archetypal
|
ce8daf4e18ef3ec92967e5d6837b392199caf83b
|
[
"MIT"
] | null | null | null |
################################################################################
# Module: schedule.py
# Description: Functions for handling conversion of EnergyPlus schedule objects
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import functools
import io
import logging as lg
from datetime import datetime, timedelta
import archetypal
import numpy as np
import pandas as pd
from archetypal import log
class Schedule(object):
"""An object designed to handle any EnergyPlys schedule object"""
def __init__(self, sch_name, idf=None, start_day_of_the_week=0,
strict=False, base_year=2018, schType=None, **kwargs):
"""
Args:
idf (IDF): IDF object
sch_name (str): The schedule name in the idf file
start_day_of_the_week (int): 0-based day of week (Monday=0)
strict (bool): if True, schedules that have the Field-Sets such
as Holidays and CustomDay will raise an error if they are absent
from the IDF file. If False, any missing qualifiers will be
ignored.
base_year (int): The base year of the schedule. Defaults to 2018
since the first day of that year is a Monday.
"""
super(Schedule, self).__init__(**kwargs)
self.strict = strict
self.idf = idf
self.schName = sch_name
self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)
self.year = base_year
self.startDate = self.start_date()
self.count = 0
self.startHOY = 1
self.endHOY = 24
self.unit = "unknown"
self.index_ = None
self.values = None
self.schType = schType
_type = kwargs.get('Type', None)
if _type is None:
self.schTypeLimitsName = self.get_schedule_type_limits_name(
sch_type=self.schType)
else:
self.schTypeLimitsName = _type
@classmethod
def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):
idftxt = "VERSION, 8.9;" # Not an emplty string. has just the
# version number
# we can make a file handle of a string
fhandle = io.StringIO(idftxt)
# initialize the IDF object with the file handle
idf_scratch = archetypal.IDF(fhandle)
idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),
**dict(Name=Name,
Schedule_Type_Limits_Name='',
Hourly_Value=hourly_value),
save=False)
sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)
return sched
@property
def all_values(self):
"""returns the values array"""
if self.values is None:
self.values = self.get_schedule_values(sch_name=self.schName,
sch_type=self.schType)
return self.values
else:
return self.values
@property
def max(self):
return max(self.all_values)
@property
def min(self):
return min(self.all_values)
@property
def mean(self):
return np.mean(self.all_values)
@property
def series(self):
"""Returns the schedule values as a pd.Series object with a
DateTimeIndex"""
index = pd.date_range(start=self.startDate, periods=len(
self.all_values), freq='1H')
return pd.Series(self.all_values, index=index)
def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):
"""Return the Schedule Type Limits name associated to a schedule
name"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name,
sch_type=sch_type)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
return 'unknown'
else:
return schedule_limit_name
def get_schedule_type_limits_data(self, sch_name=None):
"""Returns Schedule Type Limits data from schedule name"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
# this schedule is probably a 'Schedule:Week:Daily' which does
# not have a Schedule_Type_Limits_Name field
return '', '', '', ''
else:
lower_limit, upper_limit, numeric_type, unit_type = \
self.idf.get_schedule_type_limits_data_by_name(
schedule_limit_name)
self.unit = unit_type
if self.unit == "unknown":
self.unit = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
def get_schedule_type(self, sch_name=None):
"""Return the schedule type"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
sch_type = schedule_values.fieldvalues[0]
return sch_type
def start_date(self):
"""The start date of the schedule. Satisfies `startDayOfTheWeek`"""
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
start_date = c.monthdatescalendar(self.year, 1)[0][0]
return datetime(start_date.year, start_date.month, start_date.day)
def plot(self, slice=None, **kwargs):
hourlyvalues = self.all_values
index = pd.date_range(self.startDate, periods=len(
hourlyvalues),
freq='1H')
series = pd.Series(hourlyvalues, index=index, dtype=float)
if slice is None:
slice = pd.IndexSlice[:]
elif len(slice) > 1:
slice = pd.IndexSlice[slice[0]:slice[1]]
ax = series.loc[slice].plot(**kwargs, label=self.schName)
return ax
def get_interval_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Interval"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)
hourly_values = np.arange(24)
start_hour = 0
for i in range(number_of_day_sch):
value = float(values['Value_Until_Time_{}'.format(i + 1)])
until_time = [int(s.strip()) for s in
values['Time_{}'.format(i + 1)].split(":") if
s.strip().isdigit()]
end_hour = int(until_time[0] + until_time[1] / 60)
for hour in range(start_hour, end_hour):
hourly_values[hour] = value
start_hour = end_hour
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
def get_hourly_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Hourly'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)
fieldvalues_ = np.array(values.fieldvalues[3:])
return fieldvalues_
def get_compact_weekly_ep_schedule_values(self, sch_name=None,
start_date=None, index=None):
"""'schedule:week:compact'"""
if start_date is None:
start_date = self.startDate
if index is None:
idx = pd.date_range(start=start_date, periods=168, freq='1H')
slicer_ = pd.Series([False] * (len(idx)), index=idx)
else:
slicer_ = pd.Series([False] * (len(index)), index=index)
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)
weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)
# update last day of schedule
if self.count == 0:
self.schType = values.key
self.endHOY = 168
num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)
for i in range(num_of_daily_schedules):
day_type = values['DayType_List_{}'.format(i + 1)].lower()
how = self.field_set(day_type, slicer_)
if not weekly_schedules.loc[how].empty:
# Loop through days and replace with day:schedule values
days = []
for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(
freq='D')):
if not day.empty:
ref = values.get_referenced_object(
"ScheduleDay_Name_{}".format(i + 1))
day.loc[:] = self.get_schedule_values(
sch_name=ref.Name, sch_type=ref.key)
days.append(day)
new = pd.concat(days)
slicer_.update(
pd.Series([True] * len(new.index), index=new.index))
slicer_ = slicer_.apply(lambda x: x == True)
weekly_schedules.update(new)
else:
return weekly_schedules.values
return weekly_schedules.values
def get_daily_weekly_ep_schedule_values(self, sch_name=None):
"""'schedule:week:daily'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)
# 7 list for 7 days of the week
hourly_values = []
for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']:
ref = values.get_referenced_object(
'{}_ScheduleDay_Name'.format(day))
h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)
hourly_values.append(h)
hourly_values = np.array(hourly_values)
# shift days earlier by self.startDayOfTheWeek
hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)
return hourly_values.ravel()
def get_list_day_ep_schedule_values(self, sch_name=None):
"""'schedule:day:list'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:day:list'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
import pandas as pd
freq = int(values['Minutes_per_Item']) # Frequency of the values
num_values = values.fieldvalues[5:] # List of values
method = values['Interpolate_to_Timestep'] # How to resample
# fill a list of available values and pad with zeros (this is safer
# but should not occur)
all_values = np.arange(int(24 * 60 / freq))
for i in all_values:
try:
all_values[i] = num_values[i]
except:
all_values[i] = 0
# create a fake index to help us with the resampling
index = pd.date_range(start=self.startDate,
periods=(24 * 60) / freq,
freq='{}T'.format(freq))
series = pd.Series(all_values, index=index)
# resample series to hourly values and apply resampler function
series = series.resample('1H').apply(_how(method))
return series.values
def get_constant_ep_schedule_values(self, sch_name=None):
"""'schedule:constant'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:constant'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
hourly_values = np.arange(8760)
value = float(values['Hourly_Value'])
for hour in hourly_values:
hourly_values[hour] = value
if numeric_type.strip().lower() == 'discrete':
hourly_values = hourly_values.astype(int)
return hourly_values
def get_file_ep_schedule_values(self, sch_name=None):
"""'schedule:file'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:file'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
filename = values['File_Name']
column = values['Column_Number']
rows = values['Rows_to_Skip_at_Top']
hours = values['Number_of_Hours_of_Data']
sep = values['Column_Separator']
interp = values['Interpolate_to_Timestep']
import pandas as pd
import os
idfdir = os.path.dirname(self.idf.idfname)
file = os.path.join(idfdir, filename)
delimeter = _separator(sep)
skip_rows = int(rows) - 1 # We want to keep the column
col = [int(column) - 1] # zero-based
values = pd.read_csv(file, delimiter=delimeter, skiprows=skip_rows,
usecols=col)
return values.iloc[:, 0].values
def get_compact_ep_schedule_values(self, sch_name=None):
"""'schedule:compact'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:compact'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
field_sets = ['through', 'for', 'interpolate', 'until', 'value']
fields = values.fieldvalues[3:]
index = pd.date_range(start=self.startDate, periods=8760, freq='H')
zeros = np.zeros(len(index))
slicer_ = pd.Series([False] * len(index), index=index)
series = pd.Series(zeros, index=index)
from_day = self.startDate
ep_from_day = datetime(self.year, 1, 1)
from_time = '00:00'
how_interpolate = None
for field in fields:
if any([spe in field.lower() for spe in field_sets]):
f_set, hour, minute, value = self.field_interpreter(field)
if f_set.lower() == 'through':
# main condition. All sub-conditions must obey a
# `Through` condition
# First, initialize the slice (all False for now)
through_conditions = self.invalidate_condition(series)
# reset from_time
from_time = '00:00'
# Prepare ep_to_day variable
ep_to_day = self.date_field_interpretation(value) + \
timedelta(days=1)
# Calculate Timedelta in days
days = (ep_to_day - ep_from_day).days
# Add timedelta to start_date
to_day = from_day + timedelta(days=days) + timedelta(
hours=-1)
# slice the conditions with the range and apply True
through_conditions.loc[from_day:to_day] = True
from_day = to_day + timedelta(hours=1)
ep_from_day = ep_to_day
elif f_set.lower() == 'for':
# slice specific days
# reset from_time
from_time = '00:00'
for_condition = self.invalidate_condition(series)
values = value.split()
if len(values) > 1:
# if multiple `For`. eg.: For: Weekends Holidays,
# Combine both conditions
for value in values:
if value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
# Reset though condition
through_conditions = how
for_condition = how
else:
how = self.field_set(value, slicer_)
for_condition.loc[how] = True
elif value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
# Reset though condition
through_conditions = how
for_condition = how
else:
# Apply condition to slice
how = self.field_set(value)
for_condition.loc[how] = True
# Combine the for_condition with all_conditions
all_conditions = through_conditions & for_condition
# update in memory slice
# self.sliced_day_.loc[all_conditions] = True
elif 'interpolate' in f_set.lower():
# we need to upsample to series to 8760 * 60 values
new_idx = pd.date_range(start=self.startDate,
periods=525600, closed='left',
freq='T')
series = series.resample('T').pad()
series = series.reindex(new_idx)
series.fillna(method='pad', inplace=True)
through_conditions = through_conditions.resample('T').pad()
through_conditions = through_conditions.reindex(new_idx)
through_conditions.fillna(method='pad', inplace=True)
for_condition = for_condition.resample('T').pad()
for_condition = for_condition.reindex(new_idx)
for_condition.fillna(method='pad', inplace=True)
how_interpolate = value.lower()
elif f_set.lower() == 'until':
until_condition = self.invalidate_condition(series)
if series.index.freq.name == 'T':
# until_time = str(int(hour) - 1) + ':' + minute
until_time = timedelta(hours=int(hour),
minutes=int(minute)) - timedelta(
minutes=1)
else:
until_time = str(int(hour) - 1) + ':' + minute
until_condition.loc[until_condition.between_time(from_time,
str(
until_time)).index] = True
all_conditions = for_condition & through_conditions & \
until_condition
from_time = str(int(hour)) + ':' + minute
elif f_set.lower() == 'value':
                    # If the term `Value: ` field is used, we will catch it
# here.
# update in memory slice
slicer_.loc[all_conditions] = True
series[all_conditions] = value
else:
# Do something here before looping to the next Field
pass
else:
# If the term `Value: ` is not used; the variable is simply
# passed in the Field
value = float(field)
series[all_conditions] = value
# update in memory slice
slicer_.loc[all_conditions] = True
if how_interpolate:
return series.resample('H').mean().values
else:
return series.values
def field_interpreter(self, field):
"""dealing with a Field-Set (Through, For, Interpolate,
# Until, Value) and return the parsed string"""
if 'through' in field.lower():
# deal with through
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
hour = None
minute = None
value = statement.strip()
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'for' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
# parse without a colon
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'interpolate' in field.lower():
msg = 'The schedule "{sch}" contains sub-hourly values (' \
'Field-Set="{field}"). The average over the hour is ' \
'taken'.format(sch=self.schName, field=field)
log(msg, lg.WARNING)
f_set, value = field.split(':')
hour = None
minute = None
elif 'until' in field.lower():
if ':' in field.lower():
# parse colon
try:
f_set, hour, minute = field.split(':')
hour = hour.strip() # remove trailing spaces
minute = minute.strip() # remove trailing spaces
value = None
except:
f_set = 'until'
hour, minute = field.split(':')
hour = hour[-2:].strip()
minute = minute.strip()
value = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'value' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
else:
# deal with the data value
f_set = field
hour = None
minute = None
value = field[len(field) + 1:].strip()
return f_set, hour, minute, value
@staticmethod
def invalidate_condition(series):
index = series.index
periods = len(series)
return pd.Series([False] * periods, index=index)
def get_yearly_ep_schedule_values(self, sch_name=None):
"""'schedule:year'"""
# first week
start_date = self.startDate
idx = pd.date_range(start=start_date, periods=8760, freq='1H')
hourly_values = pd.Series([0] * 8760, index=idx)
# update last day of schedule
self.endHOY = 8760
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:year'.upper(), sch_name)
# generate weekly schedules
num_of_weekly_schedules = int(len(values.fieldvalues[3:]) / 5)
for i in range(num_of_weekly_schedules):
ref = values.get_referenced_object(
'ScheduleWeek_Name_{}'.format(i + 1))
start_month = values['Start_Month_{}'.format(i + 1)]
end_month = values['End_Month_{}'.format(i + 1)]
start_day = values['Start_Day_{}'.format(i + 1)]
end_day = values['End_Day_{}'.format(i + 1)]
start = datetime.strptime(
'{}/{}/{}'.format(self.year, start_month, start_day),
'%Y/%m/%d')
end = datetime.strptime(
'{}/{}/{}'.format(self.year, end_month, end_day),
'%Y/%m/%d')
days = (end - start).days + 1
end_date = start_date + timedelta(days=days) + timedelta(hours=23)
how = pd.IndexSlice[start_date:end_date]
weeks = []
for name, week in hourly_values.loc[how].groupby(
pd.Grouper(freq='168H')):
if not week.empty:
try:
week.loc[:] = self.get_schedule_values(
sch_name=ref.Name, start_date=week.index[0],
index=week.index, sch_type=ref.key)
except ValueError:
week.loc[:] = self.get_schedule_values(
ref.Name, week.index[0])[0:len(week)]
finally:
weeks.append(week)
new = pd.concat(weeks)
hourly_values.update(new)
start_date += timedelta(days=days)
return hourly_values.values
def get_schedule_values(self, sch_name=None, start_date=None, index=None,
sch_type=None):
"""Main function that returns the schedule values
Args:
sch_type:
index:
start_date:
"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
self.schType = schedule_values.key.upper()
sch_type = self.schType
if self.count == 0:
# This is the first time, get the schedule type and the type limits.
self.schTypeLimitsName = self.get_schedule_type_limits_name()
self.count += 1
if sch_type.upper() == "schedule:year".upper():
hourly_values = self.get_yearly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:interval".upper():
hourly_values = self.get_interval_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:hourly".upper():
hourly_values = self.get_hourly_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:list".upper():
hourly_values = self.get_list_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:week:compact".upper():
hourly_values = self.get_compact_weekly_ep_schedule_values(
sch_name, start_date, index)
elif sch_type.upper() == "schedule:week:daily".upper():
hourly_values = self.get_daily_weekly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:constant".upper():
hourly_values = self.get_constant_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:compact".upper():
hourly_values = self.get_compact_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:file".upper():
hourly_values = self.get_file_ep_schedule_values(
sch_name)
else:
log('Archetypal does not support "{}" currently'.format(
self.schType), lg.WARNING)
hourly_values = []
return hourly_values
def is_schedule(self, sch_name):
"""Returns True if idfobject is one of 'schedule_types'"""
        return sch_name.upper() in self.idf.schedules_dict
def to_year_week_day(self):
"""convert a Schedule Class to the 'Schedule:Year',
'Schedule:Week:Daily' and 'Schedule:Day:Hourly' representation
Returns:
'Schedule:Year', list of ['Schedule:Week:Daily'],
list of ['Schedule:Day:Hourly']
"""
full_year = np.array(self.all_values) # array of shape (8760,)
values = full_year.reshape(-1, 24) # shape (365, 24)
# create unique days
unique_days, nds = np.unique(values, axis=0, return_inverse=True)
ep_days = []
dict_day = {}
count_day = 0
for unique_day in unique_days:
name = 'd_' + self.schName + '_' + '%03d' % count_day
name, count_day = archetypal.check_unique_name('d', count_day,
name,
archetypal.settings.unique_schedules,
suffix=True)
dict_day[name] = unique_day
archetypal.settings.unique_schedules.append(name)
# Create idf_objects for schedule:day:hourly
ep_day = self.idf.add_object(
ep_object='Schedule:Day:Hourly'.upper(),
save=False,
**dict(Name=name,
Schedule_Type_Limits_Name=self.schType,
**{'Hour_{}'.format(i + 1): unique_day[i]
for i in range(24)})
)
ep_days.append(ep_day)
# create unique weeks from unique days
unique_weeks, nwsi, nws, count = np.unique(
full_year[:364 * 24, ...].reshape(-1, 168), return_index=True,
axis=0, return_inverse=True, return_counts=True)
# Appending unique weeks in dictionary with name and values of weeks as
# keys
# {'name_week': {'dayName':[]}}
dict_week = {}
count_week = 0
for unique_week in unique_weeks:
week_id = 'w_' + self.schName + '_' + '%03d' % count_week
week_id, count_week = archetypal.check_unique_name('w',
count_week,
week_id,
archetypal.settings.unique_schedules,
suffix=True)
archetypal.settings.unique_schedules.append(week_id)
dict_week[week_id] = {}
for i in list(range(0, 7)):
day_of_week = unique_week[..., i * 24:(i + 1) * 24]
for key in dict_day:
if (day_of_week == dict_day[key]).all():
dict_week[week_id]['day_{}'.format(i)] = key
# Create idf_objects for schedule:week:daily
list_day_of_week = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday', 'Saturday']
ordered_day_n = np.array([6, 0, 1, 2, 3, 4, 5])
ordered_day_n = np.roll(ordered_day_n, self.startDayOfTheWeek)
ep_weeks = []
for week_id in dict_week:
ep_week = self.idf.add_object(
ep_object='Schedule:Week:Daily'.upper(),
save=False,
**dict(Name=week_id,
**{'{}_ScheduleDay_Name'.format(
weekday): dict_week[week_id][
'day_{}'.format(i)] for
i, weekday in
zip(ordered_day_n, list_day_of_week)
},
Holiday_ScheduleDay_Name=
dict_week[week_id]['day_6'],
SummerDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
WinterDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
CustomDay1_ScheduleDay_Name=
dict_week[week_id]['day_2'],
CustomDay2_ScheduleDay_Name=
dict_week[week_id]['day_5'])
)
ep_weeks.append(ep_week)
import itertools
blocks = {}
from_date = datetime(self.year, 1, 1)
bincount = [sum(1 for _ in group)
for key, group in itertools.groupby(nws + 1) if key]
week_order = {i: v for i, v in enumerate(np.array(
[key for key, group in itertools.groupby(nws + 1) if key]) - 1)}
for i, (week_n, count) in enumerate(
zip(week_order, bincount)):
week_id = list(dict_week)[week_order[i]]
to_date = from_date + timedelta(days=int(count * 7), hours=-1)
blocks[i] = {}
blocks[i]['week_id'] = week_id
blocks[i]['from_day'] = from_date.day
blocks[i]['end_day'] = to_date.day
blocks[i]['from_month'] = from_date.month
blocks[i]['end_month'] = to_date.month
from_date = to_date + timedelta(hours=1)
# If this is the last block, force end of year
if i == len(bincount) - 1:
blocks[i]['end_day'] = 31
blocks[i]['end_month'] = 12
new_dict = dict(Name=self.schName + '_',
Schedule_Type_Limits_Name=self.schTypeLimitsName)
for i in blocks:
new_dict.update({"ScheduleWeek_Name_{}".format(i + 1):
blocks[i]['week_id'],
"Start_Month_{}".format(i + 1):
blocks[i]['from_month'],
"Start_Day_{}".format(i + 1):
blocks[i]['from_day'],
"End_Month_{}".format(i + 1):
blocks[i]['end_month'],
"End_Day_{}".format(i + 1):
blocks[i]['end_day']})
ep_year = self.idf.add_object(ep_object='Schedule:Year'.upper(),
save=False, **new_dict)
return ep_year, ep_weeks, ep_days
def date_field_interpretation(self, field):
"""Date Field Interpretation
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
Info:
See EnergyPlus documentation for more details:
1.6.8.1.2 Field: Start Date (Table 1.4: Date Field Interpretation)
"""
# < number > Weekday in Month
formats = ['%m/%d', '%d %B', '%B %d', '%d %b', '%b %d']
date = None
for format_str in formats:
            # Try to parse using each of the defined formats
try:
date = datetime.strptime(field, format_str)
except:
pass
else:
date = datetime(self.year, date.month, date.day)
if date is None:
# if the defined formats did not work, try the fancy parse
try:
date = self.parse_fancy_string(field)
except:
msg = "the schedule '{sch}' contains a " \
"Field that is not understood: '{field}'".format(
sch=self.schName,
field=field)
raise ValueError(msg)
else:
return date
else:
return date
def parse_fancy_string(self, field):
"""Will try to parse cases such as `3rd Monday in February` or `Last
Weekday In Month`
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
"""
import re
# split the string at the term ' in '
time, month = field.lower().split(' in ')
month = datetime.strptime(month, '%B').month
# split the first part into nth and dayofweek
nth, dayofweek = time.split(' ')
if 'last' in nth:
nth = -1 # Use the last one
else:
nth = re.findall(r'\d+', nth) # use the nth one
nth = int(nth[0]) - 1 # python is zero-based
weekday = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
'friday': 4, 'saturday': 5, 'sunday': 6}
# parse the dayofweek eg. monday
dayofweek = weekday.get(dayofweek, 6)
# create list of possible days using Calendar
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
monthcal = c.monthdatescalendar(self.year, month)
# iterate though the month and get the nth weekday
date = [day for week in monthcal for day in week if \
day.weekday() == dayofweek and \
day.month == month][nth]
return datetime(date.year, date.month, date.day)
def field_set(self, field, slicer_=None):
"""helper function to return the proper slicer depending on the
field_set value.
Available values are:
Weekdays, Weekends, Holidays, Alldays, SummerDesignDay,
WinterDesignDay, Sunday, Monday, Tuesday, Wednesday, Thursday,
Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays
Args:
field (str): The EnergyPlus field set value.
slicer_ (pd.Series): The persistent slicer for this schedule
Returns:
(indexer-like): Returns the appropriate indexer for the series.
"""
if field.lower() == 'weekdays':
# return only days of weeks
return lambda x: x.index.dayofweek < 5
elif field.lower() == 'weekends':
# return only weekends
return lambda x: x.index.dayofweek >= 5
elif field.lower() == 'alldays':
log('For schedule "{}", the field-set "AllDays" may be overridden '
'by the "AllOtherDays" field-set'.format(
self.schName), lg.WARNING)
            # return all days := equivalent to .loc[:]
return pd.IndexSlice[:]
elif field.lower() == 'allotherdays':
# return unused days (including special days). Uses the global
# variable `slicer_`
import operator
if slicer_ is not None:
return _conjunction(*[self.special_day(field, slicer_),
~slicer_], logical=operator.or_)
else:
raise NotImplementedError
elif field.lower() == 'sunday':
# return only sundays
return lambda x: x.index.dayofweek == 6
elif field.lower() == 'monday':
# return only mondays
return lambda x: x.index.dayofweek == 0
elif field.lower() == 'tuesday':
# return only Tuesdays
return lambda x: x.index.dayofweek == 1
elif field.lower() == 'wednesday':
# return only Wednesdays
return lambda x: x.index.dayofweek == 2
elif field.lower() == 'thursday':
# return only Thursdays
return lambda x: x.index.dayofweek == 3
elif field.lower() == 'friday':
# return only Fridays
return lambda x: x.index.dayofweek == 4
elif field.lower() == 'saturday':
# return only Saturdays
return lambda x: x.index.dayofweek == 5
elif field.lower() == 'summerdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'winterdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'holiday' or field.lower() == 'holidays':
field = 'holiday'
return self.special_day(field, slicer_)
elif not self.strict:
# If not strict, ignore missing field-sets such as CustomDay1
return pd.IndexSlice[:]
else:
raise NotImplementedError(
'Archetypal does not yet support The '
'Field_set "{}"'.format(field))
def __len__(self):
"""returns the length of all values of the schedule"""
return len(self.all_values)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Schedule):
return self.all_values == other.all_values
else:
raise NotImplementedError
def __ne__(self, other):
return ~(self.__eq__(other))
def __add__(self, other):
if isinstance(other, Schedule):
return self.all_values + other.all_values
elif isinstance(other, list):
return self.all_values + other
else:
raise NotImplementedError
def __sub__(self, other):
if isinstance(other, Schedule):
return self.all_values - other.all_values
elif isinstance(other, list):
return self.all_values - other
else:
raise NotImplementedError
def __mul__(self, other):
if isinstance(other, Schedule):
return self.all_values * other.all_values
elif isinstance(other, list):
return self.all_values * other
else:
raise NotImplementedError
def get_sdow(self, start_day_of_week):
"""Returns the start day of the week"""
if start_day_of_week is None:
return self.idf.day_of_week_for_start_day
else:
return start_day_of_week
def special_day(self, field, slicer_):
"""try to get the RunPeriodControl:SpecialDays for the corresponding
Day Type"""
sp_slicer_ = slicer_.copy()
sp_slicer_.loc[:] = False
special_day_types = ['holiday', 'customday1', 'customday2']
dds = self.idf.idfobjects['RunPeriodControl:SpecialDays'.upper()]
dd = [dd for dd in dds if dd.Special_Day_Type.lower() == field
or dd.Special_Day_Type.lower() in special_day_types]
if len(dd) > 0:
slice = []
for dd in dd:
# can have more than one special day types
data = dd.Start_Date
ep_start_date = self.date_field_interpretation(data)
ep_orig = datetime(self.year, 1, 1)
days_to_speciald = (ep_start_date - ep_orig).days
duration = int(dd.Duration)
from_date = self.startDate + timedelta(days=days_to_speciald)
to_date = from_date + timedelta(days=duration) + timedelta(
hours=-1)
sp_slicer_.loc[from_date:to_date] = True
return sp_slicer_
elif not self.strict:
return sp_slicer_
else:
            msg = 'Could not find a "RunPeriodControl:SpecialDays" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
self.schName, field.capitalize()
)
raise ValueError(msg)
def design_day(schedule, field):
# try to get the SizingPeriod:DesignDay for the corresponding Day Type
dds = schedule.idf.idfobjects['SizingPeriod:DesignDay'.upper()]
dd = [dd for dd in dds if dd.Day_Type.lower() == field]
if len(dd) > 0:
# should have found only one design day matching the Day Type
data = [dd[0].Month, dd[0].Day_of_Month]
date = '/'.join([str(item).zfill(2) for item in data])
date = schedule.date_field_interpretation(date)
return lambda x: x.index == date
else:
msg = 'Could not find a "SizingPeriod:DesignDay" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
schedule.schName, field.capitalize()
)
raise ValueError(msg)
def _conjunction(*conditions, logical=np.logical_and):
"""Applies a logical function on n conditions"""
return functools.reduce(logical, conditions)
def _separator(sep):
"""helper function to return the correct delimiter"""
if sep == 'Comma':
return ','
elif sep == 'Tab':
return '\t'
elif sep == 'Fixed':
return None
elif sep == 'Semicolon':
return ';'
else:
return ','
def _how(how):
"""Helper function to return the correct resampler"""
if how.lower() == 'average':
return 'mean'
elif how.lower() == 'linear':
return 'interpolate'
elif how.lower() == 'no':
return 'max'
else:
return 'max'
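# Illustrative sketch (not part of the original module): the heart of
# `Schedule.parse_fancy_string` above is the stdlib `calendar` lookup of the
# nth weekday of a month. A standalone equivalent, with the helper name made
# up for this example, looks like this:
def _nth_weekday_example(year, month, weekday, nth):
    """Return the nth (0-based) `weekday` of `month`; nth=-1 gives the last one."""
    import calendar
    from datetime import datetime
    cal = calendar.Calendar(firstweekday=0)
    days = [day for week in cal.monthdatescalendar(year, month)
            for day in week
            if day.weekday() == weekday and day.month == month]
    picked = days[nth]
    return datetime(picked.year, picked.month, picked.day)

# e.g. "3rd Monday in February" of 2018 -> datetime(2018, 2, 19)
# _nth_weekday_example(2018, 2, weekday=0, nth=2)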
| 39.808811
| 100
| 0.535799
| 43,146
| 0.954917
| 0
| 0
| 1,737
| 0.038444
| 0
| 0
| 9,992
| 0.221145
|
f7024605869dd7788905637cfccaa41707efb6c3
| 256
|
py
|
Python
|
data/scripts/reverse.py
|
levindu/OpenCC
|
345ea91303e5b3d9332dc51ea73370dac83e4c6b
|
[
"Apache-2.0"
] | 43
|
2018-09-17T00:45:35.000Z
|
2021-11-14T23:56:45.000Z
|
data/scripts/reverse.py
|
levindu/OpenCC
|
345ea91303e5b3d9332dc51ea73370dac83e4c6b
|
[
"Apache-2.0"
] | 7
|
2019-11-26T10:48:14.000Z
|
2021-06-13T04:49:58.000Z
|
data/scripts/reverse.py
|
levindu/OpenCC
|
345ea91303e5b3d9332dc51ea73370dac83e4c6b
|
[
"Apache-2.0"
] | 6
|
2018-09-17T02:09:59.000Z
|
2020-08-15T13:57:44.000Z
|
#!/usr/bin/env python
#coding: utf-8
import sys
from common import reverse_items
if len(sys.argv) != 3:
print("Reverse key and value of all pairs")
print(("Usage: ", sys.argv[0], "[input] [output]"))
exit(1)
reverse_items(sys.argv[1], sys.argv[2])
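# Note: `common.reverse_items` lives elsewhere in this repository and is not
# shown here. Purely as an illustration (an assumption, not the actual
# implementation), a minimal equivalent for a tab-separated dictionary file
# could look like this:
def _reverse_items_sketch(infile, outfile):
    with open(infile, encoding='utf-8') as fin, \
            open(outfile, 'w', encoding='utf-8') as fout:
        for line in fin:
            key, _, value = line.rstrip('\n').partition('\t')
            fout.write('{}\t{}\n'.format(value, key))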
| 21.333333
| 53
| 0.671875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.382813
|
f703531b591af3d5317bed220eaa477c0403e4d5
| 2,576
|
py
|
Python
|
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | 1
|
2020-07-14T09:05:56.000Z
|
2020-07-14T09:05:56.000Z
|
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | null | null | null |
stock_predictions/web/template.py
|
abakhru/stock_prediction
|
bfb4483ac888bc67e2a8928fdf037d23acbf48f9
|
[
"MIT"
] | null | null | null |
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Title of the document</title>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.min.css">
<style>
.tradingview-widget-container {{
position: sticky;
top: 20px;
}}
.stocks-view {{
display: flex;
flex-wrap: nowrap;
}}
.stocks-listing {{
width: 780px;
flex-wrap: nowrap;
padding: 20px;
}}
.stocks-graph {{
flex-wrap: nowrap;
padding: 20px;
}}
th.sticky-header {{
position: sticky;
top: 0;
z-index: 10;
background-color: white;
}}
.positive-movement {{
color: green;
font-weight: bold;
}}
.negative-movement {{
color: red;
font-weight: bold;
}}
.blue-category {{
background-color: lightsteelblue;
}}
</style>
</head>
<body>
{}
<div class="stocks-view">
<div class="stocks-listing">
<table>
<thead>
<tr>
<th class="sticky-header">Symbol</th>
<th class="sticky-header">April 1 2019</th>
<th class="sticky-header">Dec 2 2019</th>
<th class="sticky-header">Today</th>
<th class="sticky-header">Movement since April 1 2019</th>
<th class="sticky-header">Movement since Dec 2 2019</th>
<th class="sticky-header">Bankruptcy probability</th>
</tr>
</thead>
<tbody>
{}
</tbody>
</table>
</div>
<div class="stocks-graph"
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div id="tradingview_63a66"></div>
<div class="tradingview-widget-copyright"><a href="https://www.tradingview.com/symbols/AAPL/" rel="noopener" target="_blank"><span class="blue-text">AAPL Chart</span></a> by TradingView</div>
</div>
<!-- TradingView Widget END -->
</div>
</div>
<script type="text/javascript">
function renderChart(symbol) {{
new TradingView.widget(
{{
"width": 750,
"height": 500,
"symbol": symbol,
"interval": "180",
"timezone": "Etc/UTC",
"theme": "light",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_63a66"
}}
);
}}
document.addEventListener('DOMContentLoaded', function(){{
renderChart('BA');
}}, false);
</script>
</body>
</html>"""
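# Usage sketch (an assumption -- the real call site lives elsewhere in the
# project): the template above keeps exactly two positional `{}` slots, one for
# a page header and one for the table rows, and escapes literal braces as
# `{{ }}`, so it is meant to be filled with str.format:
_example_page = template.format(
    '<h1>Stocks</h1>',
    '<tr><td>AAPL</td><td>...</td><td>...</td><td>...</td>'
    '<td>...</td><td>...</td><td>...</td></tr>',
)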
| 24.533333
| 195
| 0.572593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,564
| 0.995342
|
f704431757b191fd6a6405e1724d23679ca1b2f0
| 1,173
|
py
|
Python
|
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
import os
# import torch
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
inputrpc = bytes(modbase64.encode())
inputrpc_ = base64.b64decode(inputrpc)
loadmodel = torch.load(io.BytesIO(inputrpc_))
return loadmodel
model_list = []
f = open(sys.argv[1], "r")
models = f.read().split(",")
f.close()
print(models)
for m in models:
model_list.append(base642fullmodel(m))
new_model_state = model_list[0].state_dict()
# sum the weights of all models
for m in model_list[1:]:
state_m = m.state_dict()
for key in state_m:
new_model_state[key] += state_m[key]
# average the model weights
for key in new_model_state:
new_model_state[key] /= len(model_list)
new_model = model_list[0]
new_model.load_state_dict(new_model_state)
output = fullmodel2base64(new_model)
print(output)
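# Standalone sketch of the same merge step (illustrative only; it reuses the
# torch/nn imports above): averaging the state_dicts of two freshly initialised
# models, which is the FedAvg-style aggregation the script applies to the
# decoded models. Wrapped in a function so it does not run as part of the script.
def _average_demo():
    demo_models = [nn.Linear(4, 2) for _ in range(2)]
    with torch.no_grad():
        avg_state = {k: v.clone() for k, v in demo_models[0].state_dict().items()}
        for m in demo_models[1:]:
            for k, v in m.state_dict().items():
                avg_state[k] += v
        for k in avg_state:
            avg_state[k] /= len(demo_models)
    merged = nn.Linear(4, 2)
    merged.load_state_dict(avg_state)
    return merged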
| 19.55
| 56
| 0.734868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.062234
|
f70474925eb078c598d03d4255e4e76e7b6c9361
| 420
|
py
|
Python
|
examples/function_examples/bpod_info.py
|
ckarageorgkaneen/pybpod-api
|
ebccef800ae1abf3b6a643ff33166fab2096c780
|
[
"MIT"
] | 1
|
2021-01-18T08:18:22.000Z
|
2021-01-18T08:18:22.000Z
|
examples/function_examples/bpod_info.py
|
ckarageorgkaneen/pybpod-api
|
ebccef800ae1abf3b6a643ff33166fab2096c780
|
[
"MIT"
] | 1
|
2020-09-18T20:46:11.000Z
|
2020-12-29T14:55:20.000Z
|
examples/function_examples/bpod_info.py
|
ckarageorgkaneen/pybpod-api
|
ebccef800ae1abf3b6a643ff33166fab2096c780
|
[
"MIT"
] | 3
|
2020-09-12T15:32:11.000Z
|
2022-03-11T23:08:03.000Z
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Get hardware info from Bpod
"""
from pybpodapi.protocol import Bpod
from confapp import conf
my_bpod = Bpod()
my_bpod.close()
print("Target Bpod firmware version: ", conf.TARGET_BPOD_FIRMWARE_VERSION)
print("Firmware version (read from device): ", my_bpod.hardware.firmware_version)
print("Machine type version (read from device): ", my_bpod.hardware.machine_type)
| 21
| 81
| 0.742857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.457143
|
f704c0f9b4488488f3aae9f679bb84275d8e52d4
| 11,405
|
py
|
Python
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 4
|
2020-06-01T14:36:30.000Z
|
2021-08-24T16:55:50.000Z
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 34
|
2020-09-11T17:20:42.000Z
|
2022-03-28T14:08:44.000Z
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 1
|
2020-12-28T10:13:20.000Z
|
2020-12-28T10:13:20.000Z
|
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""Package Filter"""
from core.src.bootstrap.Constants import Constants
import fnmatch
class PackageFilter(object):
"""implements the Package filtering logic"""
def __init__(self, execution_config, composite_logger):
self.execution_config = execution_config
self.composite_logger = composite_logger
# Exclusions - note: version based exclusion is not supported
self.global_excluded_packages = self.sanitize_str_to_list(self.execution_config.global_exclusion_list)
self.installation_excluded_package_masks = self.execution_config.excluded_package_name_mask_list
self.installation_excluded_packages, self.installation_excluded_package_versions = self.get_packages_and_versions_from_masks(self.installation_excluded_package_masks)
# Inclusions - note: version based inclusion is optionally supported
self.installation_included_package_masks = self.execution_config.included_package_name_mask_list
self.installation_included_packages, self.installation_included_package_versions = self.get_packages_and_versions_from_masks(self.installation_included_package_masks)
self.installation_included_classifications = [] if self.execution_config.included_classifications_list is None else self.execution_config.included_classifications_list
# Neutralize global excluded packages, if customer explicitly includes the package
packages_to_clear_from_global = []
for package in self.global_excluded_packages:
if self.check_for_explicit_inclusion(package):
self.composite_logger.log_debug('Removing package from global exclusion list: ' + package)
packages_to_clear_from_global.append(package)
self.global_excluded_packages = [x for x in self.global_excluded_packages if x not in packages_to_clear_from_global]
# Logging
self.composite_logger.log("\nAzure globally-excluded packages: " + str(self.global_excluded_packages))
self.composite_logger.log("Included package classifications: " + ', '.join(self.installation_included_classifications))
self.composite_logger.log("Included packages: " + str(self.installation_included_package_masks))
self.composite_logger.log("Excluded packages: " + str(self.installation_excluded_packages))
if '=' in str(self.installation_excluded_package_masks):
self.composite_logger.log_error("\n /!\\ Package exclusions do not support version matching in the filter today. "
"Due to this, more packages than expected may be excluded from this update deployment.")
# region Inclusion / exclusion presence checks
def is_exclusion_list_present(self):
"""Return true if either Global or patch installation specific exclusion list present"""
return bool(self.global_excluded_packages) or bool(self.installation_excluded_packages)
def is_inclusion_list_present(self):
"""Return true if patch installation Inclusion is present"""
return bool(self.installation_included_packages)
# endregion
# region Package exclusion checks
def check_for_exclusion(self, one_or_more_packages):
"""Return true if package need to be excluded"""
return self.check_for_match(one_or_more_packages, self.installation_excluded_packages) or \
self.check_for_match(one_or_more_packages, self.global_excluded_packages)
# endregion
# region Package inclusion checks
def check_for_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included (either because no inclusion list is specified, or because of explicit match)"""
return not self.is_inclusion_list_present() or self.check_for_explicit_inclusion(package, package_version)
def check_for_explicit_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included due to an explicit match to the inclusion list """
return self.check_for_match(package, self.installation_included_packages, package_version, self.installation_included_package_versions)
# endregion
# region Inclusion / exclusion common match checker
def check_for_match(self, one_or_more_packages, matching_list, linked_package_versions=Constants.DEFAULT_UNSPECIFIED_VALUE, version_matching_list=Constants.DEFAULT_UNSPECIFIED_VALUE):
# type: (str, object, str, object) -> bool # type hinting to remove a warning
"""Return true if package(s) (with, optionally, linked version(s)) matches the filter list"""
if matching_list:
if type(one_or_more_packages) is str:
return self.single_package_check_for_match(one_or_more_packages, matching_list, linked_package_versions, version_matching_list)
else:
for index, each_package in enumerate(one_or_more_packages):
if type(linked_package_versions) is str:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions, version_matching_list):
return True
else:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions[index], version_matching_list):
return True
return False
def single_package_check_for_match(self, package, matching_list, package_version, version_matching_list):
"""Returns true if a single package (optionally, version) matches the filter list"""
for index, matching_package in enumerate(matching_list):
if fnmatch.fnmatch(package, matching_package) or fnmatch.fnmatch(self.get_product_name_without_arch(package), matching_package):
self.composite_logger.log_debug(' - [Package] {0} matches expression {1}'.format(package, matching_package))
if package_version == Constants.DEFAULT_UNSPECIFIED_VALUE or not version_matching_list or version_matching_list[index] == Constants.DEFAULT_UNSPECIFIED_VALUE:
self.composite_logger.log_debug(' - [Version] Check skipped as not specified.')
return True
elif len(version_matching_list) > index and fnmatch.fnmatch(package_version, version_matching_list[index]):
self.composite_logger.log_debug(' - [Version] {0} matches expression {1}'.format(package, version_matching_list[index]))
return True
elif len(version_matching_list) <= index: # This should never happen - something has gone horribly wrong
self.composite_logger.log_error(' - [Version] Index error - ({0} of {1})'.format(index + 1, len(version_matching_list)))
else:
self.composite_logger.log_debug(' - Package {0} (version={1}) was found, but it did not match filter specified for version ({2})'.format(package, package_version, version_matching_list[index]))
return False
@staticmethod
def get_product_name_without_arch(package_name):
"""Splits out product name without architecture - if this is changed, review YumPackageManager"""
architectures = ['.x86_64', '.noarch', '.i686']
for arch in architectures:
if package_name.endswith(arch):
return package_name.replace(arch, '')
return package_name
# endregion
# region Get included / excluded package masks
def get_packages_and_versions_from_masks(self, package_masks):
"""Return package names and versions"""
packages = []
package_versions = []
if package_masks is not None:
for index, package_mask in enumerate(package_masks):
package_mask_split = str(package_mask).split('=')
if len(package_mask_split) == 1: # no version specified
packages.append(package_mask_split[0].strip())
package_versions.append(Constants.DEFAULT_UNSPECIFIED_VALUE)
elif len(package_mask_split) == 2: # version also specified
packages.append(package_mask_split[0].strip())
package_versions.append(package_mask_split[1].strip())
else: # invalid format
self.composite_logger.log_warning("Invalid package format: " + str(package_mask) + " [Ignored]")
return packages, package_versions
@staticmethod
def sanitize_str_to_list(string_input):
"""Strips excess white-space and converts a comma-separated string to a list"""
return [] if (string_input is None) else string_input.strip().split(",")
# endregion
# region Get installation classifications from execution configuration
def is_msft_critsec_classification_only(self):
return ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications) and 'Other' not in self.installation_included_classifications
def is_msft_other_classification_only(self):
return 'Other' in self.installation_included_classifications and not ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications)
def is_msft_all_classification_included(self):
"""Returns true if all classifications were individually selected *OR* (nothing was selected AND no inclusion list is present) -- business logic"""
all_classifications = [key for key in Constants.PackageClassification.__dict__.keys() if not key.startswith('__')]
all_classifications_explicitly_selected = bool(len(self.installation_included_classifications) == (len(all_classifications) - 1))
no_classifications_selected = bool(len(self.installation_included_classifications) == 0)
only_unclassified_selected = bool('Unclassified' in self.installation_included_classifications and len(self.installation_included_classifications) == 1)
return all_classifications_explicitly_selected or ((no_classifications_selected or only_unclassified_selected) and not self.is_inclusion_list_present())
def is_invalid_classification_combination(self):
return ('Other' in self.installation_included_classifications and 'Critical' in self.installation_included_classifications and 'Security' not in self.installation_included_classifications) or \
('Other' in self.installation_included_classifications and 'Security' in self.installation_included_classifications and 'Critical' not in self.installation_included_classifications)
# endregion
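# Illustrative sketch (not from this module): the mask handling above boils
# down to splitting "name=version" masks and matching both halves with fnmatch.
# The sentinel "-" below merely stands in for Constants.DEFAULT_UNSPECIFIED_VALUE.
if __name__ == '__main__':
    import fnmatch

    masks = ['bash=4.4.*', 'kernel*']
    names, versions = [], []
    for mask in masks:
        parts = mask.split('=')
        names.append(parts[0].strip())
        versions.append(parts[1].strip() if len(parts) == 2 else '-')

    def _matches(package, version):
        for i, name_mask in enumerate(names):
            if fnmatch.fnmatch(package, name_mask):
                if versions[i] == '-' or fnmatch.fnmatch(version, versions[i]):
                    return True
        return False

    print(_matches('bash', '4.4.18'))           # True: name and version match
    print(_matches('kernel-headers', '5.4.0'))  # True: wildcard name, no version filter
    print(_matches('bash', '5.0.1'))            # False: version filter rejects it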
| 65.924855
| 216
| 0.728979
| 10,703
| 0.938448
| 0
| 0
| 615
| 0.053924
| 0
| 0
| 3,198
| 0.280403
|
f70920a45d8b352e57cdd5c4ba4ed7a956b3f421
| 4,150
|
py
|
Python
|
pyesgf/util.py
|
ggarcias/esgf-pyclient-cmip6
|
9e7975d2e676ed2c4001edb4e25c9c20cc16b7af
|
[
"BSD-3-Clause"
] | 17
|
2016-09-07T02:55:30.000Z
|
2022-03-10T15:34:53.000Z
|
pyesgf/util.py
|
ggarcias/esgf-pyclient-cmip6
|
9e7975d2e676ed2c4001edb4e25c9c20cc16b7af
|
[
"BSD-3-Clause"
] | 61
|
2015-05-27T08:10:46.000Z
|
2022-03-17T12:36:45.000Z
|
pyesgf/util.py
|
ggarcias/esgf-pyclient-cmip6
|
9e7975d2e676ed2c4001edb4e25c9c20cc16b7af
|
[
"BSD-3-Clause"
] | 22
|
2015-10-27T11:21:05.000Z
|
2022-01-12T08:26:16.000Z
|
"""
Utility functions using the pyesgf package.
"""
import sys
from urllib.parse import quote_plus
def ats_url(base_url):
"""
Return the URL for the ESGF SAML AttributeService
"""
# Strip '/' from url as necessary
base_url = base_url.rstrip('/')
return '/'.join([base_url,
'esgf-idp/saml/soap/secure/attributeService.htm'])
def get_manifest(drs_id, version, connection):
"""
Retrieve the filenames, sizes and checksums of a dataset.
This function will raise ValueError if more than one dataset is found
matching the given drs_id and version on a search without replicas.
The connection should be either distrib=True or be connected to a suitable
ESGF search interface.
:param drs_id: a string containing the DRS identifier without version
:param version: The version as a string or int
"""
if isinstance(version, int):
version = str(version)
context = connection.new_context(drs_id=drs_id, version=version)
results = context.search()
if len(results) > 1:
raise ValueError("Search for dataset %s.v%s returns multiple hits" %
(drs_id, version))
file_context = results[0].file_context()
manifest = {}
for file in file_context.search():
manifest[file.filename] = {
'checksum_type': file.checksum_type,
'checksum': file.checksum,
'size': file.size,
}
return manifest
def urlencode(query):
"""
Encode a sequence of two-element tuples or dictionary into a URL query
string.
This version is adapted from the standard library to understand operators
in the pyesgf.search.constraints module.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query, "items"):
# mapping objects
query = list(query.items())
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object", tb)
def append(k, v, tag, lst):
from .search.consts import OPERATOR_NEQ
if tag == OPERATOR_NEQ:
lst.append('%s!=%s' % (k, v))
elif tag is None:
lst.append('%s=%s' % (k, v))
else:
raise ValueError('Unknown operator tag %s' % tag)
def strip_tag(v):
if isinstance(v, tuple):
tag, v = v
else:
tag = None
return tag, v
lst = []
for k, v in query:
tag, v = strip_tag(v)
k = quote_plus(str(k))
if isinstance(v, str):
if hasattr(v, 'encode'):
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = quote_plus(v.encode("ASCII", "replace"))
else:
v = quote_plus(v)
append(k, v, tag, lst)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v))
append(k, v, tag, lst)
else:
# loop over the sequence
for elt in v:
append(k, quote_plus(str(elt)), tag, lst)
return '&'.join(lst)
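# Usage sketch (illustrative, not part of the original module): plain values
# and sequences are both supported, and a value wrapped as a
# (OPERATOR_NEQ, value) tuple is rendered with '!=' instead of '='.
if __name__ == '__main__':
    print(urlencode([('project', 'CMIP5'), ('variable', ['tas', 'pr'])]))
    # -> project=CMIP5&variable=tas&variable=pr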
| 30.291971
| 78
| 0.576867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,875
| 0.451807
|
f709b6ad81d25a0c074deaa1308cf04158654f02
| 1,373
|
py
|
Python
|
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author:hankcs
# Date: 2018-06-21 19:46
# "Introduction to Natural Language Processing" (《自然语言处理入门》), Section 5.3: perceptron-based name gender classification
# Companion book: http://nlp.hankcs.com/book.php
# Questions and discussion: https://bbs.hankcs.com/
import sys, os  # environment, adjust the priority
sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from pyhanlp import *
from tests.test_utility import ensure_data
PerceptronNameGenderClassifier = JClass('com.hankcs.hanlp.model.perceptron.PerceptronNameGenderClassifier')
cnname = ensure_data('cnname', 'http://file.hankcs.com/corpus/cnname.zip')
TRAINING_SET = os.path.join(cnname, 'train.csv')
TESTING_SET = os.path.join(cnname, 'test.csv')
MODEL = cnname + ".bin"
def run_classifier(averaged_perceptron):
print('=====%s=====' % ('平均感知机算法' if averaged_perceptron else '朴素感知机算法'))
classifier = PerceptronNameGenderClassifier()
print('训练集准确率:', classifier.train(TRAINING_SET, 10, averaged_perceptron))
model = classifier.getModel()
print('特征数量:', len(model.parameter))
# model.save(MODEL, model.featureMap.entrySet(), 0, True)
# classifier = PerceptronNameGenderClassifier(MODEL)
for name in "赵建军", "沈雁冰", "陆雪琪", "李冰冰":
print('%s=%s' % (name, classifier.predict(name)))
print('测试集准确率:', classifier.evaluate(TESTING_SET))
if __name__ == '__main__':
run_classifier(False)
run_classifier(True)
| 38.138889
| 112
| 0.718864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.452583
|
f70b82a64651b669501101e2383b4a201ac4b9ba
| 5,305
|
py
|
Python
|
tests/test_content_download.py
|
easydo-cn/edo_client
|
775f185c54f2eeda6a7dd6482de8228ca9ad89b0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_content_download.py
|
easydo-cn/edo_client
|
775f185c54f2eeda6a7dd6482de8228ca9ad89b0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_content_download.py
|
easydo-cn/edo_client
|
775f185c54f2eeda6a7dd6482de8228ca9ad89b0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import io
import os
import shutil
import tempfile
import unittest
from edo_client import WoClient
class ContentApi_DownloadTestCase(unittest.TestCase):
'''
    - Basically this is to ensure that
    all the facilities related to HTTP range headers are working properly.
'''
@classmethod
def setUpClass(cls):
cls.file_size = 10 * (2 ** 20)
cls.download_url = 'http://192.168.1.115/docker/unittest/10mb.test'
cls.api_url = 'https://httpbin.org/redirect-to?url={}'.format(
cls.download_url
)
cls.empty_file_url = 'http://192.168.1.115/docker/unittest/empty_file.bin'
# We're just testing some basic util functions,
# and don't want a real WoClient instance
cls.client = WoClient(
cls.api_url + '#',
'', '', '', '',
account='', instance=''
)
cls.tmpdir = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
def test_01_get_download_url(self):
self.assertEqual(
self.client.content.get_download_url(uid=''),
self.download_url,
'Should be able to extract direct download URL from 302 redirect'
)
def test_11_download_to_stream_all(self):
        '''Test: download the whole file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url
)
self.assertEqual(
self.file_size,
stream.tell(),
'Cursor should be at the end of stream after download'
)
stream.seek(0, os.SEEK_SET)
self.assertEqual(
self.file_size,
len(stream.read()),
            'File length should be 10 MiB (10485760 bytes)'
)
def test_12_download_stream_first_byte(self):
        '''Test: download the first byte to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=0,
)
self.assertEqual(1, stream.tell(), 'Download first byte of file')
def test_13_download_stream_head_part(self):
        '''Test: download a leading portion of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=0, end=(5 * (2 ** 20) - 1),
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_14_download_stream_tail_part(self):
        '''Test: starting from the middle, download the second half of the file to a stream'''
stream = io.BytesIO()
self.client.content.download_to_stream(
stream, url=self.download_url, start=(5 * (2 ** 20)), end=None,
)
self.assertEqual(5 * (2 ** 20), stream.tell())
def test_15_download_partial(self):
        '''Test: starting from the middle, download a portion of the file to a stream'''
stream = io.BytesIO()
start, end = 1234, 54321
self.client.content.download_to_stream(
stream, url=self.download_url, start=start, end=end,
)
self.assertEqual(stream.tell(), end - start + 1)
def test_21_get_data_full_size(self):
        '''Test: read the complete file content'''
self.assertEqual(
self.file_size,
len(self.client.content.get_data(url=self.download_url)),
            '.get_data should be able to download the whole file by default',
)
def test_22_get_data_first_byte(self):
        '''Test: read the first byte of the file'''
self.assertEqual(
1,
len(self.client.content.get_data(url=self.download_url, size=1)),
'.get_data should be able to download the 1st byte of given file',
)
def test_23_get_data_head_part(self):
        '''Test: read a leading portion of the file content'''
size = 5432
self.assertEqual(
size,
len(self.client.content.get_data(url=self.download_url, size=size)), # noqa E501
'.get_data should download the first {} bytes'.format(size),
)
def test_24_get_data_tail_part(self):
        '''Test: starting from the middle, read the second half of the file content'''
start = 12345
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size
)),
            '.get_data should download the last {} bytes'.format(size),
)
def test_25_get_data_partial(self):
        '''Test: starting from the middle, read a portion of the file content'''
start = 23451
size = self.file_size - start
self.assertEqual(
size,
len(self.client.content.get_data(
url=self.download_url,
offset=start, size=size,
)),
'.get_data should download {} bytes starting from offset {}'.format(size, start), # noqa E501
)
def test_31_download_to_file(self):
        '''Test: download the whole file to a local file'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.download_url)
self.assertEqual(self.file_size, os.stat(fpath).st_size)
def test_41_download_empty_file(self):
        '''Test: download an empty file to a local file'''
fd, fpath = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
self.client.content.download_to_file(destination=fpath, url=self.empty_file_url)
self.assertEqual(0, os.stat(fpath).st_size)
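# Background sketch (an assumption about the transport, not taken from
# edo_client): partial downloads like the ones exercised above boil down to an
# HTTP Range request, e.g. asking for bytes 1234..54321 of the fixture file.
if __name__ == '__main__':
    import requests

    resp = requests.get(
        'http://192.168.1.115/docker/unittest/10mb.test',
        headers={'Range': 'bytes=1234-54321'},
    )
    # A server that honours the range replies 206 with end - start + 1 bytes.
    print(resp.status_code, len(resp.content))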
| 32.746914
| 106
| 0.590575
| 5,525
| 0.979089
| 0
| 0
| 701
| 0.124225
| 0
| 0
| 1,443
| 0.255715
|
f70e1eec634ed0c89cd786687c6b726187e816d5
| 11,426
|
py
|
Python
|
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
src/train.py
|
Gordonbuck/ml-oov-we
|
ce28cd8b556a16125ba36cd41781a3e60bb26422
|
[
"MIT"
] | null | null | null |
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
| 52.412844
| 120
| 0.591896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 399
| 0.03492
|
f70e20602d9329f0b785241b32a1ae744bf6d702
| 119
|
py
|
Python
|
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
number reverser.py
|
Jayapraveen34/crazy-lover
|
be5bd897c40c31b3e5e6eafe3b6436cb3d888efe
|
[
"BSD-2-Clause"
] | null | null | null |
a = str(input('Enter the number you want to reverse:'))
b = (a[::-1])
c = int(b)
print('the reversed number is',c)
| 23.8
| 56
| 0.605042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.529412
|
f70ec64b9e31daafd1fb2f1ca0a900fb5ba86171
| 3,473
|
py
|
Python
|
pyexcel_xls/xlsw.py
|
pyexcel/pyexcel-xls
|
995cfd273d5360947a528ff3a1ed3f9e52a429ad
|
[
"BSD-3-Clause"
] | 40
|
2016-05-18T20:09:39.000Z
|
2022-02-09T06:39:41.000Z
|
pyexcel_xls/xlsw.py
|
wenxuefeng3930/pyexcel-xls
|
995cfd273d5360947a528ff3a1ed3f9e52a429ad
|
[
"BSD-3-Clause"
] | 46
|
2016-02-01T22:12:31.000Z
|
2021-10-07T18:57:05.000Z
|
pyexcel_xls/xlsw.py
|
wenxuefeng3930/pyexcel-xls
|
995cfd273d5360947a528ff3a1ed3f9e52a429ad
|
[
"BSD-3-Clause"
] | 24
|
2016-01-29T12:26:27.000Z
|
2021-10-31T15:37:15.000Z
|
"""
pyexcel_xlsw
~~~~~~~~~~~~~~~~~~~
The lower level xls file format handler using xlwt
:copyright: (c) 2016-2021 by Onni Software Ltd
:license: New BSD License
"""
import datetime
import xlrd
from xlwt import XFStyle, Workbook
from pyexcel_io import constants
from pyexcel_io.plugin_api import IWriter, ISheetWriter
DEFAULT_DATE_FORMAT = "DD/MM/YY"
DEFAULT_TIME_FORMAT = "HH:MM:SS"
DEFAULT_LONGTIME_FORMAT = "[HH]:MM:SS"
DEFAULT_DATETIME_FORMAT = "%s %s" % (DEFAULT_DATE_FORMAT, DEFAULT_TIME_FORMAT)
EMPTY_SHEET_NOT_ALLOWED = "xlwt does not support a book without any sheets"
class XLSheetWriter(ISheetWriter):
"""
xls sheet writer
"""
def __init__(self, xls_book, xls_sheet, sheet_name):
if sheet_name is None:
sheet_name = constants.DEFAULT_SHEET_NAME
self._xls_book = xls_book
self._xls_sheet = xls_sheet
self._xls_sheet = self._xls_book.add_sheet(sheet_name)
self.current_row = 0
def write_row(self, array):
"""
write a row into the file
"""
for i, value in enumerate(array):
style = None
tmp_array = []
if isinstance(value, datetime.datetime):
tmp_array = [
value.year,
value.month,
value.day,
value.hour,
value.minute,
value.second,
]
value = xlrd.xldate.xldate_from_datetime_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATETIME_FORMAT
elif isinstance(value, datetime.timedelta):
value = value.days + value.seconds / 86_400
style = XFStyle()
style.num_format_str = DEFAULT_LONGTIME_FORMAT
elif isinstance(value, datetime.date):
tmp_array = [value.year, value.month, value.day]
value = xlrd.xldate.xldate_from_date_tuple(tmp_array, 0)
style = XFStyle()
style.num_format_str = DEFAULT_DATE_FORMAT
elif isinstance(value, datetime.time):
tmp_array = [value.hour, value.minute, value.second]
value = xlrd.xldate.xldate_from_time_tuple(tmp_array)
style = XFStyle()
style.num_format_str = DEFAULT_TIME_FORMAT
if style:
self._xls_sheet.write(self.current_row, i, value, style)
else:
self._xls_sheet.write(self.current_row, i, value)
self.current_row += 1
def close(self):
pass
class XLSWriter(IWriter):
"""
xls writer
"""
def __init__(
self,
file_alike_object,
_, # file_type not used
encoding="ascii",
style_compression=2,
**keywords,
):
self.file_alike_object = file_alike_object
self.work_book = Workbook(
style_compression=style_compression, encoding=encoding
)
def create_sheet(self, name):
return XLSheetWriter(self.work_book, None, name)
def write(self, incoming_dict):
if incoming_dict:
IWriter.write(self, incoming_dict)
else:
raise NotImplementedError(EMPTY_SHEET_NOT_ALLOWED)
def close(self):
"""
        This call actually saves the file
"""
self.work_book.save(self.file_alike_object)
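# Minimal standalone sketch of the xlwt pattern used in write_row above (the
# output file name is made up): a date cell needs an XFStyle with a
# num_format_str, otherwise the spreadsheet shows the raw date serial number.
if __name__ == '__main__':
    demo_book = Workbook()
    demo_sheet = demo_book.add_sheet('demo')
    demo_style = XFStyle()
    demo_style.num_format_str = DEFAULT_DATE_FORMAT
    demo_sheet.write(0, 0, datetime.date(2021, 1, 31), demo_style)
    demo_book.save('demo.xls')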
| 31.008929
| 78
| 0.589692
| 2,866
| 0.825223
| 0
| 0
| 0
| 0
| 0
| 0
| 464
| 0.133602
|
f70ef0f412e5276c5b8da11a1ad63834bedea5f9
| 593
|
py
|
Python
|
venv/lib/python3.6/site-packages/gensim/__init__.py
|
bopopescu/wired_cli
|
844b5c2bf32c95ad2974663f0501a85ff6134bd4
|
[
"MIT"
] | 2
|
2021-06-09T20:55:17.000Z
|
2021-11-03T03:07:37.000Z
|
venv/lib/python3.6/site-packages/gensim/__init__.py
|
bopopescu/wired_cli
|
844b5c2bf32c95ad2974663f0501a85ff6134bd4
|
[
"MIT"
] | 4
|
2020-07-26T02:10:42.000Z
|
2021-03-31T18:48:58.000Z
|
venv/lib/python3.6/site-packages/gensim/__init__.py
|
bopopescu/wired_cli
|
844b5c2bf32c95ad2974663f0501a85ff6134bd4
|
[
"MIT"
] | 1
|
2020-07-25T23:57:23.000Z
|
2020-07-25T23:57:23.000Z
|
"""This package contains interfaces and functionality to compute pair-wise document similarities within a corpus
of documents.
"""
from gensim import parsing, corpora, matutils, interfaces, models, similarities, summarization, utils # noqa:F401
import logging
__version__ = '3.5.0'
class NullHandler(logging.Handler):
"""For python versions <= 2.6; same as `logging.NullHandler` in 2.7."""
def emit(self, record):
pass
logger = logging.getLogger('gensim')
if len(logger.handlers) == 0: # To ensure reload() doesn't add another one
logger.addHandler(NullHandler())
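# Sketch of how an application can surface these logs despite the NullHandler
# above (standard logging API; illustrative only, not part of gensim):
#
#   import logging
#   logging.basicConfig(
#       format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)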
| 28.238095
| 114
| 0.726813
| 153
| 0.25801
| 0
| 0
| 0
| 0
| 0
| 0
| 271
| 0.456998
|
f70efa147c6f9c7ee90e557fe0740d068a1ce522
| 213
|
py
|
Python
|
tests/test_ai.py
|
divanorama/katrain
|
dc22aa88526fb6446f908259f06020d649a2d0a9
|
[
"MIT"
] | null | null | null |
tests/test_ai.py
|
divanorama/katrain
|
dc22aa88526fb6446f908259f06020d649a2d0a9
|
[
"MIT"
] | null | null | null |
tests/test_ai.py
|
divanorama/katrain
|
dc22aa88526fb6446f908259f06020d649a2d0a9
|
[
"MIT"
] | null | null | null |
import pytest
from katrain.core.constants import AI_STRATEGIES_RECOMMENDED_ORDER, AI_STRATEGIES
class TestAI:
def test_order(self):
assert set(AI_STRATEGIES_RECOMMENDED_ORDER) == set(AI_STRATEGIES)
| 23.666667
| 81
| 0.798122
| 113
| 0.530516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f70fbb21c94acb9d07d8e2e1ca75454e92d0eaf5
| 28,076
|
py
|
Python
|
game_client.py
|
wenlianglaw/Tetris-in-Python
|
d4f0a22c4827e7eeb44c55def3f024e0c6932ebe
|
[
"MIT"
] | 1
|
2021-06-25T20:43:19.000Z
|
2021-06-25T20:43:19.000Z
|
game_client.py
|
wenlianglaw/Tetris-in-Python
|
d4f0a22c4827e7eeb44c55def3f024e0c6932ebe
|
[
"MIT"
] | null | null | null |
game_client.py
|
wenlianglaw/Tetris-in-Python
|
d4f0a22c4827e7eeb44c55def3f024e0c6932ebe
|
[
"MIT"
] | null | null | null |
# This file defines the back end of the Tetris game
#
# GameState is the base class of GameClient.
#
# GameClient.Run() will start two threads:
# - _ProcessActions: Process the action list every x seconds
# - _AutoDrop: Auto drops the current piece.
#
# GameClient:
# - current piece
# - held piece
# - piece list
# - color_map: game board
# - InputActions(...): Inputs a list of actions.
# - ProcessActions(...): Lets the game client process a list of actions
# directly
# - ProcessAction(...): Lets the game client process one actions directly
# - PutPiece(...): Puts the current piece if the position is valid.
# - GetState(...): Gets game state, useful to AI
# - CheckValidity(...): Checks if a move is valid
# - SpawnPiece(...): Sets the current piece.
# - Restart(...): Restarts the game.
# - Rotate(...): Alternatively, callers can directly call Rotate to rotate
# current_piece
# - Move(...): Alternatively, callers can directly call Move to move the
# current_piece
#
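# Illustrative usage sketch (not from the original file; method behaviour is
# assumed from the summary above, and the Action constructor arguments are
# hypothetical):
#
#   game = GameClient()
#   game.Run()                      # starts _ProcessActions and _AutoDrop
#   game.InputActions([...])        # queue a list of actions.Action objects
#   state = game.GetState()         # snapshot for rendering or an AI
#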
import copy
import queue
import threading
import time
from threading import Lock
from typing import Tuple, List
import numpy as np
import actions
import shape
# Some global settings
DEFAULT_LENGTH = 20
DEFAULT_WIDTH = 10
MAP_PADDING_SIZE = 4
# When there are less than threshold pieces, spawn a new bag.
REFILL_THRESHOLD = 5
# Disable the auto drop in next few seconds
MAXIMUM_LOCK_TIME = 4
INCREMENTAL_LOCK_TIME = 1
# Scores
SINGLE = 5
DOUBLE = 10
TSS = 20
TRIPLE = 40
QUAD = 50
TSD = 60
TST = 80
PC = 120
# ATTACKS
ATTACK_DOUBLE = 1
ATTACK_TSS = 2
ATTACK_TRIPLE = 2
ATTACK_QUAD = 4
ATTACK_TSD = 4
ATTACK_TST = 6
ATTACK_PC = 10
class InternalError(Exception):
"""Any internal errors."""
class GameState:
def __init__(self):
self.height = 0
self.width = 0
self.color_map = np.array([])
self.current_piece = None
self.held_piece = None
self.score = 0
self.piece_list = []
self.is_gameover = False
self.can_swap = True
self.accumulated_lines_eliminated = 0
self.piece_dropped = 0
self.blevel_increase = False
self.level = 0
self.line_sent = 0
self.line_received = 0
def __deepcopy__(self, memodict=None):
if memodict is None:
memodict = dict()
another = copy.copy(self)
another.color_map = self.color_map.copy()
if self.current_piece is not None:
another.current_piece = self.current_piece.copy()
if self.held_piece is not None:
another.held_piece = self.held_piece.copy()
another.piece_list = copy.deepcopy(self.piece_list.copy())
return another
def copy(self):
return self.__deepcopy__()
def __str__(self):
ret = ""
ret += f"""height: {self.height}
width: {self.width}
color_map: {self.color_map}
current_piece: {self.current_piece}
held_piece: {self.held_piece}
score: {self.score}
piece_list: {self.piece_list}
is_gameover: {self.is_gameover}
can_swap: {self.can_swap}
piece_dropped: {self.piece_dropped}
level: {self.level}
"""
class GameClient(GameState):
def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,
map_side_padding=MAP_PADDING_SIZE):
super().__init__()
self.height = height
self.width = width
self.map_height_padding = map_height_padding
self.map_side_padding = map_side_padding
self.dtype = np.uint8
self.dtype_length = 8
if self.width + 2 * map_side_padding > 8:
self.dtype = np.uint16
self.dtype_length = 16
if self.width + 2 * map_side_padding > 16:
self.dtype = np.uint32
self.dtype_length = 32
if self.width + 2 * map_side_padding > 32:
self.dtype = np.uint64
self.dtype_length = 64
# NumPy has no unsigned integer type wider than 64 bits, so wider boards
# cannot be packed into a single bit-map row.
if self.width + 2 * map_side_padding > 64:
raise InternalError(
"width too long to support bit map. Consider changing it to a smaller value.")
# Lock time settings
# When the lock is enabled, count the lock time.
# When the accumulated lock time is greater than the current maximum lock time,
# force the auto drop to happen. Otherwise the auto drop is disabled for this turn.
# When the current lock time is reached but a refresh-lock-time request is generated,
# increase the current maximum lock time by the incremental lock time.
self.maximum_lock_time = MAXIMUM_LOCK_TIME
self.current_maximum_lock_time = 0
self.incremental_lock_time = INCREMENTAL_LOCK_TIME
self.accumulate_lock_time = 0
# Only a move or rotation at the bottom locks the auto drop
self._enable_lock_time = False
# Color map marks the color for each cell.
self.color_map = np.array([[]], dtype=self.dtype)
# Bit map for a better performance in some calculation.
self.bit_map = np.array([], dtype=self.dtype)
# Lock for current_piece
self.mutex_current_piece = Lock()
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500 # 500 ms at level 0
self._current_spawn_interval = 500
# actions.Action
self.last_action = None
self.disable_autodrop = False
self.line_tobesent = 0
# Used when calculating the auto-drop interval decrease based on the current level.
# Generated from the sigmoid function
# x = np.linspace(0, 40, 40)
# interval_decrease = 110 / (1 + np.exp(0.16 * x))
# interval_decrease = np.cumsum(interval_decrease)
# print(repr(np.cumsum(interval_decrease)))
self.interval_decrease = np.array(
[55., 100.49727968, 150.55179446, 190.28030383,
230.85041422, 260.47244367, 290.38990828, 320.86947489,
345.19115272, 350.63934095, 380.49515164, 400.03022699,
410.5020957, 420.15098155, 430.19789113, 440.8437644,
450.26946046, 455.63636342, 461.08741849, 465.74844074,
469.72957119, 473.12678557, 476.02338748, 478.4914391,
480.59310001, 482.38185737, 483.90364044, 485.19781892,
486.29808909, 487.23325451, 488.02790975, 488.70303602,
489.27651798, 489.76359062, 490.17722443, 490.52845671,
490.82667585, 491.07986489, 491.2948099, 491.47727802])
self._RefillPieces()
self._TakePieceFromList()
self.accumulated_lines_eliminated = 0
# When soft-dropping, temporarily disable auto-drop
self.soft_drop = False
self.piece_dropped = 0
# Must be put after the initializations above
self._InitMap()
def _InitMap(self):
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1
self.bit_map = np.concatenate((
np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),
np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)
self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],
dtype=self.dtype)
def Restart(self):
self._InitMap()
self.piece_list = []
self.held_piece = None
self.current_piece = None
# Lock of the game state
self.mutex_current_piece = Lock()
self.is_gameover = False
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500.0
self._current_spawn_interval = 500.0
# actions.Action
self.last_action = []
self.can_swap = True
self.score = 0
self.accumulate_lock_time = 0
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self.line_sent = 0
self.line_received = 0
self.line_tobesent = 0
self._enable_lock_time = False
self._RefillPieces()
self._TakePieceFromList()
def Run(self):
auto_drop_th = threading.Thread(target=self.AutoDrop, name="auto_drop", daemon=True)
process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)
if not self.disable_autodrop:
auto_drop_th.start()
process_input_th.start()
if not self.disable_autodrop:
auto_drop_th.join()
process_input_th.join()
print("game ends")
def GetState(self) -> GameState:
"""Gets game state.
Returns a deep copy of the game state.
"""
return copy.deepcopy(super())
def GetCell(self, i: int, j: int) -> int:
"""Gets cell at [i,j].
Note: this function does not check for out-of-bounds indices.
"""
return self.color_map[i, j]
def GetMap(self):
"""Gets whole color_map."""
return self.color_map
def GetMapArea(self, corner: Tuple[int, int],
size: Tuple[int, int]) -> np.array:
"""Gets an area of
:param top_left:
:param bottom_right:
:return: The area of the color_map.
"""
size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),
np.min([size[1], self.color_map.shape[1] - corner[1]]))
return self.color_map[corner[0]: corner[0] + size[0],
corner[1]: corner[1] + size[1]]
def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):
"""Sets the cell at [i,j] to value v."""
(i, j) = pos
bit_map = self.bit_map.copy()
if map is None or map is self.color_map:
map = self.color_map
bit_map = self.bit_map
map[i, j] = v
# Set a bit to a value: clear the bit to 0, then OR in the value
bit_v = 0 if v == 0 else 1
bit_j_pos = self.width + self.map_side_padding - 1 - j
bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)
def SetWholeMap(self, map: np.array):
if map.shape != self.color_map.shape:
raise InternalError(
f"Map shape {map.shape}"
f" must match the color_map shape: {self.color_map.shape}")
self.color_map = map
# Convert the map to a Boolean map
bit_color_map = map != 0
# Reverse the column order and add padding, then call np.packbits(..., bitorder="little")
bit_color_map = bit_color_map[:, ::-1]
bit_color_map = np.pad(
bit_color_map,
((0, 0), (self.map_side_padding, self.map_side_padding)),
"constant", constant_values=(1,))
padding0_len = self.dtype_length - bit_color_map.shape[1]
bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),
"constant", constant_values=(0,))
int_color_map = np.packbits(bit_color_map, bitorder="little").view(self.dtype)
self.bit_map[0:self.map_height_padding + self.height] = int_color_map
print(int_color_map)
print(self.bit_map)
def copy(self):
another = copy.copy(self)
another.last_action = copy.copy(self.last_action)
if self.last_put_piece is not None:
another.last_put_piece = self.last_put_piece.copy()
another.color_map = np.copy(self.color_map)
another.bit_map = np.copy(self.bit_map)
another.action_list = copy.copy(self.action_list)
another.piece_list = self.piece_list.copy()
another.current_piece = self.current_piece.copy()
if self.held_piece is None:
another.held_piece = None
else:
another.held_piece = self.held_piece.copy()
return another
def AutoDrop(self):
while True:
if self.soft_drop:
# If it is soft dropping, we don't perform auto drop.
self.soft_drop = False
else:
if self.CheckValidity(self.current_piece, offset=(1, 0)):
self.Move(actions.Action(down=True, source_user_or_ai=False))
else:
if (not self._enable_lock_time or
self.accumulate_lock_time >= self.current_maximum_lock_time):
self.PutPiece()
else:
self.accumulate_lock_time += self._current_spawn_interval / 1000
time.sleep(self._current_spawn_interval / 1000)
def InputActions(self, acts: List[actions.Action]):
if self.is_gameover:
return
if len(acts) > 30:
print("len:", len(acts))
acts = acts[-30:]
for act in acts:
if self.action_list.qsize() > 50:
break
self.action_list.put(act)
def ProcessActions(self, actions: List[actions.Action], post_processing=True):
for a in actions:
self.ProcessAction(a, post_processing=post_processing)
def ProcessAction(self, action: actions.Action, post_processing=True):
if self.is_gameover:
return
# print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
# self.test += 1
# print(self.test)
if action.swap:
self.Swap()
self.Rotate(action.rotation)
self.Move(action, post_processing=post_processing)
def _ProcessActionsThread(self):
while True:
while not self.action_list.empty():
act = self.action_list.get()
self.ProcessAction(act)
self.action_list.task_done()
time.sleep(0.001)
def SetLevel(self, level: int = 0):
"""Let the front end set!"""
self.level = level
i = min(len(self.interval_decrease), self.level)
self._current_spawn_interval = max(
10, self._init_spawn_interval - self.interval_decrease[i])
def IncreaseLevel(self, inc: int = 1):
"""Let the front end decide!"""
self.level += inc
self.SetLevel(self.level)
def Move(self, action: actions.Action, post_processing=True) -> bool:
"""Moves the current piece.
:param action: The action describing the move direction.
:param post_processing: if True, put the piece on the color_map and
apply line elimination. Otherwise just update the current_piece's state.
:return True if moved; False otherwise
"""
if (action.direction == actions.NONE and
not action.down):
return False
moved = False
if action.down:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
self.soft_drop = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.LEFT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, -1)):
self.current_piece.y += -1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.RIGHT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, 1)):
self.current_piece.y += 1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
try:
self.mutex_current_piece.acquire()
while self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
finally:
self.mutex_current_piece.release()
if post_processing and action.direction == actions.HARD_DROP:
self.PutPiece()
if moved:
self.last_action = action
at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
if (at_bottom and action.direction != actions.HARD_DROP and
action.source_user):
self._RefreshLockTime()
return moved
def _RefreshLockTime(self):
self._enable_lock_time = True
if self.accumulate_lock_time >= self.current_maximum_lock_time:
self.current_maximum_lock_time = min(
self.current_maximum_lock_time + self.incremental_lock_time,
self.maximum_lock_time)
def _ResetLockTime(self):
self._enable_lock_time = False
self.accumulate_lock_time = 0
self.current_maximum_lock_time = 0
def Swap(self):
"""Swaps the held piece and the current if its swappable"""
if not self.can_swap:
return
try:
self.mutex_current_piece.acquire()
t = self.held_piece
self.held_piece = self.current_piece
self.current_piece = t
if not self.current_piece:
self._TakePieceFromList()
self.current_piece.Init()
self.held_piece.Init()
self.can_swap = False
finally:
self.mutex_current_piece.release()
def CheckGameOver(self):
self.is_gameover = np.any(
self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
return self.is_gameover
def _AnalyzeElimination(self, n_eliminate: int) -> int:
ret = 0
is_last_put_t = isinstance(self.last_put_piece, shape.T)
if n_eliminate == 1:
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSS")
ret += TSS
self.line_tobesent += ATTACK_TSS
else:
ret += SINGLE
if n_eliminate == 2:
# TSD
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSD")
ret += TSD
self.line_tobesent += ATTACK_TSD
# Normal Double
else:
ret += DOUBLE
self.line_tobesent += ATTACK_DOUBLE
if n_eliminate == 3:
# TST
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TST")
ret += TST
self.line_tobesent += ATTACK_TST
else:
ret += TRIPLE
self.line_tobesent += ATTACK_TRIPLE
if n_eliminate == 4:
ret += QUAD
self.line_tobesent += ATTACK_QUAD
# Checks for PC
if np.all(self.color_map == 0):
print("PC")
ret += PC
self.line_tobesent += ATTACK_PC
return ret * (self.level + 3)
def _LineClear(self):
elimated_lines = []
elimated_cnt = 0
# Check the 4 rows the last piece could occupy. This does not adapt to shapes
# taller than 4 rows, but such shapes are not part of this game and there is
# no plan to support custom shapes.
for row in range(4):
if not (self.last_put_piece.x + row >= 0 and
self.last_put_piece.x + row < self.height + self.map_height_padding):
continue
if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
elimated_lines.append(row + self.last_put_piece.x)
elimated_cnt += 1
self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
dtype=self.dtype),
np.delete(self.color_map, elimated_lines, axis=0)))
# Updates the bit_map
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
self.bit_map = np.concatenate((elimated_cnt * [init_row],
np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
self.accumulated_lines_eliminated += elimated_cnt
self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
def _SendAttack(self):
"""Send attack to target."""
# This feature has not been implemented yet.
self.line_sent += self.line_tobesent
self.line_tobesent = 0
def PutPiece(self, piece: shape.Shape = None):
""" Puts a piece to color_map if it is a valid placement then execute the post processing.
:param piece: The piece to put, if None, put the self.current_piece
:param color_map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
if self._PrePutPiece(piece):
self._PostPutPiece(piece)
return True
else:
return False
def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
""" Puts a piece to color_map if it is a valid placement.
Post put processing such as self._LineClear will not be executed
:param piece: The piece to put, if None, put the self.current_piece
:param map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
try:
if not piece:
self.mutex_current_piece.acquire()
piece = self.current_piece
if map is None:
map = self.color_map
if not self.CheckValidity(piece):
return False
for (i, j) in piece.GetShape():
self.SetMap((piece.x + i, piece.y + j), piece.id, map)
return True
finally:
if self.mutex_current_piece.locked():
self.mutex_current_piece.release()
def _PostPutPiece(self, piece: shape.Shape = None):
if piece is not None:
self.last_put_piece = piece
else:
self.last_put_piece = self.current_piece
# LineClear should be called prior to SendAttack
self._LineClear()
if piece is None:
self._TakePieceFromList()
self.CheckGameOver()
self._ResetLockTime()
self._SendAttack()
self.can_swap = True
self.piece_dropped += 1
def TextDraw(self):
preview_map = self.color_map.copy()
self._PrePutPiece(self.current_piece, preview_map)
for i in preview_map:
print(i)
print()
def SpawnPiece(self, piece: shape.Shape = None) -> bool:
if not piece:
self._TakePieceFromList()
else:
self.current_piece = piece.copy()
return self.CheckValidity(self.current_piece)
def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
"""Finds a location that fits this piece with n 90rotations.
Ref: https://tetris.fandom.com/wiki/SRS
:param piece: The piece to be put in the color_map. If none, it will be set to the current_piece
:param num_90rotations: How many 90 rotations
:return: piece - shape.Shape: the piece with rotations that fits the color_map.
"""
if not piece:
piece = self.current_piece
def _IsJLSTZ(piece: shape.Shape):
jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
for s in jlstz:
if isinstance(piece, s):
return True
return False
# The 180 rotation wall kick table is copied from
# https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
# which originates from
# https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
offset_map_jlstz = [
# state 0
([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
# 0>>2, 180 rotation
# [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
[(0, 0)],
[(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
# state 1
([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
# l>>3, 180 rotation
# [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
# state 2
([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
# [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
[(0, 0)],
[(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
# state 3
([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
# 3>>1, 180 rotation
# [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
]
offset_map_i = [
# state 0
[[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
# [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
[(0, 0)],
[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
# state 1
[[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
# [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
[(0, 0)],
[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
# state 2
[[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
# [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation
[(0, 0)],
[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1
# state 3
[[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0
# [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation
[(0, 0)],
[(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2
]
state = piece.state
num_90rotations %= 4
offset_piece = piece.copy()
ori_x = offset_piece.x
ori_y = offset_piece.y
for _ in range(num_90rotations):
offset_piece.Rotate90()
if num_90rotations == 0:
if self.CheckValidity(offset_piece):
return offset_piece
num_90rotations -= 1
if _IsJLSTZ(piece):
for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
else:
for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
return None
def Rotate(self, n: int) -> bool:
"""Rotates the current piece.
:param n: rotations, in range [0,4)
:return: True if the current piece can be rotated. False otherwise.
"""
n %= 4
if n == 0:
return False
fitted_piece = self._FindFittedPiece(num_90rotations=n)
if fitted_piece:
self.current_piece = fitted_piece
self.last_action = actions.Action(dir=0, rotation=n)
if not self.CheckValidity(self.current_piece, (1, 0)):
self._RefreshLockTime()
return fitted_piece is not None
def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):
"""Checks if the piece with offset can be put in the color_map
:param piece: The piece to be put.
:param offset: The inital offset to the piece
:return: True if the current state can fit into the color_map. False otherwise.
"""
(ox, oy, os) = (piece.x, piece.y, piece.state)
piece.x += offset[0]
piece.y += offset[1]
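# Collision test via bit arithmetic: take the packed bit rows of the board
# that the (up to 4-row-tall) piece overlaps.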
a = self.bit_map[piece.x: piece.x + 4]
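# self.width - piece.y is the horizontal shift that lines the piece's bit mask
# up with column piece.y of the padded row.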
b = self.width - piece.y
c = piece.GetBitMap().astype(self.dtype)
d = c << b
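# A nonzero AND means the piece overlaps filled cells or the side/bottom padding.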
e = a & d
check_rst = e == 0
(piece.x, piece.y, piece.state) = (ox, oy, os)
return np.all(check_rst)
def _GetNextBag(self):
start_y = int((self.width - 3) / 2)
assert start_y >= 0
bag = [shape.I(start_y=start_y),
shape.J(start_y=start_y),
shape.L(start_y=start_y),
shape.O(start_y=start_y),
shape.S(start_y=start_y),
shape.T(start_y=start_y),
shape.Z(start_y=start_y)]
np.random.shuffle(bag)
return bag
def _RefillPieces(self):
"""
When there are REFILL_THRESHOLD or fewer pieces left in the list,
refill it with a new bag.
"""
if len(self.piece_list) <= REFILL_THRESHOLD:
self.piece_list.extend(self._GetNextBag())
def _TakePieceFromList(self):
self._RefillPieces()
self.current_piece = self.piece_list[0].copy()
self.piece_list = self.piece_list[1:]
def CreateGameFromState(state: GameState) -> GameClient:
game = GameClient(height=state.height, width=state.width)
game.color_map = np.copy(state.color_map)
game.current_piece = state.current_piece.copy()
if state.held_piece is not None:
game.held_piece = state.held_piece.copy()
else:
game.held_piece = None
game.score = state.score
game.piece_list = state.piece_list.copy()
game.can_swap = state.can_swap
game.is_gameover = state.is_gameover
game.accumulated_lines_eliminated = state.accumulated_lines_eliminated
game.piece_dropped = state.piece_dropped
game.line_sent = state.line_sent
game.line_received = state.line_received
return game
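# A minimal usage sketch of the API described in the header comment; it is not
# part of the original module. It drives the game synchronously instead of
# starting the Run() threads, and it only uses names defined in this file.
def _example_drop_one_piece():
    game = GameClient(height=DEFAULT_LENGTH, width=DEFAULT_WIDTH)
    game.disable_autodrop = True
    game.Rotate(1)  # rotate the spawned piece once, applying SRS wall kicks
    # Manual hard drop, mirroring the loop inside Move() for HARD_DROP.
    while game.CheckValidity(game.current_piece, offset=(1, 0)):
        game.current_piece.x += 1
    game.PutPiece()  # lock the piece, clear full lines, take the next piece
    game.TextDraw()  # print the resulting board to stdout
    return game.GetState()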
| 32.799065
| 123
| 0.63036
| 25,737
| 0.91669
| 0
| 0
| 0
| 0
| 0
| 0
| 6,960
| 0.247899
|
f711bbbc339573d1744df69fd2b79a94a7b3f1b9
| 2,615
|
py
|
Python
|
gateway/builders/authorization_builder.py
|
TarlanPayments/gw-python-client
|
a0dd5292c877ab06bf549693a1bfc9fb06ef9d19
|
[
"MIT"
] | null | null | null |
gateway/builders/authorization_builder.py
|
TarlanPayments/gw-python-client
|
a0dd5292c877ab06bf549693a1bfc9fb06ef9d19
|
[
"MIT"
] | null | null | null |
gateway/builders/authorization_builder.py
|
TarlanPayments/gw-python-client
|
a0dd5292c877ab06bf549693a1bfc9fb06ef9d19
|
[
"MIT"
] | null | null | null |
# The MIT License
#
# Copyright (c) 2017 Tarlan Payments.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class AuthorizationBuilder(object):
def __init__(self, __client_auth_data_set, __client_mandatory_fields):
from gateway.data_sets.request_parameters import (
RequestParameters,
RequestParametersTypes
)
self.__data_sets = RequestParameters
self.__data_types = RequestParametersTypes
self.__auth_mandatory_fields = __client_mandatory_fields
self.__auth_data_set = __client_auth_data_set
def add_account_guid(self, guid=None):
"""
Tarlan Payments Merchant Account GUID.
Args:
guid (str): Tarlan Payments Merchant Account GUID.
"""
self.__auth_mandatory_fields[self.__data_sets.AUTH_DATA_ACCOUNT_GUID] = self.__data_types.AUTH_DATA_ACCOUNT_GUID
self.__auth_data_set[self.__data_sets.AUTH_DATA_ACCOUNT_GUID] = guid
def add_secret_key(self, value=None):
"""
Tarlan Payments Merchant Password
Args:
value (str): Tarlan Payments Merchant Password
"""
self.__auth_mandatory_fields[self.__data_sets.AUTH_DATA_SECRET_KEY] = self.__data_types.AUTH_DATA_SECRET_KEY
self.__auth_data_set[self.__data_sets.AUTH_DATA_SECRET_KEY] = value
def add_session_id(self, id_value=None):
"""
Tarlan Payments Gateway Session ID
Args:
id_value (str): Tarlan Payments Gateway Session ID
"""
self.__auth_data_set[self.__data_sets.AUTH_DATA_SECRET_KEY] = id_value
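# A minimal sketch (not part of the original module) of how client code might
# drive this builder; it requires the gateway.data_sets package to be
# importable. The two dicts stand in for the client's auth data set and
# mandatory-field registry, and the GUID/password values are placeholders.
def _example_build_auth_block():
    auth_data_set = {}
    mandatory_fields = {}
    builder = AuthorizationBuilder(auth_data_set, mandatory_fields)
    builder.add_account_guid('00000000-0000-0000-0000-000000000000')
    builder.add_secret_key('merchant-password-placeholder')
    # auth_data_set now holds the GUID and secret key keyed by the
    # RequestParameters constants; mandatory_fields records their expected types.
    return auth_data_set, mandatory_fields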
| 41.507937
| 120
| 0.728489
| 1,497
| 0.572467
| 0
| 0
| 0
| 0
| 0
| 0
| 1,501
| 0.573996
|
f712616c7f9dabddbf70b11e2c6cc653c11f9e33
| 1,931
|
py
|
Python
|
cas9/score.py
|
cangtu/cot
|
2ecbe83fe7bb3538f80692fc4412830f6c976558
|
[
"MIT"
] | 1
|
2018-07-11T06:12:51.000Z
|
2018-07-11T06:12:51.000Z
|
cas9/score.py
|
cangtu/cot
|
2ecbe83fe7bb3538f80692fc4412830f6c976558
|
[
"MIT"
] | null | null | null |
cas9/score.py
|
cangtu/cot
|
2ecbe83fe7bb3538f80692fc4412830f6c976558
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright @ 0x6c78.
#
# 16-10-20 下午1:27 0x6c78@gmail.com
#
# Distributed under terms of the MIT License
from functools import reduce  # reduce is not a builtin in Python 3
from operator import mul
from itertools import combinations
class Score(object):
def __init__(self):
"""
Per-position mismatch specificity weights determined experimentally by the
Zhang lab; see http://crispr.mit.edu/about for details.
"""
self.m = (0, 0, 0.014, 0, 0, 0.395, 0.317, 0, 0.389, 0.079, 0.445,
0.508, 0.613, 0.851, 0.732, 0.828, 0.615, 0.804, 0.685, 0.583)
def _t1(self, locs):
"""
:param locs: positions of the mismatches
:return: value of the first term of the formula
"""
return reduce(mul, [1-self.m[loc] for loc in locs])
@staticmethod
def _t2(locs):
"""
:param locs: positions of the mismatches; without any mismatch there is no
mean pairwise distance, so len(locs) must be at least 1
:return: value of the second term of the formula
"""
if len(locs) == 1:
return 1.000
else:
locs = sorted(locs)
length = len(locs)
mpd = (locs[-1] - locs[0]) / (length - 1) # mean pairwise distance
return 1 / (((19 - mpd) / 19) * 4 + 1)
@staticmethod
def _t3(m):
"""
:param m: number of mismatched bases
:return: value of the third term of the formula
"""
return 1 / (m ** 2)
def get(self, locs):
if len(locs) == 0:
return 100.000
elif len(locs) == 1:
return round(100 * self._t1(locs), 3)
else:
return round(100 * self._t1(locs) * self._t2(locs) * self._t3(len(locs)), 3)
@classmethod
def to_dict(cls):
"""
Precompute the scores for every possible mismatch combination and store them
in a dictionary to speed up score lookups.
:return: a dict whose keys are mismatch positions joined by underscores and whose values are the scores
"""
mm2score = {}
pos_list = range(20)
score = cls()
for mm_cnt in range(5):  # xrange does not exist in Python 3
for mm_pos_list in combinations(pos_list, mm_cnt):
mm2score['_'.join(str(_) for _ in mm_pos_list)] = score.get(mm_pos_list)
return mm2score
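# A small usage sketch (not part of the original module): score a guide with
# mismatches at 0-based positions 5 and 16, directly and via the precomputed
# lookup table built by to_dict().
if __name__ == '__main__':
    score = Score()
    print(score.get([5, 16]))      # direct computation from the formula
    lookup = Score.to_dict()
    print(lookup['5_16'])          # same value through the precomputed dict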
| 26.094595
| 88
| 0.518384
| 2,034
| 0.910882
| 0
| 0
| 1,208
| 0.540976
| 0
| 0
| 912
| 0.408419
|
f71362b0a4e90908b800515208bd4b73487ecd9e
| 1,823
|
py
|
Python
|
RiotGames/API/Match.py
|
Timohiho/RiotGames
|
f75256cca1b5c224393dca99296a6163b70b335f
|
[
"MIT"
] | 2
|
2021-05-05T12:33:51.000Z
|
2021-12-15T13:08:44.000Z
|
RiotGames/API/Match.py
|
Timohiho/RiotGames
|
f75256cca1b5c224393dca99296a6163b70b335f
|
[
"MIT"
] | null | null | null |
RiotGames/API/Match.py
|
Timohiho/RiotGames
|
f75256cca1b5c224393dca99296a6163b70b335f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021.
# The copyright lies with Timo Hirsch-Hoffmann, the further use is only permitted with reference to source
import json
import urllib.request
from RiotGames.API.RiotApi import RiotApi
class Match(RiotApi):
__timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}"
def __init__(self, apikey: str):
"""
:param apikey:
"""
super().__init__(apikey)
self.__super = super()
def by_id(self, match_id: int, region: str):
"""
Special Function still in development
https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
TODO
:param match_id:
:param region:
:return:
"""
pass
def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None,
begin_index: int = None, end_index: int = None, champions: list = None,
queue: list = None, season: list = None):
"""
Special Function still in development
https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
TODO
format url
:param account_id:
encrypted account id
:param begin_time:
:param end_time:
:param begin_index:
:param end_index:
:param champions:
:param queue:
:param season:
:return:
"""
pass
def timeline_by_match_id(self, match_id: int, region: str) -> dict:
"""
:param match_id:
:param region:
:return:
"""
# Parse the JSON body instead of eval-ing it; eval cannot handle JSON's
# true/false/null literals and would execute arbitrary response content.
return json.loads(
urllib.request.urlopen(
self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())).read())
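# A minimal usage sketch (not part of the original module). The API key, match
# id and region below are placeholders; a valid key from
# https://developer.riotgames.com is required for the request to succeed.
if __name__ == '__main__':
    match_api = Match('RGAPI-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')
    timeline = match_api.timeline_by_match_id(4242424242, 'euw1')
    print(sorted(timeline.keys()))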
| 28.936508
| 114
| 0.580362
| 1,622
| 0.889742
| 0
| 0
| 0
| 0
| 0
| 0
| 949
| 0.52057
|
f714e5ccca4b369e0fbd09fb0a4e6218788b9ed7
| 3,513
|
py
|
Python
|
google_or_tools/coloring_ip_sat.py
|
tias/hakank
|
87b7f180c9393afce440864eb9e5fb119bdec1a4
|
[
"MIT"
] | 279
|
2015-01-10T09:55:35.000Z
|
2022-03-28T02:34:03.000Z
|
google_or_tools/coloring_ip_sat.py
|
tias/hakank
|
87b7f180c9393afce440864eb9e5fb119bdec1a4
|
[
"MIT"
] | 10
|
2017-10-05T15:48:50.000Z
|
2021-09-20T12:06:52.000Z
|
google_or_tools/coloring_ip_sat.py
|
tias/hakank
|
87b7f180c9393afce440864eb9e5fb119bdec1a4
|
[
"MIT"
] | 83
|
2015-01-20T03:44:00.000Z
|
2022-03-13T23:53:06.000Z
|
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.
Inspired by the GLPK:s model color.mod
'''
COLOR, Graph Coloring Problem
Written in GNU MathProg by Andrew Makhorin <mao@mai2.rcnet.ru>
Given an undirected loopless graph G = (V, E), where V is a set of
nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to
find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set
of colors whose cardinality is as small as possible, such that
F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must
be assigned different colors.
'''
This is a port of my old OR-tools CP solver coloring_ip.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
model = cp.CpModel()
# max number of colors
# [we know that 4 suffices for normal maps]
nc = 5
# number of nodes
n = 11
# set of nodes
V = list(range(n))
num_edges = 20
#
# Neighbours
#
# This data correspond to the instance myciel3.col from:
# http://mat.gsia.cmu.edu/COLOR/instances.html
#
# Note: 1-based (adjusted below)
E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
[3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
[8, 11], [9, 11], [10, 11]]
#
# declare variables
#
# x[i,c] = 1 means that node i is assigned color c
x = {}
for v in V:
for j in range(nc):
x[v, j] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, j))
# u[c] = 1 means that color c is used, i.e. assigned to some node
u = [model.NewIntVar(0, 1, 'u[%i]' % i) for i in range(nc)]
# number of colors used, to minimize
num_colors = model.NewIntVar(0,nc, "num_colors")
model.Add(num_colors == sum(u))
#
# constraints
#
# each node must be assigned exactly one color
for i in V:
model.Add(sum([x[i, c] for c in range(nc)]) == 1)
# adjacent nodes cannot be assigned the same color
# (and adjust to 0-based)
for i in range(num_edges):
for c in range(nc):
model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])
# objective
model.Minimize(num_colors)
#
# solution
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print()
print('number of colors:', solver.Value(num_colors))
print('colors used:', [solver.Value(u[i]) for i in range(nc)])
print()
for v in V:
print('v%i' % v, ' color ', end=' ')
for c in range(nc):
if solver.Value(x[v, c]) == 1:
print(c)
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
| 27.232558
| 78
| 0.63507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,124
| 0.604611
|
f715d5acbe3a069259390dee428b7666dca26c08
| 9,706
|
py
|
Python
|
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem_utils.py
|
ckosten/ValueNet4SPARQL
|
de320a2f0e1a4c5a6c0e5cc79057dda9901046e8
|
[
"Apache-2.0"
] | 1
|
2021-09-23T13:02:45.000Z
|
2021-09-23T13:02:45.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/27
# @Author : Jiaqi&Zecheng
# @File : sem_utils.py
# @Software: PyCharm
"""
import os
import json
import re as regex
import spacy
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def partial_match(query, table_name):
query = [token.lemma_ for token in nlp(query)]
table_name = [nlp(token)[0].lemma_ for token in table_name]
if query in table_name:
return True
return False
def is_partial_match(query, table_names):
query = nlp(query)[0].lemma_
table_names = [[token.lemma_ for token in nlp(names)] for names in table_names]
same_count = 0
result = None
for names in table_names:
if query in names:
same_count += 1
result = names
return result if same_count == 1 else False
def multi_option(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
re = is_partial_match(question[i][0], names)
if re is not False:
return re
return False
def multi_equal(question, q_ind, names, N):
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question):
if question[i] == names:
return i
return False
def random_choice(question_arg, question_arg_type, names, ground_col_labels, q_ind, N, origin_name):
# First, check whether the question explicitly mentions another table.
for t_ind, t_val in enumerate(question_arg_type):
if t_val == ['table']:
return names[origin_name.index(question_arg[t_ind])]
for i in range(q_ind + 1, q_ind + N + 1):
if i < len(question_arg):
if len(ground_col_labels) == 0:
for n in names:
if partial_match(question_arg[i][0], n) is True:
return n
else:
for n_id, n in enumerate(names):
if n_id in ground_col_labels and partial_match(question_arg[i][0], n) is True:
return n
if len(ground_col_labels) > 0:
return names[ground_col_labels[0]]
else:
return names[0]
def alter_column0(datas):
"""
Attach a concrete table to each column * ("C(0)") reference.
:return: None; each entry of datas gets a 'model_result_replace' field
"""
zero_count = 0
count = 0
result = []
for d in datas:
if 'C(0)' in d['model_result']:
pattern = regex.compile('C\(.*?\) T\(.*?\)')
result_pattern = list(set(pattern.findall(d['model_result'])))
ground_col_labels = []
for pa in result_pattern:
pa = pa.split(' ')
if pa[0] != 'C(0)':
index = int(pa[1][2:-1])
ground_col_labels.append(index)
ground_col_labels = list(set(ground_col_labels))
question_arg_type = d['question_arg_type']
question_arg = d['question_arg']
table_names = [[token.lemma_ for token in nlp(names)] for names in d['table_names']]
origin_table_names = [[wordnet_lemmatizer.lemmatize(x.lower()) for x in names.split(' ')] for names in
d['table_names']]
count += 1
easy_flag = False
for q_ind, q in enumerate(d['question_arg']):
q_str = " ".join(" ".join(x) for x in d['question_arg'])
if 'how many' in q_str or 'number of' in q_str or 'count of' in q_str:
easy_flag = True
if easy_flag:
# check whether the last one is a table word
for q_ind, q in enumerate(d['question_arg']):
if (q_ind > 0 and q == ['many'] and d['question_arg'][q_ind - 1] == ['how']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['number']) or (
q_ind > 0 and q == ['of'] and d['question_arg'][q_ind - 1] == ['count']):
re = multi_equal(question_arg_type, q_ind, ['table'], 2)
if re is not False:
# This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 2)
if re is not False:
table_result = re
result.append((d['query'], d['question'], table_result, d))
pass
else:
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
# This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
pass
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
zero_count += 1
break
else:
M_OP = False
for q_ind, q in enumerate(d['question_arg']):
if M_OP is False and q in [['than'], ['least'], ['most'], ['msot'], ['fewest']] or \
question_arg_type[q_ind] == ['M_OP']:
M_OP = True
re = multi_equal(question_arg_type, q_ind, ['table'], 3)
if re is not False:
# This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
else:
re = multi_option(question_arg, q_ind, d['table_names'], 3)
if re is not False:
table_result = re
# print(table_result)
result.append((d['query'], d['question'], table_result, d))
pass
else:
# zero_count += 1
re = multi_equal(question_arg_type, q_ind, ['table'], len(question_arg_type))
if re is not False:
# This step works for the "number of [table]" example
table_result = table_names[origin_table_names.index(question_arg[re])]
result.append((d['query'], d['question'], table_result, d))
break
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names,
ground_col_labels=ground_col_labels, q_ind=q_ind, N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
pass
if M_OP is False:
table_result = random_choice(question_arg=question_arg,
question_arg_type=question_arg_type,
names=table_names, ground_col_labels=ground_col_labels, q_ind=q_ind,
N=2,
origin_name=origin_table_names)
result.append((d['query'], d['question'], table_result, d))
for re in result:
table_names = [[token.lemma_ for token in nlp(names)] for names in re[3]['table_names']]
origin_table_names = [[x for x in names.split(' ')] for names in re[3]['table_names']]
if re[2] in table_names:
re[3]['rule_count'] = table_names.index(re[2])
else:
re[3]['rule_count'] = origin_table_names.index(re[2])
for data in datas:
if 'rule_count' in data:
str_replace = 'C(0) T(' + str(data['rule_count']) + ')'
replace_result = regex.sub('C\(0\) T\(.\)', str_replace, data['model_result'])
data['model_result_replace'] = replace_result
else:
data['model_result_replace'] = data['model_result']
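# A small usage sketch (not part of the original module): is_partial_match
# lemmatizes the query token and returns the lemmatized table name it occurs
# in, or False when zero or more than one table matches. It needs the spacy
# model en_core_web_sm loaded at the top of this file.
if __name__ == '__main__':
    print(is_partial_match('singers', ['concert', 'singer', 'stadium']))  # -> ['singer']
    print(is_partial_match('name', ['singer name', 'stadium name']))      # -> False (ambiguous)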
| 46.888889
| 117
| 0.474861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,390
| 0.14321
|