content stringlengths 5 1.05M |
|---|
import os
from glob import glob
from typing import List, Tuple
import albumentations as A
import cv2
import numpy as np
import torch
import torch.utils.data as data
from albumentations.pytorch import ToTensorV2 as ToTensor
class Transform:
    """Albumentations pipeline applied jointly to the four training images.

    The same resize/normalize/to-tensor chain is applied to the real photo
    and to the three anime-derived images so they stay pixel-aligned.
    """
    # Defaults map uint8 pixels into roughly [-1, 1].
    mean = [0.5, 0.5, 0.5]
    std = [0.5, 0.5, 0.5]

    def __init__(self, image_size: int, mean=None, std=None) -> None:
        """Build the pipeline; `mean`/`std` default to the class values."""
        mean = self.mean if mean is None else mean
        std = self.std if std is None else std
        self.transform = A.Compose(
            [
                A.Resize(image_size, image_size),
                A.Normalize(mean=mean, std=std, max_pixel_value=255),
                ToTensor(),
            ],
            # image, anime, anime_gray, smooth_gray all get the same ops.
            additional_targets={
                "anime": "image",
                "anime_gray": "image",
                "smooth_gray": "image",
            },
        )

    def __call__(
        self,
        real: np.ndarray,
        anime: np.ndarray,
        anime_gray: np.ndarray,
        smooth_gray: np.ndarray,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Apply the pipeline to all four images and return four tensors.

        Bug fix: the return annotation previously declared only two tensors
        although four values are returned.
        """
        images = self.transform(
            image=real,
            anime=anime,
            anime_gray=anime_gray,
            smooth_gray=smooth_gray,
        )
        real = images["image"]
        anime = images["anime"]
        anime_gray = images["anime_gray"]
        smooth_gray = images["smooth_gray"]
        return real, anime, anime_gray, smooth_gray
class Dataset(data.Dataset):
    """Pairs real photos with (anime, smoothed-anime) frames for training.

    Length is max(len(real), len(anime)); the shorter collection is cycled
    via modulo indexing so both domains are fully traversed each epoch.
    """

    def __init__(
        self,
        real_paths: List[str],
        anime_paths: List[str],
        smooth_paths: List[str],
        transform: Transform,
    ) -> None:
        super().__init__()
        self.real_paths = real_paths
        self.anime_paths = anime_paths
        self.smooth_paths = smooth_paths
        # Each anime frame must have a matching smoothed frame.
        assert len(anime_paths) == len(smooth_paths)
        # Bug fix: empty inputs previously surfaced later as an opaque
        # ZeroDivisionError in __getitem__; fail fast with a clear message.
        if not real_paths or not anime_paths:
            raise ValueError("real_paths and anime_paths must be non-empty")
        self.real_count = len(real_paths)
        self.anime_count = len(anime_paths)
        self.data_size = max(self.real_count, self.anime_count)
        self.transform = transform

    def __len__(self) -> int:
        return self.data_size

    @classmethod
    def color_loader(cls, path: str) -> np.ndarray:
        """Load an image file as an RGB ndarray (cv2 reads BGR)."""
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    @classmethod
    def convert_gray(cls, image: np.ndarray) -> np.ndarray:
        """Convert an RGB image to a 3-channel (H, W, 3) grayscale image."""
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        gray = np.asarray([gray] * 3)
        gray = np.transpose(gray, (1, 2, 0))
        return gray

    def __getitem__(
        self, index: int
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (real, anime, anime_gray, smooth_gray) transformed tensors.

        Bug fix: the return annotation previously declared two tensors
        although four are returned.
        """
        ridx = index % self.real_count
        aidx = index % self.anime_count
        real = self.color_loader(self.real_paths[ridx])
        anime = self.color_loader(self.anime_paths[aidx])
        anime_g = self.convert_gray(anime)
        smooth = self.color_loader(self.smooth_paths[aidx])
        smooth = self.convert_gray(smooth)
        real, anime, anime_g, smooth = self.transform(
            real, anime, anime_g, smooth
        )
        return real, anime, anime_g, smooth
def build_dataloader(args) -> data.DataLoader:
    """Build the training DataLoader from the directories configured in *args*.

    Expects: args.real_image_root, args.style_image_root (with 'style' and
    'smooth' subdirectories of matching *.jpg files), args.image_size and
    args.batch_size.

    Bug fix: the return annotation previously claimed a 3-tuple of loaders;
    a single DataLoader is (and always was) returned.
    """
    real_paths = glob(os.path.join(args.real_image_root, "*.jpg"))
    anime_paths = glob(os.path.join(args.style_image_root, "style", "*.jpg"))
    smooth_paths = glob(os.path.join(args.style_image_root, "smooth", "*.jpg"))
    image_size = args.image_size
    batch_size = args.batch_size
    transform = Transform(image_size)
    dataset = Dataset(
        real_paths,
        anime_paths,
        smooth_paths,
        transform,
    )
    # NOTE(review): shuffle=False is unusual for a training loader --
    # confirm shuffling is handled elsewhere (e.g. a sampler).
    dl = data.DataLoader(dataset, batch_size, shuffle=False, drop_last=True)
    return dl
|
"""
448. 找到所有数组中消失的数字
"""
def findDisappearedNumbers(nums):
    """LeetCode 448: return every value in 1..n missing from nums.

    Marks presence in-place by adding n at the slot a value points to, so no
    extra space is used; nums is mutated.
    """
    total = len(nums)
    for value in nums:
        slot = (value - 1) % total
        nums[slot] += total
    # Slots never bumped above `total` were never pointed at.
    return [idx + 1 for idx, value in enumerate(nums) if value <= total]
print(findDisappearedNumbers([4,3,2,7,8,2,3,1])) # [5, 6]
"""
442. 数组中重复的数据
"""
def findDuplicates(nums):
    """LeetCode 442: return values appearing twice in nums (in-place marking).

    Each occurrence adds n to its target slot; a slot above 2n was hit twice.
    nums is mutated.
    """
    count = len(nums)
    for value in nums:
        nums[(value - 1) % count] += count
    return [idx + 1 for idx, value in enumerate(nums) if value > 2 * count]
def other(nums):
    """Alternative LeetCode 442 solution: sign-flip marking.

    The first visit to a slot negates it; a second visit (already negative)
    reveals a duplicate. nums is mutated.
    """
    duplicates = []
    for value in nums:
        marker = int(abs(value))
        if nums[marker - 1] < 0:
            duplicates.append(marker)
        else:
            nums[marker - 1] *= -1
    return duplicates
print(findDuplicates([4,3,2,7,8,2,3,1])) # [2, 3]
print(other([4,3,2,7,8,2,3,1])) # [2, 3]
|
def alive(self):
    """Health-check endpoint: reply 200 with a literal 'Yes' body."""
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    self.wfile.write(bytes("Yes", "utf-8"))
def bad_request(self, error):
    """Reply 400 Bad Request with *error* (str) as the response body."""
    self.send_response(400)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    self.wfile.write(bytes(error, "utf-8"))
def custom_response(self, error, id):
    """Reply with an arbitrary status code *id* and *error* as the body.

    NOTE(review): `id` is the HTTP status code (and shadows the builtin);
    renaming would break keyword callers, so it is kept as-is.
    """
    self.send_response(id)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    self.wfile.write(bytes(error, "utf-8"))
import falcon
import os
from wsgiref import simple_server
from peach import WebHandler
def int_to_falcon_status(status):
    """Translate an integer (or int-like) HTTP status code into falcon's
    corresponding ``HTTP_<code>`` status constant."""
    attr_name = "HTTP_{}".format(int(status))
    return getattr(falcon, attr_name)
class FalconHandler(WebHandler):
    """WebHandler implementation that serves APIs through falcon/WSGI."""

    def __init__(self, api_factory=None):
        # Imported lazily to avoid a circular import with .api.
        from .api import FalconApiFactory
        super().__init__(api_factory or FalconApiFactory())

    def _load_config_from_pyfile(self, filename):
        """Execute *filename* as Python and collect its UPPERCASE names.

        Bug fix: the original used ``conf = object()``, but plain object
        instances have no ``__dict__``, so ``exec(..., conf.__dict__)``
        raised AttributeError. A fresh module object (the same approach
        Flask's Config uses) provides a writable attribute namespace.
        """
        import types
        conf = types.ModuleType('config')
        try:
            with open(filename, mode='rb') as config_file:
                exec(compile(config_file.read(), filename, 'exec'), conf.__dict__)
        except IOError as e:
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        return self._config_from_object(conf)

    def _config_from_object(self, obj):
        """Copy every UPPERCASE attribute of *obj* into a plain dict."""
        conf = {}
        for key in dir(obj):
            if key.isupper():
                conf[key] = getattr(obj, key)
        return conf

    def get_config(self, config):
        """Normalize *config*: a pyfile path, an arbitrary object, or a dict."""
        if isinstance(config, str) and os.path.isfile(config):
            config = self._load_config_from_pyfile(config)
        elif isinstance(config, object) and not isinstance(config, dict):
            # NOTE(review): everything is an `object`, so this branch catches
            # any non-dict, non-path value -- confirm that is the intent.
            config = self._config_from_object(config)
        return config

    def create_app(self, config):
        """Build the falcon application and let the factory register routes."""
        app = falcon.API()
        self._factory.build(app, config)
        return app

    def run(self, app, host, port):
        """Serve *app* forever with wsgiref's reference WSGI server."""
        httpd = simple_server.make_server(host, port, app)
        httpd.serve_forever()
|
# Print the first 10 terms of an arithmetic progression read from the user.
print('='*20)
pt = int(input('Primeiro termo: '))  # first term
r = int(input('Razão: '))  # common difference (ratio)
"""décimo = pt + (10 - 1) * r"""
# range() steps by r, stopping just before the 11th term.
# NOTE(review): r == 0 makes range() raise ValueError -- confirm inputs.
for c in range(pt, pt + (r*10), r):
    print('{}'.format(c), end=' -> ')
print('ACABOU')
|
# Sample RPG inventory used to demo dict iteration.
# NOTE(review): every leaf value is wrapped in a one-element set (e.g. {5});
# confirm whether plain scalars/strings were intended instead.
inventario = {
    'itens': {'hp_potion': {5}, 'flechas': {21}, 'tomo_feitico-fireball': {1}},
    'armas': {'principal': {'Espada de Aço Forjado (Ótimo)'}, 'secundaria': {'Adaga de Aço Polido'}, 'ranged': {'Arco de Caça Reforçado'}},
    'armadura': {'cabeca': {'Elmo de Aço (Ótimo)'}, 'peitoral': {'Peitoral de Ferro'}, 'calca': {'Calças de Lã'}, 'botas': {'Chinelo de Dedo'}},
    'mochila': {'Lanterna (Bem desgastada)': {67}}
}
dicionario_teste = {'nome':'Erick','idade':'18','sexo':'M','gosta_de':'Computadores, Aviões'}
# .items() yields (key, value) pairs.
for k, v in dicionario_teste.items():
    print(k, v)
for keys, values in inventario.items():
    print(f'\n{keys}:{values}')
def celsius(fahrenheit):
    """Convert *fahrenheit* to Celsius; print the message and return the value.

    Bug fix: the original returned ``print(...)``, i.e. always None. The
    computed temperature is now returned so callers can use it (backward
    compatible: the message is still printed, and no caller used the None).
    """
    c = (fahrenheit - 32) * 5/9
    print(f'O valor de {fahrenheit}ºF em ºC é: {c:.2f}')
    return c
celsius(77)
|
"""flint format command
:copyright: Copyright 2021 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import flint
def format_statements(srcdirs, includes=None, excludes=None):
    """Parse the given source directories with flint and print every
    statement in its reformatted form."""
    project = flint.parse(*srcdirs, includes=includes, excludes=excludes)
    for source in project.sources:
        for statement in source.statements:
            print(statement.reformat())
|
import re
def StringChallenge(strParam):
    """Evaluate a spelled-out arithmetic expression, e.g. 'oneplustwo'.

    Digit words form multi-digit numbers; 'plus'/'minus' are left-associative
    operators. The numeric result is spelled back out, prefixed with
    'negative' when below zero.
    """
    words = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    lookup = {word: digit for digit, word in enumerate(words)}
    lookup["plus"] = '+'
    lookup["minus"] = '-'
    apply_op = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b
    }
    # Tokenize: scan for any known word, then map each to an int or operator.
    tokens = [lookup[w] for w in re.findall("|".join(lookup.keys()), strParam)]
    # Accumulate the leading number.
    total, pos = 0, 0
    while isinstance(tokens[pos], int):
        total = 10 * total + tokens[pos]
        pos += 1
    # Fold the remaining (operator, number) pairs left to right.
    pending, current = None, 0
    for token in tokens[pos:]:
        if token in ('+', '-'):
            if pending:
                total = pending(total, current)
            current = 0
            pending = apply_op[token]
        else:
            current = 10 * current + token
    digits = str(pending(total, current))
    if digits.startswith('-'):
        return "negative" + "".join(words[int(d)] for d in digits[1:])
    return "".join(words[int(d)] for d in digits)
# keep this function call here
print(StringChallenge("oneminusoneone")) |
import pathlib
from panditas.models import DataFlow, DataSet, MergeMultipleRule
from panditas.transformation_rules import ConditionalFill, ConstantColumn, PivotTable
fixtures_path = "{0}/fixtures".format(pathlib.Path(__file__).parent)
def test_insurance_agency_experience():
    """End-to-end DataFlow test for an agency-experience report.

    Loads six CSV fixtures, outer-joins the facts and left-joins the
    dimensions, derives count/premium indicator columns from claim status
    and transaction type, then pivots by agency and line of business.
    Asserts the name of the final step and the output column set.
    """
    data_flow = DataFlow(
        name="Agency Experience",
        steps=[
            # ---- sources (CSV fixtures) ----
            DataSet(
                columns=["revisionId", "lossReserveBalance", "claimStatus"],
                df_path="{0}/claims.csv".format(fixtures_path),
                name="claims",
                source="csv",
            ),
            DataSet(
                columns=["revisionId", "policyId", "policyInforcePremium"],
                df_path="{0}/policy_state.csv".format(fixtures_path),
                name="inforce",
                source="csv",
            ),
            DataSet(
                columns=[
                    "revisionId",
                    "policyId",
                    "policyChangeTransactionType",
                    "policyChangeWrittenPremium",
                ],
                df_path="{0}/policy_changes.csv".format(fixtures_path),
                name="transactions",
                source="csv",
            ),
            DataSet(
                columns=["policyId", "policyNumber"],
                df_path="{0}/policies.csv".format(fixtures_path),
                name="policies",
                source="csv",
            ),
            DataSet(
                columns=["revisionId", "agencyName"],
                df_path="{0}/agencies.csv".format(fixtures_path),
                name="agencies",
                source="csv",
            ),
            DataSet(
                columns=["revisionId", "lineOfBusinessName"],
                df_path="{0}/lines.csv".format(fixtures_path),
                name="lines",
                source="csv",
            ),
            # ---- merge: facts outer-joined, dimensions left-joined ----
            MergeMultipleRule(
                data_sets=[
                    "claims",
                    "inforce",
                    "transactions",
                    "policies",
                    "agencies",
                    "lines",
                ],
                name="merge_facts_dims",
                merge_types=["outer", "outer", "outer", "left", "left"],
            ),
            # Claim Count
            ConstantColumn(
                column_name="claimCount", column_value=0, name="add_claim_count_column"
            ),
            ConditionalFill(
                fill_column="claimCount",
                fill_value=1,
                name="calculate_claim_count",
                where_column="claimStatus",
                where_condition="contains",
                where_condition_values=["Open"],
            ),
            # Policy New
            ConstantColumn(
                column_name="newCount", column_value=0, name="add_new_count_column"
            ),
            ConditionalFill(
                fill_column="newCount",
                fill_value=1,
                name="calculate_new_count",
                where_column="policyChangeTransactionType",
                where_condition="==",
                where_condition_values=["New"],
            ),
            ConstantColumn(
                column_name="newPremium", column_value=0, name="add_new_premium"
            ),
            # NOTE(review): newPremium/cancelPremium are filled with the
            # constant 1 rather than policyChangeWrittenPremium -- confirm
            # this is intentional test data and not a copy-paste slip.
            ConditionalFill(
                fill_column="newPremium",
                fill_value=1,
                name="calculate_new_premium",
                where_column="policyChangeTransactionType",
                where_condition="==",
                where_condition_values=["New"],
            ),
            # Policy cancel
            ConstantColumn(
                column_name="cancelCount",
                column_value=0,
                name="add_cancel_count_column",
            ),
            ConditionalFill(
                fill_column="cancelCount",
                fill_value=1,
                name="calculate_cancel_count",
                where_column="policyChangeTransactionType",
                where_condition="==",
                where_condition_values=["Canceled"],
            ),
            ConstantColumn(
                column_name="cancelPremium", column_value=0, name="add_cancel_premium"
            ),
            ConditionalFill(
                fill_column="cancelPremium",
                fill_value=1,
                name="calculate_cancel_premium",
                where_column="policyChangeTransactionType",
                where_condition="==",
                where_condition_values=["Canceled"],
            ),
            # ---- final aggregation ----
            PivotTable(
                group_columns=["agencyName", "lineOfBusinessName"],
                group_values=[
                    "claimCount",
                    "lossReserveBalance",
                    "newCount",
                    "newPremium",
                    "cancelCount",
                    "cancelPremium",
                    "policyInforcePremium",
                ],
                group_functions=["sum", "last", "sum", "sum", "sum", "sum", "max"],
                name="group_by_agency_line",
            ),
        ],
    )
    data_flow.run()
    assert data_flow.output_data_set == "group_by_agency_line"
    df = DataFlow.get_output_df(data_flow.output_data_set)
    assert sorted(df.columns.tolist()) == [
        "agencyName",
        "cancelCount",
        "cancelPremium",
        "claimCount",
        "lineOfBusinessName",
        "lossReserveBalance",
        "newCount",
        "newPremium",
        "policyInforcePremium",
    ]
|
from azure_utils.machine_learning import create_model
# Script entry point: delegate to the azure_utils model-creation routine.
if __name__ == '__main__':
    create_model.main()
|
"""Read a GO Annotation File (GAF) and store the data in a Python object.
Annotations available from the Gene Ontology Consortium:
http://current.geneontology.org/annotations/
"""
import sys
import os
import re
import collections as cx
import datetime
from goatools.anno.annoreader_base import AnnoReaderBase
from goatools.anno.init.utils import get_date_yyyymmdd
from goatools.anno.extensions.factory import get_extensions
__copyright__ = "Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
# pylint: disable=too-few-public-methods
class InitAssc(object):
    """Read annotation file and store a list of namedtuples."""
    def __init__(self, fin_gaf):
        # Path of the GAF file to read.
        self.fin_gaf = fin_gaf
        # Header text (str) once the '!' header block has been consumed.
        self.hdr = None
        # GafData instance from the last read; holds error bookkeeping.
        self.datobj = None
    # pylint: disable=too-many-arguments
    def init_associations(self, hdr_only, prt, namespaces, allow_missing_symbol):
        """Read GAF file. Store annotation data in a list of namedtuples.

        hdr_only: stop after the header block (returns an empty list).
        prt: optional stream for a one-line timing/progress report.
        namespaces: subset of {'BP', 'MF', 'CC'} to keep (None = all).
        allow_missing_symbol: accept lines with an empty DB_Symbol field.
        """
        import timeit
        tic = timeit.default_timer()
        nts = self._read_gaf_nts(hdr_only, namespaces, allow_missing_symbol)
        # GAF file has been read
        if prt:
            prt.write('HMS:{HMS} {N:7,} annotations READ: {ANNO} {NSs}\n'.format(
                N=len(nts), ANNO=self.fin_gaf,
                NSs=','.join(namespaces) if namespaces else '',
                HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic)))))
        # If there are illegal GAF lines ...
        if self.datobj:
            if self.datobj.ignored or self.datobj.illegal_lines:
                self.datobj.prt_error_summary(self.fin_gaf)
        return nts
        #### return self.evobj.sort_nts(nts, 'Evidence_Code')
    # pylint: disable=too-many-locals
    def _read_gaf_nts(self, hdr_only, namespaces, allow_missing_symbol):
        """Read GAF file. Store annotation data in a list of namedtuples.

        Consumes the '!' header first; once the first non-header line is
        seen, a GafData parser is created for the detected version and all
        following lines are parsed into namedtuples.
        """
        nts = []
        ver = None
        hdrobj = GafHdr()
        datobj = None
        # pylint: disable=not-callable
        ntobj_make = None
        get_gafvals = None
        lnum = -1
        line = ''
        # No namespace filtering needed when all three namespaces are wanted.
        get_all_nss = namespaces is None or namespaces == {'BP', 'MF', 'CC'}
        try:
            with open(self.fin_gaf) as ifstrm:
                for lnum, line in enumerate(ifstrm, 1):
                    # Read data
                    if get_gafvals:
                        # print(lnum, line)
                        flds = line.split('\t')
                        nspc = GafData.aspect2ns[flds[8]]  # 8 GAF Aspect -> BP, MF, or CC
                        if get_all_nss or nspc in namespaces:
                            gafvals = get_gafvals(flds, nspc)
                            if gafvals:
                                nts.append(ntobj_make(gafvals))
                            else:
                                datobj.ignored.append((lnum, line))
                    # Read header
                    elif datobj is None:
                        if line[0] == '!':
                            if ver is None and line[1:13] == 'gaf-version:':
                                ver = line[13:].strip()
                            hdrobj.chkaddhdr(line)
                        else:
                            # First non-'!' line: header is complete.
                            # NOTE(review): this line only initializes the
                            # parser and is not itself parsed as data --
                            # verify the first annotation is not dropped.
                            self.hdr = hdrobj.get_hdr()
                            if hdr_only:
                                return nts
                            datobj = GafData(ver, allow_missing_symbol)
                            get_gafvals = datobj.get_gafvals
                            ntobj_make = datobj.get_ntobj()._make
        # pylint: disable=broad-except
        except Exception as inst:
            import traceback
            traceback.print_exc()
            sys.stderr.write("\n **FATAL-gaf: {MSG}\n\n".format(MSG=str(inst)))
            sys.stderr.write("**FATAL-gaf: {FIN}[{LNUM}]:\n{L}".format(
                FIN=self.fin_gaf, L=line, LNUM=lnum))
            if datobj is not None:
                datobj.prt_line_detail(sys.stdout, line)
            sys.exit(1)
        self.datobj = datobj
        return nts
class GafData(object):
    """Extracts GAF fields from a GAF line."""
    # Zero-based column indexes that must hold exactly one value.
    spec_req1 = [0, 1, 2, 4, 6, 8, 11, 13, 14]
    # Required/optional marker per column, parallel to gafhdr (+ gafhdr2).
    req_str = ["REQ", "REQ", "REQ", "", "REQ", "REQ", "REQ", "", "REQ", "", "",
               "REQ", "REQ", "REQ", "REQ", "", ""]
    # GAF 'Aspect' letter -> GO namespace abbreviation.
    aspect2ns = {'P':'BP', 'F':'MF', 'C':'CC'}
    gafhdr = [  # Col Req?     Cardinality    Example
        #           --- -------- -------------- -----------------
        'DB',             #  0 required 1              UniProtKB
        'DB_ID',          #  1 required 1              P12345
        'DB_Symbol',      #  2 required 1              PHO3
        'Qualifier',      #  3 optional 0 or greater   NOT
        'GO_ID',          #  4 required 1              GO:0003993
        'DB_Reference',   #  5 required 1 or greater   PMID:2676709
        'Evidence_Code',  #  6 required 1              IMP
        'With_From',      #  7 optional 0 or greater   GO:0000346
        'NS',             #  8 required 1              P->BP F->MF C->CC
        'DB_Name',        #  9 optional 0 or 1         Toll-like receptor 4
        'DB_Synonym',     # 10 optional 0 or greater   hToll|Tollbooth
        'DB_Type',        # 11 required 1              protein
        'Taxon',          # 12 required 1 or 2         taxon:9606
        'Date',           # 13 required 1              20090118
        'Assigned_By',    # 14 required 1              SGD
    ]
    #                             Col Required Cardinality  Example
    gafhdr2 = [  #                --- -------- ------------ -------------------
        'Extension',              # 15 optional 0 or greater part_of(CL:0000576)
        'Gene_Product_Form_ID',   # 16 optional 0 or 1       UniProtKB:P12345-2
    ]
    # Column sets per GAF version (2.x adds the two gafhdr2 columns).
    gaf_columns = {
        "2.1" : gafhdr + gafhdr2,  # !gaf-version: 2.1
        "2.0" : gafhdr + gafhdr2,  # !gaf-version: 2.0
        "1.0" : gafhdr}            # !gaf-version: 1.0
    # Expected numbers of columns for various versions
    gaf_numcol = {
        "2.1" : 17,
        "2.0" : 17,
        "1.0" : 15}
    def __init__(self, ver, allow_missing_symbol=False):
        # GAF format version string, e.g. "2.1".
        self.ver = ver
        # GAF 2.x lines carry the two extra gafhdr2 columns.
        self.is_long = ver[0] == '2'
        self.flds = self.gaf_columns[ver]
        # pylint: disable=line-too-long
        # Optionally relax the DB_Symbol (col 2) single-value requirement.
        self.req1 = self.spec_req1 if not allow_missing_symbol else [i for i in self.spec_req1 if i != 2]
        # Store information about illegal lines seen in a GAF file from the field
        self.ignored = []  # Illegal GAF lines that are ignored (e.g., missing an ID)
        self.illegal_lines = cx.defaultdict(list)  # GAF lines that are missing information (missing taxon)
    def chk(self, annotations, fout_err):
        """Check annotations; log problems to *fout_err*, return True if clean."""
        for idx, ntd in enumerate(annotations):
            self._chk_fld(ntd, "Qualifier")          # optional 0 or greater
            self._chk_fld(ntd, "DB_Reference", 1)    # required 1 or greater
            self._chk_fld(ntd, "With_From")          # optional 0 or greater
            self._chk_fld(ntd, "DB_Name", 0, 1)      # optional 0 or 1
            self._chk_fld(ntd, "DB_Synonym")         # optional 0 or greater
            self._chk_fld(ntd, "Taxon", 1, 2)
            flds = list(ntd)
            self._chk_qty_eq_1(flds)
            # self._chk_qualifier(ntd.Qualifier, flds, idx)
            if not ntd.Taxon or len(ntd.Taxon) not in {1, 2}:
                self.illegal_lines['BAD TAXON'].append(
                    (idx, '**{I}) TAXON: {NT}'.format(I=idx, NT=ntd)))
        if self.illegal_lines:
            self.prt_error_summary(fout_err)
        return not self.illegal_lines
    def get_ntobj(self):
        """Get namedtuple object specific to version"""
        return cx.namedtuple("ntgafobj", " ".join(self.flds))
    def get_gafvals(self, flds, nspc):
        """Convert fields from string to preferred format for GAF ver 2.1 and 2.0."""
        flds[3] = self._get_qualifier(flds[3])   # 3 Qualifier
        flds[5] = self._get_set(flds[5])         # 5 DB_Reference
        flds[7] = self._get_set(flds[7])         # 7 With_From
        flds[8] = nspc                           # 8 GAF Aspect field converted to BP, MF, or CC
        flds[9] = self._get_set(flds[9])         # 9 DB_Name
        flds[10] = self._get_set(flds[10])       # 10 DB_Synonym
        flds[12] = self._do_taxons(flds[12])     # 12 Taxon
        flds[13] = get_date_yyyymmdd(flds[13])   # 13 Date 20190406
        # Version 2.x has these additional fields not found in v1.0
        if self.is_long:
            flds[15] = get_extensions(flds[15])  # Extensions (or Annotation_Extension)
            flds[16] = self._get_set(flds[16].rstrip())
        else:
            flds[14] = self._get_set(flds[14].rstrip())
        return flds
    @staticmethod
    def _get_qualifier(val):
        """Get qualifiers. Correct for inconsistent capitalization in GAF files"""
        quals = set()
        if val == '':
            return quals
        for val in val.split('|'):
            val = val.lower()
            # 'NOT' is the one qualifier conventionally kept upper-case.
            quals.add(val if val != 'not' else 'NOT')
        return quals
    @staticmethod
    def _get_set(val):
        """Further split a GAF value within a single field."""
        return set(val.split('|')) if val else set()
    @staticmethod
    def _get_list(val):
        """Further split a GAF value within a single field."""
        return val.split('|') if val else []
    def _chk_fld(self, ntd, name, qty_min=0, qty_max=None):
        """Check that field *name* has between qty_min and qty_max values."""
        vals = getattr(ntd, name)
        num_vals = len(vals)
        if num_vals < qty_min:
            self.illegal_lines['MIN QTY'].append(
                (-1, "FIELD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}".format(
                    F=name, Q=qty_min, V=vals)))
        if qty_max is not None:
            if num_vals > qty_max:
                self.illegal_lines['MAX QTY'].append(
                    (-1, "FIELD({F}): MAX QUANTITY({Q}) EXCEEDED: {V}\n{NT}".format(
                        F=name, Q=qty_max, V=vals, NT=ntd)))
    def _chk_qualifier(self, qualifiers, flds, lnum):
        """Check that qualifiers are expected values."""
        # http://geneontology.org/page/go-annotation-conventions#qual
        for qual in qualifiers:
            if qual not in AnnoReaderBase.exp_qualifiers:
                errname = 'UNEXPECTED QUALIFIER({QUAL})'.format(QUAL=qual)
                self.illegal_lines[errname].append((lnum, "\t".join(flds)))
    def prt_line_detail(self, prt, line):
        """Print line header and values in a readable format."""
        values = line.split('\t')
        self._prt_line_detail(prt, values)
    def _prt_line_detail(self, prt, values, lnum=""):
        """Print header and field values in a readable format."""
        data = zip(self.req_str, self.flds, values)
        pat = "{:2}) {:3} {:20} {}"
        txt = [pat.format(i, req, hdr, val) for i, (req, hdr, val) in enumerate(data)]
        prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT="\n".join(txt)))
    def _chk_qty_eq_1(self, flds):
        """Check that these fields have only one value: required 1."""
        for col in self.req1:
            if not flds[col]:
                self.illegal_lines['QTY 1'].append(
                    (-1, "**ERROR: UNEXPECTED REQUIRED VAL({V}) FOR COL({R}):{H}: ".format(
                        V=flds[col], H=self.gafhdr[col], R=col)))
                self.illegal_lines['QTY 1'].append((-1, "{H0}({DB}) {H1}({ID})\n".format(
                    H0=self.gafhdr[0], DB=flds[0], H1=self.gafhdr[1], ID=flds[1])))
    def _do_taxons(self, taxon_str):
        """Convert a 'taxon:NNN[|taxon:MMM]' string to a list of int taxon IDs."""
        taxons = self._get_list(taxon_str)
        taxons_str = [v.split(':')[1] for v in taxons]  # strip "taxon:"
        taxons_int = [int(s) for s in taxons_str if s]
        return taxons_int
    def prt_error_summary(self, fout_err):
        """Print a summary about the GAF file that was read."""
        # Get summary of error types and their counts
        errcnts = []
        if self.ignored:
            errcnts.append("  {N:9,} IGNORED associations\n".format(N=len(self.ignored)))
        if self.illegal_lines:
            for err_name, errors in self.illegal_lines.items():
                errcnts.append("  {N:9,} {ERROR}\n".format(N=len(errors), ERROR=err_name))
        # Save error details into a log file
        fout_log = self._wrlog_details_illegal_gaf(fout_err, errcnts)
        sys.stdout.write("  WROTE GAF ERROR LOG: {LOG}:\n".format(LOG=fout_log))
        for err_cnt in errcnts:
            sys.stdout.write(err_cnt)
    def _wrlog_details_illegal_gaf(self, fout_err, err_cnts):
        """Print details regarding illegal GAF lines seen to a log file."""
        # fout_err = "{}.log".format(fin_gaf)
        gaf_base = os.path.basename(fout_err)
        with open(fout_err, 'w') as prt:
            prt.write("ILLEGAL GAF ERROR SUMMARY:\n\n")
            for err_cnt in err_cnts:
                prt.write(err_cnt)
            prt.write("\n\nILLEGAL GAF ERROR DETAILS:\n\n")
            for lnum, line in self.ignored:
                prt.write("**WARNING: GAF LINE IGNORED: {FIN}[{LNUM}]:\n{L}\n".format(
                    FIN=gaf_base, L=line, LNUM=lnum))
                self.prt_line_detail(prt, line)
                prt.write("\n\n")
            for error, lines in self.illegal_lines.items():
                for lnum, line in lines:
                    prt.write("**WARNING: GAF LINE ILLEGAL({ERR}): {FIN}[{LNUM}]:\n{L}\n".format(
                        ERR=error, FIN=gaf_base, L=line, LNUM=lnum))
                    self.prt_line_detail(prt, line)
                    prt.write("\n\n")
        return fout_err
class GafHdr(object):
    """Collects the '!' comment header lines of a GAF file as it is read."""
    cmpline = re.compile(r'^!(\w[\w\s-]+:.*)$')

    def __init__(self):
        self.gafhdr = []

    def get_hdr(self):
        """Return the collected GAF header as a newline-joined paragraph."""
        return "\n".join(self.gafhdr)

    def chkaddhdr(self, line):
        """Save the header payload when *line* matches the header pattern."""
        found = self.cmpline.search(line)
        if found is not None:
            self.gafhdr.append(found.group(1))
# Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
|
# WARNING: This is an automatically generated file and will be overwritten
# by CellBlender on the next model export.
import os
import shared
import mcell as m
from parameters import *
# ---- subsystem ----
# Directory containing this generated model file.
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
# Volume molecule species 'vm'.
# NOTE(review): diffusion constant units are whatever CellBlender exported
# (typically cm^2/s) -- confirm against the exporting version.
vm = m.Species(
    name = 'vm',
    diffusion_constant_3d = 5.00000000000000024e-05
)
# ---- create subsystem object and add components ----
subsystem = m.Subsystem()
subsystem.add_species(vm)
|
##
## bund message interface
##
import time
import uuid
import msgpack
import socket
import rsa
from collections import UserDict
class Message(UserDict):
    """A msgpack-encoded message with optional RSA signing ("sealing").

    Constructor keywords starting with '_' are control arguments
    (`_data`: packed payload to load, `_keys`: key ring); every other
    keyword becomes a message field.
    """
    def __init__(self, **data):
        UserDict.__init__(self)
        # Key ring used by seal()/unseal(); None disables verification.
        # (Intentionally shadows UserDict.keys().)
        if '_keys' in data:
            self.keys = data['_keys']
        else:
            self.keys = None
        # Default envelope fields.
        self.data = {}
        self.data['stamp'] = time.time()
        self.data['seal'] = False
        self.data['version'] = 1.0
        self.data['from'] = None
        self.data['to'] = ['*']
        self.data['id'] = '%s@%s'%(socket.gethostname(), str(uuid.uuid4()))
        self.data['type'] = 'generic/message'
        self.data['msg'] = None
        # Bug fix: load a packed payload AFTER assigning the defaults. The
        # original called load() first and then reset self.data to fresh
        # defaults, silently discarding the loaded message.
        if '_data' in data:
            self.load(data['_data'])
        # Non-underscore keywords override individual fields.
        for key in data:
            if key[0] == '_':
                continue
            self.data[key] = data[key]
    def dumps(self):
        """Serialize the message dict with msgpack."""
        return msgpack.packb(self.data, use_bin_type=True)
    def load(self, data):
        """Replace the message dict with the unpacked *data* bytes."""
        self.data = msgpack.unpackb(data)
    def seal(self, key=None):
        """Sign the packed message; *key* defaults to the ring's own key."""
        if not key:
            key = self.keys.keys.key
        data = self.dumps()
        sign = rsa.sign(data, key, 'SHA-512')
        return msgpack.packb({'seal': True,
                              'msg': data,
                              'sign': sign,
                              'name': self.keys.name})
    def unseal(self, data):
        """Verify a sealed blob and load its payload; True on success.

        NOTE(review): byte keys (b'seal', ...) assume msgpack unpacks to raw
        bytes (msgpack < 1.0 behaviour) -- confirm the pinned version.
        """
        if not self.keys:
            return False
        _data = msgpack.unpackb(data)
        if not _data[b'seal']:
            return False
        try:
            rsa.verify(_data[b'msg'], _data[b'sign'], self.keys[_data[b'name'].decode('utf-8')])
            self.load(_data[b'msg'])
            return True
        # Bug fix: catch the specific failures (bad signature, unknown key
        # name) rather than a bare `except:` that also swallowed SystemExit
        # and KeyboardInterrupt.
        except (rsa.VerificationError, KeyError):
            return False
if __name__ == '__main__':
    # Smoke test: round-trip a message, seal it and verify the seal.
    # NOTE(review): `keyring` here is the project-local module providing
    # KeyRing, not the PyPI `keyring` package -- confirm.
    import keyring
    kr = keyring.KeyRing()
    m = Message(msg="Hello world!", _keys=kr)
    d = m.dumps()
    print("A sample message", repr(d))
    m2 = Message(_data=d, _keys=kr)
    print("A message ID",m['id'])
    e = m2.seal()
    print("Sealed message",repr(e))
    print("Seal verification", m2.unseal(e))
|
from utils import backend_is_non_db, check_can_migrate, get_models_for_db, get_model_by_name
class BackendRouter(object):
    """
    A router to send all non-db models to the correct backend
    """
    def db_for_read(self, model, **hints):
        """Read from the backend the model declares via `any_backend`."""
        return getattr(model, 'any_backend', None)

    def db_for_write(self, model, **hints):
        """Write to the backend the model declares via `any_backend`."""
        return getattr(model, 'any_backend', None)

    def allow_relation(self, obj1, obj2, **hints):
        # No opinion: defer to other routers / Django's default behaviour.
        return None

    def allow_migrate(self, db, app_label, model=None, **hints):
        """Allow migrations only on the backend a model belongs to."""
        model_name = hints.get('model_name', None)
        # Without both an app label and a model name we cannot decide.
        if not (app_label and model_name):
            return True
        model = get_model_by_name(app_label, model_name)
        if not model:
            return False
        backend = self.db_for_read(model)
        if backend == db:
            return check_can_migrate(db)
        if not backend and not backend_is_non_db(db):
            # Plain models migrate on ordinary database backends.
            return True
        return False
|
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
from setuptools import setup, find_packages
# Long description and license text are read from the repo's top-level files.
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    # NOTE: shadows the `license` builtin -- conventional in setup.py scripts.
    license = f.read()
setup(
    name='bloom',
    version='0.1.0',
    description='Design Experiments for Bloom Filters',
    long_description=readme,
    author='Ian Philpot',
    author_email='ianphil@microsoft.com',
    url='https://github.com/iphilpot/DoE_Bloom_Filter',
    license=license,
    packages=find_packages(exclude=('tests', 'docs'))
)
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
# Patterns for parsing PBS/Torque command output (pbsnodes / qstat).
# A bare item name on its own line, or a '<name> = <value>' attribute line.
regex_nodes = r"(?P<itemname>\S+)\n|((?P<name>\S+) = (?P<value>(.+\n?)))"
# Comma-separated name=value pairs (node 'status' attribute).
regex_status = r"(?P<name>[^,]+)=(?P<value>([^,]*))"
# Either a gpu[N]=<blob> entry or a plain name=value pair.
regex_gpu = (r"gpu\[(?P<gpu_num>\d+)\]=(?P<gpu_status>[^,]+)|"
             r"(?P<name>[^,]+)=(?P<value>([^,]*))")
# Semicolon-separated name=value pairs inside a gpu status blob.
regex_gpu_status = r"(?P<name>[^;]+)=(?P<value>([^;]*))"
# 'State:count' entries of a queue's state_count attribute.
regex_queue_state = r"(?P<state>\w+):(?P<value>\d+)"
# Comma-separated property names.
regex_node_properties = r"(?P<name>[^,]+)"
# '<number>[kb|mb]' memory quantities (matched case-insensitively at use site).
regex_memory = r"(?P<number>\d+)\s*(?P<measure>kb|mb|)$"
# 'Job Id: <id>' header lines or '<name> = <value>' attribute lines.
regex_job_info = r"Job Id: (?P<itemname>\S+)\n|((?P<name>\S+)(?=\s=\s)(\s=\s)(?P<value>(.+\n?)))"
# 'name=value' items of a job's Variable_List attribute.
regex_var_list = r"(?P<item_name>[^,]+)=(?P<item_value>[^,]+),?"
def parse_node_output(stdout):
    """Parse `pbsnodes` style output into a list of per-node dicts."""
    current = {}
    nodes = []
    for match in re.finditer(regex_nodes, stdout, re.MULTILINE):
        if match.group("itemname") is not None:
            # A bare name starts a new node record.
            current = {"node_name": match.group("itemname")}
            nodes.append(current)
            continue
        key = match.group("name")
        value = match.group("value").strip()
        # Structured attributes get their own sub-parsers.
        if key == "gpu_status":
            current[key] = parse_gpu(value)
        elif key == "status":
            current[key] = parse_node_status(value)
        elif key == "properties":
            current[key] = parse_node_properties(value)
        else:
            current[key] = value
    return nodes
def parse_queue_output(stdout):
    """Parse `qstat -Q -f` style output into a list of per-queue dicts."""
    current = {}
    queues = []
    for match in re.finditer(regex_nodes, stdout, re.MULTILINE):
        if match.group("itemname") is not None:
            # A bare name starts a new queue record.
            current = {"queue_name": match.group("itemname")}
            queues.append(current)
        elif match.group("name") == "state_count":
            current[match.group("name")] = parse_queue_state(
                match.group("value").rstrip()
            )
        else:
            current[match.group("name")] = match.group("value").strip()
    return queues
def parse_queue_state(state_string):
    """Parse a state_count value ('Queued:5 Running:2 ...') into {state: int}."""
    return {
        m.group("state"): int(m.group("value").strip())
        for m in re.finditer(regex_queue_state, state_string.strip())
    }
def parse_node_status(status_string):
    """Parse a node 'status' attribute (','-separated name=value) into a dict.

    Values that look like memory quantities are converted to integer MB.
    """
    return {
        m.group("name"): parse_num_values(m.group("value").strip())
        for m in re.finditer(regex_status, status_string.strip())
    }
def parse_node_properties(properties_string):
    """Split a ','-separated 'properties' attribute into a list of names."""
    return [
        m.group("name").strip()
        for m in re.finditer(regex_node_properties, properties_string.strip())
    ]
def parse_gpu(gpu_status_string):
    """Parse a node gpu_status attribute into {'gpu_list': [...], extras...}."""
    parsed = {"gpu_list": []}
    for match in re.finditer(regex_gpu, gpu_status_string.strip()):
        if match.group("gpu_num") is not None:
            # gpu[N]=<blob>: the blob is a ';'-separated name=value list.
            entry = {}
            for sub in re.finditer(regex_gpu_status, match.group("gpu_status")):
                entry[sub.group("name")] = parse_num_values(
                    sub.group("value").strip()
                )
            parsed["gpu_list"].append(entry)
        elif match.group("name") is not None:
            # Top-level name=value pair outside any gpu[N] entry.
            parsed[match.group("name")] = match.group("value").strip()
    return parsed
def parse_num_values(memory_string):
    """Convert '<n>kb' / '<n>mb' memory strings to an integer number of MB.

    Anything else -- including a bare number without a kb/mb suffix -- is
    returned unchanged as the original string.
    """
    match = re.match(regex_memory, memory_string.strip(), re.IGNORECASE)
    if match is not None:
        measure = match.group("measure")
        if measure is None or measure.lower() == "mb":
            return int(match.group("number"))
        if measure.lower() == "kb":
            return int(round(int(match.group("number")) / 1024))
    return memory_string
def parse_job_output(stdout):
    """Parse `qstat -f` output into a list of per-job attribute dicts.

    Bug fix: the original computed `var_matches` (a regex scan of every
    value) and never used it -- dead work on every attribute line; removed.
    """
    job = {}
    result = []
    # qstat wraps long values onto 8+-space-indented continuation lines;
    # splice them back together before matching.
    normalized = re.sub("\n {8,}", "", stdout)
    matches = re.finditer(regex_job_info, normalized, re.MULTILINE)
    for match in matches:
        if match.group("itemname") is not None:
            # A 'Job Id:' header starts a new job record.
            job = {}
            job["job_id"] = match.group("itemname")
            result.append(job)
        else:
            job[match.group("name").lower()] = match.group("value").strip()
    return result
def parse_var_list(list_string):
    """Parse a Variable_List value ('a=1,b=2,...') into a dict."""
    return {
        m.group("item_name").strip(): m.group("item_value").strip()
        for m in re.finditer(regex_var_list, list_string.strip())
    }
|
from __future__ import absolute_import
from django.contrib.auth import get_user_model
from django.test import TestCase
from ..forms import PostForm
from ..models import Blog, Post, Section
from .tests import randomword
class TestForms(TestCase):
    """Form tests for PostForm, focused on slug/title length handling."""
    def setUp(self):
        super().setUp()
        # Author for the posts created in the tests.
        self.user = get_user_model().objects.create_user(
            username="patrick",
            password="password"
        )
        self.user.save()
        # NOTE(review): assumes a Blog row already exists (e.g. created by a
        # data migration); Blog.objects.first() returns None otherwise.
        self.blog = Blog.objects.first()
        self.section = Section.objects.create(name="hello", slug="hello")
        self.content = "You won't believe what happened next!"
        self.teaser = "Only his dog knows the truth"
        # Max length of Post.title, used to build the edge-case title below.
        self.title_len = Post._meta.get_field("title").max_length
    def test_max_slug(self):
        """
        Ensure Post can be created with slug same length as title.
        """
        title = randomword(self.title_len)
        form_data = {
            "section": self.section,
            "title": title,
            "content": self.content,
            "teaser": self.teaser,
            "state": 1
        }
        form = PostForm(data=form_data)
        # slug field is not validated in form
        self.assertTrue(form.is_valid())
        # slug field is set (from title) in model .save() method
        self.assertTrue(form.save(blog=self.blog, author=self.user))
|
import decouple
from abc import ABC
class Settings(ABC):
    """Base settings object resolving environment-driven configuration."""

    def __init__(self):
        self.is_local = None
        self.root_path = None
        self._setup()

    def _setup(self):
        """Resolve IS_LOCAL and derive the filesystem root from it."""
        self.is_local = Settings._load_variable(variable='IS_LOCAL', default=False, cast=bool)
        # Local runs keep files next to the code; deployed runs use /tmp.
        if self.is_local:
            self.root_path = '.'
        else:
            self.root_path = '/tmp'

    @staticmethod
    def _load_variable(variable: str, default=None, cast: type = str):
        """Read an env/config variable via decouple, falling back to *default*."""
        try:
            value = decouple.config(variable, default=default, cast=cast)
        except decouple.UndefinedValueError:
            return default
        return value
|
from qtpy import QtWidgets, QtCore
from pyqtgraph.widgets.SpinBox import SpinBox
from pyqtgraph.parametertree.parameterTypes.basetypes import WidgetParameterItem
from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear
import numpy as np
class SliderSpinBox(QtWidgets.QWidget):
    """A SpinBox paired with a horizontal slider kept in sync with it.

    The slider always spans 0-100 (a percentage) and is mapped onto the
    spinbox bounds either linearly or logarithmically depending on
    ``subtype`` ('log' vs anything else, e.g. the default 'lin').
    Programmatic updates temporarily disconnect the cross signals to avoid
    feedback loops.
    """

    def __init__(self, *args, subtype='lin', **kwargs):
        super().__init__()
        self.subtype = subtype
        self.initUI(*args, **kwargs)
        # Re-exported spinbox signals for compatibility with QSpinBox users.
        self.valueChanged = self.spinbox.valueChanged  # (value)
        self.sigValueChanged = self.spinbox.sigValueChanged  # (self)
        self.sigValueChanging = self.spinbox.sigValueChanging  # (self, value) sent immediately; no delay.
        self.sigChanged = self.spinbox.sigValueChanged

    @property
    def opts(self):
        """Options dict of the underlying pyqtgraph SpinBox."""
        return self.spinbox.opts

    @opts.setter
    def opts(self, opts):
        # BUGFIX: a property setter receives exactly one positional argument;
        # the previous signature ``def opts(self, **opts)`` raised TypeError
        # on every ``widget.opts = {...}`` assignment. Accept a dict instead.
        self.setOpts(**opts)

    def setOpts(self, **opts):
        """Forward options to the spinbox; mirror 'visible' onto the slider."""
        self.spinbox.setOpts(**opts)
        if 'visible' in opts:
            self.slider.setVisible(opts['visible'])

    def insert_widget(self, widget, row=0):
        """Insert *widget* into the vertical layout at position *row*."""
        self.vlayout.insertWidget(row, widget)

    def initUI(self, *args, **kwargs):
        """Build the slider + spinbox UI.

        'value' (or the lower bound, or 1) seeds the spinbox; remaining
        kwargs are passed through to the pyqtgraph SpinBox.
        """
        self.vlayout = QtWidgets.QVBoxLayout()
        self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.slider.setMinimumWidth(50)
        self.slider.setMinimum(0)
        self.slider.setMaximum(100)
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif 'bounds' in kwargs:
            value = kwargs['bounds'][0]
        else:
            value = 1
        self.spinbox = SpinBox(parent=None, value=value, **kwargs)
        self.vlayout.addWidget(self.slider)
        self.vlayout.addWidget(self.spinbox)
        self.vlayout.setSpacing(0)
        self.vlayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.vlayout)
        self._bind_sync()

    def _unbind_sync(self):
        """Disconnect the cross-update signals (ignore if already detached)."""
        try:
            self.slider.valueChanged.disconnect(self.update_spinbox)
            self.spinbox.valueChanged.disconnect(self.update_slide)
        except Exception:
            pass

    def _bind_sync(self):
        """(Re)connect the cross-update signals."""
        self.slider.valueChanged.connect(self.update_spinbox)
        self.spinbox.valueChanged.connect(self.update_slide)

    def update_spinbox(self, val):
        """Set the spinbox from *val*, a percentage [0-100] of its bounds."""
        min_val = float(self.opts['bounds'][0])
        max_val = float(self.opts['bounds'][1])
        if self.subtype == 'log':
            val_out = scroll_log(val, min_val, max_val)
        else:
            val_out = scroll_linear(val, min_val, max_val)
        self._unbind_sync()
        self.spinbox.setValue(val_out)
        self._bind_sync()

    def update_slide(self, val):
        """Set the slider (0-100) from *val*, the spinbox value within bounds."""
        min_val = float(self.opts['bounds'][0])
        max_val = float(self.opts['bounds'][1])
        self._unbind_sync()
        # BUGFIX: this previously tested ``subtype == 'linear'`` while
        # update_spinbox tests ``subtype == 'log'``; the default subtype
        # 'lin' therefore used the logarithmic mapping in this direction
        # only. Treat anything other than 'log' as linear in both directions.
        if self.subtype == 'log':
            position = int((np.log10(val) - np.log10(min_val)) / (np.log10(max_val) - np.log10(min_val)) * 100)
        else:
            position = int((val - min_val) / (max_val - min_val) * 100)
        self.slider.setValue(position)
        self._bind_sync()

    def setValue(self, val):
        """Set the spinbox value (the slider follows via signals)."""
        self.spinbox.setValue(val)

    def value(self):
        """Return the current spinbox value."""
        return self.spinbox.value()
class SliderParameterItem(WidgetParameterItem):
    """Parameter item that renders its value as a SliderSpinBox."""

    def makeWidget(self):
        """Build the SliderSpinBox, honouring 'limits' or 'min'/'max' options.

        When no limits are given, the bounds default to
        [0, current parameter value].
        """
        opts = self.param.opts
        opts.setdefault('subtype', 'linear')
        # max value set to the parameter's current value when no max is given
        bounds = [0., self.param.value()]
        if 'limits' in opts:
            bounds = opts['limits']
        else:
            if 'min' in opts:
                bounds[0] = opts['min']
            if 'max' in opts:
                bounds[1] = opts['max']
        widget = SliderSpinBox(subtype=opts['subtype'], bounds=bounds, value=0)
        self.setSizeHint(1, QtCore.QSize(50, 50))
        return widget
# -*- coding: utf-8 -*-
"""
Created on Sun May 19 22:23:19 2013
@author: Belli
"""
from __future__ import division
from math import exp
# Coefficient tables of the IAPWS-95 residual Helmholtz free-energy
# formulation for water (Wagner & Pruss).  Index 0 of every array is a dummy
# placeholder so the 1-based term numbers [1]..[56] from the published tables
# can be used directly as Python indices.
# n: leading coefficients of the 56 residual terms.
n=[0,
    0.125335479355233e-1, #[1]
    0.78957634722828e1, #[2]
    -0.87803203303561e1, #[3]
    0.31802509345418, #[4]
    -0.26145533859358, #[5]
    -0.78199751687981e-2, #[6]
    0.88089493102134e-2, #[7]
    -0.66856572307965, #[8]
    0.20433810950965, #[9]
    -0.66212605039687e-4, #[10]
    -0.19232721156002, #[11]
    -0.25709043003438, #[12]
    0.16074868486251, #[13]
    -0.40092828925807e-1, #[14]
    0.39343422603254e-6, #[15]
    -0.75941377088144e-5, #[16]
    0.56250979351888e-3, #[17]
    -0.15608652257135e-4, #[18]
    0.11537996422951e-8, #[19]
    0.36582165144204e-6, #[20]
    -0.13251180074668e-11, #[21]
    -0.62639586912454e-9, #[22]
    -0.10793600908932, #[23]
    0.17611491008752e-1, #[24]
    0.22132295167546, #[25]
    -0.40247669763528, #[26]
    0.58083399985759, #[27]
    0.49969146990806e-2, #[28]
    -0.31358700712549e-1, #[29]
    -0.74315929710341, #[30]
    0.47807329915480, #[31]
    0.20527940895948e-1, #[32]
    -0.13636435110343, #[33]
    0.14180634400617e-1, #[34]
    0.83326504880713e-2, #[35]
    -0.29052336009585e-1, #[36]
    0.38615085574206e-1, #[37]
    -0.20393486513704e-1, #[38]
    -0.16554050063734e-2, #[39]
    0.19955571979541e-2, #[40]
    0.15870308324157e-3, #[41]
    -0.16388568342530e-4, #[42]
    0.43613615723811e-1, #[43]
    0.34994005463765e-1, #[44]
    -0.76788197844621e-1, #[45]
    0.22446277332006e-1, #[46]
    -0.62689710414685e-4, #[47]
    -0.55711118565645e-9, #[48]
    -0.19905718354408, #[49]
    0.31777497330738, #[50]
    -0.11841182425981, #[51]
    -0.31306260323435e2, #[52]
    0.31546140237781e2, #[53]
    -0.25213154341695e4, #[54]
    -0.14874640856724, #[55]
    0.31806110878444, #[56]
]
# d: density (delta) exponents of terms [1]..[54].
d=[0,
    1, #[1]
    1, #[2]
    1, #[3]
    2, #[4]
    2, #[5]
    3, #[6]
    4, #[7]
    1, #[8]
    1, #[9]
    1, #[10]
    2, #[11]
    2, #[12]
    3, #[13]
    4, #[14]
    4, #[15]
    5, #[16]
    7, #[17]
    9, #[18]
    10, #[19]
    11, #[20]
    13, #[21]
    15, #[22]
    1, #[23]
    2, #[24]
    2, #[25]
    2, #[26]
    3, #[27]
    4, #[28]
    4, #[29]
    4, #[30]
    5, #[31]
    6, #[32]
    6, #[33]
    7, #[34]
    9, #[35]
    9, #[36]
    9, #[37]
    9, #[38]
    9, #[39]
    10, #[40]
    10, #[41]
    12, #[42]
    3, #[43]
    4, #[44]
    4, #[45]
    5, #[46]
    14, #[47]
    3, #[48]
    6, #[49]
    6, #[50]
    6, #[51]
    3, #[52]
    3, #[53]
    3, #[54]
]
# t: temperature (tau) exponents of terms [1]..[54].
t= [0.00,
    -0.5, #[1]
    0.875, #[2]
    1, #[3]
    0.5, #[4]
    0.75, #[5]
    0.375, #[6]
    1, #[7]
    4, #[8]
    6, #[9]
    12, #[10]
    1, #[11]
    5, #[12]
    4, #[13]
    2, #[14]
    13, #[15]
    9, #[16]
    3, #[17]
    4, #[18]
    11, #[19]
    4, #[20]
    13, #[21]
    1, #[22]
    7, #[23]
    1, #[24]
    9, #[25]
    10, #[26]
    10, #[27]
    3, #[28]
    7, #[29]
    10, #[30]
    10, #[31]
    6, #[32]
    10, #[33]
    10, #[34]
    1, #[35]
    2, #[36]
    3, #[37]
    4, #[38]
    8, #[39]
    6, #[40]
    9, #[41]
    8, #[42]
    16, #[43]
    22, #[44]
    23, #[45]
    23, #[46]
    10, #[47]
    50, #[48]
    44, #[49]
    46, #[50]
    50, #[51]
    0, #[52]
    1, #[53]
    4, #[54]
]
# c: exponential-damping exponents (0 for the purely polynomial terms).
c=[0,
    0, #[1]
    0, #[2]
    0, #[3]
    0, #[4]
    0, #[5]
    0, #[6]
    0, #[7]
    1, #[8]
    1, #[9]
    1, #[10]
    1, #[11]
    1, #[12]
    1, #[13]
    1, #[14]
    1, #[15]
    1, #[16]
    1, #[17]
    1, #[18]
    1, #[19]
    1, #[20]
    1, #[21]
    1, #[22]
    2, #[23]
    2, #[24]
    2, #[25]
    2, #[26]
    2, #[27]
    2, #[28]
    2, #[29]
    2, #[30]
    2, #[31]
    2, #[32]
    2, #[33]
    2, #[34]
    2, #[35]
    2, #[36]
    2, #[37]
    2, #[38]
    2, #[39]
    2, #[40]
    2, #[41]
    2, #[42]
    3, #[43]
    3, #[44]
    3, #[45]
    3, #[46]
    4, #[47]
    6, #[48]
    6, #[49]
    6, #[50]
    6, #[51]
    0, #[52]
    0, #[53]
    0, #[54]
]
# alpha/beta/GAMMA/epsilon: Gaussian-bell parameters, nonzero only for
# terms [52]..[54] (beta also carries values for the nonanalytic terms
# [55]/[56]).
alpha=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 20, 20, 20]
beta=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,150, 150,
    250,
    0.3,
    0.3,
]
GAMMA=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1.21, #[52]
    1.21, #[53]
    1.25, #[54]
]
epsilon=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    1,
    1,
    1,
]
# a/b/A/B/C/D: parameters of the nonanalytic terms [55] and [56] only.
a=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    3.5,
    3.5,]
b=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0.85,
    0.95,
]
A=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0.32, #[55]
    0.32, #[56]
]
B=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    0.2, #[55]
    0.2, #[56]
]
C=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
    28., #[55]
    32., #[56]
]
D=[
    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,700.0,800.0]
# Evaluation point and central-finite-difference step sizes used to verify
# the analytic derivatives of the nonanalytic IAPWS-95 terms below.
tau = 1.01
delta = 0.95
dtau = 1e-10
ddelta = 1e-10

for i in [55]:
    def f(tau, delta):
        """Evaluate nonanalytic term *i* and all its analytic derivatives.

        Returns locals() so each intermediate quantity can be compared
        against a finite-difference estimate by name.
        """
        theta = (1.0-tau)+A[i]*pow(pow(delta-1.0,2),1.0/(2.0*beta[i]))
        DELTA = pow(theta,2)+B[i]*pow(pow(delta-1.0,2),a[i])
        PSI = exp(-C[i]*pow(delta-1.0,2)-D[i]*pow(tau-1.0,2))
        dPSI_dDelta = -2.0*C[i]*(delta-1.0)*PSI
        dDELTA_dDelta = (delta-1.0)*(A[i]*theta*2.0/beta[i]*pow(pow(delta-1.0,2),1.0/(2.0*beta[i])-1.0)+2.0*B[i]*a[i]*pow(pow(delta-1.0,2),a[i]-1.0))
        dDELTAbi_dDelta = b[i]*pow(DELTA,b[i]-1.0)*dDELTA_dDelta
        dPSI2_dDelta2 = (2.0*C[i]*pow(delta-1.0,2)-1.0)*2.0*C[i]*PSI
        dDELTA2_dDelta2 = 1.0/(delta-1.0)*dDELTA_dDelta+pow(delta-1.0,2)*(4.0*B[i]*a[i]*(a[i]-1.0)*pow(pow(delta-1.0,2),a[i]-2.0)+2.0*pow(A[i]/beta[i],2)*pow(pow(pow(delta-1.0,2),1.0/(2.0*beta[i])-1.0),2)+A[i]*theta*4.0/beta[i]*(1.0/(2.0*beta[i])-1.0)*pow(pow(delta-1.0,2),1.0/(2.0*beta[i])-2.0))
        dDELTAbi2_dDelta2 = b[i]*(pow(DELTA,b[i]-1.0)*dDELTA2_dDelta2+(b[i]-1.0)*pow(DELTA,b[i]-2.0)*pow(dDELTA_dDelta,2))
        dPSI_dTau = -2.0*D[i]*(tau-1.0)*PSI
        dDELTAbi_dTau = -2.0*theta*b[i]*pow(DELTA,b[i]-1.0)
        dPSI2_dDelta_dTau = 4.0*C[i]*D[i]*(delta-1.0)*(tau-1.0)*PSI
        dDELTAbi2_dDelta_dTau = -A[i]*b[i]*2.0/beta[i]*pow(DELTA,b[i]-1.0)*(delta-1.0)*pow(pow(delta-1.0,2),1.0/(2.0*beta[i])-1.0)-2.0*theta*b[i]*(b[i]-1.0)*pow(DELTA,b[i]-2.0)*dDELTA_dDelta
        dPSI3_dDelta2_dTau = (2.0*C[i]*pow(delta-1.0,2)-1.0)*2.0*C[i]*dPSI_dTau
        dDELTA_dTau = -2*theta
        dDELTA2_dDelta_dTau = 2.0*A[i]/(beta[i])*pow(pow(delta-1,2),1.0/(2.0*beta[i])-0.5)
        dDELTA3_dDelta2_dTau = 2.0*A[i]*(beta[i]-1)/(beta[i]*beta[i])*pow(pow(delta-1,2),1/(2*beta[i])-1.0)
        dDELTAbim1_dTau = (b[i]-1)*pow(DELTA,b[i]-2)*dDELTA_dTau
        dDELTAbim2_dTau = (b[i]-2)*pow(DELTA,b[i]-3)*dDELTA_dTau
        Line1 = dDELTAbim1_dTau*dDELTA2_dDelta2 + pow(DELTA,b[i]-1)*dDELTA3_dDelta2_dTau
        Line2 = (b[i]-1)*(dDELTAbim2_dTau*pow(dDELTA_dDelta,2)+pow(DELTA,b[i]-2)*2*dDELTA_dDelta*dDELTA2_dDelta_dTau)
        dDELTAbi3_dDelta2_dTau = b[i]*(Line1+Line2)
        ddelta2 = n[i]*(pow(DELTA,b[i])*(2.0*dPSI_dDelta+delta*dPSI2_dDelta2)+2.0*dDELTAbi_dDelta*(PSI+delta*dPSI_dDelta)+dDELTAbi2_dDelta2*delta*PSI)
        dPSI3_dDelta3 = 2.0*C[i]*PSI*(-4*C[i]*C[i]*pow(delta-1.0,3)+6*C[i]*(delta-1))
        dtheta_dDelta = A[i]/(2*beta[i])*pow(pow(delta-1,2),1/(2*beta[i])-1)*2*(delta-1)
        PI = 4*B[i]*a[i]*(a[i]-1)*pow(pow(delta-1,2),a[i]-2)+2*pow(A[i]/beta[i],2)*pow(pow(delta-1,2),1/beta[i]-2)+4*A[i]*theta/beta[i]*(1/(2*beta[i])-1)*pow(pow(delta-1,2),1/(2*beta[i])-2)
        dPI_dDelta = -8*B[i]*a[i]*(a[i]-1)*(a[i]-2)*pow(pow(delta-1,2),a[i]-5.0/2.0)-8*pow(A[i]/beta[i],2)*(1/(2*beta[i])-1)*pow(pow(delta-1,2),1/beta[i]-5.0/2.0)-(8*A[i]*theta)/beta[i]*(1/(2*beta[i])-1)*(1/(2*beta[i])-2)*pow(pow(delta-1,2),1/(2*beta[i])-5.0/2.0)+4*A[i]/beta[i]*(1/(2*beta[i])-1)*pow(pow(delta-1,2),1/(2*beta[i])-2)*dtheta_dDelta
        dDELTA3_dDelta3 = 1/(delta-1)*dDELTA2_dDelta2-1/pow(delta-1,2)*dDELTA_dDelta+pow(delta-1,2)*dPI_dDelta+2*(delta-1)*PI
        dDELTAbi3_dDelta3 = b[i]*(pow(DELTA,b[i]-1)*dDELTA3_dDelta3+dDELTA2_dDelta2*(b[i]-1)*pow(DELTA,b[i]-2)*dDELTA_dDelta+(b[i]-1)*(pow(DELTA,b[i]-2)*2*dDELTA_dDelta*dDELTA2_dDelta2+pow(dDELTA_dDelta,2)*(b[i]-2)*pow(DELTA,b[i]-3)*dDELTA_dDelta))
        Line1 = pow(DELTA,b[i])*(2*dPSI2_dDelta_dTau+delta*dPSI3_dDelta2_dTau)+dDELTAbi_dTau*(2*dPSI_dDelta+delta*dPSI2_dDelta2)
        Line2 = 2*dDELTAbi_dDelta*(dPSI_dTau+delta*dPSI2_dDelta_dTau)+2*dDELTAbi2_dDelta_dTau*(PSI+delta*dPSI_dDelta)
        Line3 = dDELTAbi2_dDelta2*delta*dPSI_dTau + dDELTAbi3_dDelta2_dTau*delta*PSI
        ddelta2_dtau = n[i]*(Line1+Line2+Line3)
        ddelta3 = n[i]*(pow(DELTA,b[i])*(3.0*dPSI2_dDelta2+delta*dPSI3_dDelta3)+3.0*dDELTAbi_dDelta*(2*dPSI_dDelta+delta*dPSI2_dDelta2)+3*dDELTAbi2_dDelta2*(PSI+delta*dPSI_dDelta)+dDELTAbi3_dDelta3*PSI*delta)
        return locals()

    # Central differences around the base point.
    base = f(tau, delta)
    plus_tau = f(tau+dtau, delta)
    minus_tau = f(tau-dtau, delta)
    plus_delta = f(tau, delta+ddelta)
    minus_delta = f(tau, delta-ddelta)
    # Earlier spot checks, kept for reference:
    # print(base['dDELTA_dDelta'], (plus_delta['DELTA']-minus_delta['DELTA'])/(2*ddelta))
    # print(base['dDELTA_dTau'], (plus_tau['DELTA']-minus_tau['DELTA'])/(2*dtau))
    # print(base['dDELTA2_dDelta_dTau'], (plus_tau['dDELTA_dDelta']-minus_tau['dDELTA_dDelta'])/(2*dtau))
    # print(base['dDELTA3_dDelta2_dTau'], (plus_delta['dDELTA2_dDelta_dTau']-minus_delta['dDELTA2_dDelta_dTau'])/(2*ddelta))
    # print(base['dDELTAbi3_dDelta2_dTau'], (plus_delta['dDELTAbi2_dDelta_dTau']-minus_delta['dDELTAbi2_dDelta_dTau'])/(2*ddelta))
    # print(base['ddelta2_dtau'], (plus_tau['ddelta2']-minus_tau['ddelta2'])/(2*dtau))
    # print(base['dPSI3_dDelta3'], (plus_delta['dPSI2_dDelta2']-minus_delta['dPSI2_dDelta2'])/(2*ddelta))
    # print(base['dtheta_dDelta'], (plus_delta['theta']-minus_delta['theta'])/(2*ddelta))
    # Python 3 fix: the original used Python 2 print *statements*, which are
    # syntax errors under Python 3; each line prints the analytic derivative
    # next to its finite-difference estimate.
    print(base['dPI_dDelta'], (plus_delta['PI']-minus_delta['PI'])/(2*ddelta))
    print(base['dDELTA3_dDelta3'], (plus_delta['dDELTA2_dDelta2']-minus_delta['dDELTA2_dDelta2'])/(2*ddelta))
    print(base['dDELTAbi3_dDelta3'], (plus_delta['dDELTAbi2_dDelta2']-minus_delta['dDELTAbi2_dDelta2'])/(2*ddelta))
    print(base['ddelta3'], (plus_delta['ddelta2']-minus_delta['ddelta2'])/(2*ddelta))
import dynamic_tensor

# NNAPI test-generator spec: TRANSPOSE applied to a dynamically shaped input.
# Model/Parameter/Output/Example are injected by the test harness that
# executes this spec file — they are not imported here.
model = Model()
model_input_shape = [1, 2, 2, 1]

# Wraps the model so the input's shape is fed at run time through a shape
# tensor instead of being fixed at build time.
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, model_input_shape)

test_node_input = dynamic_layer.getTestNodeInput()

# Permutation [0, 2, 1, 3]: swap the two middle axes.
perms = Parameter("perms", "TENSOR_INT32", "{4}", [0, 2, 1, 3])
model_output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
model_transpose = model.Operation("TRANSPOSE", test_node_input, perms).To(model_output)

# Transposing the inner 2x2 block of [[1, 2], [3, 4]] yields [[1, 3], [2, 4]].
model_input_data = [1.0, 2.0, 3.0, 4.0]
model_output_data = [1.0, 3.0, 2.0, 4.0]

Example({
    dynamic_layer.getModelInput(): model_input_data,
    dynamic_layer.getShapeInput(): model_input_shape,
    model_output: model_output_data,
}, model=model_transpose)
|
try:
import concurrent.futures as futures
except ImportError:
try:
import futures
except ImportError:
futures = None
import zipfile
import shutil
import tempfile
import requests
from os import path
# --- Globals ----------------------------------------------
PLUGINS = """
ack.vim https://github.com/mileszs/ack.vim
bufexplorer https://github.com/jlanzarotta/bufexplorer
ctrlp.vim https://github.com/ctrlpvim/ctrlp.vim
nerdtree https://github.com/scrooloose/nerdtree
vim-colors-solarized https://github.com/altercation/vim-colors-solarized
lightline.vim https://github.com/itchyny/lightline.vim
""".strip()
GITHUB_ZIP = "%s/archive/master.zip"
SOURCE_DIR = path.join(path.dirname(__file__), "sources_non_forked")
def download_extract_replace(plugin_name, zip_path, temp_dir, source_dir):
    """Download a plugin zipball, extract it, and replace the installed copy.

    The GitHub master zipball extracts to a ``<plugin_name>-master``
    directory inside *temp_dir*, which is then moved over
    ``source_dir/<plugin_name>``.
    """
    temp_zip_path = path.join(temp_dir, plugin_name)
    # Download the archive; use context managers so the file handle is
    # closed deterministically (the original leaked it via
    # open(...).write(...)) and the zip is released before the move.
    req = requests.get(zip_path)
    with open(temp_zip_path, "wb") as zip_out:
        zip_out.write(req.content)
    with zipfile.ZipFile(temp_zip_path) as zip_f:
        zip_f.extractall(temp_dir)
    # NOTE: the original joined temp_dir twice; since the inner join already
    # yields an absolute path, a single join is equivalent.
    plugin_temp_path = path.join(temp_dir, "%s-master" % plugin_name)
    # Remove the current plugin (if present) and replace it with the
    # freshly extracted one.
    plugin_dest_path = path.join(source_dir, plugin_name)
    try:
        shutil.rmtree(plugin_dest_path)
    except OSError:
        pass
    shutil.move(plugin_temp_path, plugin_dest_path)
    print("Updated {0}".format(plugin_name))
def update(plugin):
    """Update one plugin given a "name github_url" line from PLUGINS.

    Relies on the module-global ``temp_directory`` created in __main__.
    """
    # split() (rather than split(" ")) tolerates any amount of whitespace
    # between the plugin name and its URL, so aligning the PLUGINS table
    # with extra spaces no longer raises a ValueError.
    name, github_url = plugin.split()
    zip_path = GITHUB_ZIP % github_url
    download_extract_replace(name, zip_path, temp_directory, SOURCE_DIR)
if __name__ == "__main__":
    # Shared scratch directory for all downloads; removed on exit.
    temp_directory = tempfile.mkdtemp()
    try:
        if futures:
            # Downloads are I/O bound, so fan out over 16 worker threads.
            with futures.ThreadPoolExecutor(16) as executor:
                executor.map(update, PLUGINS.splitlines())
        else:
            # concurrent.futures unavailable (very old Python): update serially.
            [update(x) for x in PLUGINS.splitlines()]
    finally:
        shutil.rmtree(temp_directory)
|
#!/usr/bin/env python
__author__ = 'Will Kamp'
__copyright__ = 'Copyright 2013, Matrix Mariner Inc.'
__license__ = 'BSD'
__email__ = 'will@mxmariner.com'
__status__ = 'Development' # 'Prototype', 'Development', or 'Production'
'''Downloads noaa product catalog xml by region name and retrieves listing of chart files in the catalog
'''
from urllib import request
import os
from . import config
from xml.dom import minidom
# NOAA RNC product-catalog XML endpoints, keyed by region name.
# The commented-out entries (districts, states, AK subsets) are known
# alternative catalogs that are currently unused.
xml_urls = {'NOAA_ALL': 'http://www.charts.noaa.gov/RNCs/RNCProdCat_19115.xml',
            #'DISTRICT_01': 'http://www.charts.noaa.gov/RNCs/01CGD_RNCProdCat_19115.xml',
            #'DISTRICT_05': 'http://www.charts.noaa.gov/RNCs/05CGD_RNCProdCat_19115.xml',
            #'DISTRICT_07': 'http://www.charts.noaa.gov/RNCs/07CGD_RNCProdCat_19115.xml',
            #'DISTRICT_08': 'http://www.charts.noaa.gov/RNCs/08CGD_RNCProdCat_19115.xml',
            #'DISTRICT_09': 'http://www.charts.noaa.gov/RNCs/09CGD_RNCProdCat_19115.xml',
            #'DISTRICT_11': 'http://www.charts.noaa.gov/RNCs/11CGD_RNCProdCat_19115.xml',
            #'DISTRICT_13': 'http://www.charts.noaa.gov/RNCs/13CGD_RNCProdCat_19115.xml',
            #'DISTRICT_14': 'http://www.charts.noaa.gov/RNCs/14CGD_RNCProdCat_19115.xml',
            #'DISTRICT_17': 'http://www.charts.noaa.gov/RNCs/17CGD_RNCProdCat_19115.xml',
            'REGION_02': 'http://www.charts.noaa.gov/RNCs/02Region_RNCProdCat_19115.xml',
            'REGION_03': 'http://www.charts.noaa.gov/RNCs/03Region_RNCProdCat_19115.xml',
            'REGION_04': 'http://www.charts.noaa.gov/RNCs/04Region_RNCProdCat_19115.xml',
            'REGION_06': 'http://www.charts.noaa.gov/RNCs/06Region_RNCProdCat_19115.xml',
            'REGION_07': 'http://www.charts.noaa.gov/RNCs/07Region_RNCProdCat_19115.xml',
            'REGION_08': 'http://www.charts.noaa.gov/RNCs/08Region_RNCProdCat_19115.xml',
            'REGION_10': 'http://www.charts.noaa.gov/RNCs/10Region_RNCProdCat_19115.xml',
            'REGION_12': 'http://www.charts.noaa.gov/RNCs/12Region_RNCProdCat_19115.xml',
            'REGION_13': 'http://www.charts.noaa.gov/RNCs/13Region_RNCProdCat_19115.xml',
            'REGION_14': 'http://www.charts.noaa.gov/RNCs/14Region_RNCProdCat_19115.xml',
            'REGION_15': 'http://www.charts.noaa.gov/RNCs/15Region_RNCProdCat_19115.xml',
            'REGION_17': 'http://www.charts.noaa.gov/RNCs/17Region_RNCProdCat_19115.xml',
            'REGION_22': 'http://www.charts.noaa.gov/RNCs/22Region_RNCProdCat_19115.xml',
            'REGION_24': 'http://www.charts.noaa.gov/RNCs/24Region_RNCProdCat_19115.xml',
            'REGION_26': 'http://www.charts.noaa.gov/RNCs/26Region_RNCProdCat_19115.xml',
            'REGION_30': 'http://www.charts.noaa.gov/RNCs/30Region_RNCProdCat_19115.xml',
            'REGION_32': 'http://www.charts.noaa.gov/RNCs/32Region_RNCProdCat_19115.xml',
            'REGION_34': 'http://www.charts.noaa.gov/RNCs/34Region_RNCProdCat_19115.xml',
            'REGION_36': 'http://www.charts.noaa.gov/RNCs/36Region_RNCProdCat_19115.xml',
            'REGION_40': 'http://www.charts.noaa.gov/RNCs/40Region_RNCProdCat_19115.xml',
            #'AK_N': 'http://www.charts.noaa.gov/RNCs/36Region_RNCProdCat_19115.xml',
            #'AK_S': 'http://www.charts.noaa.gov/RNCs/34Region_RNCProdCat_19115.xml',
            #'CT': 'http://www.charts.noaa.gov/RNCs/CT_RNCProdCat_19115.xml',
            #'GA': 'http://www.charts.noaa.gov/RNCs/GA_RNCProdCat_19115.xml',
            #'IL': 'http://www.charts.noaa.gov/RNCs/IL_RNCProdCat_19115.xml',
            #'MA': 'http://www.charts.noaa.gov/RNCs/MA_RNCProdCat_19115.xml',
            #'MI': 'http://www.charts.noaa.gov/RNCs/MI_RNCProdCat_19115.xml',
            #'NC': 'http://www.charts.noaa.gov/RNCs/NC_RNCProdCat_19115.xml',
            #'NV': 'http://www.charts.noaa.gov/RNCs/NV_RNCProdCat_19115.xml',
            #'OR': 'http://www.charts.noaa.gov/RNCs/OR_RNCProdCat_19115.xml',
            #'PR': 'http://www.charts.noaa.gov/RNCs/PR_RNCProdCat_19115.xml',
            #'TX': 'http://www.charts.noaa.gov/RNCs/TX_RNCProdCat_19115.xml',
            #'WA': 'http://www.charts.noaa.gov/RNCs/WA_RNCProdCat_19115.xml',
            #'AL': 'http://www.charts.noaa.gov/RNCs/AL_RNCProdCat_19115.xml',
            #'DE': 'http://www.charts.noaa.gov/RNCs/DE_RNCProdCat_19115.xml',
            #'HI': 'http://www.charts.noaa.gov/RNCs/HI_RNCProdCat_19115.xml',
            #'IN': 'http://www.charts.noaa.gov/RNCs/IN_RNCProdCat_19115.xml',
            #'MD': 'http://www.charts.noaa.gov/RNCs/MD_RNCProdCat_19115.xml',
            #'MN': 'http://www.charts.noaa.gov/RNCs/MN_RNCProdCat_19115.xml',
            #'NH': 'http://www.charts.noaa.gov/RNCs/NH_RNCProdCat_19115.xml',
            #'NY': 'http://www.charts.noaa.gov/RNCs/NY_RNCProdCat_19115.xml',
            #'PA': 'http://www.charts.noaa.gov/RNCs/PA_RNCProdCat_19115.xml',
            #'RI': 'http://www.charts.noaa.gov/RNCs/RI_RNCProdCat_19115.xml',
            #'VA': 'http://www.charts.noaa.gov/RNCs/VA_RNCProdCat_19115.xml',
            #'WI': 'http://www.charts.noaa.gov/RNCs/WI_RNCProdCat_19115.xml',
            #'CA': 'http://www.charts.noaa.gov/RNCs/CA_RNCProdCat_19115.xml',
            #'FL': 'http://www.charts.noaa.gov/RNCs/FL_RNCProdCat_19115.xml',
            #'ID': 'http://www.charts.noaa.gov/RNCs/ID_RNCProdCat_19115.xml',
            #'LA': 'http://www.charts.noaa.gov/RNCs/LA_RNCProdCat_19115.xml',
            #'ME': 'http://www.charts.noaa.gov/RNCs/ME_RNCProdCat_19115.xml',
            #'MS': 'http://www.charts.noaa.gov/RNCs/MS_RNCProdCat_19115.xml',
            #'NJ': 'http://www.charts.noaa.gov/RNCs/NJ_RNCProdCat_19115.xml',
            #'OH': 'http://www.charts.noaa.gov/RNCs/OH_RNCProdCat_19115.xml',
            #'PO': 'http://www.charts.noaa.gov/RNCs/PO_RNCProdCat_19115.xml',
            #'SC': 'http://www.charts.noaa.gov/RNCs/SC_RNCProdCat_19115.xml',
            #'VT': 'http://www.charts.noaa.gov/RNCs/VT_RNCProdCat_19115.xml'
            }
#override the NOAA XML file and add these extra charts
chart_additions = {'REGION_06': ['12200_1.KAP', '13003_1.KAP']}
class NoaaXmlReader:
    """Reads a NOAA RNC product-catalog XML and lists the chart files in it.

    The catalog is downloaded on first use and cached as a file in
    *xml_dir* (``config.noaa_meta_dir`` by default).
    """

    def __init__(self, xml_url_key, xml_dir=None):
        """xml_url_key is a key of ``xml_urls``, e.g. 'REGION_04'."""
        if xml_dir is None:
            xml_dir = config.noaa_meta_dir
        # chart_covers are not charts and should be skipped
        self.chart_covers = {'12352_8.KAP', '12364_24.KAP', '12372_19.KAP', '13221_2.KAP', '13229_15.KAP',
                             '14786_79.KAP', '14786_80.KAP', '14786_81.KAP', '14786_82.KAP', '14786_83.KAP',
                             '14786_84.KAP', '14786_85.KAP', '14786_86.KAP', '14786_87.KAP', '14786_88.KAP',
                             '14842_45.KAP', '14842_46.KAP', '14842_47.KAP', '14842_48.KAP', '14842_49.KAP',
                             '14842_50.KAP', '14842_51.KAP', '14846_39.KAP', '14846_40.KAP', '14846_41.KAP',
                             '14846_42.KAP', '14846_43.KAP', '14846_44.KAP', '14853_48.KAP', '14853_49.KAP',
                             '14853_50.KAP', '14853_51.KAP', '14853_52.KAP', '14853_53.KAP', '14853_54.KAP',
                             '14886_15.KAP', '14886_16.KAP', '14886_17.KAP', '14886_18.KAP', '14886_19.KAP',
                             '14916_37.KAP', '14916_38.KAP', '14916_39.KAP', '14916_40.KAP', '14916_41.KAP',
                             '14916_42.KAP', '14916_43.KAP', '14926_33.KAP', '14926_34.KAP', '14926_35.KAP',
                             '14926_36.KAP', '14926_37.KAP', '11324_2.KAP', '18423_19.KAP', '18445_17.KAP',
                             '18652_20.KAP', '12285_19.KAP', '12285_18.KAP', '12205_13.KAP', '11451_16.KAP',
                             '11451_17.KAP', '11326_7.KAP'}
        #18445_8 is identical to another chart that has feet depth units that we modified the header
        #1116A_1.KAP and 1117A_1.KAP have identical non lease block charts
        self.problem_charts = {'12206_6.KAP', '5161_1.KAP', '18445_7.KAP', '1116A_1.KAP', '1117A_1.KAP', '18445_8.KAP'}
        self.region = xml_url_key
        xml_url = xml_urls[xml_url_key]
        self.region_name = xml_url.split('/')[-1]
        xml_file_path = os.path.join(xml_dir, self.region_name)
        if not os.path.isfile(xml_file_path):
            print('retrieving xml from NOAA: ' + self.region_name)
            # Download the catalog; context managers close both the HTTP
            # response and the output file deterministically (the original
            # leaked the urlopen response).
            with open(xml_file_path, "w") as xml_out:
                with request.urlopen(request.Request(url=xml_url)) as response:
                    xml_out.write(response.read().decode('utf-8'))
        # Kept open as an attribute: get_map_files() parses it with minidom.
        self.xml_file = open(xml_file_path)

    def get_map_files(self):
        """Return the sorted KAP file names listed in the catalog.

        Chart covers and known problem charts are skipped; any manual
        additions for this region from ``chart_additions`` are appended.
        """
        dom = minidom.parse(self.xml_file)
        map_files = []
        for node in dom.getElementsByTagName('EX_Extent'):
            for child_node in node.getElementsByTagName('gco:CharacterString'):
                text = child_node.toxml()
                # The element text looks like '... file name: <chart>.KAP ...'.
                kap = text[text.find('file name: ') + 11:text.find('.KAP') + 4]
                if kap not in self.chart_covers and kap not in self.problem_charts:
                    map_files.append(kap)
        # Charts NOAA omits from the XML but that we still want to ship.
        map_files.extend(chart_additions.get(self.region, []))
        map_files.sort()
        return map_files
if __name__ == '__main__':
    # Smoke test: list all chart files for one region.
    nxl = NoaaXmlReader('REGION_04')
    print(nxl.get_map_files())
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common definitions of hyperparamter schedules."""
import tensorflow as tf
class WarmUpPiecewiseConstantSchedule(
    tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning rate schedule.

    It starts with a linear warmup to the initial learning rate over
    `warmup_epochs`. This is found to be helpful for large batch size training
    (Goyal et al., 2018). The learning rate's value then uses the initial
    learning rate, and decays by a multiplier at the start of each epoch in
    `decay_epochs`. The stepwise decaying schedule follows He et al. (2015).
    """

    def __init__(self,
                 steps_per_epoch,
                 base_learning_rate,
                 decay_ratio,
                 decay_epochs,
                 warmup_epochs):
        """Args:
          steps_per_epoch: number of optimizer steps per epoch.
          base_learning_rate: learning rate reached at the end of warmup.
          decay_ratio: multiplier applied once per entry in `decay_epochs`.
          decay_epochs: list of epoch indices at which a decay step occurs.
          warmup_epochs: number of epochs of linear warmup (0 disables it).
        """
        super().__init__()
        self.steps_per_epoch = steps_per_epoch
        self.base_learning_rate = base_learning_rate
        self.decay_ratio = decay_ratio
        self.decay_epochs = decay_epochs
        self.warmup_epochs = warmup_epochs

    def __call__(self, step):
        # Fractional epoch index of the current step.
        lr_epoch = tf.cast(step, tf.float32) / self.steps_per_epoch
        learning_rate = self.base_learning_rate
        if self.warmup_epochs >= 1:
            # Linear ramp from 0 to base_learning_rate over warmup_epochs.
            learning_rate *= lr_epoch / self.warmup_epochs
        decay_epochs = [self.warmup_epochs] + self.decay_epochs
        for index, start_epoch in enumerate(decay_epochs):
            # Past each boundary, apply one more power of decay_ratio.
            learning_rate = tf.where(
                lr_epoch >= start_epoch,
                self.base_learning_rate * self.decay_ratio**index,
                learning_rate)
        return learning_rate

    def get_config(self):
        # BUGFIX: include every constructor argument so the schedule can be
        # round-tripped through Keras serialization (the original omitted
        # decay_ratio, decay_epochs and warmup_epochs, so from_config()
        # would fail to rebuild an equivalent schedule).
        return {
            'steps_per_epoch': self.steps_per_epoch,
            'base_learning_rate': self.base_learning_rate,
            'decay_ratio': self.decay_ratio,
            'decay_epochs': self.decay_epochs,
            'warmup_epochs': self.warmup_epochs,
        }
class AddWarmupDecaySchedule(
    tf.keras.optimizers.schedules.LearningRateSchedule):
    """A wrapper for LearningRateSchedule that includes warmup steps."""

    def __init__(self, lr_schedule, warmup_steps):
        """Add warmup decay to a learning rate schedule.

        Args:
          lr_schedule: base learning rate scheduler. Must expose an
            ``initial_learning_rate`` attribute (used as the warmup target).
          warmup_steps: number of warmup steps (0/None disables warmup).
        """
        super().__init__()
        self._lr_schedule = lr_schedule
        self._warmup_steps = warmup_steps

    def __call__(self, step):
        # Base schedule value; replaced by the warmup ramp while
        # step < warmup_steps.
        lr = self._lr_schedule(step)
        if self._warmup_steps:
            initial_learning_rate = tf.convert_to_tensor(
                self._lr_schedule.initial_learning_rate, name='initial_learning_rate')
            dtype = initial_learning_rate.dtype
            global_step_recomp = tf.cast(step, dtype)
            warmup_steps = tf.cast(self._warmup_steps, dtype)
            # Linear ramp from 0 to initial_learning_rate over warmup_steps.
            warmup_lr = initial_learning_rate * global_step_recomp / warmup_steps
            lr = tf.cond(global_step_recomp < warmup_steps,
                         lambda: warmup_lr,
                         lambda: lr)
        return lr

    def get_config(self):
        # NOTE(review): this flattens the wrapped schedule's config and adds
        # warmup_steps; the wrapped schedule's class itself is not recorded,
        # so from_config() on this class cannot rebuild the wrapper as-is.
        config = self._lr_schedule.get_config()
        config.update({
            'warmup_steps': self._warmup_steps,
        })
        return config
class WarmUpPolynomialSchedule(
    tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup then polynomial decay learning rate schedule."""

    def __init__(self,
                 base_learning_rate,
                 end_learning_rate,
                 decay_steps,
                 warmup_steps,
                 decay_power=1.0):
        """Args:
          base_learning_rate: rate reached after warmup, start of the decay.
          end_learning_rate: final rate at the end of decay_steps.
          decay_steps: number of steps over which the polynomial decay runs.
          warmup_steps: number of linear warmup steps before the decay.
          decay_power: exponent of the polynomial (1.0 = linear decay).
        """
        super().__init__()
        # Compose a Keras PolynomialDecay with the warmup wrapper above.
        poly_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
            base_learning_rate,
            decay_steps,
            end_learning_rate,
            decay_power)
        self._schedule = AddWarmupDecaySchedule(poly_schedule, warmup_steps)

    def __call__(self, step):
        return self._schedule(step)

    def get_config(self):
        # NOTE(review): returns the schedule object itself, not a plain
        # serializable dict — confirm whether this class ever needs to be
        # round-tripped through Keras serialization.
        return {
            'schedule': self._schedule,
        }
|
# -*- coding: utf-8 -*-
"""
| '_ \ / _` |_____ / __/ _ \| '_ ` _ \| '_ \ / _` | '__/ _ \
| |_) | (_| |_____| (_| (_) | | | | | | |_) | (_| | | | __/
| .__/ \__, | \___\___/|_| |_| |_| .__/ \__,_|_| \___|
|_| |___/ |_|
This is used to compare two databases. Takes two connection strings. One it
considers truth and another to test against it. Used to determine the difference
in two databases.
"""
import contextlib
import sys
import threading
import time
import click
from psycopg2 import ProgrammingError
from psycopg2.extensions import parse_dsn
from . import config
from .environments import PGCOMPARE_NO_SPIN, PGCOMPARE_NO_ASYNC
from .models import PGDetails
from .vendor.blindspin import spinner
TITLE_TEXT = """\
This is used to compare two databases. Takes two connection strings. One it
considers truth and another to test against it. Used to determine the difference
in two databases.
"""
# When spinning (or async, which the spinner depends on) is disabled via the
# environment, shadow the vendored spinner with a no-op context manager so
# call sites stay unchanged.
if PGCOMPARE_NO_SPIN or PGCOMPARE_NO_ASYNC:
    @contextlib.contextmanager
    def spinner():
        yield
def initialize():
    """ Initial processing before the comparisons begin. """
    print_welcome_text()
    # Build connection wrappers for both databases from the parsed configs.
    config.truth_db = PGDetails(**config.truth_db_config)
    config.test_db = PGDetails(**config.test_db_config)
    message = click.style("Retrieving details for all tables. This could take awhile... ", fg="yellow")
    config.log.info(message)
    # Fetch table metadata for both databases (threaded unless disabled),
    # showing a spinner while it runs.
    with spinner():
        load_table_details_for_both_dbs(config.truth_db, config.test_db)
    # Clearing that status
    sys.stdout.write("\b")
    sys.stdout.flush()
    return
def transform_conn_string(conn_string):
    """Parse a libpq connection string into a dict of connection keywords.

    Accepts both key/value DSNs and postgresql:// URLs, e.g.:

        'dbname=test user=postgres password=secret' ->
            {'password': 'secret', 'user': 'postgres', 'dbname': 'test'}
        "postgresql://someone@example.com/somedb?connect_timeout=10" ->
            {'host': 'example.com', 'user': 'someone', 'dbname': 'somedb',
             'connect_timeout': '10'}

    Logs an error and exits the CLI when the string cannot be parsed.
    """
    try:
        return parse_dsn(conn_string)
    except ProgrammingError:
        config.log.error(
            "Invalid connection string: {}. Exiting.".format(conn_string))
        sys.exit(1)
def prompt_for_conn_strings():
    """Interactively ask the user for the TRUTH and TEST connection strings,
    storing them on the shared config module."""
    config.truth_db_conn_string = click.prompt("Database to consider TRUTH: ")
    config.test_db_conn_string = click.prompt("Database to consider TEST: ")
def print_welcome_text():
    """Render the CLI banner, then the database-connection summary table."""
    banner = '\n\t\t\t{}{}\n'.format(
        click.style("=== PG-COMPARE ===\n\n", fg="yellow", bold=True),
        click.style(TITLE_TEXT),
    )
    config.log.info(banner)
    print_info_about_databases()
def print_info_about_databases():
    """Log a side-by-side table of the truth and test connection settings."""
    from tabulate import tabulate
    rows = []
    seen = []
    # First pass: every truth-side key, paired with the test-side value.
    for key, value in config.truth_db_config.items():
        rows.append((key, value, config.test_db_config.get(key, '')))
        seen.append(key)
    # Second pass: test-side keys not present on the truth side.
    for key, value in config.test_db_config.items():
        if key not in seen:
            rows.append((key, config.truth_db_config.get(key, ''), value))
            seen.append(key)
    config.log.info(tabulate(rows, headers=['', 'Truth Database', 'Test Database'], tablefmt="simple"))
    config.log.info('\n')
def load_table_details_for_both_dbs(*databases):
    """ Load all needed data from both databases into memory. """
    # Synchronous path: fetch table details one database at a time.
    if PGCOMPARE_NO_ASYNC:
        for db in databases:
            db.get_details_for_tables()
        return
    threads = []
    stop_event = threading.Event()
    # Baseline thread count lets us detect when our workers finish even if
    # unrelated threads already exist in the process.
    initial_active_threads = threading.active_count()
    for db in databases:
        process = threading.Thread(target=db.get_details_for_tables)
        process.start()
        threads.append(process)
    # Poll instead of join() so Ctrl-C / SystemExit is noticed promptly.
    while threading.active_count() > initial_active_threads:
        try:
            time.sleep(0.1)
        except (KeyboardInterrupt, SystemExit):
            stop_event.set()
        if stop_event.is_set():
            # NOTE(review): the worker threads are never signalled directly;
            # closing the DB connections underneath them appears to be the
            # cancellation mechanism — confirm this is intended.
            for db in databases:
                db._close_all()
            sys.exit(1)
    return
def calculate_elapsed_time():
    """Return the script's elapsed wall-clock time formatted as "H:MM:SS"."""
    elapsed = time.time() - config.start_time
    minutes, secs = divmod(elapsed, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
|
from .Confirm import verify
from django.urls import path

# Routes e-mail verification links of the form
# /<token_type>/<email_token> to the Confirm.verify view.
urlpatterns = [
    path('<str:token_type>/<str:email_token>', verify)
]
|
"""
Queries that list admin and resolve them individually
"""
import graphene
from django.contrib.auth import get_user_model
from graphql_jwt.decorators import superuser_required
from accounts.models import Admin
from accounts.types import AdminType
class AdminsQuery(graphene.AbstractType):
    """Read-only GraphQL queries over Admin accounts.

    Every resolver is restricted to superusers via @superuser_required.
    """
    # all admins in the system
    all_admins = graphene.List(AdminType)
    # one admin, looked up by the e-mail of its linked user account
    admin = graphene.Field(
        AdminType,
        admin_email=graphene.String()
    )
    # all admins created by a given creator, looked up by the creator's e-mail
    admins = graphene.Field(
        graphene.List(AdminType),
        creator_email=graphene.String()
    )

    @superuser_required
    def resolve_all_admins(self, info, **kwargs):
        """Query all admins"""
        return Admin.objects.all()

    @superuser_required
    def resolve_admin(self, info, **kwargs):
        """Query a specific admin.

        NOTE(review): both user and Admin lookups use .get(), which raises
        DoesNotExist for an unknown e-mail — confirm that surfacing this as
        a GraphQL error is intended.
        """
        admin_email = kwargs.get('admin_email')
        admin = get_user_model().objects.get(email=admin_email)
        return Admin.objects.get(
            admin=admin
        )

    @superuser_required
    def resolve_admins(self, info, **kwargs):
        """Query all admins from a creator admin"""
        creator_email = kwargs.get('creator_email')
        creator = get_user_model().objects.get(email=creator_email)
        return Admin.objects.filter(
            creator=creator
        )
|
from discord.ext import commands, tasks
from discord import Intents, User, Reaction, Message, TextChannel, NotFound
from discord.ext.commands import CommandNotFound, Context, CommandInvokeError
from emojis import decode
from discord import Embed
from discord.utils import get
from persistent import server_config, sql
from util.logging import log, LogLevel
import asyncio
import simplejson as json
# settings.py
from dotenv import load_dotenv
load_dotenv()
import os
from datetime import datetime, timedelta
# Bot tokens are read from the environment (.env via load_dotenv above).
LIVETOKEN = os.getenv("LIVETOKEN")
DEVTOKEN = os.getenv("DEVTOKEN")  # development token; unused while running live
# Select which token the bot runs under (currently the live/production one).
DISCORD_TOKEN = LIVETOKEN
TOKEN = DISCORD_TOKEN
VERSION = "0.2.7"
COMMAND_PREFIX = "."
# All gateway intents enabled; the built-in help command is removed
# (presumably replaced by a custom one in a cog — TODO confirm).
intents = Intents.all()
client = commands.Bot(command_prefix=COMMAND_PREFIX, intents=intents)
client.remove_command('help')
@tasks.loop(seconds = 120)
async def st():
    """Suspension timer: every 2 minutes, decrement remaining durations stored in
    suspend.log and restore roles for members whose suspension has expired.

    suspend.log holds JSON mapping user-id -> {'dur': remaining_minutes, ...}.
    Guild/role/channel ids are hard-coded for one specific server.
    """
    with open("suspend.log", 'r+') as sl:
        data = json.loads(sl.read())
        guild = get(client.guilds, id=522815906376843274)
        suspended_role = get(guild.roles, id=776320653581877278)
        mr = get(guild.roles, id=522817975091462147)  # role restored after suspension
        dellist = []  # user ids to purge from the log after the pass
        for uid in data:
            try:
                member = await guild.fetch_member(uid)
                if data[uid]['dur'] <= 0:
                    # Suspension expired: swap roles back and announce.
                    print("unsupending user")
                    if member is None:
                        print("Error Null Member")
                    else:
                        try:
                            await member.remove_roles(suspended_role)
                            if suspended_role is not None:
                                await member.add_roles(mr)
                            logchannel = get(guild.channels, id=761788719685435404)
                            suschannel = get(guild.channels, id=763644055536009216)
                            embed=Embed(title="User Unsuspended", description="**%s**, He has served his time \n Unsuspender: **SpaceBot**" % (member.display_name))
                            await suschannel.send(embed=embed)
                            await logchannel.send(embed=embed)
                        except Exception as e:
                            await log("An unexpected Error has occured during unsuspend while removing user roles.")
                    # Remove the entry whether or not the role swap succeeded.
                    dellist.append(uid)
                else:
                    # Loop runs every 120 s, so remaining duration drops by 2 (minutes).
                    data[uid]['dur'] = data[uid]['dur'] - 2
            except NotFound:
                # Member left the guild: drop their entry.
                dellist.append(uid)
            except Exception as e:
                await log("An unexpected Error has occured during unsuspend tread.")
                await log(e)
        for uid in dellist:
            data.pop(uid, None)
        # Rewrite the file in place with the updated durations.
        sl.seek(0)
        json.dump(data, sl)
        sl.truncate()
        sl.close()
'''
@tasks.loop(seconds = 86400)
async def pt():
await log("rollover thread %s" % (datetime.now().weekday()))
if datetime.now().weekday() == 0:
guild = get(client.guilds, id=522815906376843274)
logchannel = get(guild.channels, id=761788719685435404)
staffinfo = get(guild.channels, id=816025151178670090)
embed = Embed(title="Monday Quota Bot", description="=========================")
embed.add_field(name = "%s: Quota has come are you ready to parse staff?" % (datetime.now()),
value = "This will start the rollover, (STAFF WILL NOT BE AUTO DM'ED)",
inline=False)
msg = await staffinfo.send(content = "<@&761211992438472744> <@&522816654091223051>", embed=embed)
await msg.add_reaction("✔")
await msg.add_reaction("❌")
def check(obj, user = 0):
if obj.message != msg:
return False
elif user.bot:
return False
else:
return True
contract = await client.wait_for('reaction_add', check=check)
await msg.clear_reactions()
if contract[0].emoji == "✔":
data = await sql.rollover()
for user in data:
try:
member = guild.get_member(int(user[0]))
e = Embed(
title="Inactivity Alert",
description=''
**%s did not meet quota.**
Their weekly quota was: **%s** points, but they only had **%s** points.\n
Their roles are: %s \n
If they shouldn't have a quota copy the following command \n
`.noquota %s` \n
When you are done with this card react with ❌
' % (member.display_name,
40,
user[1],
", ".join([(role.name) for role in member.roles]),
member.id),
color=0xdb021c)
e.set_footer(text="Space Travel Dungeons", icon_url = "https://cdn.discordapp.com/attachments/751589431441490082/764948382912479252/SPACE.gif")
await logchannel.send(embed = e)
k = await staffinfo.send(embed = e)
def kcheck(obj, user = 0):
if obj.message != k:
return False
elif user.bot:
return False
else:
return True
await msg.add_reaction("❌")
contract = await client.wait_for('reaction_add', check=kcheck)
await k.delete()
e = Embed(
title="Quota Notice",
description='<@!%s> has completed quota check on <@!%s>',
color=0xdb021c)
await logchannel.send( embed = e)
except Exception:
print("Opps something went wrong")
await log("Rolling over")
else:
embed = Embed(title="Monday Quota Bot", description="=========================")
embed.add_field(name = "%s: %s?" % (datetime.now(), contract[1].display_name),
value = "You have canceled rollover this week, dm @Arceye if this was a mistake",
inline=False)
await msg.edit(embed=embed)
'''
@client.event
async def on_ready():
    """Startup hook: connect to SQL, start the suspension timer, load guild configs."""
    await log(f'{client.user.name} v{VERSION} has connected to Discord!')
    await log("Attempting connection to SQL server...")
    await sql.connect()
    # Loads config for guilds the bot is currently a member of.
    await log("Loading config for connected guilds.")
    st.start()
    # pt.start()
    for guild in client.guilds:
        cfg = await server_config.get_config(guild)
        if cfg is not None:
            await log("Loaded config for guild " + guild.name + " with id " + str(guild.id) + ".")
@client.event
async def on_message(message: Message):
    """Log messages that look like commands, then hand every message to the command processor."""
    author_tag: str = f"{message.author.name}#{message.author.discriminator}"
    location = "DMs"
    if message.guild is not None:
        location = f"(guild {message.guild.name} with id {message.guild.id})"
    content: str = message.content
    if content.startswith(COMMAND_PREFIX):
        await log(f"{author_tag} issued command {content} in {location}.")
    await client.process_commands(message)
@client.event
async def on_command_error(ctx: Context, error):
    """Silently ignore unknown commands; log anything else as a warning."""
    if isinstance(error, CommandNotFound):
        return
    await log("An exception occurred while executing this command.", LogLevel.WARN)
    await log(error.__str__())
@client.event
async def on_member_join(member):
    # Intentional no-op: member-join handling is not implemented yet.
    pass
# Load every cog module found in ./cogs, skipping the package initializer.
for filename in os.listdir('./cogs'):
    if filename.endswith('.py') and filename != '__init__.py':
        # Fix: the original f-string had no placeholder, so the cog name was
        # never shown ("Loading cog from (unknown).").
        print(f"Loading cog from {filename}.")
        client.load_extension(f'cogs.{filename[:-3]}')
client.run(TOKEN)
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(UserProfile)
admin.site.register(Like)
admin.site.register(Comment)
admin.site.register(FriendRequest)
admin.site.register(Post)
admin.site.register(Inbox)
admin.site.register(ExternalServer)


class SignUpConfirmAdmin(admin.ModelAdmin):
    """Admin for the sign-up confirmation toggle."""
    # Show only the confirmation flag in the changelist.
    list_display = ('boolean',)

    def has_add_permission(self, request):
        # Disallow creating new SignUpConfirm rows through the admin UI.
        return False


admin.site.register(SignUpConfirm, SignUpConfirmAdmin)
"""Vera tests."""
from unittest.mock import MagicMock
import pyvera as pv
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
async def test_lock(
    hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
    """Test the Vera lock entity: service calls reach the device and pushed
    device updates are reflected in the entity state."""
    # Fake VeraLock device; is_locked drives the state reported to HA.
    vera_device: pv.VeraLock = MagicMock(spec=pv.VeraLock)
    vera_device.device_id = 1
    vera_device.vera_device_id = vera_device.device_id
    vera_device.comm_failure = False
    vera_device.name = "dev1"
    vera_device.category = pv.CATEGORY_LOCK
    vera_device.is_locked = MagicMock(return_value=False)
    entity_id = "lock.dev1_1"
    component_data = await vera_component_factory.configure_component(
        hass=hass,
        controller_config=new_simple_controller_config(devices=(vera_device,)),
    )
    update_callback = component_data.controller_data[0].update_callback
    assert hass.states.get(entity_id).state == STATE_UNLOCKED
    # Lock: the service call must hit the device; a pushed update flips the state.
    await hass.services.async_call(
        "lock",
        "lock",
        {"entity_id": entity_id},
    )
    await hass.async_block_till_done()
    vera_device.lock.assert_called()
    vera_device.is_locked.return_value = True
    update_callback(vera_device)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_LOCKED
    # Unlock: mirror of the lock flow.
    await hass.services.async_call(
        "lock",
        "unlock",
        {"entity_id": entity_id},
    )
    await hass.async_block_till_done()
    vera_device.unlock.assert_called()
    vera_device.is_locked.return_value = False
    update_callback(vera_device)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_UNLOCKED
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.location import BBox
from geocoder.base import OneResult, MultipleResultsQuery
class GisgraphyResult(OneResult):
    """One geocoding hit from Gisgraphy; properties expose fields of the raw JSON payload."""

    def _raw_str(self, key):
        # Shared accessor for the string-valued payload fields (default '').
        return self.raw.get(key, '')

    @property
    def lat(self):
        return self.raw.get('lat')

    @property
    def lng(self):
        return self.raw.get('lng')

    @property
    def address(self):
        return self._raw_str('formatedFull')

    @property
    def country(self):
        return self._raw_str('countryCode')

    @property
    def state(self):
        return self._raw_str('state')

    @property
    def city(self):
        return self._raw_str('city')

    @property
    def street(self):
        return self._raw_str('streetName')

    @property
    def housenumber(self):
        return self._raw_str('houseNumber')

    @property
    def postal(self):
        return self._raw_str('zipCode')
class GisgraphyQuery(MultipleResultsQuery):
    """
    Gisgraphy REST API
    =======================

    API Reference
    -------------
    http://www.gisgraphy.com/documentation/user-guide.php
    """
    provider = 'gisgraphy'
    method = 'geocode'

    _URL = 'https://services.gisgraphy.com/geocoding/'
    _RESULT_CLASS = GisgraphyResult
    _KEY_MANDATORY = False  # public endpoint: no API key required

    def _build_params(self, location, provider_key, **kwargs):
        # 'maxRows' (the option name shared by other providers — TODO confirm)
        # maps onto Gisgraphy's 'limitnbresult' query parameter; default 1 hit.
        return {
            'address': location,
            'limitnbresult': kwargs.get('maxRows', 1),
            'format': 'json',
        }

    def _adapt_results(self, json_response):
        # The API nests the list of hits under the 'result' key.
        return json_response['result']
if __name__ == '__main__':
    # Ad-hoc smoke test: geocode a sample address and dump debug output.
    logging.basicConfig(level=logging.INFO)
    g = GisgraphyQuery('Ottawa Ontario', maxRows=3)
    g.debug()
|
"""
import bk_typing
"""
import typing as tp
from typing import Union, List, Tuple, Dict, Sequence, Iterable, TypeVar, Any
TV = TypeVar('TV')

# __t New Number Subtypes, and functions to cast to them.
# NewType gives mypy distinct nominal types; the cast helpers add the
# matching runtime assertion.
NonNegInt = tp.NewType('NonNegInt', int)
NonNegFloat = tp.NewType('NonNegFloat', float)
Probability = tp.NewType('Probability', NonNegFloat)


def NNI(n: int) -> NonNegInt:
    """Runtime-checked cast of an int to NonNegInt (n must be >= 0)."""
    failure = f"n should be cast-able to a NonNegInt, but is: {n}"
    assert n >= 0, failure
    return tp.cast(NonNegInt, n)


def NNF(f: float) -> NonNegFloat:
    """Runtime-checked cast of a float to NonNegFloat (f must be >= 0)."""
    failure = f"f should be cast-able to a NonNegFloat, but is: {f}"
    assert f >= 0, failure
    return tp.cast(NonNegFloat, f)


def PBT(p: float) -> Probability:
    """Runtime-checked cast of a float to Probability (p must lie in [0, 1])."""
    failure = f"p should be cast-able to a Probability, but is: {p}"
    assert 0 <= p <= 1, failure
    return tp.cast(Probability, p)
def TEST_New_Number_Subtypes():
    """Static-typing scratchpad for the number subtypes; the guarded body is
    never executed — it exists to be dedented and checked with mypy."""
    pass
    # __WARN: These don't actually check (with mypy) while inside this function - dedent then run mypy to check that all working!
    # They do check if inside the """if __name__ == '__main__':""", but not inside this function!
    if "IPYTHON" == "don't run":
        NNI(-1)  # __c Should throw assertion errors when run
        NNF(-.5)  # __c Should throw assertion errors when run
        PBT(1.5)  # __c Should throw assertion errors when run
        NNI(1)
        NNF(2.5)
        PBT(.35)

        def ThroughProb(p: Probability) -> Probability:
            return p
        ThroughProb(.5)  # __c Expect error
        x: NonNegFloat
        x = PBT(.6)
        y: Probability
        y = NonNegFloat(.6)  # __c Expect error
        y = x  # __c Expect error
        x = y
        x = .5  # __c Expect error
        y = 9.99  # __c Expect error
y = 9.99 # __c Expect error
# __t New Iterable (or self) Types
# NOTE: "SelfX[T]" means "either a bare T or an X of T" (see the Unions below).
ListTuple = Union[List[TV], Tuple[TV, ...]]
SelfList = Union[TV, List[TV]]
SelfTuple = Union[TV, Tuple[TV]]
SelfListTuple = Union[TV, ListTuple[TV]]
SelfSequence = Union[TV, Sequence[TV]]
SelfIterable = Union[TV, Iterable[TV]]
# __c Recursion/Forward Referencing of types not supported, so use "Any" at 2nd level
# __c Also, "List" is an invariant type, so 2nd level types don't work that well as parameter annotations (cause have to match exactly, not just subclass)
SelfList_Recursive = Union[TV, List[ Union[TV, List[ Any]]]]
SelfSequence_Recursive = Union[TV, Sequence[ Union[TV, Sequence[ Any]]]]
SelfIterable_Recursive = Union[TV, Iterable[ Union[TV, Iterable[ Any]]]]
# if "Test Self____" == "Skip":
# NOTE: mypy test scaffold for the Self* aliases above. Each commented call is
# an expected type error; uncommented calls are expected to type-check cleanly.
if True:
    # Typed fcns
    def my_fcn_SelfList(inp: SelfList[int]): pass
    def my_fcn_SelfListTuple(inp: SelfListTuple[int]): pass
    def my_fcn_SelfSequence(inp: SelfSequence[int]): pass
    def my_fcn_SelfIterable(inp: SelfIterable[int]): pass
    def my_fcn_SelfList_Recursive(inp: SelfList_Recursive[int]): pass
    # def my_fcn_SelfListTuple_Recursive(inp: SelfListTuple_Recursive[int]): pass
    def my_fcn_SelfSequence_Recursive(inp: SelfSequence_Recursive[int]): pass
    def my_fcn_SelfIterable_Recursive(inp: SelfIterable_Recursive[int]): pass
    # Sample values covering every nesting the aliases distinguish.
    an_int: int = 3
    list_int: List[int] = [3, 4]
    list_list_int: List[Union[int, List[int]]] = [3, 9, [4, 5]]
    tuple_int: Tuple[int, ...] = (3, 4)
    tuple_tuple_int: Tuple[Union[int, Tuple[int, ...]], ...] = (3, 9, (4, 5))
    list_tuple_int: List[Union[int, Tuple[int, ...]]] = [3, 9, (4, 5)]  # todo ADD this!
    iter_int: Iterable[int] = range(5)
    list_iter_int: List[Union[int, Iterable]] = [3, 9, range(5)]
    # ______t Test List
    my_fcn_SelfList(an_int)
    my_fcn_SelfList(list_int)
    # my_fcn_SelfList(list_list_int) # __c Should complain
    # my_fcn_SelfList(tuple_int) # __c Should complain
    # my_fcn_SelfList(tuple_tuple_int) # __c Should complain
    # my_fcn_SelfList(list_tuple_int) # __c Should complain
    # my_fcn_SelfList(iter_int) # __c Should complain
    # my_fcn_SelfList(list_iter_int) # __c Should complain
    # ______t Test ListTuple
    my_fcn_SelfListTuple(an_int)
    my_fcn_SelfListTuple(list_int)
    # my_fcn_SelfListTuple(list_list_int) # __c Should complain
    my_fcn_SelfListTuple(tuple_int)
    # my_fcn_SelfListTuple(tuple_tuple_int) # __c Should complain
    # my_fcn_SelfListTuple(list_tuple_int) # __c Should complain
    # my_fcn_SelfListTuple(iter_int) # __c Should complain
    # my_fcn_SelfListTuple(list_iter_int) # __c Should complain
    # ______t Test Sequence
    my_fcn_SelfSequence(an_int)
    my_fcn_SelfSequence(list_int)
    # my_fcn_SelfSequence(list_list_int) # __c Should complain
    my_fcn_SelfSequence(tuple_int)
    # my_fcn_SelfSequence(tuple_tuple_int) # __c Should complain
    # my_fcn_SelfSequence(list_tuple_int) # __c Should complain
    # my_fcn_SelfSequence(iter_int) # __c Should complain
    # my_fcn_SelfSequence(list_iter_int) # __c Should complain
    # ______t Test Iterable
    my_fcn_SelfIterable(an_int)
    my_fcn_SelfIterable(list_int)
    # my_fcn_SelfIterable(list_list_int) # __c Should complain
    my_fcn_SelfIterable(tuple_int)
    # my_fcn_SelfIterable(tuple_tuple_int) # __c Should complain
    # my_fcn_SelfIterable(list_tuple_int) # __c Should complain
    my_fcn_SelfIterable(iter_int)
    # my_fcn_SelfIterable(list_iter_int) # __c Should complain
    # __t RECURSIVE
    # # ______t Test List_Recursive
    my_fcn_SelfList_Recursive(3)
    my_fcn_SelfList_Recursive([3, 4])
    my_fcn_SelfList_Recursive([3, 9, [4, 5]])  # __c Should be OK b/c passing in directly, so isn't concerned about it being mutated incorrectly
    # my_fcn_SelfList_Recursive((3, 4)) # __c Should complain
    # my_fcn_SelfList_Recursive((3, 9, (4, 5))) # __c Should complain
    # my_fcn_SelfList_Recursive([3, 9, (4, 5)]) # __c Should complain
    # my_fcn_SelfList_Recursive(range(5)) # __c Should complain
    # my_fcn_SelfList_Recursive([3, 9, range(5)]) # __c Should complain
    # ______t Test Sequence_Recursive
    my_fcn_SelfSequence_Recursive(an_int)
    my_fcn_SelfSequence_Recursive(list_int)
    my_fcn_SelfSequence_Recursive(list_list_int)
    my_fcn_SelfSequence_Recursive(tuple_int)
    my_fcn_SelfSequence_Recursive(tuple_tuple_int)
    my_fcn_SelfSequence_Recursive(list_tuple_int)
    # my_fcn_SelfSequence_Recursive(iter_int) # __c Should complain
    # my_fcn_SelfSequence_Recursive(list_iter_int) # __c Should complain
    # ______t Test Iterable_Recursive
    my_fcn_SelfIterable_Recursive(an_int)
    my_fcn_SelfIterable_Recursive(list_int)
    my_fcn_SelfIterable_Recursive(list_list_int)
    my_fcn_SelfIterable_Recursive(tuple_int)
    my_fcn_SelfIterable_Recursive(tuple_tuple_int)
    my_fcn_SelfIterable_Recursive(list_tuple_int)
    my_fcn_SelfIterable_Recursive(iter_int)
    my_fcn_SelfIterable_Recursive(list_iter_int)
|
"""
Provisioning
============
The Provisioning module includes classes to manage the provisioning of a
node.
.. Copyright:
Copyright 2020 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
from .events import (
ProvisioningEvent,
ProvisioningEventTimeout,
ProvisioningEventPacketReceived,
)
from .sm import ProvisioningStateMachine, ProvisioningStatus
from .message import (
ProvisioningMessage,
ProvisioningMessageSTART,
ProvisioningMessageDATA,
ProvisioningMessageDATA_ACK,
ProvisioningMessageNACK,
ProvisioningMethod,
ProvisioningNackReason,
)
from .provisioning_server import main as prov_main
# Public API of the provisioning package, re-exported from the submodules above.
__all__ = [
    "ProvisioningEvent",
    "ProvisioningEventTimeout",
    "ProvisioningEventPacketReceived",
    "ProvisioningStateMachine",
    "ProvisioningMessage",
    "ProvisioningMessageSTART",
    "ProvisioningMessageDATA",
    "ProvisioningMessageDATA_ACK",
    "ProvisioningMessageNACK",
    "ProvisioningMethod",
    "ProvisioningNackReason",
    "ProvisioningStatus",
    "prov_main",
]
|
#!/usr/bin/env python3
""" 音声情報処理 n本ノック !! """
# MIT License
# Copyright (C) 2020 by Akira TAMAMORI
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Commentary:
# - 音声からケプストラム法により基本周波数を推定する
# - パワーが最大となる音声フレームを対象に推定
import numpy as np
import scipy
from scipy.io import wavfile
import librosa
import matplotlib.pyplot as plt
IN_WAVE_FILE = "in.wav"  # speech file to analyze
FRAME_LENGTH = 1024  # frame length (= FFT size)
HOP_LENGTH = 80  # frame shift (hop) length
FFT_LENGTH = FRAME_LENGTH
MAX_Fo = 200  # upper bound of the F0 search range (Hz)
MIN_Fo = 60  # lower bound of the F0 search range (Hz)
# Load the speech waveform
fs, data = wavfile.read(IN_WAVE_FILE)
data = data.astype(np.float64)
# Max/min cepstral orders (quefrency indices) corresponding to the F0 range
max_cep_order = int(np.floor(fs / MIN_Fo))
min_cep_order = int(np.floor(fs / MAX_Fo))
# Split the signal into overlapping frames (one frame per row after .T)
frames = librosa.util.frame(data, frame_length=FRAME_LENGTH,
                            hop_length=HOP_LENGTH).T
# Index of the frame with maximum power
max_ind = np.argmax(np.sum(frames * frames, axis=1))
# Extract the maximum-power frame for analysis
pow_max_frame = frames[max_ind, :]
# Apply a Blackman window
window = scipy.signal.blackman(FFT_LENGTH)
windowed_frame = pow_max_frame * window
# Cepstrum computation (FFT -> magnitude -> log -> inverse FFT)
fft_spec = scipy.fft.rfft(windowed_frame)
log_amp_spec = np.log(np.abs(fft_spec))
cepstrum = scipy.fft.irfft(log_amp_spec)
# Peak picking restricted to the plausible quefrency range
peak_index = np.argmax(cepstrum[min_cep_order: max_cep_order])
max_quef = peak_index + min_cep_order
# Convert the peak quefrency (in samples) to the fundamental frequency
fo = fs / max_quef
print(f"Fundamental Frequency = {fo:.2f} Hz")
# Plot the waveform (original and windowed)
fig = plt.figure(figsize=(12, 8))
time = np.arange(len(windowed_frame)) / fs
axes = fig.add_subplot(3, 1, 1)
axes.plot(time, pow_max_frame, label="original")
axes.plot(time, windowed_frame, label="windowed")
axes.set_xlabel("Time (sec)")
axes.set_ylabel("Amplitude")
axes.set_title("Waveform")
axes.legend()
axes.set_xlim(0, np.max(time))
# Plot the relative log power spectrum
axes = fig.add_subplot(3, 1, 2)
freq = fs/2 * np.arange(len(log_amp_spec)) / len(log_amp_spec)
# NOTE(review): np.log is the natural log — for a true dB axis this would be
# 20 * np.log10(...); confirm whether the natural-log scaling is intended.
logpower = 20 * np.log(np.abs(fft_spec) / np.max(np.abs(fft_spec)))
axes.plot(freq, logpower, label="original")
axes.set_xlabel("Frequency (Hz)")
axes.set_ylabel("Log power (dB)")
axes.set_title("Log spectrum")
axes.set_xlim(0, np.max(freq))
axes.set_ylim(np.min(logpower), 0)
# Plot the cepstrum (log magnitude)
axes = fig.add_subplot(3, 1, 3)
quef = np.arange(FFT_LENGTH / 2) / fs
log_cepstrum = np.log(np.abs(cepstrum))
axes.plot(quef, log_cepstrum[:len(quef)])
axes.set_xlabel("Quefrency (sec)")
axes.set_ylabel("Log amplitude (dB)")
axes.set_title("Cepstrum")
axes.set_xlim(0, np.max(quef))
plt.tight_layout()
plt.show()
|
import numpy as np
from sklearn import metrics
def uncertain_label(n):
    """Build the symmetric (n x n) transfer matrix of meta-class labels.

    Cell (i, i) holds the plain class label i; each off-diagonal pair
    (i, j), i != j, shares a unique label >= n identifying the
    "uncertain between classes i and j" meta class.

    Args:
        n: number of base classes.

    Returns:
        numpy.ndarray of shape (n, n) (float dtype, as np.zeros defaults to).
    """
    transfer_matrix = np.zeros((n, n))
    # Diagonal: plain class labels 0..n-1.
    # (Replaces the original O(n^2) double loop that only touched i == j.)
    np.fill_diagonal(transfer_matrix, np.arange(n))
    # Off-diagonal: consecutive labels starting at n, mirrored for symmetry.
    s = n
    for t in range(n):
        transfer_matrix[t, t + 1:n] = np.arange(s, s + n - t - 1)
        transfer_matrix[t + 1:n, t] = np.arange(s, s + n - t - 1)
        s += n - t - 1
    return transfer_matrix
def evaluate(y_pred, y_true, n_classes, gamma=0.8):
    """Score predictions that may include 'uncertain' meta-class labels.

    Labels < n_classes are plain class predictions; labels >= n_classes refer
    to pairwise meta classes defined by uncertain_label(). Prints the confusion
    matrix and a summary line.

    Returns:
        (Re, Ri, e_precision, e_recall, e_f_measure1, bt) — error rate,
        imprecision (meta-class) rate, macro-averaged P/R/F1, and the
        discounted benefit value, all rounded to 4 decimals.
    """
    A = 2  # meta class size (a meta class covers a pair of base classes)
    benefit_value = 0
    Re = 0  # count (later: rate) of outright wrong predictions
    Ri = 0  # count (later: rate) of correct-but-imprecise meta predictions
    transfer_matrix = uncertain_label(n_classes)
    #print(transfer_matrix)
    for i in range(len(y_pred)):
        if y_pred[i] < n_classes:
            # Plain class prediction: full credit if exact, else an error.
            if y_pred[i] == y_true[i]:
                benefit_value += 1
            else:
                Re += 1
        if y_pred[i] >= n_classes:
            # Meta prediction: partial credit (1/A)**gamma when the true class
            # belongs to the pair encoded by this meta label.
            if y_true[i] in np.argwhere(transfer_matrix == y_pred[i])[0, :]:
                benefit_value += (1 / A)**gamma
                Ri += 1
            else:
                Re += 1
    bt = round(benefit_value / len(y_pred), 4)
    Re = round(Re / len(y_pred), 4)
    Ri = round(Ri / len(y_pred), 4)
    matrix = metrics.confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))
    print(matrix)
    # Macro-averaged precision/recall/F1 over the base classes only.
    precision = 0
    recall = 0
    f_measure1 = 0
    fp = 0
    fn = 0
    for i in range(n_classes):
        TP = matrix[i, i]
        FP = matrix[:, i].sum() - matrix[i, i]
        FN = matrix[i, :].sum() - matrix[i, i]
        TN = len(y_pred) - TP - FP - FN
        fp += FP
        fn += FN
        if TP == 0 and FP == 0:
            P = 0
        else:
            P = TP / (TP + FP)
        if TP == 0 and FN == 0:
            R = 0
        else:
            R = TP / (TP + FN)
        if P == 0 and R == 0:
            F1 = 0
        else:
            F1 = 2 * P * R / (P + R)
        precision += P
        recall += R
        f_measure1 += F1
    e_precision = round(precision / n_classes, 4)
    e_recall = round(recall / n_classes, 4)
    e_f_measure1 = round(f_measure1 / n_classes, 4)
    # "E-" prefix marks evidential metrics (meta labels were present).
    if max(y_pred) < n_classes:
        print(
            f'Re:{Re} Ri:{Ri} Ra:{round(1-Re-Ri, 4)} P:{e_precision} R:{e_recall} F1:{e_f_measure1} Bt:{bt}'
        )
    else:
        print(
            f'Re:{Re} Ri:{Ri} Ra:{round(1-Re-Ri, 4)} E-P:{e_precision} E-R:{e_recall} E-F1:{e_f_measure1} Bt:{bt}'
        )
    return Re, Ri, e_precision, e_recall, e_f_measure1, bt
|
from django.shortcuts import render
from django.http import HttpResponse
from requestHandler.models import User, Song, SystemSetting
from .forms import updateForm
import json
# User model attribute names copied verbatim between the update form and the model.
fields = ['FirstName',
          'LastName',
          'Email',
          'passWord']
def requestInfo(request):
    """Return JSON describing every user (names, image path, favourite song)
    for the facial-recognition client.

    NOTE(review): endpoint is unauthenticated — "security measures might be
    implemented in the future" per the original comment.
    """
    users = User.objects.all()
    result = {}
    for user in users:
        info = {}
        info['firstName'] = user.FirstName
        info['lastName'] = user.LastName
        try:
            info['image'] = user.Image.path
        except Exception:
            # Fix: was a bare `except:` which also swallows SystemExit /
            # KeyboardInterrupt. Accessing .path raises when no file is set.
            info['image'] = 'NULL'
        try:
            info['FavouriteSongName'] = user.FavouriteSong.SongName
            info['FavouriteSongPath'] = user.FavouriteSong.File.path
        except Exception:
            info['FavouriteSongName'] = 'NULL'
            info['FavouriteSongPath'] = 'NULL'
        result[user.FirstName + user.LastName] = info
    return HttpResponse(json.dumps(result), content_type="application/json")
def requestLoginInfo(request):
    """Return JSON of login data (email, password, id) keyed by email.

    NOTE(review): this exposes stored passwords to any caller — confirm the
    endpoint is protected upstream.
    """
    result = {
        user.Email: {'Email': user.Email, 'passWord': user.passWord, 'id': user.id}
        for user in User.objects.all()
    }
    return HttpResponse(json.dumps(result), content_type="application/json")
def requestUpdateUserInfo(request):
    """GET: render the pre-filled user-update form; POST: validate and persist edits."""
    if request.method == 'GET':
        userId = request.GET["userId"]
        user = User.objects.get(id=userId)
        # Seed the form with the user's current field values.
        initials = {}
        for field in fields:
            initials[field] = user.__dict__[field]
        initials['FavouriteSong'] = user.FavouriteSong
        initials['Image'] = user.Image
        initials['userId'] = user.id
        initials['passWord'] = user.passWord
        form = updateForm(initial=initials)
        # NOTE(review): 'readonly' is only an HTML widget attribute — confirm
        # the Email field is also protected server-side.
        form.fields['Email'].widget.attrs['readonly'] = True
        context = {"form": form}
        return render(request, "requestHandler/updateInfo.html", context)
    elif request.method == 'POST':
        form = updateForm(request.POST, request.FILES)
        if form.is_valid():
            userId = form.cleaned_data['userId']
            user = User.objects.get(id=userId)
            if user is not None:
                # Copy the cleaned values straight into the instance __dict__.
                for field in fields:
                    user.__dict__[field] = form.cleaned_data[field]
                user.save()
                return HttpResponse("update saved!")
            else:
                return HttpResponse("user not found")
        else:
            return HttpResponse("invalid information")
def getSettings(request):
    """Return the first SystemSetting row as JSON ("Null"/"NULL" placeholders when unset)."""
    if request.method == 'GET':
        settingObj = SystemSetting.objects.all()
        result = {}
        if (len(settingObj) > 0):
            setting = settingObj[0]
            result["defaultBehavior"] = setting.DefaultBehavior
            result["defaultSong"] = setting.DefaultSong.SongName if setting.DefaultSong is not None else "Null"
            return HttpResponse(json.dumps(result), content_type="application/json")
        else:
            result["defaultBehavior"] = "Null"
            result["defaultSong"] = "NULL"
            return HttpResponse(json.dumps(result), content_type="application/json")
    # NOTE(review): non-GET requests fall through and return None (a Django
    # 500) — confirm whether that is intended.
def getSongs(request):
    """Return JSON mapping each song name to its file path ("None" when no file)."""
    if request.method == 'GET':
        result = {
            song.SongName: song.File.path if song.File is not None else "None"
            for song in Song.objects.all()
        }
        return HttpResponse(json.dumps(result), content_type="application/json")
|
import json
import sys
from cluster_clients import *
import logging
STATUS = b'UNKNOWN'  # module-level status placeholder (bytes); not referenced in this view — TODO confirm consumer
def diff_hub_IDs_and_opssight_IDs(hub_IDs, opssight_IDs):
    """Partition the two ID collections into (hub-only, shared, opssight-only) sets."""
    hub_set, opssight_set = set(hub_IDs), set(opssight_IDs)
    hub_only = hub_set - opssight_set
    shared = hub_set & opssight_set
    opssight_only = opssight_set - hub_set
    return hub_only, shared, opssight_only
def assess_opssight(hubs, opssight, k8s):
    """Print an image-scan coverage report comparing the cluster, OpsSight, and the Hubs.

    Args:
        hubs: iterable of Hub clients exposing get_code_locations_names().
        opssight: OpsSight client exposing get_shas_names().
        k8s: Kubernetes client exposing get_images().
    """
    print("\n")
    # Get Total Images in Cluster
    cluster_images = k8s.get_images()
    print("Images In Your Kubernetes Cluster :", len(cluster_images))
    # Get Scanned Images from OpsSight
    opssight_IDs = opssight.get_shas_names()
    num_opssight_IDs = len(opssight_IDs)
    print("Images in OpsSight:", num_opssight_IDs)
    # Get Scanned Images from Hubs
    total_hub_IDs = []
    num_total_hub_IDs = 0
    for hub in hubs:
        hub_IDs = hub.get_code_locations_names()
        # NOTE(review): label says "Hub1" for every hub in the loop — kept for
        # output compatibility; consider numbering the hubs.
        print("Total Images Scanned in Hub1 :", len(hub_IDs))
        num_total_hub_IDs += len(hub_IDs)
        total_hub_IDs.extend(hub_IDs)
    print("Cumulative Images Scanned by Hubs:", num_total_hub_IDs)
    # Analyze Scanned Images
    IDs_hub_only, IDs_hub_and_opssight, IDs_opssight_only = diff_hub_IDs_and_opssight_IDs(total_hub_IDs, opssight_IDs)
    # Display Ship-It Policy Results
    print("\n")
    print("***************************************")
    print("OpsSight Test Policy : "+str(100)+"% Image coverage to ship.")
    # Fix: coverage was computed as a 0-1 fraction but printed as a percentage
    # and compared against 100.0, so the PASS branch was unreachable. Scale to
    # percent, and guard against an empty cluster (ZeroDivisionError).
    if cluster_images:
        coverage = 100.0 * len(IDs_opssight_only) / float(len(cluster_images))
    else:
        coverage = 0.0
    print("OpsSight Coverage : %.2f%% Image Coverage" % coverage)
    print("***************************************")
    if coverage < 100.0:
        print("OpsSight Test Result: No Automated Release is Possible at this time.")
    else:
        print("OpsSight Test Result: PASS")
    print("***************************************")
def main():
    """Entry point: read the JSON config, build the clients, run the OpsSight assessment."""
    if len(sys.argv) < 2:
        print("USAGE:")
        print("python3 run.py <config_file_path>")
        sys.exit("Wrong Number of Parameters")
    logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
    logging.debug("Starting Tests")
    # Read parameters from config file
    test_config_path = sys.argv[1]
    test_config_json = None
    with open(test_config_path) as f:
        test_config_json = json.load(f)
    opssight_url = test_config_json["PerceptorURL"]
    hub_url = test_config_json["HubURL"]
    port = test_config_json["Port"]
    usr = test_config_json["Username"]
    password = test_config_json["Password"]
    # Create Kubernetes, OpsSight, and Hub Clients
    k8s_client = K8sClientWrapper()
    opssight_client = OpsSightClient(opssight_url)
    hub_client1 = HubClient(hub_url, usr, password)
    # NOTE(review): hub clients 2-4 use hard-coded environment hosts with the
    # config-file credentials — consider moving these into the config file.
    hub_client2 = HubClient("int-eric-int-eric.10.1.176.130.xip.io", usr, password)
    hub_client3 = HubClient("jim-emea-scaffold-jim-emea-scaffold.10.1.176.130.xip.io", usr, password)
    hub_client4 = HubClient("hammerp-hammerp.10.1.176.130.xip.io", usr, password)
    # TO DO: Testing...
    # Display OpsSight Assessment Test
    assess_opssight([hub_client1,hub_client2,hub_client3,hub_client4], opssight_client, k8s_client)
# NOTE(review): runs on import too — there is no `if __name__ == "__main__":` guard.
main()
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import re
import json
import requests
from api.auth import get_auth
from cryptography.fernet import Fernet, InvalidToken
import settings
from google_helpers.bigquery.bq_support import BigQuerySupport
logger = logging.getLogger('main_logger')
BLACKLIST_RE = settings.BLACKLIST_RE
cipher_suite = Fernet(settings.PAGE_TOKEN_KEY)
def encrypt_pageToken(email, jobReference, next_page):
    """Serialize (email, jobReference, next_page) to JSON and encrypt it into
    an opaque, URL-safe page token string."""
    payload = {
        'email': email,
        'jobReference': jobReference,
        'next_page': next_page,
    }
    # Fernet operates on bytes; decode back to str so the token can be
    # embedded in a JSON response.
    return cipher_suite.encrypt(json.dumps(payload).encode()).decode()
def decrypt_pageToken(email, cipher_jobReference):
    """Decrypt a page token and return its payload (minus the email) as a dict.

    Returns {} when the token is invalid or was issued to a different email.
    """
    try:
        plain_jobDescription = cipher_suite.decrypt(cipher_jobReference.encode())
    except InvalidToken:
        logger.error("Could not decrypt token: {}".format(cipher_jobReference))
        return {}
    jobDescription = json.loads(plain_jobDescription.decode())
    if jobDescription["email"] != email:
        # Caller's email doesn't match what was encrypted
        logger.error("Caller's email, {}, doesn't match what was encrypted: {}".format(
            email, jobDescription['email']))
        return {}
    jobDescription.pop('email')
    return jobDescription
def submit_BQ_job(sql_string, params):
    """Start the BigQuery job and return its status/info without fetching rows
    (no_results=True)."""
    results = BigQuerySupport.execute_query_and_fetch_results(sql_string, params, no_results=True)
    return results
def perform_query(request, func, url, data=None, user=None):
    """Validate request parameters, proxy the request to `url` via `func`
    (requests.get/post), start the resulting BigQuery job, and return the
    first page of results.

    Returns a query_info dict; on failure it carries 'message' and 'code'.
    'next_page' holds an encrypted continuation token ("" when exhausted).
    """
    query_info = {}
    path_params = {
        "sql": False,
    }
    path_booleans = []
    path_integers = []
    local_params = {
        "page_size": 1000
    }
    local_booleans = []
    local_integers = ["page_size"]
    jobReference = {}
    next_page = ""
    access_methods = ["url", "guid"]
    try:
        # The next_page handling below was disabled; see query_next_page for
        # the live version of this flow.
        # if 'next_page' in request.args and \
        #     not request.args.get('next_page') in ["", None]:
        #     # We have a non-empty next_page token
        #     jobDescription = decrypt_pageToken(user, request.args.get('next_page'))
        #     if jobDescription == {}:
        #         query_info = dict(
        #             message="Invalid next_page token {}".format(request.args.get('next_page')),
        #             code=400
        #         )
        #         return query_info
        #     else:
        #         jobReference = jobDescription['jobReference']
        #         next_page = jobDescription['next_page']
        #
        #         # If next_page is empty, then we timed out on the previous pass
        #         if not next_page:
        #             job_status = BigQuerySupport.wait_for_done(query_job={'jobReference':jobReference})
        #
        #             # Decide how to proceed depending on job status (DONE, RUNNING, ERRORS)
        #             query_info = is_job_done(job_status, query_info, jobReference, user)
        #             if "message" in query_info:
        #                 return query_info
        #         query_info = dict(
        #             # cohort = {},
        #         )
        # else:
        if True:
            # Validate most params only on initial request; ignore on next_page requests
            query_info = validate_keys(request, query_info, {**path_params, **local_params})
            query_info = validate_parameters(request, query_info, path_params, path_booleans, path_integers, user)
            if query_info:
                return query_info
            auth = get_auth()
            if func == requests.post:
                results = func(url, params=path_params, json=data, headers=auth)
            else:
                results = func(url, params=path_params, headers=auth)
            query_info = results.json()
            if "message" in query_info:
                return query_info
            # Start the BQ job, but don't get any data results, just the job info.
            job_status = submit_BQ_job(query_info['query']['sql_string'],
                                       query_info['query']['params'])
            jobReference = job_status['jobReference']
            # Decide how to proceed depending on job status (DONE, RUNNING, ERRORS)
            query_info = is_job_done(job_status, query_info, jobReference, user)
            if "message" in query_info:
                return query_info
            # print(("[STATUS] query_info with job_ref: {}").format(query_info))
        # Validate "local" params on initial and next_page requests
        query_info = validate_parameters(request, query_info, local_params, local_booleans, local_integers, None)
        if "message" in query_info:
            return query_info
        query_info, next_page = get_query_job_results(query_info,
                                                      local_params['page_size'],
                                                      jobReference,
                                                      next_page)
        # Hand back an encrypted continuation token when more rows remain.
        if next_page:
            cipher_pageToken = encrypt_pageToken(user, jobReference,
                                                 next_page)
        else:
            cipher_pageToken = ""
        query_info['next_page'] = cipher_pageToken
    except Exception as e:
        logger.exception(e)
        query_info = dict(
            message='[ERROR] get_query(): Error trying to preview a cohort',
            code=400)
    return query_info
def query_next_page(request, user):
    """Return the next page of results for a previously submitted BQ query job.

    The caller supplies an encrypted ``next_page`` token (produced by
    encrypt_pageToken on a prior request).  The token is decrypted to recover
    the BigQuery jobReference and the BQ-native page token; if the BQ page
    token is empty, the original job timed out and we first wait for it to
    complete before fetching the first page.

    Returns a query_info dict; on failure it contains "message" and "code".
    """
    query_info = {}
    local_params = {
        "page_size": 1000
    }
    local_booleans = []
    local_integers = ["page_size"]
    jobReference = {}
    next_page = ""
    try:
        if 'next_page' in request.args and \
                not request.args.get('next_page') in ["", None]:
            # We have a non-empty next_page token
            jobDescription = decrypt_pageToken(user, request.args.get('next_page'))
            if jobDescription == {}:
                query_info = dict(
                    message="Invalid next_page token {}".format(request.args.get('next_page')),
                    code=400
                )
                return query_info
            else:
                jobReference = jobDescription['jobReference']
                next_page = jobDescription['next_page']
                # If next_page is empty, then we timed out on the previous pass
                if not next_page:
                    job_status = BigQuerySupport.wait_for_done(query_job={'jobReference': jobReference})
                    # Decide how to proceed depending on job status (DONE, RUNNING, ERRORS)
                    query_info = is_job_done(job_status, query_info, jobReference, user)
                    if "message" in query_info:
                        return query_info
                    query_info = dict(
                        cohort={},
                    )
        else:
            query_info = dict(
                message="Invalid next_page token {}".format(request.args.get('next_page')),
                code=400
            )
            return query_info
        # Validate "local" params on initial and next_page requests
        query_info = validate_parameters(request, query_info, local_params, local_booleans, local_integers, None)
        if "message" in query_info:
            return query_info
        query_info, next_page = get_query_job_results(query_info,
                                                      local_params['page_size'],
                                                      jobReference,
                                                      next_page)
        # Re-encrypt the BQ page token (empty token means no further pages)
        if next_page:
            cipher_pageToken = encrypt_pageToken(user, jobReference, next_page)
        else:
            cipher_pageToken = ""
        query_info['next_page'] = cipher_pageToken
    except Exception as e:
        logger.exception(e)
        # FIX: message previously named get_query(); corrected to this function
        query_info = dict(
            message='[ERROR] query_next_page(): Error trying to get the next page of query results',
            code=400)
    return query_info
def perform_fixed_query(request, sql, user=None):
    """Submit a fixed (caller-supplied) SQL string to BigQuery and return the
    first page of results.

    Unlike the cohort-preview path, the SQL is passed in directly and run with
    no query parameters.  Pagination state for subsequent pages is returned in
    the encrypted 'next_page' token of the result.

    Returns a query_info dict; on failure it contains "message" and "code".
    """
    query_info = {}
    path_params = {}
    path_booleans = []
    path_integers = []
    local_params = {
        "page_size": 1000
    }
    local_booleans = []
    local_integers = ["page_size"]
    jobReference = {}
    next_page = ""
    try:
        # Validate request keys and path params; any validation failure
        # returns a non-empty dict carrying "message"/"code".
        query_info = validate_keys(request, query_info, {**path_params, **local_params})
        query_info = validate_parameters(request, query_info, path_params, path_booleans, path_integers, user)
        if query_info != {}:
            return query_info
        # Start the BQ job, but don't get any data results, just the job info.
        job_status = submit_BQ_job(sql, [])
        jobReference = job_status['jobReference']
        # Decide how to proceed depending on job status (DONE, RUNNING, ERRORS)
        query_info = is_job_done(job_status, query_info, jobReference, user)
        if "message" in query_info:
            # The job did not complete in time, or there was some other issue.
            return query_info
        # Validate "local" params on initial and next_page requests
        query_info = validate_parameters(request, query_info, local_params, local_booleans, local_integers, None)
        if "message" in query_info:
            return query_info
        query_info, next_page = get_query_job_results(query_info,
                                                      local_params['page_size'],
                                                      jobReference,
                                                      next_page)
        if next_page:
            cipher_pageToken = encrypt_pageToken(user, jobReference, next_page)
        else:
            cipher_pageToken = ""
        query_info['next_page'] = cipher_pageToken
    except Exception as e:
        logger.exception(e)
        # FIX: message previously named get_query(); corrected to this function
        query_info = dict(
            message='[ERROR] perform_fixed_query(): Error trying to run a fixed query',
            code=400)
    return query_info
def perform_fixed_query_next_page(request, user=None):
    """Return the next page of results for a fixed query previously started
    by perform_fixed_query().

    Mirrors query_next_page(): the encrypted ``next_page`` token is decrypted
    to recover the BQ jobReference; an empty inner page token means the
    original job timed out, so we wait for it to finish first.

    Returns a query_info dict; on failure it contains "message" and "code".
    """
    query_info = {}
    local_params = {
        "page_size": 1000
    }
    local_booleans = []
    local_integers = ["page_size"]
    jobReference = {}
    next_page = ""
    try:
        if 'next_page' in request.args and \
                not request.args.get('next_page') in ["", None]:
            # We have a non-empty next_page token
            jobDescription = decrypt_pageToken(user, request.args.get('next_page'))
            if jobDescription == {}:
                query_info = dict(
                    message="Invalid next_page token {}".format(request.args.get('next_page')),
                    code=400
                )
                return query_info
            else:
                jobReference = jobDescription['jobReference']
                next_page = jobDescription['next_page']
                # If next_page is empty, then we timed out on the previous pass
                if not next_page:
                    job_status = BigQuerySupport.wait_for_done(query_job={'jobReference': jobReference})
                    # Decide how to proceed depending on job status (DONE, RUNNING, ERRORS)
                    query_info = is_job_done(job_status, query_info, jobReference, user)
                    if "message" in query_info:
                        return query_info
                    query_info = dict(
                        cohort={},
                    )
        else:
            query_info = dict(
                message="Invalid next_page token {}".format(request.args.get('next_page')),
                code=400
            )
            return query_info
        # Validate "local" params on initial and next_page requests
        query_info = validate_parameters(request, query_info, local_params, local_booleans, local_integers, None)
        if "message" in query_info:
            return query_info
        query_info, next_page = get_query_job_results(query_info,
                                                      local_params['page_size'],
                                                      jobReference,
                                                      next_page)
        if next_page:
            cipher_pageToken = encrypt_pageToken(user, jobReference, next_page)
        else:
            cipher_pageToken = ""
        query_info['next_page'] = cipher_pageToken
    except Exception as e:
        logger.exception(e)
        # FIX: message previously named get_query(); corrected to this function
        query_info = dict(
            message='[ERROR] perform_fixed_query_next_page(): Error trying to get the next page of query results',
            code=400)
    return query_info
def is_job_done(job_is_done, query_info, jobReference, user):
    """Interpret a BigQuery job-status object and update query_info.

    Three outcomes:
      * job DONE with errors  -> return a {"message", "code": 500} dict
      * job DONE successfully -> strip the 'query' key and return query_info
      * job not done (timeout) -> return a 202 dict carrying an encrypted
        next_page token so the caller can retry once the job completes

    Note: query_info is mutated in place (the 'query' key is removed).
    """
    if job_is_done and job_is_done['status']['state'] == 'DONE':
        if 'status' in job_is_done and 'errors' in job_is_done['status']:
            job_id = job_is_done['jobReference']['jobId']
            logger.error("[ERROR] During query job {}: {}".format(job_id, str(
                job_is_done['status']['errors'])))
            # FIX: query_info may not carry a 'query' key (e.g. fixed queries
            # pass an empty dict); use .get() so logging itself can't KeyError.
            logger.error("[ERROR] Error'd out query: {}".format(
                query_info.get('query', {}).get('sql_string')))
            return dict(
                message="[ERROR] During query job {}: {}".format(job_id, str(
                    job_is_done['status']['errors'])),
                code=500)
        else:
            # Don't return the query in this form
            query_info.pop('query', None)
    else:
        # We timed out waiting for the BQ job to complete.
        # Return the job ref so that the user can get the results when the job completes.
        # Don't return the query in this form
        query_info.pop('query', None)
        logger.error("[ERROR] API query took longer than the allowed time to execute. " +
                     "Retry the query using the next_page token.")
        # Empty inner token signals "job not finished yet" to the next request
        cipher_pageToken = encrypt_pageToken(user, jobReference, "")
        query_info['next_page'] = cipher_pageToken
        query_info["cohortObjects"] = {
            "totalFound": 0,
            "rowsReturned": 0,
            "collections": [],
        }
        return dict(
            message="[ERROR] API query took longer than the allowed time to execute. " +
                    "Retry the query using the next_page token.",
            query_info=query_info,
            code=202)
    return query_info
# Check if there are invalid keys
def validate_keys(request, query_info, params):
    """Validate the request's query-string keys.

    Flags keys containing blacklisted characters and keys not present in the
    accepted ``params`` mapping.  On failure, query_info is replaced with a
    dict holding "message" and code 400 (the last offending key wins, matching
    the original behavior); otherwise query_info is returned unchanged.
    """
    blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
    for key in request.args.keys():
        match = blacklist.search(str(key))
        if match:
            # FIX: the original built this via str(key, match) (a TypeError)
            # and never filled the first placeholder; format the key and the
            # offending text separately.
            query_info = dict(
                message="Key {} contains invalid characters; please edit and resubmit. "
                        "[Saw {}]".format(str(key), match.group()),
                code=400
            )
        if key not in params:
            query_info = dict(
                message="Invalid key {}".format(key),
                code=400
            )
    return query_info
def validate_parameters(request, query_info, params, booleans, integers, user):
    """Merge request query-string values into ``params`` and coerce types.

    ``params`` (a dict of defaults) is mutated in place: request-supplied
    values override the defaults, integer params are converted to int (a 400
    "message" is placed in query_info on failure), and boolean params are
    normalized to bool.  ``user``, when given, is stored under "email".

    Returns query_info (unchanged on success).
    """
    if user:
        params["email"] = user
    # Override defaults with any values supplied on the request
    for key in params:
        if key in request.args:
            params[key] = request.args.get(key)
    # FIX: coerce integers AFTER merging request values -- previously only the
    # defaults were converted, so user-supplied integer params stayed strings
    # and were never validated.
    try:
        for param in integers:
            params[param] = int(params[param])
    except ValueError:
        query_info = dict(
            message="Parameter {} must have an integer value".format(param),
            code=400
        )
    for param in booleans:
        params[param] = params[param] in [True, 'True']
    return query_info
def get_query_job_results(query_info, maxResults, jobReference, next_page):
    """Fetch one page of rows for a BQ job and attach them to query_info.

    Returns (query_info, next_page_token); an empty token means the last
    page has been reached.
    """
    page = BigQuerySupport.get_job_result_page(job_ref=jobReference,
                                               page_token=next_page,
                                               maxResults=maxResults)
    field_names = [field['name'] for field in page['schema']['fields']]
    current_rows = page['current_page_rows']
    query_info["query_results"] = {
        'totalFound': int(page['totalFound']),
        'rowsReturned': len(current_rows),
        'json': form_rows_json(current_rows, field_names),
    }
    return query_info, page['next_page']
def form_rows_json(data, schema_names):
    """Convert raw BQ result rows into a list of {column_name: value} dicts."""
    return [
        dict(zip(schema_names, (unpack(cell) for cell in row['f'])))
        for row in data
    ]
def unpack(val):
    """Unwrap a BigQuery cell: a scalar 'v', or a list of nested 'v' values."""
    inner = val['v']
    if isinstance(inner, list):
        return [item['v'] for item in inner]
    return inner
|
from app import db
from flask import Blueprint
from flask import render_template
from werkzeug.exceptions import BadGateway
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import InternalServerError
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.exceptions import NotFound
from werkzeug.exceptions import RequestTimeout
from werkzeug.exceptions import ServiceUnavailable
# Blueprint collecting the app-wide HTTP error handlers
bp = Blueprint("error", __name__)
@bp.app_errorhandler(NotFound)
@bp.app_errorhandler(BadRequest)
@bp.app_errorhandler(BadGateway)
@bp.app_errorhandler(RequestTimeout)
@bp.app_errorhandler(MethodNotAllowed)
@bp.app_errorhandler(ServiceUnavailable)
@bp.app_errorhandler(InternalServerError)
def handle_exception(e):
    """Render the generic error page for any registered HTTP error."""
    status = e.code
    if status == 500:
        # Roll back any half-finished transaction on server errors so the
        # session isn't poisoned for subsequent requests.
        db.session.rollback()
    body = render_template("error/generic.html", e=e)
    return body, status
|
from .constraints import *
from .variables import *
from .relaxation import *
from .utils import * |
from builtins import ValueError, isinstance
import json
import math
import os
from os.path import exists, join
from time import time, sleep
import ipdb
import torch
from torch.nn import functional as F
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.videocaption_eval import decode_sequence
from horovod import torch as hvd
import numpy as np
from PIL import Image
# import misc.utils as utils
# from utils.aic_evaler import AICEvaler
# from utils.coco_evaler import COCOEvaler
def validate(model, val_dataloaders, opts, global_step):
    """Run validation for every task's loader and log metrics to TensorBoard."""
    model.eval()
    for task, loader in val_dataloaders.items():
        LOGGER.info(f"validate on {task} task")
        assert 'Two' in task or 'Three' in task
        task_name = task.split('--')[0]
        if 'Two' in task:
            task_log = validate_2m(model, loader, task_name)
        else:
            task_log = validate_3m(model, loader, task_name)
        TB_LOGGER.log_scaler_dict(
            {f'valid_{task}/{k}': v for k, v in task_log.items()})
    model.train()
@torch.no_grad()
def validate_2m(model, val_loader, task):
    """Validate two-modality (text + video) pretraining subtasks.

    ``task`` is an underscore-joined list of subtask names; each recognized
    subtask (contraTwo, mlmTwo, unimlmTwo, matchTwo) accumulates its own
    statistics over the loader and contributes metrics to the returned dict.

    Returns:
        dict mapping metric name (e.g. 'mlm_acc', 't2v_r1') to value.
    """
    LOGGER.info("start running {} validation...".format(task))
    n_correct = 0          # MLM: correctly predicted masked tokens
    n_word = 0             # MLM: total masked tokens
    n_correct_caption = 0  # uni-MLM (captioning): correct tokens
    n_word_caption = 0     # uni-MLM: total tokens
    tot_score = 0          # matching: correct match predictions
    n_ex = 0               # matching: total examples
    txt_feature = []       # contrastive: per-batch normalized text features
    video_feature = []     # contrastive: per-batch normalized video features
    val_log = {}
    for i, batch in enumerate(val_loader):
        evaluation_dict= model(batch, task=task, compute_loss=False)
        if 'contraTwo' in task.split('_'):
            normalized_txt = evaluation_dict['normalized_txt']
            normalized_video = evaluation_dict['normalized_video']
            txt_feature.append(normalized_txt)
            video_feature.append(normalized_video)
        if 'mlmTwo' in task.split('_'):
            prediction_scores = evaluation_dict['prediction_scores']
            txt_labels = evaluation_dict['txt_labels']
            # -1 marks unmasked positions; they don't count toward accuracy
            txt_labels = txt_labels[txt_labels != -1]
            n_correct += (prediction_scores.max(dim=-1)[1] == txt_labels).sum().item()
            n_word += txt_labels.numel()
        if 'unimlmTwo' in task.split('_'):
            prediction_scores_caption = evaluation_dict['prediction_scores_caption']
            txt_labels_caption = evaluation_dict['txt_labels_caption']
            txt_labels_caption = txt_labels_caption[txt_labels_caption != -1]
            n_correct_caption += (prediction_scores_caption.max(dim=-1)[1] == txt_labels_caption).sum().item()
            n_word_caption += txt_labels_caption.numel()
        if 'matchTwo' in task.split('_'):
            vtm_scores = evaluation_dict['vtm_scores']
            ground_truth = evaluation_dict['ground_truth']
            predictions = vtm_scores.max(dim = 1 )[1]
            tot_score += (predictions.cpu().numpy() == ground_truth.cpu().numpy()).sum()
            n_ex += len(ground_truth)
    # Reduce the raw counters across all horovod workers before computing
    # global metrics.
    if 'mlmTwo' in task.split('_'):
        n_correct = sum(all_gather_list(n_correct))
        n_word = sum(all_gather_list(n_word))
        mlm_acc = n_correct / n_word
        val_log['mlm_acc'] = mlm_acc
    if 'unimlmTwo' in task.split('_'):
        n_correct_caption = sum(all_gather_list(n_correct_caption))
        n_word_caption = sum(all_gather_list(n_word_caption))
        unimlm_acc = n_correct_caption / n_word_caption
        val_log['unimlm_acc'] = unimlm_acc
    if 'matchTwo' in task.split('_'):
        tot_score = sum(all_gather_list(tot_score))
        n_ex = sum(all_gather_list(n_ex))
        match_acc = tot_score / n_ex
        val_log['match_acc'] = match_acc
    if 'contraTwo' in task.split('_'):
        txt_feature = torch.cat(txt_feature, dim = 0)
        video_feature = torch.cat(video_feature, dim = 0)
        # Gather features from every worker so retrieval is over the full set
        all_txt_feature = hvd.allgather(txt_feature)
        all_video_feature = hvd.allgather(video_feature)
        score_matrix_tv = torch.matmul(all_txt_feature, all_video_feature.permute(1,0))
        t2v_r1, v2t_r1 = compute_r1(score_matrix_tv)
        val_log['t2v_r1'] = t2v_r1*100
        #val_log['v2t_r1'] = v2t_r1*100
    LOGGER.info(val_log)
    return val_log
@torch.no_grad()
def validate_3m(model, val_loader, task):
    """Validate three-modality (text + video + audio) pretraining subtasks.

    Mirrors validate_2m but additionally tracks the "woaudio" variants (the
    same predictions computed without the audio stream) and the
    text-to-(video+audio) retrieval metric.

    Returns:
        dict mapping metric name to value.
    """
    LOGGER.info("start running {} validation...".format(task))
    n_correct = 0
    n_correct_woaudio = 0           # MLM correct count without audio input
    n_word = 0
    n_correct_caption = 0
    n_word_caption = 0
    n_correct_caption_woaudio = 0
    n_word_caption_woaudio = 0
    txt_feature = []
    video_feature = []
    va_feature = []                 # fused video+audio features
    val_log = {}
    for i, batch in enumerate(val_loader):
        evaluation_dict= model(batch, task=task, compute_loss=False)
        if 'contraThree' in task.split('_'):
            feat_t = evaluation_dict['normalized_txt']
            feat_v = evaluation_dict['normalized_video']
            feat_va = evaluation_dict['normalized_va']
            txt_feature.append(feat_t)
            video_feature.append(feat_v)
            va_feature.append(feat_va)
        if 'mlmThree' in task.split('_'):
            prediction_scores = evaluation_dict['prediction_scores']
            txt_labels = evaluation_dict['txt_labels']
            # Fall back to the with-audio scores when the model does not
            # produce a separate no-audio prediction
            prediction_scores_woaudio = evaluation_dict.get('prediction_scores_woaudio', prediction_scores)
            # -1 marks unmasked positions; they don't count toward accuracy
            txt_labels = txt_labels[txt_labels != -1]
            n_correct += (prediction_scores.max(dim=-1)[1] == txt_labels).sum().item()
            n_correct_woaudio += (prediction_scores_woaudio.max(dim=-1)[1] == txt_labels).sum().item()
            n_word += txt_labels.numel()
        if 'unimlmThree' in task.split('_'):
            prediction_scores_caption = evaluation_dict['prediction_scores_caption']
            txt_labels_caption = evaluation_dict['txt_labels_caption']
            # "_two" keys hold the no-audio variants when the model emits them
            prediction_scores_caption_woaudio = evaluation_dict.get('prediction_scores_caption_two', prediction_scores_caption)
            txt_labels_caption_woaudio = evaluation_dict.get('txt_labels_caption_two',txt_labels_caption)
            txt_labels_caption = txt_labels_caption[txt_labels_caption != -1]
            txt_labels_caption_woaudio = txt_labels_caption_woaudio[txt_labels_caption_woaudio != -1]
            n_correct_caption += (prediction_scores_caption.max(dim=-1)[1] == txt_labels_caption).sum().item()
            n_word_caption += txt_labels_caption.numel()
            n_correct_caption_woaudio += (prediction_scores_caption_woaudio.max(dim=-1)[1] == txt_labels_caption_woaudio).sum().item()
            n_word_caption_woaudio += txt_labels_caption_woaudio.numel()
    # Reduce the raw counters across all horovod workers before computing
    # global metrics.
    if 'mlmThree' in task.split('_'):
        n_correct = sum(all_gather_list(n_correct))
        n_correct_woaudio = sum(all_gather_list(n_correct_woaudio))
        n_word = sum(all_gather_list(n_word))
        mlm_acc = n_correct / n_word
        mlm_acc_woaudio = n_correct_woaudio / n_word
        val_log['mlm_acc'] = mlm_acc
        val_log['mlm_acc_woaudio'] = mlm_acc_woaudio
    if 'unimlmThree' in task.split('_'):
        n_correct_caption = sum(all_gather_list(n_correct_caption))
        n_word_caption = sum(all_gather_list(n_word_caption))
        unimlm_acc = n_correct_caption / n_word_caption
        n_correct_caption_woaudio = sum(all_gather_list(n_correct_caption_woaudio))
        n_word_caption_woaudio = sum(all_gather_list(n_word_caption_woaudio))
        unimlm_acc_woaudio = n_correct_caption_woaudio / n_word_caption_woaudio
        val_log['unimlm_acc'] = unimlm_acc
        val_log['unimlm_acc_woaudio'] = unimlm_acc_woaudio
    if 'contraThree' in task.split('_'):
        txt_feature = torch.cat(txt_feature, dim = 0)
        video_feature = torch.cat(video_feature, dim = 0)
        va_feature = torch.cat(va_feature, dim = 0)
        # Gather features from every worker so retrieval is over the full set
        all_txt_feature = hvd.allgather(txt_feature)
        all_video_feature = hvd.allgather(video_feature)
        all_va_feature = hvd.allgather(va_feature)
        score_matrix_tv = torch.matmul(all_txt_feature, all_video_feature.permute(1,0))
        score_matrix_t_va = torch.matmul(all_txt_feature, all_va_feature.permute(1,0))
        t2v_r1, v2t_r1 = compute_r1(score_matrix_tv)
        t2va_r1, va2t_r1 = compute_r1(score_matrix_t_va)
        val_log['t2v_r1'] = t2v_r1*100
        val_log['t2va_r1'] = t2va_r1*100
        #val_log['v2t_r1'] = v2t_r1*100
    LOGGER.info(val_log)
    return val_log
def compute_r1(score_matrix):
    """Compute recall@1 for both retrieval directions.

    Args:
        score_matrix: (N, N) similarity matrix with texts along dim 0 and
            videos along dim 1; the ground-truth pairing is the diagonal.

    Returns:
        (video_retrieval_r1, text_retrieval_r1) as Python floats in [0, 1].

    Note: the original also computed r5/r10/medianR values that were never
    returned or used; that dead work has been removed.
    """
    size = len(score_matrix)
    device = score_matrix.device
    # text -> video retrieval: rank all videos for each text (row-wise)
    _, rank_txt = score_matrix.topk(size, dim=1)
    gt = torch.arange(size).long().to(device).unsqueeze(1).expand_as(rank_txt)
    rank = (rank_txt == gt).nonzero()[:, 1]
    vr_r1 = (rank < 1).sum().item() / size
    # video -> text retrieval: rank all texts for each video (column-wise)
    _, rank_video = score_matrix.topk(size, dim=0)
    gt = torch.arange(size).long().to(device).unsqueeze(0).expand_as(rank_video)
    rank = (rank_video == gt).nonzero()[:, 0]
    tr_r1 = (rank < 1).sum().item() / size
    return vr_r1, tr_r1
|
import eventlet
import socketio
import os
import sys
import sshtunnel
import time
# Socket.IO server running on the eventlet event loop, wrapped as a WSGI app
sio = socketio.Server(async_mode='eventlet')
app = socketio.Middleware(sio)
# temp: street-view downloader task handler
import apps.streetdownloader.server as srv
srv_handle=srv.handler()
# NOTE(review): not referenced in this file -- presumably read by the handler
# module; confirm before removing.
node_thread_number =4
if __name__ == '__main__':
    # Listening port defaults to 30021, overridable via the first CLI argument
    port = 30021
    if len(sys.argv) >= 2:
        port = int(sys.argv[1])
    print("starting at local port {0}...".format(port))
    # NOTE(review): srv_handle is an instance and is being *called* here --
    # assumes srv.handler() returns a namespace factory; confirm.
    sio.register_namespace(srv_handle('/task'))
    eventlet.wsgi.server(eventlet.listen(('', port)), app)
|
import asyncio
import urllib.parse as urlparse
import discord
import aiohttp
import lxml.html
import lxml.etree
from discord.ext import commands
import avaconfig as cfg
from .common import Cog
class AvaRSS(Cog):
    """Updates users when new Ava's Demon pages are released"""

    def __init__(self, bot):
        super().__init__(bot)
        # latest_page.txt persists the last announced page number across runs
        self.file_handle = open("latest_page.txt", "r+")
        # FIX: int('') raises ValueError, so the old `int(read()) or 0` could
        # never actually fall back to 0 on an empty file.
        contents = self.file_handle.read().strip()
        self.last_known_page = int(contents) if contents else 0
        # The feed carries its own XML encoding declaration, so lxml must
        # parse bytes with an explicit parser (see check_rss).
        self.reencode_parser = lxml.etree.XMLParser(encoding="utf-8")
        self.check_loop = None
        self.ready = False
        if self.bot.is_ready():
            # FIX: on_ready is a coroutine; calling it bare created a
            # never-awaited coroutine and the check loop never started.
            self.bot.loop.create_task(self.on_ready())

    async def on_ready(self):
        """Start the periodic RSS check loop (once per bot lifetime)."""
        if self.ready:
            return self.bot.logger.debug(
                "Bot already ready, not initialising loop again...")
        # FIX: self.ready was never set, so reconnects could spawn duplicate
        # check loops.
        self.ready = True
        self.bot.logger.info("Bot ready!")

        async def run_check():
            while True:
                self.bot.logger.info("Checking RSS automatically...")
                await self.check_rss()
                await asyncio.sleep(5 * 60)  # Check RSS every 5 min

        self.check_loop = self.bot.loop.create_task(run_check())

    async def check_rss(self):
        """Download the feed and announce pages newer than the last seen one."""
        self.bot.logger.info("Downloading RSS feed")
        async with self.bot.session.get("http://feeds.feedburner.com/AvasDemon?format=xml") as resp:
            original_text = await resp.text()
            if resp.status == 200:
                self.bot.logger.info("Downloaded RSS feed successfully!")
                # Ugh, we have to re-encode it because they have an encoding
                # declaration... See:
                # http://lxml.de/parsing.html#python-unicode-strings
                text_reencoded = original_text.encode("utf-8")
                parsed = lxml.etree.fromstring(
                    text_reencoded, parser=self.reencode_parser)
                # NOTE(review): cssselect on etree elements needs the
                # `cssselect` package at runtime -- confirm it is installed.
                links = parsed.cssselect("rss channel item link")
                # Newest-first list of {"number", "link"} dicts, one per item
                pages = [{
                    "number": self.parse_number(page_link.text),
                    "link": page_link.text
                } for page_link in links]
                if pages[0]["number"] == self.last_known_page:
                    self.bot.logger.info("No new pages")
                else:
                    self.bot.logger.info("Found a new page!")
                    new_pages = [
                        page for page in pages if page["number"] > self.last_known_page]
                    # Feed is newest-first, so [-1] is the oldest new page
                    await self.announce_pages(new_pages[-1], new_pages[0])
                    self.last_known_page = pages[0]["number"]
                    # Write back our stuff
                    self.file_handle.seek(0)
                    self.file_handle.write(str(self.last_known_page))
                    self.file_handle.truncate()
            else:
                raise RSSException(original_text)

    async def announce_pages(self, oldest_page, newest_page):
        """Alerts the users of a new page!"""
        newest_page_n = newest_page["number"]
        oldest_page_n = oldest_page["number"]
        oldest_page_link = oldest_page["link"]
        for guild in self.bot.guilds:
            try:
                self.bot.logger.debug(f"Announcing new page in {guild.name}")
                guild_config = await self.find_guild_config(guild)
                self.bot.logger.debug(f"Got guild config for {guild.name}")
                channel = guild.get_channel(guild_config["channel_id"])
                new_page_role = discord.utils.get(guild.roles, id=guild_config.get(
                    "role_id")) if guild_config.get("role_id") else None
                self.bot.logger.debug(f"Got past role part for {guild.name}")
                # Only make the role mentionable on the production bot;
                # dev/local bots announce without actually pinging anyone.
                if self.bot.prod and new_page_role:
                    await new_page_role.edit(mentionable=True,
                                             reason="New page!")
                elif new_page_role:
                    await new_page_role.edit(mentionable=False,
                                             reason="Local bot, new page without ping")
                role_mention_str = new_page_role.mention if new_page_role else ""
                await channel.send(f"{role_mention_str} More Ava's demon pages!!\n"
                                   f"Pages {oldest_page_n}-{newest_page_n} were just released"
                                   f"({newest_page_n - oldest_page_n} pages)!\n"
                                   f"View: {oldest_page_link}")
                if self.bot.prod and new_page_role:
                    await new_page_role.edit(mentionable=False,
                                             reason="New page!")
            except discord.DiscordException as err:
                # One misconfigured guild must not stop the other announcements
                self.bot.logger.warning(
                    f"Discord threw an error when we announced in {guild.name}: {err}")

    @commands.group()
    @commands.guild_only()
    async def settings(self, ctx):
        """Change settings
        Change settings like where updates are posted and the role it pings (if any)
        """
        pass

    @settings.command()
    @commands.has_permissions(manage_roles=True)
    async def role(self, ctx, new_role: commands.RoleConverter = None):
        """Set role to ping on updates (can be assigned with "subscribe" command)"""
        await self.update_guild_config(ctx.guild, {
            "role_id": new_role.id if new_role else None
        })
        role_str = f"`{new_role.name}`" if new_role else "nobody"
        return await ctx.send(f"Done! I will now ping {role_str} whenever there's an update!")

    @settings.command()
    @commands.has_permissions(manage_channels=True)
    async def channel(self, ctx, new_channel: commands.TextChannelConverter):
        """Set channel to send updates to"""
        if not new_channel:
            # FIX: MissingRequiredArgument was an unqualified (undefined)
            # name; it lives in discord.ext.commands.
            raise commands.MissingRequiredArgument("new_channel")
        await self.update_guild_config(ctx.guild, {
            "channel_id": new_channel.id
        })
        return await ctx.send(f"Done! I will now post in {new_channel.name} whenever there's an update!")

    async def update_guild_config(self, guild, new_config):
        """Merge new_config into the guild's stored config (DB upsert)."""
        res = await self.bot.r.table("guilds").get(str(guild.id)).run()
        if not res:
            res = {}
        # New values win; otherwise keep the stored ones; store as strings
        return await self.bot.r.table("guilds").insert({
            "id": str(guild.id),
            "channel_id": str(new_config.get("channel_id") or res.get("channel_id")) if new_config.get("channel_id") or res.get("channel_id") else None,
            "role_id": str(new_config.get("role_id") or res.get("role_id")) if new_config.get("role_id") or res.get("role_id") else None
        }, conflict="update").run()

    async def find_guild_config(self, guild):
        """Fetch (or create) a guild's config and sanitize stale IDs."""
        self.bot.logger.debug(f"Finding guild config for {guild}")
        res = await self.bot.r.table("guilds").get(str(guild.id)).run()
        if not res:
            self.bot.logger.debug(
                f"No guild config for {guild.name}, generating and saving!")
            res = {
                "id": str(guild.id),
                "channel_id": 0,
                "role_id": 0
            }
            await self.bot.r.table("guilds").insert(res).run()
            self.bot.logger.debug(f"Inserted guild config for {guild.name}")
        if not res.get("channel_id") or not guild.get_channel(
                int(res["channel_id"])):
            self.bot.logger.debug(f"No channel for {guild.name}!")
            # This insanity chooses the top guild (based on position) we have
            # permission to send messages in
            res["channel_id"] = sorted(
                [
                    chan for chan in guild.channels if isinstance(
                        chan,
                        discord.abc.Messageable) and chan.permissions_for(
                        guild.me).send_messages],
                key=lambda channel: channel.position)[0].id
        if res.get("role_id") and not discord.utils.get(
                guild.roles, id=int(res["role_id"] or 0)):
            self.bot.logger.debug(f"No role for {guild.name}!")
            res["role_id"] = None
        self.bot.logger.debug(f"Response created for {guild.name}")
        return {
            "id": guild.id,
            "channel_id": int(res.get("channel_id")),
            "role_id": int(res.get("role_id")) if res.get("role_id") else None
        }

    @commands.command()
    @commands.is_owner()
    async def force_recheck(self, ctx):
        """Owner-only: run an RSS check immediately."""
        await self.check_rss()
        await ctx.send("Done")

    @commands.command(aliases=["unsubscribe", "unsub", "sub"])
    async def subscribe(self, ctx):
        """Subscribes/Unsubscribes from page updates"""
        channel = self.bot.get_channel(cfg.alert_channel)
        guild_config = await self.find_guild_config(ctx.guild)
        if not guild_config.get("role_id"):
            return await ctx.send(f"No role is configured for this server! Use `{ctx.prefix}settings role` to set one!")
        new_page_role = discord.utils.get(
            channel.guild.roles, id=guild_config["role_id"])
        if new_page_role not in ctx.author.roles:
            await ctx.author.add_roles(new_page_role, reason="Subscribed to page updates", atomic=True)
            subscribed = True
        else:
            await ctx.author.remove_roles(new_page_role, reason="Unsubscribed from page updates", atomic=True)
            subscribed = False
        action_message = "Subscribed to" if subscribed else "Unsubscribed from"
        return await ctx.send(f"{action_message} page updates!")

    def parse_number(self, link):
        """Extract the comic page number from the 'page' query parameter."""
        parsed_link = urlparse.urlparse(link)
        page_number = urlparse.parse_qs(parsed_link.query)["page"][0]
        return int(page_number)

    def __unload(self):
        """Persist the latest page number and stop the background loop."""
        # FIX: check_loop may still be None if the bot never became ready
        if self.check_loop:
            self.check_loop.cancel()
        self.file_handle.seek(0)
        self.file_handle.write(str(self.last_known_page))
        self.file_handle.truncate()
        self.file_handle.close()
def setup(bot):
    """discord.py extension entry point: attach the AvaRSS cog to the bot."""
    cog = AvaRSS(bot)
    bot.add_cog(cog)
|
from jumper.entity import Entity
from jumper.game_helper import show_text
from jumper.timer import parse_time
WHITE = (255, 255, 255)  # RGB text color for HUD labels
YELLOW = (255, 255, 0)   # RGB; NOTE(review): currently unused in this file
class StageInfo(Entity):
    """HUD overlay showing the current level (top-left) and the elapsed
    stage time (top-center)."""

    def __init__(self, counter):
        # counter provides get_level() and get_time() for the running stage
        self.counter = counter

    def render(self, screen):
        """Draw the level label and timer onto the given surface."""
        # Only the width is needed (to center the timer); the original also
        # computed an unused height local.
        width = screen.get_size()[0]
        show_text(screen,
                  'LEVEL: {}'.format(self.counter.get_level()),
                  WHITE,
                  30,
                  (5, 5))
        show_text(screen,
                  parse_time(self.counter.get_time()),
                  WHITE,
                  30,
                  (width / 2, 5),
                  align_hor='center')
|
# Invoice management software
# @author Tomas Vargas
import os

print('## Facturacion ##\n====================\n')
name = input('Nombre: ')
Ci = input('CI: ')
fecha = input('Fecha: ')
print('====================')
print('## Compra ##')
prod = []
op = 'y'
# Collect line items (id, name, quantity, unit price) until the user
# answers anything other than 'y'
while op == 'y':
    print('====================')
    p = []
    p.append(input('Id del producto: '))
    p.append(input('Nombre de producto: '))
    p.append(input('Cantidad del producto: '))
    p.append(input('Precio unitario: '))
    prod.append(p)
    op = input('Desea agregar otro producto a la fctura? (y/n): ').lower()
# NOTE(review): the answer is never checked -- the invoice prints regardless
opf = input('¿Desea imprimir la factura? (y/n): ')
os.system("cls")  # Windows-only screen clear
print('====================\n## FACTURA ##\n====================\n')
print('Nombre: ', name, '\nCI: ', Ci, '\nFecha: ', fecha, '\n')
total = 0  # FIX: renamed from `sum`, which shadowed the builtin
for i in prod:
    print('\n====================')
    print('Id: ', i[0], '\nNombre de producto: ', i[1], '\nCantidad: ', i[2], '\nPrecio unitario: ', i[3])
    total += float(i[3]) * float(i[2])
print('\n====================')
print('>> Total a pagar: ', total)
|
import numpy as np
from keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
from keras_pretrained_models.vgg19 import VGG19
# Build a feature extractor: ImageNet-pretrained VGG19 truncated at the fc2
# layer, yielding a 4096-d descriptor per image.
base_model = VGG19(weights='imagenet')
# NOTE(review): `input=`/`output=` are the legacy Keras 1.x keyword names;
# newer Keras expects `inputs=`/`outputs=` -- confirm the installed version.
model = Model(input=base_model.input, output=base_model.get_layer('fc2').output)
img_path = '../test_image.jpg'
# VGG19 expects 224x224 RGB input
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
print (x.shape)
print (np.max(x))
x = np.expand_dims(x, axis=0)  # add batch dimension -> (1, 224, 224, 3)
print (x.shape)
x = preprocess_input(x)  # channel preprocessing matching the VGG weights
print (x.shape)
print (np.max(x))
fc2_features = model.predict(x)
print (fc2_features.shape)
# Dump each of the 4096 fc2 activations for the (single) batch image
for i in range(4096):
    print (fc2_features[:,i])
import datetime
import logging
import os
import sys
import time
from collections import namedtuple
import click
from dateutil.parser import parse
import requests
from mutualfunds import DailyNAVPS
LOGFILE = 'runtime.log'  # destination for runtime logging output
TIMEOUT = 5  # seconds to sleep between report downloads (rate limiting)
OUTDIR = 'Reports'  # root directory under which CSV reports are saved
def stringify_date(date):
    """Convert datetime object into human-readable string"""
    return '{:%Y-%m-%d}'.format(date)
def save_report(report):
    """Save a daily NAVPS report as CSV under OUTDIR/<year>/<month-name>/."""
    rdate = report.date
    fname = 'mf-navps-report-{:%Y-%m-%d}.csv'.format(rdate)
    fpath = '{},{:%Y,%m-%B}'.format(OUTDIR, rdate).split(',')
    spath = os.path.join(*fpath)
    # exist_ok=True already tolerates an existing directory, so the previous
    # os.path.exists() pre-check was redundant (and a TOCTOU race).
    os.makedirs(spath, exist_ok=True)
    report.to_csv(os.path.join(spath, fname))
def set_date_range(start, end):
    """Handle the date parameters properly"""
    DateRange = namedtuple('DateRange', 'start end')
    if end:
        # Explicit end; start defaults to the same day when omitted
        end_dt = parse(end)
        start_dt = parse(start) if start else end_dt
        return DateRange(start=start_dt, end=end_dt)
    if start:
        # Open-ended range: run from start up to now
        return DateRange(start=parse(start), end=datetime.datetime.now())
    # Neither bound supplied
    return None
@click.command()
@click.option('-s', '--start',
              help='Starting date (not earlier than Dec. 20, 2004)')
@click.option('-e', '--end',
              help='Ending date (up to most recent trading day)')
def run(start, end):
    """Download daily mutual-fund NAVPS reports for the requested date range
    and save each trading day's data as a CSV file (see save_report)."""
    logging.basicConfig(format='%(asctime)s %(message)s',
                        filename=LOGFILE,
                        level=logging.INFO)
    logging.info('[INFO] Started')
    click.echo('Initializing')
    dates = set_date_range(start, end)
    if dates is None:
        # Neither --start nor --end was supplied
        msg = 'No dates specified. Exited'
        click.echo(msg)
        logging.info('[INFO] {}'.format(msg))
        sys.exit(1)
    curr_date = dates.start
    from_str = stringify_date(curr_date)
    to_str = stringify_date(dates.end)
    msg = 'Set range from {} to {}'.format(from_str, to_str)
    click.echo(msg)
    logging.info('[INFO] {}'.format(msg))
    one_day = datetime.timedelta(1)
    with requests.Session() as session:
        logging.info('Session established.')
        click.echo('Session started')
        while curr_date <= dates.end:
            if curr_date.weekday() <= 4:  # weekdays only
                click.echo('Processing report {}'.format(from_str),
                           nl=False)
                try:
                    report = DailyNAVPS(
                        session=session,
                        date=curr_date
                    )
                except Exception as e:
                    click.echo(' [Error: {}]'.format(e))
                    logging.error('[ERROR] {} at {}'.format(e, from_str))
                else:
                    if not report.open:
                        # Market holiday / no trading that day
                        click.echo(' [No data. Date skipped]')
                        logging.info(
                            '[INFO] Report {} skipped'.format(from_str)
                        )
                    else:
                        if not report.data:
                            click.echo(' [Unable to obtain data]')
                            logging.warning(
                                '[WARNING] {} is empty'.format(from_str)
                            )
                        else:
                            save_report(report)
                            click.echo(' [Saved output file]')
                finally:
                    # Throttle requests so we don't hammer the server
                    time.sleep(TIMEOUT)
            curr_date += one_day
            from_str = stringify_date(curr_date)
    click.echo('Done')
    logging.info('[INFO] Finished')
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2021 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import re
from flask_multipass import IdentityRetrievalFailed
from indico.core.auth import multipass
from indico.modules.auth import Identity
from indico.modules.users import User
authenticators_re = re.compile(r'\s*,\s*')
def iter_user_identities(user):
    """Yield each distinct identity of *user* usable with Vidyo."""
    from indico_vc_vidyo.plugin import VidyoPlugin
    auth_names = authenticators_re.split(VidyoPlugin.settings.get('authenticators'))
    seen = set()
    for auth_name in auth_names:
        for _, ident in user.iter_identifiers(check_providers=True, providers={auth_name}):
            # Skip identifiers already produced for an earlier provider.
            if ident not in seen:
                seen.add(ident)
                yield ident
def get_user_from_identifier(settings, identifier):
    """Get an actual User object from an identifier.

    First looks for a local ``Identity`` on any configured authenticator
    (preferring providers in configuration order); failing that, asks
    multipass for the identity and matches a user by trusted e-mail.
    Returns ``None`` when no user can be resolved.
    """
    # Use the same separator regex as iter_user_identities instead of a
    # bare str.split(',') so both helpers parse the setting identically.
    providers = authenticators_re.split(settings.get('authenticators'))
    identities = Identity.query.filter(Identity.provider.in_(providers), Identity.identifier == identifier).all()
    if identities:
        # Prefer the identity whose provider appears first in the settings.
        return sorted(identities, key=lambda x: providers.index(x.provider))[0].user
    for provider in providers:
        try:
            identity_info = multipass.get_identity(provider, identifier)
        except IdentityRetrievalFailed:
            continue
        if identity_info is None:
            continue
        # Only match by e-mail for providers explicitly marked trusted.
        if not identity_info.provider.settings.get('trusted_email'):
            continue
        emails = {email.lower() for email in identity_info.data.getlist('email') if email}
        if not emails:
            continue
        user = User.query.filter(~User.is_deleted, User.all_emails.in_(list(emails))).first()
        if user:
            return user
    # Explicit: nothing matched.
    return None
def iter_extensions(prefix, event_id):
    """Yield candidate extensions: first ``prefix + event_id`` alone, then
    the same value with an ever-increasing integer suffix (1, 2, ...)
    to resolve collisions.
    """
    base = f'{prefix}{event_id}'
    yield base
    counter = 0
    while True:
        counter += 1
        yield f'{base}{counter}'
def update_room_from_obj(settings, vc_room, room_obj):
    """Updates a VCRoom DB object using a SOAP room object returned by the API"""
    vc_room.name = room_obj.name
    # Re-resolve the owner only when the remote owner changed; fall back
    # to the system user if the identifier cannot be matched.
    if room_obj.ownerName != vc_room.data['owner_identity']:
        owner = get_user_from_identifier(settings, room_obj.ownerName) or User.get_system_user()
        vc_room.vidyo_extension.owned_by_user = owner
    vc_room.data.update({
        'description': room_obj.description,
        'vidyo_id': str(room_obj.roomID),
        'url': room_obj.RoomMode.roomURL,
        'owner_identity': room_obj.ownerName,
        # Store empty strings (not None) when no PIN is configured.
        'room_pin': room_obj.RoomMode.roomPIN if room_obj.RoomMode.hasPIN else "",
        'moderation_pin': room_obj.RoomMode.moderatorPIN if room_obj.RoomMode.hasModeratorPIN else "",
    })
    vc_room.vidyo_extension.extension = int(room_obj.extension)
def retrieve_principal(principal):
    """Resolve a legacy ``(type, id)`` principal tuple to a User."""
    from indico.modules.users import User
    kind, principal_id = principal
    if kind not in {'Avatar', 'User'}:
        raise ValueError(f'Unexpected type: {kind}')
    return User.get(int(principal_id))
|
"""Translation constants."""
import pathlib
PROJECT_ID = "130246255a974bd3b5e8a1.51616605"
DOCKER_IMAGE = "b8329d20280263cad04f65b843e54b9e8e6909a348a678eac959550b5ef5c75f"
INTEGRATIONS_DIR = pathlib.Path("homeassistant/components")
|
import random
from collections import deque, namedtuple
# Iterable must come from collections.abc: the alias in `collections`
# was deprecated since Python 3.3 and removed in Python 3.10.
from collections.abc import Iterable

import numpy as np
import torch

from serpentrain.reinforcement_learning.memory.replay_buffer import ReplayBuffer
# A single transition observed by the agent.
Experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])


class SimpleReplayBuffer(ReplayBuffer):
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, buffer_size, batch_size):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
        """
        # Since maxlen is set, when the deque is full and a new item is
        # added, the oldest items are discarded automatically.
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size

    def __len__(self):
        """Return the number of experiences currently stored (useful to
        check whether enough samples exist before calling sample())."""
        return len(self.memory)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        # Prepend a batch axis to state/next_state so they stack cleanly.
        e = Experience(np.expand_dims(state, 0), action, reward, np.expand_dims(next_state, 0), done)
        self.memory.append(e)

    def sample(self, batch_size=None):
        """Randomly sample a batch of experiences from memory.

        Returns
        =======
            (states, actions, rewards, next_states, dones) as torch
            tensors; dones are converted to 0/1 floats.
        """
        if batch_size is None:
            batch_size = self.batch_size
        experiences = random.sample(self.memory, k=batch_size)
        states = torch.from_numpy(self.__v_stack_impr([e.state for e in experiences if e is not None])).float()
        actions = torch.from_numpy(self.__v_stack_impr([e.action for e in experiences if e is not None])).long()
        rewards = torch.from_numpy(self.__v_stack_impr([e.reward for e in experiences if e is not None])).float()
        next_states = torch.from_numpy(self.__v_stack_impr([e.next_state for e in experiences if e is not None])).float()
        dones = torch.from_numpy(self.__v_stack_impr([e.done for e in experiences if e is not None]).astype(np.uint8)).float()
        return states, actions, rewards, next_states, dones

    def __v_stack_impr(self, states):
        # Flatten a list of scalars or (1, D) arrays into an (N, D) array;
        # scalars get D == 1.
        sub_dim = len(states[0][0]) if isinstance(states[0], Iterable) else 1
        np_states = np.reshape(np.array(states), (len(states), sub_dim))
        return np_states
|
from __future__ import annotations
from django.db import transaction
from django.http import HttpRequest
from request_token.models import RequestToken, RequestTokenLog
from request_token.settings import DISABLE_LOGS
def parse_xff(header_value: str) -> str | None:
    """
    Parse out the X-Forwarded-For request header.

    This handles the bug that blows up when multiple IP addresses are
    specified in the header. The docs state that the header contains
    "The originating IP address", but in reality it contains a list
    of all the intermediate addresses. The first item is the original
    client, and then any intermediate proxy IPs. We want the original.

    Returns the first IP in the list, else None.
    """
    try:
        return header_value.split(",")[0].strip()
    except AttributeError:
        # header_value was None (header absent) or not a string.
        # NOTE: str.split / [0] on a split result cannot raise KeyError,
        # so the previous KeyError handler was dead code.
        return None
def request_meta(request: HttpRequest) -> dict:
    """Extract values from request to be added to log object."""
    meta = request.META
    user = request.user if not request.user.is_anonymous else None
    # Prefer the first X-Forwarded-For hop; fall back to REMOTE_ADDR.
    client_ip = parse_xff(meta.get("HTTP_X_FORWARDED_FOR")) or meta.get("REMOTE_ADDR", None)
    return {
        "user": user,
        "client_ip": client_ip,
        "user_agent": meta.get("HTTP_USER_AGENT", "unknown"),
    }
@transaction.atomic
def log_token_use(
    token: RequestToken, request: HttpRequest, status_code: int
) -> RequestTokenLog | None:
    """Record one use of *token*: increment its usage counter and, unless
    DISABLE_LOGS is set, create and return a RequestTokenLog row.

    Wrapped in a transaction so the counter increment and the log row
    are committed (or rolled back) together.
    """
    token.increment_used_count()
    if DISABLE_LOGS:
        # Counter is still incremented; only the log row is skipped.
        return None
    return RequestTokenLog.objects.create(
        token=token, status_code=status_code, **request_meta(request)
    )
|
from .Instrumentable import Instrumentable
from .Instrumentation import Instrumentation
from .Measurement import Measurement
from .Configuration import Configuration
from .ActivationRate import ActivationRate
from .Error import Error
from .ClassificationError import ClassificationError
from .Parameters import Parameters
from .TrainingLoss import TrainingLoss
from .Timestamp import Timestamp
from .Function import Function
|
from __future__ import print_function
import json
import sys
import traceback
import warnings
from functools import partial
from importlib import import_module
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.test.utils import setup_test_environment
try:
# Django 1.4
from django.test.simple import dependency_ordered
except ImportError:
try:
# Django 1.5 - 1.10
from django.test.runner import dependency_ordered
except ImportError:
# Django 1.11+
from django.test.utils import dependency_ordered
from ... import b64pickle, errors
from ...utils import redirect_stdout
def use_test_databases():
    """
    Adapted from DjangoTestSuiteRunner.setup_databases

    Re-points every configured DB connection at the test database that the
    parent test run already created, instead of creating new databases.
    """
    # First pass -- work out which databases connections need to be switched
    # and which ones are test mirrors or duplicate entries in DATABASES
    mirrored_aliases = {}
    test_databases = {}
    dependencies = {}
    for alias in connections:
        connection = connections[alias]
        test_mirror = connection.settings_dict.get('TEST_MIRROR')
        if test_mirror:
            # If the database is marked as a test mirror, save
            # the alias.
            mirrored_aliases[alias] = test_mirror
        else:
            # Store a tuple with DB parameters that uniquely identify it.
            # If we have two aliases with the same values for that tuple,
            # they will have the same test db name.
            item = test_databases.setdefault(
                connection.creation.test_db_signature(),
                (connection.settings_dict['NAME'], [])
            )
            item[1].append(alias)
            if 'TEST_DEPENDENCIES' in connection.settings_dict:
                dependencies[alias] = (
                    connection.settings_dict['TEST_DEPENDENCIES'])
            else:
                # Non-default DBs implicitly depend on the default alias.
                if alias != DEFAULT_DB_ALIAS:
                    dependencies[alias] = connection.settings_dict.get(
                        'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
    # Second pass -- switch the databases to use test db settings.
    for signature, (db_name, aliases) in dependency_ordered(
            test_databases.items(), dependencies):
        # get test db name from the first connection
        connection = connections[aliases[0]]
        for alias in aliases:
            connection = connections[alias]
            test_db_name = connection.creation._get_test_db_name()
            # NOTE: if using sqlite for tests, be sure to specify a
            # TEST_NAME / TEST:NAME with a real filename to avoid using
            # in-memory db
            if test_db_name == ':memory:':
                # Django converts all sqlite test dbs to :memory: ...but
                # they can't be shared between concurrent processes...
                # in this case it also means our parent test run used an
                # in-memory db that we can't share
                warnings.warn(
                    "In-memory databases can't be shared between concurrent "
                    "test processes. "
                    "{parent} -> {test}".format(parent=db_name, test=test_db_name)
                )
            # we are running late in Django life-cycle so it has already
            # opened connections to default db, need to close and re-open
            # against test db:
            connection.close()
            connection.settings_dict['NAME'] = test_db_name
            connection.cursor()
    for alias, mirror_alias in mirrored_aliases.items():
        # we are running late in Django life-cycle so it has already
        # opened connections to default db, need to close and re-open
        # against test mirror db:
        connection = connections[alias]
        connection.close()
        connection.settings_dict['NAME'] = (
            connections[mirror_alias].settings_dict['NAME'])
        connection.features = connections[mirror_alias].features
        connection.cursor()
def close_db_connections():
    """Close every configured Django DB connection."""
    for alias in connections:
        connections[alias].close()
class Command(BaseCommand):
    """
    The goal of this command is to allow us to do actual concurrent requests
    in a test case.
    It seems kind of cumbersome to run our function via a manage.py command.
    It would be nicer to just use multiprocessing and call our function that
    way. However, multiprocessing under Python 2 on Unix always uses os.fork
    ...and the forked processes inherit sockets, such as postgres db, but in a
    broken state. I didn't find a way to successfully fork a Django process
    and no-one on SO did either.
    So the idea is for the parent test case to set up concurrent calls to this
    command via subprocess (e.g. via multiprocessing.Pool)
    You don't need to use this command directly, see `django_concurrent_tests.helpers`
    for helper functions that you'd use in your unit tests.
    """
    # Two option-declaration mechanisms are kept so the command works on
    # both old and new Django: option_list (pre-1.10) and add_arguments.
    if hasattr(BaseCommand, 'option_list'):
        # Django < 1.10
        option_list = BaseCommand.option_list + (
            make_option(
                '-k', '--kwargs',
                help='kwargs to request client method call (serialized to ascii)',
            ),
            make_option(
                '-s', '--serializer',
                help='Serialization format',
                type='choice', choices=('b64pickle', 'json'),
                default='b64pickle',
                # json is included to have a hand-editable option, which may be
                # useful if running this command directly (dev use only)
            ),
            make_option(
                '-t', '--no-test-db',
                help="Don't patch connection to use test db",
                action='store_true',
            ),
            # (dev use only) if running this command directly, option to use the
            # default dbs created via syncdb instead of dbs from parent test run
        )
    help = "We use nosetests path format - path.to.module:function_name"

    def add_arguments(self, parser):
        # Django >= 1.10
        parser.add_argument(
            '--kwargs', '-k',
            help='kwargs to request client method call (serialized to ascii)',
        )
        parser.add_argument(
            '-s', '--serializer',
            help='Serialization format',
            choices=('b64pickle', 'json'),
            default='b64pickle',
            # json is included to have a hand-editable option, which may be
            # useful if running this command directly (dev use only)
        )
        parser.add_argument(
            '-t', '--no-test-db',
            help="Don't patch connection to use test db",
            action='store_true',
        )
        parser.add_argument(
            'funcpath',
            help='path.to.module:function_name'
        )

    def handle(self, *args, **kwargs):
        """Import and call the requested function, then print its
        serialized result (or a WrappedError) to stdout for the parent
        process to capture."""
        serializer_name = kwargs['serializer']
        if serializer_name == 'json':
            serialize = partial(json.dumps, ensure_ascii=True)
            deserialize = json.loads
        else:
            # default
            serialize = b64pickle.dumps
            deserialize = b64pickle.loads
        try:
            # Django >= 1.10
            func_path = kwargs['funcpath']
        except KeyError:
            # Django < 1.10 delivered positionals via *args
            func_path = args[0]
        # redirect any printing that may occur from stdout->stderr
        # so as not to pollute our stdout output (we serialize the
        # return value of func and print to stdout for capture in
        # parent process)
        with redirect_stdout(sys.stderr):
            try:
                if not func_path:
                    raise CommandError(
                        'Must supply an import path to function to execute')
                if serializer_name not in ('json', 'b64pickle'):
                    raise CommandError(
                        'Invalid --serializer name')
                module_name, function_name = func_path.split(':')
                module = import_module(module_name)
                try:
                    f = getattr(module, function_name)
                except AttributeError:
                    print(
                        "Could not import '{module}.{func}', you may need to use "
                        "https://github.com/depop/django-concurrent-test-helper/#string-import-paths"
                        .format(module=module_name, func=function_name)
                    )
                    raise
                f_kwargs = deserialize(kwargs['kwargs'] or '{}')
                setup_test_environment()
                # ensure we're using test dbs, shared with parent test run
                if not kwargs['no_test_db']:
                    use_test_databases()
                result = f(**f_kwargs)
                close_db_connections()
            except Exception as e:
                # Any failure is serialized back to the parent instead of
                # crashing, so the parent can re-raise it in-process.
                _, _, tb_ = sys.exc_info()
                traceback.print_tb(tb_)
                print(repr(e))
                result = errors.WrappedError(e)
        # The ONLY stdout output: the serialized result, no newline.
        print(serialize(result), end='')
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pytest
from unittest import mock
from pymeasure.display.Qt import QtGui, QtCore
from pymeasure.display.inputs import ScientificInput, BooleanInput, ListInput
from pymeasure.experiment.parameters import BooleanParameter, ListParameter, FloatParameter
@pytest.mark.parametrize("default_value", [True, False])
class TestBooleanInput:
    """Tests for BooleanInput, each run for both possible defaults."""

    def test_init_from_param(self, qtbot, default_value):
        # set up BooleanInput
        bool_param = BooleanParameter('potato', default=default_value)
        bool_input = BooleanInput(bool_param)
        qtbot.addWidget(bool_input)
        # test: label text and checked state mirror the parameter
        assert bool_input.text() == bool_param.name
        assert bool_input.value() == default_value

    def test_setValue_should_update_value(self, qtbot, default_value):
        # set up BooleanInput
        bool_param = BooleanParameter('potato', default=default_value)
        bool_input = BooleanInput(bool_param)
        qtbot.addWidget(bool_input)
        bool_input.setValue(not default_value)
        assert bool_input.value() == (not default_value)

    def test_leftclick_should_update_parameter(self, qtbot, default_value):
        # set up BooleanInput
        bool_param = BooleanParameter('potato', default=default_value)
        # Patch the parameter's value property to verify the widget writes
        # the new value back when its `parameter` attribute is read.
        with mock.patch('test_inputs.BooleanParameter.value',
                        new_callable=mock.PropertyMock,
                        return_value=default_value) as p:
            bool_input = BooleanInput(bool_param)
            qtbot.addWidget(bool_input)
            bool_input.show()
            # TODO: fix: fails to toggle on Windows
            #qtbot.mouseClick(bool_input, QtCore.Qt.LeftButton)
            bool_input.setValue(not default_value)
            assert bool_input.value() == (not default_value)
            bool_input.parameter  # lazy update
            p.assert_called_once_with(not default_value)
class TestListInput:
    """Tests for ListInput (combo box backed by a ListParameter)."""

    @pytest.mark.parametrize("choices,default_value", [
        (["abc", "def", "ghi"], "abc"),  # strings
        ([123, 456, 789], 123),  # numbers
        (["abc", "def", "ghi"], "def")  # default not first value
    ])
    def test_init_from_param(self, qtbot, choices, default_value):
        list_param = ListParameter('potato',
                                   choices=choices,
                                   default=default_value,
                                   units='m')
        list_input = ListInput(list_param)
        qtbot.addWidget(list_input)
        # Combo box must be read-only and preselect the default choice.
        assert list_input.isEditable() == False
        assert list_input.value() == default_value

    def test_setValue_should_update_value(self, qtbot):
        # Test write-read loop: verify value -> index -> value conversion
        choices = [123, 'abc', 0]
        list_param = ListParameter('potato', choices=choices, default=123)
        list_input = ListInput(list_param)
        qtbot.addWidget(list_input)
        for choice in choices:
            list_input.setValue(choice)
            assert list_input.currentText() == str(choice)
            assert list_input.value() == choice

    def test_setValue_should_update_parameter(self, qtbot):
        choices = [123, 'abc', 0]
        list_param = ListParameter('potato', choices=choices, default=123)
        list_input = ListInput(list_param)
        qtbot.addWidget(list_input)
        # Patch the parameter's value property so the write-backs from the
        # widget can be asserted in order.
        with mock.patch('test_inputs.ListParameter.value',
                        new_callable=mock.PropertyMock,
                        return_value=123) as p:
            for choice in choices:
                list_input.setValue(choice)
                list_input.parameter  # lazy update
            p.assert_has_calls((mock.call(123), mock.call('abc'), mock.call(0)))

    def test_unit_should_append_to_strings(self, qtbot):
        # Display text shows the choice followed by the parameter's units.
        list_param = ListParameter('potato', choices=[123, 456], default=123, units='m')
        list_input = ListInput(list_param)
        qtbot.addWidget(list_input)
        assert list_input.currentText() == '123 m'

    def test_set_invalid_value_should_raise(self, qtbot):
        list_param = ListParameter('potato', choices=[123, 456], default=123, units='m')
        list_input = ListInput(list_param)
        qtbot.addWidget(list_input)
        # A value outside the declared choices must be rejected.
        with pytest.raises(ValueError):
            list_input.setValue(789)
class TestScientificInput:
    """Tests for ScientificInput (spin box backed by a FloatParameter)."""

    @pytest.mark.parametrize("min_,max_,default_value", [
        [0, 20, 12],
        [0, 1000, 200],  # regression #118: default above default max 99.99
        [-1000, 1000, -10]  # regression #118: default below default min 0
    ])
    def test_init_from_param(self, qtbot, min_, max_, default_value):
        float_param = FloatParameter('potato',
                                     minimum=min_,
                                     maximum=max_,
                                     default=default_value,
                                     units='m')
        sci_input = ScientificInput(float_param)
        qtbot.addWidget(sci_input)
        # Range, default, and unit suffix must all come from the parameter.
        assert sci_input.minimum() == min_
        assert sci_input.maximum() == max_
        assert sci_input.value() == default_value
        assert sci_input.suffix() == ' m'

    def test_setValue_within_range_should_set(self, qtbot):
        float_param = FloatParameter('potato',
                                     minimum=-10, maximum=10, default=0)
        sci_input = ScientificInput(float_param)
        qtbot.addWidget(sci_input)
        # test
        sci_input.setValue(5)
        assert sci_input.value() == 5

    def test_setValue_within_range_should_set_regression_118(self, qtbot):
        float_param = FloatParameter('potato',
                                     minimum=-1000, maximum=1000, default=0)
        sci_input = ScientificInput(float_param)
        qtbot.addWidget(sci_input)
        # test - validate min/max beyond QDoubleSpinBox defaults
        # QDoubleSpinBox defaults are 0 to 99.9 - so test value >= 100
        sci_input.setValue(999)
        assert sci_input.value() == 999
        sci_input.setValue(-999)
        assert sci_input.value() == -999

    def test_setValue_out_of_range_should_constrain(self, qtbot):
        float_param = FloatParameter('potato',
                                     minimum=-1000, maximum=1000, default=0)
        sci_input = ScientificInput(float_param)
        qtbot.addWidget(sci_input)
        # test: out-of-range values are clamped to the nearest bound
        sci_input.setValue(1024)
        assert sci_input.value() == 1000
        sci_input.setValue(-1024)
        assert sci_input.value() == -1000

    def test_setValue_should_update_param(self, qtbot):
        float_param = FloatParameter('potato',
                                     minimum=-1000, maximum=1000, default=10.0)
        sci_input = ScientificInput(float_param)
        qtbot.addWidget(sci_input)
        # Patch the parameter's value property to verify the write-back.
        with mock.patch('test_inputs.FloatParameter.value',
                        new_callable=mock.PropertyMock,
                        return_value=10.0) as p:
            # test
            sci_input.setValue(5.0)
            sci_input.parameter  # lazy update
            p.assert_called_once_with(5.0)
|
# Simple linear regression on home prices: fit price ~ area, then predict
# prices for a list of areas and save them to CSV.
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt

# Load the training data set: one row per home with 'area' and 'price'.
df = pd.read_csv('homeprices.csv')
print(df)

# Plot the training data.
plt.xlabel('area')
plt.ylabel('price')
plt.scatter(df.area, df.price, color='red', marker='+')

# Split into features (area only) and target (price).
new_df = df.drop('price', axis='columns')
print(new_df)
price = df.price
print(price)

# Fit the linear regression model: price = m * area + b.
reg = linear_model.LinearRegression()
reg.fit(new_df, price)

# Predict price of a home with area = 3300 sq ft
# (print the result instead of discarding it).
print(reg.predict([[3300]]))

# Y = m * X + b (m is coefficient and b is intercept).
# Verify using the fitted coefficients rather than hard-coded constants,
# so the check stays correct when the training data changes.
print(reg.coef_)
print(reg.intercept_)
print(3300 * reg.coef_[0] + reg.intercept_)

# Predict price of a home with area = 5000 sq ft
print(reg.predict([[5000]]))

# Generate CSV file with list of home price predictions.
area_df = pd.read_csv("areas.csv")
print(area_df.head(3))

# Prediction on test dataset.
p = reg.predict(area_df)
print(p)
area_df['prices'] = p
print(area_df)

# Save predictions; omit the DataFrame index column from the file.
area_df.to_csv("prediction.csv", index=False)
|
import os
from unittest import TestCase
from musurgia.pdf.pdf import Pdf
from musurgia.pdf.table import Table
path = os.path.abspath(__file__).split('.')[0]
# todo
class Test(TestCase):
    """Placeholder test case for Table rendering (real test disabled)."""

    def test_1(self):
        # Intentionally a no-op: the PDF-writing test below is commented
        # out, presumably until Table drawing is ready — TODO confirm.
        pass
        # pdf_path = path + '_test_1.pdf'
        # pdf = Pdf()
        # table = Table(number_of_rows=20, relative_x=70)
        # table.draw(pdf=pdf)
        # pdf.write(pdf_path)
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import six
from django.db import models
from geonode.monitoring.forms import MultiEmailField as MultiEmailFormField
class MultiEmailField(models.Field):
    """Model field storing multiple e-mail addresses as newline-separated
    text; exposed to Python code as a list of strings."""

    description = "A multi e-mail field stored as a multi-lines text"

    def formfield(self, **kwargs):
        # Default to the MultiEmail form field, letting callers override.
        options = {'form_class': MultiEmailFormField}
        options.update(kwargs)
        return super(MultiEmailField, self).formfield(**options)

    def from_db_value(self, value, expression, connection, context):
        # Convert the stored text into a list of lines (empty for NULL).
        return [] if value is None else value.splitlines()

    def get_db_prep_value(self, value, connection, prepared=False):
        # Serialize a list of addresses into newline-separated text;
        # strings pass through untouched.
        if isinstance(value, six.string_types):
            return value
        if isinstance(value, list):
            return "\n".join(value)

    def to_python(self, value):
        # Accept text, an existing list, or an empty value.
        if not value:
            return []
        if isinstance(value, list):
            return value
        return value.splitlines()

    def get_internal_type(self):
        # Backed by a plain text column in the database.
        return 'TextField'
|
import random
import torch
import torch.nn.functional as F
from kornia.filters import laplacian, canny
from kornia.morphology import erosion
from itertools import product
def get_feature_representation(feature_map_sequence: torch.Tensor,
                               keypoint_coordinates: torch.Tensor,
                               image_sequence: torch.Tensor,
                               t: int,
                               k: int) -> torch.Tensor:
    """Build an (N, 7) feature vector for key-point *k* at time-step *t*.

    Column layout of the returned tensor:
      [0:3]  key-point coordinates (assumes the last coord dim is 3 —
             TODO confirm against the caller)
      [3:6]  masked-image intensity sums at up to three erosion levels
      [6]    sum of Canny edge responses inside the key-point mask
    """
    N, T, C, H, W = image_sequence.shape
    fts = torch.empty(size=(N, 7)).to(feature_map_sequence.device)
    fts[:, :3] = keypoint_coordinates[:, t, k, ...]
    # Upscale key-point k's feature map to full image resolution.
    upscaled_gaussian_map = F.interpolate(feature_map_sequence[:, t: t + 1, k, ...],
                                          size=(H, W))
    # Binarize at 0.1 and repeat across the 3 image channels to mask pixels
    # the key-point covers.
    mask = (upscaled_gaussian_map > 0.1).float().repeat(1, 3, 1, 1)
    masked_img = torch.multiply(image_sequence[:, t, ...], mask)
    masked_img_magnitude, masked_img_edges = canny(masked_img, low_threshold=0.2,
                                                   high_threshold=0.5, kernel_size=(3, 3))
    # grad_mask = (masked_img_edges > -0.01).float()
    # masked_img_grads = torch.multiply(grad_mask, masked_img_edges)
    fts[:, -1] = masked_img_edges.sum(dim=[1, 2, 3])
    erosion_kernel = torch.ones(3, 3).to(feature_map_sequence.device)
    result_sums = []
    feature_map = torch.clone(mask)
    # while True:
    # Erode the mask up to 3 times, recording the masked intensity sum at
    # each level; stop early once the mean sum is (close to) zero.
    for _ in range(3):
        result_image = torch.multiply(feature_map, image_sequence[:, t, ...])
        result_sums.append(torch.sum(result_image, dim=[1, 2, 3]))
        feature_map = erosion(feature_map, kernel=erosion_kernel)
        if -1e-2 <= result_sums[-1].mean() <= 1e-2:
            break
    # Fill columns 3..5 with the last three sums, duplicating entries when
    # the loop stopped early and fewer than three sums exist.
    if len(result_sums) >= 3:
        fts[:, 3] = result_sums[-3]
        fts[:, 4] = result_sums[-2]
        fts[:, 5] = result_sums[-1]
    elif len(result_sums) == 2:
        fts[:, 3] = result_sums[-2]
        fts[:, 4] = result_sums[-2]
        fts[:, 5] = result_sums[-1]
    else:
        fts[:, 3] = result_sums[-1]
        fts[:, 4] = result_sums[-1]
        fts[:, 5] = result_sums[-1]
    # Drop references to intermediates explicitly — presumably to reduce
    # peak GPU memory when called in a tight loop.
    del mask, masked_img, masked_img_magnitude, masked_img_edges, upscaled_gaussian_map,\
        feature_map, result_image, result_sums, erosion_kernel
    return fts
def pixelwise_contrastive_loss_fmap_based(
        keypoint_coordinates: torch.Tensor,
        image_sequence: torch.Tensor,
        feature_map_sequence: torch.Tensor,
        pos_range: int = 1,
        alpha: float = 0.1
) -> torch.Tensor:
    """ This version of the pixelwise-contrastive loss uses a combination of the feature maps
        with the original images in order to contrast the key-points.

    :param keypoint_coordinates: Tensor of key-point positions in (N, T, K, 2/3)
    :param image_sequence: Tensor of frames in (N, T, C, H, W)
    :param feature_map_sequence: Tensor of feature maps in (N, T, K, H', W')
    :param pos_range: Range of time-steps to consider as matches ([anchor - range, anchor + range])
    :param alpha: Threshold for matching
    :return: Scalar margin loss averaged over batch, time and key-points.
    """
    #
    # Checking tensor shapes
    #
    assert keypoint_coordinates.dim() == 4
    assert image_sequence.dim() == 5
    assert keypoint_coordinates.shape[0:2] == image_sequence.shape[0:2]

    N, T, C, H, W = image_sequence.shape
    K, D = keypoint_coordinates.shape[2:4]

    # Calculate contrastive loss
    L = torch.zeros((N,)).to(image_sequence.device)
    for t in range(T):
        for k in range(K):
            #
            # Anchor feature representation
            #
            anchor_ft_representation = get_feature_representation(
                feature_map_sequence=feature_map_sequence,
                keypoint_coordinates=keypoint_coordinates,
                image_sequence=image_sequence,
                t=t,
                k=k
            )

            # Positives: same key-point at neighboring time-steps;
            # negatives: every other (time-step, key-point) pair in range.
            time_steps = range(max(0, t - pos_range), min(T, t + pos_range + 1))
            positives = [(t_i, k) for t_i in time_steps]
            positives.remove((t, k))
            negatives = list(product(time_steps, range(K)))
            negatives.remove((t, k))
            for pair in positives:
                try:
                    negatives.remove(pair)
                except ValueError:
                    continue

            #
            # Match feature representation and loss
            #
            # BUGFIX: was `torch.tensor((N,))`, which is the 1-element
            # tensor [N] (the scalar value N), not an N-dim zero vector.
            L_p = torch.zeros((N,)).to(image_sequence.device)
            # TODO: Use mining instead of random choice / all positives?
            # NOTE: random.choice works for single-element lists too; the
            # previous `... if len(positives) > 1 else positives` branch
            # yielded the list itself and broke tuple unpacking.
            if positives:
                t_p, k_p = random.choice(positives)
                match_ft_representation = get_feature_representation(
                    feature_map_sequence=feature_map_sequence,
                    keypoint_coordinates=keypoint_coordinates,
                    image_sequence=image_sequence,
                    t=t_p,
                    k=k_p
                )
                L_p = L_p + torch.norm(anchor_ft_representation - match_ft_representation, p=2, dim=1) ** 2

            #
            # Non-match feature representation and loss
            #
            L_n = torch.zeros((N,)).to(image_sequence.device)
            # TODO: Use mining instead of random choice / all negatives?
            if negatives:
                t_n, k_n = random.choice(negatives)
                # BUGFIX: the non-match representation was computed at the
                # anchor position (t, k) instead of the sampled (t_n, k_n),
                # making L_n identical to the anchor and the margin useless.
                non_match_ft_representation = get_feature_representation(
                    feature_map_sequence=feature_map_sequence,
                    keypoint_coordinates=keypoint_coordinates,
                    image_sequence=image_sequence,
                    t=t_n,
                    k=k_n
                )
                L_n = L_n + torch.norm(anchor_ft_representation - non_match_ft_representation, p=2, dim=1) ** 2

            #
            # Total Loss (hinge margin: positives close, negatives far)
            #
            L = torch.add(
                L,
                torch.maximum(L_p - L_n + alpha, torch.zeros((N,)).to(image_sequence.device))
            )

    # Average loss across time and key-points
    L = L / (T * K)
    return torch.mean(L)
if __name__ == "__main__":
N, T, C, H, W = 8, 8, 3, 64, 64
K = 4
time_window = 5
patch_size = (9, 9)
pos_range = max(int(time_window / 2), 1) if time_window > 1 else 0
center_index = int(patch_size[0] / 2)
step_matrix = torch.ones(patch_size + (2,)).to('cuda:0')
step_w = 2 / W
step_h = 2 / H
for k in range(0, patch_size[0]):
for l in range(0, patch_size[1]):
step_matrix[k, l, 0] = (l - center_index) * step_w
step_matrix[k, l, 1] = (k - center_index) * step_h
grid = step_matrix.unsqueeze(0).repeat((N * T * K, 1, 1, 1)).to('cuda:0')
fake_img = torch.rand(size=(N, T, C, H, W)).to('cuda:0')
fake_kpts = torch.rand(size=(N, T, K, 3)).to('cuda:0')
fake_kpts[..., 2] = 1.0
print(pixelwise_contrastive_loss_fmap_based(
keypoint_coordinates=fake_kpts,
image_sequence=fake_img,
pos_range=pos_range,
grid=grid,
patch_size=(9, 9),
alpha=0.01
))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetBundleResult',
'AwaitableGetBundleResult',
'get_bundle',
]
@pulumi.output_type
class GetBundleResult:
    """
    A collection of values returned by getBundle.
    """
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) — regenerate
    # rather than hand-editing. __init__ only validates argument types and
    # stores them via pulumi.set.
    def __init__(__self__, bundle_id=None, compute_types=None, description=None, id=None, name=None, owner=None, root_storages=None, user_storages=None):
        if bundle_id and not isinstance(bundle_id, str):
            raise TypeError("Expected argument 'bundle_id' to be a str")
        pulumi.set(__self__, "bundle_id", bundle_id)
        if compute_types and not isinstance(compute_types, list):
            raise TypeError("Expected argument 'compute_types' to be a list")
        pulumi.set(__self__, "compute_types", compute_types)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if owner and not isinstance(owner, str):
            raise TypeError("Expected argument 'owner' to be a str")
        pulumi.set(__self__, "owner", owner)
        if root_storages and not isinstance(root_storages, list):
            raise TypeError("Expected argument 'root_storages' to be a list")
        pulumi.set(__self__, "root_storages", root_storages)
        if user_storages and not isinstance(user_storages, list):
            raise TypeError("Expected argument 'user_storages' to be a list")
        pulumi.set(__self__, "user_storages", user_storages)

    @property
    @pulumi.getter(name="bundleId")
    def bundle_id(self) -> Optional[str]:
        """
        The ID of the bundle.
        """
        return pulumi.get(self, "bundle_id")

    @property
    @pulumi.getter(name="computeTypes")
    def compute_types(self) -> List['outputs.GetBundleComputeTypeResult']:
        """
        The compute type. See supported fields below.
        """
        return pulumi.get(self, "compute_types")

    @property
    @pulumi.getter
    def description(self) -> str:
        """
        The description of the bundle.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the compute type.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def owner(self) -> Optional[str]:
        """
        The owner of the bundle.
        """
        return pulumi.get(self, "owner")

    @property
    @pulumi.getter(name="rootStorages")
    def root_storages(self) -> List['outputs.GetBundleRootStorageResult']:
        """
        The root volume. See supported fields below.
        """
        return pulumi.get(self, "root_storages")

    @property
    @pulumi.getter(name="userStorages")
    def user_storages(self) -> List['outputs.GetBundleUserStorageResult']:
        """
        The user storage. See supported fields below.
        """
        return pulumi.get(self, "user_storages")
class AwaitableGetBundleResult(GetBundleResult):
    """Awaitable wrapper so the result can be used with `await` in async code."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which
        # is what the `await` protocol requires; it never actually suspends
        # and simply resolves to a plain GetBundleResult.
        if False:
            yield self
        return GetBundleResult(
            bundle_id=self.bundle_id,
            compute_types=self.compute_types,
            description=self.description,
            id=self.id,
            name=self.name,
            owner=self.owner,
            root_storages=self.root_storages,
            user_storages=self.user_storages)
def get_bundle(bundle_id: Optional[str] = None,
               name: Optional[str] = None,
               owner: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBundleResult:
    """
    Retrieve information about an AWS WorkSpaces bundle.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.workspaces.get_bundle(name="Value with Windows 10 and Office 2016",
        owner="AMAZON")
    ```

    :param str bundle_id: The ID of the bundle.
    :param str name: The name of the bundle. You cannot combine this parameter with `bundle_id`.
    :param str owner: The owner of the bundles. You have to leave it blank for own bundles. You cannot combine this parameter with `bundle_id`.
    """
    # Invoke arguments, keyed by the provider's camelCase argument names.
    __args__ = {
        'bundleId': bundle_id,
        'name': name,
        'owner': owner,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:workspaces/getBundle:getBundle', __args__, opts=opts, typ=GetBundleResult).value

    return AwaitableGetBundleResult(
        bundle_id=__ret__.bundle_id,
        compute_types=__ret__.compute_types,
        description=__ret__.description,
        id=__ret__.id,
        name=__ret__.name,
        owner=__ret__.owner,
        root_storages=__ret__.root_storages,
        user_storages=__ret__.user_storages)
|
import os

__author__ = 'Ollie Bennett | http://olliebennett.co.uk/'

# Locations of the application log files, under a relative "logs" directory.
# os.path.join uses the platform separator, matching "logs" + os.sep + name.
LOG_PATH_TWITTER = os.path.join("logs", "twitter.log")
LOG_PATH_TEMPERATURE = os.path.join("logs", "temperature.log")
LOG_PATH_TORRENT = os.path.join("logs", "torrents.log")
LOG_PATH_SERVER = os.path.join("logs", "server.log")
|
import simplejson as json
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from flask import Flask, request
from flask_restful import Api, Resource
from threading import Thread
from dateutil import parser
import logging
from senml import senml
import os
# Elasticsearch connection settings come from the environment, falling back
# to a local default instance.
elastic_host = os.environ.get('ELASTIC_HOST') or 'localhost'
_elastic_port_env = os.environ.get('ELASTIC_PORT')
# Fix: environment variables are strings, but the hard-coded default was an
# int — normalize to int so the port type is consistent either way.
elastic_port = int(_elastic_port_env) if _elastic_port_env else 9200

es = Elasticsearch([{'host': elastic_host, 'port': elastic_port}])
def parse_senml(data):
    """Convert a tagged SenML payload into Elasticsearch bulk actions.

    Returns a list of bulk-action dicts, or None when the payload does not
    parse as SenML (failures are logged at debug level only).
    """
    try:
        documents = []
        ml = senml.SenMLDocument.from_json(data['data'])
        logger.debug('Data fit in SenMLDocument!')
        for measurement in ml.measurements:
            resolved = measurement.to_json()
            doc = {}
            if 't' in resolved:
                t = resolved.get('t')
                epoch_time_now = parser.parse(data['timestamp']).timestamp()
                # 268435456 == 2**28, SenML's boundary between relative and
                # absolute time values (RFC 8428); keep 't' only when it is an
                # absolute timestamp no later than the message timestamp.
                if 268435456 <= t <= epoch_time_now:  # Absolute time cut-off point
                    resolved['t'] = float(t)
                else:
                    # A timestamp in the future is not relevant for sensor measurements.
                    resolved['t'] = float(epoch_time_now)
            resolved['timestamp'] = data['timestamp']
            resolved['uuid'] = data['uuid']
            doc['_index'] = 'measurements'
            doc['_type'] = '_doc'
            doc['_source'] = resolved
            doc['pipeline'] = 'dailyindex'
            logger.debug(doc)
            documents.append(doc)
        logger.debug(f'Done parsing message into {len(documents)} documents!')
        return documents
    except Exception as e:
        logger.debug(
            f'Could not parse the following document as SenML: {data}, got exception: {e}'
        )
def parse_json_document(data):
    """Wrap a plain-JSON payload in a single Elasticsearch bulk action.

    Returns the action dict, or None when the payload lacks expected keys
    (failures are logged at debug level only).
    """
    try:
        source = data['data']
        source['timestamp'] = data['timestamp']
        source['uuid'] = data['uuid']
        doc = {
            '_index': 'measurements',
            '_type': '_doc',
            '_source': source,
            'pipeline': 'dailyindex',
        }
        logger.info(str(doc))
        return doc
    except Exception as e:
        logger.debug(f'Could not parse object: {data}, got exception: {e}')
def parse(json_data):
    """Route a message to the SenML or plain-JSON parser and bulk-index it."""
    if not json_data.get('data'):
        logger.debug(f'Message not from tagger, missing "data" field! {json_data}')
        return
    if isinstance(json_data['data'], list):
        # A list payload is assumed to be SenML records.
        logger.debug('Trying to parse as SenML!')
        documents = parse_senml(json_data)
        if documents:
            logger.debug('Parsed message as SenML!')
            try:
                helpers.bulk(es, documents)
            except Exception as e:
                logger.error(str(e))
            return
        logger.info(f'No SenML documents as a result of parsing the message: {json_data}')
        return
    logger.debug('Trying to parse as json document')
    document = parse_json_document(json_data)
    if document:
        logger.debug('Parsed message as json document')
        try:
            helpers.bulk(es, [document])
        except Exception as e:
            logger.error(str(e))
        return
    logger.info(f"Couldn't parse message! {json_data}")
class Parser(Resource):
    """REST endpoint that accepts a JSON message and indexes it asynchronously."""
    def post(self):
        # NOTE(review): request.get_json() already returns a parsed object;
        # calling json.loads on top of it implies clients double-encode the
        # payload (a JSON string inside the JSON body) — confirm with callers.
        json_data = json.loads(request.get_json())
        # Index in a background thread so the HTTP response returns immediately.
        Thread(target=parse, args=(json_data,)).start()
        return 'OK', 200
logging.basicConfig(level=logging.INFO)  # Set this to logging.DEBUG to enable debugging
logger = logging.getLogger('Parser')
# Quiet the noisy third-party loggers.
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('werkzeug').setLevel(logging.ERROR)

app = Flask(__name__)
api = Api(app)
api.add_resource(Parser, '/parse/')

if __name__ == '__main__':
    # Bind on all interfaces so the service is reachable inside a container.
    app.run(host='0.0.0.0')
|
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
#
# Minimal CGI web server. Python 2 only: BaseHTTPServer and CGIHTTPServer
# were merged into http.server in Python 3.
import BaseHTTPServer
import CGIHTTPServer
import cgitb; cgitb.enable()  # show tracebacks in the browser while debugging

# Listen on all interfaces, port 8000.
server_address = ("", 8000)
handler = CGIHTTPServer.CGIHTTPRequestHandler
# NOTE(review): cgi_directories conventionally holds paths like ['/cgi-bin'];
# an empty string here appears to serve CGI from the root — confirm intended.
handler.cgi_directories = [""]
httpd = BaseHTTPServer.HTTPServer(server_address, handler)
httpd.serve_forever()  # blocks until interrupted
|
import os
from fn_is_ableton_project import fn_is_ableton_project
# Creates a master list of all samples in a folder and its subfolders
# -the program assumes that the folder being searched is a user's "master samples folder"
# where they've stored all their saved samples
# -if an Ableton project folder is encountered, it is skipped entirely: its
#   samples are assumed to belong to that project rather than to the master
#   sample library (NOTE(review): this comment originally claimed such samples
#   were added to the master list, which contradicts the code — confirm intent)
#
# @param folder The folder to be searched for subfolders and samples
#
# @return li_music_folder_contents List of samples and their paths
#
def fn_search_for_all_samples(folder):
    """Recursively collect audio-sample paths under `folder`, sorted.

    Ableton project folders are not descended into; non-audio files are
    reported on stdout.
    """
    audio_extensions = (".mp3", ".wav", ".aif", ".aiff", ".flac", ".ogg")
    samples = []
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        if os.path.isdir(entry_path):
            if not fn_is_ableton_project(entry_path):
                samples.extend(fn_search_for_all_samples(entry_path))
        elif os.path.isfile(entry_path):
            if entry_path.endswith(audio_extensions):
                samples.append(entry_path)
            else:
                print("Problem: " + os.path.abspath(entry_path))
    samples.sort()
    return samples
from jsonbender import K, S
from jsonbender.string_ops import Format, ProtectedFormat
class TestFormat:
    """Tests for jsonbender's Format bender."""
    @staticmethod
    def test_format():
        """Format fills positional and named placeholders from benders."""
        bender = Format("{} {} {} {noun}.", K("This"), K("is"), K("a"), noun=K("test"))
        assert bender.bend(None) == "This is a test."
class TestProtectedFormat:
    """Tests for jsonbender's ProtectedFormat bender."""
    @staticmethod
    def test_format():
        """ProtectedFormat yields None when an optional field is absent,
        and formats normally when the field is present."""
        bender = ProtectedFormat(
            "{} {} {} {noun}.",
            K("This"),
            K("is"),
            K("a"),
            noun=S("noun").optional(),
        )
        assert bender.bend({}) is None
        assert bender.bend({"noun": "test"}) == "This is a test."
|
#!/usr/bin/python
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test suite for tree_status.py"""
import os
import sys
import urllib
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.cbuildbot import constants
from chromite.cbuildbot import tree_status
from chromite.lib import cros_test_lib
from chromite.lib import timeout_util
# pylint: disable=W0212,R0904
class TestTreeStatus(cros_test_lib.MoxTestCase):
  """Tests TreeStatus method in cros_build_lib."""

  status_url = 'https://chromiumos-status.appspot.com/current?format=json'

  def setUp(self):
    pass

  def _TreeStatusFile(self, message, general_state):
    """Returns a file-like object with the status message writtin in it."""
    my_response = self.mox.CreateMockAnything()
    my_response.json = '{"message": "%s", "general_state": "%s"}' % (
        message, general_state)
    return my_response

  def _SetupMockTreeStatusResponses(self, status_url,
                                    final_tree_status='Tree is open.',
                                    final_general_state=constants.TREE_OPEN,
                                    rejected_tree_status='Tree is closed.',
                                    rejected_general_state=
                                    constants.TREE_CLOSED,
                                    rejected_status_count=0,
                                    retries_500=0,
                                    output_final_status=True):
    """Mocks out urllib.urlopen commands to simulate a given tree status.

    Args:
      status_url: The status url that status will be fetched from.
      final_tree_status: The final value of tree status that will be returned
        by urlopen.
      final_general_state: The final value of 'general_state' that will be
        returned by urlopen.
      rejected_tree_status: An intermediate value of tree status that will be
        returned by urlopen and retried upon.
      rejected_general_state: An intermediate value of 'general_state' that
        will be returned by urlopen and retried upon.
      rejected_status_count: The number of times urlopen will return the
        rejected state.
      retries_500: The number of times urlopen will fail with a 500 code.
      output_final_status: If True, the status given by final_tree_status and
        final_general_state will be the last status returned by urlopen. If
        False, final_tree_status will never be returned, and instead an
        unlimited number of times rejected_response will be returned.
    """
    final_response = self._TreeStatusFile(final_tree_status,
                                          final_general_state)
    rejected_response = self._TreeStatusFile(rejected_tree_status,
                                             rejected_general_state)
    error_500_response = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(urllib, 'urlopen')
    # Record the expected urlopen call sequence for mox: first the 500
    # failures, then the rejected statuses, then (optionally) the final one.
    for _ in range(retries_500):
      urllib.urlopen(status_url).AndReturn(error_500_response)
      error_500_response.getcode().AndReturn(500)
    if output_final_status:
      for _ in range(rejected_status_count):
        urllib.urlopen(status_url).AndReturn(rejected_response)
        rejected_response.getcode().AndReturn(200)
        rejected_response.read().AndReturn(rejected_response.json)
      urllib.urlopen(status_url).AndReturn(final_response)
      final_response.getcode().AndReturn(200)
      final_response.read().AndReturn(final_response.json)
    else:
      urllib.urlopen(status_url).MultipleTimes().AndReturn(rejected_response)
      rejected_response.getcode().MultipleTimes().AndReturn(200)
      rejected_response.read().MultipleTimes().AndReturn(
          rejected_response.json)
    self.mox.ReplayAll()

  def testTreeIsOpen(self):
    """Tests that we return True is the tree is open."""
    self._SetupMockTreeStatusResponses(self.status_url,
                                       rejected_status_count=5,
                                       retries_500=5)
    self.assertTrue(tree_status.IsTreeOpen(status_url=self.status_url,
                                           period=0))

  def testTreeIsClosed(self):
    """Tests that we return false is the tree is closed."""
    self._SetupMockTreeStatusResponses(self.status_url,
                                       output_final_status=False)
    self.assertFalse(tree_status.IsTreeOpen(status_url=self.status_url,
                                            period=0.1))

  def testTreeIsThrottled(self):
    """Tests that we return True if the tree is throttled."""
    self._SetupMockTreeStatusResponses(self.status_url,
        'Tree is throttled (flaky bug on flaky builder)',
        constants.TREE_THROTTLED)
    self.assertTrue(tree_status.IsTreeOpen(status_url=self.status_url,
                                           throttled_ok=True))

  def testTreeIsThrottledNotOk(self):
    """Tests that we respect throttled_ok"""
    self._SetupMockTreeStatusResponses(self.status_url,
        rejected_tree_status='Tree is throttled (flaky bug on flaky builder)',
        rejected_general_state=constants.TREE_THROTTLED,
        output_final_status=False)
    self.assertFalse(tree_status.IsTreeOpen(status_url=self.status_url,
                                            period=0.1))

  def testWaitForStatusOpen(self):
    """Tests that we can wait for a tree open response."""
    self._SetupMockTreeStatusResponses(self.status_url)
    self.assertEqual(tree_status.WaitForTreeStatus(status_url=self.status_url),
                     constants.TREE_OPEN)

  def testWaitForStatusThrottled(self):
    """Tests that we can wait for a tree open response."""
    self._SetupMockTreeStatusResponses(self.status_url,
        final_general_state=constants.TREE_THROTTLED)
    self.assertEqual(tree_status.WaitForTreeStatus(status_url=self.status_url,
                                                   throttled_ok=True),
                     constants.TREE_THROTTLED)

  def testWaitForStatusFailure(self):
    """Tests that we can wait for a tree open response."""
    self._SetupMockTreeStatusResponses(self.status_url,
                                       output_final_status=False)
    self.assertRaises(timeout_util.TimeoutError,
                      tree_status.WaitForTreeStatus,
                      status_url=self.status_url,
                      period=0.1)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  cros_test_lib.main()
|
import pytest
from django.contrib.sites.models import Site
from django.test import RequestFactory
from cdhweb.pages.templatetags import cdh_tags
def test_url_to_icon():
    """Known URL prefixes map to icon slugs; unknown paths and None map to ''."""
    assert cdh_tags.url_to_icon("/people/staff/") == "ppl"
    assert cdh_tags.url_to_icon("/projects/") == "folder"
    assert cdh_tags.url_to_icon("/events/") == "cal"
    assert cdh_tags.url_to_icon("/contact/") == "convo"
    assert cdh_tags.url_to_icon("/grants/seed-grant/") == "seed"
    assert cdh_tags.url_to_icon("/graduate-fellowships/") == "medal"
    assert cdh_tags.url_to_icon("/grants/") == "grant"
    assert cdh_tags.url_to_icon("/unknown/") == ""
    assert cdh_tags.url_to_icon("/year-of-data/") == "cal"
    assert cdh_tags.url_to_icon(None) == ""
@pytest.mark.django_db
def test_url_to_icon_path():
    """Icon paths are absolute static URLs on the current Django site's domain."""
    domain = Site.objects.get_current().domain
    assert cdh_tags.url_to_icon_path(
        "/people/staff/"
    ) == "https://{}/static/img/cdh-icons/png@2X/ppl@2x.png".format(domain)
def test_startswith():
    """startswith is True only for strings with the prefix; non-str is False."""
    assert cdh_tags.startswith("yes", "y")
    assert not cdh_tags.startswith("no", "y")
    assert not cdh_tags.startswith(3, "y")
def test_url_replace():
    """url_replace swaps one query parameter while preserving the others."""
    rf = RequestFactory()
    request = rf.get("/search?q=digital&page=2")
    context = {"request": request}
    assert cdh_tags.url_replace(context, "page", 3) == "q=digital&page=3"
|
class Nodo(object):
    """Hash-table node holding one tuple; the first value is the primary key."""

    def __init__(self, datos):
        self.datos = datos
        self.primaria = datos[0]
        # Classify the primary key so the table can reject mixed key types.
        self.tipo = 'str' if isinstance(self.primaria, str) else 'int'


class Tabla(object):
    """Hash table with separate chaining (each slot chains a list of Nodo)."""

    def __init__(self, nombre, columnas):
        self.nombre = nombre
        self.columnas = columnas      # expected tuple length for inserts
        self.tamano = 15              # number of buckets
        self.vector = [None] * self.tamano
        self.elementos = 0
        self.factorCarga = 0
        self.tipoPrimaria = None      # 'str' or 'int', fixed by the first insert

    def toASCII(self, cadena):
        """Map a string key to an int (sum of character codes) for hashing."""
        return sum(ord(char) for char in cadena)

    def insertar(self, datos):
        """Insert one tuple.

        Returns False on arity mismatch or primary-key type mismatch,
        True otherwise. Triggers a rehash when the load factor exceeds 0.8.
        """
        if len(datos) != self.columnas:
            return False
        nuevo = Nodo(datos)
        if self.elementos == 0:
            self.tipoPrimaria = nuevo.tipo
        elif self.tipoPrimaria != nuevo.tipo:
            return False
        posicion = self.funcionHash(nuevo.primaria)
        if self.vector[posicion] is None:
            self.vector[posicion] = []
        self.vector[posicion].append(nuevo)
        self.elementos += 1
        self.factorCarga = self.elementos / self.tamano
        if self.factorCarga > 0.8:
            self.rehashing()
        return True

    def rehashing(self):
        """Grow the table until the load factor drops below 0.5 and re-insert.

        Bug fix: the original only appended empty slots, leaving existing
        nodes in buckets computed with the OLD table size, so funcionHash
        could no longer locate them. All nodes are now redistributed under
        the new size.
        """
        while not (self.factorCarga < 0.5):
            self.tamano += 1
            self.factorCarga = self.elementos / self.tamano
        antiguos = self.vector
        self.vector = [None] * self.tamano
        for cubeta in antiguos:
            if cubeta:
                for nodo in cubeta:
                    posicion = self.funcionHash(nodo.primaria)
                    if self.vector[posicion] is None:
                        self.vector[posicion] = []
                    self.vector[posicion].append(nodo)

    def funcionHash(self, primaria):
        """Bucket index for a key (string keys are first mapped via toASCII)."""
        if self.tipoPrimaria == 'str':
            primaria = self.toASCII(primaria)
        return primaria % self.tamano

    def imprimir(self):
        """Print every bucket index and the nodes it chains."""
        print('Contenido de la tabla:', self.nombre)
        for indice, cubeta in enumerate(self.vector):
            if cubeta is None:
                print('Indice:', indice, 'Contenido:', cubeta)
            else:
                print('Indice:', indice, 'Contenido:', end=' ')
                for nodo in cubeta:
                    print('{Primaria:', nodo.primaria, 'Tupla:', str(nodo.datos) + '}', end=' ')
                print('')
# Build a small demo table and print its bucket layout.
tabla = Tabla('Integrantes', 2)
_datos_demo = [
    [10, 'Welmann'], [20, 'Welmann1'], [16, 'Welmann2'], [50, 'Welmann3'],
    [12, 'Welmann4'], [18, 'Welmann5'], [78, 'Welmann6'], [13, 'Welmann7'],
    [72, 'Welmann8'], [80, 'Welmann9'], [100, 'Welmann10'],
    [160, 'Welmann21'], [120, 'Welmann41'], [180, 'Welmann51'],
    [780, 'Welmann61'], [130, 'Welmann71'], [720, 'Welmann81'],
    [800, 'Welmann91'],
]
for _fila in _datos_demo:
    tabla.insertar(_fila)
tabla.imprimir()
|
"""Plate analysis."""
from contextlib import suppress
from functools import lru_cache
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from scipy.signal import argrelmin, argrelmax
from . import conf, error, field, phyvars
from ._helpers import saveplot, list_of_vars
from ._step import Field
from .stagyydata import StagyyData
def _vzcheck(iphis, snap, vz_thres):
"""Remove positions where vz is below threshold."""
# verifying vertical velocity
vzabs = np.abs(snap.fields['v3'].values[0, ..., 0])
argdel = []
for i, iphi in enumerate(iphis):
vzm = np.mean(vzabs[[iphi - 1, iphi, iphi + 1], :])
if vzm < vz_thres:
argdel.append(i)
return np.delete(iphis, argdel)
@lru_cache()
def detect_plates(snap, vz_thres_ratio=0):
    """Detect plate limits using derivative of horizontal velocity.

    This function is cached for convenience.
    NOTE: lru_cache keys on (snap, vz_thres_ratio), so snap must be hashable
    and the cache keeps a reference to every step it has seen.

    Args:
        snap (:class:`~stagpy._step.Step`): a step of a StagyyData instance.
        vz_thres_ratio (float): if above zero, an additional check based on the
            vertical velocities is performed. Limits detected above a region
            where the vertical velocity is below vz_thres_ratio * mean(vzabs)
            are ignored.
    Returns:
        tuple of :class:`numpy.array`: itrenches, iridges
            1D arrays containing phi-index of detected trenches and ridges.
    """
    dvphi = _surf_diag(snap, 'dv2').values
    # finding trenches: clip positive values so argrelmin only sees the
    # strong negative dvphi peaks
    dvphi_saturated = np.copy(dvphi)
    max_dvphi = np.amin(dvphi) * 0.2
    dvphi_saturated[dvphi > max_dvphi] = max_dvphi
    trench_span = 15 if snap.sdat.par['boundaries']['air_layer'] else 10
    itrenches = argrelmin(dvphi_saturated, order=trench_span, mode='wrap')[0]
    # finding ridges: mirror image of the trench detection
    dvphi_saturated = np.copy(dvphi)
    min_dvphi = np.amax(dvphi) * 0.2
    dvphi_saturated[dvphi < min_dvphi] = min_dvphi
    ridge_span = 20
    iridges = argrelmax(dvphi_saturated, order=ridge_span, mode='wrap')[0]
    # elimination of ridges that are too close to a trench
    phi = snap.geom.p_centers
    phi_trenches = phi[itrenches]
    argdel = []
    if itrenches.size and iridges.size:
        for i, iridge in enumerate(iridges):
            mdistance = np.amin(np.abs(phi_trenches - phi[iridge]))
            if mdistance < 0.016:
                argdel.append(i)
        if argdel:
            iridges = np.delete(iridges, argdel)
    # additional check on vz
    if vz_thres_ratio > 0:
        r_w = snap.geom.r_walls
        # radially averaged |vz|, weighted by cell thickness
        vz_mean = (np.sum(snap.rprofs['vzabs'].values * np.diff(r_w)) /
                   (r_w[-1] - r_w[0]))
        vz_thres = vz_mean * vz_thres_ratio
        itrenches = _vzcheck(itrenches, snap, vz_thres)
        iridges = _vzcheck(iridges, snap, vz_thres)
    return itrenches, iridges
def _plot_plate_limits(axis, trenches, ridges):
"""Plot lines designating ridges and trenches."""
for trench in trenches:
axis.axvline(x=trench, color='red', ls='dashed', alpha=0.4)
for ridge in ridges:
axis.axvline(x=ridge, color='green', ls='dashed', alpha=0.4)
def _plot_plate_limits_field(axis, rcmb, trenches, ridges):
"""Plot arrows designating ridges and trenches in 2D field plots."""
for trench in trenches:
xxd = (rcmb + 1.02) * np.cos(trench) # arrow begin
yyd = (rcmb + 1.02) * np.sin(trench) # arrow begin
xxt = (rcmb + 1.35) * np.cos(trench) # arrow end
yyt = (rcmb + 1.35) * np.sin(trench) # arrow end
axis.annotate('', xy=(xxd, yyd), xytext=(xxt, yyt),
arrowprops=dict(facecolor='red', shrink=0.05),
annotation_clip=False)
for ridge in ridges:
xxd = (rcmb + 1.02) * np.cos(ridge)
yyd = (rcmb + 1.02) * np.sin(ridge)
xxt = (rcmb + 1.35) * np.cos(ridge)
yyt = (rcmb + 1.35) * np.sin(ridge)
axis.annotate('', xy=(xxd, yyd), xytext=(xxt, yyt),
arrowprops=dict(facecolor='green', shrink=0.05),
annotation_clip=False)
def _isurf(snap):
"""Return index of surface accounting for air layer."""
if snap.sdat.par['boundaries']['air_layer']:
dsa = snap.sdat.par['boundaries']['air_thickness']
# we are a bit below the surface; delete "-some number" to be just
# below the surface (that is considered plane here); should check if
# in the thermal boundary layer
isurf = np.argmin(
np.abs(1 - dsa - snap.geom.r_centers + snap.geom.rcmb)) - 4
else:
isurf = -1
return isurf
def _surf_diag(snap, name):
    """Get a surface field.

    Can be a sfield, a regular scalar field evaluated at the surface,
    or dv2 (which is dvphi/dphi).

    Raises:
        error.UnknownVarError: if `name` matches none of the above.
    """
    with suppress(error.UnknownVarError):
        return snap.sfields[name]
    isurf = _isurf(snap)
    with suppress(error.UnknownVarError):
        # Local name `fld` avoids shadowing the `field` module imported at
        # the top of this file (used elsewhere as field.plot_scalar etc.).
        fld, meta = snap.fields[name]
        return Field(fld[0, :, isurf, 0], meta)
    if name == 'dv2':
        vphi = snap.fields['v2'].values[0, :, isurf, 0]
        dvphi = np.diff(vphi) / (snap.geom.r_centers[isurf] *
                                 np.diff(snap.geom.p_walls))
        return Field(dvphi, phyvars.Varf(r"$dv_\phi/d\phi$", '1/s'))
    raise error.UnknownVarError(name)
def _continents_location(snap, at_surface=True):
"""Location of continents as a boolean array.
If at_surface is True, it is evaluated only at the surface, otherwise it is
evaluated in the entire domain.
"""
if at_surface:
if snap.sdat.par['boundaries']['air_layer']:
icont = _isurf(snap) - 6
else:
icont = -1
else:
icont = slice(None)
csurf = snap.fields['c'].values[0, :, icont, 0]
if snap.sdat.par['boundaries']['air_layer'] and\
not snap.sdat.par['continents']['proterozoic_belts']:
return (csurf >= 3) & (csurf <= 4)
elif (snap.sdat.par['boundaries']['air_layer'] and
snap.sdat.par['continents']['proterozoic_belts']):
return (csurf >= 3) & (csurf <= 5)
elif snap.sdat.par['tracersin']['tracers_weakcrust']:
return csurf >= 3
return csurf >= 2
def plot_at_surface(snap, names):
    """Plot surface diagnostics.

    Args:
        snap (:class:`~stagpy._step.Step`): a step of a StagyyData instance.
        names (str): names of requested surface diagnotics. They are separated
            by ``-`` (figures), ``.`` (subplots) and ``,`` (same subplot).
            Surface diagnotics can be valid surface field names, field names,
            or `"dv2"` which is d(vphi)/dphi.
    """
    for vfig in list_of_vars(names):
        fig, axes = plt.subplots(nrows=len(vfig), sharex=True,
                                 figsize=(12, 2 * len(vfig)))
        # plt.subplots returns a bare Axes when nrows == 1; normalize to list
        axes = [axes] if len(vfig) == 1 else axes
        fname = 'plates_surf_'
        for axis, vplt in zip(axes, vfig):
            fname += '_'.join(vplt) + '_'
            label = ''
            for name in vplt:
                data, meta = _surf_diag(snap, name)
                label = meta.description
                # pick the phi grid matching the data size
                # (cell centers vs cell walls)
                phi = (snap.geom.p_centers if data.size == snap.geom.nptot
                       else snap.geom.p_walls)
                axis.plot(phi, data, label=label)
            axis.set_ylim([conf.plot.vmin, conf.plot.vmax])
            if conf.plates.continents:
                # shade continent locations behind the curves
                continents = _continents_location(snap)
                ymin, ymax = axis.get_ylim()
                axis.fill_between(snap.geom.p_centers, ymin, ymax,
                                  where=continents, alpha=0.2,
                                  facecolor='#8B6914')
                axis.set_ylim([ymin, ymax])
            phi = snap.geom.p_centers
            itrenches, iridges = detect_plates(snap, conf.plates.vzratio)
            _plot_plate_limits(axis, phi[itrenches], phi[iridges])
            if len(vplt) == 1:
                axis.set_ylabel(label)
            else:
                axis.legend()
        axes[-1].set_xlabel(r"$\phi$")
        axes[-1].set_xlim(snap.geom.p_walls[[0, -1]])
        saveplot(fig, fname, snap.isnap)
def _write_trench_diagnostics(step, vrms_surf, fid):
    """Print out some trench diagnostics.

    Writes one line per trench to `fid` (istep, times, position, velocity,
    distance to closest continent, continent position, trench age).
    """
    itrenches, _ = detect_plates(step, conf.plates.vzratio)
    # dimensional time in My, scaled by the surface RMS velocity
    time = step.time * vrms_surf *\
        conf.scaling.ttransit / conf.scaling.yearins / 1.e6
    isurf = _isurf(step)
    trenches = step.geom.p_centers[itrenches]
    # vphi at trenches
    # average adjacent values; presumably maps wall-located vphi onto the
    # cell-center grid used for itrenches — confirm grid staggering
    vphi = step.fields['v2'].values[0, :, isurf, 0]
    vphi = (vphi[1:] + vphi[:-1]) / 2
    v_trenches = vphi[itrenches]
    if 'age' in step.fields:
        agefld = step.fields['age'].values[0, :, isurf, 0]
        # mask (near-)zero ages before dimensionalizing
        age_surface = np.ma.masked_where(agefld < 1.e-5, agefld)
        age_surface_dim = age_surface * vrms_surf *\
            conf.scaling.ttransit / conf.scaling.yearins / 1.e6
        agetrenches = age_surface_dim[itrenches]  # age at the trench
    else:
        agetrenches = np.zeros(len(itrenches))
    if conf.plates.continents:
        phi_cont = step.geom.p_centers[_continents_location(step)]
    else:
        # no continents: keep the output columns with NaN placeholders
        phi_cont = np.array([np.nan])
    distance_subd = []
    ph_cont_subd = []
    for trench_i in trenches:
        # compute distance between subduction and continent
        # (shortest angular distance, accounting for the 2*pi wrap)
        angdistance1 = np.abs(phi_cont - trench_i)
        angdistance2 = 2 * np.pi - angdistance1
        angdistance = np.minimum(angdistance1, angdistance2)
        i_closest = np.argmin(angdistance)
        ph_cont_subd.append(phi_cont[i_closest])
        distance_subd.append(angdistance[i_closest])
    # writing the output into a file, all time steps are in one file
    for isubd in range(len(trenches)):
        fid.write(
            "%6.0f %11.7f %11.3f %10.6f %10.6f %10.6f %10.6f %11.3f\n" % (
                step.isnap,
                step.time,
                time,
                trenches[isubd],
                v_trenches[isubd],
                distance_subd[isubd],
                ph_cont_subd[isubd],
                agetrenches[isubd]))
def plot_scalar_field(snap, fieldname):
    """Plot scalar field with plate information.

    Args:
        snap (:class:`~stagpy._step.Step`): a step of a StagyyData instance.
        fieldname (str): name of the field that should be decorated with plate
            informations.
    """
    fig, axis, _, _ = field.plot_scalar(snap, fieldname)
    if conf.plates.continents:
        # overlay the composition field where continents are located
        c_field = np.ma.masked_where(
            ~_continents_location(snap, at_surface=False),
            snap.fields['c'].values[0, :, :, 0])
        # temporarily disable the colorbar for the overlay plot
        cbar = conf.field.colorbar
        conf.field.colorbar = False
        cmap = colors.ListedColormap(["k", "g", "m"])
        field.plot_scalar(snap, 'c', c_field, axis, cmap=cmap,
                          norm=colors.BoundaryNorm([2, 3, 4, 5], cmap.N))
        conf.field.colorbar = cbar
    # plotting velocity vectors
    field.plot_vec(axis, snap, 'sx' if conf.plates.stress else 'v')
    # Put arrow where ridges and trenches are
    phi = snap.geom.p_centers
    itrenches, iridges = detect_plates(snap, conf.plates.vzratio)
    _plot_plate_limits_field(axis, snap.geom.rcmb,
                             phi[itrenches], phi[iridges])
    saveplot(fig, f'plates_{fieldname}', snap.isnap,
             close=conf.plates.zoom is None)
    # Zoom
    if conf.plates.zoom is not None:
        if not 0 <= conf.plates.zoom <= 360:
            raise error.InvalidZoomError(conf.plates.zoom)
        # margins around the zoom point depend on the quadrant of the angle
        if 45 < conf.plates.zoom <= 135:
            ladd, radd, uadd, dadd = 0.8, 0.8, 0.05, 0.1
        elif 135 < conf.plates.zoom <= 225:
            ladd, radd, uadd, dadd = 0.05, 0.1, 0.8, 0.8
        elif 225 < conf.plates.zoom <= 315:
            ladd, radd, uadd, dadd = 0.8, 0.8, 0.1, 0.05
        else:  # >315 or <=45
            ladd, radd, uadd, dadd = 0.1, 0.05, 0.8, 0.8
        xzoom = (snap.geom.rcmb + 1) * np.cos(np.radians(conf.plates.zoom))
        yzoom = (snap.geom.rcmb + 1) * np.sin(np.radians(conf.plates.zoom))
        axis.set_xlim(xzoom - ladd, xzoom + radd)
        axis.set_ylim(yzoom - dadd, yzoom + uadd)
        saveplot(fig, f'plates_zoom_{fieldname}', snap.isnap)
def cmd():
    """Implementation of plates subcommand.

    Other Parameters:
        conf.plates
        conf.scaling
        conf.plot
        conf.core
    """
    sdat = StagyyData()
    isurf = _isurf(next(iter(sdat.walk)))
    vrms_surf = sdat.walk.filter(rprofs=True)\
        .rprofs_averaged['vhrms'].values[isurf]
    nb_plates = []
    time = []
    istart, iend = None, None
    # all per-step trench diagnostics are accumulated in a single file
    with open(f'plates_trenches_{sdat.walk.stepstr}.dat', 'w') as fid:
        fid.write('# istep time time_My phi_trench vel_trench '
                  'distance phi_cont age_trench_My\n')
        for step in sdat.walk.filter(fields=['T']):
            # could check other fields too
            _write_trench_diagnostics(step, vrms_surf, fid)
            plot_at_surface(step, conf.plates.plot)
            plot_scalar_field(step, conf.plates.field)
            if conf.plates.nbplates:
                time.append(step.timeinfo.loc['t'])
                itr, ird = detect_plates(step, conf.plates.vzratio)
                nb_plates.append(itr.size + ird.size)
                istart = step.isnap if istart is None else istart
                iend = step.isnap
            if conf.plates.distribution:
                phi = step.geom.p_centers
                itr, ird = detect_plates(step, conf.plates.vzratio)
                limits = np.concatenate((phi[itr], phi[ird]))
                limits.sort()
                # plate sizes are gaps between consecutive limits,
                # wrapping around the annulus
                plate_sizes = np.diff(limits, append=2 * np.pi + limits[0])
                fig, axis = plt.subplots()
                axis.hist(plate_sizes, 10, (0, np.pi))
                axis.set_ylabel("Number of plates")
                axis.set_xlabel(r"$\phi$-span")
                saveplot(fig, 'plates_size_distribution', step.isnap)
    if conf.plates.nbplates:
        # summary plot of plate count over the processed time span
        figt, axis = plt.subplots()
        axis.plot(time, nb_plates)
        axis.set_xlabel("Time")
        axis.set_ylabel("Number of plates")
        saveplot(figt, f'plates_{istart}_{iend}')
|
import sys
class AbundCSV2NOAPlugin:
    """Convert a CSV abundance table to a NOA (name/abundance) file.

    The CSV's first row lists bacteria names (first column is a row label);
    each subsequent row holds per-read abundance values.
    """

    def input(self, inputfile):
        """Read the CSV and accumulate per-bacterium abundance totals."""
        # Fix: use a with-block so the file handle is always closed.
        with open(inputfile, 'r') as csvfile:
            firstline = csvfile.readline()
            # Drop the leading row-label column; keep only bacteria names.
            self.bacteria = firstline.split(',')[1:]
            self.abund = [0] * len(self.bacteria)
            self.numreads = 0
            for line in csvfile:
                self.numreads += 1
                contents = line.split(',')
                # Column 0 is the row label; columns 1..N map to bacteria
                # 0..N-1. (Fixes an off-by-one that silently ignored the
                # last column of every row.)
                for i in range(len(self.bacteria)):
                    self.abund[i] += float(contents[i + 1])

    def run(self):
        """No transformation step is needed for this plugin."""
        pass

    def output(self, outputfile):
        """Write one name/abundance line per bacterium plus summary stats."""
        minabund = 1
        maxabund = 0
        sumabund = 0
        with open(outputfile, 'w') as noafile:
            noafile.write("Name" + "\t" + "Abundance" + "\n")
            for i, bacterium in enumerate(self.bacteria):
                bac = bacterium.strip()
                # (Fixes an off-by-one: the original paired bacterium i with
                # abund[i-1], wrapping to the LAST slot on the first row.)
                avg = self.abund[i] / self.numreads
                # Strip the surrounding quotes from the CSV header name.
                noafile.write(bac[1:len(bac) - 1] + "\t" + str(avg) + "\n")
                minabund = min(minabund, avg)
                maxabund = max(maxabund, avg)
                sumabund += avg
        print("Min Abundance: " + str(minabund))
        print("Max Abundance: " + str(maxabund))
        print("Avg Abundance: " + str(float(sumabund) / len(self.bacteria)))
|
import os
from vcflat.HeaderExtraction import VcfHeader
def get_input():
    """Extract and return the header of the snpEff test VCF fixture."""
    test_data_dir = os.path.join(os.path.dirname(__file__), "..", "test_data")
    i = os.path.join(test_data_dir, "test.snpeff.vcf")
    vcfh = VcfHeader(i)
    output = vcfh.extract_header()
    return output
def test_1():
    """ checks that output is list """
    # isinstance is the idiomatic type check (also accepts list subclasses).
    assert isinstance(get_input(), list)
def test_2():
    """ checks that output is not empty """
    assert len(get_input()) > 0
|
import configparser
import foss_finder.utils.validators
from .strings import *
###############################################################
# First, define variables by using the INI configuration file #
###############################################################

# Path of the INI configuration file (relative to the root of foss_finder)
INI_PATH = '.foss_finder'

# Processing of the INI configuration file
# (ConfigParser.read silently skips a missing file, leaving all sections
# absent, so every lookup below must guard on section presence)
ini_config = configparser.ConfigParser()
ini_config.read(INI_PATH)
# Define function to clean data from INI configuration file
def clean(data):
    """Turn a multi-line INI value into a list of lines, commas removed.

    Falsy input (missing option, empty string, None) yields an empty list.
    """
    if not data:
        return []
    return data.replace(',', '').split('\n')
# Exposed variables.
# ROBUSTNESS FIX: every option lookup now goes through SectionProxy.get(),
# so a section that is present but missing an option falls back to the
# empty default instead of raising KeyError at import time (clean() maps
# None to []).
if 'User defined information' in ini_config:
    OPTIONAL_COLUMNS = clean(ini_config['User defined information'].get('optional_columns'))
else:
    OPTIONAL_COLUMNS = []
if 'NPM parser' in ini_config:
    # getboolean() already falls back to None when the option is absent.
    USE_SEMVER = ini_config['NPM parser'].getboolean('use_semver')
else:
    USE_SEMVER = False
if 'NPM sections' in ini_config:
    NPM_SECTIONS = {
        PRODUCTION: clean(ini_config['NPM sections'].get('npm_prod')),
        DEVELOPMENT: clean(ini_config['NPM sections'].get('npm_dev')),
    }
else:
    NPM_SECTIONS = {
        PRODUCTION: [],
        DEVELOPMENT: [],
    }
if 'Python files' in ini_config:
    PYTHON_FILES = {
        PRODUCTION: clean(ini_config['Python files'].get('py_prod')),
        DEVELOPMENT: clean(ini_config['Python files'].get('py_dev')),
    }
else:
    PYTHON_FILES = {
        PRODUCTION: [],
        DEVELOPMENT: [],
    }
if 'Checks' in ini_config:
    VALIDATORS = clean(ini_config['Checks'].get('validators'))
else:
    VALIDATORS = []
if 'Ignored repositories' in ini_config:
    IGNORED_REPOS = clean(ini_config['Ignored repositories'].get('ignored_repos'))
else:
    IGNORED_REPOS = []
############################################################################
# Now, define variables that are not related to the INI configuration file #
############################################################################
# Default columns of the CSV files (ordered)
DEFAULT_COLUMNS = [
    REGISTRY,
    PACKAGE,
    LICENSE,
    VERSION,
    URL,
]
# Name of the local user-defined information file (must be at the root of a repository)
USER_DEFINED_INFORMATION_NAME = '.foss.json'
# Name of the global user-defined information file (must be in the folder where you run the script)
GLOBAL_USER_DEFINED_INFORMATION_NAME = '.foss.global.json'
# Fields of the user-defined information file with their required and optional
# attributes.  Mapping: field name -> (required columns, optional columns).
USER_DEFINED_INFORMATION_FIELDS = {
    ADD_PACKAGE_NAME: (
        # required
        [
            PACKAGE,
            VERSION,
            LICENSE,
            OWNER,
        ],
        # optional
        DEFAULT_COLUMNS,
    ),
    OVERWRITES_NAME: (
        # required
        [
            PACKAGE,
            OWNER,
            REASON,
        ],
        # optional
        [VERSION] + DEFAULT_COLUMNS,
    ),
    MULTI_LICENSE_SELECTION_NAME: (
        # required
        [
            PACKAGE,
            OWNER,
            MULTI_LICENSE_SELECTION,
        ],
        # optional
        [VERSION],
    ),
    ADDITIONAL_INFO_NAME: (
        # required
        [
            PACKAGE,
            OWNER,
        ],
        # optional: the INI-configured extra columns are also accepted here
        [VERSION] + OPTIONAL_COLUMNS,
    ),
}
# Maps validator keys to the actual check classes
VALIDATORS_MAP = {
    GPL_CHECK: foss_finder.utils.validators.GPLCheck,
    MULTI_LICENSE_SELECTION_CHECK: foss_finder.utils.validators.MultiLicenseCheck,
}
|
"""
Template App - constants used throughout all files
"""
import numpy as np
# ----------------------------------------------------------------- #
###################################
# colors #
###################################
# basic colors
c_black = "#333333"
c_blue = "#3070b3"
c_white = "#ffffff"
# arrows / objects
c_green = "#a2ad00"
c_orange = "#e37222"
# auxiliary lines / objects
c_gray = "#b3b3b3"
###################################
# beam properties #
###################################
F = 80 # maximum force of the dynamic load
L = 5.0 # length of the beam
mue = 1.0 # mue
EI_real = 10000.0
###################################
# plotting properties #
###################################
n_beam = 401 # value of fragmentation of the beam
n_r = 499 # value of fragmentation of the excitation frequency ratio
max_r = 10 # maximum of displayable excitation frequency ratio
##################################
# external images #
##################################
# images/graphics from external sources
pinned_support_img = "Beam_Modes/static/images/pinned_support.svg"
fixed_support_left_img = "Beam_Modes/static/images/fixed_support_left.svg"
fixed_support_right_img = "Beam_Modes/static/images/fixed_support_right.svg"
# height support images
img_h = 1.0
img_w_pinned = 0.4
img_w_fixed = 0.1
y_fixed = 0.45
|
# Program that approves or denies a bank loan for a house purchase.
# Asks for the house price, the buyer's salary and the payback period in
# years; the monthly payment must not exceed 30% of the salary, otherwise
# the loan is denied.
casa = float(input('Valor da casa: '))
salario = float(input('Salário do comprador: '))
anos = int(input('Em quantos anos será paga a casa: '))
# Monthly payment over the whole period (no interest).
prestacao = casa / (anos * 12)
# Maximum allowed payment: 30% of the monthly salary.
minimo = salario / 100 * 30
if prestacao > minimo:
    # FIX: this message has no placeholders, so the f-string prefix was
    # pointless (ruff F541); a plain literal is used instead.
    print('Empréstimo NEGADO! A prestação excedeu 30% do salário do comprador.')
else:
    print(f'Empréstimo APROVADO! A casa com valor de R${casa:.2f}, '
          f'será paga em {anos} anos, com prestações de R${prestacao:.2f} mensais.')
|
import json
from flask.ext.presst import fields, resource_method, Reference
from tests import PresstTestCase, TestPresstResource
class TestResourceMethod(PresstTestCase):
    """Tests for @resource_method endpoints: item- and collection-level
    routes, argument parsing/conversion, and generated JSON hyper-schema."""

    def setUp(self):
        super(TestResourceMethod, self).setUp()

        class Citrus(TestPresstResource):
            # In-memory fixture items served by the test resource.
            items = [{'id': 1, 'name': 'Orange', 'sweetness': 3},
                     {'id': 2, 'name': 'Lemon', 'sweetness': 1},
                     {'id': 3, 'name': 'Clementine', 'sweetness': 5}]
            name = fields.String()
            sweetness = fields.Integer()

            @resource_method('GET')
            def name_length(self, citrus, *args, **kwargs):
                # Item-level GET: length of the item's name.
                return len(citrus['name'])

            @resource_method('GET', collection=True)
            def count(self, item_list):
                # Collection-level GET: number of items.
                return len(item_list)

            @resource_method('GET')
            def sweeter_than(self, citrus, other):
                # `other` is resolved from a resource URI (see add_argument below).
                return citrus['sweetness'] > other['sweetness']

            @resource_method('POST')
            def sweeten(self, citrus, by):
                # Mutates the item in place and returns its marshalled form.
                citrus['sweetness'] += by
                return self.marshal_item(citrus)

            sweeten.add_argument('by', location='args', type=int, required=True)

        # `other` must be declared after the class exists so Reference can
        # point back at Citrus itself.
        Citrus.sweeter_than.add_argument('other', type=Reference(Citrus))
        self.Citrus = Citrus
        self.api.add_resource(Citrus)
        self.api.enable_schema()

    def test_item_method(self):
        """Item-level resource method routes to /citrus/<id>/name_length."""
        with self.app.test_client() as client:
            self.assertEqual(self.parse_response(client.get('/citrus/1/name_length')), (6, 200))
            self.assertEqual(self.parse_response(client.get('/citrus/2/name_length')), (5, 200))

    def test_collection_method(self):
        """Collection-level resource method routes to /citrus/count."""
        with self.app.test_client() as client:
            self.assertEqual(self.parse_response(client.get('/citrus/count')), (3, 200))

    def test_arguments(self):
        # required & verification & defaults.
        pass

    def test_callable(self):
        """Resource methods remain directly callable on an instance."""
        with self.app.test_request_context('/citrus/1'):
            instance = self.Citrus()
            instance.sweeten(self.Citrus.get_item_for_id(1), 5)
            self.assertEqual({'id': 1, 'name': 'Orange', 'sweetness': 8}, self.Citrus.get_item_for_id(1))

    def test_convert_argument(self):
        """Query-string argument is converted to int before the call."""
        with self.app.test_client() as client:
            self.assertEqual(self.parse_response(client.post('/citrus/3/sweeten?by=2')),
                             ({"name": "Clementine", "resource_uri": "/citrus/3", "sweetness": 7}, 200))

    def test_required_argument(self):
        """Missing required argument yields HTTP 400."""
        with self.app.test_client() as client:
            self.assertEqual(self.parse_response(client.post('/citrus/1/sweeten')), (None, 400))

    def test_optional_argument(self):
        pass

    def test_reference_argument(self):
        """A Reference-typed argument resolves a resource URI to the item."""
        with self.app.test_client() as client:
            for citrus_id, other_id, val in ((1, 2, True), (1, 3, False), (2, 1, False), (3, 2, True)):
                self.assertEqual(self.parse_response(client.get('/citrus/{}/sweeter_than'.format(citrus_id),
                                                                data=json.dumps(
                                                                    {'other': '/citrus/{}'.format(other_id)}),
                                                                content_type='application/json')), (val, 200))

    def test_2_level(self):
        pass

    def test_get_schema(self):
        """The generated hyper-schema exposes fields and method links."""
        self.request('GET', '/', None, {
            "$schema": "http://json-schema.org/hyper-schema#",
            "definitions": {
                "citrus": {
                    "definitions": {
                        "name": {
                            "type": "string"
                        },
                        "resource_uri": {
                            "format": "uri",
                            "readOnly": True,
                            "type": "string"
                        },
                        "sweetness": {
                            "type": "integer"
                        }
                    },
                    "links": [
                        {
                            "href": "/citrus/count",
                            "rel": "count",
                            "method": "GET",
                            "schema": {
                                "properties": {}
                            }
                        },
                        {
                            "href": "/citrus/{id}/name_length",
                            "rel": "name_length",
                            "method": "GET",
                            "schema": {
                                "properties": {}
                            }
                        },
                        {
                            "href": "/citrus/{id}",
                            "rel": "self"
                        },
                        {
                            "href": "/citrus/{id}/sweeten",
                            "rel": "sweeten",
                            "method": "POST",
                            "schema": {  # "by" not shown because via query string and method is POST
                                "properties": {}
                            }
                        },
                        {
                            "href": "/citrus/{id}/sweeter_than",
                            "rel": "sweeter_than",
                            "method": "GET",
                            "schema": {
                                "properties": {
                                    "other": {
                                        "$ref": "#/definitions/citrus/definitions/resource_uri"
                                    }
                                }
                            }
                        }
                    ],
                    "properties": {
                        "name": {
                            "$ref": "#/definitions/citrus/definitions/name"
                        },
                        "resource_uri": {
                            "$ref": "#/definitions/citrus/definitions/resource_uri"
                        },
                        "sweetness": {
                            "$ref": "#/definitions/citrus/definitions/sweetness"
                        }
                    }
                }
            },
            "properties": {
                "citrus": {"$ref": "#/definitions/citrus"}
            }
        }, 200)
#!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
from Communicator.ChatBot.bot import Bot
from Communicator.SlackInterface.slack import SlackCommunicator
from Utilities.misc import Printer
#========================================================================
class Communicator(Printer, SlackCommunicator):
    """Routes experiment requests and feedback between chemOS and a
    messaging channel.  Supported channel types ('communicator'/'type' in
    the settings): 'auto' (self-driven file dumping), 'slack'; 'gmail' and
    'twitter' are stubbed and raise NotImplementedError.
    """

    # Queues picked up externally (e.g. by chemOS).
    # NOTE(review): these are CLASS attributes mutated in place, so they are
    # shared across all Communicator instances — confirm this is intended.
    RECEIVED_REQUESTS = []
    RECEIVED_FEEDBACK = []

    def __init__(self, settings, verbose = True):
        # Printer is initialised explicitly; SlackCommunicator only when the
        # 'slack' option is selected.
        Printer.__init__(self, 'COMMUNICATOR', color = 'grey')
        self.settings = settings
        self.account_details = self.settings['account_details']
        self.account_details['exp_names'] = [exp['name'] for exp in self.settings['experiments']]
        self.verbose = verbose
        self.option = self.settings['communicator']['type']
        if self.option == 'auto':
            # prepare file dumping mechanism for all experiments
            for experiment in self.settings['experiments']:
                request = {'exp_identifier': experiment['name'], 'kind': 'start'}
                self.RECEIVED_REQUESTS.append(request)
        elif self.option == 'gmail':
            # prepare gmail communication (not implemented yet)
            # GMailCommunicator.__init__(self.account_details)
            self.bot = Bot()
            raise NotImplementedError
        elif self.option == 'twitter':
            # prepare twitter communication (not implemented yet)
            # TwitterCommunicator.__init__(self.account_details)
            self.bot = Bot()
            raise NotImplementedError
        elif self.option == 'slack':
            self._print('setting up Slack streaming')
            self.bot = Bot()
            SlackCommunicator.__init__(self, self.settings['communicator'], self.account_details)
        else:
            self._print('did not understand option: %s' % self.option)
            raise NotImplementedError

    def _find_experimental_procedure(self, text):
        """Return the first configured experiment name mentioned in *text*
        (case-insensitive substring match), or None if none matches."""
        for exp_name in self.account_details['exp_names']:
            if exp_name.lower() in text.lower():
                return exp_name
        # for/else: reached only when the loop finishes without returning.
        else:
            return None

    def _process_request(self, author, request, kind = 'start'):
        """Validate *request* against the known experiments, queue it in
        RECEIVED_REQUESTS and return the experiment name (None on failure,
        after messaging the author back)."""
        # just store new requests locally as attributes to be picked up by chemOS
        exp_name = self._find_experimental_procedure(request.lower())
        # print('REQUEST', request)
        # print('EXP_NAME', exp_name)
        if not exp_name:
            self._print('could not find request %s' % request)
            message = 'Could not find valid experiment identifier in message: {@FOUND_IDENT}.\nPlease choose your identifier from: {@EXP_IDENTS}'
            replace_dict = {'{@EXP_IDENTS}': ','.join(self.account_details['exp_names']),
                            '{@FOUND_IDENT}': request}
            self.send_message(author, message, replace_dict)
            return None
        # parse the author into a channel-appropriate contact dict
        if self.option == 'auto':
            request_author = {'contact': 'self'}
        elif self.option == 'gmail':
            request_author = {'contact': str(author)}
        elif self.option == 'slack':
            request_author = {'contact': str(author)}
        elif self.option == 'twitter':
            # twitter hands over a rich author object; snapshot all of its
            # stringifiable non-callable public attributes
            request_author = {}
            for prop in dir(author):
                if not callable(prop) and not prop.startswith('__'):
                    att = getattr(author, prop)
                    try:
                        request_author[prop] = str(att)
                    except TypeError:
                        request_author[prop] = 'NONE'
        # construct request dictionary
        request = {'exp_identifier': exp_name, 'author': request_author, 'kind': kind}
        # store request dict
        self.RECEIVED_REQUESTS.append(request)
        return exp_name

    def _interpret_feedback(self, classification):
        """Placeholder: map a feedback classification to a loss value."""
        self._print('WARNING: cannot interpret feedback yet!')
        return 0.

    def _process_feedback(self, author, classification):
        """Convert feedback into a loss dict and queue it in RECEIVED_FEEDBACK."""
        # interpret the feedback
        loss = self._interpret_feedback(classification)
        # construct the feedback dictionary
        feedback = {'loss': loss, 'author': author}
        # store feedback dictionary
        self.RECEIVED_FEEDBACK.append(feedback)

    def process_message(self, author, body):
        """Classify an incoming message with the chat bot and dispatch it to
        the matching request/feedback handler, replying to the author."""
        # use bot to classify message
        self._print('received message: %s | %s' % (author, body))
        classification = self.bot.get_classification(body)
        self._print('received classification: %s' % classification)
        if classification == 'start':
            exp_proced = self._process_request(author, body, 'start')
            response = self.bot.response(body)
            replace_dict = {'{@EXP_PROCED}': exp_proced}
            if exp_proced:
                self.send_message(author, response, replace_dict)
        elif classification == 'restart':
            exp_proced = self._process_request(author, body, 'restart')
            response = self.bot.response(body)
            replace_dict = {'{@EXP_PROCED}': exp_proced}
            if exp_proced:
                self.send_message(author, response, replace_dict)
        elif classification == 'stop':
            exp_proced = self._process_request(author, body, 'stop')
            response = self.bot.response(body)
            replace_dict = {'{@EXP_PROCED}': exp_proced}
            if exp_proced:
                self.send_message(author, response, replace_dict)
        elif classification == 'progress_request':
            # find experimental procedure
            exp_proced = self._process_request(author, body, 'progress')
            # confirm receipt of analysis request
            response = self.bot.response(body)
            replace_dict = {'{@EXP_PROCED}': exp_proced}
            if exp_proced:
                self.send_message(author, response, replace_dict)
        elif classification == 'description_request':
            # find experimental procedure
            exp_proced = self._find_experimental_procedure(body)
            # respond with description of procedure
            response = self.bot.response(body)
            # NOTE(review): if no experiment matches, `exp` below is either
            # unbound or the last experiment in the list — confirm whether a
            # guard (as in the branches above) is needed here.
            for exp in self.settings['experiments']:
                if exp['name'] == exp_proced: break
            replace_dict = {'{@EXP_DESCRIPTION}': exp['description']}
            self.send_message(author, response, replace_dict)
        elif classification == 'greeting':
            response = self.bot.response(body)
            self.send_message(author, response)

    def send_message(self, recipient, message, replace_dict = {}, **kwargs):
        """Substitute placeholders in *message* and deliver it.

        NOTE(review): mutable default for replace_dict — harmless here since
        it is only read, but a None default would be safer.
        """
        for key, item in replace_dict.items():
            message = message.replace(str(key), str(item))
        self._send_message(recipient, message, **kwargs)

    def send(self, kind, request_details, file_names = None):
        """Deliver result files (currently only kind == 'analysis')."""
        if kind == 'analysis':
            message = None
            self._send_message(request_details['author'], message, file_names = file_names)

    def notify_user(self, info_dict):
        """On completion of a repetition, queue a fresh 'start' request once
        the configured number of repetitions has been reached."""
        if self.option == 'auto':
            for experiment in self.settings['experiments']:
                # only request new experiments if we maxed out the repetitions
                if experiment['name'] == info_dict['exp_identifier'] and experiment['repetitions'] == info_dict['repetition'] + 1:
                    request = {'exp_identifier': experiment['name'], 'kind': 'start'}
                    self.RECEIVED_REQUESTS.append(request)
        elif self.option == 'slack':
            for experiment in self.settings['experiments']:
                # only request new experiments if we maxed out the repetitions
                if experiment['name'] == info_dict['exp_identifier'] and experiment['repetitions'] == info_dict['repetition'] + 1:
                    request = {'exp_identifier': experiment['name'], 'kind': 'start'}
                    self.RECEIVED_REQUESTS.append(request)

    def stream(self):
        """Start streaming incoming messages into process_message()."""
        self._stream(self.process_message)
#========================================================================
|
#!/usr/bin/env python
"""
demonstrate NavigationToolbar with GTK3 accessed via pygobject
"""
from gi.repository import Gtk
from matplotlib.figure import Figure
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
# Build the top-level window.
window = Gtk.Window()
window.connect("delete-event", Gtk.main_quit)
window.set_default_size(400, 300)
window.set_title("Embedding in GTK")

# Plot sin(2*pi*t) on a single subplot.
figure = Figure(figsize=(5, 4), dpi=100)
axes = figure.add_subplot(1, 1, 1)
time_points = arange(0.0, 3.0, 0.01)
axes.plot(time_points, sin(2 * pi * time_points))

# Stack the plot canvas above its navigation toolbar.
layout = Gtk.VBox()
window.add(layout)
drawing_area = FigureCanvas(figure)  # a Gtk.DrawingArea
layout.pack_start(drawing_area, True, True, 0)
toolbar = NavigationToolbar(drawing_area, window)
layout.pack_start(toolbar, False, False, 0)

window.show_all()
Gtk.main()
|
# -*- coding: utf-8 -*-
import re
from urllib.parse import urlparse
from esmigrate.commons import (
Command,
is_valid_json,
JSON_HEADER,
is_valid_ndjson,
NDJSON_HEADER,
http_verbs,
)
from esmigrate.commons.helpers import construct_path
from esmigrate.contexts import ContextConfig
from esmigrate.exceptions import (
InvalidCommandScriptError,
InvalidCommandVerbError,
ContextObjectNotSetError,
InvalidCommandPathError,
InvalidCommandBodyError,
)
class ScriptParser(object):
    """Parses migration script text into a stream of Command objects."""

    def __init__(self, ctx: ContextConfig = None):
        self._ctx = ctx
        # One regex matching "<VERB> <path>" command lines; multiline so ^
        # anchors each line, case-insensitive so mis-cased verbs are still
        # recognised as command lines (and rejected with a precise error).
        self._pattern = re.compile(rf"^({'|'.join(http_verbs)})\s+(.*)$", re.M | re.I)

    def init_ctx(self, ctx: ContextConfig):
        """Attach (or replace) the context object after construction."""
        self._ctx = ctx

    def get_commands(self, script_text: str):
        """Yield a Command for every verb/path/body group in *script_text*.

        Raises ContextObjectNotSetError, InvalidCommandScriptError,
        InvalidCommandVerbError, InvalidCommandPathError or
        InvalidCommandBodyError (lazily, on iteration).
        """
        if self._ctx is None:
            raise ContextObjectNotSetError("Context was not set")
        stripped_lines = [line.strip() for line in script_text.split("\n") if len(line.strip()) > 0]
        # BUG FIX: an entirely blank script used to crash with IndexError on
        # stripped_lines[0]; report it as an invalid script instead.
        if not stripped_lines:
            raise InvalidCommandScriptError("Script is empty")
        occurs = [idx for idx, line in enumerate(stripped_lines) if self._pattern.match(line)]
        if len(occurs) == 0 or occurs[0] != 0:
            raise InvalidCommandScriptError(f"Unexpected command found: {stripped_lines[0].split()[0]}")
        occurs.append(len(stripped_lines))
        for idx in range(len(occurs) - 1):
            cmdline = occurs[idx]
            m = self._pattern.match(stripped_lines[cmdline])
            verb, path = m.group(1).strip(), m.group(2).strip()
            # The pattern matched case-insensitively; only exact members of
            # http_verbs are accepted, so mis-cased verbs get a clear error.
            if verb not in http_verbs:
                raise InvalidCommandVerbError(f"Unexpected verb found: {verb}")
            parsed_path = urlparse(path)
            if parsed_path.scheme or parsed_path.netloc:
                raise InvalidCommandPathError(f"Unexpected URL scheme found: {path}")
            path = construct_path(self._ctx.es_host, path)
            cmdnext = occurs[idx + 1]
            if cmdline + 1 >= cmdnext:
                # Command with no body.
                body, head = None, None
            else:
                body = "\n".join(stripped_lines[cmdline + 1 : cmdnext])
                if is_valid_json(body):
                    head = JSON_HEADER
                elif is_valid_ndjson(body):
                    head = NDJSON_HEADER
                else:
                    raise InvalidCommandBodyError(f"Expected a {JSON_HEADER} or {NDJSON_HEADER} body")
            yield Command(verb, path, body, head)
|
import discord, random
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option, create_choice
class Slash(commands.Cog):
    """Slash-command cog implementing rock/paper/scissors (in French)."""

    def __init__(self, bot):
        self.bot = bot

    @cog_ext.cog_slash(name="rps", description="Jouer au rock scroll scissors !", options=[
        create_option(
            name="joueur",
            description="Pierre, scroll ou scissors ?",
            option_type=3,
            required=True,
            choices=[
                create_choice(
                    name="pierre",
                    value="rock"
                ),
                create_choice(
                    name="papier",
                    value="scroll"
                ),
                create_choice(
                    name="ciseaux",
                    value="scissors"
                )])])
    async def _rps(self, ctx, joueur: str):
        # The bot picks its move uniformly at random.
        machine = random.choice(["rock", "scroll", "scissors"])
        machine_emoji = f":{machine}:"
        joueur_emoji = f":{joueur}:"
        # Each move beats exactly one other move.
        beats = {"rock": "scissors", "scroll": "rock", "scissors": "scroll"}
        if joueur == machine:
            msg = ":crossed_swords: Égalité !"
        elif beats[joueur] == machine:
            msg = "Tu as gagné !"
        else:
            msg = "Tu as perdu..."
        # French display names for the embed.
        noms = {"rock": "pierre", "scroll": "papier", "scissors": "ciseaux"}
        embed = discord.Embed(title="Pierre Papier Ciseaux")
        embed.add_field(name="** **", value=f"{joueur_emoji} {ctx.author.mention} : {noms[joueur]}\n{machine_emoji} <@760171813866700850> : {noms[machine]}", inline=False)
        embed.add_field(name="** **", value=msg, inline=False)
        await ctx.send(embed=embed)
def setup(bot):
    # Entry point used by discord.py's extension loader.
    bot.add_cog(Slash(bot))

def teardown(bot):
    # Called when the extension is unloaded; removes the cog by name.
    bot.remove_cog("rps")
import json
from dataclasses import dataclass, field
from typing import List, Optional
import stringcase
from taegis_sdk_python.services.common_types import Epoch, Time
from taegis_sdk_python.services.investigations.enums import InvestigationProcessingState, InvestigationStatusEnum, \
InvestigationTypes, PriorityEnum
from taegis_sdk_python.utils import is_valid_value
@dataclass
class Tenant:
    """A Taegis tenant, identified by id and display name."""

    id: str
    name: str

    def __repr__(self) -> str:
        return "<{} id={}>".format(self.__class__.__name__, self.id)

    def __str__(self):
        return f'Tenant(id={self.id}, description={self.name if self.name else ""})'
@dataclass(repr=False)
class Assignee:
    """Describes the assignee of an investigation."""

    def __init__(self, data: dict):
        # Hydrate attributes from the API payload, snake_casing the keys;
        # "tenants" is additionally turned into Tenant objects.  The
        # hand-written __init__ takes precedence over the dataclass-generated
        # one (dataclass does not overwrite methods defined in the body).
        for key, value in data.items():
            if key == "tenants" and value:
                setattr(self, key, [Tenant(**item) for item in value])
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)

    # Declared attributes; init=False ones are filled (when present in the
    # payload) by the custom __init__ above.
    id: str
    name: str = field(init=False)
    roles: List[str] = field(default_factory=list)
    status: str = field(init=False)
    user_id: str = field(init=False)
    email: str = field(init=False)
    email_verified: bool = field(init=False)
    email_normalized: str = field(init=False)
    family_name: str = field(init=False)
    given_name: str = field(init=False)
    tenants: List[Tenant] = field(default_factory=list, init=False)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} id={self.id}>"

    def __str__(self):
        return f'Assignee(id={self.id}, description={self.name if self.name else ""})'

    def __eq__(self, other):
        # Structural equality: every non-dunder attribute present on BOTH
        # sides must match; one-sided attributes are ignored.
        if not isinstance(other, Assignee):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass(repr=False, order=True)
class TransitionState:
    """
    Represent both the initial transitions (if they exist)
    and the current state (handed off, acknowledged, resolved) of an investigation.
    """

    def __init__(self, data):
        # Hydrate from the API payload: known timestamp keys become Time
        # objects, everything else is stored snake_cased as-is.
        for key, value in data.items():
            if key in ["initial_handoff_time",
                       "initial_acknowledge_time",
                       "initial_resolution_time",
                       "handoff_time",
                       "acknowledge_time",
                       "resolution_time"] and value:
                setattr(self, key, Time.convert(value))
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)

    # Attributes populated dynamically by the custom __init__ above.
    handed_off_at_least_once: bool = field(init=False)
    initial_handoff_time: Time = field(init=False, default=None)
    acknowledged_at_least_once: bool = field(init=False)
    initial_acknowledge_time: Time = field(init=False)
    resolved_at_least_once: bool = field(init=False)
    initial_resolution_time: Time = field(init=False)
    handed_off: bool = field(init=False)
    handoff_time: Time = field(init=False)
    acknowledged: bool = field(init=False)
    acknowledge_time: Time = field(init=False)
    resolved: bool = field(init=False)
    resolution_time: Time = field(init=False)

    def __eq__(self, other):
        # Structural equality over non-dunder attributes present on both sides.
        if not isinstance(other, TransitionState):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass(order=True)
class ActivityLog:
    """Stores details of an investigation activity (Create/Update, etc.)."""

    def __init__(self, data):
        # Hydrate from the API payload: created_at/updated_at become Time
        # objects; other keys are stored unchanged (no snake_casing here,
        # unlike sibling classes — presumably keys already arrive snake_cased).
        for key, value in data.items():
            if key in ["created_at", "updated_at"] and value:
                setattr(self, key, Time.convert(value))
            elif is_valid_value(value):
                setattr(self, key, value)

    # Attributes populated dynamically by the custom __init__ above.
    id: str
    created_at: Time = field(init=False)
    updated_at: Time = field(init=False)
    tenant_id: str = field(init=False)
    user_id: str = field(init=False)
    description: str = field(init=False)
    type: str = field(init=False)
    comment: str = field(init=False)
    target: str = field(init=False)
    investigation_id: str = field(init=False)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} id={self.id}>"

    def __str__(self):
        return (
            f'ActivityLog(id={self.id}, '
            f'created_at={self.created_at.date}, '
            f'tenant_id={self.tenant_id}, '
            f'user_id={self.user_id}, '
            f'type={self.type}, '
            f'investigation_id={self.investigation_id})'
        )

    def __eq__(self, other):
        # Structural equality over non-dunder attributes present on both sides.
        if not isinstance(other, ActivityLog):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass(order=True)
class InvestigationProcessingResponse:
    """Per-category processing state (assets/events/alerts) of an investigation."""

    def __init__(self, data):
        # Every valid payload value is wrapped in the
        # InvestigationProcessingState enum, keyed by the snake_cased name.
        for key, value in data.items():
            if is_valid_value(value):
                setattr(self, stringcase.snakecase(key), InvestigationProcessingState(value))

    # Attributes populated dynamically by the custom __init__ above.
    assets: InvestigationProcessingState = field(init=False)
    events: InvestigationProcessingState = field(init=False)
    alerts: InvestigationProcessingState = field(init=False)

    def __repr__(self):
        return (
            f'{{ assets: {self.assets}, events: {self.events}, alerts: {self.alerts} }}'
        )

    def __str__(self):
        return (
            f'InvestigationProcessingResponse(assets={self.assets}, '
            f'events={self.events}, '
            f'alerts={self.alerts})')

    def __eq__(self, other):
        # Structural equality over non-dunder attributes present on both sides.
        if not isinstance(other, InvestigationProcessingResponse):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass
class SearchQuery:
    """Reference to a saved search query by id."""

    id: str

    def __repr__(self) -> str:
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def __str__(self) -> str:
        return "SearchQuery(id={})".format(self.id)
@dataclass
class Event:
    """Reference to an event by id."""

    id: str

    def __repr__(self) -> str:
        return "<{} id={}>".format(self.__class__.__name__, self.id)

    def __str__(self) -> str:
        return "Event: {}".format(self.id)
@dataclass(order=True)
class Asset:
    """
    Describes an Asset in Taegis XDR.
    """

    id: str

    def __repr__(self) -> str:
        return "<{} id={}>".format(self.__class__.__name__, self.id)

    def __str__(self) -> str:
        return "Asset: {}".format(self.id)
@dataclass
class Alert:
    """Reference to an alert; equality is by id only."""

    id: str

    def __repr__(self) -> str:
        return "<{} {}>".format(self.__class__.__name__, self.id)

    def __str__(self) -> str:
        return str(self.id)

    def __eq__(self, other):
        # Only id matters; any non-Alert compares unequal.
        return isinstance(other, Alert) and self.id == other.id

    def __ne__(self, other):
        return not self == other
@dataclass(repr=False, order=True)
class MitreAttackInfo:
    """
    Describes fields related to MitreAttack information for an alert.
    """

    def __init__(self, data: dict):
        # Hydrate attributes from the API payload, snake_casing the keys.
        for key, value in data.items():
            if is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)

    # Attributes populated dynamically by the custom __init__ above.
    technique_id: str = field(init=False)
    technique: str = field(init=False)
    tactics: List[str] = field(default_factory=list, init=False)
    type: str = field(init=False)
    description: str = field(init=False)
    platform: List[str] = field(default_factory=list, init=False)
    system_requirements: List[str] = field(default_factory=list, init=False)
    url: str = field(init=False)
    data_sources: List[str] = field(default_factory=list, init=False)
    defence_bypassed: List[str] = field(default_factory=list, init=False)
    contributors: List[str] = field(default_factory=list, init=False)
    version: str = field(init=False)

    def __eq__(self, other):
        # Structural equality over non-dunder attributes present on both sides.
        if not isinstance(other, MitreAttackInfo):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass(order=True)
class AccessVector:
    """An access vector attached to an investigation, with optional MITRE info."""

    def __init__(self, data: dict):
        # Hydrate from the API payload: timestamps become Time objects and
        # mitre_info becomes a MitreAttackInfo; everything else is stored
        # snake_cased as-is.
        for key, value in data.items():
            if key in ["created_at", "updated_at"] and value:
                setattr(self, key, Time.convert(value))
            elif key == "mitre_info" and value:
                setattr(self, "mitre_info", MitreAttackInfo(value))
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)

    # Attributes populated dynamically by the custom __init__ above.
    id: str
    investigation_id: str = field(init=False)
    name: str = field(init=False)
    created_at: Time = field(init=False)
    updated_at: Time = field(init=False)
    mitre_info: MitreAttackInfo = field(init=False)

    def __repr__(self):
        return str(
            {
                'id': self.id,
                'investigation_id': self.investigation_id,
                'name': self.name,
                'created_at': self.created_at.date,
            }
        )

    def __str__(self):
        return (
            f'AccessVector(id={self.id}, '
            f'investigation_id={self.investigation_id}, '
            f'name={self.name}, '
            f'created_at={self.created_at.date})'
        )

    def __eq__(self, other):
        # Structural equality over non-dunder attributes present on both sides.
        if not isinstance(other, AccessVector):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True

    def __ne__(self, other):
        return not self == other
@dataclass
class InvestigationAlertOutput:
    """A page of alerts attached to an investigation, plus the total count."""

    def __init__(self, data: dict):
        # "alerts" entries are reduced to Alert id references; other keys are
        # stored snake_cased as-is.
        for key, value in data.items():
            if key == "alerts" and value:
                setattr(self, key, [Alert(id=item.get("id")) for item in value])
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)

    # alerts is hydrated by __init__; total_count comes straight from the payload.
    alerts: List[Alert] = field(default_factory=list, init=False)
    total_count: int

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} id={self.total_count}>"

    def __str__(self) -> str:
        return f"InvestigationAlertOutput: {self.total_count}"
@dataclass(order=True)
class Investigation:
def __init__(self, data: dict):
for key, value in data.items():
if key == "priority" and value is not None:
setattr(self, key, PriorityEnum(value))
elif key == "status" and value:
setattr(self, key, InvestigationStatusEnum(value))
elif key == "type" and value:
setattr(self, key, InvestigationTypes(value))
elif key in ["tags", "contributors", "auth_credentials"] and value:
setattr(self, key, value)
elif key == "processing_status" and value:
if value.get("assets") and value.get("events") and value.get("alerts"):
setattr(self, key, InvestigationProcessingResponse(value))
elif key == "activity_logs" and value:
setattr(self, key, [ActivityLog(item) for item in value])
elif key == "assignee" and value:
setattr(self, key, Assignee(value))
elif key == "transition_state" and value:
setattr(self, key, TransitionState(value))
elif key in ["genesis_events", "events"] and value:
setattr(self, key, [Event(id=item.get("id")) for item in value])
elif key == "search_queries" and value:
setattr(self, key, [SearchQuery(id=item.get("id")) for item in value])
elif key == "access_vectors" and value:
setattr(self, key, [AccessVector(item) for item in value])
elif key in ["genesis_alerts", "alerts"] and value:
setattr(self, key, [Alert(id=item.get("id")) for item in value])
elif key == "assets" and value:
setattr(self, key, [Asset(id=item.get("id")) for item in value])
elif key in ["created_at", "updated_at", "notified_at", "deleted_at"] and value:
setattr(self, key, Time.convert(value))
elif is_valid_value(value):
setattr(self, stringcase.snakecase(key), value)
id: str
description: str
tenant_id: str = field(init=False)
tags: List[str] = field(init=False)
genesis_alerts: List[Alert] = field(default_factory=list, init=False)
genesis_events: List[Event] = field(default_factory=list, init=False)
alerts: List[Alert] = field(default_factory=list, init=False)
events: List[Event] = field(default_factory=list, init=False)
assets: List[Asset] = field(default_factory=list, init=False)
search_queries: List[SearchQuery] = field(default_factory=list, init=False)
auth_credentials: List[str] = field(default_factory=list, init=False)
key_findings: str = field(init=False)
created_at: Time = field(init=False)
updated_at: Time = field(init=False)
notified_at: Time = field(init=False)
activity_logs: List[ActivityLog] = field(default_factory=list, init=False)
created_by: str = field(init=False)
status: InvestigationStatusEnum = field(init=False)
contributors: List[str] = field(default_factory=list, init=False)
service_desk_id: str = field(init=False)
service_desk_type: str = field(init=False)
assignee_id: str = field(init=False)
assignee: Optional[Assignee] = field(init=False)
latest_activity: str = field(init=False)
access_vectors: List[AccessVector] = field(default_factory=list, init=False, repr=False)
transition_state: Optional[TransitionState] = field(init=False, repr=False)
deleted_at: Time = field(init=False)
created_by_scwx: bool = field(init=False)
investigation_type: str = field(init=False)
processing_status: InvestigationProcessingResponse = field(init=False)
priority: PriorityEnum = field(init=False)
type: InvestigationTypes = field(init=False)
genesis_alerts_count: int = field(init=False)
genesis_events_count: int = field(init=False)
alerts_count: int = field(init=False)
events_count: int = field(init=None)
assets_count: int = field(init=None)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} id={self.id}>"
def __str__(self):
return f'Investigation: {self.id}, {self.description}'
def __eq__(self, other):
    """Reflection-based equality: compare every non-dunder attribute.

    NOTE(review): dir(self) yields methods and class-level names as well
    as data attributes; bound methods of distinct instances may compare
    unequal, which can make semantically equal objects compare False —
    confirm the intended semantics before relying on this.
    """
    if not isinstance(other, Investigation):
        return False
    # dir() lists inherited/class names in addition to instance attributes.
    fields = dir(self)
    for f in fields:
        if f.startswith("__"):
            # skip dunder machinery
            continue
        if hasattr(self, f) and hasattr(other, f):
            # attributes present on only one side are silently ignored
            if getattr(self, f) != getattr(other, f):
                return False
    return True
def __ne__(self, other):
    """Inverse of __eq__."""
    return not (self == other)
@dataclass(repr=False, order=True)
class InvestigationStatusCountResponse:
    """Counts of investigations bucketed by status.

    order=True makes instances comparable field-by-field in declaration
    order; the auto-generated repr is suppressed.
    """
    open: int
    closed: int
    active: int
    awaiting_action: int
    suspended: int
    # total across all status buckets
    total: int
@dataclass
class EventInfo:
    """
    Describes the fields common to all event types.

    Attributes are populated dynamically from `data` by __init__ (keys are
    snake_cased), so the dataclass-generated __init__ is unused.
    """
    def __init__(self, data: dict):
        for key, value in data.items():
            if key == "original_data" and value:
                # original_data arrives as a JSON string; store it decoded
                setattr(self, stringcase.snakecase(key), json.loads(value))
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)
    type: str = field(init=False)
    message: str = field(init=False)
    resource_id: str = field(init=False)
    tenant_id: str = field(init=False)
    visibility: str = field(init=False)
    normalizer: str = field(init=False)
    sensor_type: str = field(init=False)
    sensor_event_id: str = field(init=False)
    sensor_tenant: str = field(init=False)
    sensor_id: str = field(init=False)
    sensor_cpe: str = field(init=False)
    original_data: dict = field(init=False)
    event_time_usec: int = field(init=False)
    ingest_time_usec: int = field(init=False)
    event_time_fidelity: str = field(init=False)
    host_id: str = field(init=False)
    source_address: str = field(init=False)
    destination_address: str = field(init=False)
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.sensor_id}>"
    def __str__(self):
        # BUG FIX: previously returned a set literal ({self.sensor_id}),
        # which raises TypeError because __str__ must return a str.
        return f"{self.sensor_id}"
    def __eq__(self, other):
        # BUG FIX: the original compared against Investigation (copy/paste
        # error); an EventInfo must be compared with another EventInfo.
        if not isinstance(other, EventInfo):
            return False
        fields = dir(self)
        for f in fields:
            if f.startswith("__"):
                continue
            if hasattr(self, f) and hasattr(other, f):
                if getattr(self, f) != getattr(other, f):
                    return False
        return True
    def __ne__(self, other):
        return not self == other
@dataclass(repr=False, order=True)
class InvestigationEventOutput:
    """Collection of EventInfo records plus the total match count."""
    def __init__(self, data: dict):
        for raw_key, raw_value in data.items():
            if raw_key == "events" and raw_value:
                # hydrate raw dicts into EventInfo objects
                self.events = [EventInfo(item) for item in raw_value]
            elif is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    events: List[EventInfo] = field(default_factory=list, init=False)
    total_count: int
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.total_count}>"
    def __str__(self):
        return f'InvestigationEventOutput(total count={self.total_count})'
@dataclass
class InvestigationAssetOutput:
    """Collection of Asset records plus the total match count."""
    def __init__(self, data: dict):
        for raw_key, raw_value in data.items():
            if raw_key == "assets" and raw_value:
                # only the asset ids are retained from the raw payload
                self.assets = [Asset(id=item.get("id")) for item in raw_value]
            elif is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    assets: List[Asset] = field(default_factory=list, init=False)
    total_count: int
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.total_count}>"
    def __str__(self):
        return f'InvestigationAssetOutput(total count={self.total_count})'
@dataclass
class SummaryGroup:
    """
    Describes the summary of investigations by status filtered by date.
    """
    status: str
    count: int
    date: str
    # Parsed/normalized form of `date`, derived in __post_init__.
    date_time: Time = field(init=False)
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.status}, {self.count}>"
    def __str__(self):
        return f'SummaryGroup(status={self.status}, count={self.count}, date={self.date})'
    def __post_init__(self):
        # Derive date_time from the raw date string once the instance exists.
        if hasattr(self, "date"):
            from dateutil.parser import parse
            normalized = parse(self.date).strftime('%Y-%m-%dT%H:%M:%SZ')
            self.date_time = Time.convert(normalized)
@dataclass
class InvestigationSummary:
    """
    Provides a count of investigations per tag.
    """
    tag: str
    count: int
    def __repr__(self) -> str:
        cls_name = type(self).__name__
        return f"<{cls_name} {self.tag}, {self.count}>"
    def __str__(self):
        return f'InvestigationSummary(tag={self.tag}, count={self.count})'
@dataclass
class Count:
    """
    Represents a int count of a given object.
    """
    count: int
    def __repr__(self) -> str:
        cls_name = type(self).__name__
        return f"<{cls_name} {self.count}>"
    def __str__(self):
        return str(self.count)
@dataclass(repr=False)
class IndividualTimeSummary:
    """
    Represents the amounts of time it took before an investigation transitioned into the handoff,
    acknowledge, and resolution states.
    """
    def __init__(self, data: dict):
        for raw_key, raw_value in data.items():
            if raw_key == "investigation" and raw_value is not None:
                self.investigation = Investigation(raw_value)
            elif raw_key in ("time_to_handoff", "time_to_acknowledge", "time_to_resolution") and raw_value:
                # truthy timing values are wrapped in Epoch
                setattr(self, raw_key, Epoch(raw_value))
            elif is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    time_to_handoff: Epoch
    time_to_acknowledge: Epoch
    time_to_resolution: Epoch
    is_closed: bool
    investigation: Investigation
@dataclass(repr=False)
class TimeSummaryForGroup:
    """
    Used by MeanTimeSummaryOverPeriod query to represent the average times it took to hand off,
    acknowledge, and resolve all investigations over the course of the period.
    """
    def __init__(self, data: dict):
        for key, value in data.items():
            if key == "time_summaries" and value is not None:
                setattr(self, key, [IndividualTimeSummary(item) for item in value])
            elif key in ["mean_time_to_handoff", "mean_time_to_acknowledge", "mean_time_to_resolution"]:
                # NOTE(review): unlike IndividualTimeSummary there is no
                # truthiness guard here, so Epoch(None) / Epoch(0) can be
                # constructed for absent values — confirm this is intended.
                setattr(self, key, Epoch(value))
            elif is_valid_value(value):
                setattr(self, stringcase.snakecase(key), value)
    mean_time_to_handoff: Epoch
    mean_time_to_acknowledge: Epoch
    mean_time_to_resolution: Epoch
    time_summaries: List[IndividualTimeSummary] = field(default_factory=list, init=False, repr=False)
@dataclass(repr=False)
class InvestigationsOutput:
    """Collection of Investigation records plus the total match count."""
    def __init__(self, data: dict):
        for raw_key, raw_value in data.items():
            if raw_key == "investigations" and raw_value:
                # hydrate raw dicts into Investigation objects
                self.investigations = [Investigation(item) for item in raw_value]
            elif is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    investigations: List[Investigation] = field(default_factory=list, init=False, repr=False)
    total_count: int
@dataclass
class InvestigationsExportOutput:
    """Tabular export of investigations: column headers plus row data."""
    def __init__(self, data: dict):
        # Copy every meaningful value onto the instance under a snake_case name.
        for raw_key, raw_value in data.items():
            if is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    column_def: List[str]
    rows: List[List[str]]
    total_count: int
@dataclass
class InvestigationBulkResponse:
    """Result of a bulk query: the query string and matched investigations."""
    def __init__(self, data: dict):
        for raw_key, raw_value in data.items():
            if raw_key == "investigations" and raw_value:
                self.investigations = [Investigation(item) for item in raw_value]
            elif is_valid_value(raw_value):
                setattr(self, stringcase.snakecase(raw_key), raw_value)
    query: str
    investigations: List[Investigation] = field(default_factory=list, init=False, repr=False)
@dataclass
class InvestigationInput:
    """Fields accepted when creating an investigation; all are optional."""
    # `= None` is equivalent to the original field(default=None) and reads
    # more directly; None (not a mutable literal) is a safe default.
    tags: List[str] = None
    genesis_alerts: List[str] = None
    genesis_events: List[str] = None
    alerts: List[str] = None
    events: List[str] = None
    assets: List[str] = None
    auth_credentials: List[str] = None
    search_queries: List[str] = None
    key_findings: str = None
    description: str = None
    notified_at: str = None
    created_by: str = None
    status: str = None
    contributors: List[str] = None
    service_desk_id: str = None
    service_desk_type: str = None
    assignee_id: str = None
    notes: str = None
    acknowledgment: bool = None
    priority: int = None
    type: str = None
@dataclass
class UpdateInvestigationInput(InvestigationInput):
    """Describes the fields available for updating an investigation."""
@dataclass
class SnowCredentials:
    """Represents credentials required for SNOW authentication."""
    user: str = None
    password: str = None
@dataclass
class ActivityLogInput:
    """Describes the fields available for creating a new Activity Log."""
    description: str = None
    type: str = None
    comment: str = None
    target: str = None
|
from machine import UART, Pin
import time
# Two-UART button relay: pressing a button sends a message out one UART and
# prints whatever has arrived on the other one.
uart1 = UART(1, baudrate=9600, tx=Pin(8), rx=Pin(9))
uart0 = UART(0, baudrate=9600, tx=Pin(0), rx=Pin(1))
button_1 = Pin(2, Pin.IN, Pin.PULL_DOWN)
button_2 = Pin(3, Pin.IN, Pin.PULL_DOWN)

def _send_and_report(tx_uart, rx_uart, message):
    # Transmit, wait briefly for a reply, then drain the receive buffer one
    # byte at a time and print whatever was collected (possibly empty).
    tx_uart.write(message)
    time.sleep(0.1)
    reply = bytes()
    while rx_uart.any() > 0:
        reply += rx_uart.read(1)
    print(reply.decode('utf-8'))

while True:
    # NOTE(review): the UARTs are cross-wired on purpose here (send on one,
    # listen on the other) — confirm against the hardware wiring.
    if button_1.value():
        _send_and_report(uart1, uart0, b'Front\n\r')
    if button_2.value():
        _send_and_report(uart0, uart1, b'Back\n\r')
import numpy as np
import matplotlib.pyplot as plt
from components.utilities.misc import print_orthogonal
from components.utilities.load_write import read_image, load
from components.utilities.VTKFunctions import render_volume
from components.processing.clustering import kmeans_opencv
from joblib import Parallel, delayed
from tqdm import tqdm
# Paths and number of clusters
# NOTE(review): the absolute Windows/network paths below are machine-specific.
path = r'Y:\3DHistoData\Test data'
file_2mm = '13_R3L_2_PTA_48h_cor504.png'
file_4mm = 'KP03-L6-4MP2_Cor740.png'
n_clusters = 3
width = 448
# Load
# np.flip with no axis reverses the image along every axis.
cor_2mm = np.flip(read_image(path, file_2mm))
cor_4mm = np.flip(read_image(path, file_4mm))
data = load(r'C:\Users\Tuomas Frondelius\Desktop\Data\KP03-L6-4MC2_sub01')
# Crop
cor_2mm = cor_2mm[:, 300:748]
cor_4mm = cor_4mm[:, 600:1048]
# Take a `width`-wide band centred on the first axis of the volume...
a = data.shape[0] // 2 - width // 2
b = data.shape[0] // 2 + width // 2
# ...at the central slice of the second axis, transposed for display.
data_cor = data[a:b, data.shape[1] // 2, :].T
# Show images
fig = plt.figure(dpi=300)
ax1 = fig.add_subplot(131)
ax1.imshow(cor_2mm, cmap='gray')
ax1.set_title('2mm image')
ax2 = fig.add_subplot(132)
ax2.imshow(cor_4mm, cmap='gray')
ax2.set_title('4mm image')
ax3 = fig.add_subplot(133)
ax3.imshow(data_cor, cmap='gray')
ax3.set_title('4mm image')  # NOTE(review): same title as the middle panel — possibly meant to differ
plt.show()
render_volume(data, None, False)
# Downscale images
# K-means clustering
# 3D clustering in parallel
# Each slice of the volume is clustered independently across 12 workers.
mask = Parallel(n_jobs=12)(delayed(kmeans_opencv)(data[i, :, :].T, n_clusters, scale=True, method='loop')
                           for i in tqdm(range(data.shape[0]), 'Calculating mask'))
# Restore the (slice, y, x) orientation after the per-slice transposes.
mask = np.transpose(np.array(mask), (0, 2, 1))
print_orthogonal(mask, True)
# 2D clustering
mask_2mm = kmeans_opencv(cor_2mm, n_clusters, True, limit=2, method='loop')
mask_4mm = kmeans_opencv(cor_4mm, n_clusters, True, limit=2, method='loop')
mask_4mm_2 = kmeans_opencv(data_cor, n_clusters, True, limit=2, method='loop')
# Show cluster images
fig = plt.figure(dpi=300)
ax1 = fig.add_subplot(131)
ax1.imshow(mask_2mm)
ax1.set_title('2mm mask')
ax2 = fig.add_subplot(132)
ax2.imshow(mask_4mm)
ax2.set_title('4mm mask')
ax3 = fig.add_subplot(133)
ax3.imshow(mask_4mm_2)
ax3.set_title('4mm mask')
plt.show()
|
#!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert the circuit to json
"""
from google.protobuf.json_format import MessageToJson
from QCompute.OpenConvertor import ConvertorImplement
from QCompute.QProtobuf import PBProgram
class CircuitToJson(ConvertorImplement):
    """
    Circuit to json
    """
    def convert(self, program: PBProgram) -> str:
        """
        Convert the circuit to json.

        Example:

            env.publish()  # need to generate protobuf circuit data
            jsonStr = CircuitToJson().convert(env.program)

        :param program: Protobuf format of the circuit
        :return: json str
        """
        # preserving_proto_field_name keeps the original snake_case field
        # names instead of protobuf's default lowerCamelCase conversion.
        json_str = MessageToJson(program, preserving_proto_field_name=True)
        return json_str
|
# Demonstrate str.lower(): print the phrase in all-lowercase form.
phrase = "Sanidhya Academy"
lowered = phrase.lower()
print(lowered)
|
import json
import docutils
import docutils.core
import docutils.frontend
from docutils import nodes
from docutils.parsers.rst import states
def json_safe_dict(d):
    """Deep copy a dict and stringify JSON incompatible values.

    Nested dicts are converted recursively; any value json.dumps cannot
    serialize is replaced by the marker string "STRINGIFIED:<value>".
    """
    result = {}
    for k, v in d.items():
        if isinstance(v, dict):
            v = json_safe_dict(v)
        else:
            try:
                json.dumps(v)
            except TypeError:
                # BUG FIX: the original interpolated an undefined name,
                # f"{STRINGIFIED:}{v}", raising NameError the first time a
                # non-serializable value was encountered.
                v = f"STRINGIFIED:{v}"
        result[k] = v
    return result
class SettingsFaker:
    """Simulate a settings object for docutils.

    Known settings are pre-seeded into __dict__; any other attribute lookup
    returns None and is tallied in self._stats.
    """
    def __init__(self):
        self._stats = {}
        defaults = {
            # Docutils' RST parser references - at least - these settings
            # attributes.
            "character_level_inline_markup": False,
            "debug": 0,
            "error_encoding": "utf-8",
            "error_encoding_error_handler": "backslashreplace",
            "halt_level": 4,
            "input_encoding": "unicode",
            "language_code": "en",
            "output_encoding": "unicode",
            "pep_references": None,
            "raw_enabled": 1,
            "report_level": 2,
            "rfc_references": None,
            "tab_width": 3,
            "warning_stream": None,
        }
        self.__dict__.update(defaults)
    def __getattr__(self, item):
        # Only called for attributes missing from __dict__: count the access
        # and pretend the setting exists with value None.
        self._stats[item] = self._stats.get(item, 0) + 1
        return None
def parse_rst_string(input):
    """Parse an RST string into a docutils document using faked settings.

    NOTE: the parameter name `input` shadows the builtin; kept for
    interface compatibility with existing callers.
    """
    settings = SettingsFaker()
    rst_parser = docutils.parsers.rst.Parser()
    document = docutils.utils.new_document("string", settings)
    rst_parser.parse(input, document)
    return document
def char_at(s, p):
    """Return s[p], or None when p is out of range (no negative wrap-around)."""
    return s[p] if 0 <= p < len(s) else None
def iter_chunks(s, hrefs):
    """Split the string into chunks according to list of hrefs.

    Behavior:
       https://example.org                    →  <a href="https://example.org">https://example.org</a>
       (https://example.org)                  →  <a href="https://example.org">https://example.org</a>
       [ ](https://example.org)               →  <a href="https://example.org">https://example.org</a>
       <https://example.org>                  →  <<a href="https://example.org">https://example.org</a>>
       Home (https://example.org)             →  <a href="https://example.org">Home</a>
       Text [linktext](https://example.org)   →  Text <a href="https://example.org">linktext</a>

    Yields (linktext, href, healthy) triples; href is None for plain text.
    `healthy` turns False when an href cannot be matched against `s`.
    """
    # yield linktext, href, healthy
    healthy = True
    if not s.strip():
        yield s, None, healthy
        return
    for e, href in enumerate(hrefs):
        if not s.strip():
            # href but empty s encountered
            healthy = False
            yield s, None, healthy
            return
        # be cautious
        href = href.strip()
        if href.strip():
            p = s.find(href)
            if p == -1:
                # href does not occur
                healthy = False
            elif p == 0:
                # no linktext, so link to itself
                yield href, href, healthy
                s = s[len(href):]
            else:
                # positions of the characters surrounding the href match
                p_before = p - 1
                p_next = p + len(href)
                p_right_bracket = p - 2
                char_right_bracket = char_at(s, p_right_bracket)
                char_before = char_at(s, p_before)
                char_next = char_at(s, p_next)
                if char_before == "(" and char_next == ")":
                    # href is parenthesized; look for a preceding [linktext]
                    p_left_bracket = None
                    if char_right_bracket == "]":
                        # scan backwards for the matching opening bracket
                        for i in range(p_right_bracket - 1, -1, -1):
                            if char_at(s, i) == "[":
                                p_left_bracket = i
                                break
                    if p_left_bracket is not None:
                        # abc (bcd)[url]
                        text = s[0:p_left_bracket]
                        if len(text):
                            yield text, None, healthy
                        linktext = s[p_left_bracket + 1: p_right_bracket]
                        if not linktext.strip():
                            # empty [ ] falls back to the href itself
                            linktext = href
                        yield linktext, href, healthy
                    else:
                        # create link, drop brackets
                        linktext = s[0:p_before]
                        linktext_rstripped = linktext.rstrip()
                        # trailing whitespace is preserved as its own chunk
                        linktext_ws = linktext[len(linktext_rstripped):]
                        if linktext_rstripped:
                            yield linktext_rstripped, href, healthy
                        else:
                            if linktext_ws:
                                yield linktext_ws, None, healthy
                            yield href, href, healthy
                    s = s[p_next + 1:]
                elif char_before == "<" and char_next == ">":
                    # <href>: keep the angle brackets as literal text
                    yield s[:p], None, healthy
                    yield href, href, healthy
                    yield s[p_next:p_next + 1], None, healthy
                    s = s[p_next + 1:]
                else:
                    # bare href embedded in text
                    yield s[:p], None, healthy
                    yield href, href, healthy
                    s = s[p + len(href):]
        else:
            # empty href.strip() encountered
            healthy = False
    if s:
        yield s, None, healthy
def get_replacement_from_string_and_document(text, document):
    """Build a list of docutils nodes (references and text) for `text`,
    linking against the hrefs found in `document`."""
    hrefs = [
        node.rawsource
        for node in document.traverse()
        if isinstance(node, docutils.nodes.reference)
    ]
    replacement = []
    for linktext, href, healthy in iter_chunks(text, hrefs):
        if href:
            ref_node = nodes.reference(
                linktext,
                linktext,
                refuri=href,
                internal=False,
            )
            replacement.append(ref_node)
        elif linktext:
            replacement.append(nodes.Text(linktext, linktext))
    return replacement
def get_replacements_from_parsed_string(text):
    """Parse `text` as RST and return the replacement nodes for it."""
    return get_replacement_from_string_and_document(text, parse_rst_string(text))
if __name__ == "__main__":
    # Smoke-test examples covering the supported link notations.
    examples = [
        "https://docs.typo3.org/ is waiting for you.",
        "(https://docs.typo3.org/) is waiting for you.",
        " (https://docs.typo3.org/) is waiting for you.",
        "T3Docs (https://docs.typo3.org/) is waiting for you.",
        "T3Docs (https://docs.typo3.org/) is waiting for you.",
        "Martin Bless <martin.bless@mbless.de> TYPO3 (https://typo3.org/).",
        "Martin Bless <martin.bless@mbless.de> See also: [TYPO3](https://typo3.org/).",
    ]
    for text in examples:
        print(f"\n==========\n{text}\n----------\n")
        get_replacements_from_parsed_string(text)
|
from django.urls import path
from django.contrib import admin
from payments.mpesaApi.views import LNMCallbackAPIView
# URL routes for the payments app.
urlpatterns = [
    # M-Pesa LNM callback endpoint — presumably hit by the payment gateway;
    # confirm against the callback URL registered with the provider.
    path('lnm/', LNMCallbackAPIView.as_view(), name="lnm_callbackurl"),
]
|
# Generated by Django 3.1.4 on 2021-01-18 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Post.thumb optional (blank=True) with
    # a default image filename. Generated files should not be hand-edited.
    dependencies = [
        ('posts', '0002_post_nickname'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='thumb',
            field=models.ImageField(blank=True, default='default_profile.png', upload_to=''),
        ),
    ]
|
# Retry/backoff tuning for switching VPN connections.
VPN_SWITCH_RETRY_ATTEMPTS = 3
VPN_SWITCH_RETRY_BACKOFF = 2  # multiplier for the attempt number
# Retry/backoff tuning for asking the VPN worker thread to quit.
VPN_THREAD_QUIT_RETRY_ATTEMPTS = 3
VPN_THREAD_QUIT_RETRY_BACKOFF = 2  # multiplier for the attempt number
# Polling intervals (seconds) for the various VPN monitoring loops.
VPN_MANAGE_CONNECTION_POLLING_SECONDS = 2
VPN_TRACK_CONNECTIONS_POLLING_SECONDS = 1
VPN_TRACK_PREFERENCES_POLLING_SECONDS = 1
VPN_TRACK_THREADS_POLLING_SECONDS = 1
# Sort VPN connection names (presumably for display) — confirm with consumers.
VPN_SORT_NAMES = True
# Timeout (seconds) when verifying internet connectivity.
INTERNET_VERIFICATION_TIMEOUT = 2
|
"""
Authors: David K. Duvenaud, Dougal MacLaurin, Matt J. Johnson (Harvard
Intelligent & Probabilistic Systems Group)
Packaged by: Eric J. Ma (MIT)
"""
try:
import autograd.numpy as np
except ImportError:
import numpy as np
from operator import itemgetter
def flatten(value):
    """value can be any nesting of tuples, arrays, dicts.
    returns 1D numpy array and an unflatten function."""
    if isinstance(value, np.ndarray):
        shape = value.shape
        return np.ravel(value), lambda vector: np.reshape(vector, shape)
    if isinstance(value, float):
        return np.array([value]), lambda vector: vector[0]
    if isinstance(value, tuple):
        if not value:
            return np.array([]), lambda vector: ()
        # flatten the head and (recursively) the remaining tail
        head_flat, head_unflatten = flatten(value[0])
        tail_flat, tail_unflatten = flatten(value[1:])
        n_head = len(head_flat)
        def unflatten(vector):
            return (head_unflatten(vector[:n_head]),) + tail_unflatten(vector[n_head:])
        return np.concatenate((head_flat, tail_flat)), unflatten
    if isinstance(value, list):
        if not value:
            return np.array([]), lambda vector: []
        head_flat, head_unflatten = flatten(value[0])
        tail_flat, tail_unflatten = flatten(value[1:])
        n_head = len(head_flat)
        def unflatten(vector):
            return [head_unflatten(vector[:n_head])] + tail_unflatten(vector[n_head:])
        return np.concatenate((head_flat, tail_flat)), unflatten
    if isinstance(value, dict):
        # deterministic ordering: entries are processed in sorted-key order
        keys = sorted(value)
        pieces = []
        unflatteners = []
        lengths = []
        for k in keys:
            flat_k, unflatten_k = flatten(value[k])
            pieces.append(flat_k)
            unflatteners.append(unflatten_k)
            lengths.append(len(flat_k))
        def unflatten(vector):
            split_ixs = np.cumsum(lengths)
            chunks = np.split(vector, split_ixs)
            return {k: unflattener(chunk)
                    for chunk, unflattener, k in zip(chunks, unflatteners, keys)}
        return np.concatenate(pieces), unflatten
    raise Exception("Don't know how to flatten type {}".format(type(value))
                    )
|
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib import messages
from django.db.models import Q
from .models import Image
from .forms import (ImageEditForm, ImageUploadForm)
# Shorthand alias: render a template to a string.
_lr = loader.render_to_string
def index(req):
    """List images; with ?q=<phrase>, search titles and tags instead."""
    qs = req.GET.get('q')
    if qs:
        # strip all spaces from the query before matching
        qs = qs.replace(' ', '')
        rows = set(Image.objects.filter(Q(title__icontains=qs) | Q(tags__name__icontains=qs)))
        ttl = f'поиск по заголовкам и тегам фразы: {qs}'
    else:
        qs = ''
        rows = Image.objects.filter(active=True).defer('active')[:200]
        ttl = 'все изображения'
    ctx = {
        'title': ttl,
        'rows': rows,
        'qs': qs,
    }
    return HttpResponse(_lr('index.html', ctx, req))
def img_add(req):
    """Upload a new image: GET shows the form, POST stores the file."""
    if req.method != 'POST':
        ctx = {'title': 'загрузка нового изображения', 'form': ImageUploadForm()}
        return HttpResponse(_lr('img_add.html', ctx, req))
    form = ImageUploadForm(data=req.POST, files=req.FILES)
    if form.is_valid():
        obj = form.save(commit=False)
        obj.save()
        messages.success(req, 'Новое изображение загружено')
    else:
        messages.error(req, 'Ошибка при загрузке')
    return HttpResponseRedirect("/")
def img_view(req, slug):
    """Show an image and allow editing of its description."""
    try:
        img = Image.objects.get(slug__exact=slug)
    except Exception:
        raise Http404('Don\'t exists')
    if req.method != 'POST':
        ctx = {'title': 'изображение', 'form': ImageEditForm(instance=img), 'id': img.id, 'iurl': img.image.url}
        return HttpResponse(_lr('img_view.html', ctx, req))
    form = ImageEditForm(instance=img, data=req.POST)
    if form.is_valid():
        form.save()
        messages.success(req, 'описание обновлено')
    else:
        messages.error(req, 'Ошибка при обновлении')
    return HttpResponseRedirect("/")
def img_del(req, id):
    """Delete an image by id, then redirect to the index page.

    NOTE: parameter `id` shadows the builtin but is kept for URLconf
    compatibility.
    """
    try:
        # deleting the file on disk could be added here
        Image.objects.filter(id=id).delete()
        messages.success(req, 'изображение удалено')
    except Exception:
        messages.error(req, 'Ошибка при удалении')
    # BUG FIX: the original returned a response only on success; on failure
    # the view implicitly returned None, which Django rejects. Always redirect.
    return HttpResponseRedirect("/")
|
from __future__ import absolute_import
import six
from datetime import timedelta
from rest_framework.response import Response
from rest_framework.exceptions import ParseError
from sentry import features
from sentry.api.bases import OrganizationEventsEndpointBase, OrganizationEventsError, NoProjects
from sentry.api.event_search import resolve_field_list, InvalidSearchQuery
from sentry.api.serializers.snuba import SnubaTSResultSerializer
from sentry.utils.dates import parse_stats_period
from sentry.utils import snuba
class OrganizationEventsStatsEndpoint(OrganizationEventsEndpointBase):
    """Time-series event counts for an organization, queried from Snuba."""
    def get(self, request, organization):
        # Build Snuba query args; the events-v2 feature flag selects the
        # newer filter-param pipeline, otherwise fall back to the legacy one.
        try:
            if features.has("organizations:events-v2", organization, actor=request.user):
                params = self.get_filter_params(request, organization)
                snuba_args = self.get_snuba_query_args(request, organization, params)
            else:
                snuba_args = self.get_snuba_query_args_legacy(request, organization)
        except (OrganizationEventsError, InvalidSearchQuery) as exc:
            raise ParseError(detail=six.text_type(exc))
        except NoProjects:
            # no projects means no data rather than an error response
            return Response({"data": []})
        # Bucket size from ?interval=... (default 1h when absent/unparsable).
        interval = parse_stats_period(request.GET.get("interval", "1h"))
        if interval is None:
            interval = timedelta(hours=1)
        rollup = int(interval.total_seconds())
        snuba_args = self.get_field(request, snuba_args)
        result = snuba.transform_aliases_and_query(
            aggregations=snuba_args.get("aggregations"),
            conditions=snuba_args.get("conditions"),
            filter_keys=snuba_args.get("filter_keys"),
            start=snuba_args.get("start"),
            end=snuba_args.get("end"),
            orderby="time",
            groupby=["time"],
            rollup=rollup,
            referrer="api.organization-events-stats",
            limit=10000,
        )
        serializer = SnubaTSResultSerializer(organization, None, request.user)
        return Response(
            serializer.serialize(
                snuba.SnubaTSResult(result, snuba_args["start"], snuba_args["end"], rollup)
            ),
            status=200,
        )
    def get_field(self, request, snuba_args):
        """Resolve the ?yAxis aggregate and inject it into snuba_args
        (mutates snuba_args in place and returns it)."""
        y_axis = request.GET.get("yAxis", None)
        # These aliases are used by v1 of events.
        if not y_axis or y_axis == "event_count":
            y_axis = "count()"
        elif y_axis == "user_count":
            y_axis = "count_unique(user)"
        try:
            resolved = resolve_field_list([y_axis], {})
        except InvalidSearchQuery as err:
            raise ParseError(detail=six.text_type(err))
        aggregate = resolved["aggregations"][0]
        # alias the aggregate column as "count" so the serializer finds it
        aggregate[2] = "count"
        snuba_args["aggregations"] = [aggregate]
        return snuba_args
|
#!/usr/bin/python
import sys
import csv
import heapq, random
import operator
def mapper():
    # Reads TSV rows from stdin, ranks them by the length of column index 4,
    # and prints the ten longest (in ascending length order).
    # NOTE(review): Python 2 syntax (print statement) — not valid Python 3.
    reader = csv.reader(sys.stdin, delimiter='\t')
    # NOTE(review): `writer` is created but never used.
    writer = csv.writer(sys.stdout, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
    mylist = []
    # NOTE(review): `dict` shadows the builtin name.
    dict = {}
    index = 0
    for line in reader:
        mylist.append(line)
        # map row index -> length of the 5th column
        dict[index] = len(line[4])
        index += 1
    # sort row indices by column length, descending
    sorted_x = sorted(dict.items(), key=operator.itemgetter(1), reverse=True)
    # reversed() over the top ten prints them shortest-first
    for i in reversed(sorted_x[0:10]):
        print """\"\"\t\"\"\t\"\"\t\"\"\t\"{0}\"\t\"\"""".format(str(mylist[i[0]][4]))
mapper()
|
"""
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals
class Labels(object):
    """
    Provide access to a set of labels which have specific semantics

    The set of labels comes from here:
    https://github.com/projectatomic/ContainerApplicationGenericLabels

    Note that only a subset of label types (those used by OSBS) is supported:

    - LABEL_TYPE_NAME: repository name of the image
    - LABEL_TYPE_VERSION: version of the image
    - LABEL_TYPE_RELEASE: release number for this version
    - LABEL_TYPE_ARCH: architecture for the image
    - LABEL_TYPE_VENDOR: owner of the image
    - LABEL_TYPE_SOURCE: authoritative location for publishing
    - LABEL_TYPE_COMPONENT: Bugzilla (or other tracker) component
    - LABEL_TYPE_HOST: build host used to create the image
    - LABEL_TYPE_RUN: command to run the image
    - LABEL_TYPE_INSTALL: command to install the image
    - LABEL_TYPE_UNINSTALL: command to uninstall the image
    - LABEL_TYPE_OPERATOR_MANIFESTS: flags the presence of appregistry operators metadata
    - LABEL_TYPE_OPERATOR_BUNDLE_MANIFESTS: flags the presence of operators bundle metadata
    """
    # Sentinel objects identifying each supported label type.
    LABEL_TYPE_NAME = object()
    LABEL_TYPE_VERSION = object()
    LABEL_TYPE_RELEASE = object()
    LABEL_TYPE_ARCH = object()
    LABEL_TYPE_VENDOR = object()
    LABEL_TYPE_SOURCE = object()
    LABEL_TYPE_COMPONENT = object()
    LABEL_TYPE_HOST = object()
    LABEL_TYPE_RUN = object()
    LABEL_TYPE_INSTALL = object()
    LABEL_TYPE_UNINSTALL = object()
    LABEL_TYPE_OPERATOR_MANIFESTS = object()
    LABEL_TYPE_OPERATOR_BUNDLE_MANIFESTS = object()
    # Candidate label names per type, most-preferred (newest) first.
    LABEL_NAMES = {
        LABEL_TYPE_NAME: ('name', 'Name'),
        LABEL_TYPE_VERSION: ('version', 'Version'),
        LABEL_TYPE_RELEASE: ('release', 'Release'),
        LABEL_TYPE_ARCH: ('architecture', 'Architecture'),
        LABEL_TYPE_VENDOR: ('vendor', 'Vendor'),
        LABEL_TYPE_SOURCE: ('authoritative-source-url', 'Authoritative_Registry'),
        LABEL_TYPE_COMPONENT: ('com.redhat.component', 'BZComponent'),
        LABEL_TYPE_HOST: ('com.redhat.build-host', 'Build_Host'),
        LABEL_TYPE_RUN: ('run', 'RUN'),
        LABEL_TYPE_INSTALL: ('install', 'INSTALL'),
        LABEL_TYPE_UNINSTALL: ('uninstall', 'UNINSTALL'),
        LABEL_TYPE_OPERATOR_MANIFESTS: ('com.redhat.delivery.appregistry',),
        LABEL_TYPE_OPERATOR_BUNDLE_MANIFESTS: ('com.redhat.delivery.operator.bundle',),
    }
    def __init__(self, df_labels):
        """
        Create a new Labels object

        providing access to actual newest labels as well as old ones
        """
        self._df_labels = df_labels
        self._label_values = {}
        for label_type, label_names in Labels.LABEL_NAMES.items():
            # remember the first (most preferred) candidate name that occurs
            match = next(
                ((name, df_labels[name]) for name in label_names if name in df_labels),
                None,
            )
            if match is not None:
                self._label_values[label_type] = match
    def get_name(self, label_type):
        """
        returns the most preferred label name;
        if there isn't any correct name in the list
        it will return newest label name
        """
        found = self._label_values.get(label_type)
        if found is not None:
            return found[0]
        return Labels.LABEL_NAMES[label_type][0]
    @staticmethod
    def get_new_names_by_old():
        """Return dictionary, new label name indexed by old label name."""
        return {
            old_name: names[0]
            for names in Labels.LABEL_NAMES.values()
            for old_name in names[1:]
        }
    def get_name_and_value(self, label_type):
        """
        Return tuple of (label name, label value)

        Raises KeyError if label doesn't exist
        """
        try:
            return self._label_values[label_type]
        except KeyError:
            return (label_type, self._df_labels[label_type])
|
# DARC lib
# Compiles and decompiles .darc file archives
# Compiled files have 1:1 parity to the ones produced by Nintendo
# Written by Jaames
# github.com/jaames | jamesdaniel.dev
import struct
class DarcEntry:
    """A single named file entry inside a darc archive."""
    def __init__(self, name='', data=bytes(0)):
        self.name, self.data = name, data
class DarcGroup:
    """A named collection of DarcEntry objects (darc uses one root group)."""
    def __init__(self, name=''):
        self.name = name
        self.entries = []
    def add_entry(self, name='', data=bytes(0)):
        """Create a new DarcEntry, append it to this group, and return it."""
        new_entry = DarcEntry(name=name, data=data)
        self.entries.append(new_entry)
        return new_entry
class Darc:
    """Reads and writes .darc archives (a single flat root folder of entries).

    `endian` is the struct byte-order prefix: '<' little-endian, '>' big-endian.
    """
    def __init__(self):
        self.root = DarcGroup()
        self.endian = '<'
    @classmethod
    def Open(cls, path):
        """Parse the archive at `path` and return a Darc instance."""
        darc = cls()
        with open(path, 'rb') as f:
            darc.read(f)
        return darc
    def save(self, path):
        """Serialize this archive and write it to `path`."""
        with open(path, 'wb') as f:
            f.write(self.write())
    def read_label(self, buffer, offset):
        """Read a NUL-terminated UTF-16 label at `offset`; the current
        stream position is preserved."""
        cur = buffer.tell()
        buffer.seek(offset)
        result = bytes()
        while True:
            char = buffer.read(2)
            if char != b'\x00\x00':
                result += char
            else:
                buffer.seek(cur)
                # BUG FIX: the original expression parsed as
                # ('UTF-16LE' if little-endian else 'BE') due to operator
                # precedence, so big-endian archives tried to decode with a
                # nonexistent 'BE' codec (LookupError).
                return result.decode('UTF-16' + ('LE' if self.endian == '<' else 'BE'))
    def read(self, buffer):
        """Parse a darc archive from a binary file-like object into self.root."""
        magic, byte_order_mark = struct.unpack('>4sH', buffer.read(6))
        self.endian = '<' if byte_order_mark == 0xFFFE else '>'
        header_size, version, darc_size = struct.unpack('%sH2I'%self.endian, buffer.read(10))
        table_offset, table_size, data_offset = struct.unpack('%s3I'%self.endian, buffer.read(12))
        total_entries = 0
        entry_index = 0
        while True:
            buffer.seek(table_offset + entry_index * 12)
            label_offset, entry_offset, entry_size = struct.unpack('%s3I'%self.endian, buffer.read(12))
            # top byte of label_offset flags a folder entry (the mask checks
            # the top 8 bits, though the original comment said 16)
            is_folder = label_offset & 0xFF000000
            label_offset &= 0x00FFFFFF
            # if folder entry (there doesnt seem to be subfolders, just a single root folder)
            if is_folder:
                # get entry count from root node
                if label_offset == 0:
                    self.root = DarcGroup()
                    total_entries = entry_size
                # get root node label
                else:
                    self.root.name = self.read_label(buffer, (table_offset + total_entries * 12) + label_offset)
            # if normal entry
            else:
                entry = self.root.add_entry()
                buffer.seek(entry_offset)
                entry.data = buffer.read(entry_size)
                entry.name = self.read_label(buffer, (table_offset + total_entries * 12) + label_offset)
            entry_index += 1
            if entry_index >= total_entries:
                break
    def write_label(self, label):
        """Encode `label` as NUL-terminated UTF-16 in the current endianness."""
        # BUG FIX: parenthesized the conditional; the original evaluated to
        # 'UTF-16LE' or the invalid codec name 'BE' due to precedence.
        return label.encode('UTF-16' + ('LE' if self.endian == '<' else 'BE')) + bytes(2)
    def write(self, little_endian=True):
        """Serialize the archive to bytes (header + entry table + labels + data)."""
        self.endian = '<' if little_endian else '>'
        labels = bytes(2) # label data always begins with 2 empty bytes?
        label_offsets = []
        data = bytes()
        data_offsets = []
        # add root label, which is always '.'
        labels += self.write_label('.')
        # pack entry labels + data
        num_entries = len(self.root.entries) + 2
        # sort entries by alphabetical order
        for entry in sorted(self.root.entries, key=lambda entry: entry.name):
            label_offsets.append(len(labels))
            data_offset = len(data)
            # align data offset to a multiple of 0x80
            if data_offset % 0x80 != 0:
                align = 0x80 - data_offset % 0x80
                data += bytes(align)
                data_offset += align
            data_offsets.append(data_offset)
            labels += self.write_label(entry.name)
            data += entry.data
        # add padding to the label section so that the data section aligns to a multiple of 0x80
        table_size = (num_entries * 12) + len(labels)
        base_data_offset = 28 + table_size
        if base_data_offset % 0x80 != 0:
            align = 0x80 - base_data_offset % 0x80
            labels += bytes(align)
            base_data_offset += align
        # write entry table
        table = bytes()
        # add root node
        table += struct.pack('%s3I'%self.endian, 0x01000000, 0, num_entries)
        # add root label node
        table += struct.pack('%s3I'%self.endian, 0x01000002, 0, num_entries)
        # write entries
        for i in range(num_entries - 2):
            label_offset = label_offsets[i]
            data_offset = base_data_offset + data_offsets[i]
            data_size = len(self.root.entries[i].data)
            # pack table entry
            table += struct.pack('%s3I'%self.endian, label_offset, data_offset, data_size)
        # write header
        header = bytes()
        # pack magic + byte order mark
        header += struct.pack('>4sH', b'darc', 0xFFFE if little_endian else 0xFEFF)
        # pack headersize, version, filesize
        header += struct.pack('%sH2I'%self.endian, 28, 16777216, base_data_offset + len(data))
        # pack table offset, table size, data offset
        header += struct.pack('%s3I'%self.endian, 28, table_size, base_data_offset)
        return header + table + labels + data
from django.urls import path
from .views import *
# Task API routes.  Each route is named so that views, tests and templates
# can resolve URLs via reverse()/{% url %} instead of hard-coding paths.
urlpatterns = [
    path("", api_overview, name="api-overview"),
    path('list-api/', task_list_api, name='task-list'),
    path('create-task/', create_task_api, name='task-create'),
    path('update-task/<int:pk>/', update_task_api, name='task-update'),
    path('partially-task/<int:pk>/', partially_task_api, name='task-partial-update'),
    path('delete-task/<int:pk>/', delete_task_api, name='task-delete'),
]
|
'''
Utils/Masstools/formula
_______________________
Tools for calculating theoretical peptide formulas and masses.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load future
from __future__ import print_function
# load modules
import sys
from xldlib import chemical
from xldlib.definitions import ZIP
from xldlib.resources import chemical_defs
from xldlib.utils import logger
__all__ = [
'CrosslinkedMass',
'getpeptideformula',
]
# STANDARD PEPTIDES
# -----------------
def add_modifications(self, *ids, **kwds):
    '''
    Add modifications from `ids` to `self`, calling `update_formula`
    to add the new elements.

    Args:
        self: formula accumulator exposing `update_formula` (called with
            the first positional argument as in a bound-method style)
        ids (int): integer IDs for the modifications

    Kwds:
        crosslinkers (bool): include crosslinker fragments (False)
    '''
    mods = (chemical_defs.MODIFICATIONS[i] for i in ids)
    # skip crosslinker fragment modifications unless explicitly requested
    filtered = (i for i in mods if kwds.get('crosslinkers') or not i.fragment)
    for modification in filtered:
        self.update_formula(formula=modification.formula)
def getmodificationsformula(modifications, engine, **kwds):
    '''
    Calculates the summed formula for all mods within the standard
    library, potentially including chemical crosslinker fragments if
    `crosslinkers` is set to True in kwds.

    Args:
        modifications: object whose `byposition()` maps position -> names
        engine: matched-search engine providing default modification IDs

    Returns:
        chemical.Molecule accumulating all modification formulas
    '''
    atom_counts = chemical.Molecule()
    modifications = modifications.byposition()
    for position, names in modifications.items():
        try:
            key = frozenset(names)
            if key in chemical_defs.MODIFICATION_INTERACTIONS:
                # add the converted modification from id.
                # BUG FIX: the lookup must use the frozenset key; indexing
                # with the raw (unhashable) `names` always raised TypeError
                # and incorrectly fell into the "not recognized" branch.
                id_ = chemical_defs.MODIFICATION_INTERACTIONS[key].converted
                add_modifications(atom_counts, id_, **kwds)
            else:
                # no modification interaction, just add each name's ID
                ids = [engine.defaults.modifications.get(i)[0] for i in names]
                add_modifications(atom_counts, *ids, **kwds)
        except (TypeError, IndexError):
            # NoneType from .get(name)[0] or [][0]
            print("Modification not recognized: " + str(names), file=sys.stderr)
    return atom_counts
def getpeptideformula(peptide, mods, engine, **kwds):
    '''
    Build the complete chemical formula for `peptide`, including its
    modifications (by default, only those in the standard library).
    '''
    formula = getmodificationsformula(mods, engine, **kwds)
    formula.update_formula(peptide=peptide)
    return formula
# CROSSLINKED PEPTIDES
# --------------------
@logger.init('chemical', 'DEBUG')
class CrosslinkedMass(object):
    '''
    Helpers for computing crosslinker link masses and the total masses
    of crosslinked peptides.
    '''

    def __init__(self, row, crosslinker):
        super(CrosslinkedMass, self).__init__()
        self.crosslinker = crosslinker
        self.engine = row.engines['matched']
        self.setdeadendmass(crosslinker.ends)
        self.bridgemass = chemical.Molecule(crosslinker.bridge).mass

    # SETTERS

    def setdeadendmass(self, ends):
        '''
        Cache the mass of the dead-end modification at each reactive
        site, allowing quick lookups later.
        '''
        self.deadendmass = {}
        for site, deadend in ZIP(ends.aminoacid, ends.deadend):
            self.deadendmass[site] = chemical.Molecule(deadend).mass

    # GETTERS

    def getpeptidemass(self, ends, formulas, modifications):
        '''Returns the total mass for a crosslinked peptide'''
        total = sum(formula.mass for formula in formulas)
        total += sum(mod['neutralloss'] for mod in modifications)
        return total + self.getlinkmass(ends)

    def getlinkmass(self, ends):
        '''Calculates the linkmass of the crosslinked peptide bridging mode'''
        return sum(self.getdeadendmass(ends)) + (ends.number * self.bridgemass)

    def getdeadendmass(self, ends):
        '''Yields the mass change from each dead-end modification.'''
        for reactivity, count in ends.dead.items():
            yield self.deadendmass[reactivity] * count
|
# -*- coding: utf-8 -*-
# TODO(@sobolevn): write docs for each error, remove ignore from setup.cfg
"""
All style errors are defined here.
They should be sorted by ``_code``.
"""
from ast import AST
from typing import Optional, Tuple
class BaseStyleViolation(object):
    """
    This is a base class for all style errors.

    It basically just defines how to create any error and how to format
    this error later on.  Subclasses provide ``_error_tmpl`` and ``_code``.
    """

    _error_tmpl: str
    _code: str

    def __init__(self, node: AST, text: Optional[str] = None) -> None:
        """Creates new instance of style error.

        Args:
            node: the AST node this violation was found on.
            text: optional token text for the message; defaults to the
                lowercased class name of ``node``.
        """
        # FIX: `text: str = None` was an invalid implicit-Optional
        # annotation; it is now declared as Optional[str].
        self.node = node
        if text is None:
            self._text = node.__class__.__name__.lower()
        else:
            self._text = text

    def message(self) -> str:
        """
        Returns error's formatted message.

        >>> import ast
        >>> error = WrongKeywordViolation(ast.Pass())
        >>> error.message()
        'WPS100 Found wrong keyword "pass"'

        >>> error = WrongKeywordViolation(ast.Delete(), text='del')
        >>> error.message()
        'WPS100 Found wrong keyword "del"'
        """
        return self._error_tmpl.format(self._code, self._text)

    def node_items(self) -> Tuple[int, int, str]:
        """Returns `Tuple` to match `flake8` API format."""
        # Some synthetic nodes have no position info; default to 0.
        lineno = getattr(self.node, 'lineno', 0)
        col_offset = getattr(self.node, 'col_offset', 0)
        return lineno, col_offset, self.message()
class WrongKeywordViolation(BaseStyleViolation):
    """
    This rule forbids to use some keywords from ``python``.

    We do this, since some keywords are anti-patterns.

    Example::

        # Wrong:
        pass
        exec
        eval

    Note:
        Returns WPS100 as error code
    """

    _error_tmpl = '{0} Found wrong keyword "{1}"'
    _code = 'WPS100'


class BareRiseViolation(BaseStyleViolation):
    """
    This rule forbids using bare `raise` keyword outside of `except` block.

    This may be a serious error in your application,
    so we should prevent that.

    Example::

        # Correct:
        raise ValueError('Value is too low')

        # Wrong:
        raise

    Note:
        Returns WPS101 as error code
    """
    # NOTE(review): the class name looks misspelled ("Rise" vs "Raise");
    # renaming would break external references, so it is only flagged here.

    _error_tmpl = '{0} Found bare raise outside of except "{1}"'
    _code = 'WPS101'


class RaiseNotImplementedViolation(BaseStyleViolation):
    """
    This rule forbids to use the `NotImplemented` constant in `raise`.

    `NotImplemented` and `NotImplementedError` have different use cases.
    Use cases of `NotImplemented` are too limited to be generally raised.

    Example::

        # Correct:
        raise NotImplementedError('To be done')

        # Wrong:
        raise NotImplemented

    See Also:
        https://stackoverflow.com/a/44575926/4842742

    Note:
        Returns WPS102 as error code
    """

    _error_tmpl = '{0} Found raise NotImplemented "{1}"'
    _code = 'WPS102'


class WrongFunctionCallViolation(BaseStyleViolation):
    """
    This rule forbids to call some built-in functions.

    Since some functions are only suitable for very specific use cases,
    we forbid to use them in a free manner.

    Note:
        Returns WPS110 as error code
    """

    _error_tmpl = '{0} Found wrong function call "{1}"'
    _code = 'WPS110'
class WrongVariableNameViolation(BaseStyleViolation):
    """
    This rule forbids to have blacklisted variable names.

    Example::

        # Correct:
        html_node = None

        # Wrong:
        item = None

    Note:
        Returns WPS120 as error code
    """

    _error_tmpl = '{0} Found wrong variable name "{1}"'
    _code = 'WPS120'


class TooShortVariableNameViolation(BaseStyleViolation):
    """
    This rule forbids to have too short variable names.

    Example::

        # Correct:
        x_coord = 1

        # Wrong:
        x = 1

    Note:
        Returns WPS121 as error code
    """

    _error_tmpl = '{0} Found too short variable name "{1}"'
    _code = 'WPS121'


class WrongArgumentNameViolation(BaseStyleViolation):
    """
    This rule forbids to have blacklisted function argument names.

    Example::

        # Correct:
        def parse(xml_tree): ...

        # Wrong:
        def parse(value): ...

    Note:
        Returns WPS122 as error code
    """

    _error_tmpl = '{0} Found wrong argument name "{1}"'
    _code = 'WPS122'


class TooShortArgumentNameViolation(BaseStyleViolation):
    """
    This rule forbids to have too short argument names.

    Example::

        # Correct:
        def test(username): ...

        # Wrong:
        def test(a): ...

    Note:
        Returns WPS123 as error code
    """

    _error_tmpl = '{0} Found too short argument name "{1}"'
    _code = 'WPS123'


class WrongAttributeNameViolation(BaseStyleViolation):
    """
    This rule forbids to have attributes with blacklisted names.

    Example::

        # Correct:
        class NormalClass:
            request_payload = None

        # Wrong:
        class WithBlacklisted:
            data = None

    Note:
        Returns WPS124 as error code
    """

    _error_tmpl = '{0} Found wrong attribute name "{1}"'
    _code = 'WPS124'


class TooShortAttributeNameViolation(BaseStyleViolation):
    """
    This rule forbids to have attributes with too short names.

    Example::

        # Correct:
        class WithAttributes:
            def __init__(self):
                self.room_number = 1

        # Wrong:
        class WithAttributes:
            def __init__(self):
                self.a = 1

    Note:
        Returns WPS125 as error code
    """

    _error_tmpl = '{0} Found too short attribute name "{1}"'
    _code = 'WPS125'


class WrongFunctionNameViolation(BaseStyleViolation):
    """
    This rule forbids to have functions with blacklisted names.

    Example::

        # Correct:
        def request_dispatcher(): ...

        # Wrong:
        def handler(): ...

    Note:
        Returns WPS126 as error code
    """

    _error_tmpl = '{0} Found wrong function name "{1}"'
    _code = 'WPS126'


class TooShortFunctionNameViolation(BaseStyleViolation):
    """
    This rule forbids to have functions with too short names.

    Example::

        # Correct:
        def collect_coverage(): ...

        # Wrong:
        def c(): ...

    Note:
        Returns WPS127 as error code
    """

    _error_tmpl = '{0} Found too short function name "{1}"'
    _code = 'WPS127'
class WrongModuleMetadataViolation(BaseStyleViolation):
    """
    This rule forbids to have some module-level metadata variables.

    We discourage using module variables like ``__author__``, because
    there's no need in them. Use proper docstrings and classifiers.

    Example::

        # Wrong:
        __author__ = 'Nikita Sobolev'

    Note:
        Returns WPS126 as error code
    """
    # NOTE(review): 'WPS126' duplicates WrongFunctionNameViolation's code,
    # breaking the "sorted by _code" invariant stated in the module
    # docstring.  The correct unique code cannot be determined from this
    # file alone — confirm against the published code table before fixing.

    _error_tmpl = '{0} Found wrong metadata variable {1}'
    _code = 'WPS126'
class LocalFolderImportViolation(BaseStyleViolation):
    """
    This rule forbids to have imports relative to the current folder.

    Example::

        # Correct:
        from my_package.version import get_version

        # Wrong:
        from .version import get_version
        from ..drivers import MySQLDriver

    Note:
        Returns WPS130 as error code
    """

    _error_tmpl = '{0} Found local folder import "{1}"'
    _code = 'WPS130'


class NestedImportViolation(BaseStyleViolation):
    """
    This rule forbids to have nested imports in functions.

    Nested imports show that there's an issue with your design.
    So, you don't need nested imports, you need to refactor your code.

    Example::

        # Wrong:
        def some():
            from my_module import some_function

    Note:
        Returns WPS131 as error code
    """

    _error_tmpl = '{0} Found nested import "{1}"'
    _code = 'WPS131'


class DynamicImportViolation(BaseStyleViolation):
    """
    This rule forbids importing your code with the ``__import__()`` function.

    This is almost never a good idea. So, it is an error by default.
    Use regular imports instead.
    Or use ``importlib.import_module()`` in case you know what you are doing.

    Example::

        # Wrong:
        my_module = __import__('my_module')

    See Also:
        https://docs.python.org/3/library/functions.html#__import__

    Note:
        Returns WPS132 as error code
    """

    _error_tmpl = '{0} Found dynamic import "{1}"'
    _code = 'WPS132'
class NestedFunctionViolation(BaseStyleViolation):
    """
    This rule forbids to have nested functions.

    Just write flat functions, there's no need to nest them.
    However, there are some whitelisted names like: ``decorator``.

    Example::

        # Wrong:
        def do_some():
            def inner():
                ...

    Note:
        Returns WPS140 as error code
    """

    _error_tmpl = '{0} Found nested function "{1}"'
    _code = 'WPS140'


class NestedClassViolation(BaseStyleViolation):
    """
    This rule forbids to have nested classes.

    Just write flat classes, there's no need to nest them.
    However, there are some whitelisted class names like: ``Meta``.

    Example::

        # Wrong:
        class Some:
            class Inner:
                ...

    Note:
        Returns WPS141 as error code
    """

    _error_tmpl = '{0} Found nested class "{1}"'
    _code = 'WPS141'
class TooManyLocalsViolation(BaseStyleViolation):
    """
    This rule forbids to have too many local variables in the unit of code.

    If you have too many variables in a function, you have to refactor it.

    Note:
        Returns WPS150 as error code
    """

    _error_tmpl = '{0} Found too many local variables "{1}"'
    _code = 'WPS150'


class TooManyArgumentsViolation(BaseStyleViolation):
    """
    This rule forbids to have too many arguments for a function or method.

    This is an indicator of a bad design.
    When a function requires many arguments
    it shows that it is required to refactor this piece of code.

    Note:
        Returns WPS151 as error code
    """

    _error_tmpl = '{0} Found too many arguments "{1}"'
    _code = 'WPS151'


class TooManyBranchesViolation(BaseStyleViolation):
    """
    This rule forbids to have too many branches in a function.

    When there are too many branches, functions are hard to test.
    They are also hard to read and hard to change.

    Note:
        Returns WPS152 as error code
    """

    _error_tmpl = '{0} Found too many branches "{1}"'
    _code = 'WPS152'


class TooManyReturnsViolation(BaseStyleViolation):
    """
    This rule forbids placing too many ``return`` statements into the function.

    When there are too many ``return`` keywords, functions are hard to test.
    They are also hard to read and hard to change.

    Note:
        Returns WPS153 as error code
    """

    _error_tmpl = '{0} Found too many return statements "{1}"'
    _code = 'WPS153'


class TooManyExpressionsViolation(BaseStyleViolation):
    """
    This rule forbids putting too many expressions in a unit of code.

    Because when there are too many expressions, it means that the code
    has some logical or structural problems.
    We only have to identify them.

    Note:
        Returns WPS154 as error code
    """

    _error_tmpl = '{0} Found too many expressions "{1}"'
    _code = 'WPS154'


class TooDeepNestingViolation(BaseStyleViolation):
    """
    This rule forbids nesting blocks too deep.

    Nesting that is too deep indicates another problem:
    too many things are going on at the same time.
    So, we need to check these cases before
    they have made their way to production.

    Note:
        Returns WPS155 as error code
    """

    _error_tmpl = '{0} Found too deep nesting "{1}"'
    _code = 'WPS155'
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import UserRating, Strain
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django's built-in User."""

    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups']


class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of Django's built-in Group."""

    class Meta:
        model = Group
        fields = ['url', 'name']


class UserRatingSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes a single user's rating of a strain."""

    class Meta:
        model = UserRating
        fields = [
            'created', 'username', 'userclass',
            'strain_name', 'rating'
        ]


class StrainSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes strain metadata: name, effect/flavor lists, description."""

    class Meta:
        model = Strain
        fields = [
            'created', 'updated',
            'strain_name', 'strain_effect_list', 'strain_flavor_list',
            'strain_desc',
            # embed fields intentionally excluded from the API for now:
            # 'strain_effect_embed', 'strain_flavor_embed', 'strain_desc_embed'
        ]
import pytest
from clean import remove_punctuation
# Each tuple pairs a raw input string with its expected punctuation-free
# form: punctuation characters are dropped (including apostrophes, so
# "let's" becomes "lets"), while words and single spaces are preserved.
@pytest.mark.parametrize("input_argument, expected_return", [
    ('Hello, I am Tim.', 'Hello I am Tim'),
    (';String. with. punctuation characters!',
     'String with punctuation characters'),
    ('Watch out!!!', 'Watch out'),
    ('Spaces - should - work the same, yes?',
     'Spaces should work the same yes'),
    ("Some other (chars) |:-^, let's delete them",
     'Some other chars lets delete them'),
])
def test_remove_punctuation(input_argument, expected_return):
    """remove_punctuation() strips punctuation but keeps words and spaces."""
    assert remove_punctuation(input_argument) == expected_return
def prime(a):
    """Return True if `a` is a prime number, False otherwise.

    Numbers below 2 are not prime; otherwise trial-divide by every
    integer from 2 up to floor(sqrt(a)).

    FIX: `xrange` only exists on Python 2; `range` behaves identically
    here and works on both major versions.
    """
    if a < 2:
        return False
    return all(a % x != 0 for x in range(2, int(a ** 0.5) + 1))
|
class Leapx_org():
    """An employee record with derived e-mail and pay-raise behavior."""

    # Multiplier applied on every incrementpay() call.
    mul_num = 1.20
    # Running total of instances ever created (class-wide counter).
    count = 0

    def __init__(self, first, last, pay):
        """Store the name parts and pay, derive the full name, bump count."""
        self.f_name = first
        self.l_name = last
        self.pay_amt = pay
        self.full_name = first + " " + last
        Leapx_org.count += 1

    def make_email(self):
        """Build the company address as '<first>.<last>@xyz.com'."""
        return self.f_name + "." + self.l_name + "@xyz.com"

    def incrementpay(self):
        """Apply the raise multiplier, truncate to int, store and return."""
        self.pay_amt = int(self.pay_amt * self.mul_num)
        return self.pay_amt
class instructor(Leapx_org):
    # Marker subclass: instructors reuse all Leapx_org behavior unchanged.
    # NOTE(review): lowercase class name violates PEP 8 (should be
    # Instructor); renaming would break the calls below, so only flagged.
    pass
# Demo script: create two instructors and show the shared class counter
# and generated e-mail addresses.
# NOTE(review): the bare `print` statements below are Python 2 syntax;
# this block will not parse under Python 3 without converting them to
# print() calls.
I_obj1 = instructor('mohit', 'RAJ', 60000)
I_obj2 = instructor('Ravender', 'Dahiya',70000)
print "number of employees ", instructor.count
print I_obj1.make_email()
print I_obj2.make_email()
print (help(instructor))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.