Dataset schema (one table row per column; ⌀ marks columns that can contain null values):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40 – 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 – 248 |
| max_stars_repo_name | string | lengths 5 – 125 |
| max_stars_repo_head_hexsha | string | lengths 40 – 78 |
| max_stars_repo_licenses | list | lengths 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 – 24 ⌀ |
| max_issues_repo_path | string | lengths 3 – 248 |
| max_issues_repo_name | string | lengths 5 – 125 |
| max_issues_repo_head_hexsha | string | lengths 40 – 78 |
| max_issues_repo_licenses | list | lengths 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 – 24 ⌀ |
| max_forks_repo_path | string | lengths 3 – 248 |
| max_forks_repo_name | string | lengths 5 – 125 |
| max_forks_repo_head_hexsha | string | lengths 40 – 78 |
| max_forks_repo_licenses | list | lengths 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 – 24 ⌀ |
| content | string | lengths 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

Each record below is shown as its metadata row, followed by the file content, followed by a row with the content-derived metric values.
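The content-derived columns at the end of the schema can be computed from a record's content field; the following is a minimal sketch of plausible definitions (the exact formulas used when the dataset was built are an assumption here):

# Hypothetical reconstruction of the content-derived columns; treat as illustrative only.
def derived_metrics(content: str) -> dict:
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }

print(derived_metrics("import os\nprint(os.getcwd())\n"))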
| ce027805c06db61c04f315262615e01faa30ae5a | 18,148 | py | Python | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | ["Apache-2.0"] | 88 | 2018-06-08T14:21:18.000Z | 2022-03-31T12:25:59.000Z | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | ["Apache-2.0"] | 37 | 2018-11-01T09:50:07.000Z | 2022-02-24T12:20:16.000Z | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | ["Apache-2.0"] | 26 | 2018-06-08T14:21:28.000Z | 2022-01-28T12:55:16.000Z |
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
from metview import dataset
import re
import pandas as pd
import metview as mv
from metview.indexer import GribIndexer
# logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
# logging.basicConfig(level=logging.DEBUG, format="%(levelname)s - %(message)s")
LOG = logging.getLogger(__name__)
PANDAS_ORI_OPTIONS = {}
def init_pandas_options():
global PANDAS_ORI_OPTIONS
if len(PANDAS_ORI_OPTIONS) == 0:
opt = {
"display.max_colwidth": 300,
"display.colheader_justify": "center",
"display.max_columns": 100,
"display.max_rows": 500,
"display.width": None,
}
for k, _ in opt.items():
PANDAS_ORI_OPTIONS[k] = pd.get_option(k)
for k, v in opt.items():
pd.set_option(k, v)
def reset_pandas_options():
global PANDAS_ORI_OPTIONS
if len(PANDAS_ORI_OPTIONS) > 0:
for k, v in PANDAS_ORI_OPTIONS.items():
pd.set_option(k, v)
PANDAS_ORI_OPTIONS = {}
class ParamInfo:
SUFFIXES = {
"hPa": "isobaricInhPa",
"hpa": "isobaricInhPa",
"K": "theta",
"ml": "hybrid",
}
LEVEL_TYPES = {"sfc": "surface", "pl": "isobaricInhPa", "ml": "hybrid"}
LEVEL_RE = re.compile(r"(\d+)")
NUM_RE = re.compile(r"[0-9]+")
SURF_RE = re.compile(r"^\d+\w+")
# SURF_NAME_MAPPER = {"t2": "2t", "q2": "2q", "u10": "10u", "v10": "10v"}
KNOWN_SURF_NAMES = ["2t", "2q", "10u", "10v", "msl", "wind10m"]
VECTOR_NAMES = ["wind10m", "wind3d", "wind"] # the longest ones first
def __init__(self, name, meta=None, scalar=None):
self.name = name
self.scalar = scalar if scalar is not None else True
self.meta = {} if meta is None else meta
if len(self.meta) == 0:
self.meta["shortName"] = name
def make_filter(self):
dims = {}
if self.name:
dims["shortName"] = [self.name]
for n in ["level", "typeOfLevel"]:
v = self.meta.get(n, None)
if v is not None:
dims[n] = [v]
return dims
@staticmethod
def build_from_name(full_name, param_level_types=None):
full_name = full_name
name = full_name
level = None
level_type = ""
# the name is a known param name
if param_level_types:
if name in param_level_types:
lev_t = param_level_types.get(name, [])
meta = {}
if len(lev_t) == 1:
meta = {"typeOfLevel": lev_t[0], "level": None}
scalar = not name in ParamInfo.VECTOR_NAMES
return ParamInfo(name, meta=meta, scalar=scalar)
t = full_name
# surface fields
if t in ParamInfo.KNOWN_SURF_NAMES or ParamInfo.SURF_RE.match(t) is not None:
level_type = "surface"
else:
# guess the level type from the suffix
for k, v in ParamInfo.SUFFIXES.items():
if full_name.endswith(k):
level_type = v
t = full_name[: -(len(k))]
break
# recognise vector params
for v in ParamInfo.VECTOR_NAMES:
if t.startswith(v):
name = v
t = t[len(v) :]
break
# determine level value
m = ParamInfo.LEVEL_RE.search(t)
if m and m.groups() and len(m.groups()) == 1:
level = int(m.group(1))
if level_type == "" and level > 10:
level_type = "isobaricInhPa"
if name == full_name:
name = ParamInfo.NUM_RE.sub("", t)
# check param name in the conf
if param_level_types:
if not name in param_level_types:
raise Exception(
f"Param={name} (guessed from name={full_name}) is not found in dataset!"
)
lev_t = param_level_types.get(name, [])
if lev_t:
if not level_type and len(lev_t) == 1:
level_type = lev_t[0]
elif level_type and level_type not in lev_t:
raise Exception(
f"Level type cannot be guessed from param name={full_name}!"
)
if level_type == "":
level = None
scalar = not name in ParamInfo.VECTOR_NAMES
LOG.debug(f"scalar={scalar}")
meta = {"level": level, "typeOfLevel": level_type}
return ParamInfo(name, meta=meta, scalar=scalar)
@staticmethod
def build_from_fieldset(fs):
assert isinstance(fs, mv.Fieldset)
f = fs[0:3] if len(fs) >= 3 else fs
m = ParamInfo._grib_get(f, GribIndexer.DEFAULT_ECC_KEYS)
name = level = lev_type = ""
scalar = True
meta_same = True
for x in m.keys():
if x != "shortName" and m[x].count(m[x][0]) != len(m[x]):
                meta_same = False
break
if meta_same:
if len(m["shortName"]) == 3 and m["shortName"] == ["u", "v", "w"]:
name = "wind3d"
scalar = False
elif len(m["shortName"]) >= 2:
if m["shortName"][0:2] == ["u", "v"]:
name = "wind"
scalar = False
elif m["shortName"][0:2] == ["10u", "10v"]:
name = "wind10m"
m["level"][0] = 0
m["typeOfLevel"][0] = "sfc"
scalar = False
if not name:
name = m["shortName"][0]
if name:
return ParamInfo(name, meta={k: v[0] for k, v in m.items()}, scalar=scalar)
else:
return None
def _meta_match(self, meta, key):
local_key = key if key != "levelist" else "level"
if (
key in meta
and meta[key] is not None
and meta[key]
and local_key in self.meta
):
# print(f"local={self.meta[local_key]} other={meta[key]}")
if isinstance(meta[key], list):
return str(self.meta[local_key]) in meta[key]
else:
return meta[key] == str(self.meta[local_key])
else:
return False
def match(self, name, meta):
# print(f"{self}, name={name}, meta={meta}")
r = 0
if self.name == name:
r += 3
for n in ["shortName", "paramId"]:
if self._meta_match(meta, n):
r += 1
# we only check the rest if the param is ok
if r > 0:
if self._meta_match(meta, "typeOfLevel"):
r += 1
if self._meta_match(meta, "levelist"):
r += 1
return r
def update_meta(self, meta):
self.meta = {**meta, **self.meta}
@staticmethod
def _grib_get(f, keys):
md = mv.grib_get(f, keys, "key")
m = {}
for k, v in zip(keys, md):
key_val = k.split(":")[0]
val = v
if k.endswith(":l"):
val = []
for x in v:
try:
val.append(int(x))
except:
val.append(None)
m[key_val] = val
return m
def __str__(self):
return "{}[name={}, scalar={}, meta={}]".format(
self.__class__.__name__, self.name, self.scalar, self.meta
)
class ParamDesc:
def __init__(self, name):
self.db = None
# self.name = name
self.md = {}
self.levels = {}
self._short_name = None
self._param_id = None
self._long_name = None
self._units = None
def load(self, db):
raise NotImplementedError
def _parse(self, md):
if "level" in md and len(md["level"]) > 0:
df = pd.DataFrame(md)
md.pop("typeOfLevel")
md.pop("level")
for md_key in list(md.keys()):
d = df[md_key].unique().tolist()
self.md[md_key] = d
lev_types = df["typeOfLevel"].unique().tolist()
for t in lev_types:
# print(f" t={t}")
self.levels[t] = []
q = f"typeOfLevel == '{t}'"
# print(q)
dft = df.query(q)
if dft is not None:
self.levels[t] = dft["level"].unique().tolist()
@property
def short_name(self):
if self._short_name is None:
self._short_name = ""
if self.md["shortName"]:
self._short_name = self.md["shortName"][0]
return self._short_name
@property
def param_id(self):
if self._param_id is None:
self._param_id = ""
if self.md["paramId"]:
self._param_id = self.md["paramId"][0]
return self._param_id
@property
def long_name(self):
if self._long_name is None:
self._long_name = ""
if self.db is not None:
self._long_name, self._units = self.db.get_longname_and_units(
self.short_name, self.param_id
)
return self._long_name
@property
def units(self):
if self._units is None:
self._units = ""
if self.db:
self._long_name, self._units = self.db.get_longname_and_units(
self.short_name, self.param_id
)
return self._units
@staticmethod
def describe(db, param=None):
in_jupyter = False
labels = {"marsClass": "class", "marsStream": "stream", "marsType": "type"}
try:
import IPython
# test whether we're in the Jupyter environment
if IPython.get_ipython() is not None:
in_jupyter = True
except:
pass
# describe all the params
if param is None:
t = {"parameter": [], "typeOfLevel": [], "level": []}
need_number = False
for k, v in db.param_meta.items():
if not v.md.get("number", None) in [["0"], [None]]:
need_number = True
break
for k, v in db.param_meta.items():
t["parameter"].append(k)
if len(v.levels) > 1:
lev_type = ""
level = ""
cnt = 0
for md_k, md in v.levels.items():
if in_jupyter:
lev_type += md_k + "<br>"
level += str(ParamDesc.format_list(md)) + "<br>"
else:
prefix = " " if cnt > 0 else ""
lev_type += prefix + f"[{cnt+1}]:" + md_k
level += (
prefix + f"[{cnt+1}]:" + str(ParamDesc.format_list(md))
)
cnt += 1
t["typeOfLevel"].append(lev_type)
t["level"].append(level)
else:
for md_k, md in v.levels.items():
t["typeOfLevel"].append(md_k)
t["level"].append(ParamDesc.format_list(md))
for md_k, md in v.md.items():
if md_k != "number" or need_number:
md_k = labels.get(md_k, md_k)
if not md_k in t:
t[md_k] = []
t[md_k].append(ParamDesc.format_list(md))
if in_jupyter:
txt = ParamDesc._make_html_table(t)
from IPython.display import HTML
return HTML(txt)
else:
df = pd.DataFrame.from_dict(t)
df = df.set_index(["parameter"])
init_pandas_options()
print(df)
# specific param
else:
v = None
if isinstance(param, str):
v = db.param_meta.get(param, None)
elif isinstance(param, int):
v = db.param_id_meta(param)
if v is None:
print(f"No shortName/paramId={param} found in data!")
return
# if v is not None:
t = {
"key": ["shortName"],
"val": [v.short_name],
}
if v.long_name != "" or v.units != "":
t["key"].append("name")
t["val"].append(v.long_name)
t["key"].append("paramId")
t["val"].append(v.param_id)
# ParamDesc.format_list(v.md["shortName"], full=True),
if v.long_name != "" or v.units != "":
t["key"].append("units")
t["val"].append(v.units)
add_cnt = len(v.levels) > 1
cnt = 0
for md_k, md in v.levels.items():
t["key"].append("typeOfLevel" + (f"[{cnt+1}]" if add_cnt else ""))
t["val"].append(md_k)
t["key"].append("level" + (f"[{cnt+1}]" if add_cnt else ""))
t["val"].append(ParamDesc.format_list(md, full=True))
cnt += 1
for kk, md_v in v.md.items():
if kk == "number" and md_v == ["0"]:
continue
if not kk in ["shortName", "paramId"]:
t["key"].append(labels.get(kk, kk))
t["val"].append(ParamDesc.format_list(md_v, full=True))
if in_jupyter:
from IPython.display import HTML
txt = ParamDesc._make_html_table(t, header=False)
return HTML(txt)
else:
df = pd.DataFrame.from_dict(t)
df = df.set_index("key")
init_pandas_options()
print(df)
@staticmethod
def _make_html_table(d, header=None):
header = header if header is not None else True
if len(d) > 1:
first_column_name = list(d.keys())[0]
txt = """
<table>
<tr>{}</tr>
{}
</table>""".format(
"" if not header else "".join([f"<th>{k}</th>" for k in d.keys()]),
"".join(
[
"<tr><th style='text-align: right;'>"
+ d[first_column_name][i]
+ "</th>"
+ "".join(
[
f"<td style='text-align: left;'>{ParamDesc.format_list(d[k][i], full=True)}</td>"
for k in list(d.keys())[1:]
]
)
+ "</tr>"
for i in range(len(d[first_column_name]))
]
),
)
return txt
else:
return ""
@staticmethod
def format_list(v, full=None):
if isinstance(v, list):
if full is True:
return ",".join([str(x) for x in v])
else:
if len(v) == 1:
return v[0]
if len(v) > 2:
return ",".join([str(x) for x in [v[0], v[1], "..."]])
else:
return ",".join([str(x) for x in v])
else:
return v
class ParamNameDesc(ParamDesc):
def __init__(self, name):
super().__init__(name)
self._short_name = name
def load(self, db):
md = {
"typeOfLevel": [],
"level": [],
"date": [],
"time": [],
"step": [],
"number": [],
"paramId": [],
"marsClass": [],
"marsStream": [],
"marsType": [],
"experimentVersionNumber": [],
}
self.db = db
self.md = {}
self.levels = {}
# print(f"par={par}")
for b_name, b_df in db.blocks.items():
if b_name == "scalar":
q = f"shortName == '{self.short_name}'"
dft = b_df.query(q)
elif b_name == self.short_name:
dft = b_df
else:
dft = None
if dft is not None:
for k in md.keys():
# print(f"{self.name}/{k}")
md[k].extend(dft[k].tolist())
# print(f" df[{k}]={df[k]}")
# print(df)
self._parse(md)
class ParamIdDesc(ParamDesc):
def __init__(self, param_id):
super().__init__("")
self._param_id = param_id
def load(self, db):
md = {
"shortName": [],
"typeOfLevel": [],
"level": [],
"date": [],
"time": [],
"step": [],
"number": [],
"paramId": [],
"marsClass": [],
"marsStream": [],
"marsType": [],
"experimentVersionNumber": [],
}
self.db = db
self.md = {}
self.levels = {}
# print(f"par={par}")
b_df = db.blocks.get("scalar", None)
if b_df is not None:
q = f"paramId == '{self._param_id}'"
dft = b_df.query(q)
if dft is not None:
for k in md.keys():
md[k].extend(dft[k].tolist())
self._parse(md)
| 32.407143 | 113 | 0.454761 | 16,712 | 0.920873 | 0 | 0 | 11,216 | 0.61803 | 0 | 0 | 3,253 | 0.179248 |
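A hedged usage sketch for the name-parsing logic in ParamInfo.build_from_name above; the expected outputs are inferred from the code, and the import assumes a working metview-python installation:

from metview.param import ParamInfo  # assumes metview-python (and Metview) is installed

# "t500": the trailing digits become the level, and an unsuffixed level > 10
# is guessed to be a pressure level.
p = ParamInfo.build_from_name("t500")
print(p.name, p.meta)   # expected: t {'level': 500, 'typeOfLevel': 'isobaricInhPa'}

# "z500hPa": the "hPa" suffix fixes the level type before the level is parsed.
z = ParamInfo.build_from_name("z500hPa")
print(z.name, z.meta)   # expected: z {'level': 500, 'typeOfLevel': 'isobaricInhPa'}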
| ce03e73e55d15e74f86d8e0bd047fcc03b6a00ce | 316 | py | Python | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | ["MIT"] | 1 | 2019-12-16T21:55:53.000Z | 2019-12-16T21:55:53.000Z | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | ["MIT"] | null | null | null | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# coding: UTF-8
'''!
module description
@author <A href="email:fulkgl@gmail.com">George L Fulk</A>
'''
__version__ = 0.01
def main():
'''!
main description
'''
print("Hello world")
return 0
if __name__ == "__main__":
# command line entry point
main()
# END #
| 13.166667 | 58 | 0.598101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.670886 |
| ce07948f6f31a33c9447bac9ba7da84e0cc0cfdb | 25 | py | Python | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | ["MIT"] | null | null | null | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | ["MIT"] | null | null | null | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | ["MIT"] | null | null | null |
from .write_grok import *
| 25 | 25 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| ce079ba915fb3b960bd7c0c9b579e190a8341d22 | 1,883 | py | Python | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | ["MIT"] | 1 | 2021-12-24T20:32:51.000Z | 2021-12-24T20:32:51.000Z | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | ["MIT"] | null | null | null | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | ["MIT"] | null | null | null |
from hashlib import sha256
from django.http import HttpResponse
from django.shortcuts import redirect, render
from .models import Usuarios
def login(request):
if request.session.get('usuario'):
return redirect('/livro/home/')
status = request.GET.get('status')
return render(request, 'login.html', {'status': status})
def cadastro(request):
if request.session.get('usuario'):
return redirect('/livro/home/')
status = request.GET.get('status')
return render(request, 'cadastro.html', {'status': status})
def valida_cadastro(request):
nome = request.POST.get('nome')
senha = request.POST.get('senha')
email = request.POST.get('email')
usuario = Usuarios.objects.filter(email = email)
if len(nome.strip()) == 0 or len(email.strip()) == 0:
return redirect('/auth/cadastro/?status=1')
if len(senha) < 8:
return redirect('/auth/cadastro/?status=2')
if len(usuario) > 0:
        return redirect('/auth/cadastro/?status=3')
try:
senha = sha256(senha.encode()).hexdigest()
usuario = Usuarios(
nome=nome,
email=email,
senha=senha,
)
usuario.save()
return redirect('/auth/cadastro/?status=0')
except:
return redirect('/auth/cadastro/?status=4')
def validar_login(request):
email = request.POST.get('email')
senha = request.POST.get('senha')
senha = sha256(senha.encode()).hexdigest()
usuario = Usuarios.objects.filter(email=email).filter(senha=senha)
if len(usuario) == 0:
return redirect('/auth/login/?status=1')
elif len(usuario) > 0:
request.session['usuario'] = usuario[0].id
return redirect(f'/livro/home/')
return HttpResponse(f"{email} {senha}")
def sair(request):
request.session.flush()
return redirect('/auth/login/')
| 25.445946 | 70 | 0.627722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.18428 |
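A small sketch of the credential check used by valida_cadastro/validar_login above: passwords are stored as hex-encoded SHA-256 digests, and login re-hashes the submitted password before comparing:

from hashlib import sha256

stored = sha256('minha-senha-segura'.encode()).hexdigest()   # what valida_cadastro saves
submitted = 'minha-senha-segura'
print(sha256(submitted.encode()).hexdigest() == stored)      # True for a matching password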
| ce07ca9cf794023383e230a89ff64c045e2a41a9 | 2,737 | py | Python | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | ["MIT"] | 146 | 2020-02-20T02:29:55.000Z | 2022-01-21T09:49:40.000Z | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | ["MIT"] | 4 | 2020-03-08T03:24:16.000Z | 2021-03-26T05:34:09.000Z | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | ["MIT"] | 16 | 2020-02-26T04:45:40.000Z | 2021-05-08T03:52:38.000Z |
import torch
from transformers import BertTokenizer
from .base_tester import Tester
from textclf.utils.raw_data import create_tokenizer
from textclf.utils.create import create_instance
from textclf.config import DLTesterConfig
from textclf.data.dictionary import Dictionary
class DLTester(Tester):
    """Tester responsible for testing deep learning models."""
def __init__(self, config: DLTesterConfig):
super().__init__(config)
self.tokenizer = create_tokenizer(self.config.tokenizer)
self.use_cuda = self.config.use_cuda and torch.cuda.is_available()
print(f"Load checkpoint from {self.config.model_path}..")
checkpoint = torch.load(self.config.model_path)
self.model_conf, self.dictionary, self.label2id = \
checkpoint["info_for_test"]
self.model = create_instance(self.model_conf)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.classes = sorted(self.label2id, key=self.label2id.get)
def _preprocess(self, text):
text_tokenized = self.tokenizer(text)
if isinstance(self.dictionary, Dictionary):
text_processed = self.dictionary.tokens_to_tensor(
text_tokenized, max_len=self.config.max_len
)
text_len = (text_processed != self.dictionary.pad()).sum()
elif isinstance(self.dictionary, BertTokenizer):
text_processed = torch.LongTensor(
self.dictionary.encode(text_tokenized, add_special_tokens=True)[:-1])
max_len = self.config.max_len
pad_id = self.dictionary.pad_token_id
if len(text_processed) >= max_len:
text_processed = text_processed[:max_len]
else:
text_processed = torch.cat([
text_processed,
torch.ones(max_len-len(text_processed)).long()*pad_id
])
text_len = (text_processed != pad_id).sum()
if self.use_cuda:
text_processed = text_processed.cuda()
text_len = text_len.cuda()
return text_processed.unsqueeze(0), text_len.unsqueeze(0)
def predict_label(self, text):
text_processed, text_len = self._preprocess(text)
self.model.eval()
with torch.no_grad():
logits = self.model(text_processed, text_len)
label_id = torch.argmax(logits)
return self.classes[label_id]
def predict_prob(self, text):
text_processed, text_len = self._preprocess(text)
self.model.eval()
with torch.no_grad():
logits = self.model(text_processed, text_len)[0]
return torch.softmax(logits, dim=0).tolist()
def get_all_labels(self):
return self.classes
| 38.549296 | 85 | 0.652905 | 2,469 | 0.898799 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.044776 |
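A hedged usage sketch for DLTester above. The config fields used here are the attributes the class reads (tokenizer, model_path, max_len, use_cuda); how DLTesterConfig is actually constructed in textclf is an assumption:

from textclf.config import DLTesterConfig
from textclf.tester.dl_tester import DLTester

config = DLTesterConfig()        # assumption: default-constructible config
config.model_path = "ckpt.pt"    # hypothetical checkpoint saved with "info_for_test"
tester = DLTester(config)
print(tester.get_all_labels())                                  # label names sorted by id
print(tester.predict_label("an example sentence to classify"))  # single best label
print(tester.predict_prob("an example sentence to classify"))   # softmax probabilities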
| ce0890d24a487d376e2478b4bdab9793e27e76ac | 3,303 | py | Python | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | ["MIT"] | 8 | 2016-09-07T01:59:31.000Z | 2021-03-06T12:14:31.000Z | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | ["MIT"] | null | null | null | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | ["MIT"] | 4 | 2017-12-06T17:43:01.000Z | 2020-05-01T05:41:14.000Z |
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
import numpy as np
import os
from nanopores.tools import fields
from scipy.interpolate import interp1d
HOME = os.path.expanduser("~")
DATADIR = os.path.join(HOME, "Dropbox", "nanopores", "fields")
fields.set_dir(DATADIR)
data = fields.get_fields("pugh_diff3D_cross", bulkbc=True, rMolecule=2.0779)
def smooth3(l):
A=np.array(l)
B=A[:]
ker=np.array([1./3,1./3,1./3])
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
def smooth5(l):
A=np.array(l)
B=A[:]
ker=np.array([.2,.2,.2,.2,.2])
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
def smootha(l):
A=np.array(l)
B=A[:]
ker=np.array([10.,12.,15.,12.,10.])
ker=ker/np.sum(ker)
n=int(ker.shape[0]/2.)
for i in range(n,A.shape[0]-n):
B[i]=np.inner(A[i-n:i+n+1],ker)
return list(B)
x = [z[0] for z in data["x"]]
data, x = fields._sorted(data, x)
eps=5e-3
x_=x[:]
#x_.extend([1.,1.+eps,1.+2*eps,1.+3*eps])
x.extend([(x[-1]+1.)/2.,1.,1.+eps,1.+2*eps,1.+3*eps,1.+4*eps,1.+5*eps])
dstr = ["x", "y", "z"]
Dxx = [D[0][0] for D in data["D"]]
Dyy = [D[1][1] for D in data["D"]]
Dzz = [D[2][2] for D in data["D"]]
Dxx_ = [D[0][0] for D in data["D"]]
Dyy_ = [D[1][1] for D in data["D"]]
Dzz_ = [D[2][2] for D in data["D"]]
Dxx.extend([0.,0.,0.,0.,0.,0.,0.])
Dyy.extend([Dyy[-1]/2.,0.,0.,0.,0.,0.,0.])
Dzz.extend([Dzz[-1]/2.,0.,0.,0.,0.,0.,0.])
#Dxx_.extend([0.,0.,0.,0.])
#Dyy_.extend([0.,0.,0.,0.])
#Dzz_.extend([0.,0.,0.,0.])
Dxx=smooth5(smooth3(Dxx))
Dyy=smooth5(smooth3(Dyy))
Dzz=smooth5(smooth3(Dzz))
Dx = interp1d(x,Dxx)
Dy = interp1d(x,Dyy)
Dz = interp1d(x,Dzz)
DDxx = [0.]+[(Dxx[i+1]-Dxx[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDyy = [0.]+[(Dyy[i+1]-Dyy[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDzz = [0.]+[(Dzz[i+1]-Dzz[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
dDx = interp1d(x,DDxx)
dDy = interp1d(x,DDyy)
dDz = interp1d(x,DDzz)
if __name__=='__main__':
xc=np.linspace(0.,1.,100)
plt.plot(x_,Dxx_,color='blue',linestyle=':')
plt.scatter(x_,Dxx_,color='blue')
plt.scatter(x,Dxx,color='blue')
#plt.plot(x,Dxx,color='blue')
plt.plot(xc,Dx(xc),color='blue',label=r"$D_{%s%s}$" % (dstr[0], dstr[0]))
plt.scatter(x,DDxx,color='blue')
#plt.plot(x,DDxx,color='blue')
plt.plot(xc,dDx(xc),color='blue')
plt.plot(x_,Dyy_,color='red',linestyle=':')
plt.scatter(x_,Dyy_,color='red')
plt.scatter(x,Dyy,color='red')
#plt.plot(x,Dyy,color='red')
plt.plot(xc,Dy(xc),color='red',label=r"$D_{%s%s}$" % (dstr[1], dstr[1]))
plt.scatter(x,DDyy,color='red')
#plt.plot(x,DDyy,color='red')
plt.plot(xc,dDy(xc),color='red')
plt.plot(x_,Dzz_,color='green',linestyle=':')
plt.scatter(x_,Dzz_,color='green')
plt.scatter(x,Dzz,color='green')
#plt.plot(x,Dzz,color='green')
plt.plot(xc,Dz(xc),color='green',label=r"$D_{%s%s}$" % (dstr[2], dstr[2]))
plt.scatter(x,DDzz,color='green')
#plt.plot(x,DDzz,color='green')
plt.plot(xc,dDz(xc),color='green')
plt.xlabel('distance from pore center [nm]')
plt.ylabel('diffusivity relative to bulk')
plt.legend(loc='lower left')
plt.tight_layout()
plt.savefig('get_new.png')
| 28.230769 | 81 | 0.599758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 637 | 0.192855 |
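The smoothing helpers in the script above are simple box filters; note that B = A[:] on a NumPy array returns a view, so the original smooth3/smooth5 effectively filter in place. A self-contained, copy-based sketch of the 3-point version:

import numpy as np

def smooth3_copy(values):
    # 3-point moving average; endpoints are left unchanged, and a copy is used
    # so already-smoothed values do not feed into later ones.
    a = np.asarray(values, dtype=float)
    b = a.copy()
    for i in range(1, len(a) - 1):
        b[i] = a[i - 1:i + 2].mean()
    return list(b)

print(smooth3_copy([1.0, 1.0, 4.0, 1.0, 1.0]))  # [1.0, 2.0, 2.0, 2.0, 1.0]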
| ce08db747b526cc7a8cef1e5d71b70335cd56cae | 7,885 | py | Python | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | ["MIT"] | 32 | 2020-07-10T04:50:03.000Z | 2021-11-26T16:57:01.000Z | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | ["MIT"] | 5 | 2020-07-10T07:55:34.000Z | 2021-11-24T02:45:32.000Z | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | ["MIT"] | 3 | 2020-08-20T15:10:35.000Z | 2022-02-20T16:31:01.000Z |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : unseen_pairs_prepare.py
# Author : Honghua Dong
# Email : dhh19951@gmail.com
# Date : 02/04/2019
#
# Distributed under terms of the MIT license.
'''
To split dataset into {train/val/test}_split_{rel}_{attr}_{args}.pkl
It will produce a set of indexes stored in pkls and can be used by specifying
both --index-file-dir and --split args of the main.py program.
[NOTE] It may require more examples to fulfill the 6k,2k,2k split regime.
# Usage
python3 unseen_pairs_prepare.py $DATA_DIR $NUM -r $REL(s) -a $ATTR(s)
# [NOTE] '-indenp' can prepare all required split for a table result
# (only held-out a certain pair)
'''
import argparse
import collections
import numpy as np
import os
import os.path as osp
import pickle
from utils import get_rule_pairs_from_meta_matrix
# from IPython import embed
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', type=str, help='the dataset file')
parser.add_argument('num', type=int, help='the dataset size')
# parser.add_argument('--task', '-t', type=str, required=True,
# choices=['center_single', 'up_down', 'left_right', 'in_out',
# 'distribute_four', 'distribute_nine'], help='the task')
parser.add_argument('--relations', '-r', type=int, nargs='+', required=True,
help='the held-out relations for (rel, attr) pairs, 0:Const, 1:Pro, 2:Arith, 3:Union')
parser.add_argument('--attributes', '-a', type=int, nargs='+', required=True,
                    help='the held-out attributes for (rel, attr) pairs, 0:Num, 1:Pos, 2:Type, 3:Size, 4:Color')
parser.add_argument('--list-format', '-lf', action='store_true',
help='regard the rels and attrs as list of pairs, rather the tensor prod, if True')
parser.add_argument('--all-belong-to', '-all', action='store_true',
help='split to val when all (instead of any) rule_pairs of data belong to held-out-pairs, if True')
parser.add_argument('--dump-dir', '-du', type=str, required=True,
help='the dump dir for inds')
parser.add_argument('--use-visual-inputs', '-v', action='store_true',
help='Use visual inputs if True')
parser.add_argument('--independent-split', '-indenp', action='store_true',
help='regard the held-out pairs independently, and split for each of them')
# exclude
parser.add_argument('--exclude-relations', '-er', type=int, nargs='+', default=[],
help='the exclude relations for (rel, attr) pairs, 0:Const, 1:Pro, 2:Arith, 3:Union')
parser.add_argument('--exclude-attributes', '-ea', type=int, nargs='+', default=[],
help='the exclude attributes for (rel, attr) pairs, 0:Num, 1:Pos, 2:Type, 3:Size, 4:Color')
parser.add_argument('--exclude-list-format', '-elf', action='store_true',
help='regard the ex-rels and ex-attrs as list of pairs, rather the tensor prod, if True')
args = parser.parse_args()
ORIGIN_DATA_SPLIT = {
'train': [0, 1, 2, 3, 4, 5],
'val': [6, 7],
'test': [8, 9],
}
# relations are represented by a 8x9 meta matrix
# Meta matrix format
# ["Constant", "Progression", "Arithmetic", "Distribute_Three",
# "Number", "Position", "Type", "Size", "Color"]
# check whether this data-point should be held out
def held_out(meta_matrix, held_out_pairs, all_belong_to=False):
flag = True
rule_pairs = get_rule_pairs_from_meta_matrix(meta_matrix)
for rule in rule_pairs:
if rule in held_out_pairs:
if not all_belong_to: # any belong to
return True
else:
# not belong to, so the flag becomes false
flag = False
return flag
def to_str(list_of_int):
s = ''
for i in list_of_int:
s += str(i)
return s
def get_dump_name(relations, attributes, all_belong_to=False, list_format=False):
dump_name = 'split_r{}_a{}'.format(to_str(relations), to_str(attributes))
if all_belong_to:
dump_name += '_all'
else:
dump_name += '_any'
if list_format:
dump_name += '_lf'
if args.use_visual_inputs:
dump_name += '_visual'
return dump_name
def dump_dataset(dump_dir, inds, name):
for mode in ['train', 'val', 'test']:
file_name = osp.join(dump_dir, '{}_{}_inds.pkl'.format(mode, name))
with open(file_name, 'wb') as f:
pickle.dump(inds[mode], f)
print('[index file] {} saved, {} inds'.format(
file_name, len(inds[mode])))
def process(inds, bins, exclude=[], request=[6000, 2000, 2000]):
datasets = [collections.defaultdict(list) for i in range(len(bins))]
all_belong_to = args.all_belong_to and not args.independent_split
train_val_flag = False
test_flag = False
for i, ind in enumerate(inds):
name = ind + '.npz'
file_name = osp.join(args.data_dir, name)
data = np.load(file_name)
meta_matrix = data['meta_matrix']
for held_out_pairs, dataset in zip(bins, datasets):
if len(exclude) > 0 and held_out(meta_matrix, exclude):
# exclude
pass
elif held_out(meta_matrix, held_out_pairs,
all_belong_to=all_belong_to):
if len(dataset['test']) < request[2]:
dataset['test'].append(name)
else:
test_flag = True
else:
if len(dataset['train']) < request[0]:
dataset['train'].append(name)
elif len(dataset['val']) < request[1]:
dataset['val'].append(name)
else:
train_val_flag = True
if not args.independent_split and train_val_flag and test_flag:
break
if i % 1000 == 0:
print('nr_examples', i)
for held_out_pairs, dataset in zip(bins, datasets):
print('held {}, train {}, val {}, test {}'.format(
held_out_pairs, len(dataset['train']),
len(dataset['val']), len(dataset['test'])))
return datasets
def get_held_out_pairs(rels, attrs, list_format=False):
held_out_pairs = []
if list_format:
assert len(rels) == len(attrs), \
'in the list_format, nr_rel=nr_attr should holds'
for rel, attr in zip(rels, attrs):
held_out_pairs.append((rel, attr))
else:
for rel in rels:
for attr in attrs:
held_out_pairs.append((rel, attr))
return held_out_pairs
def main():
held_out_pairs = get_held_out_pairs(args.relations,
args.attributes, args.list_format)
exclude_pairs = get_held_out_pairs(args.exclude_relations,
args.exclude_attributes, args.exclude_list_format)
partition = ['' for i in range(10)]
for k in ORIGIN_DATA_SPLIT.keys():
for i in ORIGIN_DATA_SPLIT[k]:
partition[i] = k
if args.num % 10 != 0:
        print('[Warning] dataset size {} is not a multiple of 10'.format(
args.num))
n = args.num // 10
inds = []
for i in range(n):
for j in range(10):
ind = i * 10 + j
file_prefix = 'RAVEN_{}_{}'.format(ind, partition[j])
inds.append(file_prefix)
bins = []
names = []
if args.independent_split:
for r, a in held_out_pairs:
name = get_dump_name([r], [a])
bins.append([(r, a)])
names.append(name)
else:
name = get_dump_name(args.relations, args.attributes,
args.all_belong_to, args.list_format)
bins.append(held_out_pairs)
names.append(name)
datasets = process(inds, bins, exclude=exclude_pairs)
print('dump_dir is {}'.format(args.dump_dir))
os.makedirs(args.dump_dir, exist_ok=True)
for dataset, name in zip(datasets, names):
print('the name of the dataset is: {}'.format(name))
dump_dataset(args.dump_dir, dataset, name)
if __name__ == '__main__':
main()
| 36.50463 | 103 | 0.625491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,689 | 0.341027 |
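A self-contained illustration of get_held_out_pairs from the script above (the function is copied here because the script parses command-line arguments at import time): by default the relations and attributes are combined as a tensor product of (rel, attr) pairs, while list_format pairs them element-wise.

def get_held_out_pairs(rels, attrs, list_format=False):
    if list_format:
        assert len(rels) == len(attrs), \
            'in the list_format, nr_rel=nr_attr should hold'
        return list(zip(rels, attrs))
    return [(rel, attr) for rel in rels for attr in attrs]

print(get_held_out_pairs([1, 2], [3, 4]))                    # [(1, 3), (1, 4), (2, 3), (2, 4)]
print(get_held_out_pairs([1, 2], [3, 4], list_format=True))  # [(1, 3), (2, 4)]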
| ce0afbb54da9c5cda767047eb0fb4add36a18205 | 1,533 | py | Python | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | ["MIT"] | null | null | null | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | ["MIT"] | null | null | null | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | ["MIT"] | 1 | 2020-10-22T10:31:00.000Z | 2020-10-22T10:31:00.000Z |
from datetime import datetime
from sqlalchemy import UniqueConstraint
from apis.initialization import db
class Movie(db.Model):
""" Movie Model for storing movie details"""
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
ninety_nine_popularity = db.Column(db.Float, index=True, nullable=False)
name = db.Column(db.String(100), index=True, nullable=False)
director = db.Column(db.String(100), index=True, nullable=False)
imdb_score = db.Column(db.Float, index=True, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
is_deleted = db.Column(db.Boolean, default=False, nullable=False)
__table_args__ = (UniqueConstraint('name', 'director', name='move_name_director_name'),)
# relations
genres = db.relationship(
'MovieGenre', backref='movie',
primaryjoin='and_(MovieGenre.movie_id==Movie.id, MovieGenre.is_deleted==False)',
lazy='dynamic'
)
def __repr__(self):
return '<Movie %r>' % self.name
class MovieGenre(db.Model):
"""MovieGenre Model for genres of movie"""
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(100), index=True, nullable=False)
movie_id = db.Column(db.Integer, db.ForeignKey('movie.id'))
is_deleted = db.Column(db.Boolean, default=False, nullable=False)
def __repr__(self):
return '<Movie %r>' % self.name
| 35.651163 | 92 | 0.703849 | 1,420 | 0.926288 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.174168 |
| ce0ca8f2fe98f3ab332870eee82d60c59dac39aa | 719 | py | Python | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | ["BSD-3-Clause"] | null | null | null | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | ["BSD-3-Clause"] | null | null | null | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | ["BSD-3-Clause"] | null | null | null |
# from distutils.core import setup
from setuptools import setup, find_packages
setup(
name='py-toolkit',
version='0.0.3',
packages=find_packages(exclude=("tests",)),
url='https://github.com/DewMaple/toolkit',
description='python toolkit for common usage',
author='DewMaple',
author_email='dewmaple@gmail.com',
license='',
keywords=['python', "schema meta"],
classifiers=['Programming Language :: Python :: 3.6'],
project_urls={
'Bug Reports': 'https://github.com/DewMaple/toolkit/issues',
'Source': 'https://github.com/DewMaple/toolkit',
},
tests_require=[
"pytest",
"pytest-cov",
"pytest-xprocess",
],
zip_safe=True
)
| 28.76 | 68 | 0.631433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.502086 |
| ce0da279383850a16ffabcd3fe15ce7341142e46 | 3,934 | py | Python | ui.py | xKynn/zerox-assistant | 292525bf55cd08f930338310869dba1c25a00cf4 | ["MIT"] | 1 | 2021-11-07T14:49:13.000Z | 2021-11-07T14:49:13.000Z | ui.py | xKynn/pyTunes | 292525bf55cd08f930338310869dba1c25a00cf4 | ["MIT"] | null | null | null | ui.py | xKynn/pyTunes | 292525bf55cd08f930338310869dba1c25a00cf4 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hnc.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self, cc, tm, parent=None):
super().__init__(parent)
self.cc = cc
self.setupUi(self)
self.tm = tm
self.connect_signals()
def update_stlbl(self):
if self.cc.poll():
msg = self.cc.recv()
self.statuslbl.setText(msg)
def comm(self):
self.cc.send("listen")
def connect_signals(self):
self.pushButton.clicked.connect(self.comm)
self.tm.timeout.connect(self.update_stlbl)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(365, 403)
MainWindow.setStyleSheet("background-color: rgb(53, 53, 53);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(110, 230, 150, 150))
self.pushButton.setMinimumSize(QtCore.QSize(40, 40))
self.pushButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton.setStyleSheet("QPushButton {\n"
" color: rgb(39, 212, 111);\n"
" border: 2px solid #555;\n"
" border-radius: 75px;\n"
" border-style: outset;\n"
" background:rgb(58, 255, 81);\n"
" padding: 5px;\n"
" }\n"
"\n"
"QPushButton:hover {\n"
" background: rgb(63, 231, 74)\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: rgb(53, 221, 64)\n"
" }")
self.pushButton.setText("")
self.pushButton.setIconSize(QtCore.QSize(40, 40))
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(29, 30, 81, 31))
self.label.setStyleSheet("font: 25 20pt \"Bahnschrift Light Condensed\";\n"
"color: rgb(96, 255, 60);")
self.label.setObjectName("label")
self.statuslbl = QtWidgets.QLabel(self.centralwidget)
self.statuslbl.setGeometry(QtCore.QRect(30, 180, 500, 31))
self.statuslbl.setStyleSheet("font: 25 15pt \"Bahnschrift Light Condensed\";\n"
"color: rgb(96, 255, 60);")
self.statuslbl.setText("")
self.statuslbl.setObjectName("statuslbl")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", " "))
self.label.setText(_translate("MainWindow", "pyTunes"))
self.statuslbl.setText(_translate("MainWindow", "Press the button to begin.."))
def uifunc(cc):
app = QtWidgets.QApplication([])
tm = QtCore.QTimer()
win = Ui_MainWindow(cc, tm)
win.show()
tm.start(1000)
app.exec()
| 42.76087 | 87 | 0.543976 | 3,444 | 0.875445 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.25089 |
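A hedged sketch of how uifunc above might be driven: cc is used with poll()/recv()/send(), which matches one end of a multiprocessing.Pipe; whether the original project wires it up exactly this way is an assumption.

import multiprocessing as mp
from ui import uifunc  # assumes ui.py is importable from the working directory

if __name__ == "__main__":
    parent_conn, child_conn = mp.Pipe()
    gui = mp.Process(target=uifunc, args=(child_conn,))
    gui.start()
    # The UI sends "listen" when the button is pressed; reply with a status string
    # that update_stlbl() will show in the status label.
    if parent_conn.poll(30):
        print(parent_conn.recv())        # expected: "listen"
        parent_conn.send("Listening...")
    gui.join()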
| ce0dbcf0753017f4de48e972ead2feb9166619cc | 6,373 | py | Python | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | ["BSD-3-Clause"] | 2 | 2018-05-13T13:00:10.000Z | 2018-05-13T13:00:12.000Z | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | ["BSD-3-Clause"] | null | null | null | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | ["BSD-3-Clause"] | null | null | null |
"""
data preprocessing and get batch
"""
import os
import re
import logging
import itertools
from collections import Counter
import numpy as np
import pandas as pd
class DataLoad(object):
logging.getLogger().setLevel(logging.INFO)
def __init__(self, data_path, batch_size, num_epochs, dev_sample_rate, forced_seq_len=None):
"""
params:
data_path: source data path
            mode: 'train' or 'dev'
dev_sample_rate: percentage of the training data to use for validation
"""
self.data_path = data_path
self.batch_size = batch_size
self.num_epochs = num_epochs
self.forced_seq_len = forced_seq_len
self.dev_sample_rate = dev_sample_rate
self._load_data()
def train_batch_iter(self, shuffle=True):
"""
params:
returns:
"""
x, y, data_size = self._split_train_dev('train')
num_batchs_per_epoch = data_size // self.batch_size + 1
for _ in range(self.num_epochs):
if shuffle:
shuffled_indices = np.random.permutation(np.arange(data_size))
x, y = x[shuffled_indices], y[shuffled_indices]
for i in range(num_batchs_per_epoch):
start_idx = i * self.batch_size
end_idx = min((i+1) * self.batch_size, data_size)
yield x[start_idx:end_idx], y[start_idx:end_idx]
def get_dev_data(self, shuffle=True):
"""
params:
returns:
"""
dev_x, dev_y, dev_size = self._split_train_dev('dev')
if shuffle:
shuffled_indices = np.random.permutation(np.arange(dev_size))
dev_x, dev_y = dev_x[shuffled_indices], dev_y[shuffled_indices]
return dev_x, dev_y
@staticmethod
def _clean_str(s):
s = re.sub(r"[^A-Za-z0-9:(),!?\'\`]", " ", s)
s = re.sub(r" : ", ":", s)
s = re.sub(r"\'s", " \'s", s)
s = re.sub(r"\'ve", " \'ve", s)
s = re.sub(r"n\'t", " n\'t", s)
s = re.sub(r"\'re", " \'re", s)
s = re.sub(r"\'d", " \'d", s)
s = re.sub(r"\'ll", " \'ll", s)
s = re.sub(r",", " , ", s)
s = re.sub(r"!", " ! ", s)
s = re.sub(r"\(", " \( ", s)
s = re.sub(r"\)", " \) ", s)
s = re.sub(r"\?", " \? ", s)
s = re.sub(r"\s{2,}", " ", s)
return s.strip().lower()
def _load_data(self):
"""
params:
returns:
x: 2D np.array
samples, dimension is (N, self.forced_seq_len)
y: 2D np.array
labels, dimension is (N, len(labels))
token2id: python dict object
id2token: python dict object
df: pd.DataFrame
labels: 1D np.array
"""
df = pd.read_csv(self.data_path)
selected_cols = ['Descript', 'Category']
df = df.loc[:, selected_cols].dropna(axis=0, how='any')
# construct label one-hot vectors
labels = np.unique(
            np.array(df.loc[:, selected_cols[1]], dtype=object))
        one_hot = np.zeros([len(labels), len(labels)], float)
np.fill_diagonal(one_hot, 1)
        # {label: one-hot vector for this label}
labels2vec = dict(zip(labels, one_hot))
raw_x = np.array(df.loc[:, selected_cols[0]].apply(
            lambda x: DataLoad._clean_str(x).split(' ')), dtype=object)
raw_y = df.loc[:, selected_cols[1]].apply(
lambda y: labels2vec[y]).tolist()
# padding sentence
padded_x = self._pad_sentence(raw_x)
token2id = self._build_vocab(padded_x)
x = []
for sent in padded_x:
xs = []
for token in sent:
if token not in token2id:
token = '<OOV>'
xs.append(token2id[token])
x.append(xs)
self.x = np.array(x, dtype=np.int64)
        self.y = np.array(raw_y, dtype=float)
def _split_train_dev(self, mode):
# split data into train set or dev set
data_size = self.x.shape[0]
dev_size = int(data_size * self.dev_sample_rate)
train_size = data_size - dev_size
# maybe using cross-validation is better
if mode == 'train':
return self.x[:train_size], self.y[:train_size], train_size
elif mode == 'dev':
            # take the last dev_size examples so the dev set does not overlap the train set
            return self.x[train_size:], self.y[train_size:], dev_size
else:
            raise ValueError('mode should be train or dev.')
def _pad_sentence(self, sentences, padding_word='<PAD>'):
if self.forced_seq_len is None:
# forced_seq_len = max length of all sentences
self.forced_seq_len = max([len(sent) for sent in sentences])
padded_sentences = []
for sent in sentences:
if len(sent) < self.forced_seq_len:
sent.extend([padding_word] * (self.forced_seq_len-len(sent)))
padded_sent = sent
elif len(sent) > self.forced_seq_len:
                logging.info('Sentence is longer than forced_seq_len; '
                             'it will be truncated.')
padded_sent = sent[:self.forced_seq_len]
padded_sentences.append(padded_sent)
return padded_sentences
def _build_vocab(self, sentences):
tokens_count = Counter(itertools.chain(*sentences))
vocab = [token[0]
for token in tokens_count.most_common(self.forced_seq_len)]
        vocab += ['<OOV>']  # out of vocabulary
token2id = {token: i for i, token in enumerate(vocab)}
self.vocab_size = len(vocab)
return token2id
if __name__ == '__main__':
params = {
'data_path': '../dataset/San_Francisco_Crime/train.csv.zip',
'batch_size': 32,
'num_epochs': 200,
'forced_seq_len': 14,
'dev_sample_rate':0.05
}
data = DataLoad(data_path=params['data_path'],
batch_size=params['batch_size'],
num_epochs=params['num_epochs'],
forced_seq_len=params['forced_seq_len'],
dev_sample_rate=params['dev_sample_rate'])
batches = data.train_batch_iter()
batch_x, batch_y = next(batches)
# print(len(batches))
print(batch_x.shape)
print(batch_y.shape)
| 35.209945 | 100 | 0.557822 | 5,540 | 0.869292 | 658 | 0.103248 | 613 | 0.096187 | 0 | 0 | 1,523 | 0.238977 |
| ce0e92d74f72ee04e6c2fbb871130425f6c911e3 | 11,629 | py | Python | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | null | null | null | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | null | null | null | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Sergio Callegari
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
"""
Acoustic weighting functions (:mod:`pydsm.audio_weightings`)
============================================================
Some standard acoustic weighting functions.
This module includes the A-, B- and C-weightings from the
ANSI Standards S1.4-1983 and S1.42-2001.
It also includes the D-weighting from the now withdrawn IEC 537.
It also includes the F-weighting proposed by R. A. Wannamaker.
The weighting functions can be expressed either in terms of
acoustic power or in terms of signal amplitude.
The weighting functions are also available in terms of a filter-based
implementation. In this case, be careful since no normalization is
present so that the gain at 1 kHz can be arbitrary. The filter
transfer function is referred to a signal amplitude weighting.
.. currentmodule:: pydsm.audio_weightings
Weighting functions
-------------------
.. autosummary::
:toctree: generated/
a_weighting
b_weighting
c_weighting
d_weighting
f_weighting
Filter implementation of weighting functions
--------------------------------------------
.. autodata:: a_zpk
:annotation:
.. autodata:: b_zpk
:annotation:
.. autodata:: c_zpk
:annotation:
.. autodata:: d_zpk
:annotation:
.. autodata:: f_zpk
:annotation:
Normalization constants
-----------------------
.. autodata:: a_weighting_gain
:annotation:
.. autodata:: b_weighting_gain
:annotation:
.. autodata:: c_weighting_gain
:annotation:
.. autodata:: d_weighting_gain
:annotation:
.. autodata:: f_weighting_gain
:annotation:
Notes
-----
The ANSI and IEC weightings are also described in Wikipedia [1]
and summarized in some illustrative web pages such as [2]_ and
[3]_. The F-weighting is documented in [4]_.
The filter-based implementation of the F-weighting is so high-order that
evaluation of the transfer function may require special care.
.. [1] Wikipedia (http://en.wikipedia.org/wiki/A-weighting)
.. [2] Cross spectrum (http://www.cross-spectrum.com/audio/weighting.html)
.. [3] Product Technology Partners "Noise Measurement Briefing"
(http://www.ptpart.co.uk/noise-measurement-briefing/)
.. [4] Robert A. Wannamaker "Psychoacoustically Optimal Noise
Shaping," J. Audio Eng. Soc., Vol. 40 No. 7/8 1992 July/August
"""
from __future__ import division, print_function
import numpy as np
__all__ = ["a_zpk", "a_weighting", "b_zpk", "b_weighting",
"c_zpk", "c_weighting", "d_zpk", "d_weighting",
"f_zpk", "f_weighting"]
a_zpk = (2*np.pi*np.asarray([0., 0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -107.7, -739.9, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""A-weighting filter in zpk form."""
b_zpk = (2*np.pi*np.asarray([0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -158.5, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""B-weighting filter in zpk form."""
c_zpk = (2*np.pi*np.asarray([0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""C-weighting filter in zpk form."""
d_zpk = (2*np.pi*np.asarray([0., -519.8+876.2j, -519.8-876.2j]),
2*np.pi*np.asarray([-282.7, -1160., -1712+2628j, -1712-2628j]),
91104.32)
"""D-weighting filter in zpk form."""
f_zpk = (2*np.pi*np.asarray([0., 0., 0.,
-580+1030j, -580-1030j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j]),
2*np.pi*np.asarray([-180., -180., -180.,
-1630., -1630.,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j]),
1.6810544531883432e+207)
"""F-weighting filter in zpk form."""
# Note: evaluating the transfer function of f_zpk may require special care
# since the high order implies that for many frequency values both the
# numerator and the denominator take very large values (in magnitude). Taking
# the ratio of large complex values may lead to overflow in numpy even if
# individually the numerator, the denominator and the result should not
# overflow.
def a_weighting(f, normal=True, power=True):
"""Returns the A-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return a_weighting(f, normal, power=False)**2
w = (12200.0**2*f**4)/((f**2+20.6**2) *
np.sqrt((f**2+107.7**2) *
(f**2+737.9**2))*(f**2+12200.0**2))
return w if not normal else w*a_weighting_gain
a_weighting_gain = 1/a_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to A-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def b_weighting(f, normal=True, power=True):
"""Returns the B-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return b_weighting(f, normal, power=False)**2
w = (12200.0**2*f**3)/((f**2+20.6**2) *
np.sqrt(f**2+158.5**2)*(f**2+12200.0**2))
return w if not normal else w*b_weighting_gain
b_weighting_gain = 1/b_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to B-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def c_weighting(f, normal=True, power=True):
"""Returns the C-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return c_weighting(f, normal, power=False)**2
w = (12200.0**2*f**2)/((f**2+20.6**2)*(f**2+12200.0**2))
return w if not normal else w*c_weighting_gain
c_weighting_gain = 1/c_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to C-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def d_weighting(f, normal=True, power=True):
"""Returns the D-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz. This parameter is ignored, since this weighting function
is always normalized.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return d_weighting(f, normal, power=False)**2
def h(f):
return (((1037918.48-f**2)**2+1080768.16*f**2) /
((9837328.0-f**2)**2+11723776.0*f**2))
return (f/6.8966888496476E-5 *
np.sqrt(h(f)/((f**2+79919.29)*(f**2+1345600.0))))
d_weighting_gain = 1.
"""Normalization gain to apply to D-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def f_weighting(f, normal=True, power=True):
"""Returns the F-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
Notes
-----
The F-weighting function is documented in [1]_.
.. [1] Robert A. Wannamaker "Psychoacoustically Optimal Noise Shaping,"
J. Audio Eng. Soc., Vol. 40 No. 7/8 1992 July/August
"""
if not power:
return np.sqrt(f_weighting(f, normal, power=True))
fx = f/1000.
g = 2.536e-5
z1 = fx**2
z2 = ((0.58**2)+(1.03**2)-z1)**2 + 4.0*(0.58**2)*z1
z3 = ((3.18**2)+(8.75**2)-z1)**2 + 4.0*(3.18**2)*z1
p1 = 0.18**2+z1
p2 = 1.63**2+z1
p3 = ((2.51**2)+(3.85**2)-z1)**2 + 4.0*(2.51**2)*z1
p4 = ((6.62**2)+(14.29**2)-z1)**2 + 4.0*(6.62**2)*z1
w = ((g*((z1**3)*z2*(z3**3)) /
((p1**3)*(p2**2)*(p3**4))*((1e5/p4)**20)))
return w if not normal else w*f_weighting_gain
# Set normalization gain
f_weighting_gain = 1/f_weighting(1000, normal=False, power=True)
"""Normalization gain to apply to F-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
| 33.707246 | 78 | 0.600224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,058 | 0.606931 |
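A quick sanity check on the weighting functions above: the A-, B- and C-weightings are normalised to unit gain at 1 kHz (the D-weighting is defined as already normalised), so evaluating them at 1000 Hz should return 1 up to rounding. This assumes the pydsm package is installed.

from pydsm.audio_weightings import a_weighting, b_weighting, c_weighting

for weighting in (a_weighting, b_weighting, c_weighting):
    # default arguments: normal=True, power=True
    print(weighting.__name__, weighting(1000.0))  # expected: 1.0 (up to float rounding)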
| ce0ea9cd4625661b89c457658572716294eaef3b | 1,258 | py | Python | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | ["BSD-3-Clause"] | 1 | 2022-03-27T09:16:22.000Z | 2022-03-27T09:16:22.000Z | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | ["BSD-3-Clause"] | null | null | null | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | ["BSD-3-Clause"] | null | null | null |
import os
import nibabel
import numpy as np
import random
from scipy import ndimage
import SimpleITK as sitk
def load_nifty_volume_as_array(filename, with_header = False):
"""
load nifty image into numpy array, and transpose it based on the [z,y,x] axis order
The output array shape is like [Depth, Height, Width]
inputs:
filename: the input file name, should be *.nii or *.nii.gz
        with_header: return affine and header information
outputs:
data: a numpy data array
"""
img = nibabel.load(filename)
data = img.get_data()
data = np.transpose(data, [2,1,0])
if(with_header):
return data, img.affine, img.header
else:
return data
def save_array_as_nifty_volume(data, filename, reference_name = None):
"""
save a numpy array as nifty image
inputs:
data: a numpy array with shape [Depth, Height, Width]
        filename: the output file name
reference_name: file name of the reference image of which affine and header are used
outputs: None
"""
img = sitk.GetImageFromArray(data)
if(reference_name is not None):
img_ref = sitk.ReadImage(reference_name)
img.CopyInformation(img_ref)
sitk.WriteImage(img, filename)
| 29.952381 | 92 | 0.678855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.486486 |
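A hedged round-trip sketch for the helpers above: load a NIfTI volume as a [Depth, Height, Width] array and write it back while copying affine/header information from a reference image. The file names are placeholders, and the import path simply mirrors the repository layout.

from data_custom.data_load import load_nifty_volume_as_array, save_array_as_nifty_volume

volume = load_nifty_volume_as_array("subject01_T1.nii.gz")    # hypothetical input file
print(volume.shape)                                           # (Depth, Height, Width)
save_array_as_nifty_volume(volume, "subject01_T1_copy.nii.gz",
                           reference_name="subject01_T1.nii.gz")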
| ce0ffdd605799570a773639f27bdbc3a5cc51708 | 8,802 | py | Python | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | ["MIT"] | 1 | 2016-09-20T09:07:34.000Z | 2016-09-20T09:07:34.000Z | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | ["MIT"] | null | null | null | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | ["MIT"] | 1 | 2016-09-20T09:07:37.000Z | 2016-09-20T09:07:37.000Z |
# project/server/user/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, url_for, \
redirect, flash, request, session
from flask_login import login_user, logout_user, login_required
from project.server import bcrypt, db
from project.server.models import User, Data, prevod
from project.server.user.forms import LoginForm, RegisterForm, UploadForm, ConnectForm
import xml.etree.ElementTree as ET
import pymysql
import pymysql.cursors
import tempfile
import os
################
#### config ####
################
user_blueprint = Blueprint('user', __name__, )
################
#### routes ####
################
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if form.validate_on_submit():
user = User(
email=form.email.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thank you for registering.', 'success')
return redirect(url_for("user.home"))
return render_template('user/register.html', form=form)
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(
user.password, request.form['password']):
login_user(user)
flash('You are logged in. Welcome!', 'success')
return redirect(url_for('main.home'))
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/login.html', form=form)
return render_template('user/login.html', title='Please Login', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
logout_user()
flash('You were logged out. Bye!', 'success')
return redirect(url_for('main.home'))
@user_blueprint.route('/xmlparser', methods=['POST', 'GET'])
@login_required
def xmlparser():
try:
host = session['db_host']
session.pop('db_host', None)
user = session['db_user']
session.pop('db_user', None)
password = session['db_password']
session.pop('db_password', None)
db = session['db_database']
session.pop('db_database', None)
destination = session['file']
session.pop('file', None)
file = open(destination, "r")
tree = ET.parse(file)
root = tree.getroot()
tagy = []
radky = 0
for child in root:
sloupce = 0
for tag in child:
if root[radky][sloupce].tag not in tagy:
tagy.append(prevod(root[radky][sloupce].tag))
sloupce += 1
radky += 1
data = Data(radky)
radkyX = 0
attributy1 = 0
attributy2 = 0
for child in root:
sloupceY = 0
if child.attrib != "{}":
for key in child.attrib:
if not key in tagy:
tagy.append(key)
attributy1 += 1
data.setData(radkyX, key, child.attrib[key])
for tag in child:
data.setData(radkyX, root[radkyX][sloupceY].tag, root[radkyX][sloupceY].text)
if tag.attrib != "{}":
for key in tag.attrib:
if not key in tagy:
tagy.append(key)
attributy2 += 1
data.setData(radkyX, key, tag.attrib[key])
sloupceY += 1
radkyX += 1
sql = 'INSERT INTO `' + prevod(session['db_table']) + '` ('
for tag in tagy:
if request.form[tag] != "":
if sql == 'INSERT INTO `' + prevod(session['db_table']) + '` (':
sql += prevod(request.form[tag])
else:
sql += ',' + prevod(request.form[tag])
session.pop('db_table', None)
sql += ') VALUES '
for x in range(0,radky):
prvni = True
if x == 0:
sql += "("
else:
sql += ",("
for tag in tagy:
if request.form[tag] != "":
if prvni is True:
sql += "'" + prevod(data.getData(x,tag)) + "'"
prvni = False
else:
sql += ",'" + prevod(data.getData(x,tag)) + "'"
sql += ")"
sql += ";"
conn = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.DictCursor)
a = conn.cursor()
file.close()
os.remove(destination)
a.execute(sql)
conn.commit()
flash('Success', 'success')
return render_template('main/home.html')
except:
flash('Unexpected error', 'danger')
return redirect(url_for("user.upload"))
@user_blueprint.route('/upload', methods=['POST', 'GET'])
@login_required
def upload():
if 'db_host' not in session:
form = ConnectForm(request.form)
return render_template('user/connect.html',form=form)
form = UploadForm(request.form)
return render_template('user/upload.html',form=form)
@user_blueprint.route('/connect', methods=['POST', 'GET'])
@login_required
def connect():
session['db_host'] = request.form['host']
session['db_user'] = request.form['user']
session['db_password'] = request.form['password']
session['db_database'] = request.form['database']
session['db_table'] = request.form['table']
return redirect(url_for("user.upload"))
@user_blueprint.route('/process', methods=['POST', 'GET'])
@login_required
def process():
if 'file' not in request.files:
flash('No file part', 'danger')
return redirect(url_for("user.upload"))
file = request.files['file']
if file.filename == '':
flash('No selected file', 'danger')
return redirect(url_for("user.upload"))
if file.filename.rsplit('.', 1)[1].lower() not in ['xml']:
flash('This is not .xml file', 'danger')
return redirect(url_for("user.upload"))
try:
filename = file.filename
target = tempfile.gettempdir()
destination = "/".join([target, filename])
file.save(destination)
session['file'] = destination
file = open(destination,"r")
tree = ET.parse(file)
root = tree.getroot()
tagy = []
radky = 0
for child in root:
sloupce = 0
for tag in child:
if root[radky][sloupce].tag not in tagy:
tagy.append(root[radky][sloupce].tag)
sloupce += 1
radky += 1
data = Data(radky)
radkyX = 0
attributy1 = 0
attributy2 = 0
for child in root:
sloupceY = 0
if child.attrib != "{}":
for key in child.attrib:
if not key in tagy:
tagy.append(key)
attributy1 += 1
data.setData(radkyX, key, child.attrib[key])
for tag in child:
data.setData(radkyX, root[radkyX][sloupceY].tag, root[radkyX][sloupceY].text)
if tag.attrib != "{}":
for key in tag.attrib:
if not key in tagy:
tagy.append(key)
attributy2 += 1
data.setData(radkyX, key, tag.attrib[key])
sloupceY += 1
radkyX += 1
sloupce = sloupceY + attributy1 + attributy2
conn = pymysql.connect(host = session['db_host'], user = session['db_user'], password = session['db_password'],
db=session['db_database'], cursorclass=pymysql.cursors.DictCursor)
a = conn.cursor()
sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + prevod(session['db_table']) + "'"
a.execute(sql)
result = a.fetchall()
db_tagy = []
for column in result:
db_tagy.append(column['COLUMN_NAME'])
except:
session.pop('db_host', None)
session.pop('db_user', None)
session.pop('db_password', None)
session.pop('db_database', None)
session.pop('db_table', None)
flash('Unexpected error', 'danger')
return redirect(url_for("user.upload"))
return render_template('user/xmlparser.html', data=data, tagy=tagy, db_tagy=db_tagy, sloupce=sloupce)
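# Minimal wiring sketch: how this blueprint would typically be attached to a
# Flask application object created elsewhere (the `app` argument is assumed).
def _register_user_blueprint(app):
    app.register_blueprint(user_blueprint)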
| 32.360294
| 123
| 0.538287
| 0
| 0
| 0
| 0
| 8,104
| 0.9207
| 0
| 0
| 1,447
| 0.164394
|
ce10a73d0706d4c9c4b471fbf0c74937c35cf813
| 5,477
|
py
|
Python
|
active_feature_extractor/experiments/linear_q_learner.py
|
benblack769/atari_q_learner
|
adae53e91ec6013ffaeefc9a058c7ab933593cea
|
[
"MIT"
] | null | null | null |
active_feature_extractor/experiments/linear_q_learner.py
|
benblack769/atari_q_learner
|
adae53e91ec6013ffaeefc9a058c7ab933593cea
|
[
"MIT"
] | null | null | null |
active_feature_extractor/experiments/linear_q_learner.py
|
benblack769/atari_q_learner
|
adae53e91ec6013ffaeefc9a058c7ab933593cea
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from torch import nn
def get_avg_discounted_value(next_dones, next_mask, next_rewards, next_value_preds, gamma):
minibatch_size, td_lambda = next_dones.shape
lambda_gamma = torch.cumprod(torch.ones(td_lambda)*gamma,dim=0) * (1/gamma)
# all dones also must be masks, by definition
next_value_mask = next_mask * (1 - next_dones)
# mask to apply to values
lambda_mask = torch.cumprod(next_value_mask, dim=1)
# still reward agents if they are done that step
lambda_rew_mask = lambda_mask + next_dones
lambda_rews = lambda_rew_mask*torch.cumsum(next_rewards,dim=1)
lambda_values = lambda_mask * next_value_preds
num_rews = torch.sum(lambda_rew_mask, dim=1)
discounted_values = lambda_gamma * (lambda_rews + gamma * lambda_values)
avg_discounted_value = torch.sum(discounted_values, dim=1) / (num_rews + 1e-10)
return avg_discounted_value
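# Minimal usage sketch with minibatch_size=2 and td_lambda=3; every value below
# is made up purely to illustrate the expected tensor shapes.
def _example_avg_discounted_value():
    next_dones = torch.zeros(2, 3)
    next_mask = torch.ones(2, 3)
    next_rewards = torch.tensor([[1.0, 0.0, 0.5], [0.2, 0.2, 0.2]])
    next_value_preds = torch.tensor([[0.9, 0.8, 0.7], [0.1, 0.1, 0.1]])
    # returns a tensor of shape (2,): one averaged multi-step value target per sample
    return get_avg_discounted_value(next_dones, next_mask, next_rewards,
                                    next_value_preds, gamma=0.99)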
# class DefaultModel(nn.Model):
# def __init__(self)
class LinearTDLearner:
def __init__(self, feature_size, device, feature_preproc, learn_rate=0.0001):
self.value_function = nn.Linear(feature_size, 1, bias=False).to(device)
self.optim = torch.optim.SGD(self.value_function.parameters(), lr=learn_rate)
self.feature_size = feature_size
self.device = device
self.feature_preproc = feature_preproc
def values(self, features):
values = self.value_function.forward(features)
return values
def update_epoc(self, feature_sequence, dones, mask, rewards, minibatch_size, td_lambda, gamma):
num_samples = len(feature_sequence)-td_lambda-1
tot_td_err = 0
num_steps = 0
order = torch.randperm(num_samples, device=self.device)
for i in range(0, num_samples, minibatch_size):
maxi = min(num_samples, i+minibatch_size)
curbatch_size = maxi - i
with torch.no_grad():
idxs = order[i:maxi]
all_idx_block = idxs.view(-1,1) + torch.arange(0, td_lambda+1, device=self.device).view(1,-1)
all_idxs = all_idx_block.flatten()
next_idxs = all_idx_block[:,1:].flatten()
all_features = feature_sequence[all_idxs]
processed_features = self.feature_preproc(all_features)
value_preds = self.value_function.forward(processed_features).view(curbatch_size, -1)
with torch.no_grad():
next_value_preds = value_preds[:,1:]
next_dones = dones[next_idxs].view(curbatch_size,-1).float()
next_mask = mask[next_idxs].view(curbatch_size,-1).float()
next_rewards = rewards[next_idxs].view(curbatch_size,-1)
avg_discounted_value = get_avg_discounted_value(next_dones, next_mask, next_rewards, next_value_preds, gamma)
cur_values = value_preds[:,0]
self.optim.zero_grad()
td_err = torch.sum((avg_discounted_value.detach() - cur_values)**2)/minibatch_size
td_err.backward()
self.optim.step()
tot_td_err += float(td_err.detach().numpy())
num_steps += 1
return tot_td_err / (num_steps*minibatch_size)
@torch.no_grad()
def _predict_values(self, feature_sequence, minibatch_size):
num_samples = len(feature_sequence)
value_batch_outs = []
for i in range(0, num_samples, minibatch_size):
maxi = min(num_samples, i+minibatch_size)
feature_batch = feature_sequence[i: maxi]
proced_features = self.feature_preproc(feature_batch)
pred_vals = self.value_function.forward(proced_features).flatten()
value_batch_outs.append(pred_vals)
predicted_values = torch.cat(value_batch_outs, axis=0)
print(predicted_values.shape)
return predicted_values
def _get_actual_values(self, dones, masks, rewards, gamma):
value = 0
actual_values = []
rewards = rewards.cpu().detach().numpy().tolist()
dones = dones.cpu().detach().numpy().tolist()
masks = masks.cpu().detach().numpy().tolist()
for rew, done, mask in zip(rewards, dones, masks):
# TODO: This does not implement mask logic!!!
value += rew
actual_values.append(value)
value *= gamma
if done:
value = 0
actual_values.reverse()
return torch.tensor(actual_values)
def evaluate(self, feature_sequence, minibatch_size, dones, masks, rewards, gamma):
actual_values = self._get_actual_values(dones, masks, rewards, gamma)
pred_values = self._predict_values(feature_sequence, minibatch_size)
obs1 = feature_sequence[:55,1].detach().numpy()
obs2 = feature_sequence[:55,0].detach().numpy()
pred_values_n = pred_values.detach().numpy()
actual_values_n = actual_values.detach().numpy()
print("iterdata")
print("\n".join("\t".join([str(e) for e in el]) for el in zip(pred_values_n, actual_values_n, obs1, obs2)))
# print(list(feature_sequence[:55,1].detach().numpy()))
# print(list(feature_sequence[:55,0].detach().numpy()))
# print(list(actual_values.detach().numpy()))
# print(list(pred_values.detach().numpy()))
# print(actual_values)
# print(pred_values)
return torch.mean(torch.abs(actual_values[1:] - pred_values[1:]))
#
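# Minimal construction sketch with an identity feature preprocessor; the
# feature_size, device and zero features are illustrative assumptions.
def _example_linear_td_learner():
    learner = LinearTDLearner(feature_size=8, device="cpu", feature_preproc=lambda x: x)
    features = torch.zeros(4, 8)
    return learner.values(features)  # shape (4, 1)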
| 40.57037
| 125
| 0.651999
| 4,475
| 0.817053
| 0
| 0
| 644
| 0.117583
| 0
| 0
| 477
| 0.087091
|
ce1252998459ede1ce9e5326a029f03393ec65ef
| 660
|
py
|
Python
|
Qualification/lazyLoader.py
|
monisjaved/Facebook-Hacker-Cup
|
569052ecf1c94162cfbbef2533519b46d73d9328
|
[
"MIT"
] | null | null | null |
Qualification/lazyLoader.py
|
monisjaved/Facebook-Hacker-Cup
|
569052ecf1c94162cfbbef2533519b46d73d9328
|
[
"MIT"
] | null | null | null |
Qualification/lazyLoader.py
|
monisjaved/Facebook-Hacker-Cup
|
569052ecf1c94162cfbbef2533519b46d73d9328
|
[
"MIT"
] | null | null | null |
# https://www.facebook.com/hackercup/problem/169401886867367/
__author__ = "Moonis Javed"
__email__ = "monis.javed@gmail.com"
def numberOfDays(arr):
arr = sorted(arr)
n = 0
while len(arr) > 0:
k = arr[-1]
w = k
del arr[-1]
while w <= 50:
try:
del arr[0]
w += k
except:
break
if w > 50:
n += 1
return n
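# Worked example (illustrative): numberOfDays greedily pairs the heaviest
# remaining item with enough of the lightest items so that
# (heaviest weight) * (items carried) exceeds 50.
# numberOfDays([1, 1, 1, 50]) == 1: the 50-weight item plus one 1-weight item
# gives a perceived weight of 100, and the leftover items cannot form another
# qualifying trip.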
if __name__ == "__main__":
f = open("input2.txt").read().split("\n")
writeF = open("output2.txt","w")
n = int(f[0])
del f[0]
for i in range(1,n+1):
t = int(f[0])
del f[0]
arr =[None]*t
for j in xrange(t):
arr[j] = int(f[0])
del f[0]
writeF.write("Case #%d: %d\n" % (i,numberOfDays(arr)))
# print i
| 18.333333
| 61
| 0.568182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 165
| 0.25
|
ce14a275a0957db889a40c045e5b451a0b8f836f
| 331
|
py
|
Python
|
examples/robot_mock.py
|
gmamaladze/robo-pi
|
f9affc63760774073a3b1de4e4ea064bde2eb074
|
[
"MIT"
] | 1
|
2020-04-24T21:34:01.000Z
|
2020-04-24T21:34:01.000Z
|
examples/robot_mock.py
|
gmamaladze/robo-pi
|
f9affc63760774073a3b1de4e4ea064bde2eb074
|
[
"MIT"
] | 10
|
2019-12-16T20:55:38.000Z
|
2022-02-09T23:33:51.000Z
|
examples/robot_mock.py
|
gmamaladze/tf-voice-pi
|
f9affc63760774073a3b1de4e4ea064bde2eb074
|
[
"MIT"
] | null | null | null |
class Robot:
def __init__(self, left="MOTOR4", right="MOTOR2", config=1):
print("init")
def forward(self):
print("forward")
def backward(self):
print("backward")
def left(self):
print("left")
def right(self):
print("right")
def stop(self):
print("stop")
| 17.421053
| 64
| 0.537764
| 330
| 0.996979
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.181269
|
ce14ba7248ea553bc8bf340da9e895166445335c
| 47
|
py
|
Python
|
libs/messaging_service/__init__.py
|
wip-abramson/aries-jupyter-playground
|
872f1a319f9072d7160298fcce82fb64c93d7397
|
[
"Apache-2.0"
] | 6
|
2021-05-27T12:51:32.000Z
|
2022-01-11T05:49:12.000Z
|
libs/messaging_service/__init__.py
|
SoftwareImpacts/SIMPAC-2021-64
|
4089946109e05516bbea70359d3bf1d02b245f4a
|
[
"Apache-2.0"
] | 2
|
2021-10-05T07:38:05.000Z
|
2022-02-10T11:38:18.000Z
|
libs/messaging_service/__init__.py
|
SoftwareImpacts/SIMPAC-2021-64
|
4089946109e05516bbea70359d3bf1d02b245f4a
|
[
"Apache-2.0"
] | 7
|
2021-04-22T14:18:06.000Z
|
2022-02-14T10:30:52.000Z
|
from .messaging_service import MessagingService
| 47
| 47
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ce1563691214ec353e2ec66f0c158ddd18f4c456
| 556
|
py
|
Python
|
Ex087.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex087.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
Ex087.py
|
andrade-lcs/ex_curso_em_video_python
|
f2d029efe7a20cdf0fcb5b602f9992e27d37c263
|
[
"MIT"
] | null | null | null |
from random import randint
s = t = ma = 0
m = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
for c in range(0, 3):
m[l][c] = randint(0, 100)
print('-='*15)
for l in range(0, 3):
t += m[l][2]
for c in range(0, 3):
print(f'[{m[l][c]:^5}]', end='')
if m[l][c] % 2 == 0:
s += m[l][c]
if m[1][c] > ma:
ma = m[1][c]
print()
print('-='*15)
print(f'A soma dos números pares é {s}')
print(f'A soma dos valores da terceira coluna é {t}')
print(f'O maior valor da segunda linha é {ma}')
| 27.8
| 53
| 0.47482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.267857
|
ce1579bf8768e7cef70aebd7b3896b98ea1a0187
| 54
|
py
|
Python
|
networkx-d3-v2/networkx/tests/__init__.py
|
suraj-testing2/Clock_Websites
|
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
|
[
"Apache-2.0"
] | null | null | null |
networkx-d3-v2/networkx/tests/__init__.py
|
suraj-testing2/Clock_Websites
|
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
|
[
"Apache-2.0"
] | null | null | null |
networkx-d3-v2/networkx/tests/__init__.py
|
suraj-testing2/Clock_Websites
|
0e65331da40cfd3766f1bde17f0a9c7ff6666dea
|
[
"Apache-2.0"
] | null | null | null |
from .utils_tests import *
from .views_tests import *
| 18
| 26
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ce1581e90ef98f01e93c9852612c4c137d683a10
| 7,851
|
py
|
Python
|
ros/src/waypoint_updater/waypoint_updater.py
|
dan-fern/CarND-Capstone-P9
|
004853c7a14dfd5e99563c4082e7609885b4f6b2
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
dan-fern/CarND-Capstone-P9
|
004853c7a14dfd5e99563c4082e7609885b4f6b2
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
dan-fern/CarND-Capstone-P9
|
004853c7a14dfd5e99563c4082e7609885b4f6b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy as rp
import numpy as np
import math as math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
# Number of waypoints we will publish.
LOOKAHEAD_WPS = 150
MAX_DECEL = 0.5
class MotionState( ):
Go, Stop = range( 2 )
class WaypointUpdater( object ):
def __init__( self ):
rp.init_node( 'waypoint_updater' )
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rp.Subscriber( '/current_pose', PoseStamped, self.pose_cb )
rp.Subscriber( '/base_waypoints', Lane, self.waypoints_cb )
rp.Subscriber( '/traffic_waypoint', Int32, self.traffic_cb )
rp.Subscriber( '/current_velocity', TwistStamped, self.velocity_cb )
self.final_waypoints_pub = rp.Publisher(
'final_waypoints',
Lane,
queue_size=1 )
# TODO: Add other member variables you need below
self.base_lane = None
self.pose = None
self.waypoints_2d = None
self.waypoint_tree = None
self.nearest_light = None
self.vehicle_velocity = None # in m/s
self.motion_state = MotionState.Go
self.deceleration_rate = None
self.acceleration_rate = 0.75 # m/s
self.previous_velocity = None
self.loop( )
def loop( self ):
rate = rp.Rate( 10 )
while not rp.is_shutdown( ):
if self.pose and self.base_lane and self.waypoint_tree:
# get closest waypoint
#closest_waypoint_index = self.get_closest_waypoint_id( )
self.publish_waypoints( )
self.previous_velocity = self.vehicle_velocity
rate.sleep( )
def publish_waypoints( self ):
self.final_waypoints_pub.publish( self.generate_lane( ) )
def generate_lane( self ):
lane = Lane( )
closest_idx = self.get_closest_waypoint_id( )
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane[ closest_idx:farthest_idx ]
if self.nearest_light != None and \
self.nearest_light <= farthest_idx and \
self.nearest_light >= closest_idx:
self.motion_state = MotionState.Stop
base_waypoints = self.decelerate( base_waypoints, closest_idx )
elif self.motion_state == MotionState.Stop:
self.motion_state = MotionState.Go
self.deceleration_rate = None
if self.motion_state == MotionState.Go:
if abs( self.vehicle_velocity - self.get_waypoint_velocity( \
base_waypoints[ 0 ] ) ) > 1.0:
if self.previous_velocity == None:
start_vel = self.vehicle_velocity
else:
start_vel = max(
self.previous_velocity + 0.2,
self.vehicle_velocity )
base_waypoints = self.accelerate( base_waypoints, start_vel )
else:
self.acceleration_start_velocity = None
lane.waypoints = base_waypoints
return lane
def accelerate( self, waypoints, start_velocity ):
new_waypoints = [ ]
for i, wp in enumerate( waypoints ):
p = Waypoint( )
p.pose = wp.pose
distance = self.distance( waypoints, 0, i )
target_vel = start_velocity + distance * self.acceleration_rate
if target_vel < 0.5:
target_vel = 0.5
p.twist.twist.linear.x = min(
target_vel,
self.get_waypoint_velocity( wp ) )
new_waypoints.append( p )
return new_waypoints
def decelerate( self, waypoints, start_idx ):
new_waypoints = [ ]
speed = self.vehicle_velocity
# two waypoints back from line so front of car stops earlier
stop_idx = self.nearest_light - start_idx - 2
for i, wp in enumerate( waypoints ):
p = Waypoint( )
p.pose = wp.pose
dist = self.distance( waypoints, i, stop_idx )
if i >= stop_idx:
target_vel = 0
elif dist < 15:
if self.deceleration_rate == None:
self.deceleration_rate = self.vehicle_velocity / dist
target_vel = self.deceleration_rate * dist
if target_vel <= 1.0:
target_vel = 0.0
target_vel = min( target_vel, self.get_waypoint_velocity( wp ) )
else:
target_vel = self.get_waypoint_velocity( wp )
p.twist.twist.linear.x = target_vel
new_waypoints.append( p )
return new_waypoints
def get_closest_waypoint_id( self ):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query( [x, y], 1 )[1]
# Check if closest waypoint is ahead or behind the vehicle
closest_wp = np.array( self.waypoints_2d[ closest_idx ] )
previous_wp = np.array( self.waypoints_2d[ closest_idx - 1 ] )
# Equation for hyperplane through closest_coords
waypoint_vector = closest_wp - previous_wp
position_vector = np.array( [x, y] ) - closest_wp
val = np.dot( waypoint_vector, position_vector )
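        # waypoint_vector points along the direction of travel and position_vector
        # runs from the closest waypoint to the car, so a positive dot product means
        # the closest waypoint is already behind the vehicle and the next index
        # should be used instead.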
if val > 0:
closest_idx = ( closest_idx + 1 ) % len( self.waypoints_2d )
return closest_idx
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb( self, waypoints ):
# TODO: Implement
self.base_lane = waypoints.waypoints
if not self.waypoints_2d:
self.waypoints_2d = [ [ waypoint.pose.pose.position.x,
waypoint.pose.pose.position.y ]
for waypoint in waypoints.waypoints ]
self.waypoint_tree = KDTree( self.waypoints_2d )
def traffic_cb( self, msg ):
# TODO: Callback for /traffic_waypoint message. Implement
if( msg.data == -1 ):
self.nearest_light = None
else:
self.nearest_light = msg.data
def velocity_cb( self, velocity ):
self.vehicle_velocity = velocity.twist.linear.x
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity( self, waypoint ):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity( self, waypoints, waypoint, velocity ):
waypoints[ waypoint ].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rp.ROSInterruptException:
rp.logerr('Could not start waypoint updater node.')
| 31.154762
| 98
| 0.610495
| 6,734
| 0.857725
| 0
| 0
| 0
| 0
| 0
| 0
| 1,391
| 0.177175
|
ce1627eb06d19834ba84ea0cd7b1055080fe6187
| 595
|
py
|
Python
|
oandapy/exceptions.py
|
extreme4all/oandapy
|
48dcfbe154316a83ca6e62e6b939062165cabc3e
|
[
"MIT"
] | null | null | null |
oandapy/exceptions.py
|
extreme4all/oandapy
|
48dcfbe154316a83ca6e62e6b939062165cabc3e
|
[
"MIT"
] | null | null | null |
oandapy/exceptions.py
|
extreme4all/oandapy
|
48dcfbe154316a83ca6e62e6b939062165cabc3e
|
[
"MIT"
] | null | null | null |
"""Exceptions."""
class OandaError(Exception):
""" Generic error class, catches oanda response errors
"""
def __init__(self, error_response):
self.error_response = error_response
msg = f"OANDA API returned error code {error_response['code']} ({error_response['message']}) "
super(OandaError, self).__init__(msg)
class BadEnvironment(Exception):
"""environment should be: sandbox, practice or live."""
def __init__(self, environment):
msg = f"Environment '{environment}' does not exist"
super(BadEnvironment, self).__init__(msg)
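# Minimal usage sketch; the error payload below is purely illustrative.
def _oanda_error_example():
    return OandaError({"code": 42, "message": "Insufficient margin"})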
| 29.75
| 103
| 0.67563
| 571
| 0.959664
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.448739
|
ce1666960c0a0228d2a06407d11294362e8b8691
| 4,444
|
py
|
Python
|
synthesis/reverse_map/reverse_map_ast.py
|
jajajaqlt/nsg
|
1873f2b5e10441110c3c69940ceb4650f9684ac0
|
[
"Apache-2.0"
] | 10
|
2021-11-02T18:30:38.000Z
|
2022-03-21T06:31:33.000Z
|
synthesis/reverse_map/reverse_map_ast.py
|
rohanmukh/nag
|
f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:40:42.000Z
|
2022-03-30T04:33:08.000Z
|
synthesis/reverse_map/reverse_map_ast.py
|
rohanmukh/nag
|
f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3
|
[
"Apache-2.0"
] | 2
|
2021-11-03T19:14:06.000Z
|
2021-11-03T23:47:09.000Z
|
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import DAPIInvoke
from synthesis.ops.candidate_ast import SYMTAB_MOD, TYPE_NODE, API_NODE, VAR_NODE, OP_NODE, METHOD_NODE, CLSTYPE_NODE, \
VAR_DECL_NODE
class AstReverseMapper:
def __init__(self, vocab):
self.vocab = vocab
self.nodes, self.edges, self.targets = [], [], []
self.var_decl_ids = []
self.node_type_numbers = []
self.type_helper_val, self.expr_type_val, self.ret_type_val = [], [], []
self.num_data = 0
return
def add_data(self, nodes, edges, targets,
var_decl_ids,
node_type_number,
type_helper_val, expr_type_val, ret_type_val):
self.nodes.extend(nodes)
self.edges.extend(edges)
self.targets.extend(targets)
self.var_decl_ids.extend(var_decl_ids)
self.node_type_numbers.extend(node_type_number)
self.type_helper_val.extend(type_helper_val)
self.expr_type_val.extend(expr_type_val)
self.ret_type_val.extend(ret_type_val)
self.num_data += len(nodes)
def get_element(self, id):
return self.nodes[id], self.edges[id], self.targets[id], \
self.var_decl_ids[id], \
self.node_type_numbers[id], \
self.type_helper_val[id], self.expr_type_val[id], self.ret_type_val[id]
def decode_ast_paths(self, ast_element, partial=True):
nodes, edges, targets, \
var_decl_ids, \
node_type_numbers, \
type_helper_vals, expr_type_vals, ret_type_vals = ast_element
for node in nodes:
print(self.vocab.chars_concept[node], end=',')
print()
#
for edge in edges:
print(edge, end=',')
print()
for _, _, target, \
var_decl_id, \
node_type_numbers, \
type_helper_val, expr_type_val, ret_type_val in zip(*ast_element):
if node_type_numbers == SYMTAB_MOD:
print('--symtab--', end=',')
elif node_type_numbers == VAR_NODE:
print(self.vocab.chars_var[target], end=',')
elif node_type_numbers == VAR_DECL_NODE:
print(self.vocab.chars_var[target], end=',')
elif node_type_numbers == TYPE_NODE:
print(self.vocab.chars_type[target], end=',')
elif node_type_numbers == CLSTYPE_NODE:
print(self.vocab.chars_type[target], end=',')
elif node_type_numbers == API_NODE:
api = self.vocab.chars_api[target]
api = api.split(DAPIInvoke.delimiter())[0]
print(api, end=',')
elif node_type_numbers == OP_NODE:
op = self.vocab.chars_op[target]
print(op, end=',')
elif node_type_numbers == METHOD_NODE:
op = self.vocab.chars_method[target]
print(op, end=',')
else:
print(self.vocab.chars_concept[target], end=',')
print()
if not partial:
for var_decl_id in var_decl_ids:
print(var_decl_id, end=',')
print()
for type_helper_val in type_helper_vals:
print(self.vocab.chars_type[type_helper_val], end=',')
print()
for expr_type_val in expr_type_vals:
print(self.vocab.chars_type[expr_type_val], end=',')
print()
for ret_type_val in ret_type_vals:
print(self.vocab.chars_type[ret_type_val], end=',')
print()
print()
def reset(self):
self.nodes, self.edges, self.targets = [], [], []
self.var_decl_ids = []
self.node_type_numbers = []
self.type_helper_val, self.expr_type_val, self.ret_type_val = [], [], []
self.num_data = 0
| 36.727273
| 120
| 0.599685
| 3,679
| 0.827858
| 0
| 0
| 0
| 0
| 0
| 0
| 623
| 0.140189
|
ce17307a9a0665319fcd15ea71bb54693784de3c
| 135
|
py
|
Python
|
ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py
|
PacktPublishing/Django-3-Web-Development-Cookbook
|
6ffe6e0add93a43a9abaff62e0147dc1f4f5351a
|
[
"MIT"
] | 159
|
2019-11-13T14:11:39.000Z
|
2022-03-24T05:47:10.000Z
|
ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py
|
PacktPublishing/Django-3-Web-Development-Cookbook
|
6ffe6e0add93a43a9abaff62e0147dc1f4f5351a
|
[
"MIT"
] | 34
|
2019-11-06T08:32:48.000Z
|
2022-01-14T11:31:29.000Z
|
ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py
|
PacktPublishing/Django-3-Web-Development-Cookbook
|
6ffe6e0add93a43a9abaff62e0147dc1f4f5351a
|
[
"MIT"
] | 103
|
2019-08-15T21:35:26.000Z
|
2022-03-20T05:29:11.000Z
|
from ._base import *
DEBUG = False
WEBSITE_URL = "https://example.com" # without trailing slash
MEDIA_URL = f"{WEBSITE_URL}/media/"
| 19.285714
| 61
| 0.718519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.503704
|
ce1739ffa8890ca468f44112dbe677b551c2a05c
| 1,657
|
py
|
Python
|
v2/gui.py
|
appills/pyascii
|
525411327ecb8835e14f8f84b3ac19f059dbd0bc
|
[
"MIT"
] | null | null | null |
v2/gui.py
|
appills/pyascii
|
525411327ecb8835e14f8f84b3ac19f059dbd0bc
|
[
"MIT"
] | null | null | null |
v2/gui.py
|
appills/pyascii
|
525411327ecb8835e14f8f84b3ac19f059dbd0bc
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import filedialog
from pyascii import main
class App:
def __init__(self, master):
        # initialize myFile instance variable
self.myFile = None
self.saveFile = None
#set window height
master.minsize(height = 440, width = 680)
#create frame
frame = Frame(master)
frame.pack()
#first label
self.labelTop = Label(frame, text="Choose an image file!", font=("Arial", 12))
self.labelTop.pack()
#first button
self.buttonTop = Button(frame, text="Choose file", fg="blue", width = 10, command = self.chooseImageFile)
self.buttonTop.pack()
#second label
self.labelMid = Label(frame,
text="The ascii text file will be saved in the same directory as the image.\n"
"If your image name was foo.jpg, the text file will be foo.txt",
font=("Arial", 12))
self.labelMid.pack()
#second button
self.buttonMid = Button(frame, text="Pyascii!", fg="green", width = 10, command=self.pyascii)
self.buttonMid.pack()
def chooseImageFile(self):
self.myFile = filedialog.askopenfilename(parent = root, title='Choose your picture!')
while self.verifyFileExtension() is False:
self.myFile = filedialog.askopenfilename(parent = root, title='Invalid file!')
def pyascii(self):
main(self.myFile)
def verifyFileExtension(self):
if self.myFile is None:
return False
else:
validExtensions = ["jpeg", "jpg", "bmp", "png"]
result = False
for extension in validExtensions:
if self.myFile.endswith(extension):
result = True
return result
root=Tk()
root.wm_title("Pyascii")
app = App(root)
root.mainloop()
| 30.127273
| 108
| 0.677127
| 1,499
| 0.904647
| 0
| 0
| 0
| 0
| 0
| 0
| 401
| 0.242004
|
ce1986e97c39f7b0d9070c20a8cf44a57d43a5a3
| 13,093
|
py
|
Python
|
tests/integration_tests/test_solution/test_solution_interior.py
|
cwentland0/perform
|
e08771cb776a7e6518c43350746e2ca72f79b153
|
[
"MIT"
] | 6
|
2021-03-24T21:42:06.000Z
|
2022-01-28T20:00:13.000Z
|
tests/integration_tests/test_solution/test_solution_interior.py
|
cwentland0/perform
|
e08771cb776a7e6518c43350746e2ca72f79b153
|
[
"MIT"
] | 38
|
2021-04-15T15:30:21.000Z
|
2022-01-29T01:23:57.000Z
|
tests/integration_tests/test_solution/test_solution_interior.py
|
cwentland0/perform
|
e08771cb776a7e6518c43350746e2ca72f79b153
|
[
"MIT"
] | 1
|
2021-07-03T03:13:36.000Z
|
2021-07-03T03:13:36.000Z
|
import unittest
import os
import numpy as np
from constants import (
del_test_dir,
gen_test_dir,
get_output_mode,
solution_domain_setup,
CHEM_DICT_REACT,
SOL_PRIM_IN_REACT,
TEST_DIR,
)
from perform.constants import REAL_TYPE
from perform.system_solver import SystemSolver
from perform.input_funcs import read_restart_file
from perform.gas_model.calorically_perfect_gas import CaloricallyPerfectGas
from perform.time_integrator.implicit_integrator import BDF
from perform.solution.solution_interior import SolutionInterior
class SolutionIntInitTestCase(unittest.TestCase):
def setUp(self):
self.output_mode, self.output_dir = get_output_mode()
# set chemistry
self.chem_dict = CHEM_DICT_REACT
self.gas = CaloricallyPerfectGas(self.chem_dict)
# set time integrator
self.param_dict = {}
self.param_dict["dt"] = 1e-7
self.param_dict["time_scheme"] = "bdf"
self.param_dict["time_order"] = 2
self.time_int = BDF(self.param_dict)
# generate working directory
gen_test_dir()
# generate input text files
solution_domain_setup()
# set SystemSolver
self.solver = SystemSolver(TEST_DIR)
self.num_cells = 2
self.num_reactions = 1
def tearDown(self):
del_test_dir()
def test_solution_int_init(self):
sol = SolutionInterior(
self.gas, SOL_PRIM_IN_REACT, self.solver, self.num_cells, self.num_reactions, self.time_int
)
if self.output_mode:
np.save(os.path.join(self.output_dir, "sol_int_init_sol_cons.npy"), sol.sol_cons)
else:
self.assertTrue(np.array_equal(sol.sol_prim, SOL_PRIM_IN_REACT))
self.assertTrue(
np.allclose(sol.sol_cons, np.load(os.path.join(self.output_dir, "sol_int_init_sol_cons.npy")))
)
# TODO: a LOT of checking of other variables
class SolutionIntMethodsTestCase(unittest.TestCase):
def setUp(self):
self.output_mode, self.output_dir = get_output_mode()
# set chemistry
self.chem_dict = CHEM_DICT_REACT
self.gas = CaloricallyPerfectGas(self.chem_dict)
# set time integrator
self.param_dict = {}
self.param_dict["dt"] = 1e-7
self.param_dict["time_scheme"] = "bdf"
self.param_dict["time_order"] = 2
self.time_int = BDF(self.param_dict)
# generate working directory
gen_test_dir()
# generate input text files
solution_domain_setup()
# set SystemSolver
self.solver = SystemSolver(TEST_DIR)
self.num_cells = 2
self.num_reactions = 1
self.sol = SolutionInterior(
self.gas, SOL_PRIM_IN_REACT, self.solver, self.num_cells, self.num_reactions, self.time_int
)
def tearDown(self):
del_test_dir()
def test_calc_sol_jacob(self):
sol_jacob = self.sol.calc_sol_jacob(inverse=False)
sol_jacob_inv = self.sol.calc_sol_jacob(inverse=True)
if self.output_mode:
np.save(os.path.join(self.output_dir, "sol_int_sol_jacob.npy"), sol_jacob)
np.save(os.path.join(self.output_dir, "sol_int_sol_jacob_inv.npy"), sol_jacob_inv)
else:
self.assertTrue(np.allclose(sol_jacob, np.load(os.path.join(self.output_dir, "sol_int_sol_jacob.npy"))))
self.assertTrue(
np.allclose(sol_jacob_inv, np.load(os.path.join(self.output_dir, "sol_int_sol_jacob_inv.npy")))
)
def test_update_snapshots(self):
# update the snapshot matrix
for self.solver.iter in range(1, self.solver.num_steps + 1):
if (self.solver.iter % self.solver.out_interval) == 0:
self.sol.update_snapshots(self.solver)
self.assertTrue(np.array_equal(self.sol.prim_snap, np.repeat(self.sol.sol_prim[:, :, None], 6, axis=2)))
self.assertTrue(np.array_equal(self.sol.cons_snap, np.repeat(self.sol.sol_cons[:, :, None], 6, axis=2)))
self.assertTrue(
np.array_equal(self.sol.reaction_source_snap, np.repeat(self.sol.reaction_source[:, :, None], 5, axis=2))
)
self.assertTrue(
np.array_equal(self.sol.heat_release_snap, np.repeat(self.sol.heat_release[:, None], 5, axis=1))
)
self.assertTrue(np.array_equal(self.sol.rhs_snap, np.repeat(self.sol.rhs[:, :, None], 5, axis=2)))
def test_snapshot_output(self):
for self.solver.iter in range(1, self.solver.num_steps + 1):
# update the snapshot matrix
if (self.solver.iter % self.solver.out_interval) == 0:
self.sol.update_snapshots(self.solver)
# write and check intermediate results
if ((self.solver.iter % self.solver.out_itmdt_interval) == 0) and (
self.solver.iter != self.solver.num_steps
):
self.sol.write_snapshots(self.solver, intermediate=True, failed=False)
sol_prim_itmdt = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_prim_" + self.solver.sim_type + "_ITMDT.npy")
)
sol_cons_itmdt = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_cons_" + self.solver.sim_type + "_ITMDT.npy")
)
source_itmdt = np.load(
os.path.join(self.solver.unsteady_output_dir, "source_" + self.solver.sim_type + "_ITMDT.npy")
)
heat_release_itmdt = np.load(
os.path.join(self.solver.unsteady_output_dir, "heat_release_" + self.solver.sim_type + "_ITMDT.npy")
)
rhs_itmdt = np.load(
os.path.join(self.solver.unsteady_output_dir, "rhs_" + self.solver.sim_type + "_ITMDT.npy")
)
self.assertTrue(np.array_equal(sol_prim_itmdt, np.repeat(self.sol.sol_prim[:, :, None], 3, axis=2)))
self.assertTrue(np.array_equal(sol_cons_itmdt, np.repeat(self.sol.sol_cons[:, :, None], 3, axis=2)))
self.assertTrue(
np.array_equal(source_itmdt, np.repeat(self.sol.reaction_source[:, :, None], 2, axis=2))
)
self.assertTrue(
np.array_equal(heat_release_itmdt, np.repeat(self.sol.heat_release[:, None], 2, axis=1))
)
self.assertTrue(np.array_equal(rhs_itmdt, np.repeat(self.sol.rhs[:, :, None], 2, axis=2)))
# write and check "failed" snapshots
if self.solver.iter == 7:
self.sol.write_snapshots(self.solver, intermediate=False, failed=True)
sol_prim_failed = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_prim_" + self.solver.sim_type + "_FAILED.npy")
)
sol_cons_failed = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_cons_" + self.solver.sim_type + "_FAILED.npy")
)
source_failed = np.load(
os.path.join(self.solver.unsteady_output_dir, "source_" + self.solver.sim_type + "_FAILED.npy")
)
heat_release_failed = np.load(
os.path.join(
self.solver.unsteady_output_dir, "heat_release_" + self.solver.sim_type + "_FAILED.npy"
)
)
rhs_failed = np.load(
os.path.join(self.solver.unsteady_output_dir, "rhs_" + self.solver.sim_type + "_FAILED.npy")
)
self.assertTrue(np.array_equal(sol_prim_failed, np.repeat(self.sol.sol_prim[:, :, None], 4, axis=2)))
self.assertTrue(np.array_equal(sol_cons_failed, np.repeat(self.sol.sol_cons[:, :, None], 4, axis=2)))
self.assertTrue(
np.array_equal(source_failed, np.repeat(self.sol.reaction_source[:, :, None], 3, axis=2))
)
self.assertTrue(
np.array_equal(heat_release_failed, np.repeat(self.sol.heat_release[:, None], 3, axis=1))
)
self.assertTrue(np.array_equal(rhs_failed, np.repeat(self.sol.rhs[:, :, None], 3, axis=2)))
# delete intermediate results and check that they deleted properly
self.sol.delete_itmdt_snapshots(self.solver)
self.assertFalse(
os.path.isfile(
os.path.join(self.solver.unsteady_output_dir, "sol_prim_" + self.solver.sim_type + "_ITMDT.npy")
)
)
self.assertFalse(
os.path.isfile(
os.path.join(self.solver.unsteady_output_dir, "sol_cons_" + self.solver.sim_type + "_ITMDT.npy")
)
)
self.assertFalse(
os.path.isfile(
os.path.join(self.solver.unsteady_output_dir, "source_" + self.solver.sim_type + "_ITMDT.npy")
)
)
self.assertFalse(
os.path.isfile(
os.path.join(self.solver.unsteady_output_dir, "heat_release_" + self.solver.sim_type + "_ITMDT.npy")
)
)
self.assertFalse(
os.path.isfile(os.path.join(self.solver.unsteady_output_dir, "rhs_" + self.solver.sim_type + "_ITMDT.npy"))
)
# write final snapshots
self.sol.write_snapshots(self.solver, intermediate=False, failed=False)
sol_prim_final = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_prim_" + self.solver.sim_type + ".npy")
)
sol_cons_final = np.load(
os.path.join(self.solver.unsteady_output_dir, "sol_cons_" + self.solver.sim_type + ".npy")
)
source_final = np.load(os.path.join(self.solver.unsteady_output_dir, "source_" + self.solver.sim_type + ".npy"))
heat_release_final = np.load(
os.path.join(self.solver.unsteady_output_dir, "heat_release_" + self.solver.sim_type + ".npy")
)
rhs_final = np.load(os.path.join(self.solver.unsteady_output_dir, "rhs_" + self.solver.sim_type + ".npy"))
self.assertTrue(np.array_equal(sol_prim_final, np.repeat(self.sol.sol_prim[:, :, None], 6, axis=2)))
self.assertTrue(np.array_equal(sol_cons_final, np.repeat(self.sol.sol_cons[:, :, None], 6, axis=2)))
self.assertTrue(np.array_equal(source_final, np.repeat(self.sol.reaction_source[:, :, None], 5, axis=2)))
self.assertTrue(np.array_equal(heat_release_final, np.repeat(self.sol.heat_release[:, None], 5, axis=1)))
self.assertTrue(np.array_equal(rhs_final, np.repeat(self.sol.rhs[:, :, None], 5, axis=2)))
def test_write_restart_file(self):
sol_cons = self.sol.sol_cons
self.solver.sol_time = 1e-4
self.solver.iter = 2
self.solver.restart_iter = 4
self.sol.write_restart_file(self.solver)
self.assertEqual(self.solver.restart_iter, 5)
# check restart files
restart_data = np.load(os.path.join(self.solver.restart_output_dir, "restart_file_4.npz"))
self.assertTrue(
np.array_equal(
restart_data["sol_prim"],
np.repeat(SOL_PRIM_IN_REACT[:, :, None], 2, axis=-1),
)
)
self.assertTrue(
np.array_equal(
restart_data["sol_cons"],
np.repeat(sol_cons[:, :, None], 2, axis=-1),
)
)
self.assertEqual(float(restart_data["sol_time"]), 1e-4)
# check iteration files
restart_iter = int(np.loadtxt(os.path.join(self.solver.restart_output_dir, "restart_iter.dat")))
self.assertEqual(restart_iter, 4)
def test_read_restart_file(self):
self.solver.sol_time = 1e-4
self.solver.iter = 2
self.solver.restart_iter = 4
self.sol.write_restart_file(self.solver)
sol_time, sol_prim, restart_iter = read_restart_file(self.solver)
self.assertEqual(sol_time, 1e-4)
self.assertEqual(restart_iter, 5) # 1 is added to avoid overwriting
self.assertTrue(
np.array_equal(
sol_prim,
np.repeat(SOL_PRIM_IN_REACT[:, :, None], 2, axis=-1),
)
)
def test_calc_d_sol_norms(self):
self.solver.iter = 3
self.sol.d_sol_norm_hist = np.zeros((self.solver.num_steps, 2), dtype=REAL_TYPE)
self.sol.sol_hist_prim[0] = self.sol.sol_prim * 2.0
self.sol.calc_d_sol_norms(self.solver, "implicit")
self.assertAlmostEqual(self.sol.d_sol_norm_hist[2, 0], 3.46573790883)
self.assertAlmostEqual(self.sol.d_sol_norm_hist[2, 1], 3.45416666667)
def test_calc_res_norms(self):
self.solver.iter = 3
self.sol.res = self.sol.sol_prim.copy()
self.sol.calc_res_norms(self.solver, 0)
self.assertAlmostEqual(self.sol.res_norm_hist[2, 0], 3.46573790883)
self.assertAlmostEqual(self.sol.res_norm_hist[2, 1], 3.45416666667)
| 39.796353
| 120
| 0.612465
| 12,535
| 0.957382
| 0
| 0
| 0
| 0
| 0
| 0
| 1,281
| 0.097839
|
ce1a18c48b194d0b3451c941f83d9e8945a1714d
| 4,139
|
py
|
Python
|
tests/system/post_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 8
|
2020-03-17T09:15:28.000Z
|
2022-01-29T19:50:45.000Z
|
tests/system/post_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 1
|
2021-06-02T00:26:58.000Z
|
2021-06-02T00:26:58.000Z
|
tests/system/post_cars_positive_test.py
|
ikostan/REST_API_AUTOMATION
|
cdb4d30fbc7457b2a403b4dad6fe1efa2e754681
|
[
"Unlicense"
] | 1
|
2021-11-22T16:10:27.000Z
|
2021-11-22T16:10:27.000Z
|
#!/path/to/interpreter
"""
Flask App REST API testing: POST
"""
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import allure
import requests
from tests.system.base_test import BaseTestCase
from api.cars_app import USER_LIST
@allure.epic('Simple Flask App')
@allure.parent_suite('REST API')
@allure.suite("System Tests")
@allure.sub_suite("Positive Tests")
@allure.feature("POST")
@allure.story('Cars')
class PostCarsPositiveTestCase(BaseTestCase):
"""
Simple Flask App Positive Test: POST call
"""
def setUp(self) -> None:
"""
Test data preparation
:return:
"""
with allure.step("Prepare test data"):
self.cars_url = '/cars'
self.message = ''
self.new_car = {'name': 'Figo',
'brand': 'Ford',
'price_range': '2-3 lacs',
'car_type': 'hatchback'}
def tearDown(self) -> None:
"""
Post test procedure
:return:
"""
with allure.step("Remove new added car from the list"):
username = USER_LIST[0]['name']
password = USER_LIST[0]['password']
requests.delete(url=self.URL +
self.cars_url +
'/remove/' +
self.new_car['name'],
auth=(username,
password))
def test_post_car_admin(self):
"""
Add new car using admin user credentials.
:return:
"""
allure.dynamic.title("Add new car "
"using admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[0]['name']
password = USER_LIST[0]['password']
self.assertEqual("admin",
USER_LIST[0]['perm'])
with allure.step("Send POST request"):
response = requests.post(self.URL +
self.cars_url +
'/add',
json=self.new_car,
auth=(username,
password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify 'successful' flag"):
self.assertTrue(response.json()['successful'])
with allure.step("Verify retrieved cars list"):
self.assertDictEqual(self.new_car,
response.json()['car'])
def test_post_car_non_admin(self):
"""
Add new car using non admin user credentials.
:return:
"""
allure.dynamic.title("Add new car "
"using non admin user credentials")
allure.dynamic.severity(allure.severity_level.BLOCKER)
with allure.step("Verify user permissions"):
username = USER_LIST[1]['name']
password = USER_LIST[1]['password']
self.assertEqual("non_admin",
USER_LIST[1]['perm'])
with allure.step("Send POST request"):
response = requests.post(self.URL +
self.cars_url +
'/add',
json=self.new_car,
auth=(username,
password))
with allure.step("Verify status code"):
self.assertEqual(200,
response.status_code)
with allure.step("Verify 'successful' flag"):
self.assertTrue(response.json()['successful'])
with allure.step("Verify retrieved cars list"):
self.assertDictEqual(self.new_car,
response.json()['car'])
| 31.59542
| 64
| 0.490457
| 3,659
| 0.88403
| 0
| 0
| 3,837
| 0.927036
| 0
| 0
| 1,196
| 0.288959
|
ce1a3fb80b6bbd849c64cd660cd72979f447cba6
| 1,165
|
py
|
Python
|
bin/h5zero.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
bin/h5zero.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
bin/h5zero.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
'''Assert HDF5 input is non-zero.
Print to stderr if not.
For example,
find . -iname '*.hdf5' -exec h5zero.py {} +
'''
from __future__ import print_function
import argparse
import sys
import h5py
from dautil.IO.h5 import h5assert_nonzero
__version__ = '0.1'
def main(args):
for filename in args.input:
with h5py.File(filename, "r") as f:
try:
h5assert_nonzero(f, verbose=args.verbose)
except AssertionError:
print(filename, file=sys.stderr)
def cli():
parser = argparse.ArgumentParser(description='Assert HDF5 input is non-zero.')
parser.set_defaults(func=main)
# define args
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument('input', nargs='+',
help='Input HDF5 files. Can be more than 1.')
parser.add_argument('-V', '--verbose', action='store_true',
help='verbose to stdout.')
# parsing and run main
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
cli()
| 23.3
| 82
| 0.612017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.307296
|
ce1a78c4b8b64234867f3d62b124351c7a4de964
| 195
|
py
|
Python
|
cla_backend/apps/core/validators.py
|
uk-gov-mirror/ministryofjustice.cla_backend
|
4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6
|
[
"MIT"
] | 3
|
2019-10-02T15:31:03.000Z
|
2022-01-13T10:15:53.000Z
|
cla_backend/apps/core/validators.py
|
uk-gov-mirror/ministryofjustice.cla_backend
|
4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6
|
[
"MIT"
] | 206
|
2015-01-02T16:50:11.000Z
|
2022-02-16T20:16:05.000Z
|
cla_backend/apps/core/validators.py
|
uk-gov-mirror/ministryofjustice.cla_backend
|
4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6
|
[
"MIT"
] | 6
|
2015-03-23T23:08:42.000Z
|
2022-02-15T17:04:44.000Z
|
from django.core.exceptions import ValidationError
def validate_first_of_month(value):
if value.day != 1:
raise ValidationError("%s should only be first day of the month." % value)
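# Typical attachment to a Django model field (sketch; the model and field names
# are illustrative):
#     start_date = models.DateField(validators=[validate_first_of_month])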
| 27.857143
| 82
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.220513
|
ce1ae0dcedfa059f4a8bffab465b0fca2f146769
| 51
|
py
|
Python
|
app/_version.py
|
sunhailin-Leo/myMacAssistant
|
30ba955a4f91a800197cbfdc2ab5d3a5cd993eef
|
[
"MIT"
] | 63
|
2020-11-02T00:58:49.000Z
|
2022-03-20T21:39:02.000Z
|
fastapi_profiler/_version.py
|
sunhailin-Leo/fastapi_profiler
|
b414af6f0b2d92e7b509b6b3e54cde13ec5795e2
|
[
"MIT"
] | 10
|
2021-02-23T11:00:39.000Z
|
2022-02-07T02:44:05.000Z
|
app/_version.py
|
sunhailin-Leo/myMacAssistant
|
30ba955a4f91a800197cbfdc2ab5d3a5cd993eef
|
[
"MIT"
] | 7
|
2020-11-24T08:34:46.000Z
|
2022-01-10T12:58:51.000Z
|
__version__ = "1.0.0"
__author__ = "sunhailin-Leo"
| 17
| 28
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.431373
|
ce1e707dde07e49cd3190510d21820c11fc3a580
| 1,525
|
py
|
Python
|
Week6/GFG(Day8-14)/Day14/Day14 - Solution.py
|
ShreyaPanale/100DaysOfCode
|
de7832d97fca36f783812868b867676b6f77c7b3
|
[
"MIT"
] | 22
|
2021-05-25T16:01:31.000Z
|
2021-06-07T06:32:27.000Z
|
Week6/GFG(Day8-14)/Day14/Day14 - Solution.py
|
shreya-panale/100DaysOfCode
|
de7832d97fca36f783812868b867676b6f77c7b3
|
[
"MIT"
] | null | null | null |
Week6/GFG(Day8-14)/Day14/Day14 - Solution.py
|
shreya-panale/100DaysOfCode
|
de7832d97fca36f783812868b867676b6f77c7b3
|
[
"MIT"
] | null | null | null |
#User function Template for python3
class Solution:
def calculateSpan(self,a,n):
Span = [0 for i in range(n)]
stack = [0]
#span value of first day is always 1.
Span[0] = 1
for i in range(1, n):
            # pop elements from the stack while the price at the top of the stack is less than or equal to the current price
while (len(stack) > 0 and a[stack[-1]] <= a[i]):
stack.pop()
#if stack becomes empty, then price[i] is greater than all elements on left of it in list so span is i+1.
#Else price[i] is greater than elements after value at top of stack.
if(len(stack) <= 0):
Span[i] = i + 1
else:
Span[i] = i - stack[-1]
#pushing this element to stack.
stack.append(i)
#returning the list.
return Span
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
if __name__ == '__main__':
test_cases = int(input())
for cases in range(test_cases) :
n = int(input())
a = list(map(int,input().strip().split()))
obj = Solution()
ans = obj.calculateSpan(a, n);
        print(*ans)  # print space-separated elements of the span array
# } Driver Code Ends
| 28.773585
| 117
| 0.593443
| 848
| 0.556066
| 0
| 0
| 82
| 0.05377
| 0
| 0
| 523
| 0.342951
|
ce1e9aca26ecdef56f6ff4c3c6d9a23230b8bd4f
| 2,768
|
py
|
Python
|
test/tests.py
|
jasedit/papers_base
|
af8aa6e9a164861ad7b44471ce543002fa7129d9
|
[
"MIT"
] | 8
|
2016-08-17T14:40:49.000Z
|
2020-03-05T00:08:07.000Z
|
test/tests.py
|
jasedit/scriptorium
|
af8aa6e9a164861ad7b44471ce543002fa7129d9
|
[
"MIT"
] | 35
|
2016-08-07T19:58:02.000Z
|
2021-05-09T10:08:06.000Z
|
test/tests.py
|
jasedit/scriptorium
|
af8aa6e9a164861ad7b44471ce543002fa7129d9
|
[
"MIT"
] | 2
|
2017-09-21T17:57:46.000Z
|
2019-06-30T13:06:21.000Z
|
#!python
# -*- coding: utf-8 -*-
"""Unit testing for scriptorium"""
import os
import tempfile
import shutil
import textwrap
import unittest
import scriptorium
class TestScriptorium(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Set up unit tests for scriptorium"""
TestScriptorium.template_dir = tempfile.mkdtemp()
TestScriptorium.paper_dir = tempfile.mkdtemp()
scriptorium.CONFIG['TEMPLATE_DIR'] = TestScriptorium.template_dir
scriptorium.install_template("https://github.com/jasedit/simple_templates.git")
@classmethod
def tearDownClass(cls):
"""Tear down unit test structure."""
shutil.rmtree(TestScriptorium.template_dir, ignore_errors=True)
shutil.rmtree(TestScriptorium.paper_dir, ignore_errors=True)
def testTemplates(self):
"""Test that template has been installed"""
self.assertEqual(TestScriptorium.template_dir, scriptorium.CONFIG['TEMPLATE_DIR'])
self.assertTrue(os.path.exists(os.path.join(TestScriptorium.template_dir, 'simple_templates')))
ex_tdir = os.path.join(scriptorium.CONFIG['TEMPLATE_DIR'], 'simple_templates', 'report')
self.assertEqual(scriptorium.find_template('report'), ex_tdir)
def testCreation(self):
"""Test simple paper creation."""
example_config = {
'author': 'John Doe',
'title': 'Example Report'
}
old_dir = os.getcwd()
os.chdir(TestScriptorium.paper_dir)
self.assertEqual(scriptorium.create('ex_report', 'report', config=example_config), set())
os.chdir('ex_report')
self.assertEqual(scriptorium.paper_root('.'), 'paper.mmd')
self.assertEqual(scriptorium.get_template('paper.mmd'), 'report')
example_text = textwrap.dedent("""\n
# Introduction
This is an example paper.
# Conclusion
This paper is awesome.
""")
with open('paper.mmd', 'a') as fp:
fp.write(example_text)
pdf_path = scriptorium.to_pdf('.')
self.assertTrue(os.path.exists(pdf_path))
os.chdir(old_dir)
def testConfigLoading(self):
"""Test saving and loading configuration."""
config = scriptorium.CONFIG.copy()
scriptorium.save_config()
scriptorium.read_config()
self.assertEqual(config, scriptorium.CONFIG)
def testConfiguration(self):
"""Test configuration option issues"""
test_template_dir = "~/.scriptorium"
scriptorium.CONFIG['TEMPLATE_DIR'] = test_template_dir
scriptorium.save_config()
scriptorium.read_config()
self.assertEqual(scriptorium.CONFIG['TEMPLATE_DIR'], os.path.expanduser(test_template_dir))
scriptorium.CONFIG['TEMPLATE_DIR'] = self.template_dir
if __name__ == '__main__':
unittest.main()
| 31.816092
| 101
| 0.688223
| 2,557
| 0.923772
| 0
| 0
| 570
| 0.205925
| 0
| 0
| 759
| 0.274205
|
ce1f98db217162180757b8a6044a17804f866924
| 4,794
|
py
|
Python
|
imblearn/combine/tests/test_smote_enn.py
|
themrzmaster/imbalanced-learn
|
e1be8695b22ca58aa5443057b9ae3f2885a45d60
|
[
"MIT"
] | 2
|
2019-09-14T23:23:35.000Z
|
2019-09-16T18:17:19.000Z
|
imblearn/combine/tests/test_smote_enn.py
|
themrzmaster/imbalanced-learn
|
e1be8695b22ca58aa5443057b9ae3f2885a45d60
|
[
"MIT"
] | null | null | null |
imblearn/combine/tests/test_smote_enn.py
|
themrzmaster/imbalanced-learn
|
e1be8695b22ca58aa5443057b9ae3f2885a45d60
|
[
"MIT"
] | 1
|
2021-04-23T04:46:10.000Z
|
2021-04-23T04:46:10.000Z
|
"""Test the module SMOTE ENN."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.utils.testing import assert_allclose, assert_array_equal
from imblearn.combine import SMOTEENN
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.over_sampling import SMOTE
RND_SEED = 0
X = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956,
-0.49283504], [-0.28162401, -2.10400981],
[0.83680821,
1.72827342], [0.3084254, 0.33299982], [0.70472253, -0.73309052],
[0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234]])
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
R_TOL = 1e-4
def test_sample_regular():
smote = SMOTEENN(random_state=RND_SEED)
X_resampled, y_resampled = smote.fit_resample(X, Y)
X_gt = np.array([[1.52091956, -0.49283504], [0.84976473, -0.15570176], [
0.61319159, -0.11571667
], [0.66052536, -0.28246518], [-0.28162401, -2.10400981],
[0.83680821, 1.72827342], [0.08711622, 0.93259929]])
y_gt = np.array([0, 0, 0, 0, 1, 1, 1])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_pass_smote_enn():
smote = SMOTEENN(
smote=SMOTE(sampling_strategy='auto', random_state=RND_SEED),
enn=EditedNearestNeighbours(sampling_strategy='all'),
random_state=RND_SEED)
X_resampled, y_resampled = smote.fit_resample(X, Y)
X_gt = np.array([[1.52091956, -0.49283504], [0.84976473, -0.15570176], [
0.61319159, -0.11571667
], [0.66052536, -0.28246518], [-0.28162401, -2.10400981],
[0.83680821, 1.72827342], [0.08711622, 0.93259929]])
y_gt = np.array([0, 0, 0, 0, 1, 1, 1])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_half():
sampling_strategy = {0: 10, 1: 12}
smote = SMOTEENN(
sampling_strategy=sampling_strategy, random_state=RND_SEED)
X_resampled, y_resampled = smote.fit_resample(X, Y)
X_gt = np.array([[1.52091956, -0.49283504], [-0.28162401, -2.10400981],
[0.83680821, 1.72827342], [0.08711622, 0.93259929]])
y_gt = np.array([0, 1, 1, 1])
assert_allclose(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_validate_estimator_init():
smote = SMOTE(random_state=RND_SEED)
enn = EditedNearestNeighbours(sampling_strategy='all')
smt = SMOTEENN(smote=smote, enn=enn, random_state=RND_SEED)
X_resampled, y_resampled = smt.fit_resample(X, Y)
X_gt = np.array([[1.52091956, -0.49283504], [0.84976473, -0.15570176], [
0.61319159, -0.11571667
], [0.66052536, -0.28246518], [-0.28162401, -2.10400981],
[0.83680821, 1.72827342], [0.08711622, 0.93259929]])
y_gt = np.array([0, 0, 0, 0, 1, 1, 1])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_validate_estimator_default():
smt = SMOTEENN(random_state=RND_SEED)
X_resampled, y_resampled = smt.fit_resample(X, Y)
X_gt = np.array([[1.52091956, -0.49283504], [0.84976473, -0.15570176], [
0.61319159, -0.11571667
], [0.66052536, -0.28246518], [-0.28162401, -2.10400981],
[0.83680821, 1.72827342], [0.08711622, 0.93259929]])
y_gt = np.array([0, 0, 0, 0, 1, 1, 1])
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_array_equal(y_resampled, y_gt)
def test_parallelisation():
# Check if default job count is 1
smt = SMOTEENN(random_state=RND_SEED)
smt._validate_estimator()
assert smt.n_jobs == 1
assert smt.smote_.n_jobs == 1
assert smt.enn_.n_jobs == 1
# Check if job count is set
smt = SMOTEENN(random_state=RND_SEED, n_jobs=8)
smt._validate_estimator()
assert smt.n_jobs == 8
assert smt.smote_.n_jobs == 8
assert smt.enn_.n_jobs == 8
@pytest.mark.parametrize(
"smote_params, err_msg",
[({'smote': 'rnd'}, "smote needs to be a SMOTE"),
({'enn': 'rnd'}, "enn needs to be an ")]
)
def test_error_wrong_object(smote_params, err_msg):
smt = SMOTEENN(**smote_params)
with pytest.raises(ValueError, match=err_msg):
smt.fit_resample(X, Y)
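The tests above drive SMOTEENN through fit_resample on a fixed array; as a minimal, self-contained usage sketch (the synthetic dataset and class weights below are illustrative, not taken from the test file), the same combined resampler can be exercised like this:
import numpy as np
from sklearn.datasets import make_classification
from imblearn.combine import SMOTEENN
# Build an imbalanced two-class toy problem (roughly 90% / 10%).
X, y = make_classification(n_samples=200, n_features=4, weights=[0.9, 0.1],
                           random_state=0)
# SMOTE first oversamples the minority class, then ENN removes noisy samples.
sampler = SMOTEENN(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
print("before:", np.bincount(y), "after:", np.bincount(y_res))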
| 38.047619
| 79
| 0.635378
| 0
| 0
| 0
| 0
| 325
| 0.067793
| 0
| 0
| 295
| 0.061535
|
ce21a48448d28f3cf598b5cbc7c2ecedcc9ebfb2
| 46,925
|
py
|
Python
|
tests/unittests/test_mock_network_plugin_public_nat.py
|
cloudify-cosmo/tosca-vcloud-plugin
|
c5196abd066ba5315b66911e5390b0ed6c15988f
|
[
"Apache-2.0"
] | 4
|
2015-02-25T12:39:01.000Z
|
2018-02-14T15:14:16.000Z
|
tests/unittests/test_mock_network_plugin_public_nat.py
|
cloudify-cosmo/tosca-vcloud-plugin
|
c5196abd066ba5315b66911e5390b0ed6c15988f
|
[
"Apache-2.0"
] | 45
|
2015-01-13T13:55:10.000Z
|
2020-02-04T15:06:15.000Z
|
tests/unittests/test_mock_network_plugin_public_nat.py
|
cloudify-cosmo/tosca-vcloud-plugin
|
c5196abd066ba5315b66911e5390b0ed6c15988f
|
[
"Apache-2.0"
] | 21
|
2015-01-21T17:17:18.000Z
|
2021-05-05T14:08:25.000Z
|
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify import exceptions as cfy_exc
from tests.unittests import test_mock_base
from vcloud_network_plugin import public_nat
from vcloud_network_plugin import utils
import vcloud_network_plugin
import vcloud_plugin_common
from IPy import IP
class NetworkPluginPublicNatMockTestCase(test_mock_base.TestBase):
def test_is_rule_exists(self):
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', '22', 'internal', '11', 'TCP'
)
# exist
self.assertTrue(
public_nat._is_rule_exists(
[rule_inlist], 'SNAT', 'external', '22', 'internal',
'11', 'TCP')
)
# not exist
self.assertFalse(
public_nat._is_rule_exists(
[rule_inlist], 'DNAT', 'external', '22', 'internal',
'11', 'UDP')
)
def test_get_original_port_for_delete(self):
# no replacement
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.1", "11"),
"11"
)
# replacement for other
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
"10.1.1.2:11": '12'
}
}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.1", "11"),
"11"
)
        # replacement exists for this address
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
"10.1.1.2:11": '12'
}
}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.2", "11"),
"12"
)
def test_get_original_port_for_create(self):
gateway = mock.Mock()
fake_ctx = self.generate_relation_context_with_current_ctx()
rule_inlist = self.generate_nat_rule(
'DNAT', 'external', 'any', 'internal', '11', 'TCP')
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
        # exception about using the same port
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'DNAT', 'external', 'any', 'internal',
'11', 'TCP'
)
        # everything is fine with a different port
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'DNAT', 'external', '12', 'internal',
'12', 'TCP'
),
12)
        # relink some port to another one
        # the port has not been used yet
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', 13, 'internal',
'12', 'TCP'),
13)
def test_get_original_port_for_create_with_ctx(self):
        # with replacement, but without a replacement table - bump the port by 1
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}
}
gateway = mock.Mock()
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', 10, 'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', '10', 'internal',
'11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
'external:10': 11
}
}
)
# same but without replacement at all
fake_ctx._target.instance.runtime_properties = {}
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', '10', 'internal',
'11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
'external:10': 11
}
}
)
        # we don't have enough ports
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', utils.MAX_PORT_NUMBER,
'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
fake_ctx._target.instance.runtime_properties = {}
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external',
utils.MAX_PORT_NUMBER, 'internal', '11', 'TCP'
)
def test_get_gateway_ip_range(self):
gate = mock.Mock()
# empty list of networks
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
        # another network exists
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
'test_network', '127.0.0.1', '127.0.0.255'
)
])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
        # the correct network exists
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'test_network'),
(IP('127.0.0.1'), IP('127.0.0.255'))
)
def test_obtain_public_ip(self):
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
gateway = mock.Mock()
fake_client = mock.Mock()
        # some ip exists for delete
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE
),
'192.168.1.1'
)
# no ip for delete
fake_ctx._target.instance.runtime_properties = {}
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE
)
        # unknown operation
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, 'unknow operation'
)
        # some public ip already exists
fake_ctx._target.node.properties = {
'nat': {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
}
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.CREATE
),
'192.168.1.1'
)
# no public ip yet
fake_ctx._target.node.properties = {
'nat': {}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1', '10.18.1.2'
])
rule_inlist = self.generate_nat_rule(
'DNAT', '10.18.1.1', 'any', 'internal', '11', 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(
return_value=[rule_inlist]
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway,
vcloud_network_plugin.CREATE
),
'10.18.1.2'
)
def test_get_network_ip_range(self):
        # don't have an ip range for this network
fake_client = self.generate_client()
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
fake_client.get_networks.assert_called_with("some_org")
# different network
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.1", end_ip="127.1.1.255"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
# correct network name
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some"
),
(IP('127.1.1.1'), IP('127.1.1.255'))
)
def test_create_ip_range(self):
# context
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._source.instance.runtime_properties = {
vcloud_network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org',
'vdc': 'some_vdc'
}
}
fake_ctx._target.instance.runtime_properties = {}
# vca client
fake_client = self.generate_client()
# gateway
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
# empty gateway dhcp pool
# vca pool: 127.1.1.100..127.1.1.200
self.assertEqual(
public_nat._create_ip_range(fake_ctx, fake_client, gate),
'127.1.1.100 - 127.1.1.200'
)
fake_client.get_networks.assert_called_with("some_vdc")
# network from gate
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
"some", '127.1.1.1', '127.1.1.255'
)
])
self.assertEqual(
public_nat._create_ip_range(fake_ctx, fake_client, gate),
'127.1.1.1 - 127.1.1.255'
)
# network not exist
network = self.generate_fake_client_network(
name="other", start_ip="127.1.1.100",
end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(
return_value=[network]
)
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._create_ip_range(fake_ctx, fake_client, gate)
def test_save_configuration(self):
def _context_for_delete(service_type):
"""
create correct context for delete
"""
fake_ctx = self.generate_relation_context_with_current_ctx()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4",
public_nat.PORT_REPLACEMENT: {
'127.0.0.1:10': '100'
},
vcloud_network_plugin.SSH_PORT: '23',
vcloud_network_plugin.SSH_PUBLIC_IP: '10.1.1.1'
}
properties = {
'vcloud_config': {
'edge_gateway': 'gateway',
'vdc': 'vdc',
'org': 'some_org'
}
}
if service_type:
properties['vcloud_config']['service_type'] = service_type
fake_ctx._source.node.properties = properties
return fake_ctx
def _ip_exist_in_runtime(fake_ctx):
"""
            ip still exists in ctx
"""
runtime_properties = fake_ctx._target.instance.runtime_properties
return vcloud_network_plugin.PUBLIC_IP in runtime_properties
fake_client = self.generate_client()
gateway = fake_client._vdc_gateway
        # can't save configuration: server busy
self.set_services_conf_result(
gateway, None
)
self.set_gateway_busy(gateway)
fake_ctx = self.generate_relation_context_with_current_ctx()
self.assertFalse(public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE,
"1.2.3.4"
))
# operation create
fake_ctx = self.generate_relation_context_with_current_ctx()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# success save configuration
with mock.patch('vcloud_plugin_common.ctx', fake_ctx):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE,
"1.2.3.4")
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4"
}
)
# delete - subscription service
fake_ctx = _context_for_delete(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - without service
fake_ctx = _context_for_delete(None)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand service - nat
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4"
}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand - not nat
gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
# import pdb;pdb.set_trace()
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
gateway.deallocate_public_ip.assert_called_with("1.2.3.4")
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertFalse(
public_nat.PORT_REPLACEMENT in runtime_properties
)
self.assertFalse(
vcloud_network_plugin.SSH_PORT in runtime_properties
)
self.assertFalse(
vcloud_network_plugin.SSH_PUBLIC_IP in runtime_properties
)
def test_nat_network_operation(self):
fake_client = self.generate_client()
fake_ctx = self.generate_relation_context_with_current_ctx()
gateway = fake_client._vdc_gateway
# used wrong operation
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway, "unknow", "DNAT", "1.2.3.4",
"2.3.4.5", "11", "11", "TCP"
)
# run correct operation/rule
for operation in [
vcloud_network_plugin.DELETE, vcloud_network_plugin.CREATE
]:
for rule_type in ["SNAT", "DNAT"]:
# cleanup properties
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
fake_ctx._source.instance.runtime_properties = {}
# checks
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway, operation,
rule_type,
"1.2.3.4", "2.3.4.5", "11", "11", "TCP"
)
if rule_type == "DNAT":
if operation == vcloud_network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
gateway.add_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
if operation == vcloud_network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
else:
gateway.add_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
# cleanup properties
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
fake_ctx._source.instance.runtime_properties = {}
# save ssh port
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway,
vcloud_network_plugin.CREATE,
"DNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP"
)
self.assertEqual(
{'port_replacement': {'1.2.3.4:43': 43}},
fake_ctx._target.instance.runtime_properties
)
self.assertEqual(
{'ssh_port': '43', 'ssh_public_ip': '1.2.3.4'},
fake_ctx._source.instance.runtime_properties
)
# error with type
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway,
vcloud_network_plugin.CREATE,
"QNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP"
)
def generate_client_and_context_server(self, no_vmip=False):
"""
        Helper for tests of prepare_server_operation-based operations.
"""
vm_ip = '1.1.1.1' if not no_vmip else None
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': vm_ip
}])
self.set_network_routed_in_client(fake_client)
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
return fake_client, fake_ctx
def test_prepare_server_operation(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
# no rules for update
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
# public ip equal to None in node properties
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: None
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
        # we don't have a connected private ip
fake_client, fake_ctx = self.generate_client_and_context_server(
no_vmip=True
)
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
# with some rules
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', '11', '1.1.1.1', '11', 'TCP'
)
# with default value
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
# with SNAT rules
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{'type': 'SNAT'}, {'type': 'SNAT'}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'SNAT', '1.1.1.1', 'any', '192.168.1.1', 'any', 'any'
)
def generate_client_and_context_network(self):
"""
        Helper for tests of prepare_network_operation-based operations.
"""
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': '1.1.1.1'
}])
self.set_network_routed_in_client(fake_client)
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# ctx
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._source.instance.runtime_properties = {
vcloud_network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org',
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
return fake_client, fake_ctx
def test_prepare_network_operation(self):
# no rules
fake_client, fake_ctx = self.generate_client_and_context_network()
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
# public ip equal to None in node properties
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: None
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
# rules with default values
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
def test_creation_validation(self):
fake_client = self.generate_client()
# no nat
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no gateway
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'nat': {
'some_field': 'something'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong ip
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: 'any'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no free ip
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no rules
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong protocol
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "some"
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong original_port
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 'some'
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong original_port
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 'some'
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# fine
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 12
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
def _server_disconnect_to_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
return fake_client, fake_ctx
def test_server_disconnect_from_nat(self):
# successful
fake_client, fake_ctx = self._server_disconnect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_disconnect_from_nat(ctx=fake_ctx,
vca_client=None)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
# check retry
fake_client, fake_ctx = self._server_disconnect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.server_disconnect_from_nat(ctx=fake_ctx,
vca_client=None)
self.check_retry_realy_called(fake_ctx)
def _server_connect_to_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(
return_value=['10.18.1.1']
)
return fake_client, fake_ctx
def test_server_connect_to_nat(self):
fake_client, fake_ctx = self._server_connect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_connect_to_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '1.1.1.1', 'any', 'any'
)
fake_client, fake_ctx = self._server_connect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.server_connect_to_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def _net_disconnect_from_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
return fake_client, fake_ctx
def test_net_disconnect_from_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx,
vca_client=fake_client)
# no external
fake_client, fake_ctx = self._net_disconnect_from_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
# retry check
fake_client, fake_ctx = self._net_disconnect_from_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.net_disconnect_from_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def test_net_connect_to_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
# no external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1'
])
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
# retry check
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def test_net_connect_to_nat_preconfigure(self):
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'SNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
# empty rules
fake_ctx._target.node.properties.update({'rules': []})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
if __name__ == '__main__':
unittest.main()
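Every test in this module follows the same two idioms: configure a Mock gateway and assert on the exact call arguments, and temporarily swap a module attribute with mock.patch. A stripped-down, self-contained sketch of those idioms (the patched target below is a stand-in, not the plugin module):
import os
import mock
# 1) Configure a mock and verify the exact arguments it was called with.
gateway = mock.Mock()
gateway.del_nat_rule('DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any')
gateway.del_nat_rule.assert_called_with(
    'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any')
# 2) mock.patch replaces an attribute only for the duration of the with-block.
with mock.patch('os.getcwd', mock.MagicMock(return_value='/fake')):
    assert os.getcwd() == '/fake'
assert os.getcwd() != '/fake'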
| 36.919748
| 79
| 0.542248
| 45,979
| 0.97984
| 0
| 0
| 0
| 0
| 0
| 0
| 8,553
| 0.18227
|
ce21d57f1cc21cb2e5990bffc69d3403f42d2835
| 519
|
py
|
Python
|
Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py
|
Matieljimenez/Algoritmos_y_programacion
|
cdc381478581e6842c6672d4840dd948833c4ec7
|
[
"MIT"
] | null | null | null |
Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py
|
Matieljimenez/Algoritmos_y_programacion
|
cdc381478581e6842c6672d4840dd948833c4ec7
|
[
"MIT"
] | null | null | null |
Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py
|
Matieljimenez/Algoritmos_y_programacion
|
cdc381478581e6842c6672d4840dd948833c4ec7
|
[
"MIT"
] | null | null | null |
"""
Entradas
monto de dinero presupuestal-->float-->a
Salidas
dinero correspondiente para ginecologia-->float-->b
dinero correspondiente para traumatologia-->float-->c
dinero correspondiente para pediatria-->float-->d
"""
a=float(input("Presupuesto anual al Hospital rural "))
b=a*0.40
c=a*0.30
d=a*0.30
print("El presupuesto del hospital rural para ginecología es: "+str(b))
print("El presupuesto del hospital rural para traumatología es: "+str(c))
print("El presupuesto del hospital rural para pediatría es: "+str(d))
| 34.6
| 73
| 0.759152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 433
| 0.829502
|
ce2288a47d9c672cc8785e5719f15a00192e23e2
| 5,926
|
py
|
Python
|
tools/generate_things/generate_navigation.py
|
akalenuk/wordsandbuttons
|
c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91
|
[
"Unlicense"
] | 367
|
2018-01-29T17:45:00.000Z
|
2022-03-08T03:50:52.000Z
|
tools/generate_things/generate_navigation.py
|
akalenuk/wordsandbuttons
|
c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91
|
[
"Unlicense"
] | 9
|
2017-12-21T16:48:08.000Z
|
2021-01-23T17:20:20.000Z
|
tools/generate_things/generate_navigation.py
|
akalenuk/wordsandbuttons
|
c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91
|
[
"Unlicense"
] | 20
|
2018-02-18T11:52:36.000Z
|
2021-11-22T09:46:53.000Z
|
import os
import subprocess
PAGES_DIR = "../../pages"
keyword_note = {
'tutorials': '',
'demos': '',
'quizzes': '',
'mathematics': '',
'algorithms': '',
'programming': 'By the way, if you prefer books to blogs, <a href="https://wordsandbuttons.online/SYTYKC.pdf">there is a free book</a> that was originally made from this section.'
}
index_title = 'Hello, world!'
index_description = 'This is <i>Words and Buttons Online</i> — a growing collection of interactive tutorials, demos, and quizzes about maths, algorithms, and programming.'
all_span_ids = []
def read_index_spans(path):
global all_span_ids
index_spans = []
for file_name in os.listdir(path):
if os.path.isfile(path + '/' + file_name):
if file_name.endswith('.html'):
html = open(path + '/' + file_name, 'r')
text = html.read()
html.close()
spans = text.split('<span id="index_')
if spans != []:
spans = spans[1:]
Spans = text.split('<Span id="index_')
if Spans != []:
Spans = Spans[1:]
span_ids = ['index_' + s.split('"')[0] for s in spans]
span_titles = [s.split('>')[1].split('<')[0].lower() for s in spans]
span_ids += ['index_' + s.split('"')[0] for s in Spans]
span_titles += [s.split('>')[1].split('<')[0] for s in Spans]
for i in range(0, len(span_ids)):
index_spans += [ (file_name, span_ids[i], span_titles[i]) ]
for span_id in span_ids:
if span_id in all_span_ids:
print('Duplicated index span id: ' + span_id + " in " + file_name)
all_span_ids += [span_id]
return index_spans
date_link_title_description_keywords = []
all_keywords = set()
for filename in os.listdir(PAGES_DIR):
if filename == 'index.html':
continue
if filename == 'faq.html':
continue
if filename.endswith(".html"):
f = open(PAGES_DIR + "/" + filename, 'rt')
content = f.read()
        f.close()
if content.find("meta name=\"keywords\"") == -1:
continue
date_from_git = subprocess.run(["git", "log", "--reverse", "--date=iso", "--format=%cd", "--", filename], \
cwd=PAGES_DIR, \
stdout=subprocess.PIPE)
full_date = date_from_git.stdout.decode('utf-8')
date = full_date.split(' ')[0]
title = content.split("<title>")[1].split("</title>")[0]
description = content.split('<meta name="description" content="')[1].split('">')[0]
keywords = content.split('<meta name="keywords" content="')[1].split('">')[0].split(', ')
if keywords[0] == "":
continue
date_link_title_description_keywords += [(date, filename, title, description, keywords)]
all_keywords.update(keywords)
date_link_title_description_keywords.sort()
# index
f = open('index.template')
template = f.read()
f.close()
index = '%s' % template
f = open('links.txt')
links = f.readlines()
f.close()
links_html = '<h1>More interactive learning</h1>'
for link in links:
if link.strip().find(' ') != -1:
url = link.split(' ')[0]
title_chunks = link.split(' ')[1:]
title = title_chunks[0]
for chunk in title_chunks[1:]: # no hanging short words
if len(chunk) < 2:
title += ' ' + chunk
else:
title += ' ' + chunk
links_html += '<p style="margin-bottom: 12pt;">'+title+'<br><a href="'+url+'">'+url+'</a></p>\n'
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
# the index is now a real index, not a timeline
the_index = '<h1 title="A real index on index.html! How cool is that!">Index</h1>'
spans = read_index_spans(PAGES_DIR)
cur_letter = ''
for (f, i, t) in sorted(spans, key = lambda fit: fit[2].upper()):
letter = t[0].upper()
if cur_letter != letter:
if cur_letter != '':
the_index += '</p>\n'
the_index += '<h2>'+letter+'</h2>\n'
the_index += '<p class="index_items">\n'
cur_letter = letter
the_index += '<nobr><a style="padding-right: 24pt;" href="' + f + '#' + i + '">' + t + '</a></nobr>\n'
the_index += '</p>\n'
index = index.replace('<h1>Title</h1>', '<h1>' + index_title + '</h1>')
index = index.replace('<p>Description</p>', '<p style="width: 555pt;">' + index_description + '</p>')
index = index.replace('<div id="menu"></div>', '\n' + menu + '\n')
index = index.replace('<p>Note</p>', '')
index = index.replace('<div id="timeline"></div>', '\n' + the_index + '\n')
index = index.replace('<div id="links"></div>', '\n' + links_html + '\n')
f = open('../../pages/' + 'index.html', 'w')
f.write(index)
f.close()
# tag's all_* pages
for title in list(all_keywords):
page = '%s' % template
timeline = ''
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
if kw == title:
menu += '<nobr><span style="padding-right: 4pt; color: #999;">#' + kw + '</span></nobr> '
else:
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
for (d, l, t, desc, kwds) in date_link_title_description_keywords[::-1]:
if not title in kwds:
continue
timeline += '<p class="title">' + '<a href="' + l + '">' + t + '</a></p>\n'
timeline += '<p class="description">' + desc + '</p>\n'
timeline += '<p class="links">'
for kw in sorted(list(kwds)):
if kw == title:
timeline += '<span style="padding-right: 8pt; color: #999;">#' + kw + '</span> '
else:
timeline += '<a style="padding-right: 8pt;" href="all_' + kw + '.html">#' + kw + '</a> '
timeline += '</p>\n'
page = page.replace('<h1>Title</h1>', '<h1><a href="index.html">Words and Buttons</a>: ' + title + '</h1>')
page = page.replace('<p>Description</p>', '')
page = page.replace('<div id="menu"></div>', '\n' + menu + '\n')
page = page.replace('<p>Note</p>', '<p style="width: 555pt;">' + keyword_note[title] + '</p>')
page = page.replace('<div id="timeline"></div>', '\n' + timeline + '\n')
page = page.replace('<div id="links"></div>', '')
f = open('../../pages/all_' + title + '.html', 'w')
f.write(page)
    f.close()
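The index extraction above works by splitting page text on the literal marker '<span id="index_'; a self-contained sketch of that parsing step on an in-memory snippet (the snippet is made up for illustration) behaves like this:
text = ('<p>Intro</p>'
        '<span id="index_vectors">Vectors</span> and '
        '<span id="index_dot_product">Dot product</span>')
spans = text.split('<span id="index_')[1:]
span_ids = ['index_' + s.split('"')[0] for s in spans]
span_titles = [s.split('>')[1].split('<')[0].lower() for s in spans]
print(span_ids)     # ['index_vectors', 'index_dot_product']
print(span_titles)  # ['vectors', 'dot product']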
| 35.48503
| 179
| 0.602599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,244
| 0.378287
|
ce25004f312bc46b4d6a3d278373562bc87e4202
| 316
|
py
|
Python
|
apps/listings/migrations/0002_remove_post_author.py
|
favours-io/favours
|
6f26a207d2684e752857aa21e5fafa607a4707e6
|
[
"MIT"
] | 11
|
2020-07-23T19:07:32.000Z
|
2021-11-18T17:16:29.000Z
|
apps/listings/migrations/0002_remove_post_author.py
|
favours-io/favours
|
6f26a207d2684e752857aa21e5fafa607a4707e6
|
[
"MIT"
] | 16
|
2020-08-29T01:57:05.000Z
|
2022-01-13T03:16:41.000Z
|
apps/listings/migrations/0002_remove_post_author.py
|
favours-io/favours
|
6f26a207d2684e752857aa21e5fafa607a4707e6
|
[
"MIT"
] | 4
|
2020-09-18T18:40:12.000Z
|
2021-11-09T06:36:36.000Z
|
# Generated by Django 3.0.7 on 2020-09-22 05:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('listings', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='author',
),
]
| 17.555556
| 47
| 0.575949
| 231
| 0.731013
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.268987
|
ce2713a447d11afd7d04a70a5793ef6b8c8b2009
| 303
|
py
|
Python
|
venv/Lib/site-packages/bootstrap4/widgets.py
|
HRangelov/gallery
|
3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f
|
[
"MIT"
] | 3
|
2021-02-02T11:13:15.000Z
|
2021-02-10T07:26:10.000Z
|
venv/Lib/site-packages/bootstrap4/widgets.py
|
HRangelov/gallery
|
3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f
|
[
"MIT"
] | 3
|
2021-03-30T14:15:20.000Z
|
2021-09-22T19:31:57.000Z
|
cypher_venv/Lib/site-packages/bootstrap4/widgets.py
|
FrancisLangit/cypher
|
4921e2f53ef8154ad63ff4de7f8068b27f29f485
|
[
"MIT"
] | null | null | null |
from django.forms import RadioSelect
class RadioSelectButtonGroup(RadioSelect):
"""
This widget renders a Bootstrap 4 set of buttons horizontally instead of typical radio buttons.
Much more mobile friendly.
"""
template_name = "bootstrap4/widgets/radio_select_button_group.html"
| 25.25
| 99
| 0.762376
| 263
| 0.867987
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.640264
|
ce2810e264659103f1cf2c4c793eb498a673a023
| 2,990
|
py
|
Python
|
workflower/services/workflow/loader.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
workflower/services/workflow/loader.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
workflower/services/workflow/loader.py
|
dmenezesgabriel/workflower
|
db2358abdd2d133b85baea726e013e71171e5cf3
|
[
"MIT"
] | null | null | null |
import logging
import os
import traceback
from typing import List
from workflower.adapters.sqlalchemy.setup import Session
from workflower.adapters.sqlalchemy.unit_of_work import SqlAlchemyUnitOfWork
from workflower.application.event.commands import CreateEventCommand
from workflower.application.workflow.commands import (
ActivateWorkflowCommand,
LoadWorkflowFromYamlFileCommand,
SetWorkflowTriggerCommand,
)
from workflower.domain.entities.workflow import Workflow
logger = logging.getLogger("workflower.loader")
class WorkflowLoaderService:
def __init__(self) -> None:
self._workflows = None
@property
def workflows(self) -> List[Workflow]:
return self._workflows
def load_one_workflow_file(self, path: str, trigger: str = "on_schedule"):
"""
Load one workflow from file.
Args:
- path (str): workflow file path
- trigger (str): expects "on_schedule" or "on_demand".
"""
session = Session()
uow = SqlAlchemyUnitOfWork(session)
# TODO
# Add strategy pattern
command = LoadWorkflowFromYamlFileCommand(uow, path)
workflow = None
try:
workflow = command.execute()
except Exception:
logger.error(f"Error loading {path}:" f" {traceback.format_exc()}")
create_event_command = CreateEventCommand(
uow,
model="workflow",
model_id=None,
name="workflow_load_error",
exception=traceback.format_exc(),
)
create_event_command.execute()
if workflow:
set_trigger_command = SetWorkflowTriggerCommand(
uow, workflow.id, trigger
)
set_trigger_command.execute()
activate_Workflow_command = ActivateWorkflowCommand(
uow, workflow.id
)
activate_Workflow_command.execute()
return workflow
def load_all_from_dir(
self, path: str, trigger: str = "on_schedule"
) -> List[Workflow]:
"""
Load all workflow files from a given directory
Args:
- path (str): workflows file path
- trigger (str): expects "on_schedule" or "on_demand".
"""
self._workflows = []
logger.info(f"Loading Workflows from directory: {path}")
counter = 0
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".yml") or file.endswith(".yaml"):
workflow_path = os.path.join(root, file)
workflow = self.load_one_workflow_file(
workflow_path, trigger=trigger
)
if workflow:
self._workflows.append(workflow)
counter += 1
logger.info(f"Workflows Loaded {counter}")
return self._workflows
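A short usage sketch of the service (the module path is inferred from the file location and the directory is illustrative):
from workflower.services.workflow.loader import WorkflowLoaderService
# Load every .yml/.yaml workflow found under the directory and activate it.
loader = WorkflowLoaderService()
workflows = loader.load_all_from_dir("/opt/workflows", trigger="on_schedule")
for workflow in workflows:
    print(workflow.id)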
| 32.5
| 79
| 0.596321
| 2,457
| 0.821739
| 0
| 0
| 83
| 0.027759
| 0
| 0
| 619
| 0.207023
|
ce281d8807b114456a5700d5486fb898099afb81
| 2,492
|
py
|
Python
|
setup.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | 2
|
2015-12-18T05:28:02.000Z
|
2018-05-24T04:18:26.000Z
|
setup.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | 11
|
2016-08-27T22:05:18.000Z
|
2021-12-13T19:41:44.000Z
|
setup.py
|
neuroticnerd/dragoncon-bot
|
44c4d96743cf11ea0e8eaa567100e42afa4de565
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import os
import re
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
PROJECT_MODULE = 'dragonite'
PROJECT = 'dragonite'
AUTHOR = 'Bryce Eggleton'
EMAIL = 'eggleton.bryce@gmail.com'
DESC = 'Dragon Con command line utility'
LONG_DESC = ''
KEYWORDS = ('dragonite', 'dragoncon', 'dragon', 'con')
URL = "https://github.com/neuroticnerd/dragoncon-bot"
REQUIRES = []
EXTRAS = {
'dev': (
'flake8 >= 2.5.0',
'twine >= 1.8.1',
'pytest >= 2.8.4',
'coverage >= 4.0.3',
),
# 'caching': (
# 'redis>=2.10.3',
# 'hiredis>=0.2.0',
# ),
}
SCRIPTS = {
"console_scripts": [
'dragonite = dragonite.cli:dragonite',
]}
LICENSE = 'Apache License, Version 2.0'
VERSION = ''
CLASSIFIERS = [
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
]
version_file = os.path.join(here, '{0}/__init__.py'.format(PROJECT_MODULE))
ver_find = r'^\s*__version__\s*=\s*[\"\'](.*)[\"\']$'
with io.open(version_file, 'r', encoding='utf-8') as ver_file:
VERSION = re.search(ver_find, ver_file.read(), re.MULTILINE).group(1)
readme_file = os.path.join(here, 'README.rst')
with io.open(readme_file, 'r', encoding='utf-8') as f:
LONG_DESC = f.read()
requirements_file = os.path.join(here, 'requirements.txt')
with io.open(requirements_file, 'r') as reqs_file:
for rawline in reqs_file:
line = rawline.strip()
if line.startswith('http'):
continue
REQUIRES.append(' >= '.join(line.split('==')))
if __name__ == '__main__':
setup(
name=PROJECT,
version=VERSION,
packages=find_packages(include=[PROJECT_MODULE + '*']),
author=AUTHOR,
author_email=EMAIL,
url=URL,
description=DESC,
long_description=LONG_DESC,
classifiers=CLASSIFIERS,
platforms=('any',),
license=LICENSE,
keywords=KEYWORDS,
install_requires=REQUIRES,
extras_require=EXTRAS,
entry_points=SCRIPTS,
)
| 28
| 75
| 0.617978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 940
| 0.377207
|
ce282a6ed0fc710a4b6a368e5d2307c23cfaf901
| 3,427
|
py
|
Python
|
backend/api.py
|
RuiL1904/Hackathon
|
94eed04b2fa3fb48b3479045a0b279b0217744fb
|
[
"MIT"
] | 5
|
2022-02-20T12:59:19.000Z
|
2022-02-20T17:30:49.000Z
|
backend/api.py
|
RuiL1904/Hackathon
|
94eed04b2fa3fb48b3479045a0b279b0217744fb
|
[
"MIT"
] | null | null | null |
backend/api.py
|
RuiL1904/Hackathon
|
94eed04b2fa3fb48b3479045a0b279b0217744fb
|
[
"MIT"
] | 1
|
2022-03-08T20:21:03.000Z
|
2022-03-08T20:21:03.000Z
|
import random
import database
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
# Instantiate FastAPI
app = FastAPI()
# Whitelist origins
app.add_middleware(
CORSMiddleware,
allow_origins = ["*"],
allow_credentials = True,
allow_methods = ["*"],
allow_headers = ["*"]
)
# POST
@app.post('/api/create', response_description = "Add new schedule")
async def create(req: Request) -> dict:
json = await req.json()
id = database.add_schedule(json)
return {"id": id}
# GET
@app.get('/api/{id}/tags', response_description = "Get tags associated with given id")
async def tags(id: str) -> list:
data = database.get_schedule(id)
tags = []
for elem in data:
tags += elem['tags']
    tags = list(set(tags))  # remove duplicates; keep the declared list return type
return tags
# POST
@app.post('/api/{id}/schedule', response_description = "Get best schedule associated with given id and user chosen tags")
async def schedule(req: Request, id: str) -> list:
schedule = database.get_schedule(id)
tags = await req.json()
to_remove = []
colided = check_colide(schedule) # Returns a list of tuples containing events happening at the same time
for event in colided:
h1 = schedule[event[0]]
h1_sum = 0.0
h1_tags = 0
h2 = schedule[event[1]]
h2_sum = 0.0
h2_tags = 0
for tag in h1.get('tags', []):
h1_tags += 1
h1_sum += int(tags[tag])
for tag in h2.get('tags', []):
h2_tags += 1
h2_sum += int(tags[tag])
if h1_tags != 0:
h1_sum = h1_sum / h1_tags
if h1_sum == 0:
to_remove.append(h1)
if h2_tags != 0:
h2_sum = h2_sum / h2_tags
if h2_sum == 0:
to_remove.append(h2)
h1_len = len(h1.get('tags', []))
h2_len = len(h2.get('tags', []))
if (h1_sum > h2_sum) and (h2_len > 0):
to_remove.append(h2)
elif (h1_sum < h2_sum) and (h1_len > 0):
to_remove.append(h1)
elif (h1_sum == h2_sum) and (h1_len > 0) and (h2_len > 0):
            # choose one of the two colliding events at random and remove it
if (random.randint(0,1)) == 0:
to_remove.append(h1)
else:
to_remove.append(h2)
for elem in to_remove:
if elem in schedule:
schedule.remove(elem)
return schedule
# Checks for colliding events inside the main schedule
def check_colide(schedule: list) -> list:
colided = []
for i in range(len(schedule)):
for j in range(i + 1, len(schedule)):
if (check_colide_aux(schedule[i], schedule[j])):
colided.append((i,j))
return colided
def check_colide_aux(h1, h2) -> bool:
start1 = h1['date_start']
end1 = h1['date_end']
start2 = h2['date_start']
end2 = h2['date_end']
if start1 == start2 and end1 == end2:
return True
if start1 < start2 and end1 > start2:
return True
if start1 > start2 and end1 < end2:
return True
if start1 < start2 and end1 > start2:
return True
if start1 > start2 and end1 < end2:
return True
return False
if __name__ == "__main__":
uvicorn.run("api:app", host = "0.0.0.0", port = 8000, reload = True)
| 26.160305
| 121
| 0.569594
| 0
| 0
| 0
| 0
| 2,139
| 0.624161
| 1,862
| 0.543332
| 523
| 0.152612
|
ce282fdf98dc253cf62921347890761e924022a6
| 1,211
|
py
|
Python
|
lfs/portlet/models/pages.py
|
zhammami/django-lfs
|
b921295e71fe827377a67b5e7ae1a8bf7f72a1e6
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/portlet/models/pages.py
|
zhammami/django-lfs
|
b921295e71fe827377a67b5e7ae1a8bf7f72a1e6
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/portlet/models/pages.py
|
zhammami/django-lfs
|
b921295e71fe827377a67b5e7ae1a8bf7f72a1e6
|
[
"BSD-3-Clause"
] | null | null | null |
# django imports
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.template.loader import render_to_string
# portlets imports
from portlets.models import Portlet
# lfs imports
from lfs.page.models import Page
class PagesPortlet(Portlet):
"""Portlet to display pages.
"""
class Meta:
app_label = 'portlet'
def __unicode__(self):
return u"%s" % self.id
def render(self, context):
"""Renders the portlet as html.
"""
request = context.get("request")
cache_key = "%s-pages" % settings.CACHE_MIDDLEWARE_KEY_PREFIX
pages = cache.get(cache_key)
if pages is None:
pages = Page.objects.filter(active=True, exclude_from_navigation=False)
cache.set(cache_key, pages)
return render_to_string("lfs/portlets/pages.html", request=request, context={
"title": self.title,
"pages": pages,
})
def form(self, **kwargs):
return PagesForm(instance=self, **kwargs)
class PagesForm(forms.ModelForm):
"""Form for the PagesPortlet.
"""
class Meta:
model = PagesPortlet
exclude = ()
| 24.714286
| 85
| 0.641618
| 938
| 0.774566
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.194055
|
ce2b25ff23e864e881234a2380df580d2b3d114d
| 829
|
py
|
Python
|
feed/models.py
|
kassupto007/photo-sharing-app
|
97ed237815134fd3d53431be348a050c505db499
|
[
"Apache-2.0"
] | null | null | null |
feed/models.py
|
kassupto007/photo-sharing-app
|
97ed237815134fd3d53431be348a050c505db499
|
[
"Apache-2.0"
] | null | null | null |
feed/models.py
|
kassupto007/photo-sharing-app
|
97ed237815134fd3d53431be348a050c505db499
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.db import models
from django.utils import timezone
from users.models import Profile
class Post(models.Model):
description = models.CharField(max_length=255)
picture = models.ImageField(upload_to='posts', blank=True)
date_posted = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
def __str__(self):
return self.description
class Comment(models.Model):
post = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='comments', on_delete=models.CASCADE)
comment = models.CharField(max_length=255)
comment_date = models.DateTimeField(auto_now_add=True, auto_now=False)
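A small ORM sketch of creating a post with a comment (the user below is illustrative and assumes the default user model):
from django.contrib.auth import get_user_model
from feed.models import Post, Comment
user = get_user_model().objects.create_user(username="alice", password="secret")
post = Post.objects.create(description="First photo", owner=user)
Comment.objects.create(post=post, owner=user, comment="Nice shot!")
print(post.comments.count())  # related_name='comments' on Comment.post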
| 37.681818
| 106
| 0.77684
| 694
| 0.837153
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.032569
|
ce2ba9ff2aa3d5ef4daa942e79661e4a012dddf3
| 2,168
|
py
|
Python
|
zampol/osoba/admin.py
|
VadymRud/zampolit
|
80bbd5dc197041c3595831a8d0ddae130e10418c
|
[
"Apache-2.0"
] | null | null | null |
zampol/osoba/admin.py
|
VadymRud/zampolit
|
80bbd5dc197041c3595831a8d0ddae130e10418c
|
[
"Apache-2.0"
] | null | null | null |
zampol/osoba/admin.py
|
VadymRud/zampolit
|
80bbd5dc197041c3595831a8d0ddae130e10418c
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.db import models
from django.utils.translation import gettext as _
from .models import (MilitaryRank, Platoon, ServiseID, Unit, OfficialPosition, Company,
Education, Creed, Nationality, Command)
from osoba.widgets import CustomDatePickerInput
class ServiseIDAdmin(admin.ModelAdmin):
fieldsets = (
# (None, {
# 'fields': ('field1', 'field2', 'field3')
# }),
(_('Main data'), {
'fields': ('name', 'sename', 'third_name', 'birth_date')
}),
        (_('Names in accs'), {  # dative case
'fields': ('name_accs', 'sename_accs', 'third_name_accs')
}),
(_('Company'), {
'fields': ('military_ranks', )
}),
(_('Info for Service begin'), {
'fields': ('military_office', 'date_of_conscription', 'order_date', 'order_number')
}),
(_('General information'), {
'fields': ('orphan',
'married', 'halforphan', 'work', 'mobilization', 'driveid', 'creed',
'nationality', 'education', 'blood_type', 'rh')
}),
(_('militaryID'), {
'fields': ('militaryID_seria', 'militaryID_number', 'who_militaryID',
'militaryID_date', 'weapon', 'military_rank_id', 'military_rank_date')
}),
(_('ID'), {
'fields': ('ID_seria', 'ID_number', 'who_ID',
'ID_date', 'ipn')
}),
(_('Address'), {
'fields': ('addres_pr', 'addres_fact')
}),
(_('Images'), {
'fields': ('image_face3x4',)
})
)
change_form_template = 'admin/ocoba_change_form.html'
# formfield_overrides = {
# models.DateField: {'widget': MonthPickerInput}
# }
admin.site.register(Company)
admin.site.register(MilitaryRank)
admin.site.register(Platoon)
admin.site.register(ServiseID, ServiseIDAdmin)
admin.site.register(Unit)
admin.site.register(OfficialPosition)
admin.site.register(Creed)
admin.site.register(Nationality)
admin.site.register(Education)
admin.site.register(Command)
| 31.882353
| 95
| 0.571494
| 1,549
| 0.708924
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.394508
|
ce2c3b1def15247a90a747a7d6db93245d2f364a
| 725
|
py
|
Python
|
python/src/problem/leetcode/easy/leetcode_189.py
|
yipwinghong/Algorithm
|
e594df043c9d965dbfbd958554e88c533c844a45
|
[
"MIT"
] | 9
|
2019-10-31T16:58:31.000Z
|
2022-02-08T08:42:30.000Z
|
python/src/problem/leetcode/easy/leetcode_189.py
|
yipwinghong/Algorithm
|
e594df043c9d965dbfbd958554e88c533c844a45
|
[
"MIT"
] | null | null | null |
python/src/problem/leetcode/easy/leetcode_189.py
|
yipwinghong/Algorithm
|
e594df043c9d965dbfbd958554e88c533c844a45
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from typing import List
class Solution:
"""
    Rotate Array (LeetCode 189)
"""
def reverse(self, nums: List[int], i: int, j: int) -> None:
"""
:param nums:
:param i:
:param j:
:return:
"""
while i < j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j -= 1
def rotate(self, nums: List[int], k: int) -> None:
"""
Time: O(n), Space: O(1)
:param nums:
:param k:
:return:
"""
if not nums or k <= 0:
return
n, m = len(nums), k % len(nums)
self.reverse(nums, 0, n - 1)
self.reverse(nums, 0, m - 1)
self.reverse(nums, m, n - 1)
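# Minimal usage sketch of the triple-reverse rotation above (illustrative only):
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)  # -> [5, 6, 7, 1, 2, 3, 4]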
| 18.589744
| 63
| 0.411034
| 690
| 0.941337
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.3206
|
ce2e1eeb2d14e83f19c6e30702d48f326de87b43
| 931
|
py
|
Python
|
brainmix_register/display/display.py
|
ThunderShiviah/brainmix-register
|
fd42445ed2649ae8bdbb3c3e653adc4465190052
|
[
"MIT",
"Unlicense"
] | 4
|
2015-07-10T01:13:43.000Z
|
2018-07-08T09:05:05.000Z
|
brainmix_register/display/display.py
|
ThunderShiviah/brainmix-register
|
fd42445ed2649ae8bdbb3c3e653adc4465190052
|
[
"MIT",
"Unlicense"
] | 3
|
2015-04-08T17:51:36.000Z
|
2015-06-01T04:19:33.000Z
|
brainmix_register/display/display.py
|
ThunderShiviah/brainmix_register
|
fd42445ed2649ae8bdbb3c3e653adc4465190052
|
[
"MIT",
"Unlicense"
] | null | null | null |
import sys, os, glob
from skimage import io
from skimage import viewer
import registration as reg
from skimage import data
def display():
pass
if __name__ == "__main__":
# ------------------Create input ndarray------------------------
inputDir = '../data/test/'
imageFiles = glob.glob(os.path.join(inputDir, '*.jpg'))
imageVolume = io.ImageCollection(imageFiles, as_grey=True).concatenate()
stack = imageVolume
# ------------------Check that single image registration works----
src = stack[0]
dst = stack[1]
reg_dst = reg.reg(src, dst)
# ------------- Check that stack registration works -----------
reg_stack = reg.registration(stack)
merged = [reg.overlay_pics(stack[0], img) for img in stack]
merged_reg = [reg.overlay_pics(reg_stack[0], img) for img in reg_stack]
image = data.coins()
    # Avoid shadowing the imported `viewer` module with the viewer instance.
    collection_viewer = viewer.CollectionViewer(merged_reg)
    collection_viewer.show()
| 25.861111
| 76
| 0.61869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.241676
|
ce30447567aca3b3740596e2dcf70ae66968d0b3
| 1,605
|
py
|
Python
|
lib/datasets/LFW2G.py
|
blacknwhite5/facial-anonymizer
|
48878f0b704cc9203b6e13b962f0b53cecae78c6
|
[
"MIT"
] | 10
|
2019-04-18T03:30:55.000Z
|
2021-04-03T22:51:50.000Z
|
lib/datasets/LFW2G.py
|
blacknwhite5/facial-anonymizer
|
48878f0b704cc9203b6e13b962f0b53cecae78c6
|
[
"MIT"
] | 3
|
2020-05-28T15:04:05.000Z
|
2020-12-16T10:31:42.000Z
|
lib/datasets/LFW2G.py
|
blacknwhite5/facial-anonymizer
|
48878f0b704cc9203b6e13b962f0b53cecae78c6
|
[
"MIT"
] | 6
|
2019-04-15T11:16:02.000Z
|
2021-09-08T03:16:49.000Z
|
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os, random, glob, cv2
class LFW2G(data.Dataset):
def __init__(self, data_dir='data', transform=None):
super(LFW2G, self).__init__()
self.indexlist = glob.glob(os.path.join(data_dir, 'images/*/*.jpg'))
# load images
self.data_dir = data_dir
self.img_dir = 'images'
self.transform = transform
def load_img(self, index):
info = self.indexlist[index]
img_path = info
img = Image.open(img_path).convert('RGB')
return img
def __getitem__(self, index):
_img = self.load_img(index)
if self.transform:
img = self.transform(_img)
return img, index
def __len__(self):
return len(self.indexlist)
def main():
import torch
from torchvision import transforms, utils
from torch.utils.data import DataLoader
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
g_data = LFW2G(data_dir='/home/SSD5/jason-data/Privacy/dataset/LFW',
transform=transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(), normalize]))
print(len(g_data))
dataloader = DataLoader(g_data, batch_size=4, shuffle=False, num_workers=1)
for i, data in enumerate(dataloader):
im,_ = data
print(im.size())
utils.save_image(im, str(i) + '.jpg', normalize=True)
if __name__ == "__main__":
main()
| 32.1
| 91
| 0.63053
| 704
| 0.438629
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.066667
|
ce307dac43c76b9afca0ff0e962a64169f480199
| 4,536
|
py
|
Python
|
questions.py
|
lasyasreepada/iplaw-for-digital-teens
|
a1ac53f7b3438876db644450413f78ec8d612bac
|
[
"MIT"
] | null | null | null |
questions.py
|
lasyasreepada/iplaw-for-digital-teens
|
a1ac53f7b3438876db644450413f78ec8d612bac
|
[
"MIT"
] | null | null | null |
questions.py
|
lasyasreepada/iplaw-for-digital-teens
|
a1ac53f7b3438876db644450413f78ec8d612bac
|
[
"MIT"
] | null | null | null |
"""Set of questions for the IP Law quiz
questions.py
Lasya Sreepada
Yale College '19
May 6, 2017
"""
from random import shuffle
import time
def quiz():
questions = [
("Copyright protects both expression of an idea and the idea itself. \nTrue or False?", "f", "cp"),
("Clothing, such as Katy Perry’s “Left Shark” costume is a useful article and is therefore copyrightable. \nTrue or False?", "f", "cp"),
("One of the factors of evaluation for fair use is the effect of the use upon the potential market for the work in question. \nTrue or False?", "t", "cp"),
("In Cariou vs. Prince, the defendant was brought to court because he used images from Cariou’s 2000 book, Yes Rasta, to create a new exhibition of photos with some apparent modifications. This was not fair use because it did not comment on the original work about the nature of the photographs. \nTrue or False?", "t", "cp"),
("Copyright is an inevitable, “divine” grant entrusting total ownership rights to the creator of a work. \nTrue or False?", "f", "cp"),
("In Christian Louboutin vs. YSL, the defendant was brought to court for using a red outsole on women’s shoes that were also red in color. The court ruled that this was a trademark infringement. \nTrue or False? ", "f", "tm"),
("Descriptive names for a company or product (e.g. FishFri) are never trademarkable. \nTrue or False?", "f", "tm"),
("Companies such as Google can potentially lose their trademark protection because of genericide. \nTrue or False?", "t", "tm"),
("The same trademark (e.g. a word) can be registered by different parties, so long as the trademarks are in different classes. \nTrue or False?", "t", "tm"),
("Trademarked goods or services must be made available for commercial sale on a national level (beyond state boundaries). \nTrue or False?", "t", "tm"),
("A utility patent application consists of three parts: drawings, a written description, and claim statements. The drawings of the product are most important in determining what exactly gets patented. \nTrue or False?", "f", "pt"),
("Naturally occurring processes or products, such as human DNA, are not patentable. \nTrue or False?", "t", "pt"),
("The tests that the court used in Alice Corp v. CLS Bank Intl to determine whether the computer software was patent eligible were (1) whether the claims directed to an abstract idea and (2) whether the claims added something inventive. \nTrue or False?", "t", "pt"),
("The level of skill required to develop a product or process is not considered when determining whether it satisfies the non-obviousness requirement for patent eligibility. \nTrue or False?", "f", "pt"),
("There is a distinction between utility and design, therefore, it is possible for one product to have both a utility patent and a design patent. \nTrue or False?", "t", "pt")
]
shuffle(questions)
cp_correct = 0
tm_correct = 0
pt_correct = 0
print("Welcome to Which IP Law Is for You!")
time.sleep(2)
print("We will ask you a series of true or false questions about various cases related with copyrights, trademarks, and patents.")
time.sleep(5)
print("When a question is displayed, you will be prompted for an answer. Please type t for true, and f for false.")
time.sleep(5)
print("At the end of the quiz, we will sort you into a branch of IP Law based on your quiz performance. Good Luck!")
time.sleep(5)
print()
for question, correct_ans, typ in questions:
print(question)
answer = input()
if answer == correct_ans:
print("correct!")
print()
if typ == "cp":
cp_correct += 1
elif typ == "tm":
tm_correct += 1
else: pt_correct += 1
time.sleep(1)
else:
print("incorrect")
print()
time.sleep(1)
total_correct = cp_correct + tm_correct + pt_correct
if total_correct == len(questions):
print("Congratulations, you are the IP Law Supreme Overlord! You got all the questions right and would do well in any branch.")
else:
if (cp_correct >= tm_correct and cp_correct >= pt_correct):
print("You are Copyright Law!")
elif (tm_correct >= cp_correct and tm_correct >= pt_correct):
print("You are Trademark Law!")
else:
print("You are Patent Law!")
quiz()
| 49.846154
| 334
| 0.665785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,227
| 0.709231
|
ce313caa11cce1219bbc0ca784238958335d4a0b
| 529
|
py
|
Python
|
Python/leetcode/Triangle.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | 2
|
2015-12-02T06:44:01.000Z
|
2016-05-04T21:40:54.000Z
|
Python/leetcode/Triangle.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
Python/leetcode/Triangle.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
'''
Created on 1.12.2016
@author: Darren
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
"
'''
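# A minimal bottom-up DP sketch for the problem described above, using only
# O(n) extra space; the function name `minimum_total` is illustrative.
def minimum_total(triangle):
    # dp[j] holds the minimum path sum from the current row, column j, down to the bottom.
    dp = list(triangle[-1])
    for row in range(len(triangle) - 2, -1, -1):
        for j in range(row + 1):
            dp[j] = triangle[row][j] + min(dp[j], dp[j + 1])
    return dp[0]
# Example: minimum_total([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]) == 11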
| 18.892857
| 126
| 0.587902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 527
| 0.996219
|
ce31a76d07584d9441c2b8024946e9ee56bc2a7f
| 8,286
|
py
|
Python
|
regulations/tests/layers_toc_applier_tests.py
|
contolini/regulations-site
|
c31a9ce3097910877657f61b4c19a4ccbd0f967f
|
[
"CC0-1.0"
] | 18
|
2015-01-14T15:58:45.000Z
|
2019-08-17T06:15:59.000Z
|
regulations/tests/layers_toc_applier_tests.py
|
contolini/regulations-site
|
c31a9ce3097910877657f61b4c19a4ccbd0f967f
|
[
"CC0-1.0"
] | 142
|
2015-01-08T15:28:50.000Z
|
2018-07-16T16:48:07.000Z
|
regulations/tests/layers_toc_applier_tests.py
|
contolini/regulations-site
|
c31a9ce3097910877657f61b4c19a4ccbd0f967f
|
[
"CC0-1.0"
] | 45
|
2015-01-26T16:24:46.000Z
|
2021-02-20T10:50:59.000Z
|
from unittest import TestCase
from regulations.generator.layers.toc_applier import *
class TableOfContentsLayerTest(TestCase):
def test_section(self):
toc = TableOfContentsLayer(None)
el = {}
toc.section(el, {'index': ['1']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', '2', '3']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', 'B']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', 'Interpretations']})
self.assertEqual({}, el)
toc.section(el, {'index': ['1', '2'], 'title': '1.2 - Awesome'})
self.assertEqual(el, {
'is_section': True,
'section_id': '1-2',
'label': '1.2',
'sub_label': 'Awesome'
})
toc.section(el, {'index': ['2', '1'], 'title': '2.1Sauce'})
self.assertEqual(el, {
'is_section': True,
'section_id': '2-1',
'label': '2.1',
'sub_label': 'Sauce'
})
def test_appendix_supplement(self):
toc = TableOfContentsLayer(None)
el = {}
toc.appendix_supplement(el, {'index': ['1']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', '2', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', 'B', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {'index': ['1', 'Interp', '3']})
self.assertEqual({}, el)
toc.appendix_supplement(el, {
'index': ['1', 'B'],
'title': 'Appendix B - Bologna'})
self.assertEqual(el, {
'is_appendix': True,
'is_first_appendix': True,
'label': 'Appendix B',
'sub_label': 'Bologna',
'section_id': '1-B'
})
el = {}
toc.appendix_supplement(el, {
'index': ['204', 'A'],
'title': 'Appendix A to Part 204 - Model Forms'})
self.assertEqual(el, {
'is_appendix': True,
'is_first_appendix': True,
'label': 'Appendix A to Part 204',
'sub_label': 'Model Forms',
'section_id': '204-A'
})
el = {}
toc.appendix_supplement(el, {
'index': ['1', 'Interp'],
'title': 'Supplement I to 8787 - I am Iron Man'})
self.assertEqual(el, {
'is_supplement': True,
'label': 'Supplement I to 8787',
'sub_label': 'I am Iron Man',
'section_id': '1-Interp'
})
def test_apply_layer_url(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']}]})
result = toc.apply_layer('100')
self.assertEqual('#100-1', result[1][0]['url'])
toc.sectional = True
toc.version = 'verver'
result = toc.apply_layer('100')
self.assertTrue('100-1/verver#100-1' in result[1][0]['url'])
def test_apply_layer_compatibility(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Sec2', 'index': ['100', '2']},
{'title': '100.3 Sec3', 'index': ['100', '3']}]
})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
self.assertEqual(3, len(result[0]['sub_toc']))
def test_apply_layer_first_appendix(self):
toc = TableOfContentsLayer({'100': [
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix B', 'index': ['100', 'B']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(4, len(result))
aA, aB, aC, sI = result
self.assertTrue(aA['is_first_appendix'])
self.assertFalse(aB['is_first_appendix'])
self.assertFalse(aC['is_first_appendix'])
self.assertFalse(sI.get('is_first_appendix', False))
toc = TableOfContentsLayer({'100': [
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(1, len(result))
self.assertFalse(result[0].get('is_first_appendix', False))
def test_apply_layer_interp_emptysubpart(self):
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(3, len(result))
s1, s2, interp = result
self.assertEqual(1, len(interp['sub_toc']))
nosubpart = interp['sub_toc'][0]
self.assertEqual('Regulation Text', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'Interp'], nosubpart['index'])
toc = TableOfContentsLayer({'100': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}]})
_, result = toc.apply_layer('100')
self.assertEqual(5, len(result))
s1, s2, appA, appC, interp = result
self.assertEqual(2, len(interp['sub_toc']))
nosubpart, appendices = interp['sub_toc']
self.assertEqual('Regulation Text', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'Interp'], nosubpart['index'])
self.assertEqual('Appendices', appendices['label'])
self.assertEqual(['100', 'Appendices', 'Interp'], appendices['index'])
def test_apply_layer_interp_subparts(self):
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']}]})
_, result = toc.apply_layer('100')
self.assertEqual(2, len(result))
subpartA, interp = result
self.assertEqual(2, len(subpartA['sub_toc']))
self.assertEqual(1, len(interp['sub_toc']))
nosubpart = interp['sub_toc'][0]
self.assertEqual('Subpart A', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'A', 'Interp'], nosubpart['index'])
toc = TableOfContentsLayer({
'100': [
{'title': 'Subpart A', 'index': ['100', 'Subpart', 'A']},
{'title': 'Appendix A', 'index': ['100', 'A']},
{'title': 'Appendix C', 'index': ['100', 'C']},
{'title': 'Supplement I', 'index': ['100', 'Interp']}],
'100-Subpart-A': [
{'title': '100.1 Intro', 'index': ['100', '1']},
{'title': '100.2 Second', 'index': ['100', '2']}]})
_, result = toc.apply_layer('100')
self.assertEqual(4, len(result))
subpartA, appA, appC, interp = result
self.assertEqual(2, len(interp['sub_toc']))
nosubpart, appendices = interp['sub_toc']
self.assertEqual('Subpart A', nosubpart['label'])
self.assertEqual(['100', 'Subpart', 'A', 'Interp'], nosubpart['index'])
self.assertEqual('Appendices', appendices['label'])
self.assertEqual(['100', 'Appendices', 'Interp'], appendices['index'])
| 40.223301
| 79
| 0.506758
| 8,197
| 0.989259
| 0
| 0
| 0
| 0
| 0
| 0
| 2,661
| 0.321144
|
ce33d42b39da049e5244eeed1b27927c33f5fb8c
| 1,929
|
py
|
Python
|
array_range.py
|
fasiha/array-range-slices-py
|
940bfd1879a7e041b59349f6d9cbc2d79dacb891
|
[
"Unlicense"
] | 1
|
2021-02-03T14:01:56.000Z
|
2021-02-03T14:01:56.000Z
|
array_range.py
|
fasiha/array-range-slices-py
|
940bfd1879a7e041b59349f6d9cbc2d79dacb891
|
[
"Unlicense"
] | null | null | null |
array_range.py
|
fasiha/array-range-slices-py
|
940bfd1879a7e041b59349f6d9cbc2d79dacb891
|
[
"Unlicense"
] | null | null | null |
"""
Numpy's `split` can split a multidimensional array into non-overlapping
sub-arrays. However, this is not a memory-efficient way of dealing with
non-overlapping partitions of an array because it effectively doubles
memory usage.
This module provides an iterable generator that produces tuples of slices,
each of which can be used to index into a Numpy array and obtain a small
view into it. It is very memory-efficient since no copy of the array is
ever created.
This all works because Numpy ndarrays can be indexed using a tuple of
slices: that is, `arr[a:b, c:d, e:f]` is equivalent to
`arr[(slice(a, b), slice(c, d), slice(e, f))]`.
This module doesn't import Numpy at all since it generates Python slices.
"""
from itertools import product
from typing import List, Iterable, Tuple
def array_range(start: List[int], stop: List[int], step: List[int]) -> Iterable[Tuple]:
"""
Makes an iterable of non-overlapping slices, e.g., to partition an array
Returns an iterable of tuples of slices, each of which can be used to
index into a multidimensional array such as Numpy's ndarray.
    >>> [arr[tup] for tup in array_range([0, 0], arr.shape, [5, 7])]
where `arr` can be indexed with a tuple of slices (e.g., Numpy), will
evaluate to a list of sub-arrays.
Same arguments as `range` except all three arguments are required and
expected to be list-like of same length. `start` indicates the indexes
to start each dimension. `stop` indicates the stop index for each
dimension. `step` is the size of the chunk in each dimension.
"""
assert len(start) == len(stop)
assert len(stop) == len(step)
assert all(map(lambda x: x > 0, step))
startRangesGen = map(lambda v: range(*v), zip(start, stop, step))
    def start_to_slice_mapper(multi_start):
        # One slice per dimension, clamping the end of each chunk to `stop`.
        return tuple(
            slice(i, min(i + dim_step, dim_stop))
            for i, dim_stop, dim_step in zip(multi_start, stop, step)
        )
    return map(start_to_slice_mapper, product(*startRangesGen))
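if __name__ == "__main__":
    # Minimal usage sketch (assumes NumPy is installed; the module itself
    # deliberately avoids importing it):
    import numpy as np
    arr = np.arange(24).reshape(4, 6)
    views = [arr[tup] for tup in array_range([0, 0], arr.shape, [2, 3])]
    print([v.shape for v in views])  # four non-overlapping (2, 3) views, no copies made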
| 41.042553
| 87
| 0.729912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,395
| 0.723173
|
ce34ebaf15612703873e6a27020070246ab042d8
| 7,197
|
py
|
Python
|
test-framework/test-suites/integration/tests/add/test_add_host_bonded.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 123
|
2015-05-12T23:36:45.000Z
|
2017-07-05T23:26:57.000Z
|
test-framework/test-suites/integration/tests/add/test_add_host_bonded.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 177
|
2015-06-05T19:17:47.000Z
|
2017-07-07T17:57:24.000Z
|
test-framework/test-suites/integration/tests/add/test_add_host_bonded.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 32
|
2015-06-07T02:25:03.000Z
|
2017-06-23T07:35:35.000Z
|
import json
from textwrap import dedent
import pytest
@pytest.mark.usefixtures("add_host_with_interface")
class TestAddHostBonded:
def test_no_hosts(self, host):
result = host.run('stack add host bonded')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_matching_hosts(self, host):
result = host.run('stack add host bonded a:test')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_multiple_hosts(self, host):
result = host.run('stack add host bonded frontend-0-0 backend-0-0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument must be unique
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_channel(self, host):
result = host.run('stack add host bonded backend-0-0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "channel" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_interfaces(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "interfaces" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_ip(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "ip" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_network(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "network" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_invalid_network(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=test')
assert result.rc == 255
assert result.stderr == 'error - network "test" does not exist\n'
def test_missing_interface(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private')
assert result.rc == 255
assert result.stderr == 'error - interface "eth1" does not exist for host "backend-0-0"\n'
def test_comma_seperated_interfaces(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': None,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
def test_space_seperated_interfaces(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces="eth0 eth1" ip=192.168.0.1 network=private')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': None,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
def test_default_with_options(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1 default=true')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private options=test_options')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': True,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': 'bonding-opts="test_options"',
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
| 28.559524
| 106
| 0.632208
| 7,087
| 0.984716
| 0
| 0
| 7,139
| 0.991941
| 0
| 0
| 4,104
| 0.570238
|
ce3501af1f45e1223934bba47fc0e9a49f9b32bd
| 1,669
|
py
|
Python
|
BITs/2014/Kozlov_A_D/task_8_11.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
BITs/2014/Kozlov_A_D/task_8_11.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
BITs/2014/Kozlov_A_D/task_8_11.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 8. Variant 11.
# 1-50. Extend the "Anagrams" game (see M. Dawson, Programming in Python, ch. 4) so that each word comes with a hint. The player earns the right to a hint only when they have no guesses of their own. Design a scoring system in which players who guess the word without a hint score more than those who asked for one.
# Kozlov A.D.
# 04.04.2016
import random
words = ("питон","анаграмма","простая","сложная","ответ","подстаканник")
word=random.choice(words)
correct=word
score=10
i=0
jumble=""
while word:
position=random.randrange(len(word))
jumble+=word[position]
word=word[:position]+word[(position+1):]
print("""
Добро пожаловать в игру 'Анаграммы'!
Надо перемтавитьбуквы так, чтобы получилось осмысленное слово.
Для вызова подсказки напишите: подсказка.
(Для выхода нажмите Enter,не вводя своей версии.)
""")
print("Вот анаграмма: ", jumble)
guess=input("Попробуйте отгадать исходное слово: ")
if guess=="подсказка":
score-=1
print(str(i+1),"буква: ",correct[i])
i+=1
while guess !=correct and guess!="":
guess=input("Попробуйте отгадать исходное слово: ")
if guess=="подсказка":
if i==len(correct):
print("Все буквы уже выведены.")
continue
score-=1
print(str(i+1),"буква: ",correct[i])
i+=1
continue
if guess==correct:
print("Да. Именно так! Вы отгадали! Вы зарабботали ",score," очков!")
else:
print("К сожалению, Вы неправы.")
print("Спасибо за игру.")
input("\n\nНажмите Enter, чтобы выйти")
| 37.088889
| 362
| 0.656681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,689
| 0.710261
|
ce358cccd6bb9246d24f50b9e468818c256a0701
| 1,254
|
py
|
Python
|
master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
# ColorPaint.py
import pygame # setup
import random
pygame.init()
screen = pygame.display.set_mode([800, 600])
pygame.display.set_caption('Click and drag to draw, using up to 3 mouse buttons')
keepGoing = True
ORANGE = (255, 165, 0) # RGB color triplets for 3 mousebutton colors; (255, 255, 0) would be yellow, not orange
GREEN = (0,255,0)
PURPLE = (128,0,128)
radius = 15
mousedown = False
while keepGoing: # game loop
for event in pygame.event.get(): # handling events
if event.type == pygame.QUIT:
keepGoing = False
if event.type == pygame.MOUSEBUTTONDOWN:
mousedown = True
if event.type == pygame.MOUSEBUTTONUP:
mousedown = False
if mousedown: # draw/update graphics
spot = pygame.mouse.get_pos()
if pygame.mouse.get_pressed()[0] : # boolean for button1
button_color = ORANGE
elif pygame.mouse.get_pressed()[1]: # boolean for button2
button_color = GREEN
else: # must be button3
button_color = PURPLE
pygame.draw.circle(screen, button_color, spot, radius)
pygame.display.update() # update display
pygame.quit() # exit
| 36.882353
| 81
| 0.587719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.200159
|
ce35c483fa1d1e28e070fa3ddb8145549538c79c
| 14,508
|
py
|
Python
|
eventmanager/events/tests.py
|
karinakozarova/EventManager
|
b09fa7a788b4aa11761fc34096cc711304c288c7
|
[
"MIT"
] | 4
|
2019-01-06T16:58:20.000Z
|
2019-04-08T10:20:46.000Z
|
eventmanager/events/tests.py
|
EventManagerTeam/EventManager
|
b09fa7a788b4aa11761fc34096cc711304c288c7
|
[
"MIT"
] | 297
|
2018-11-14T13:59:19.000Z
|
2022-03-11T23:33:28.000Z
|
eventmanager/events/tests.py
|
karinakozarova/EventManager
|
b09fa7a788b4aa11761fc34096cc711304c288c7
|
[
"MIT"
] | 1
|
2019-04-22T15:17:32.000Z
|
2019-04-22T15:17:32.000Z
|
import datetime
import unittest
from accounts.models import AccountDetails
from categories.models import Category
from django.contrib.auth.models import User
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from events.models import Comment
from events.models import Event
from events.models import Invite
from tasks.models import Task
class EventsTestCase(TestCase):
def setUp(self):
self.total_number_of_events = 25
self.client = Client()
self.client.login(username='john', password='johnpassword')
category = Category.objects.create(
name='test event category',
description='cool description',
slug='test',
)
for event_id in range(self.total_number_of_events):
eventstring = 'test' + str(event_id)
self.event = Event.objects.create(
title=eventstring,
description=eventstring,
)
self.event.save()
self.event.category.add(category)
self.event.save()
def test_list_events_lists_event(self):
url = reverse('events.list')
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(b'test1', resp.content)
def test_list_events_lists_categories(self):
url = reverse('events.list')
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(b'test event category', resp.content)
def test_create_event(self):
pass
def test_update_event(self):
pass
def test_delete_event(self):
pass
def test_view_event(self):
pass
def test_join_event(self):
pass
def test_unjoin_event(self):
pass
def test_add_teammate(self):
pass
class EventsFeedsTestCase(TestCase):
def setUp(self):
self.total_number_of_events = 25
self.client = Client()
self.client.login(username='john', password='johnpassword')
category = Category.objects.create(
name='test event',
description='cool description',
slug='test',
)
for event_id in range(self.total_number_of_events):
eventstring = 'test' + str(event_id)
self.event = Event.objects.create(
title=eventstring,
description=eventstring,
)
self.event.save()
self.event.category.add(category)
self.event.save()
def test_all_events_feed(self):
response = self.client.get(reverse('event_feed'))
latest_event = 'test' + str(self.total_number_of_events - 1)
self.assertContains(response, latest_event)
self.assertContains(response, 'test' + str(1))
def test_latest_events_feed(self):
response = self.client.get(reverse('latest_event_feed'))
first_event_title = 'test' + str(self.total_number_of_events)
self.assertNotContains(response, first_event_title)
latest_event_title = 'test' + str(1)
self.assertContains(response, latest_event_title)
class EventsUrlsTestClass(TestCase):
client = Client()
def setUp(self):
self.client = Client()
self.user = User.objects.create_user(
'john',
'lennon@thebeatles.com',
'johnpassword'
)
self.user.details = AccountDetails.objects.create(
user=self.user,
description='cool description',
slug='userslug'
)
self.client.login(username='john', password='johnpassword')
category = Category.objects.create(
name='test event',
description='cool description',
slug='test',
)
self.event = Event.objects.create(
title='testy',
description='cool description',
slug='event',
added_by=self.user,
)
self.event.save()
self.event.category.add(category)
self.event.team_members.add(self.user)
self.event.save()
def url_returns_200(self, url, status_code=200):
response = self.client.get(url)
self.assertEqual(response.status_code, status_code)
def test_list_events_url(self):
self.url_returns_200(reverse('events.list'))
def test_create_event_url(self):
self.url_returns_200(reverse('events.create_event'))
def test_delete_event_url(self):
user = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
self.client.login(username='johnaaaa', password='johnpasswordaaa')
category = Category.objects.create(
name='unisdjsd',
description='cool description',
slug='tesddssst',
)
event = Event.objects.create(
title='delete',
description='cool description',
slug='delete',
added_by=user,
)
event.save()
event.category.add(category)
self.url_returns_200(reverse('events.del', kwargs={'slug': 'delete'}))
def test_delete_event_url_unsuccessful(self):
user = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
user2 = User.objects.create_user(
'johnaaaa2',
'lennonaaa2@thebeatles.com',
'johnpasswordaaa2'
)
self.client.login(username='johnaaaa', password='johnpasswordaaa')
category = Category.objects.create(
name='unisdjsd',
description='cool description',
slug='tesddssst',
)
event = Event.objects.create(
title='delete',
description='cool description',
slug='delete',
added_by=user2,
)
event.save()
event.category.add(category)
response = self.client.get(
reverse(
'events.del', kwargs={
'slug': 'delete'}))
        self.assertEqual(response.status_code, 403)
def test_view_event_url(self):
user2 = User.objects.create_user(
username='testuser2',
password='12345'
)
user2.details = AccountDetails.objects.create(
user=user2,
description='cool description',
slug='userslug2'
)
self.user.details.friends.add(user2)
self.url_returns_200(reverse('event', kwargs={'slug': 'event'}))
def test_all_events_feed_url(self):
self.url_returns_200(reverse('event_feed'))
def test_latest_events_feed_url(self):
self.url_returns_200(reverse('latest_event_feed'))
def test_join_event(self):
self.url_returns_200(reverse('events.join', kwargs={'slug': 'event'}))
def test_unjoin_event(self):
self.url_returns_200(
reverse(
'events.rm_join',
kwargs={
'slug': 'event'}))
def test_event_settings_url(self):
self.url_returns_200(
reverse(
'events.settings',
kwargs={
'slug': 'event'}))
def test_event_invites_url(self):
self.url_returns_200(reverse('events.invites'))
def test_event_invite_url(self):
self.url_returns_200(
reverse(
'events.invite',
kwargs={
'slug': 'userslug',
'event': 'event'}))
def test_event_url(self):
self.url_returns_200(reverse('events.event', kwargs={'slug': 'event'}))
def test_add_team_member(self):
user = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
event = Event.objects.create(
title='testy',
description='cool description',
slug='eventааааа',
added_by=user,
)
self.client.login(username='johnaaaa', password='johnpasswordaaa')
self.url_returns_200('events/userslug/eventааааа/add_teammate')
def test_get_tasks_no_tasks(self):
response = self.client.get(reverse('events.tasks'))
self.assertNotContains(response, 'TO DO:')
self.assertNotContains(response, 'DOING:')
self.assertEqual(response.status_code, 200)
# def test_get_tasks(self):
# task_title = 'Very cooollll'
# task = Task.objects.create(
# title=task_title,
# event=self.event,
# slug='event',
# assignee=self.user,
# status='TODO'
# )
# self.client.login(username='john', password='johnpassword')
# response = self.client.get(reverse('events.tasks'))
# self.assertContains(response, task_title)
# self.assertEqual(response.status_code, 200)
def test_confirm_invite(self):
user2 = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
Invite.objects.create(
invited_user=self.user,
invited_by=user2,
event=self.event)
self.url_returns_200(
reverse(
'events.confirm_invite',
kwargs={
'slug': self.event.slug}))
def test_decline_invite(self):
user2 = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
Invite.objects.create(
invited_user=self.user,
invited_by=user2,
event=self.event)
self.url_returns_200(
reverse(
'invites.decline_invite',
kwargs={
'slug': self.event.slug}))
def test_add_teammate_no_friends(self):
self.url_returns_200(
reverse(
'events.add_teammate',
kwargs={
'slug': self.event.slug}))
response = self.client.get(
reverse(
'events.add_teammate',
kwargs={
'slug': self.event.slug}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'Find')
def test_add_teammate(self):
user2 = User.objects.create_user(
'friendddddddd',
'lennon@thebeatles.com',
'johnpassword'
)
user2.details = AccountDetails.objects.create(
user=user2,
description='cool description',
slug='useddddrslug'
)
self.user.details.friends.add(user2)
self.user.save()
self.user.details.save()
self.url_returns_200(
reverse(
'events.add_teammate',
kwargs={
'slug': self.event.slug
}
)
)
response = self.client.get(
reverse(
'events.add_teammate',
kwargs={
'slug': self.event.slug}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Find')
def test_event_team_add(self):
user2 = User.objects.create_user(
'johnaaaa',
'lennonaaa@thebeatles.com',
'johnpasswordaaa'
)
response = self.client.get(
reverse(
'events.event_team_add',
kwargs={
'slug': self.event.slug,
'user': user2
}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Success')
self.assertContains(response, user2.username)
def test_delete_comment_by_slug(self):
Comment.objects.create(
event=self.event,
author=self.user,
title='opaaa',
content='sdasdsa')
comment = Comment.objects.first()
self.url_returns_200(
reverse(
'events.comment.del',
kwargs={
'slug': self.event.slug,
'comment': comment.pk}))
def test_edit_comment_by_slug(self):
Comment.objects.create(
event=self.event,
author=self.user,
title='opaaa',
content='sdasdsa')
comment = Comment.objects.first()
response = self.client.get(
reverse(
'events.comment.edit',
kwargs={
'slug': self.event.slug,
'comment': comment.pk}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'opaaa')
def test_event_board(self):
self.url_returns_200(
reverse(
'events.board', kwargs={
'slug': self.event.slug}))
def test_my_events(self):
event = Event.objects.create(
title='testy',
description='cool description',
slug='eventааааа',
added_by=self.user,
)
event.attendees.add(self.user)
response = self.client.get(reverse('events.my_events'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testy')
def test_events_I_host(self):
event = Event.objects.create(
title='testy',
description='cool description',
slug='eventааааа',
added_by=self.user,
)
event.attendees.add(self.user)
response = self.client.get(reverse('events.events_I_host'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testy')
def test_show_random_event(self):
event = Event.objects.create(
title='testy',
description='cool description',
slug='eventааааа',
added_by=self.user,
)
response = self.client.get(reverse('events.show_random_event'))
self.assertEqual(response.status_code, 302)
def test_search_json(self):
response = self.client.get(reverse('events.search_json', kwargs={
'category_id': 1,
'slug': 'test'}))
self.assertEqual(response.status_code, 200)
| 30.543158
| 79
| 0.565826
| 14,135
| 0.972614
| 0
| 0
| 0
| 0
| 0
| 0
| 2,615
| 0.179935
|
ce35f5d501c181ecbb1339e8615379517cb18794
| 159
|
py
|
Python
|
billing/tests/views.py
|
hkhanna/django-stripe-billing
|
75a53c183ff86b1c7edf741683ffe3330e733d87
|
[
"MIT"
] | 1
|
2022-03-29T20:16:34.000Z
|
2022-03-29T20:16:34.000Z
|
billing/tests/views.py
|
hkhanna/django-stripe-billing
|
75a53c183ff86b1c7edf741683ffe3330e733d87
|
[
"MIT"
] | 2
|
2022-02-21T17:38:22.000Z
|
2022-02-22T20:56:39.000Z
|
billing/tests/views.py
|
hkhanna/django-stripe-billing
|
75a53c183ff86b1c7edf741683ffe3330e733d87
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
from .. import mixins
class ProfileView(mixins.BillingMixin, TemplateView):
template_name = "profile.html"
| 22.714286
| 53
| 0.792453
| 88
| 0.553459
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.08805
|
ce36dcd7976f6556078f7dfa2fbd33e0565d593e
| 4,225
|
py
|
Python
|
core/model/meta/mtl.py
|
Aamer98/LibFewShot_NoAffine
|
1203d2a9f5cb4705038748dbda03a4b7c37bf647
|
[
"MIT"
] | 1
|
2021-11-07T03:34:41.000Z
|
2021-11-07T03:34:41.000Z
|
core/model/meta/mtl.py
|
taylor1355/LibFewShot
|
c53b4ee3772c5c8033fd54aa73586091eee2d0b0
|
[
"MIT"
] | null | null | null |
core/model/meta/mtl.py
|
taylor1355/LibFewShot
|
c53b4ee3772c5c8033fd54aa73586091eee2d0b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@inproceedings{DBLP:conf/cvpr/SunLCS19,
author = {Qianru Sun and
Yaoyao Liu and
Tat{-}Seng Chua and
Bernt Schiele},
title = {Meta-Transfer Learning for Few-Shot Learning},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR}
2019, Long Beach, CA, USA, June 16-20, 2019},
pages = {403--412},
year = {2019},
url = {http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Meta-Transfer_Learning_for_Few
-Shot_Learning_CVPR_2019_paper.html},
doi = {10.1109/CVPR.2019.00049}
}
https://arxiv.org/abs/1812.02391
Adapted from https://github.com/yaoyao-liu/meta-transfer-learning.
"""
import torch
from torch import nn
import torch.nn.functional as F
import copy
from core.utils import accuracy
from .meta_model import MetaModel
from ..backbone.utils import convert_mtl_module
class MTLBaseLearner(nn.Module):
"""The class for inner loop."""
def __init__(self, ways, z_dim):
super().__init__()
self.ways = ways
self.z_dim = z_dim
self.vars = nn.ParameterList()
self.fc1_w = nn.Parameter(torch.ones([self.ways, self.z_dim]))
torch.nn.init.kaiming_normal_(self.fc1_w)
self.vars.append(self.fc1_w)
self.fc1_b = nn.Parameter(torch.zeros(self.ways))
self.vars.append(self.fc1_b)
def forward(self, input_x, the_vars=None):
if the_vars is None:
the_vars = self.vars
fc1_w = the_vars[0]
fc1_b = the_vars[1]
net = F.linear(input_x, fc1_w, fc1_b)
return net
def parameters(self):
return self.vars
class MTL(MetaModel):
def __init__(self, feat_dim, num_classes, inner_param, use_MTL, **kwargs):
super(MTL, self).__init__(**kwargs)
self.feat_dim = feat_dim
self.num_classes = num_classes
self.base_learner = MTLBaseLearner(self.way_num, z_dim=self.feat_dim).to(self.device)
self.inner_param = inner_param
self.loss_func = nn.CrossEntropyLoss()
convert_mtl_module(self, use_MTL)
def set_forward(self, batch):
"""
meta-validation
"""
image, global_target = batch
image = image.to(self.device)
global_target = global_target.to(self.device)
feat = self.emb_func(image)
support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=4)
classifier, base_learner_weight = self.set_forward_adaptation(support_feat, support_target)
output = classifier(query_feat, base_learner_weight)
acc = accuracy(output, query_target)
return output, acc
def set_forward_loss(self, batch):
"""
meta-train
"""
image, global_target = batch
image = image.to(self.device)
global_target = global_target.to(self.device)
feat = self.emb_func(image)
support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=4)
classifier, base_learner_weight = self.set_forward_adaptation(support_feat, support_target)
output = classifier(query_feat, base_learner_weight)
loss = self.loss_func(output, query_target)
acc = accuracy(output, query_target)
return output, acc, loss
def set_forward_adaptation(self, support_feat, support_target):
classifier = self.base_learner
logit = self.base_learner(support_feat)
loss = self.loss_func(logit, support_target)
grad = torch.autograd.grad(loss, self.base_learner.parameters())
fast_parameters = list(
map(
lambda p: p[1] - 0.01 * p[0],
zip(grad, self.base_learner.parameters()),
)
)
for _ in range(1, self.inner_param["iter"]):
logit = self.base_learner(support_feat, fast_parameters)
loss = F.cross_entropy(logit, support_target)
grad = torch.autograd.grad(loss, fast_parameters)
fast_parameters = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_parameters)))
return classifier, fast_parameters
| 32.751938
| 101
| 0.64568
| 3,279
| 0.776095
| 0
| 0
| 0
| 0
| 0
| 0
| 846
| 0.200237
|
ce378179f8b40837991f7c71e128ec7eb52c6132
| 1,023
|
py
|
Python
|
game.py
|
gustavonaldoni/command-line-hangman
|
a740a446ce1dfad2100ab7e6ea1db817c6a57a47
|
[
"MIT"
] | null | null | null |
game.py
|
gustavonaldoni/command-line-hangman
|
a740a446ce1dfad2100ab7e6ea1db817c6a57a47
|
[
"MIT"
] | null | null | null |
game.py
|
gustavonaldoni/command-line-hangman
|
a740a446ce1dfad2100ab7e6ea1db817c6a57a47
|
[
"MIT"
] | null | null | null |
from capture_words import capture_words_from_file
import random
def find_letter_indexes(letter, word):
indexes = []
for index, l in enumerate(word):
if l == letter:
indexes.append(index)
return indexes
def convert_word_to_asterisks(word):
final_word = '*' * len(word)
return final_word
def check_user_choice(word, user_choice):
if user_choice in word:
return True
return False
def get_last_formatted_word(real_word, new_formatted_word, user_choice):
new_formatted_word = list(new_formatted_word)
if check_user_choice(real_word, user_choice):
indexes = find_letter_indexes(user_choice, real_word)
for index in indexes:
new_formatted_word[index] = user_choice
return "".join(new_formatted_word)
def has_user_lost(user_chances):
if user_chances <= 0:
return True
return False
def has_user_won(real_word, user_word):
if real_word == user_word:
return True
return False
| 21.765957
| 72
| 0.691105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.004888
|
ce37b76dcc82f7204803dfa179451058b3f38a92
| 4,895
|
py
|
Python
|
src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.DwarseMarkeringToegang import DwarseMarkeringToegang
from OTLMOW.OTLModel.Datatypes.KlDwarseMarkeringVerschuindCode import KlDwarseMarkeringVerschuindCode
from OTLMOW.OTLModel.Datatypes.KlDwarseMarkeringVerschuindSoort import KlDwarseMarkeringVerschuindSoort
from OTLMOW.OTLModel.Datatypes.KwantWrdInDecimaleGraden import KwantWrdInDecimaleGraden
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
# Generated with OTLClassCreator. To modify: extend, do not edit
class DwarseMarkeringVerschuind(DwarseMarkeringToegang):
"""Een schuine markering dwars op de weg aangebracht om het verkeer te waarschuwen, informeren of regelen."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
super().__init__()
self._basisoppervlakte = OTLAttribuut(field=KwantWrdInVierkanteMeter,
naam='basisoppervlakte',
label='oppervlakte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind.basisoppervlakte',
definition='De basisoppervlakte van de dwarse markering in vierkante meter.',
owner=self)
self._code = OTLAttribuut(field=KlDwarseMarkeringVerschuindCode,
naam='code',
label='code',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind.code',
definition='De (COPRO/BENOR) code van dwarse markering.',
owner=self)
self._hoek = OTLAttribuut(field=KwantWrdInDecimaleGraden,
naam='hoek',
label='hoek',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind.hoek',
definition='De hoek van de verschuinde dwarsmarkering in decimale graden.',
owner=self)
self._oppervlakte = OTLAttribuut(field=KwantWrdInVierkanteMeter,
naam='oppervlakte',
label='oppervlakte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind.oppervlakte',
definition='De oppervlakte van een dwarsmarkering na verschuining.',
owner=self)
self._soortOmschrijving = OTLAttribuut(field=KlDwarseMarkeringVerschuindSoort,
naam='soortOmschrijving',
label='soort omschrijving',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DwarseMarkeringVerschuind.soortOmschrijving',
definition='De soort en tevens de omschrijving van dwarse markering.',
owner=self)
@property
def basisoppervlakte(self):
"""De basisoppervlakte van de dwarse markering in vierkante meter."""
return self._basisoppervlakte.get_waarde()
@basisoppervlakte.setter
def basisoppervlakte(self, value):
self._basisoppervlakte.set_waarde(value, owner=self)
@property
def code(self):
"""De (COPRO/BENOR) code van dwarse markering."""
return self._code.get_waarde()
@code.setter
def code(self, value):
self._code.set_waarde(value, owner=self)
@property
def hoek(self):
"""De hoek van de verschuinde dwarsmarkering in decimale graden."""
return self._hoek.get_waarde()
@hoek.setter
def hoek(self, value):
self._hoek.set_waarde(value, owner=self)
@property
def oppervlakte(self):
"""De oppervlakte van een dwarsmarkering na verschuining."""
return self._oppervlakte.get_waarde()
@oppervlakte.setter
def oppervlakte(self, value):
self._oppervlakte.set_waarde(value, owner=self)
@property
def soortOmschrijving(self):
"""De soort en tevens de omschrijving van dwarse markering."""
return self._soortOmschrijving.get_waarde()
@soortOmschrijving.setter
def soortOmschrijving(self, value):
self._soortOmschrijving.set_waarde(value, owner=self)
| 49.444444
| 158
| 0.608784
| 4,282
| 0.87477
| 0
| 0
| 1,293
| 0.264147
| 0
| 0
| 1,529
| 0.31236
|
ce37e19c6bb3e23ffae3d35e78de1e2b5a16ea5f
| 549
|
py
|
Python
|
backend/reviews/forms.py
|
ranwise/djangochannel
|
9c719d292b5c1d0fd008a16a64509a309bdd642e
|
[
"BSD-3-Clause"
] | 45
|
2019-10-04T10:12:54.000Z
|
2022-03-29T18:12:34.000Z
|
backend/reviews/forms.py
|
ranwise/djangochannel
|
9c719d292b5c1d0fd008a16a64509a309bdd642e
|
[
"BSD-3-Clause"
] | 6
|
2019-10-09T07:37:14.000Z
|
2022-01-27T16:41:16.000Z
|
backend/reviews/forms.py
|
ranwise/djangochannel
|
9c719d292b5c1d0fd008a16a64509a309bdd642e
|
[
"BSD-3-Clause"
] | 35
|
2019-10-04T10:18:48.000Z
|
2022-01-14T22:40:38.000Z
|
from django import forms
from .models import Review
class ReviewsForm(forms.ModelForm):
"""Форма добавления отзыва"""
class Meta:
model = Review
fields = ["name", "text", "social_link", "git_link"]
labels = {
"name": (""),
"text": (""),
# "social_link": (""),
# "git_link": ("")
}
widgets = {
'name': forms.TextInput(attrs={'placeholder': 'Ваше имя'}),
'text': forms.Textarea(attrs={'placeholder': 'Текст отзыва'}),
}
| 26.142857
| 74
| 0.495446
| 532
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.37585
|
ce3a314613ced74eaa69ae3cdf828f6c6582b325
| 6,875
|
py
|
Python
|
scripts/jenkins_console_log_search.py
|
hrajput89/kv_engine
|
33fb1ab2c9787f55555e5f7edea38807b3dbc371
|
[
"BSD-3-Clause"
] | 1
|
2019-06-13T07:33:09.000Z
|
2019-06-13T07:33:09.000Z
|
scripts/jenkins_console_log_search.py
|
paolococchi/kv_engine
|
40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/jenkins_console_log_search.py
|
paolococchi/kv_engine
|
40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45
|
[
"BSD-3-Clause"
] | 1
|
2020-01-15T16:52:37.000Z
|
2020-01-15T16:52:37.000Z
|
#!/usr/bin/env python3
"""
Copyright 2018 Couchbase, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This short script uses curl requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
This is currently limited to searching for log patterns contained within
one line of the logs, as the search checks line-by-line.
Usage: python jenkins_console_log_search.py -j <job-name> -s <RegEx Search term>
"""
import argparse
import re
import requests
import sys
import time
class ASCIIFormat:
BOLD = '\033[1m'
END = '\033[0m'
# Search for searchPattern in logText, treating searchPattern as either a plain
# string or a compiled RegEx depending on whether the isRegex flag is True, and
# assuming that logText is separated into lines by \n's
def search(logText, searchPattern, isRegex):
output = []
if isRegex:
# Check regex against whole text
for find in re.finditer(searchPattern, logText):
group_list = []
if find.groups():
group_list.extend(find.groups())
else:
group_list.append(find.group(0))
for term in group_list:
output.append(term)
else: # Not a RegEx
lines = []
for line in logText.split('\n'):
result = line.find(searchPattern)
if result != -1:
# Wrap the search term in ASCII formatting to make it bold
lines.append(line.replace(searchPattern, ASCIIFormat.BOLD
+ searchPattern + ASCIIFormat.END))
output.extend(lines)
return output
# --- Start Main Script ---
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str, required=True,
help='The string to search the logs for in a RegEx format')
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards from. '
'0 (default) fetches latest build number', default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
argParser.add_argument('--format', '-f', default="plain", type=str,
help="Select the format to print results. "
"Available formats are: "
"plain (default), log-line, jira")
argParser.add_argument('--url-prefix', '-u', type=str, default='cv',
help='Determine the endpoint of logs to check, '
'http://<url-prefix>.jenkins.couchbase.com')
args = argParser.parse_args()
job = 'job/' + args.job + '/'
serverURL = 'http://' + str(args.url_prefix) + '.jenkins.couchbase.com/'
# Control the eventual output format of the findings
availableFormats = ["plain", "log-line", "jira"]
outputFormat = args.format.lower()
assert outputFormat in availableFormats, "%r format is not supported" % outputFormat
consoleText = '/consoleText/'
resultURLs = {}
failedBuildNums = []
if args.build_no == 0:
# Need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
# Determine whether the inputted search parameter is a regex
isRegex = True
try:
pattern = re.compile(args.search)
searchingFor = 'RegEx "' + args.search + '"'
except re.error:
isRegex = False
pattern = args.search
searchingFor = '"' + args.search + '"'
print("Searching for", searchingFor, "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1),
"and", args.build_no, file=sys.stderr)
# Trigger timing check start
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
# Get the console log text from the jenkins job
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
if r.status_code != 200:
failedBuildNums.append(args.build_no-i)
# Perform Search
output = []
output.extend(search(r.text, pattern, isRegex))
if output:
resultURLs[serverURL + job + str(args.build_no-i) + '/console/'] = output
# Finish timing
print('\r Completed search in', (time.time() - start_time), 's', file=sys.stderr)
if failedBuildNums:
print("Failed log request on build(s) no:", failedBuildNums, file=sys.stderr)
# Ensure above prints actually print before results (and not mangled inside results)
sys.stderr.flush()
# Result output
if not resultURLs:
# Empty results, did not find any matches
print("No matches found")
elif outputFormat == 'jira':
# Print in a JIRA format
print("{panel:title=Search for", searchingFor,
"in console logs of job", args.job, "between build no",
args.build_no - (args.no_of_builds - 1), "and", args.build_no, '}')
for url in resultURLs:
print('[', url, ']', sep="")
print('{noformat}')
for line in resultURLs[url]:
print(line.replace(ASCIIFormat.BOLD, '').replace(ASCIIFormat.END, ''))
print('{noformat}')
print("{panel}")
elif outputFormat == "log-line":
# Print findings with log line attached
for url in resultURLs:
print(url, ':')
for line in resultURLs[url]:
print('\t', line)
else: # outputFormat == "plain"
# Print findings normally
for url in resultURLs:
print(url)
| 37.162162
| 86
| 0.6336
| 59
| 0.008582
| 0
| 0
| 0
| 0
| 0
| 0
| 3,284
| 0.477673
|
ce3ac2a462ca934025f075aabb0be44931935eba
| 542
|
py
|
Python
|
geokey/projects/migrations/0004_auto_20150123_1507.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/projects/migrations/0004_auto_20150123_1507.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/projects/migrations/0004_auto_20150123_1507.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_auto_20150123_1148'),
]
operations = [
migrations.AlterField(
model_name='project',
name='everyone_contributes',
field=models.CharField(default=b'auth', max_length=20, choices=[(b'true', b'true'), (b'auth', b'auth'), (b'false', b'false')]),
preserve_default=True,
),
]
| 25.809524
| 139
| 0.608856
| 433
| 0.798893
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.258303
|
ce3b5d59730c0d6fb21fce8076ca9f2a4f217a30
| 2,506
|
py
|
Python
|
hr_attendance_ex/models/sql_ser_config.py
|
alexhong121/odoo_model
|
4eff41c672bd03084eaa6eae81c8f3d359c2fb8d
|
[
"MIT"
] | null | null | null |
hr_attendance_ex/models/sql_ser_config.py
|
alexhong121/odoo_model
|
4eff41c672bd03084eaa6eae81c8f3d359c2fb8d
|
[
"MIT"
] | null | null | null |
hr_attendance_ex/models/sql_ser_config.py
|
alexhong121/odoo_model
|
4eff41c672bd03084eaa6eae81c8f3d359c2fb8d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import pyodbc
from odoo import models, fields, api, _
from odoo.exceptions import UserError, AccessError, MissingError
_logger = logging.getLogger(__name__)
class SQLConfig(models.Model):
_name = 'sql.config'
_description='sql_config'
_sql_constraints = [('check_syncing', 'UNIQUE(syncing)', '資料庫同步只能設定一台')]
name=fields.Char(string='名稱',required=True)
server = fields.Char(string='伺服器')
port = fields.Char(string='連接阜')
database = fields.Char(string='資料庫')
username = fields.Char(string='使用者名稱')
password = fields.Char(string='密碼')
odbc = fields.Selection([
('{ODBC Driver 17 for SQL Server}', 'ODBC Driver 17 for SQL Server')],
string='ODBC 驅動程式',
default='{ODBC Driver 17 for SQL Server}',
required=True
)
syncing=fields.Boolean(string='同步中')
@api.multi
def test_connection(self):
sql = self.sql_server_connection(
odbc=self.odbc, server=self.server, port=self.port, database=self.database, username=self.username,
password=self.password
)
# When the connection fails
if not sql['sqlstate']:
raise UserError(_(sql['msg']))
# When the connection succeeds
if sql['sqlstate']:
raise UserError(_("Connection Test Succeeded!"))
def sql_server_connection(self, **kwargs):
try:
info = 'DRIVER={0}; SERVER={1},{2}; DATABASE={3}; UID={4}; PWD={5}'.format(
kwargs['odbc'], kwargs['server'], kwargs['port'], kwargs['database'], kwargs['username'],
kwargs['password']
)
sql = pyodbc.connect(info)
return {'sqlstate':True,'sql':sql,'msg':None}
except pyodbc.Error as err:
sqlmsg = err.args[1]
_logger.error(sqlmsg)
return {'sqlstate':False,'sql':None,'msg':sqlmsg}
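# Minimal usage sketch (hypothetical connection details, illustrative only):
#   result = self.sql_server_connection(
#       odbc='{ODBC Driver 17 for SQL Server}', server='127.0.0.1', port='1433',
#       database='testdb', username='sa', password='secret')
#   result['sqlstate'] is True and result['sql'] holds the pyodbc connection on success.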
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
@api.multi
def sql_ser_config(self):
self.ensure_one()
template_form = self.env.ref('hr_attendance_extend.SQL_ser_config_form_themes')
template_list = self.env.ref('hr_attendance_extend.SQL_ser_config_list_themes')
return {
'name': _('Choose'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'sql.config',
'views': [(template_list.id,'list'),(template_form.id, 'form')],
'target': 'current',
}
| 32.973684
| 111
| 0.602554
| 2,392
| 0.920708
| 0
| 0
| 989
| 0.380677
| 0
| 0
| 796
| 0.30639
|
ce3bab3735a9a905747cfb1ff78c996de02c146a
| 223
|
py
|
Python
|
core/templatetags/my_custom_tags.py
|
SubhanRzayev/E-commerce-Tmart
|
239218397f4ee55ab6ae4ef1798fbc83bc7d1159
|
[
"MIT"
] | 2
|
2021-08-13T14:23:34.000Z
|
2021-09-18T08:48:29.000Z
|
core/templatetags/my_custom_tags.py
|
SubhanRzayev/E-commerce-Tmart
|
239218397f4ee55ab6ae4ef1798fbc83bc7d1159
|
[
"MIT"
] | null | null | null |
core/templatetags/my_custom_tags.py
|
SubhanRzayev/E-commerce-Tmart
|
239218397f4ee55ab6ae4ef1798fbc83bc7d1159
|
[
"MIT"
] | null | null | null |
from blog.models import Category
from django.template import Library
from core.models import *
register = Library()
@register.filter
def main_catagory(self):
if self.category is None:
return self.category
| 14.866667
| 35
| 0.735426
| 0
| 0
| 0
| 0
| 100
| 0.44843
| 0
| 0
| 0
| 0
|
ce3c2d8194ace948fc686ddfcb1f37ff3e1e1403
| 4,476
|
py
|
Python
|
Object.py
|
LeenJooken/RFMCollaborationMiner
|
5e8b2933bc9977dcc1707474f8163964dc29ea9d
|
[
"MIT"
] | null | null | null |
Object.py
|
LeenJooken/RFMCollaborationMiner
|
5e8b2933bc9977dcc1707474f8163964dc29ea9d
|
[
"MIT"
] | null | null | null |
Object.py
|
LeenJooken/RFMCollaborationMiner
|
5e8b2933bc9977dcc1707474f8163964dc29ea9d
|
[
"MIT"
] | null | null | null |
#Represents an object
class Object:
def __init__(self,ID,name):
self.name = name
self.ID = ID
self.importance = 1
#keep track of the events in which this file was the object
self.modifiedIn = []
self.addedIn = []
self.deletedIn = []
def getName(self):
return self.name
def getID(self):
return self.ID
def getImportance(self):
return self.importance
#Add an event to the right list according to the modifier
#@param event : Event object
#@param modifier : "Added" "Deleted" or "Modified"
def addEvent(self, event, modifier):
if modifier == "Added":
if(not event in self.addedIn):
self.addedIn.append(event)
elif modifier == "Deleted":
if(not event in self.deletedIn):
self.deletedIn.append(event)
else:
if(not event in self.modifiedIn):
self.modifiedIn.append(event)
#Function that calculates the importance of a object based on a ratio:
#the number of months in which it was changed / the number of months is exists
#@param firstAndLastTimeStamp = tuple with the first timestamp of the log and the last
def calculateImportanceRatio(self,firstAndLastTimeStamp):
addedTimestamps = []
for event in self.addedIn:
addedTimestamps.append(event.getTimestamp())
addedTimestamps.sort()
deletedTimestamps = []
for event in self.deletedIn:
deletedTimestamps.append(event.getTimestamp())
deletedTimestamps.sort()
timestamps = []
for event in self.modifiedIn:
timestamps.append(event.getTimestamp())
for event in self.addedIn:
timestamps.append(event.getTimestamp())
numberOfMonthsExistence = 0
numberOfMonthsChanged = 0
iteratorAdded = 0
iteratorDeleted = 0
if(not addedTimestamps):
beginstamp = firstAndLastTimeStamp[0]
#only 2 scenarios possible : 0 or 1 deleted timestamp
if(not deletedTimestamps):
endstamp = firstAndLastTimeStamp[1]
else:
endstamp = deletedTimestamps[0]
numberOfMonthsExistence += self.calculateNumberOfMonthsExistence(beginstamp,endstamp)
numberOfMonthsChanged += self.calculateNumberOfMonthsChanged(beginstamp,endstamp,timestamps)
while(iteratorAdded < len(addedTimestamps)):
beginstamp = addedTimestamps[iteratorAdded]
iteratorAdded += 1
if(iteratorDeleted == len(deletedTimestamps)):
#all deleted stamps are done
endstamp = firstAndLastTimeStamp[1]
else:
endstamp = deletedTimestamps[iteratorDeleted]
iteratorDeleted += 1
if(endstamp < beginstamp):
beginstamp = firstAndLastTimeStamp[0]
iteratorAdded -= 1
numberOfMonthsExistence += self.calculateNumberOfMonthsExistence(beginstamp,endstamp)
numberOfMonthsChanged += self.calculateNumberOfMonthsChanged(beginstamp,endstamp,timestamps)
importance = numberOfMonthsChanged/numberOfMonthsExistence
#TO DO: what if importance = 0 ?
if importance == 0 :
importance = 0.00001
self.importance = importance
#calculate how many months this object exists between these 2 timestamps
def calculateNumberOfMonthsExistence(self,beginstamp, endstamp):
numberOfMonths = abs(endstamp.year - beginstamp.year) * 12 + abs(endstamp.month - beginstamp.month)
numberOfMonths += 1
return numberOfMonths
#calculate in how many months between begin and end the object was changed
#@param timestamps = list of timestamps when the file was committed
def calculateNumberOfMonthsChanged(self,beginstamp,endstamp,timestamps):
timestamps.sort()
numberOfMonths = 0
currentMonth = -1
currentYear = -1
for stamp in timestamps:
#only consider the timestamps between the timespan
if((stamp >= beginstamp)and(stamp <= endstamp)):
if((stamp.month != currentMonth) or (currentYear != stamp.year)):
currentMonth = stamp.month
currentYear = stamp.year
numberOfMonths += 1
return numberOfMonths
| 33.402985
| 107
| 0.629133
| 4,452
| 0.994638
| 0
| 0
| 0
| 0
| 0
| 0
| 843
| 0.188338
|
ce3e44815e1657902dc5c20dbf4073f8b104c4db
| 4,336
|
py
|
Python
|
centraloffice/src/ngconfiginterface/nginterface.py
|
dmazzer/CogRIoT
|
a2d71916b0f1bd79d0f5b444865279530eb6b836
|
[
"MIT"
] | null | null | null |
centraloffice/src/ngconfiginterface/nginterface.py
|
dmazzer/CogRIoT
|
a2d71916b0f1bd79d0f5b444865279530eb6b836
|
[
"MIT"
] | null | null | null |
centraloffice/src/ngconfiginterface/nginterface.py
|
dmazzer/CogRIoT
|
a2d71916b0f1bd79d0f5b444865279530eb6b836
|
[
"MIT"
] | null | null | null |
"""
nginterface.py: NovaGenesis Interface
"""
__author__ = "Daniel Mazzer"
__copyright__ = "Copyright 2016, CogRIoT Project"
__credits__ = "Antonio Marcos Alberti"
__license__ = "MIT"
__maintainer__ = "Daniel Mazzer"
__email__ = "dmazzer@gmail.com"
import sys
import zmq
import threading
import json
sys.path.append("../../")
from utils.logmsgs import logger
class NGInterface():
'''
Provide interface communication between CellController and NovaGenesis
to exchange configuration parameters with NovaGenesis
'''
def __init__(self, config, ng_local_address, ng_remote_address):
'''
Constructor
Receives pull and push zmq socket address
Also receives the instance of the class that access the oppened configuration file.
'''
#TODO: implement the threads (timers) to listen and to publish messages to/from novagenesis
#TODO: Parse and return configuration changes to CellController
#TODO: CellController must apply config changes
self.logger = logger.Logger()
self.logger.log('NovaGenesis interface - Starting')
self.config = config
contextPull = zmq.Context()
# recieve socket
self.consumer_receiver = contextPull.socket(zmq.PULL)
self.consumer_receiver.connect(ng_remote_address)
contextPush = zmq.Context()
# transmit socket
self.consumer_sender = contextPush.socket(zmq.PUSH)
self.consumer_sender.bind(ng_local_address)
# starting thread work thread
workThread = threading.Thread(name='NGZMQPullerWork', target=self.ZMQPuller)
workThread.daemon = True
workThread.start()
self.logger.log("NovaGenesis interface ZMQ puller thread started.")
self.logger.log('NovaGenesis interface - Started')
def setConfiguration(self, Configuration):
self.logger.log('[NG-CommandParser] You should implement this feature some time')
return 'ack'
def getInformation(self):
strout = {'capacities':
{
'sensing_freq_min': '100000000' ,
'sensing_freq_max': '1800000000',
'sensing_bw_min': '1024000',
'sensing_bw_max': '2048000',
'sensing_sectors': '1',
'sensing_direction': '0',
},
'cell_info':
{
'cellcontroller_id': self.config.cellcontroller_id ,
'cellcontroller_location': self.config.cellcontroller_location
},
'current_config':
{
'sensing_freq_start': [str(self.config.sensing_start_freq), "-1" ],
'sensing_freq_stop': [str(self.config.sensing_stop_freq), "-1"],
'sensing_bw': str(self.config.sensing_band_width),
}
}
return strout
def CommandParser(self, ReceivedCommand):
self.logger.log('[NG-CommandParser] Received command, will be analysed')
if 'set_config' in ReceivedCommand:
self.logger.log('[NG-CommandParser] Received command set_config')
return_message = self.setConfiguration(ReceivedCommand)
if return_message == 'ack':
answer = {'ans':'ack'}
else:
answer = {'ans':'nak'}
self.ZMQPusher(answer)
elif 'get_info' in ReceivedCommand:
self.logger.log('[NG-CommandParser] Received command get_info')
print(ReceivedCommand)
answer = self.getInformation()
self.ZMQPusher(answer)
else:
self.logger.log('[NG-CommandParser] Received unrecognized command')
answer = {'ans':'nak'}
self.ZMQPusher(answer)
def ZMQPuller(self):
while True:
JsonMessage = self.consumer_receiver.recv_json()
DictMessage = json.dumps(JsonMessage)
self.CommandParser(DictMessage)
def ZMQPusher(self, answer):
self.consumer_sender.send_json(answer)
pass
| 34.688
| 99
| 0.592943
| 3,924
| 0.904982
| 0
| 0
| 0
| 0
| 0
| 0
| 1,504
| 0.346863
|
ce3f6405d41b2f32d5fb0b9dca8c2d47c32a7949
| 2,625
|
py
|
Python
|
tests/test_connect.py
|
mkniewallner/edgedb-python
|
2086b866d3c87c215eecf644b2393ddd857457e0
|
[
"Apache-2.0"
] | 214
|
2019-01-19T03:56:10.000Z
|
2022-03-31T01:37:33.000Z
|
tests/test_connect.py
|
mkniewallner/edgedb-python
|
2086b866d3c87c215eecf644b2393ddd857457e0
|
[
"Apache-2.0"
] | 120
|
2019-03-19T23:01:52.000Z
|
2022-03-14T08:41:27.000Z
|
tests/test_connect.py
|
mkniewallner/edgedb-python
|
2086b866d3c87c215eecf644b2393ddd857457e0
|
[
"Apache-2.0"
] | 24
|
2019-04-29T22:41:10.000Z
|
2021-11-15T00:28:01.000Z
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import edgedb
from edgedb import _testbase as tb
class TestConnect(tb.AsyncQueryTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.port = cls._get_free_port()
@classmethod
def _get_free_port(cls):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(('127.0.0.1', 0))
return sock.getsockname()[1]
except Exception:
return None
finally:
sock.close()
async def test_connect_async_01(self):
orig_conn_args = self.get_connect_args()
conn_args = orig_conn_args.copy()
conn_args['port'] = self.port
conn_args['wait_until_available'] = 0
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = '127.0.0.1'
await edgedb.async_connect(**conn_args)
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = orig_conn_args['host']
await edgedb.async_connect(**conn_args)
def test_connect_sync_01(self):
orig_conn_args = self.get_connect_args()
conn_args = orig_conn_args.copy()
conn_args['port'] = self.port
conn_args['wait_until_available'] = 0
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = '127.0.0.1'
edgedb.connect(**conn_args)
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = orig_conn_args['host']
edgedb.connect(**conn_args)
| 32.8125
| 74
| 0.632
| 1,876
| 0.714667
| 0
| 0
| 398
| 0.151619
| 722
| 0.275048
| 987
| 0.376
|
ce3f92dd86129583471cf90aca5f82b89a2e5147
| 19,947
|
py
|
Python
|
.venv/Lib/site-packages/pdoc/cli.py
|
JohanK91/MethodDice
|
73a8962c762ff48da331c9212f10676f066ed940
|
[
"MIT"
] | null | null | null |
.venv/Lib/site-packages/pdoc/cli.py
|
JohanK91/MethodDice
|
73a8962c762ff48da331c9212f10676f066ed940
|
[
"MIT"
] | null | null | null |
.venv/Lib/site-packages/pdoc/cli.py
|
JohanK91/MethodDice
|
73a8962c762ff48da331c9212f10676f066ed940
|
[
"MIT"
] | 1
|
2021-02-22T13:55:32.000Z
|
2021-02-22T13:55:32.000Z
|
#!/usr/bin/env python3
"""pdoc's CLI interface and helper functions."""
import argparse
import ast
import importlib
import inspect
import os
import os.path as path
import json
import re
import sys
import warnings
from contextlib import contextmanager
from functools import lru_cache
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict, List, Sequence
from warnings import warn
import pdoc
parser = argparse.ArgumentParser(
description="Automatically generate API docs for Python modules.",
epilog="Further documentation is available at <https://pdoc3.github.io/pdoc/doc>.",
)
aa = parser.add_argument
mode_aa = parser.add_mutually_exclusive_group().add_argument
aa(
'--version', action='version', version='%(prog)s ' + pdoc.__version__)
aa(
"modules",
type=str,
metavar='MODULE',
nargs="+",
help="The Python module name. This may be an import path resolvable in "
"the current environment, or a file path to a Python module or "
"package.",
)
aa(
"-c", "--config",
type=str,
metavar='OPTION=VALUE',
action='append',
default=[],
help="Override template options. This is an alternative to using "
"a custom config.mako file in --template-dir. This option "
"can be specified multiple times.",
)
aa(
"--filter",
type=str,
metavar='STRING',
default=None,
help="Comma-separated list of filters. When specified, "
"only identifiers containing the specified string "
"will be shown in the output. Search is case sensitive. "
"Has no effect when --http is set.",
)
aa(
"-f", "--force",
action="store_true",
help="Overwrite any existing generated (--output-dir) files.",
)
mode_aa(
"--html",
action="store_true",
help="When set, the output will be HTML formatted.",
)
mode_aa(
"--pdf",
action="store_true",
help="When set, the specified modules will be printed to standard output, "
"formatted in Markdown-Extra, compatible with most "
"Markdown-(to-HTML-)to-PDF converters.",
)
aa(
"--html-dir",
type=str,
help=argparse.SUPPRESS,
)
aa(
"-o", "--output-dir",
type=str,
metavar='DIR',
help="The directory to output generated HTML/markdown files to "
"(default: ./html for --html).",
)
aa(
"--html-no-source",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--overwrite",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--external-links",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--template-dir",
type=str,
metavar='DIR',
default=None,
help="Specify a directory containing Mako templates "
"(html.mako, text.mako, config.mako and/or any templates they include). "
"Alternatively, put your templates in $XDG_CONFIG_HOME/pdoc and "
"pdoc will automatically find them.",
)
aa(
"--link-prefix",
type=str,
help=argparse.SUPPRESS,
)
aa(
"--close-stdin",
action="store_true",
help="When set, stdin will be closed before importing, to account for "
"ill-behaved modules that block on stdin."
)
DEFAULT_HOST, DEFAULT_PORT = 'localhost', 8080
def _check_host_port(s):
if s and ':' not in s:
raise argparse.ArgumentTypeError(
"'{}' doesn't match '[HOST]:[PORT]'. "
"Specify `--http :` to use default hostname and port.".format(s))
return s
aa(
"--http",
default='',
type=_check_host_port,
metavar='HOST:PORT',
help="When set, pdoc will run as an HTTP server providing documentation "
"for specified modules. If you just want to use the default hostname "
"and port ({}:{}), set the parameter to :.".format(DEFAULT_HOST, DEFAULT_PORT),
)
aa(
"--skip-errors",
action="store_true",
help="Upon unimportable modules, warn instead of raising."
)
args = argparse.Namespace()
class _WebDoc(BaseHTTPRequestHandler):
args = None # Set before server instantiated
template_config = None
def do_HEAD(self):
status = 200
if self.path != "/":
status = self.check_modified()
self.send_response(status)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
def check_modified(self):
try:
module = pdoc.import_module(self.import_path_from_req_url)
new_etag = str(os.stat(module.__file__).st_mtime)
except ImportError:
return 404
old_etag = self.headers.get('If-None-Match', new_etag)
if old_etag == new_etag:
# Don't log repeating checks
self.log_request = lambda *args, **kwargs: None
return 304
return 205
def do_GET(self):
# Deny favicon shortcut early.
if self.path == "/favicon.ico":
return None
importlib.invalidate_caches()
code = 200
if self.path == "/":
modules = [pdoc.import_module(module, reload=True)
for module in self.args.modules]
modules = sorted((module.__name__, inspect.getdoc(module))
for module in modules)
out = pdoc._render_template('/html.mako',
modules=modules,
**self.template_config)
elif self.path.endswith(".ext"):
# External links are a bit weird. You should view them as a giant
# hack. Basically, the idea is to "guess" where something lives
# when documenting another module and hope that guess can actually
# track something down in a more global context.
#
# The idea here is to start specific by looking for HTML that
# exists that matches the full external path given. Then trim off
# one component at the end and try again.
#
# If no HTML is found, then we ask `pdoc` to do its thang on the
# parent module in the external path. If all goes well, that
# module will then be able to find the external identifier.
import_path = self.path[:-4].lstrip("/")
resolved = self.resolve_ext(import_path)
if resolved is None: # Try to generate the HTML...
print("Generating HTML for %s on the fly..." % import_path, file=sys.stderr)
try:
out = pdoc.html(import_path.split(".")[0], **self.template_config)
except Exception as e:
print('Error generating docs: {}'.format(e), file=sys.stderr)
# All hope is lost.
code = 404
out = "External identifier <code>%s</code> not found." % import_path
else:
return self.redirect(resolved)
# Redirect '/pdoc' to '/pdoc/' so that relative links work
# (results in '/pdoc/cli.html' instead of 'cli.html')
elif not self.path.endswith(('/', '.html')):
return self.redirect(self.path + '/')
# Redirect '/pdoc/index.html' to '/pdoc/' so it's more pretty
elif self.path.endswith(pdoc._URL_PACKAGE_SUFFIX):
return self.redirect(self.path[:-len(pdoc._URL_PACKAGE_SUFFIX)] + '/')
else:
try:
out = self.html()
except Exception:
import traceback
from html import escape
code = 404
out = "Error importing module <code>{}</code>:\n\n<pre>{}</pre>".format(
self.import_path_from_req_url, escape(traceback.format_exc()))
out = out.replace('\n', '<br>')
self.send_response(code)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
self.echo(out)
def redirect(self, location):
self.send_response(302)
self.send_header("Location", location)
self.end_headers()
def echo(self, s):
self.wfile.write(s.encode("utf-8"))
def html(self):
"""
Retrieves and sends the HTML belonging to the path given in
URL. This method is smart and will look for HTML files already
generated and account for whether they are stale compared to
the source code.
"""
return pdoc.html(self.import_path_from_req_url,
reload=True, http_server=True, external_links=True,
skip_errors=args.skip_errors,
**self.template_config)
def resolve_ext(self, import_path):
def exists(p):
p = path.join(args.output_dir, p)
pkg = path.join(p, pdoc._URL_PACKAGE_SUFFIX.lstrip('/'))
mod = p + pdoc._URL_MODULE_SUFFIX
if path.isfile(pkg):
return pkg[len(args.output_dir):]
elif path.isfile(mod):
return mod[len(args.output_dir):]
return None
parts = import_path.split(".")
for i in range(len(parts), 0, -1):
p = path.join(*parts[0:i])
realp = exists(p)
if realp is not None:
return "/%s#%s" % (realp.lstrip("/"), import_path)
return None
@property
def import_path_from_req_url(self):
pth = self.path.split('#')[0].lstrip('/')
for suffix in ('/',
pdoc._URL_PACKAGE_SUFFIX,
pdoc._URL_INDEX_MODULE_SUFFIX,
pdoc._URL_MODULE_SUFFIX):
if pth.endswith(suffix):
pth = pth[:-len(suffix)]
break
return pth.replace('/', '.')
def module_path(m: pdoc.Module, ext: str):
return path.join(args.output_dir, *re.sub(r'\.html$', ext, m.url()).split('/'))
def _quit_if_exists(m: pdoc.Module, ext: str):
if args.force:
return
paths = [module_path(m, ext)]
if m.is_package: # If package, make sure the dir doesn't exist either
paths.append(path.dirname(paths[0]))
for pth in paths:
if path.lexists(pth):
print("File '%s' already exists. Delete it, or run with --force" % pth,
file=sys.stderr)
sys.exit(1)
@contextmanager
def _open_write_file(filename):
try:
with open(filename, 'w', encoding='utf-8') as f:
yield f
print(filename) # print created file path to stdout
except Exception:
try:
os.unlink(filename)
except Exception:
pass
raise
def recursive_write_files(m: pdoc.Module, ext: str, **kwargs):
assert ext in ('.html', '.md')
filepath = module_path(m, ext=ext)
dirpath = path.dirname(filepath)
if not os.access(dirpath, os.R_OK):
os.makedirs(dirpath)
with _open_write_file(filepath) as f:
if ext == '.html':
f.write(m.html(**kwargs))
elif ext == '.md':
f.write(m.text(**kwargs))
for submodule in m.submodules():
recursive_write_files(submodule, ext=ext, **kwargs)
def _flatten_submodules(modules: Sequence[pdoc.Module]):
for module in modules:
yield module
for submodule in module.submodules():
yield from _flatten_submodules((submodule,))
def _print_pdf(modules, **kwargs):
modules = list(_flatten_submodules(modules))
print(pdoc._render_template('/pdf.mako', modules=modules, **kwargs))
def _warn_deprecated(option, alternative='', use_config_mako=False):
msg = 'Program option `{}` is deprecated.'.format(option)
if alternative:
msg += ' Use `' + alternative + '`'
if use_config_mako:
msg += ' or override config.mako template'
msg += '.'
warn(msg, DeprecationWarning, stacklevel=2)
def _generate_lunr_search(modules: List[pdoc.Module],
index_docstrings: bool,
template_config: dict):
"""Generate index.js for search"""
def trim_docstring(docstring):
return re.sub(r'''
\s+| # whitespace sequences
\s+[-=~]{3,}\s+| # title underlines
^[ \t]*[`~]{3,}\w*$| # code blocks
\s*[`#*]+\s*| # common markdown chars
\s*([^\w\d_>])\1\s*| # sequences of punct of the same kind
\s*</?\w*[^>]*>\s* # simple HTML tags
''', ' ', docstring, flags=re.VERBOSE | re.MULTILINE)
def recursive_add_to_index(dobj):
info = {
'ref': dobj.refname,
'url': to_url_id(dobj.module),
}
if index_docstrings:
info['doc'] = trim_docstring(dobj.docstring)
if isinstance(dobj, pdoc.Function):
info['func'] = 1
index.append(info)
for member_dobj in getattr(dobj, 'doc', {}).values():
recursive_add_to_index(member_dobj)
@lru_cache()
def to_url_id(module):
url = module.url()
if url not in url_cache:
url_cache[url] = len(url_cache)
return url_cache[url]
index = [] # type: List[Dict]
url_cache = {} # type: Dict[str, int]
for top_module in modules:
recursive_add_to_index(top_module)
urls = sorted(url_cache.keys(), key=url_cache.__getitem__)
main_path = args.output_dir
with _open_write_file(path.join(main_path, 'index.js')) as f:
f.write("URLS=")
json.dump(urls, f, indent=0, separators=(',', ':'))
f.write(";\nINDEX=")
json.dump(index, f, indent=0, separators=(',', ':'))
# Generate search.html
with _open_write_file(path.join(main_path, 'doc-search.html')) as f:
rendered_template = pdoc._render_template('/search.mako', **template_config)
f.write(rendered_template)
def main(_args=None):
""" Command-line entry point """
global args
args = _args or parser.parse_args()
warnings.simplefilter("once", DeprecationWarning)
if args.close_stdin:
sys.stdin.close()
if (args.html or args.http) and not args.output_dir:
args.output_dir = 'html'
if args.html_dir:
_warn_deprecated('--html-dir', '--output-dir')
args.output_dir = args.html_dir
if args.overwrite:
_warn_deprecated('--overwrite', '--force')
args.force = args.overwrite
template_config = {}
for config_str in args.config:
try:
key, value = config_str.split('=', 1)
value = ast.literal_eval(value)
template_config[key] = value
except Exception:
raise ValueError(
'Error evaluating --config statement "{}". '
'Make sure string values are quoted?'
.format(config_str)
)
if args.html_no_source:
_warn_deprecated('--html-no-source', '-c show_source_code=False', True)
template_config['show_source_code'] = False
if args.link_prefix:
_warn_deprecated('--link-prefix', '-c link_prefix="foo"', True)
template_config['link_prefix'] = args.link_prefix
if args.external_links:
_warn_deprecated('--external-links')
template_config['external_links'] = True
if args.template_dir is not None:
if not path.isdir(args.template_dir):
print('Error: Template dir {!r} is not a directory'.format(args.template_dir),
file=sys.stderr)
sys.exit(1)
pdoc.tpl_lookup.directories.insert(0, args.template_dir)
# Support loading modules specified as python paths relative to cwd
sys.path.append(os.getcwd())
# Virtual environment handling for pdoc script run from system site
try:
venv_dir = os.environ['VIRTUAL_ENV']
except KeyError:
pass # pdoc was not invoked while in a virtual environment
else:
from glob import glob
from distutils.sysconfig import get_python_lib
libdir = get_python_lib(prefix=venv_dir)
sys.path.append(libdir)
# Resolve egg-links from `setup.py develop` or `pip install -e`
# XXX: Welcome a more canonical approach
for pth in glob(path.join(libdir, '*.egg-link')):
try:
with open(pth) as f:
sys.path.append(path.join(libdir, f.readline().rstrip()))
except IOError:
warn('Invalid egg-link in venv: {!r}'.format(pth))
if args.http:
template_config['link_prefix'] = "/"
# Run the HTTP server.
_WebDoc.args = args # Pass params to HTTPServer xP
_WebDoc.template_config = template_config
host, _, port = args.http.partition(':')
host = host or DEFAULT_HOST
port = int(port or DEFAULT_PORT)
print('Starting pdoc server on {}:{}'.format(host, port), file=sys.stderr)
httpd = HTTPServer((host, port), _WebDoc)
print("pdoc server ready at http://%s:%d" % (host, port), file=sys.stderr)
# Allow tests to perform `pdoc.cli._httpd.shutdown()`
global _httpd
_httpd = httpd
try:
httpd.serve_forever()
finally:
httpd.server_close()
sys.exit(0)
docfilter = None
if args.filter and args.filter.strip():
def docfilter(obj, _filters=args.filter.strip().split(',')):
return any(f in obj.refname or
isinstance(obj, pdoc.Class) and f in obj.doc
for f in _filters)
modules = [pdoc.Module(module, docfilter=docfilter,
skip_errors=args.skip_errors)
for module in args.modules]
pdoc.link_inheritance()
if args.pdf:
_print_pdf(modules, **template_config)
import textwrap
print("""
PDF-ready markdown written to standard output.
^^^^^^^^^^^^^^^
Convert this file to PDF using e.g. Pandoc:
{PANDOC_CMD}
or using Python-Markdown and Chrome/Chromium/WkHtmlToPDF:
markdown_py --extension=meta \\
--extension=abbr \\
--extension=attr_list \\
--extension=def_list \\
--extension=fenced_code \\
--extension=footnotes \\
--extension=tables \\
--extension=admonition \\
--extension=smarty \\
--extension=toc \\
pdf.md > pdf.html
chromium --headless --disable-gpu --print-to-pdf=pdf.pdf pdf.html
wkhtmltopdf --encoding utf8 -s A4 --print-media-type pdf.html pdf.pdf
or similar, at your own discretion.""".format(PANDOC_CMD=textwrap.indent(_PANDOC_COMMAND, ' ')),
file=sys.stderr)
sys.exit(0)
for module in modules:
if args.html:
_quit_if_exists(module, ext='.html')
recursive_write_files(module, ext='.html', **template_config)
elif args.output_dir: # Generate text files
_quit_if_exists(module, ext='.md')
recursive_write_files(module, ext='.md', **template_config)
else:
sys.stdout.write(module.text(**template_config))
# Two blank lines between two modules' texts
sys.stdout.write(os.linesep * (1 + 2 * int(module != modules[-1])))
lunr_config = pdoc._get_config(**template_config).get('lunr_search')
if lunr_config is not None:
_generate_lunr_search(
modules, lunr_config.get("index_docstrings", True), template_config)
_PANDOC_COMMAND = '''\
pandoc --metadata=title:"MyProject Documentation" \\
--from=markdown+abbreviations+tex_math_single_backslash \\
--pdf-engine=xelatex --variable=mainfont:"DejaVu Sans" \\
--toc --toc-depth=4 --output=pdf.pdf pdf.md\
'''
if __name__ == "__main__":
main(parser.parse_args())
| 33.41206
| 99
| 0.587657
| 5,821
| 0.291823
| 513
| 0.025718
| 911
| 0.045671
| 0
| 0
| 6,676
| 0.334687
|
ce40a683df91507328100c3fd2d4f4e66c206aad
| 4,981
|
py
|
Python
|
application/helper/connection_check.py
|
HarshadKavathiya/acciom
|
10e4d813c897bcf0078ab350d9432117cb708d1a
|
[
"MIT"
] | null | null | null |
application/helper/connection_check.py
|
HarshadKavathiya/acciom
|
10e4d813c897bcf0078ab350d9432117cb708d1a
|
[
"MIT"
] | 9
|
2019-07-23T09:55:15.000Z
|
2022-02-19T01:45:12.000Z
|
application/helper/connection_check.py
|
accionlabs/acciom
|
889958c0f8ec1d74db1958d0a6473c4678eaab3f
|
[
"MIT"
] | 21
|
2019-07-20T04:47:23.000Z
|
2020-01-07T06:55:42.000Z
|
import cx_Oracle
import psycopg2
import pymysql
import pyodbc
from application.common.constants import APIMessages, SupportedDBType, \
GenericStrings
def connection_check(db_type_id, db_hostname, db_username, db_password,
db_name):
"""
Helper method to check the database connectivity for the given database
details.
Args:
db_type_id(int): type of the database
db_hostname(str): database hostname
db_username(str): database username
db_password(str): database password
db_name(str): database name
Returns(str):
Returns success only if the connection can be established
"""
# cnxn is a connection object
if db_type_id == SupportedDBType().get_db_id_by_name("mysql"):
try:
cnxn = pymysql.connect(host=db_hostname, user=db_username,
password=db_password, db=db_name)
except pymysql.err.InternalError as e:
if GenericStrings.UNKNOWN_DATABASE_MYSQL in e.args[1]:
return APIMessages.UNKNOWN_DATABASE.format(db_name)
elif GenericStrings.CANNOT_CONNECT_TO_REMOTE_SERVER_MYSQL in \
e.args[1]:
return APIMessages.CANNOT_CONNECT_TO_REMOTE_SERVER_MYSQL
else:
return e.args[1]
except pymysql.err.OperationalError as e:
if GenericStrings.AUTHENTICATION_FAILED_MYSQL in e.args[1]:
return APIMessages.AUTHENTICATION_FAILED.format(db_username)
elif GenericStrings.CANNOT_CONNECT_TO_SERVER_MYSQL in e.args[1]:
return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
SupportedDBType().get_db_name_by_id(db_type_id),
db_hostname)
else:
return e.args[1]
cursor = cnxn.cursor()
if cursor:
return APIMessages.SUCCESS
elif db_type_id == SupportedDBType().get_db_id_by_name("mssql"):
server = db_hostname
database = db_name
username = db_username
password = db_password
# This code can only handle Oracle Driver 17.
# If another driver version (e.g. 13) is given, the code will fail.
# TODO: Need to implement an approach that takes the driver version
# based on user input
try:
cnxn = pyodbc.connect(
'DRIVER={0}'.format(GenericStrings.ORACLE_DRIVER) +
';SERVER=' + server +
';DATABASE=' + database +
';UID=' + username + ';PWD=' + password)
except pyodbc.ProgrammingError as e:
return APIMessages.UNKNOWN_DATABASE.format(db_name)
except pyodbc.InterfaceError as e:
return APIMessages.AUTHENTICATION_FAILED.format(db_username)
except pyodbc.OperationalError as e:
return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
SupportedDBType().get_db_name_by_id(db_type_id),
db_hostname)
cursor = cnxn.cursor()
if cursor:
return APIMessages.SUCCESS
elif db_type_id == SupportedDBType().get_db_id_by_name("postgresql"):
try:
cnxn = psycopg2.connect(host=db_hostname, database=db_name,
user=db_username,
password=db_password)
except psycopg2.OperationalError as e:
if GenericStrings.UNKNOWN_DATABASE_POSTGRES in str(e):
return APIMessages.UNKNOWN_DATABASE.format(db_name)
elif GenericStrings.AUTHENTICATION_FAILED_POSTGRES in str(e):
return APIMessages.AUTHENTICATION_FAILED.format(db_username)
elif GenericStrings.CANNOT_CONNECT_TO_SERVER_POSTGRES in str(e):
return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
SupportedDBType().get_db_name_by_id(db_type_id),
db_hostname)
else:
return e
cursor = cnxn.cursor()
if cursor:
return APIMessages.SUCCESS
elif db_type_id == SupportedDBType().get_db_id_by_name("oracle"):
try:
cnxn = cx_Oracle.connect(
"{0}/{1}@{2}/{3}".format(db_username, db_password, db_hostname,
db_name))
except cx_Oracle.DatabaseError as e:
if GenericStrings.UNKNOWN_DB_AUTHENTICATION_FAILED_ORACLE in str(
e):
return APIMessages.UNKNOWN_DB_AUTHENTICATION_FAILED.format(
db_name, db_username)
elif GenericStrings.CANNOT_CONNECT_TO_SERVER_ORACLE in str(
e):
return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
SupportedDBType().get_db_name_by_id(db_type_id),
db_hostname)
else:
return e
cursor = cnxn.cursor()
if cursor:
return APIMessages.SUCCESS
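# Minimal usage sketch (hypothetical credentials, illustrative only):
#   status = connection_check(
#       SupportedDBType().get_db_id_by_name("mysql"),
#       db_hostname="localhost", db_username="user",
#       db_password="secret", db_name="testdb")
#   status equals APIMessages.SUCCESS only when the connection could be established.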
| 43.313043
| 79
| 0.609717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 701
| 0.140735
|
ce40f79ba52230bce534975d34f03a0b62be130e
| 701
|
py
|
Python
|
src/db/alembic/tests/add_problems.py
|
furea2/ProofGame
|
787f9be7f616c53eb9ce5a677660aee7cc824a14
|
[
"MIT"
] | null | null | null |
src/db/alembic/tests/add_problems.py
|
furea2/ProofGame
|
787f9be7f616c53eb9ce5a677660aee7cc824a14
|
[
"MIT"
] | null | null | null |
src/db/alembic/tests/add_problems.py
|
furea2/ProofGame
|
787f9be7f616c53eb9ce5a677660aee7cc824a14
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///db.sqlite3')
Session = sessionmaker(engine)
import sys
sys.path.append("D:\\Users\\furea2\\NodejsProjects\\login_sample\\src\\db\\alembic\\app\\models")
from problem import Problem
userList=[
Problem(title='zero_le_one', body='theorem zero_le_one : 0 < 1 := sorry', difficulty=1),
Problem(title='zero_le_two', body='theorem zero_le_two : 0 < 2 := sorry', difficulty=1),
Problem(title='one_le_two', body='theorem one_le_two : 1 < 2 := sorry', difficulty=1),
]
if __name__=='__main__':
with Session() as session:
session.add_all(userList)
session.commit()
| 33.380952
| 97
| 0.713267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 263
| 0.375178
|
ce41c01cf6e704d26212115f3eeca7c18fc41d33
| 2,871
|
py
|
Python
|
aes/rkeys.py
|
raviolliii/AES-Encryption
|
48434bd2cb19fc447065872cf1a1ede55ed024de
|
[
"MIT"
] | null | null | null |
aes/rkeys.py
|
raviolliii/AES-Encryption
|
48434bd2cb19fc447065872cf1a1ede55ed024de
|
[
"MIT"
] | null | null | null |
aes/rkeys.py
|
raviolliii/AES-Encryption
|
48434bd2cb19fc447065872cf1a1ede55ed024de
|
[
"MIT"
] | null | null | null |
def uint(x):
# casts x to unsigned int (1 byte)
return x & 0xff
def chunkify(string, size):
# breaks up string into chunks of size
chunks = []
for i in range(0, len(string), size):
chunks.append(string[i:i + size])
return chunks
def gen_rcons(rounds):
# generates and returns round constants
# the round constants don't depend on the
# key so these constants could have been
# hard coded, but I wanted to build it anyway
rcons = []
for i in range(rounds):
value = 0
if i + 1 > 1:
if rcons[i - 1] >= 0x80:
value = uint((2 * rcons[i - 1]) ^ 0x11b)
else:
value = uint(2 * rcons[i - 1])
else:
value = 1
rcons.append(value)
return list(map(lambda x: x << 24, rcons))
def generate_round_keys(key, rounds, sbox):
# Generates round keys based on main key
# basic variables used for looping, etc.
key_size = len(key) * 8
R = rounds + 1
rcons = gen_rcons(rounds) # get round key constants
N = key_size // 32
# split key into 32 bit words and parse to int
K = [int(k.encode("utf-8").hex(), 16) for k in chunkify(key, 4)]
W = [0] * (4 * R)
# main loop to generate expanded round subkeys
for i in range(4 * R):
if i < N:
W[i] = K[i]
elif i >= N and i % N == 0:
word_str = hex(W[i - 1])[2:].zfill(8) # turn int to 8 digit hex
rot = word_str[2:] + word_str[:2] # rotate left 1 byte
hex_bytes = chunkify(rot, 2) # split into byte chunks
subvals = [sbox(hexb) for hexb in hex_bytes] # sub out hex bytes with s-box
sval = (subvals[0] << 24) \
+ (subvals[1] << 16) \
+ (subvals[2] << 8) \
+ subvals[3] # concat hex bytes and parse to 32 bit int
W[i] = W[i - N] ^ sval ^ rcons[(i // N) - 1]
elif i >= N and N > 6 and i % N == 4:
word_str = hex(W[i - 1])[2:].zfill(8) # turn int to 8 digit hex
hex_bytes = chunkify(word_str, 2) # split into byte chunks
subvals = [sbox(hexb) for hexb in hex_bytes] # sub out hex bytes with s-box
sval = (subvals[0] << 24) \
+ (subvals[1] << 16) \
+ (subvals[2] << 8) \
+ subvals[3] # concat hex bytes and parse to 32 bit int
W[i] = W[i - N] ^ sval
else:
W[i] = W[i - N] ^ W[i - 1]
# subkeys are all 128 bits, but each entry is 32 bits
# so combine all entries by groups of 4 for later use
return [tuple(W[i:i + 4]) for i in range(0, len(W), 4)]
def add_round_key(state, round_key):
# adds each byte of the round key to the
# respective byte of the state
key = []
# round key is a tuple of 4, 32 bit ints
for rk in round_key:
hx = hex(rk)[2:].zfill(8) # turn int to hex
# add each byte to the key list as an int
key += [int(hx[i:i + 2], 16) for i in range(0, len(hx), 2)]
for i in range(len(state)):
# run through the state and add each byte to the
# respective byte in the key
key_state = [key[j] for j in range(i, len(key), 4)]
state[i] = [sv ^ kv for sv, kv in zip(state[i], key_state)]
return state
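# Minimal usage sketch (illustrative only; assumes an `sbox` callable defined elsewhere
# that maps a two-character hex byte string to its substituted integer value, plus a
# 4x4 `state` matrix of byte values):
#   round_keys = generate_round_keys("Thats my Kung Fu", rounds=10, sbox=sbox)
#   state = add_round_key(state, round_keys[0])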
| 33.383721
| 78
| 0.617207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,057
| 0.368164
|
cbe97f3cb389489740f1e42249ec7c347020db47
| 30
|
py
|
Python
|
otscrape/core/extractor/nested/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/extractor/nested/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/extractor/nested/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
from .zip_dict import ZipDict
| 15
| 29
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cbea98388f135a070422bda42a79198d77ccf817
| 546
|
py
|
Python
|
10_Exceptions_and_Errors/internal.py
|
MANOJPATRA1991/Python-Beyond-the-Basics
|
aed7bfd35e33c2b1759b48e1c89314aa149c56d0
|
[
"MIT"
] | null | null | null |
10_Exceptions_and_Errors/internal.py
|
MANOJPATRA1991/Python-Beyond-the-Basics
|
aed7bfd35e33c2b1759b48e1c89314aa149c56d0
|
[
"MIT"
] | null | null | null |
10_Exceptions_and_Errors/internal.py
|
MANOJPATRA1991/Python-Beyond-the-Basics
|
aed7bfd35e33c2b1759b48e1c89314aa149c56d0
|
[
"MIT"
] | null | null | null |
def modulus_three(n):
r = n % 3
if r == 0:
print("Multiple of 3")
elif r == 1:
print("Remainder 1")
else:
assert r == 2, "Remainder is not 2"
print("Remainder 2")
def modulus_four(n):
r = n % 4
if r == 0:
print("Multiple of 4")
elif r == 1:
print("Remainder 1")
elif r == 2:
print("Remainder 2")
elif r == 3:
print("Remainder 3")
else:
assert False, "This should never happen"
if __name__ == '__main__':
modulus_four(5)
| 20.222222
| 48
| 0.507326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.276557
|
cbeae155ad896dc6fd2c6c3e36347da77b95da7e
| 17,038
|
py
|
Python
|
ml_studio/visualate/dashboards/data_explorer.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | 1
|
2020-01-30T09:37:00.000Z
|
2020-01-30T09:37:00.000Z
|
ml_studio/visualate/dashboards/data_explorer.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | 3
|
2019-12-05T19:37:59.000Z
|
2020-03-31T05:49:53.000Z
|
ml_studio/visualate/dashboards/data_explorer.py
|
john-james-ai/ml-studio
|
2230fcd6579d2291c761e559ec93b18ddd7a96e6
|
[
"BSD-3-Clause"
] | null | null | null |
# =========================================================================== #
# DATA EXPLORER #
# =========================================================================== #
# =========================================================================== #
# Project: ML Studio #
# Version: 0.1.14 #
# File: \data_explorer.py #
# Python Version: 3.7.3 #
# --------------- #
# Author: John James #
# Company: Decision Scients #
# Email: jjames@decisionscients.com #
# --------------- #
# Create Date: Friday December 6th 2019, 9:12:28 pm #
# Last Modified: Friday December 6th 2019, 9:12:35 pm #
# Modified By: John James (jjames@decisionscients.com) #
# --------------- #
# License: Modified BSD #
# Copyright (c) 2019 Decision Scients #
# =========================================================================== #
"""Data Explorer - A dash powered web app for analyzing and preparing data.
This module provides a dashboard application that supports:
- Data Audit : Missing values and outliers
- Data Analysis : Exploration of data vis-a-vis statistical
assumptions of independence, linearity, normality,
and homoscedasticity
- Data Preparation : Missing values, and outliers
- Feature Selection : Identifying the features that most
influence the dependent variable
- Features Engineering : Feature transformation, Binning
One-Hot Encoding, Features Split and Scaling
- Dimensionality Reduction : PCA,
t-Distributed Stochastic Neighbor Embedding (t-SNE)
see https://www.analyticsvidhya.com/blog/2018/08/dimensionality-reduction-techniques-python/
Note: This module was highly inspired by the plotly dash-svm
at https://github.com/plotly/dash-svm.
"""
#%%
import os
import sys
sys.path.append('ml_studio')
sys.path.append('ml_studio/utils/visual')
import time
from textwrap import dedent
import warnings
import dash
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing, make_regression
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.svm import SVC
from ml_studio.visualate.classification.figures import serve_prediction_plot, serve_roc_curve, \
serve_pie_confusion_matrix
import ml_studio
from ml_studio.utils.model import get_model_name
from ml_studio.utils.data_manager import sampler, data_split, StandardScaler
from ml_studio.utils.misc import proper
import ml_studio.utils.visual as drc
# --------------------------------------------------------------------------- #
external_scripts = [
# Normalize the CSS
"https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css",
# Fonts
"https://fonts.googleapis.com/css?family=Open+Sans|Roboto",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"
]
app = dash.Dash(__name__,
external_scripts=external_scripts)
app.scripts.config.serve_locally = False
server = app.server
# --------------------------------------------------------------------------- #
# Generate Data #
# --------------------------------------------------------------------------- #
def generate_data(dataset, n_samples=None, n_features=None, noise=100,
seed=None):
if dataset == 'california':
return(fetch_california_housing(return_X_y=True))
elif dataset == 'msd':
data = pd.read_csv("ml_studio/data_gathering/msd/year_prediction.csv")
y = data[['label']]
X = data.drop(columns=['label'], inplace=False)
msd = (X, y)
return msd
elif dataset == 'online_news':
data = pd.read_csv("ml_studio/data_gathering/online_news_popularity/OnlineNewsPopularity.csv")
data.columns = data.columns.str.replace(r'\s+', '')
y = data[['shares']]
X = data.drop(columns=['shares'], inplace=False)
online_news = (X, y)
return online_news
elif dataset == 'speed_dating':
data = pd.read_csv("ml_studio/data_gathering/speed_dating/Speed Dating Data.csv",
encoding = 'unicode_escape')
y = data[['match']]
X = data.drop(columns=['match'], inplace=False)
speed_dating = (X, y)
return speed_dating
elif dataset == 'regression':
if n_samples is None:
warnings.warn("n_samples is None, defaulting to 10,000")
n_samples = 10000
if n_features is None:
warnings.warn("n_features is None, defaulting to 100")
n_features = 100
X, y = make_regression(n_samples, n_features,
n_informative=100,
bias=400,
effective_rank=50,
noise=100,
random_state=seed)
regression = (X, y)
return regression
elif dataset == 'binary':
X, y = make_classification(
n_samples=100,
n_features=2,
n_redundant=0,
n_informative=2,
random_state=2,
n_clusters_per_class=1
)
linearly_separable = (X, y)
return linearly_separable
else:
raise ValueError(
'Data type incorrectly specified. Please choose an existing '
'dataset.')
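# Example calls (illustrative only; the synthetic options need no files on disk):
#   X, y = generate_data('binary')
#   X, y = generate_data('regression', n_samples=10000, n_features=100, seed=42)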
# --------------------------------------------------------------------------- #
# Define Tabs #
# --------------------------------------------------------------------------- #
tabs_styles = {
'height': '44px'
}
tab_style = {
'border': '1px solid #282b38',
'borderBottom': '1px solid #282b38',
'backgroundColor': '#282b38',
'padding': '6px',
'fontWeight': 'bold'
}
tab_selected_style = {
'border': '1px solid #282b38',
'borderBottom': '1px solid #31459E',
'backgroundColor': '#282b38',
'color': 'white',
'padding': '6px'
}
def build_tabs():
return html.Div(
id="tabs",
className="tabs",
children=[
dcc.Tabs(
id="app-tabs",
value="tab1",
className="custom-tabs",
children=[
dcc.Tab(
id="Analysis-tab",
label="Data Analysis",
value="tab3",
style=tab_style,
selected_style=tab_selected_style,
className="custom-tab",
selected_className="custom-tab--selected",
),
dcc.Tab(
id="Cleaning-tab",
label="Data Cleaning",
value="tab4",
style=tab_style,
selected_style=tab_selected_style,
className="custom-tab",
selected_className="custom-tab--selected",
),
dcc.Tab(
id="Feature-selection-tab",
label="Feature Selection",
value="tab5",
style=tab_style,
selected_style=tab_selected_style,
className="custom-tab",
selected_className="custom-tab--selected",
),
dcc.Tab(
id="Features-engineering-tab",
label="Feature Engineering",
value="tab6",
style=tab_style,
selected_style=tab_selected_style,
className="custom-tab",
selected_className="custom-tab--selected",
),
dcc.Tab(
id="Dimension-reduction-tab",
label="Dimension Reduction",
value="tab7",
style=tab_style,
selected_style=tab_selected_style,
className="custom-tab",
selected_className="custom-tab--selected",
),
],
)
],
)
def build_analysis_tab():
pass
app.layout = html.Div(children=[
# .container class is fixed, .container.scalable is scalable
html.Div(className="banner", children=[
# Change App Name here
html.Div(className='container scalable', children=[
# Change App Name here
html.H2(html.A(
'ML Studio Data Explorer',
href='https://github.com/decisionscients/ml-studio',
style={
'text-decoration': 'none',
'color': 'inherit'
}
)),
html.A(
# TODO: Create logo
html.Img(src="https://s3-us-west-1.amazonaws.com/plotly-tutorials/logo/new-branding/dash-logo-by-plotly-stripe-inverted.png"),
href='https://plot.ly/products/dash/'
)
]),
]),
html.Div(id='body', className='container scalable', children=[
html.Div(
id="app-container",
children=[
build_tabs()
],
),
html.Div(className='row', children=[
html.Div(
id='div-graphs',
children=dcc.Graph(
id='graph-sklearn-svm',
style={'display': 'none'}
)
),
html.Div(
className='three columns',
style={
'min-width': '24.5%',
'max-height': 'calc(100vh - 85px)',
'overflow-y': 'auto',
'overflow-x': 'hidden',
},
children=[
drc.Card([
drc.NamedDropdown(
name='Select Data Type',
id='dropdown-select-datatype',
options=[
{'label': 'Regression', 'value': 'regression'},
{'label': 'Binary Classification','value': 'binary'},
{'label': 'Multiclass Classification','value': 'multiclass'}
],
clearable=False,
searchable=False,
value='regression'
),
drc.NamedDropdown(
name='Select Dataset',
id='dropdown-select-dataset',
options=[
{'label': 'California Housing', 'value': 'california'},
{'label': 'Million Song Dataset','value': 'msd'},
{'label': 'Online News Popularity','value': 'online_news'},
{'label': 'Speed Dating', 'value': 'speed_dating'},
{'label': 'Regression', 'value': 'regression'},
{'label': 'Binary', 'value': 'binary'}
],
clearable=False,
searchable=False,
value='california'
),
]),
html.Div(
dcc.Markdown(dedent("""
[Click here](https://github.com/decisionscients/ml-studio) to visit the project repo, and learn about how to use the app.
""")),
style={'margin': '20px 0px', 'text-align': 'center'}
),
]
),
]),
])
])
# @app.callback(Output('div-graphs', 'children'),
# Input('dropdown-select-dataset', 'value'),
# Input('slider-threshold', 'value')
# def update_svm_graph(kernel,
# degree,
# C_coef,
# C_power,
# gamma_coef,
# gamma_power,
# dataset,
# noise,
# shrinking,
# threshold,
# sample_size):
# t_start = time.time()
# h = .3 # step size in the mesh
# # Data Pre-processing
# X, y = generate_data(dataset=dataset)
# StandardScaler().fit(X)
# X = StandardScaler().transform(X)
# X_train, X_test, y_train, y_test = \
# data_split(X, y, test_size=.4, seed=42)
# x_min = X[:, 0].min() - .5
# x_max = X[:, 0].max() + .5
# y_min = X[:, 1].min() - .5
# y_max = X[:, 1].max() + .5
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
# np.arange(y_min, y_max, h))
# C = C_coef * 10 ** C_power
# gamma = gamma_coef * 10 ** gamma_power
# # Train SVM
# clf = SVC(
# C=C,
# kernel=kernel,
# degree=degree,
# gamma=gamma,
# shrinking=shrinking
# )
# clf.fit(X_train, y_train)
# # Plot the decision boundary. For that, we will assign a color to each
# # point in the mesh [x_min, x_max]x[y_min, y_max].
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# prediction_figure = serve_prediction_plot(
# model=clf,
# X_train=X_train,
# X_test=X_test,
# y_train=y_train,
# y_test=y_test,
# Z=Z,
# xx=xx,
# yy=yy,
# mesh_step=h,
# threshold=threshold
# )
# roc_figure = serve_roc_curve(
# model=clf,
# X_test=X_test,
# y_test=y_test
# )
# confusion_figure = serve_pie_confusion_matrix(
# model=clf,
# X_test=X_test,
# y_test=y_test,
# Z=Z,
# threshold=threshold
# )
# print(
# f"Total Time Taken: {time.time() - t_start:.3f} sec")
# return [
# html.Div(
# className='three columns',
# style={
# 'min-width': '24.5%',
# 'height': 'calc(100vh - 90px)',
# 'margin-top': '5px',
# # Remove possibility to select the text for better UX
# 'user-select': 'none',
# '-moz-user-select': 'none',
# '-webkit-user-select': 'none',
# '-ms-user-select': 'none'
# },
# children=[
# dcc.Graph(
# id='graph-line-roc-curve',
# style={'height': '40%'},
# figure=roc_figure
# ),
# dcc.Graph(
# id='graph-pie-confusion-matrix',
# figure=confusion_figure,
# style={'height': '60%'}
# )
# ]),
# html.Div(
# className='six columns',
# style={'margin-top': '5px'},
# children=[
# dcc.Graph(
# id='graph-sklearn-svm',
# figure=prediction_figure,
# style={'height': 'calc(100vh - 90px)'}
# )
# ])
# ]
# Running the server
if __name__ == '__main__':
app.run_server(debug=True)
# %%
| 37.862222
| 145
| 0.435204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,226
| 0.541495
|
cbeb6bdd865a57de9bfabcbd439111e0ae5d40b5
| 1,080
|
py
|
Python
|
bot.py
|
m2Link/YouTube-Video-Search
|
0512ea220af271dc1853925026f31c32990fa4ff
|
[
"MIT"
] | 9
|
2021-09-30T06:25:03.000Z
|
2022-02-10T05:45:23.000Z
|
bot.py
|
m2Link/YouTube-Video-Search
|
0512ea220af271dc1853925026f31c32990fa4ff
|
[
"MIT"
] | null | null | null |
bot.py
|
m2Link/YouTube-Video-Search
|
0512ea220af271dc1853925026f31c32990fa4ff
|
[
"MIT"
] | 7
|
2021-09-30T06:24:56.000Z
|
2022-02-10T04:52:10.000Z
|
from pyrogram import Client ,filters
import os
from py_youtube import Data, Search
from pyrogram.types import *
TOKEN = os.environ.get("TOKEN", "")
APP_ID = int(os.environ.get("APP_ID", ""))
API_HASH = os.environ.get("API_HASH", "")
app = Client("yt-search",
             bot_token=TOKEN, api_id=APP_ID, api_hash=API_HASH)
@app.on_message(filters.private & filters.command(["start"]))
async def start(client, message):
    await message.reply_text("Hello, I am YouTube Video Search\nUse me in inline mode")
@app.on_inline_query()
async def search_video(client,query):
search = []
result = Search(query.query.strip()).videos()
for i in result:
try:
title = i["title"]
id = i["id"]
thumb = i["thumb"][0]
data = i["simple_data"]
except:
pass
try:
search.append(
InlineQueryResultPhoto(
title=title,
description=data,
caption="https://youtu.be/"+id,
photo_url=thumb))
except:
pass
await query.answer(search)
app.run()
| 22.5
| 78
| 0.609259
| 0
| 0
| 0
| 0
| 726
| 0.672222
| 638
| 0.590741
| 150
| 0.138889
|
cbebd1e68288c77af1b90def0eca795dc3029718
| 7,582
|
py
|
Python
|
steam_review_sentiments/cnn_model.py
|
joshuamegnauth54/aapor_scholars_2021
|
1848083203714d2c0a205e538e91990983b3320e
|
[
"MIT"
] | null | null | null |
steam_review_sentiments/cnn_model.py
|
joshuamegnauth54/aapor_scholars_2021
|
1848083203714d2c0a205e538e91990983b3320e
|
[
"MIT"
] | 1
|
2021-11-03T15:33:31.000Z
|
2021-11-03T15:33:31.000Z
|
steam_review_sentiments/cnn_model.py
|
joshuamegnauth54/steam_user_reviews
|
1848083203714d2c0a205e538e91990983b3320e
|
[
"MIT"
] | null | null | null |
import numpy as np
import keras
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.exceptions import NotFittedError
from keras.models import Sequential
from keras.layers import BatchNormalization, Conv1D, Dense, Embedding
from keras.layers.pooling import GlobalMaxPooling1D
from keras.initializers import Constant
from utilities import null_preproc, transform_string,\
transform_all, tokenize_all
# This class is badly designed. I wanted to leverage spaCy, but I combined
# tools in a very poor way...
class PadCounts:
def __init__(self, nlp, pad_length=None):
"""Instantiate PadCounts.
Parameters
----------
nlp : spacy.lang.en.English
Trained spaCy language object.
pad_length : int, optional
Set a predefined length to pad data in transform(). Calculated
from X_train during fit() if None.
Returns
-------
None.
"""
# Language object for embeddings.
self.__nlp = nlp
# Word embeddings array.
self.__embeddings = None
# Sklearn model for word counts.
self.__vectorizer = None
# Vocabulary size based on X_train.
self.__vocab_size = None
# Length of the pre-trained word embeddings vector (300 most likely)
self.__vec_size = None
# Max length of a training document (or a predefined max for padding)
self.__pad_length = pad_length
def __to_docs(self, X):
# Convert X to a list of Doc if necessary
if isinstance(X[0], str):
return np.array([self.__nlp(text) for text in X])
else:
return X
def fit(self, X_train):
"""Fit PadCounts on X_train and transform into embeddings.
Parameters
----------
X_train : np.ndarray[spacy.tokens.Doc or str]
Array of spaCy Docs or strings (training).
Raises
------
ValueError
Raised if X_train isn't an array of spaCy Docs.
Returns
-------
None.
"""
if not isinstance(X_train, (np.ndarray, list)) or not len(X_train):
raise ValueError("X_train needs to be an array of strs or Docs.")
# Make sure X_train are Docs.
X_train = self.__to_docs(X_train)
# CountVectorizer counts each word/token, so I can use it to extract
# ONLY the vectors present in my data from spaCy's pretrained
# embeddings.
self.__vectorizer = CountVectorizer(strip_accents="unicode",
preprocessor=null_preproc,
tokenizer=transform_string,
token_pattern=None).fit(X_train)
# The vocabulary size only consists of the terms that appear after
# vectorizing. This is our first dimension.
        # 0 will be used as an indicator for missing words, so shift every
        # vocabulary index up by one.
self.__vocab_size = len(self.__vectorizer.get_feature_names()) + 1
# Word vectors length (second dimension).
self.__vec_size = self.__nlp.vocab.vectors_length
# Remove stop words, et cetera.
# And yeah, due to bad design I'm calling transform_string a lot.
X_transformed = transform_all(X_train)
if not self.__pad_length:
self.__pad_length = len(max(X_transformed, key=len))
def embeddings(self):
"""Return subsetted embeddings for X_train.
The returned vectors are a subset of the spaCy language object's
vectors that only include words present in X_train.
PadCounts should be fit() before calling embeddings().
Raises
------
NotFittedError
Raised if PadCounts() is unfit.
Returns
-------
embeddings : np.ndarray[np.float32]
Subsetted word embeddings.
"""
        if self.__embeddings is not None:
            return self.__embeddings
elif not self.__vectorizer:
raise NotFittedError("Call PadCounts.fit() first.")
# Initialize a zero length ndarray with the vocab and vector sizes.
self.__embeddings = np.zeros((self.__vocab_size, self.__vec_size),
dtype=np.float32)
# CountVectorizer.vocabulary_ is a dictionary matching word to index.
# Thus:
# index = vectorizer.vocabulary_["meow"]
# value = vectorizer.get_feature_names()[index]
# value == "meow"
for word, i in self.__vectorizer.vocabulary_.items():
# Can't index with NumPy strings.
# Also, shift the embeddings by 1.
            self.__embeddings[i + 1] = self.__nlp.vocab[str(word)].vector
        return self.__embeddings
def transform(self, X, remove_junk=True):
"""Return tokenized X.
Parameters
----------
X : np.ndarray[Doc or str]
Array of Docs or str to tokenize.
remove_junk : bool, optional
Whether X needs to be transformed to remove stop words.
The default is True.
Raises
------
        NotFittedError
            Raised if PadCounts() is unfit.
        ValueError
            Raised if X is not a non-empty array of strs or Docs.
Returns
-------
X_tokens : np.ndarray[np.int32]
Word embeddings for X.
"""
if not self.__vectorizer or not self.__pad_length:
raise NotFittedError("Call PadCounts.fit() first.")
if not isinstance(X, (np.ndarray, list)) or not len(X):
raise ValueError("X_train needs to be an array of strs or Docs.")
# Make sure X is a list of Docs
X = self.__to_docs(X)
# Remove stop words et cetera if necessary.
if remove_junk:
X = transform_all(X)
# Tokenize the training and test sets. 0 is the magic NaN value.
return tokenize_all(X,
self.__vectorizer,
0,
True,
self.__pad_length)
def cnn_model(embeddings, max_length, ngrams=3, dropout_prob=.4):
# Base model. Convert to class later(?!?).
model = Sequential(name="cnn_steam_reviews_model")
# Embedding layer to use our pretrained vectors.
# https://keras.io/examples/nlp/pretrained_word_embeddings/
model.add(Embedding(embeddings.shape[0],
embeddings.shape[1],
embeddings_initializer=Constant(embeddings),
# mask_zero=True,
input_length=max_length,
trainable=False))
    # One-dimensional convolution layer
model.add(Conv1D(max_length,
ngrams,
padding="same"))
# Normalize inputs.
model.add(BatchNormalization())
# Max pooling
model.add(GlobalMaxPooling1D())
# Non-linearity and weight optimization
model.add(Dense(128, activation="relu"))
# Output
model.add(BatchNormalization())
model.add(Dense(1, activation="sigmoid"))
# Compile and return
model.compile("adam",
"binary_crossentropy",
["accuracy"])
return model
def model_def_fit(model, X_train, y_train, epochs):
return model.fit(X_train,
y_train,
batch_size=128,
epochs=epochs,
workers=6,
use_multiprocessing=True,
validation_split=.25)
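# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original module). It assumes
# the "en_core_web_md" spaCy model is installed and that `train_texts`,
# `test_texts` and `y_train` are raw review strings plus matching 0/1 labels.
#
# nlp = spacy.load("en_core_web_md")
# padder = PadCounts(nlp)
# padder.fit(train_texts)
# embeddings = padder.embeddings()
# X_train = padder.transform(train_texts)
# X_test = padder.transform(test_texts)
# model = cnn_model(embeddings, max_length=X_train.shape[1])
# history = model_def_fit(model, X_train, y_train, epochs=5)
# ---------------------------------------------------------------------------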
| 33.254386
| 78
| 0.587048
| 5,565
| 0.733975
| 0
| 0
| 0
| 0
| 0
| 0
| 3,636
| 0.479557
|
cbec06c90522fab416454e28ed3f8f1ea15d10d0
| 96
|
py
|
Python
|
startup.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
startup.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
startup.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
from paste.deploy import loadapp
app = loadapp("config:/home/webenmr/WebENMR/development.ini")
| 24
| 61
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.479167
|
cbef0e085fbba4e6b5fa308476e408eed61f8acc
| 2,548
|
py
|
Python
|
dataset/components.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
dataset/components.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
dataset/components.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
""" Contains classes to handle batch data components """
class ComponentDescriptor:
""" Class for handling one component item """
def __init__(self, component, default=None):
self._component = component
self._default = default
def __get__(self, instance, cls):
try:
if instance.data is None:
out = self._default
elif instance.pos is None:
out = instance.data[self._component]
else:
pos = instance.pos[self._component]
data = instance.data[self._component]
out = data[pos] if data is not None else self._default
except IndexError:
out = self._default
return out
def __set__(self, instance, value):
if instance.pos is None:
new_data = list(instance.data) if instance.data is not None else []
new_data = new_data + [None for _ in range(max(len(instance.components) - len(new_data), 0))]
new_data[self._component] = value
instance.data = tuple(new_data)
else:
pos = instance.pos[self._component]
instance.data[self._component][pos] = value
class BaseComponentsTuple:
""" Base class for a component tuple """
components = None
def __init__(self, data=None, pos=None):
if isinstance(data, BaseComponentsTuple):
self.data = data.data
else:
self.data = data
if pos is not None and not isinstance(pos, list):
pos = [pos for _ in self.components]
self.pos = pos
def __str__(self):
s = ''
for comp in self.components:
d = getattr(self, comp)
s += comp + '\n' + str(d) + '\n'
return s
def as_tuple(self, components=None):
""" Return components data as a tuple """
components = tuple(components or self.components)
return tuple(getattr(self, comp) for comp in components)
class MetaComponentsTuple(type):
""" Class factory for a component tuple """
def __init__(cls, *args, **kwargs):
_ = kwargs
        # type.__init__ only accepts (name,) or (name, bases, dict)
        super().__init__(args[0], (BaseComponentsTuple,), {})
def __new__(mcs, name, components):
comp_class = super().__new__(mcs, name, (BaseComponentsTuple,), {})
comp_class.components = components
for i, comp in enumerate(components):
setattr(comp_class, comp, ComponentDescriptor(i))
globals()[comp_class.__name__] = comp_class
return comp_class
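if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): a components tuple
    # assembled by hand, equivalent to what MetaComponentsTuple produces.
    class PairComponents(BaseComponentsTuple):
        components = ('images', 'labels')
        images = ComponentDescriptor(0)
        labels = ComponentDescriptor(1)

    item = PairComponents(data=([10, 11, 12], [0, 1, 0]), pos=1)
    print(item.images)      # -> 11 (element 1 of the 'images' component)
    item.labels = 1         # writes into the 'labels' list at position 1
    print(item.as_tuple())  # -> (11, 1)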
| 34.432432
| 105
| 0.594192
| 2,482
| 0.974097
| 0
| 0
| 0
| 0
| 0
| 0
| 235
| 0.092229
|
cbefd7cba52260caad3d20e4693a2870bae5c60c
| 708
|
py
|
Python
|
app/models.py
|
owen-rpx/RainGod
|
ba20023c1191519edec7f12fb488c942a2e05627
|
[
"MIT"
] | 7
|
2019-04-11T09:45:37.000Z
|
2019-04-19T01:40:03.000Z
|
app/models.py
|
Owenzh/RainGod
|
ba20023c1191519edec7f12fb488c942a2e05627
|
[
"MIT"
] | 5
|
2021-03-18T23:43:45.000Z
|
2022-03-11T23:44:29.000Z
|
app/models.py
|
owen-rpx/RainGod
|
ba20023c1191519edec7f12fb488c942a2e05627
|
[
"MIT"
] | 2
|
2019-04-11T09:45:39.000Z
|
2019-04-19T01:39:02.000Z
|
#-*- coding:utf-8 -*-
from .apps import db
class User(db.Model):
__tablename__ = 'user'
user_id = db.Column(db.Integer, primary_key=True)
user_count = db.Column(db.String(100), unique=True)
user_name = db.Column(db.String(100), unique=True)
user_sex = db.Column(db.String(100))
user_pwd = db.Column(db.String(100))
user_mail = db.Column(db.String(100))
user_phone = db.Column(db.String(100))
user_addtime = db.Column(db.DateTime, index=True)
user_photo = db.Column(db.String(100))
user_ispass = db.Column(db.Integer)
def check_pwd(self,pwd):
from werkzeug.security import check_password_hash
return check_password_hash(self.user_pwd,pwd)
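# Usage sketch (illustration only, not part of the original module): user_pwd is
# expected to hold a werkzeug password hash, which check_pwd() then verifies.
# from werkzeug.security import generate_password_hash
# user = User(user_count='u001', user_name='owen', user_pwd=generate_password_hash('secret'))
# user.check_pwd('secret')   # -> True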
| 29.5
| 57
| 0.686441
| 660
| 0.932203
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.038136
|
cbf1db6303b75bf9bb5a1fdfc15b60807174510e
| 834
|
py
|
Python
|
fingerExercises/fingerExercises-03/03.6-finger.how-many.py
|
sodaPhix/MITx-6.00.1x
|
8629e227d250cf6c2d5ca56944668b5796ce78cf
|
[
"MIT"
] | 1
|
2019-10-06T22:58:39.000Z
|
2019-10-06T22:58:39.000Z
|
fingerExercises/fingerExercises-03/03.6-finger.how-many.py
|
sodaPhix/MITx-6.00.1x
|
8629e227d250cf6c2d5ca56944668b5796ce78cf
|
[
"MIT"
] | null | null | null |
fingerExercises/fingerExercises-03/03.6-finger.how-many.py
|
sodaPhix/MITx-6.00.1x
|
8629e227d250cf6c2d5ca56944668b5796ce78cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 03:26:16 2019
@author: sodatab
MITx: 6.00.1x
"""
"""
03.6-Finger How Many
---------------------
Consider the following sequence of expressions:
animals = { 'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati']}
animals['d'] = ['donkey']
animals['d'].append('dog')
animals['d'].append('dingo')
We want to write some simple procedures that work on dictionaries to return information.
First, write a procedure, called how_many, which returns the sum of the number of values associated with a dictionary.
"""
"""Answer Script:"""
def how_many(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
'''
sum = 0
for i in aDict.values():
sum += len(i)
return sum
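if __name__ == '__main__':
    # Quick check using the dictionary from the exercise prompt above.
    animals = {'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati'],
               'd': ['donkey', 'dog', 'dingo']}
    print(how_many(animals))   # -> 6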
| 22.540541
| 118
| 0.631894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.866906
|
cbf29fc594fa3d410506fc9b2b10ddf99a2f2899
| 1,569
|
py
|
Python
|
test/const.py
|
DaniFdezAlvarez/shexerp3
|
80c3bdaac856a88d53359f5996477994774d34e2
|
[
"Apache-2.0"
] | 3
|
2019-06-24T18:13:06.000Z
|
2020-08-06T03:08:23.000Z
|
test/const.py
|
DaniFdezAlvarez/shexerp3
|
80c3bdaac856a88d53359f5996477994774d34e2
|
[
"Apache-2.0"
] | 109
|
2019-05-22T11:53:05.000Z
|
2021-03-15T11:09:18.000Z
|
test/const.py
|
DaniFdezAlvarez/shexerp3
|
80c3bdaac856a88d53359f5996477994774d34e2
|
[
"Apache-2.0"
] | 2
|
2019-10-23T13:06:31.000Z
|
2020-07-31T09:59:15.000Z
|
BASE_FILES = "C:\\Users\\Dani\\repos-git\\shexerp3\\test\\t_files\\"
BASE_FILES_GENERAL = BASE_FILES + "general\\"
G1 = BASE_FILES + "t_graph_1.ttl"
G1_NT = BASE_FILES + "t_graph_1.nt"
G1_TSVO_SPO = BASE_FILES + "t_graph_1.tsv"
G1_JSON_LD = BASE_FILES + "t_graph_1.json"
G1_XML = BASE_FILES + "t_graph_1.xml"
G1_N3 = BASE_FILES + "t_graph_1.n3"
G1_ALL_CLASSES_NO_COMMENTS = BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex"
# PREFIX xml: <http://www.w3.org/XML/1998/namespace/>
# PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
# PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
# PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
# PREFIX foaf: <http://xmlns.com/foaf/0.1/>
# NAMESPACES_WITH_FOAF_AND_EX = {"http://example.org/" : "ex",
# "http://www.w3.org/XML/1998/namespace/" : "xml",
# "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
# "http://www.w3.org/2000/01/rdf-schema#" : "rdfs",
# "http://www.w3.org/2001/XMLSchema#": "xsd",
# "http://xmlns.com/foaf/0.1/": "foaf"
# }
def default_namespaces():
return {"http://example.org/": "ex",
"http://www.w3.org/XML/1998/namespace/": "xml",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://www.w3.org/2000/01/rdf-schema#": "rdfs",
"http://www.w3.org/2001/XMLSchema#": "xsd",
"http://xmlns.com/foaf/0.1/": "foaf"
}
| 42.405405
| 86
| 0.560867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,174
| 0.748247
|
cbf2a3881275e0a82374f52818602abe974fb113
| 23,265
|
py
|
Python
|
src/lookoutequipment/evaluation.py
|
dast1/amazon-lookout-for-equipment-python-sdk
|
37213819c46b2dd3bcd4844235bececeabca8f12
|
[
"Apache-2.0"
] | 3
|
2021-09-28T19:53:53.000Z
|
2022-02-14T17:50:59.000Z
|
src/lookoutequipment/evaluation.py
|
dast1/amazon-lookout-for-equipment-python-sdk
|
37213819c46b2dd3bcd4844235bececeabca8f12
|
[
"Apache-2.0"
] | null | null | null |
src/lookoutequipment/evaluation.py
|
dast1/amazon-lookout-for-equipment-python-sdk
|
37213819c46b2dd3bcd4844235bececeabca8f12
|
[
"Apache-2.0"
] | 1
|
2021-11-11T18:15:14.000Z
|
2021-11-11T18:15:14.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import gridspec
from scipy.stats import wasserstein_distance
from tqdm import tqdm
class LookoutEquipmentAnalysis:
"""
A class to manage Lookout for Equipment result analysis
Attributes:
model_name (string): the name of the Lookout for Equipment trained model
predicted_ranges (pandas.DataFrame): a Pandas dataframe with the
predicted anomaly ranges listed in chronological order with a Start
and End columns
labelled_ranges (pandas.DataFrame): A Pandas dataframe with the labelled
anomaly ranges listed in chronological order with a Start and End
columns
df_list (list of pandas.DataFrame): A list with each time series into a
dataframe
"""
def __init__(self, model_name, tags_df):
"""
Create a new analysis for a Lookout for Equipment model.
Parameters:
model_name (string):
The name of the Lookout for Equipment trained model
tags_df (pandas.DataFrame):
A dataframe containing all the signals, indexed by time
"""
self.client = boto3.client('lookoutequipment')
self.model_name = model_name
self.predicted_ranges = None
self.labelled_ranges = None
self.ts_normal_training = None
self.ts_label_evaluation = None
self.ts_known_anomalies = None
self.df_list = dict()
for signal in tags_df.columns:
self.df_list.update({signal: tags_df[[signal]]})
model_description = self.client.describe_model(ModelName=self.model_name)
if model_description['Status'] == 'FAILED':
raise Exception('Model training failed, nothing to analyze.')
# Extracting time ranges used at training time:
self.training_start = pd.to_datetime(
model_description['TrainingDataStartTime'].replace(tzinfo=None)
)
self.training_end = pd.to_datetime(
model_description['TrainingDataEndTime'].replace(tzinfo=None)
)
self.evaluation_start = pd.to_datetime(
model_description['EvaluationDataStartTime'].replace(tzinfo=None)
)
self.evaluation_end = pd.to_datetime(
model_description['EvaluationDataEndTime'].replace(tzinfo=None)
)
def _load_model_response(self):
"""
Use the trained model description to extract labelled and predicted
ranges of anomalies. This method will extract them from the
DescribeModel API from Lookout for Equipment and store them in the
labelled_ranges and predicted_ranges properties.
"""
describe_model_response = self.client.describe_model(
ModelName=self.model_name
)
if self.labelled_ranges is None:
self.labelled_ranges = eval(
describe_model_response['ModelMetrics']
)['labeled_ranges']
if len(self.labelled_ranges) > 0:
self.labelled_ranges = pd.DataFrame(self.labelled_ranges)
self.labelled_ranges['start'] = pd.to_datetime(self.labelled_ranges['start'])
self.labelled_ranges['end'] = pd.to_datetime(self.labelled_ranges['end'])
else:
self.labelled_ranges = pd.DataFrame(columns=['start', 'end'])
self.predicted_ranges = eval(
describe_model_response['ModelMetrics']
)['predicted_ranges']
if len(self.predicted_ranges) > 0:
self.predicted_ranges = pd.DataFrame(self.predicted_ranges)
self.predicted_ranges['start'] = pd.to_datetime(self.predicted_ranges['start'])
self.predicted_ranges['end'] = pd.to_datetime(self.predicted_ranges['end'])
else:
self.predicted_ranges = pd.DataFrame(columns=['start', 'end'])
def set_time_periods(
self,
evaluation_start,
evaluation_end,
training_start,
training_end
):
"""
Set the time period of analysis
Parameters:
evaluation_start (datetime):
Start of the evaluation period
evaluation_end (datetime):
End of the evaluation period
training_start (datetime):
Start of the training period
training_end (datetime):
End of the training period
"""
self.evaluation_start = evaluation_start
self.evaluation_end = evaluation_end
self.training_start = training_start
self.training_end = training_end
def get_predictions(self):
"""
Get the anomaly ranges predicted by the current model
Returns:
pandas.DataFrame:
A Pandas dataframe with the predicted anomaly ranges listed in
chronological order with a Start and End columns
"""
if self.predicted_ranges is None:
self._load_model_response()
return self.predicted_ranges
def get_labels(self, labels_fname=None):
"""
Get the labelled ranges as provided to the model before training
Parameters:
labels_fname (string):
As an option, if you provide a path to a CSV file containing
the label ranges, this method will use this file to load the
labels. If this argument is not provided, it will load the
labels from the trained model Describe API (Default to None)
Returns:
pandas.DataFrame:
A Pandas dataframe with the labelled anomaly ranges listed in
chronological order with a Start and End columns
"""
if labels_fname is not None:
labels_df = pd.read_csv(labels_fname, header=None)
labels_df[0] = pd.to_datetime(labels_df[0])
labels_df[1] = pd.to_datetime(labels_df[1])
labels_df.columns = ['start', 'end']
self.labelled_ranges = labels_df
elif self.labelled_ranges is None:
self._load_model_response()
return self.labelled_ranges
def _get_time_ranges(self):
"""
Extract DateTimeIndex with normal values and anomalies from the
predictions generated by the model.
Returns:
pandas.DateTimeIndex:
Timestamp index for all the values marked as normal during the
training period
pandas.DateTimeIndex:
Timestamp index for all the values predicted as anomalies by
the model during the evaluation period
"""
# Extract the first time series
tag = list(self.df_list.keys())[0]
tag_df = self.df_list[tag]
# Initialize the predictions dataframe:
predictions_df = pd.DataFrame(columns=['Prediction'], index=tag_df.index)
predictions_df['Prediction'] = 0
# Loops through the predicted and labelled anomalies
# ranges and set these predictions to 1 (predicted)
# or 2 (initially known):
for index, row in self.predicted_ranges.iterrows():
predictions_df.loc[row['start']:row['end'], 'Prediction'] = 1
for index, row in self.labelled_ranges.iterrows():
predictions_df.loc[row['start']:row['end'], 'Prediction'] = 2
# Limits the analysis range to the evaluation period:
predictions_df = predictions_df[self.training_start:self.evaluation_end]
# Build a DateTimeIndex for normal values and anomalies:
index_normal = predictions_df[predictions_df['Prediction'] == 0].index
index_anomaly = predictions_df[predictions_df['Prediction'] == 1].index
index_known = predictions_df[predictions_df['Prediction'] == 2].index
return index_normal, index_anomaly, index_known
def compute_histograms(
self,
index_normal=None,
index_anomaly=None,
num_bins=20
):
"""
This method loops through each signal and computes two distributions of
the values in the time series: one for all the anomalies found in the
evaluation period and another one with all the normal values found in the
same period. It then computes the Wasserstein distance between these two
        histograms and then ranks every signal based on this distance. The higher
the distance, the more different a signal is when comparing anomalous
and normal periods. This can orient the investigation of a subject
matter expert towards the sensors and associated components.
Parameters:
index_normal (pandas.DateTimeIndex):
All the normal indices
index_anomaly (pandas.DateTimeIndex):
All the indices for anomalies
num_bins (integer):
Number of bins to use to build the distributions (default: 20)
"""
if (index_normal is None) or (index_anomaly is None):
index_lists = self._get_time_ranges()
self.ts_normal_training = index_lists[0]
self.ts_label_evaluation = index_lists[1]
self.ts_known_anomalies = index_lists[2]
self.num_bins = num_bins
# Now we loop on each signal to compute a
# histogram of each of them in this anomaly range,
        # compute another one in the normal range and
# compute a distance between these:
rank = dict()
for tag, current_tag_df in tqdm(
self.df_list.items(),
desc='Computing distributions'
):
try:
# Get the values for the whole signal, parts
# marked as anomalies and normal part:
current_signal_values = current_tag_df[tag]
current_signal_evaluation = current_tag_df.loc[self.ts_label_evaluation, tag]
current_signal_training = current_tag_df.loc[self.ts_normal_training, tag]
# Let's compute a bin width based on the whole range of possible
# values for this signal (across the normal and anomalous periods).
# For both normalization and aesthetic reasons, we want the same
# number of bins across all signals:
bin_width = (np.max(current_signal_values) - np.min(current_signal_values))/self.num_bins
bins = np.arange(
np.min(current_signal_values),
np.max(current_signal_values) + bin_width,
bin_width
)
# We now use the same bins arrangement for both parts of the signal:
u = np.histogram(
current_signal_training,
bins=bins,
density=True
)[0]
v = np.histogram(
current_signal_evaluation,
bins=bins,
density=True
)[0]
# Wasserstein distance is the earth mover distance: it can be
# used to compute a similarity between two distributions: this
# metric is only valid when the histograms are normalized (hence
# the density=True in the computation above):
d = wasserstein_distance(u, v)
rank.update({tag: d})
except Exception as e:
rank.update({tag: 0.0})
# Sort histograms by decreasing Wasserstein distance:
rank = {k: v for k, v in sorted(rank.items(), key=lambda rank: rank[1], reverse=True)}
self.rank = rank
    def plot_histograms_v2(self, custom_ranking, nb_cols=3, max_plots=12, num_bins=20):
        """
        Same as plot_histograms(), but plots the signals by decreasing order of a
        caller-supplied ranking (a dict mapping signal name to score) instead of
        the ranking computed by compute_histograms().
        """
index_lists = self._get_time_ranges()
self.ts_normal_training = index_lists[0]
self.ts_label_evaluation = index_lists[1]
self.ts_known_anomalies = index_lists[2]
self.num_bins = num_bins
# Prepare the figure:
nb_rows = len(self.df_list.keys()) // nb_cols + 1
plt.style.use('Solarize_Light2')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(figsize=(16, int(nb_rows * 3)))
gs = gridspec.GridSpec(nb_rows, nb_cols, hspace=0.5, wspace=0.25)
axes = []
for i in range(max_plots):
axes.append(fig.add_subplot(gs[i]))
# Loops through each signal by decreasing distance order:
i = 0
for tag, current_rank in tqdm(
custom_ranking.items(),
total=max_plots,
desc='Preparing histograms'
):
# We stop after reaching the number of plots we are interested in:
if i > max_plots - 1:
break
try:
# Get the anomaly and the normal values from the current signal:
current_signal_values = self.df_list[tag][tag]
current_signal_evaluation = self.df_list[tag].loc[self.ts_label_evaluation, tag]
current_signal_training = self.df_list[tag].loc[self.ts_normal_training, tag]
# Compute the bin width and bin edges to match the
# number of bins we want to have on each histogram:
bin_width =(np.max(current_signal_values) - np.min(current_signal_values))/self.num_bins
bins = np.arange(
np.min(current_signal_values),
np.max(current_signal_values) + bin_width,
bin_width
)
# Add both histograms in the same plot:
axes[i].hist(current_signal_training,
density=True,
alpha=0.5,
color=colors[1],
bins=bins,
edgecolor='#FFFFFF')
axes[i].hist(current_signal_evaluation,
alpha=0.5,
density=True,
color=colors[5],
bins=bins,
edgecolor='#FFFFFF')
except Exception as e:
print(e)
axes[i] = plt.subplot(gs[i])
# Removes all the decoration to leave only the histograms:
axes[i].grid(False)
axes[i].get_yaxis().set_visible(False)
axes[i].get_xaxis().set_visible(False)
# Title will be the tag name followed by the score:
title = tag
title += f' (score: {current_rank:.02f}%)'
axes[i].set_title(title, fontsize=10)
i+= 1
return fig, axes
def plot_histograms(self, nb_cols=3, max_plots=12):
"""
Once the histograms are computed, we can plot the top N by decreasing
ranking distance. By default, this will plot the histograms for the top
12 signals, with 3 plots per line.
Parameters:
nb_cols (integer):
Number of plots to assemble on a given row (default: 3)
max_plots (integer):
                Number of signals to consider (default: 12)
Returns:
tuple: tuple containing:
* A ``matplotlib.pyplot.figure`` where the plots are drawn
* A ``list of matplotlib.pyplot.Axis`` with each plot drawn here
"""
# Prepare the figure:
nb_rows = len(self.df_list.keys()) // nb_cols + 1
plt.style.use('Solarize_Light2')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(figsize=(16, int(nb_rows * 3)))
gs = gridspec.GridSpec(nb_rows, nb_cols, hspace=0.5, wspace=0.25)
axes = []
for i in range(max_plots):
axes.append(fig.add_subplot(gs[i]))
# Loops through each signal by decreasing distance order:
i = 0
for tag, current_rank in tqdm(
self.rank.items(),
total=max_plots,
desc='Preparing histograms'
):
# We stop after reaching the number of plots we are interested in:
if i > max_plots - 1:
break
try:
# Get the anomaly and the normal values from the current signal:
current_signal_values = self.df_list[tag][tag]
current_signal_evaluation = self.df_list[tag].loc[self.ts_label_evaluation, tag]
current_signal_training = self.df_list[tag].loc[self.ts_normal_training, tag]
# Compute the bin width and bin edges to match the
# number of bins we want to have on each histogram:
bin_width =(np.max(current_signal_values) - np.min(current_signal_values))/self.num_bins
bins = np.arange(
np.min(current_signal_values),
np.max(current_signal_values) + bin_width,
bin_width
)
# Add both histograms in the same plot:
axes[i].hist(current_signal_training,
density=True,
alpha=0.5,
color=colors[1],
bins=bins,
edgecolor='#FFFFFF')
axes[i].hist(current_signal_evaluation,
alpha=0.5,
density=True,
color=colors[5],
bins=bins,
edgecolor='#FFFFFF')
except Exception as e:
print(e)
axes[i] = plt.subplot(gs[i])
# Removes all the decoration to leave only the histograms:
axes[i].grid(False)
axes[i].get_yaxis().set_visible(False)
axes[i].get_xaxis().set_visible(False)
# Title will be the tag name followed by the score:
title = tag
title += f' (score: {current_rank:.02f})'
axes[i].set_title(title, fontsize=10)
i+= 1
return fig, axes
def plot_signals(self, nb_cols=3, max_plots=12):
"""
Once the histograms are computed, we can plot the top N signals by
decreasing ranking distance. By default, this will plot the signals for
the top 12 signals, with 3 plots per line. For each signal, this method
will plot the normal values in green and the anomalies in red.
Parameters:
nb_cols (integer):
Number of plots to assemble on a given row (default: 3)
max_plots (integer):
                Number of signals to consider (default: 12)
Returns:
tuple: tuple containing:
* A ``matplotlib.pyplot.figure`` where the plots are drawn
* A ``list of matplotlib.pyplot.Axis`` with each plot drawn here
"""
# Prepare the figure:
nb_rows = max_plots // nb_cols + 1
plt.style.use('Solarize_Light2')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(figsize=(28, int(nb_rows * 4)))
gs = gridspec.GridSpec(nb_rows, nb_cols, hspace=0.5, wspace=0.25)
axes = []
for i in range(max_plots):
axes.append(fig.add_subplot(gs[i]))
# Loops through each signal by decreasing distance order:
i = 0
for tag, current_rank in self.rank.items():
# We stop after reaching the number of plots we are interested in:
if i > max_plots - 1:
break
# Get the anomaly and the normal values from the current signal:
current_signal_evaluation = self.df_list[tag].loc[self.ts_label_evaluation, tag]
current_signal_training = self.df_list[tag].loc[self.ts_normal_training, tag]
current_signal_known = self.df_list[tag].loc[self.ts_known_anomalies, tag]
# Plot both time series with a line plot
# axes.append(plt.subplot(gs[i]))
axes[i].plot(current_signal_training,
linewidth=0.5,
alpha=0.8,
color=colors[1])
axes[i].plot(current_signal_evaluation,
linewidth=0.5,
alpha=0.8,
color=colors[5])
axes[i].plot(current_signal_known,
linewidth=0.5,
alpha=0.8,
color='#AAAAAA')
# Title will be the tag name followed by the score:
title = tag
title += f' (score: {current_rank:.02f})'
axes[i].set_title(title, fontsize=10)
start = min(
min(self.ts_label_evaluation),
min(self.ts_normal_training),
min(self.ts_known_anomalies)
)
end = max(
max(self.ts_label_evaluation),
max(self.ts_normal_training),
max(self.ts_known_anomalies)
)
axes[i].set_xlim(start, end)
i += 1
return fig, axes
def get_ranked_list(self, max_signals=12):
"""
Returns the list of signals with computed rank.
Parameters:
            max_signals (integer):
Number of signals to consider (default: 12)
Returns:
pandas.DataFrame:
A dataframe with each signal and the associated rank value
"""
significant_signals_df = pd.DataFrame(list(self.rank.items())[:max_signals])
significant_signals_df.columns = ['Tag', 'Rank']
return significant_signals_df
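# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original module). It assumes
# a model named 'my-model' has already been trained in Lookout for Equipment and
# that `tags_df` is the time-indexed DataFrame of sensor readings used for it.
#
# analysis = LookoutEquipmentAnalysis(model_name='my-model', tags_df=tags_df)
# predicted = analysis.get_predictions()
# labelled = analysis.get_labels()
# analysis.compute_histograms()
# fig, axes = analysis.plot_histograms()
# top_signals = analysis.get_ranked_list(max_signals=12)
# ---------------------------------------------------------------------------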
| 40.744308
| 105
| 0.571545
| 22,441
| 0.964582
| 0
| 0
| 0
| 0
| 0
| 0
| 9,818
| 0.422007
|
cbf2dc049f1ccb6bed778490aa18d0ea3f007439
| 793
|
py
|
Python
|
src/opserver/plugins/alarm_process_connectivity/setup.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | 3
|
2019-01-11T06:16:40.000Z
|
2021-02-24T23:48:21.000Z
|
src/opserver/plugins/alarm_process_connectivity/setup.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/plugins/alarm_process_connectivity/setup.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | 18
|
2017-01-12T09:28:44.000Z
|
2019-04-18T20:47:42.000Z
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup, find_packages
setup(
name='alarm_process_connectivity',
version='0.1dev',
packages=find_packages(),
entry_points = {
'contrail.analytics.alarms': [
'ObjectCollectorInfo = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectVRouter = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectConfigNode = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectBgpRouter = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectDatabaseInfo = alarm_process_connectivity.main:ProcessConnectivity',
],
},
zip_safe=False,
long_description="ProcessConnectivity alarm"
)
| 34.478261
| 88
| 0.717528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 517
| 0.651955
|
cbf3c9c5fb96524cab1cc99ebc8311885fc1db2a
| 813
|
py
|
Python
|
app/tools/dbg_queue_graph.py
|
samelamin/kylinmonitorbot
|
00a38ca9513dceadf23bfdb19b6daab77a368e76
|
[
"MIT"
] | null | null | null |
app/tools/dbg_queue_graph.py
|
samelamin/kylinmonitorbot
|
00a38ca9513dceadf23bfdb19b6daab77a368e76
|
[
"MIT"
] | null | null | null |
app/tools/dbg_queue_graph.py
|
samelamin/kylinmonitorbot
|
00a38ca9513dceadf23bfdb19b6daab77a368e76
|
[
"MIT"
] | null | null | null |
import asyncio
import os
from localization import LocalizationManager
from services.dialog.queue_picture import queue_graph
from services.lib.config import Config
from services.lib.db import DB
from services.lib.depcont import DepContainer
async def q_points(d: DepContainer):
image = await queue_graph(d, d.loc_man.get_from_lang('rus'))
p = os.path.expanduser('~/sns_test.png')
with open(p, 'wb') as f:
f.write(image.getvalue())
os.system(f'open "{p}"')
async def stake_graph():
...
async def test_plots(d):
# await q_points(d)
await stake_graph()
if __name__ == '__main__':
d = DepContainer()
d.loc_man = LocalizationManager()
d.loop = asyncio.get_event_loop()
d.cfg = Config()
d.db = DB(d.loop)
d.loop.run_until_complete(test_plots(d))
| 22.583333
| 64
| 0.693727
| 0
| 0
| 0
| 0
| 0
| 0
| 346
| 0.425584
| 67
| 0.082411
|
cbf41ad432dd52c7fa96aae20328389d8a8a2749
| 8,482
|
py
|
Python
|
fenpei/job.py
|
mverleg/fenpei
|
2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb
|
[
"BSD-3-Clause"
] | null | null | null |
fenpei/job.py
|
mverleg/fenpei
|
2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb
|
[
"BSD-3-Clause"
] | 2
|
2016-11-17T12:10:36.000Z
|
2017-02-08T09:06:37.000Z
|
fenpei/job.py
|
mverleg/fenpei
|
2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Base class for fenpei job; this should be considered abstract.
Your custom job(s) should inherit from this job and extend the relevant methods, such as::
* is_prepared
* is_complete
* prepare
* start
* result
* summary
"""
from re import match
from sys import stdout
from bardeen.system import mkdirp
from time import time
from os import remove
from os.path import join, isdir
from shutil import rmtree
from .utils import CALC_DIR
class Job(object):
CRASHED, NONE, PREPARED, RUNNING, COMPLETED = -1, 0, 1, 2, 3
status_names = {-1: 'crashed', 0: 'nothing', 1: 'prepared', 2: 'running', 3: 'completed'}
queue = None
node = None
pid = None
status = None
""" Set a group_cls to report results together with another class (that has the same output format). """
group_cls = None
def __init__(self, name, weight=1, batch_name=None, force_node=None):
"""
Create a Job object.
:param name: unique name consisting of letters, numbers, dot (.) and underscore (_) **YOU need to make sure that name is unique (bijectively maps to job)**
:param weight: the relative resource use of this job (higher relative weights means fewer jobs will be scheduled together)
:param batch_name: optionally, a name of the same format as ``name``, which specifies the batch (will be grouped)
:param force_node: demand a specific node; it's up to the queue whether this is honoured
"""
assert match(r'^\w[/\w\.\+_-]*$', name), 'This is not a valid name: "{0:}"'.format(name)
assert weight > 0
self.name = name
self.weight = weight
self.cluster = None
self.batch_name = batch_name
self.force_node = force_node
if self.batch_name:
            assert match(r'^\w[\w\._-]*$', batch_name)
self.directory = join(CALC_DIR, batch_name, name)
elif batch_name is None:
raise AssertionError('no batch name for {0:}; provide batch_name argument when creating jobs or set it to False'.format(self))
elif batch_name is False:
self.directory = join(CALC_DIR, name)
self.status = self.NONE
def __repr__(self):
if hasattr(self, 'name'):
return self.name
return '{0:s} id{1:}'.format(self.__class__.__name__, id(self))
def _log(self, txt, *args, **kwargs):
"""
Logging function.
.queue is not always set, so have own logging function.
"""
if self.queue is None:
if len(txt.strip()):
stdout.write('(no queue) ' + txt + '\n')
else:
stdout.write('(empty)\n')
else:
self.queue._log(txt, *args, **kwargs)
def save(self):
"""
Save information about a running job to locate the process.
"""
assert self.node is not None
assert self.pid is not None
with open('%s/node_pid.job' % self.directory, 'w+') as fh:
fh.write('%s\n%s\n%s\n%s' % (self.name, self.node, self.pid, str(time())))
self._log('job %s saved' % self, level=3)
def unsave(self):
"""
Remove the stored process details.
"""
try:
remove('%s/node_pid.job' % self.directory)
except IOError:
pass
self._log('job %s save file removed' % self.name, level=3)
def load(self):
"""
Load process details from cache.
"""
try:
with open('%s/node_pid.job' % self.directory, 'r') as fh:
lines = fh.read().splitlines()
self.node = lines[1]
self.pid = int(lines[2])
self._log('job %s loaded' % self.name, level=3)
return True
except IOError:
self._log('job %s save file not found' % self, level=3)
return False
def is_prepared(self):
pass
def is_started(self):
if not self.is_prepared():
return False
l = self.load()
return l
def is_running(self):
"""
Only called if at least prepared.
"""
if self.pid is None:
if not self.load():
return False
if not self.queue:
raise Exception('cannot check if %s is running because it is not in a queue' % self)
proc_list = self.queue.processes(self.node)
try:
return self.pid in [proc['pid'] for proc in proc_list if proc is not None]
except KeyError:
raise Exception('node %s for job %s no longer found?' % (self.node, self))
def is_complete(self):
"""
Check if job completed successfully.
Needs to be extended by child class.
Only called for jobs that are at least prepared.
"""
return True
def find_status(self):
"""
Find status using is_* methods.
"""
def check_status_indicators(self):
if self.is_prepared():
if self.is_complete():
return self.COMPLETED
elif self.is_started():
if self.is_running():
return self.RUNNING
return self.CRASHED
return self.PREPARED
return self.NONE
if time() - getattr(self, '_last_status_time', time() - 100) > 0.7:
self.status = check_status_indicators(self)
setattr(self, '_last_status_time', time())
return self.status
def status_str(self):
return self.status_names[self.find_status()]
def prepare(self, silent=False, *args, **kwargs):
"""
Prepares the job for execution.
More steps are likely necessary for child classes.
"""
self.status = self.PREPARED
if not self.is_prepared():
if self.batch_name:
mkdirp(join(CALC_DIR, self.batch_name))
mkdirp(self.directory)
if not silent:
self._log('preparing {0:s}'.format(self), level=2)
""" child method add more steps here """
def _start_pre(self, *args, **kwargs):
"""
Some checks at the beginning of .start().
"""
if self.is_running() or self.is_complete():
if not self.queue is None:
if self.queue.force:
if self.is_running():
self.kill()
else:
raise AssertionError(('you are trying to restart a job that is running '
'or completed ({0:} run={1:} complete={2:}); use restart (-e) to '
'skip such jobs or -f to overrule this warning').format(
self, self.is_running(), self.is_complete()))
if not self.is_prepared():
self.prepare(silent=True)
def _start_post(self, node, pid, *args, **kwargs):
"""
Some bookkeeping at the end of .start().
"""
self.node = node
self.pid = pid
self.save()
if self.is_running():
            self.status = self.RUNNING
self._log('starting %s on %s with pid %s' % (self, self.node, self.pid), level=2)
def start(self, node, *args, **kwargs):
"""
Start the job and store node/pid.
"""
self._start_pre(*args, **kwargs)
"""
Your starting code here.
"""
self._start_post(node, 'pid_here', *args, **kwargs)
return True
def fix(self, *args, **kwargs):
"""
Some code that can be ran to fix jobs, e.g. after bugfixes or updates.
Needs to be implemented by children for the specific fix applicable (if just restarting is not viable).
"""
return False
def kill(self, *args, **kwargs):
"""
Kills the current job if running using queue methods.
Any overriding should probably happen in :ref: queue.processes and :ref: queue.stop_job.
"""
if self.is_running():
assert self.node is not None
assert self.pid is not None
self._log('killing %s: %s on %s' % (self, self.pid, self.node), level=2)
self.queue.stop_job(node = self.node, pid = self.pid)
return True
self._log('job %s not running' % self, level=3)
return False
def cleanup(self, skip_conflicts=False, *args, **kwargs):
if self.is_running() or self.is_complete():
if self.queue is not None and not self.queue.force:
if skip_conflicts:
return False
raise AssertionError(('you are trying to clean up a job ({0:s}; run={1:} complete={2:}) '
'that is running or completed; use -f to force this, or -e to skip these jobs (it '
'could also mean that two jobs are use the same name and batchname).').format(
self.name, self.is_running(), self.is_complete()))
if self.batch_name is not False and isdir(self.directory):
rmtree(self.directory, ignore_errors = True)
self._log('cleaned up {0:s}'.format(self), level=2)
return True
return False
def result(self, *args, **kwargs):
"""
Collects the result of the completed job.
:return: result of the job; only requirement is that the result be compatible with :ref: summary (and other jobs), but a dict is suggested.
"""
if not self.is_complete():
return None
return None
def crash_reason(self, verbosity=0, *args, **kwargs):
"""
Find the reason the job has crashed. Should only be called for crashed jobs (by _crash_reason_if_crashed).
"""
if verbosity <= 0:
return '??'
else:
return '?? reason for crash not known'
def _crash_reason_if_crashed(self, verbosity=0, *args, **kwargs):
if not self.find_status() == self.CRASHED:
return None
return self.crash_reason(verbosity=verbosity, *args, **kwargs)
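# Usage sketch (illustration only, not part of the original module): a concrete
# job subclasses Job and extends the lifecycle hooks; the directory layout and
# the queue helper used below are purely hypothetical.
#
# class SleepJob(Job):
#     def is_prepared(self):
#         return isdir(self.directory)
#
#     def is_complete(self):
#         return isdir(join(self.directory, 'done'))
#
#     def start(self, node, *args, **kwargs):
#         self._start_pre(*args, **kwargs)
#         pid = self.queue.run_cmd(node, 'sleep 60')  # hypothetical queue helper
#         self._start_post(node, pid, *args, **kwargs)
#         return True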
| 29.451389
| 157
| 0.67543
| 8,038
| 0.947654
| 0
| 0
| 0
| 0
| 0
| 0
| 3,401
| 0.400967
|
cbf53d52cd9777aefd5d176bd11a75c4a1b54abc
| 303
|
py
|
Python
|
Aula 07/ex6.py
|
diegorafaelvieira/Programacao-1
|
657a974f1215cec4aed68603e738d9a135131545
|
[
"MIT"
] | null | null | null |
Aula 07/ex6.py
|
diegorafaelvieira/Programacao-1
|
657a974f1215cec4aed68603e738d9a135131545
|
[
"MIT"
] | null | null | null |
Aula 07/ex6.py
|
diegorafaelvieira/Programacao-1
|
657a974f1215cec4aed68603e738d9a135131545
|
[
"MIT"
] | null | null | null |
val = int(input("Valor:"))
soma = val
maior = val
menor = val
for i in range(0,9):
val = int(input("Valor:"))
if val>maior:
maior = val
if val<menor:
menor=val
soma+=val
print("O maior valor é:",maior)
print("O menor valor é:",menor)
print("A média é:",(soma/10))
| 16.833333
| 31
| 0.570957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.221498
|
cbf5f6dcb21e20b6bc3a2e8d76fc27d3087ec7c4
| 746
|
py
|
Python
|
waterbutler/providers/github/exceptions.py
|
KakeruMizuno/RDM-waterbutler
|
58ecd801385a7572d1ed56568a31f701291c4e3e
|
[
"Apache-2.0"
] | 1
|
2019-05-08T02:32:17.000Z
|
2019-05-08T02:32:17.000Z
|
waterbutler/providers/github/exceptions.py
|
KakeruMizuno/RDM-waterbutler
|
58ecd801385a7572d1ed56568a31f701291c4e3e
|
[
"Apache-2.0"
] | null | null | null |
waterbutler/providers/github/exceptions.py
|
KakeruMizuno/RDM-waterbutler
|
58ecd801385a7572d1ed56568a31f701291c4e3e
|
[
"Apache-2.0"
] | null | null | null |
from http import HTTPStatus
from waterbutler.core.exceptions import ProviderError
class GitHubUnsupportedRepoError(ProviderError):
def __init__(self, dummy):
"""``dummy`` argument is because children of ``WaterButlerError`` must be instantiable with
a single integer argument. See :class:`waterbutler.core.exceptions.WaterButlerError`
for details.
"""
super().__init__('Some folder operations on large GitHub repositories cannot be supported '
'without data loss. To carry out this operation, please perform it in a '
'local git repository, then push to the target repository on GitHub.',
code=HTTPStatus.NOT_IMPLEMENTED)
| 46.625
| 99
| 0.672922
| 660
| 0.884718
| 0
| 0
| 0
| 0
| 0
| 0
| 435
| 0.58311
|
cbf60a5f54499551d07c8764354e2a5053355b82
| 899
|
py
|
Python
|
buildencyclopedia.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 67
|
2016-06-09T14:11:51.000Z
|
2022-03-16T07:54:44.000Z
|
buildencyclopedia.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 19
|
2016-06-21T15:51:11.000Z
|
2021-06-07T09:22:20.000Z
|
buildencyclopedia.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 32
|
2016-07-15T01:24:19.000Z
|
2019-03-25T10:42:28.000Z
|
"""updates the encyclopedia section in the mkdocs.yml
should be run whenever a file is removed or added into the directory"""
import os
import yaml
ABSFILEPATH = os.path.dirname(os.path.realpath(__file__))
FILEARRAY = os.listdir(ABSFILEPATH + '/docs/Encyclopedia/pages')
FILEARRAY = sorted(FILEARRAY, key=str.lower)
with open(ABSFILEPATH + '/mkdocs.yml', 'r') as f:
    doc = yaml.safe_load(f)
encycdict = next(d for (index, d) in enumerate(doc['pages']) \
if d.get('EncyclopediaEntries', False) != False)
newlist = []
for x in range(len(FILEARRAY)):
if FILEARRAY[x][-3:] == ".md":
tempdict = {FILEARRAY[x][:-3].replace("_"," "):"".join(['Encyclopedia/pages/', FILEARRAY[x][:-3], '.md'])}
newlist.append(tempdict)
encycdict['EncyclopediaEntries'] = newlist
with open(ABSFILEPATH + '/mkdocs.yml', 'w+') as f:
f.write(yaml.dump(doc, default_flow_style=False))
| 32.107143
| 114
| 0.670745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.303671
|
cbf64e56908db17c3f6f03defc1efa0087875a63
| 3,056
|
py
|
Python
|
nucypher/tests/config/test_firstula_circumstances.py
|
kanzeparov/NuCypher
|
0d7e349872909d0cacfd66583d018d722587b2e7
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
nucypher/tests/config/test_firstula_circumstances.py
|
kanzeparov/NuCypher
|
0d7e349872909d0cacfd66583d018d722587b2e7
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
nucypher/tests/config/test_firstula_circumstances.py
|
kanzeparov/NuCypher
|
0d7e349872909d0cacfd66583d018d722587b2e7
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
"""
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
from functools import partial
import maya
import pytest
import pytest_twisted
from twisted.internet.threads import deferToThread
from nucypher.network.middleware import RestMiddleware
from nucypher.utilities.sandbox.ursula import make_federated_ursulas
def test_proper_seed_node_instantiation(ursula_federated_test_config):
lonely_ursula_maker = partial(make_federated_ursulas,
ursula_config=ursula_federated_test_config,
quantity=1,
know_each_other=False)
firstula = lonely_ursula_maker().pop()
firstula_as_seed_node = firstula.seed_node_metadata()
any_other_ursula = lonely_ursula_maker(seed_nodes=[firstula_as_seed_node]).pop()
assert not any_other_ursula.known_nodes
any_other_ursula.start_learning_loop(now=True)
assert firstula in any_other_ursula.known_nodes.values()
@pytest_twisted.inlineCallbacks
def test_get_cert_from_running_seed_node(ursula_federated_test_config):
lonely_ursula_maker = partial(make_federated_ursulas,
ursula_config=ursula_federated_test_config,
quantity=1,
know_each_other=False)
firstula = lonely_ursula_maker().pop()
node_deployer = firstula.get_deployer()
node_deployer.addServices()
node_deployer.catalogServers(node_deployer.hendrix)
node_deployer.start()
certificate_as_deployed = node_deployer.cert.to_cryptography()
firstula_as_seed_node = firstula.seed_node_metadata()
any_other_ursula = lonely_ursula_maker(seed_nodes=[firstula_as_seed_node],
network_middleware=RestMiddleware()).pop()
assert not any_other_ursula.known_nodes
def start_lonely_learning_loop():
any_other_ursula.start_learning_loop()
start = maya.now()
while not firstula in any_other_ursula.known_nodes.values():
passed = maya.now() - start
if passed.seconds > 2:
pytest.fail("Didn't find the seed node.")
yield deferToThread(start_lonely_learning_loop)
assert firstula in any_other_ursula.known_nodes.values()
certificate_as_learned = list(any_other_ursula.known_nodes.values())[0].certificate
assert certificate_as_learned == certificate_as_deployed
any_other_ursula.stop_learning_loop()
| 39.688312
| 87
| 0.729058
| 0
| 0
| 1,469
| 0.480694
| 1,501
| 0.491165
| 0
| 0
| 668
| 0.218586
|
cbf6bbc96905dc1f309f486dc863edc389cd8386
| 1,550
|
py
|
Python
|
anchore/anchore-modules/queries/show-familytree.py
|
berez23/anchore
|
594cce23f1d87d666397653054c22c2613247734
|
[
"Apache-2.0"
] | 401
|
2016-06-16T15:29:48.000Z
|
2022-03-24T10:05:16.000Z
|
anchore/anchore-modules/queries/show-familytree.py
|
berez23/anchore
|
594cce23f1d87d666397653054c22c2613247734
|
[
"Apache-2.0"
] | 63
|
2016-06-16T21:10:27.000Z
|
2020-07-01T06:57:27.000Z
|
anchore/anchore-modules/queries/show-familytree.py
|
berez23/anchore
|
594cce23f1d87d666397653054c22c2613247734
|
[
"Apache-2.0"
] | 64
|
2016-06-16T13:05:57.000Z
|
2021-07-16T10:03:45.000Z
|
#!/usr/bin/env python
import sys
import os
import re
import json
import traceback
import anchore.anchore_utils
# main routine
try:
config = anchore.anchore_utils.init_query_cmdline(sys.argv, "params: all\nhelp: shows dockerfile lines.")
except Exception as err:
print str(err)
sys.exit(1)
if not config:
sys.exit(0)
if len(config['params']) <= 0:
print "Query requires input: all"
warns = list()
outlist = list()
outlist.append(["Image_Id", "Repo_Tags", "Image Type"])
try:
idata = anchore.anchore_utils.load_image_report(config['imgid'])
ftree = idata['familytree']
for fid in ftree:
tags = "unknown"
itype = "unknown"
try:
fdata = anchore.anchore_utils.load_image_report(fid)
tags = ','.join(fdata['anchore_all_tags'])
if not tags:
tags = "none"
itype = fdata['meta']['usertype']
if not itype:
itype = "intermediate"
except:
warns.append("family tree id ("+str(fid)+") does not appear to have been analyzed, no data for this member of the tree")
outlist.append([fid, str(tags), str(itype)])
except Exception as err:
# handle the case where something wrong happened
import traceback
traceback.print_exc()
warns.append("query error: "+str(err))
pass
anchore.anchore_utils.write_kvfile_fromlist(config['output'], outlist)
if len(warns) > 0:
anchore.anchore_utils.write_plainfile_fromlist(config['output_warns'], warns)
sys.exit(0)
| 22.794118
| 132
| 0.645806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 422
| 0.272258
|
cbf70c92043ad32d0c5d7dec87ffaf9a8bdb7e8f
| 2,258
|
py
|
Python
|
spikeforest/spikeforestwidgets/templatewidget/templatewidget.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:19.000Z
|
2021-09-23T01:07:19.000Z
|
spikeforest/spikeforestwidgets/templatewidget/templatewidget.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
spikeforest/spikeforestwidgets/templatewidget/templatewidget.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:21.000Z
|
2021-09-23T01:07:21.000Z
|
import uuid
from spikeforest import mdaio
import io
import base64
import vdomr as vd
import os
import numpy as np
import mtlogging
import time
import traceback
source_path = os.path.dirname(os.path.realpath(__file__))
def _mda32_to_base64(X):
f = io.BytesIO()
mdaio.writemda32(X, f)
return base64.b64encode(f.getvalue()).decode('utf-8')
class TemplateWidget(vd.Component):
def __init__(self, *, template, size=(200, 200)):
vd.Component.__init__(self)
vd.devel.loadBootstrap()
vd.devel.loadCss(url='https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css')
vd.devel.loadJavascript(path=source_path + '/mda.js')
vd.devel.loadJavascript(path=source_path + '/canvaswidget.js')
vd.devel.loadJavascript(path=source_path + '/templatewidget.js')
vd.devel.loadJavascript(path=source_path + '/../dist/jquery-3.3.1.min.js')
self._div_id = 'TemplateWidget-' + str(uuid.uuid4())
self._template = template
self._template_b64 = _mda32_to_base64(self._template)
self._y_scale_factor = None
self._size = size
def setYScaleFactor(self, scale_factor):
self._y_scale_factor = scale_factor
self.refresh()
def setSize(self, size):
if self._size == size:
return
self._size = size
self.refresh()
def size(self):
return self._size
def render(self):
div = vd.div(id=self._div_id)
return div
def postRenderScript(self):
js = """
let W=new window.TemplateWidget();
let X=new window.Mda();
X.setFromBase64('{template_b64}');
W.setTemplate(X);
W.setSize({width},{height});
W.setYScaleFactor({y_scale_factor});
$('#{div_id}').empty();
$('#{div_id}').css({width:'{width}px',height:'{height}px'})
$('#{div_id}').append(W.element());
"""
js = js.replace('{template_b64}', self._template_b64)
js = js.replace('{div_id}', self._div_id)
js = js.replace('{width}', str(self._size[0]))
js = js.replace('{height}', str(self._size[1]))
js = js.replace('{y_scale_factor}', str(self._y_scale_factor or 'null'))
return js
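# A minimal usage sketch, not in the original file: it assumes a vdomr display mode
# is configured (e.g. inside a notebook) and fabricates a random template array
# purely for illustration.
if __name__ == '__main__':
    template = np.random.randn(8, 60).astype(np.float32)  # (channels, timepoints)
    widget = TemplateWidget(template=template, size=(300, 200))
    widget.setYScaleFactor(2.0)
    # displaying `widget` or embedding it in a parent component then follows the
    # usual vdomr.Component workflow.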
| 30.513514
| 110
| 0.623561
| 1,903
| 0.842781
| 0
| 0
| 0
| 0
| 0
| 0
| 635
| 0.281222
|
cbf7539dbf5f7fc02064b79ad7c95855899a2086
| 5,968
|
py
|
Python
|
delsmm/smm.py
|
sisl/delsmm
|
6baae49e6f7b31c817af5668972ba6c196b66e9c
|
[
"MIT"
] | 1
|
2021-09-21T08:08:44.000Z
|
2021-09-21T08:08:44.000Z
|
delsmm/smm.py
|
albernsrya/delsmm
|
11f2750356a7c7d8b196a67af747a9bc5f39b479
|
[
"MIT"
] | null | null | null |
delsmm/smm.py
|
albernsrya/delsmm
|
11f2750356a7c7d8b196a67af747a9bc5f39b479
|
[
"MIT"
] | 1
|
2021-07-02T13:23:32.000Z
|
2021-07-02T13:23:32.000Z
|
import torch
from torch import nn
from torch.autograd import grad
from torch.autograd.functional import jacobian
from scipy.optimize import root
from ceem.dynamics import *
from ceem.nn import LNMLP
from ceem.utils import temp_require_grad
from tqdm import tqdm
from delsmm.lagsys import AbstractLagrangianSystem
import delsmm.utils as utils
class AbstractStructuredMechanicalModel(AbstractLagrangianSystem, nn.Module):
def __init__(self, qdim, dt, hidden_sizes=[32]*3, method='midpoint'):
"""
Args:
qdim (int): number of generalized coordinates
dt (float): time-step
netparams (dict): parameters of Lagrangian neural network
method (str): integration method
"""
AbstractLagrangianSystem.__init__(self, qdim, dt, method)
nn.Module.__init__(self)
self._xdim = qdim
self._udim = None
self._ydim = qdim
def kinetic_energy(self, q, v):
mass_matrix = self._mass_matrix(q)
kenergy = 0.5 * (v.unsqueeze(-2) @ (mass_matrix @ v.unsqueeze(-1))).squeeze(-1)
return kenergy
def potential_energy(self, q):
pot = self._potential(q)
return pot
def ke_hessian(self, q, qdot, create_graph=True):
"""
Compute Hessian of kinetic energy wrt qdot
Args:
q (torch.tensor): (*, qdim) generalized coordinates
qdot (torch.tensor): (*, qdim) generalized velocities
create_graph (bool): create graph when computing Hessian
Returns:
HKEqdqd (torch.tensor): (*, qdim, qdim) kinetic energy Hessian values
"""
return self._mass_matrix(q)
class StructuredMechanicalModel(AbstractStructuredMechanicalModel):
def __init__(self, qdim, dt, hidden_sizes=[32]*3, method='midpoint'):
super().__init__(qdim, dt, hidden_sizes=hidden_sizes, method=method)
self._mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)
self._potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
class AltStructuredMechanicalModel(AbstractStructuredMechanicalModel):
def __init__(self, qdim, dt, hidden_sizes=[32]*3, method='midpoint'):
super().__init__(qdim, dt, hidden_sizes=hidden_sizes, method=method)
self._mass_matrix = ConvexMMNet(qdim, hidden_sizes=hidden_sizes)
self._potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
class ForcedSMM(StructuredMechanicalModel):
def __init__(self, qdim, dt, hidden_sizes=[32]*3, method='midpoint'):
super().__init__(qdim, dt, hidden_sizes=hidden_sizes, method=method)
self._generalized_force = GeneralizedForceNet(qdim, hidden_sizes=hidden_sizes)
def generalized_forces(self, q, qdot):
return self._generalized_force(q, qdot)
class CholeskyMMNet(torch.nn.Module):
def __init__(self, qdim, hidden_sizes=None, bias=1.0, pos_enforce=lambda x: x):
self._qdim = qdim
self._bias = bias
self._pos_enforce = pos_enforce
super().__init__()
embed = SharedMMVEmbed(qdim, hidden_sizes)
self.embed = embed
self.out = torch.nn.Linear(hidden_sizes[-1], int(qdim * (qdim + 1) / 2))
def forward(self, q):
dims = list(q.shape)
dims += [dims[-1]] # [..., qdim, qdim]
if self._qdim > 1:
L_params = self.out(self.embed(q))
L_diag = self._pos_enforce(L_params[..., :self._qdim])
L_diag = L_diag + self._bias
L_tril = L_params[..., self._qdim:]
L = q.new_zeros(*dims)
L = utils.bfill_lowertriangle(L, L_tril)
L = utils.bfill_diagonal(L, L_diag)
M = L @ L.transpose(-2, -1)
else:
M = self._pos_enforce((self.out(self.embed(q)) + self._bias).unsqueeze(-2))
return M
class ConvexMMNet(torch.nn.Module):
def __init__(self, qdim, hidden_sizes=None, bias=1.0, pos_enforce=lambda x: x):
self._qdim = qdim
self._bias = bias
self._pos_enforce = pos_enforce
super().__init__()
embed = SharedMMVEmbed(qdim, hidden_sizes)
self.embed = embed
self.out = torch.nn.Linear(hidden_sizes[-1], int(qdim * (qdim + 1) / 2))
def forward(self, q):
dims = list(q.shape)
dims += [dims[-1]] # [..., qdim, qdim]
if self._qdim > 1:
L_params = self.out(self.embed(q))
L_diag = self._pos_enforce(L_params[..., :self._qdim])
L_diag += self._bias
L_offdiag = L_params[..., self._qdim:]
M = q.new_zeros(*dims)
M = utils.bfill_lowertriangle(M, L_offdiag)
M = utils.bfill_uppertriangle(M, L_offdiag)
M = utils.bfill_diagonal(M, L_diag)
else:
M = self._pos_enforce((self.out(self.embed(q)) + self._bias).unsqueeze(-2))
return M
class SharedMMVEmbed(torch.nn.Module):
def __init__(self, qdim, hidden_sizes=[32]*3):
self._qdim = qdim
self._hidden_sizes = hidden_sizes
super().__init__()
self._lnet = LNMLP(qdim, hidden_sizes[:-1], hidden_sizes[-1], activation='tanh')
def forward(self, q):
embed = self._lnet(q)
return embed
class PotentialNet(torch.nn.Module):
def __init__(self, qdim, hidden_sizes=[32]*2):
self._qdim = qdim
super().__init__()
embed = SharedMMVEmbed(qdim, hidden_sizes)
self.embed = embed
self.out = torch.nn.Linear(hidden_sizes[-1], 1)
def forward(self, q):
return self.out(self.embed(q))
class GeneralizedForceNet(torch.nn.Module):
def __init__(self, qdim, hidden_sizes=[32]*2):
self._qdim = qdim
super().__init__()
embed = SharedMMVEmbed(2*qdim, hidden_sizes)
self.embed = embed
self.out = torch.nn.Linear(hidden_sizes[-1], qdim)
def forward(self, q, v):
inp = torch.cat([q,v], dim=-1)
return self.out(self.embed(inp))
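# A brief usage sketch, not part of the original module; it assumes the ceem/delsmm
# dependencies imported above are available. Shapes follow the (*, qdim) convention
# used throughout the classes.
if __name__ == '__main__':
    qdim, dt = 2, 0.01
    smm = StructuredMechanicalModel(qdim, dt, hidden_sizes=[32, 32, 32])
    q, qdot = torch.randn(16, qdim), torch.randn(16, qdim)
    M = smm.ke_hessian(q, qdot)        # (16, 2, 2) mass matrices from the Cholesky parameterization
    T = smm.kinetic_energy(q, qdot)    # 0.5 * qdot^T M qdot for each sample
    V = smm.potential_energy(q)        # learned scalar potential for each sample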
| 34.298851
| 88
| 0.630697
| 5,607
| 0.939511
| 0
| 0
| 0
| 0
| 0
| 0
| 698
| 0.116957
|
cbf7a1ce96364e36588a482e13d4799ada06f5db
| 16,642
|
py
|
Python
|
src/speech/deep_model.py
|
dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture
|
a072cb940201bbcdb2d0f4d0dfa1dde478fa4464
|
[
"MIT"
] | 6
|
2020-08-03T03:13:25.000Z
|
2022-02-11T08:32:10.000Z
|
src/speech/deep_model.py
|
dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture
|
a072cb940201bbcdb2d0f4d0dfa1dde478fa4464
|
[
"MIT"
] | 1
|
2020-09-08T16:10:38.000Z
|
2020-09-08T16:10:38.000Z
|
src/speech/deep_model.py
|
dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture
|
a072cb940201bbcdb2d0f4d0dfa1dde478fa4464
|
[
"MIT"
] | 2
|
2020-08-03T21:37:21.000Z
|
2021-03-26T02:19:17.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence
import pdb
import math
torch.manual_seed(1)
class GRUAudio(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(GRUAudio, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_layers * self.num_directions, self.num_labels).to(
self.device)
# self.softmax = nn.Softmax()
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.randn(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
# print(out, out.shape)
# if train:
# hn, _ = pad_packed_sequence(hn, batch_first=True)
hn = hn.permute([1, 0, 2])
hn = hn.reshape(hn.shape[0], -1)
# pdb.set_trace()
out = self.classification(hn)
# out = self.softmax(out)
# pdb.set_trace()
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class AttGRU(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(AttGRU, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.u = nn.Parameter(torch.zeros((self.num_directions * self.hidden_dim)), requires_grad=True)
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
out, _ = pad_packed_sequence(out, batch_first=True)
mask = []
# pdb.set_trace()
for i in range(len(seq_length)):
mask.append([0] * int(seq_length[i].item()) + [1] * int(out.shape[1] - seq_length[i].item()))
mask = torch.ByteTensor(mask)
mask = mask.to(self.device)
x = torch.matmul(out, self.u)
x = x.masked_fill_(mask, -1e18)
alpha = F.softmax(x, dim=1)
input_linear = torch.sum(torch.matmul(alpha, out), dim=1)
out = self.classification(input_linear)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
# print(self.u[10])
return out, loss
class MeanPool(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(MeanPool, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
# self.u=nn.Parameter(torch.randn(self.num_directions*self.hidden_dim)).to(self.device)
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
out, _ = pad_packed_sequence(out, batch_first=True)
out = torch.mean(out, dim=1)
# pdb.set_trace()
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class LSTM_Audio(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(LSTM_Audio, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
# self.u=nn.Parameter(torch.randn(self.num_directions*self.hidden_dim)).to(self.device)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
#hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
#hidden = hidden.to(self.device)
# pdb.set_trace()
out, hn = self.lstm(input)
out, _ = pad_packed_sequence(out, batch_first=True)
out = torch.mean(out, dim=1)
# pdb.set_trace()
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class ATT(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(ATT, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.attn = nn.Linear(self.hidden_dim * self.num_directions, hidden_dim)
self.u=nn.Parameter(torch.randn(self.hidden_dim))
stdv = 1. / math.sqrt(self.u.shape[0])
self.u.data.normal_(mean=0, std=stdv)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True, dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.fc1 = nn.Linear(self.hidden_dim * self.num_directions, self.hidden_dim).to(self.device)
self.batch1=nn.BatchNorm1d(self.hidden_dim)
self.fc2=nn.Linear(self.hidden_dim,self.num_labels).to(self.device)
self.batch2=nn.BatchNorm1d(self.num_labels)
self.batchatt=nn.BatchNorm1d(self.hidden_dim * self.num_directions)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
out, hn = self.lstm(input)
out , _ =pad_packed_sequence(out,batch_first=True)
mask=[]
# pdb.set_trace()
for i in range(len(seq_length)):
mask.append([0]*int(seq_length[i].item())+[1]*int(out.shape[1]-seq_length[i].item()))
mask=torch.ByteTensor(mask)
mask=mask.to(self.device)
out_att=torch.tanh(self.attn(out))
x=torch.matmul(out_att,self.u)
x=x.masked_fill_(mask,-1e18)
alpha=F.softmax(x,dim=1)
input_linear=torch.sum(torch.matmul(alpha,out),dim=1)
input_linear_normalized=self.batchatt(input_linear)
out_1 = self.fc1(input_linear_normalized)
out_1_normalized=self.batch1(out_1)
out_2=self.fc2(out_1_normalized)
out_2_normalized=self.batch2(out_2)
loss = F.cross_entropy(out_2_normalized, torch.max(target, 1)[1])
# print(self.u[10])
return out_2, loss
class Mean_Pool_2(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(Mean_Pool_2, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
#self.attn = nn.Linear(self.hidden_dim * self.num_directions, hidden_dim)
#self.u=nn.Parameter(torch.randn(self.hidden_dim))
#stdv = 1. / math.sqrt(self.u.shape[0])
#self.u.data.normal_(mean=0, std=stdv)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True, dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.fc1 = nn.Linear(self.hidden_dim * self.num_directions, self.hidden_dim).to(self.device)
self.batch1=nn.BatchNorm1d(self.hidden_dim)
self.fc2=nn.Linear(self.hidden_dim,self.num_labels).to(self.device)
self.batch2=nn.BatchNorm1d(self.num_labels)
self.batchatt=nn.BatchNorm1d(self.hidden_dim * self.num_directions)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
out, hn = self.lstm(input)
out , _ =pad_packed_sequence(out,batch_first=True)
x=torch.mean(out,dim=1)
input_linear_normalized=self.batchatt(x)
out_1 = self.fc1(input_linear_normalized)
out_1_normalized=self.batch1(out_1)
out_2=self.fc2(out_1_normalized)
out_2_normalized=self.batch2(out_2)
loss = F.cross_entropy(out_2_normalized, torch.max(target, 1)[1])
# print(self.u[10])
return out_2, loss
class ConvLSTMCell(nn.Module):
def __init__(self, input_channels, hidden_channels, kernel_size, kernel_size_pool=8, stride_pool=4):
super(ConvLSTMCell, self).__init__()
assert hidden_channels % 2 == 0
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.stride=1
self.padding = int((kernel_size-1) / 2)
self.kernel_size_pool=kernel_size_pool
self.stride_pool=stride_pool
self.Wxi = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride,self.padding, bias=True)
self.Whi = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
self.Wxf = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride,self.padding, bias=True)
self.Whf = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride,self.padding, bias=False)
self.Wxc = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=True)
self.Whc = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
self.Wxo = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride,self.padding, bias=True)
self.Who = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
self.max_pool = nn.MaxPool1d(self.kernel_size_pool, stride=self.stride_pool)
self.batch = nn.BatchNorm1d(self.hidden_channels)
self.Wci = None
self.Wcf = None
self.Wco = None
def forward(self, x, h, c):
ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)
cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)
cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))
co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)
ch = co * torch.tanh(cc)
ch_pool=self.batch(self.max_pool(ch))
return ch_pool, ch, cc
def init_hidden(self, batch_size, hidden, shape):
if self.Wci is None:
self.Wci = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
self.Wcf = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
self.Wco = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
return (nn.Parameter(torch.zeros(batch_size, hidden, shape)).cuda(),
nn.Parameter(torch.zeros(batch_size, hidden, shape)).cuda())
class ConvLSTM(nn.Module):
# input_channels corresponds to the first input feature map
# hidden state is a list of succeeding lstm layers.
# kernel size is also a list, same length as hidden_channels
def __init__(self, input_channels, hidden_channels, kernel_size, step):
super(ConvLSTM, self).__init__()
assert len(hidden_channels)==len(kernel_size), "size mismatch"
self.input_channels = [input_channels] + hidden_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_layers = len(hidden_channels)
self.step = step
self._all_layers = []
self.num_labels=4
self.linear_dim=16*18
self.device= torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.classification = nn.Linear(self.linear_dim, self.num_labels)
for i in range(self.num_layers):
name = 'cell{}'.format(i)
cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size[i])
setattr(self, name, cell)
self._all_layers.append(cell)
def forward(self, input, target):
# input should be a list of inputs, like a time stamp, maybe 1280 for 100 times.
internal_state = []
outputs = []
for step in range(self.step):
x = input[step]
for i in range(self.num_layers):
name = 'cell{}'.format(i)
if step == 0:
bsize, _, shape = x.size()
(h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
shape=shape)
internal_state.append((h, c))
# do forward
(h, c) = internal_state[i]
x, new_h, new_c = getattr(self, name)(x, h, c)
internal_state[i] = (new_h, new_c)
outputs.append(x)
## mean pooling and loss function
out=[torch.unsqueeze(o, dim=3) for o in outputs]
out=torch.flatten(torch.mean(torch.cat(out,dim=3),dim=3),start_dim=1)
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1].to(self.device))
return torch.unsqueeze(out,dim=0), torch.unsqueeze(loss, dim=0)
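# A minimal smoke-test sketch, not in the original file: run a random batch through
# GRUAudio. All dimensions are illustrative; note the model fixes its batch size at
# construction, so the input batch must match it.
if __name__ == '__main__':
    model = GRUAudio(num_features=40, hidden_dim=64, num_layers=2,
                     dropout_rate=0.1, num_labels=4, batch_size=8)
    x = torch.randn(8, 100, 40)                           # (batch, time, features)
    y = F.one_hot(torch.randint(0, 4, (8,)), 4).float()   # one-hot emotion labels
    out, loss = model(x, y)                               # logits (8, 4) and scalar loss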
| 42.671795
| 175
| 0.651785
| 16,450
| 0.988463
| 0
| 0
| 0
| 0
| 0
| 0
| 1,313
| 0.078897
|
cbf8a1ef0f33878d804eb957ddcbefc421928a1b
| 40
|
py
|
Python
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-19T16:37:44.000Z
|
2019-04-19T16:37:44.000Z
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-20T11:42:44.000Z
|
2019-04-20T11:42:44.000Z
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 3
|
2019-04-19T16:37:47.000Z
|
2021-10-25T00:45:00.000Z
|
print(("F"*6+"DCBAA")[int(input())//10])
| 40
| 40
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.25
|
cbf9013b2e9891516c04252ba24b05ce5ea2d134
| 2,596
|
py
|
Python
|
tests/netcdf_engine/test_utils.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 12
|
2021-06-07T16:51:32.000Z
|
2022-03-10T12:48:00.000Z
|
tests/netcdf_engine/test_utils.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 72
|
2021-04-28T21:49:41.000Z
|
2022-02-24T13:58:11.000Z
|
tests/netcdf_engine/test_utils.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 3
|
2021-08-11T16:33:37.000Z
|
2021-12-01T20:31:12.000Z
|
# Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
from tiledb.cf.netcdf_engine._utils import get_netcdf_metadata, get_unpacked_dtype
netCDF4 = pytest.importorskip("netCDF4")
@pytest.mark.parametrize(
"input_dtype,scale_factor,add_offset,output_dtype",
(
(np.int16, None, None, np.int16),
(np.int16, np.float32(1), None, np.float32),
(np.int16, None, np.float32(1), np.float32),
(np.int16, np.float64(1), np.float32(1), np.float64),
),
)
def test_unpacked_dtype(input_dtype, scale_factor, add_offset, output_dtype):
"""Tests computing the unpacked data type for a NetCDF variable."""
with netCDF4.Dataset("tmp.nc", diskless=True, mode="w") as dataset:
dataset.createDimension("t", None)
variable = dataset.createVariable("x", dimensions=("t",), datatype=input_dtype)
if scale_factor is not None:
variable.setncattr("scale_factor", scale_factor)
if add_offset is not None:
variable.setncattr("add_offset", add_offset)
dtype = get_unpacked_dtype(variable)
assert dtype == output_dtype
def test_unpacked_dtype_unsupported_dtype_error():
"""Tests attempting to unpack a NetCDF variable with a data type that does not
support packing/unpacking."""
with netCDF4.Dataset("tmp.nc", diskless=True, mode="w") as dataset:
variable = dataset.createVariable("x", dimensions=tuple(), datatype="S1")
with pytest.raises(ValueError):
get_unpacked_dtype(variable)
@pytest.mark.parametrize(
"value, expected_result",
(
(np.float64(1), np.float64(1)),
(np.array((1), dtype=np.float64), np.float64(1)),
(np.array([1], dtype=np.int32), np.int32(1)),
),
)
def test_get_netcdf_metadata_number(value, expected_result):
"""Tests computing the unpacked data type for a NetCDF variable."""
key = "name"
with netCDF4.Dataset("tmp.nc", diskless=True, mode="w") as dataset:
dataset.setncattr(key, value)
result = get_netcdf_metadata(dataset, key, is_number=True)
assert result == expected_result
@pytest.mark.parametrize("value", (("",), (1, 2)))
def test_get_netcdf_metadata_number_with_warning(value):
"""Tests computing the unpacked data type for a NetCDF variable."""
key = "name"
with netCDF4.Dataset("tmp.nc", diskless=True, mode="w") as dataset:
dataset.setncattr(key, value)
with pytest.warns(Warning):
result = get_netcdf_metadata(dataset, key, is_number=True)
assert result is None
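# Background sketch (not part of the test module): NetCDF packing stores data in a
# narrow integer dtype and recovers physical values with the conventional linear
# transform below, which is why get_unpacked_dtype must widen the dtype whenever
# scale_factor or add_offset is present. The numbers are illustrative only.
def _packing_convention_example():
    packed = np.array([100, 200, 300], dtype=np.int16)
    scale_factor, add_offset = np.float32(0.01), np.float32(273.15)
    unpacked = packed * scale_factor + add_offset   # promoted to float32
    assert unpacked.dtype == np.float32
    return unpacked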
| 38.176471
| 87
| 0.678737
| 0
| 0
| 0
| 0
| 1,960
| 0.755008
| 0
| 0
| 564
| 0.217257
|
cbf916118eb5c3081ccd1fe9c5e35846ce4dd6b9
| 9,091
|
py
|
Python
|
bib2mp3.py
|
ewquon/bib2mp3
|
6917f5223de7d2ae1ed9857c445015a05e64936c
|
[
"MIT"
] | null | null | null |
bib2mp3.py
|
ewquon/bib2mp3
|
6917f5223de7d2ae1ed9857c445015a05e64936c
|
[
"MIT"
] | null | null | null |
bib2mp3.py
|
ewquon/bib2mp3
|
6917f5223de7d2ae1ed9857c445015a05e64936c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import numpy as np
import html
from bs4 import BeautifulSoup
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
import eyed3
from tokenizer import MyTokenizer
# acronyms to spell out
acronyms = [
'LES',
'ALM',
'MYNN',
]
spelled_out_form = ['-'.join(list(acronym)) for acronym in acronyms]
class BibtexLibrary(object):
"""Class that processes bibtex file"""
def __init__(self,
bibfile,
mp3dir=os.path.join(os.environ['HOME'],
'Music','Article Abstracts')
):
parser = BibTexParser(common_strings=True)
parser.customization = convert_to_unicode
self.bibname = os.path.split(bibfile)[1]
with open(bibfile) as bib:
bibdata = bibtexparser.load(bib, parser=parser)
self.lib = bibdata.entries
self.mp3dir = mp3dir
os.makedirs(mp3dir,exist_ok=True)
self._process_bib_data()
def _process_bib_data(self):
self.keys = [article['ID'] for article in self.lib]
assert len(self.keys) == len(set(self.keys)),\
'article keys are not unique!'
self._process_bib_authors()
self._process_bib_titles()
self._process_bib_dates()
self._process_bib_pubnames()
self._process_bib_keywords()
self._process_bib_abstracts()
def _clean_text(self,s):
s = s.replace('{','').replace('}','')
s = s.replace('~','')
s = s.replace('$\\','').replace('$','')
# get rid of HTML tags
s = BeautifulSoup(html.unescape(s),'html.parser').text
# spell out common acronyms
for acronym,spelledout in zip(acronyms,spelled_out_form):
s = s.replace(acronym,spelledout)
return s
def _process_bib_authors(self):
self.author = {}
for key,article in zip(self.keys,self.lib):
authorstr = self._clean_text(article['author'])
#print(key,authorstr)
authorlist = [
author.strip().replace('.','')
for author in authorstr.split(' and ')
]
#print(key,authorlist)
authorlist_firstlast = []
for author in authorlist:
# if "lastname, first", split by comma and reverse
firstlast = [s.strip() for s in author.split(',')]
assert (len(firstlast) <= 2) # should be 2 or 1
firstlast = ' '.join(firstlast[::-1])
authorlist_firstlast.append(firstlast)
#print(key,authorlist_firstlast)
if len(authorlist_firstlast) == 1:
authorstr = authorlist_firstlast[0]
elif len(authorlist_firstlast) == 2:
authorstr = '{:s} and {:s}'.format(*authorlist_firstlast)
elif len(authorlist_firstlast) == 3:
authorstr = '{:s}, {:s}, and {:s}'.format(*authorlist_firstlast)
else:
authorstr = '{:s} et al'.format(authorlist_firstlast[0])
#print(key,authorstr)
self.author[key] = authorstr
def _process_bib_titles(self):
self.title = {}
for key,article in zip(self.keys,self.lib):
self.title[key] = self._clean_text(article['title'])
def _process_bib_dates(self):
self.year = {}
self.date = {}
for key,article in zip(self.keys,self.lib):
year = article.get('year',None)
if year is None:
self.date[key] = None
else:
self.year[key] = year
self.date[key] = year
month = article.get('month',None)
if month is not None:
self.date[key] = '{:s} {:s}'.format(month,year)
num_missing_dates = np.count_nonzero(
[(d is None) for _,d in self.date.items()]
)
if num_missing_dates > 0:
print('Note:',
num_missing_dates,'/',len(self.lib),
'articles are missing dates')
def _process_bib_pubnames(self):
self.publication = {}
for key,article in zip(self.keys,self.lib):
if article['ENTRYTYPE'] == 'article':
name = article['journal']
else:
name = article.get('booktitle',None)
if name is not None:
name = self._clean_text(name)
self.publication[key] = name
num_missing_pubnames = np.count_nonzero(
[(n is None) for _,n in self.publication.items()]
)
if num_missing_pubnames > 0:
print('Note:',
num_missing_pubnames,'/',len(self.lib),
'articles are missing publication names')
def _process_bib_keywords(self):
self.keywords = {}
for key,article in zip(self.keys,self.lib):
kw = article.get('keywords',None)
if kw is not None:
kw = self._clean_text(kw)
self.keywords[key] = kw
num_missing_keywords = np.count_nonzero(
[(kw is None) for _,kw in self.keywords.items()]
)
if num_missing_keywords > 0:
print('Note:',
num_missing_keywords,'/',len(self.lib),
'articles are missing keywords')
def _process_bib_abstracts(self):
self.abstract = {}
for key,article in zip(self.keys,self.lib):
ab = article.get('abstract',None)
if ab is not None:
ab = self._clean_text(ab)
self.abstract[key] = ab
num_missing_abstracts = np.count_nonzero(
[(ab is None) for _,ab in self.abstract.items()]
)
if num_missing_abstracts > 0:
print('Note:',
num_missing_abstracts,'/',len(self.lib),
'articles are missing abstracts')
def generate_descriptions(self):
self.description = {}
# minimal information: author, title
for key in self.keys:
if self.date[key]:
desc = 'In {:s}, '.format(self.date[key])
else:
desc = ''
desc += '{:s} published a paper entitled: {:s}.'.format(
self.author[key], self.title[key])
if self.publication[key]:
desc += ' This was published in {:s}.'.format(self.publication[key])
if self.keywords[key]:
desc += ' Publication keywords include: '
kwlist = [kw.strip() for kw in self.keywords[key].split(',')]
                if len(kwlist) == 1:
                    kwstr = kwlist[0]
                elif len(kwlist) == 2:
kwstr = '{:s} and {:s}'.format(*kwlist)
else:
kwlist[-1] = 'and '+kwlist[-1]
kwstr = ', '.join(kwlist)
desc += kwstr + '.'
if self.abstract[key]:
desc += ' The abstract reads: ' + self.abstract[key]
else:
desc += ' There is no abstract available.'
desc += ' This concludes the summary of the work' \
+ ' by {:s}.'.format(self.author[key])
self.description[key] = desc
def to_mp3(self,key=None,overwrite=False,language='en-GB',debug=False):
from gtts import gTTS
if key is None:
keylist = self.keys
elif isinstance(key,str):
keylist = [key]
else:
assert isinstance(key,list)
keylist = key
tokefunc = lambda text: MyTokenizer(text,debug=debug)
for key in keylist:
mp3file = os.path.join(self.mp3dir,'{:s}.mp3'.format(key))
overwriting = False
if os.path.isfile(mp3file):
if overwrite:
overwriting = True
else:
print('File exists, skipping',key)
continue
assert hasattr(self,'description'), \
'Need to run generate_descriptions'
tts = gTTS(text=self.description[key], lang=language, slow=False,
tokenizer_func=tokefunc)
if overwriting:
print('Overwriting',mp3file)
else:
print('Writing',mp3file)
tts.save(mp3file)
# add metadata
mp3 = eyed3.load(mp3file)
mp3.initTag()
mp3.tag.artist = self.author[key]
mp3.tag.title = self.title[key]
mp3.tag.album = self.bibname
mp3.tag.album_artist = 'bib2mp3.py'
mp3.tag.save()
if debug: print(key,':',self.description[key])
#==============================================================================
if __name__ == '__main__':
import sys
if len(sys.argv) <= 1:
sys.exit('Specify bib file')
bib = BibtexLibrary(sys.argv[1])
bib.generate_descriptions()
bib.to_mp3()
| 36.657258
| 84
| 0.527995
| 8,403
| 0.924321
| 0
| 0
| 0
| 0
| 0
| 0
| 1,295
| 0.142449
|
cbf92713179f71318935e2ab443c7a93e35ceec1
| 529
|
py
|
Python
|
build/create_tag_body.py
|
Nexusforge/Nexus.Extensions.RpcDataSource
|
e379243a1aca38c03e882759964d8bc008a7c8bd
|
[
"MIT"
] | null | null | null |
build/create_tag_body.py
|
Nexusforge/Nexus.Extensions.RpcDataSource
|
e379243a1aca38c03e882759964d8bc008a7c8bd
|
[
"MIT"
] | null | null | null |
build/create_tag_body.py
|
Nexusforge/Nexus.Extensions.RpcDataSource
|
e379243a1aca38c03e882759964d8bc008a7c8bd
|
[
"MIT"
] | null | null | null |
import os
import re
import subprocess
tag = os.getenv('GITHUB_REF_NAME')
if tag is None:
raise Exception("GITHUB_REF_NAME is not defined")
with open("tag_body.txt", "w") as file:
output = subprocess.check_output(["git", "tag", "-l", "--format='%(contents)'", tag], stdin=None, stderr=None, shell=False)
match = re.search("'(.*)'", output.decode("utf8"), re.DOTALL)
if match is None:
raise Exception("Unable to extract the tag body")
tag_body = str(match.groups(1)[0])
file.write(tag_body)
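# A hedged usage sketch, not part of the script: simulate the GitHub Actions
# environment locally before running it. The tag name "v1.2.3" is an illustrative
# assumption and must exist as an annotated tag in the repository.
#
#   import os, subprocess
#   os.environ["GITHUB_REF_NAME"] = "v1.2.3"
#   subprocess.run(["python", "build/create_tag_body.py"], check=True)
#   # tag_body.txt then contains the annotated-tag message for v1.2.3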
| 24.045455
| 127
| 0.655955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.283554
|
cbf954558d8ce8ef179a3032c8e9bbe1051befb6
| 3,456
|
py
|
Python
|
amos/django_orchestrator/api/views.py
|
amosproj/2020ws02-computer-vision-for-sights
|
66641de397af77f16ee36aa9e860ca7249982cb1
|
[
"MIT"
] | 2
|
2021-02-03T23:25:14.000Z
|
2021-03-18T15:03:12.000Z
|
amos/django_orchestrator/api/views.py
|
amosproj/2020ws02-computer-vision-for-sights
|
66641de397af77f16ee36aa9e860ca7249982cb1
|
[
"MIT"
] | 2
|
2021-02-03T22:35:12.000Z
|
2021-02-12T14:09:31.000Z
|
amos/django_orchestrator/api/views.py
|
amosproj/2020ws02-computer-vision-for-sights
|
66641de397af77f16ee36aa9e860ca7249982cb1
|
[
"MIT"
] | 1
|
2021-03-18T15:03:14.000Z
|
2021-03-18T15:03:14.000Z
|
"""This module contains the views exposed to the user."""
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.request import Request
from api.view_handlers import (
handle_get_trained_city_model,
handle_persist_sight_image,
handle_add_new_city,
handle_get_supported_cities,
HTTP_200_MESSAGE,
handle_get_latest_city_model_version,
)
@api_view(["GET"])
def get_trained_city_model(request: Request, city: str) -> HttpResponse:
"""Returns a trained city model as a .pt file.
Parameters
----------
request: Request
Request object.
city: str
Name of the city.
Returns
-------
response: HttpResponse
Response object containing the trained model as a .pt file.
"""
response = handle_get_trained_city_model(city.replace(' ', '_'))
return HttpResponse(response[0], status=response[1])
@api_view(["GET"])
def get_latest_city_model_version(request: Request, city: str) -> HttpResponse:
"""Returns the latest version of the persisted city model.
Parameters
----------
request: Request
Request object.
city: str
Name of the city.
Returns
-------
response: HttpResponse
Response object containing the latest model version.
"""
response = handle_get_latest_city_model_version(city.replace(' ', '_'))
return HttpResponse(response[0], status=response[1])
@api_view(["POST"])
def persist_sight_image(request: Request, city: str) -> HttpResponse:
"""Persists an image of a given supported city in the data warehouse.
Parameters
----------
request: Request
Request object.
city: str
Name of the city.
Returns
-------
response: HttpResponse
Response object containing a status message.
"""
image = request.FILES["image"] if "image" in request.FILES else None
response = handle_persist_sight_image(city.replace(' ', '_'), image)
return HttpResponse(response[0], status=response[1])
@api_view(["POST"])
def add_new_city(request: Request, city: str) -> HttpResponse:
"""Adds a new city to the internally managed list of supported cities.
Parameters
----------
request: Request
Request object.
city: str
Name of the city to add.
Returns
-------
response: HttpResponse
Response object containing a default 200 HTTP message.
"""
response = handle_add_new_city(city.replace(' ', '_'))
return HttpResponse(response[0], status=response[1])
@api_view(["GET"])
def get_supported_cities(request: Request) -> HttpResponse:
"""Returns a list containing the currently supported cities.
Parameters
----------
request: Request
Request object.
Returns
-------
response: HttpResponse
Response object containing the list of supported cities.
"""
response_content = handle_get_supported_cities()
return HttpResponse(response_content[0], status=response_content[1])
@api_view(["GET"])
def get_index(request):
"""Returns a default 200 HTTP code.
Parameters
----------
request: Request
Request object.
Returns
-------
response: HttpResponse
Response object containing a default 200 status code.
Notes
-----
This endpoint is only provided as a best practice.
"""
return HttpResponse(HTTP_200_MESSAGE, 200)
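# A hedged sketch of how these views might be wired up in a urls.py; the route
# strings below are illustrative assumptions, not the project's actual routing.
#
#   from django.urls import path
#   from api import views
#
#   urlpatterns = [
#       path('', views.get_index),
#       path('cities/', views.get_supported_cities),
#       path('cities/<str:city>/', views.add_new_city),
#       path('cities/<str:city>/image/', views.persist_sight_image),
#       path('cities/<str:city>/model/', views.get_trained_city_model),
#       path('cities/<str:city>/model/version/', views.get_latest_city_model_version),
#   ]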
| 25.984962
| 79
| 0.664063
| 0
| 0
| 0
| 0
| 3,030
| 0.876736
| 0
| 0
| 1,896
| 0.548611
|
cbf9db657ca3437e042cc26606350dba666d7720
| 1,154
|
py
|
Python
|
samples/verify.py
|
ssmbct-netops/CyberSaucier
|
58e965e7b37ad74563319cd6b2b5c68da2dbd6c3
|
[
"MIT"
] | 17
|
2019-02-01T06:46:52.000Z
|
2021-12-28T06:33:07.000Z
|
samples/verify.py
|
Melon-Tropics/CyberSaucier
|
780fb3df8518a41ed2d14b9a4b33f5ae520c15d4
|
[
"MIT"
] | 9
|
2020-05-01T00:36:24.000Z
|
2022-03-29T17:04:21.000Z
|
samples/verify.py
|
Melon-Tropics/CyberSaucier
|
780fb3df8518a41ed2d14b9a4b33f5ae520c15d4
|
[
"MIT"
] | 4
|
2019-10-20T03:22:34.000Z
|
2022-02-07T18:27:04.000Z
|
import requests, json, argparse, os
from termcolor import colored
parser = argparse.ArgumentParser(description="Verify the recipes by running them through CyberSaucier")
parser.add_argument('--rulefolder', help='Folder containing the json recipes')
parser.add_argument("--url", help="URL to CyberSaucier", default="http://localhost:7000")
args = parser.parse_args()
for root, dirs, files in os.walk(args.rulefolder):
path = root.split(os.sep)
for fname in files:
if fname.lower().endswith("json"):
file = os.path.join(root, fname)
with open(file, 'r') as f:
data=f.read()
rule = json.loads(data)
if "verify" in rule:
u = args.url + "/" + rule["name"]
resp = requests.post(url=u, data=rule["verify"]["originalInput"], headers={'Content-Type':'text/plain'})
resp = resp.json()
if resp["result"] == rule["verify"]["expectedOutput"]:
print(colored(rule["name"] + " : PASS", "green"))
else:
print(colored(rule["name"] + " : FAIL", "red"))
| 42.740741
| 120
| 0.57279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.266031
|
cbfa1107b8b7c29048f818cde663861f0e4ac256
| 761
|
py
|
Python
|
tests/test_binary_tree.py
|
avere001/dsplot
|
89948c2f1b16e00bb3a240f73d0cb100b3eac847
|
[
"MIT"
] | 8
|
2021-08-08T06:06:39.000Z
|
2022-02-04T18:30:38.000Z
|
tests/test_binary_tree.py
|
avere001/dsplot
|
89948c2f1b16e00bb3a240f73d0cb100b3eac847
|
[
"MIT"
] | 1
|
2022-01-04T02:01:36.000Z
|
2022-01-04T02:01:36.000Z
|
tests/test_binary_tree.py
|
avere001/dsplot
|
89948c2f1b16e00bb3a240f73d0cb100b3eac847
|
[
"MIT"
] | 2
|
2021-08-18T12:28:40.000Z
|
2022-01-03T23:56:41.000Z
|
import os
import pytest
from dsplot.errors import InputException
from dsplot.tree import BinaryTree
def test_binary_tree():
tree = BinaryTree(nodes=[5, 4, 8, 11, None, 13, 4, 7, 2, None, None, 5, 1])
assert tree.root.val == 5
assert tree.root.right.left.val == 13
assert tree.root.right.right.left.val == 5
assert tree.preorder() == [5, 4, 11, 7, 2, 8, 13, 4, 5, 1]
assert tree.inorder() == [7, 11, 2, 4, 5, 13, 8, 5, 4, 1]
assert tree.postorder() == [7, 2, 11, 4, 13, 5, 1, 4, 8, 5]
tree.plot('tests/test_data/tree.png')
assert 'tree.png' in os.listdir('tests/test_data')
with pytest.raises(InputException) as e:
BinaryTree(nodes=[])
assert str(e.value) == 'Input list must have at least 1 element.'
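# For reference (not part of the test): the `nodes` list above is read as a
# level-order encoding with None marking absent children, so -- assuming the usual
# breadth-first construction -- it describes this tree, which matches the
# traversal assertions:
#
#               5
#             /   \
#            4     8
#           /     / \
#         11    13   4
#        /  \       / \
#       7    2     5   1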
| 29.269231
| 79
| 0.628121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.124836
|