max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
pyrfuniverse/utils/__init__.py | happyCoderJDFJJ/pyrfuniverse | 0 | 12772551 | <reponame>happyCoderJDFJJ/pyrfuniverse<filename>pyrfuniverse/utils/__init__.py
from .controller import RFUniverseController
from .tobor_controller import RFUniverseToborController
__all__ = [
'RFUniverseController', 'RFUniverseToborController'
] | 1.28125 | 1 |
example/drf_integrations_example/integrations/urls.py | yoyowallet/drf-integrations-framework | 1 | 12772552 | from rest_framework import routers
from . import ecommerce_viewsets
# DRF router exposing the e-commerce purchase viewset.
router = routers.DefaultRouter()
# Registers list/detail routes under /ecommerce/purchase/ with
# reverse names prefixed "api-purchases".
router.register(
    r"ecommerce/purchase", ecommerce_viewsets.PurchasesViewSet, basename="api-purchases"
)
urlpatterns = router.urls
| 1.6875 | 2 |
src/irobotframework/completion_finders.py | bollwyvl/irobotframework | 9 | 12772553 | # Copyright (c) 2018 Georgia Tech Research Corporation
# Distributed under the terms of the BSD-3-Clause License
""" Completion implementations
"""
# pylint: disable=W0613,C0330,R0913,W0703,R0914
import re
from typing import List, Tuple
from IPython.core.completerlib import get_root_modules
from robot.libraries import STDLIBS
from robot.parsing.datarow import DataRow
from robot.parsing.robotreader import RobotReader
from .completer import Completer
# Robot Framework table names offered by the table completer.
# (A duplicate assignment of this constant was removed.)
TABLE_NAMES = ["Keywords", "Settings", "Tasks", "Test Cases", "Variables"]
RE_TABLE_NAME = (
r"^\*+ *(?P<name>settings?|(user )?keywords?|test cases?|variables?|tasks?) *\*+$"
)
RE_SEP = r"\|| {2,}|\t"
DEFAULT_SEP = " "
SUITE_SETTINGS = [
"Default Tags",
"Documentation",
"Force Tags",
"Library",
"Metadata",
"Resource",
"Suite Setup",
"Suite Teardown",
"Task Setup",
"Task Teardown",
"Task Template",
"Task Timeout",
"Test Setup",
"Test Teardown",
"Test Template",
"Test Timeout",
"Variables",
]
CASE_SETTINGS = ["Documentation", "Setup", "Tags", "Teardown", "Template", "Timeout"]
KEYWORD_SETTINGS = [
"Documentation",
"Tags",
"Teardown",
"Timeout",
"Arguments",
"Return",
]
# lowercase line-starting tokens that trigger keyword completion
RE_PRE_KEYWORD_SUITE = r"^((suite|test|task) (setup|teardown)|(test|task) template)$"
RE_PRE_KEYWORD_BRACKET = r"^\[ *(setup|teardown|template) *\]$"
def get_default_completion_finders():
    """ The default ordering of completers, roughly from cheapest to most dear
    """
    finders = [
        complete_cell_magics,
        complete_tables,
        complete_libraries,
        complete_settings,
        complete_variables,
        complete_keywords,
    ]
    return finders
def complete_cell_magics(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Complete with all defined cell magics.

        Fixed to honour the ``(matches, metadata)`` contract of the other
        completion finders: previously it returned ``None`` for non-magic
        lines, and wrapped both ``matches`` and the final result in stray
        one-element tuples.
    """
    matches = []
    # Cell magics only make sense at the very start of the cell
    if not offset and line.startswith("%"):
        stem = line.replace("%", "").strip()
        matches = [
            f"%%{name}"
            for name in completer.parent.robot_magics["cell"]
            if name.startswith(stem)
        ]
    return (
        matches,
        [
            {"start": offset, "end": offset + len(line), "type": "magic", "text": m}
            for m in matches
        ],
    )
def complete_tables(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Suggest full table headers for a line that starts a new table
    """
    suggestions = []

    # Pipe-separated table headers take a different template
    if line.startswith("| *"):
        stem = line.replace("*", "").replace("|", "").lower().strip()
        template = "| *** {} *** |\n"
    elif line.startswith("*"):
        stem = line.replace("*", "").lower().strip()
        template = "*** {} ***\n"
    else:
        stem = template = None

    if template is not None:
        for name in TABLE_NAMES:
            if not stem or name.lower().startswith(stem):
                suggestions.append(template.format(name))

    return (
        suggestions,
        [
            {"start": offset, "end": offset + len(line), "type": "table", "text": s}
            for s in suggestions
        ],
    )
def complete_settings(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Suggest setting names appropriate to the table under the cursor
    """
    tokens = DataRow(RobotReader.split_row(line[:line_cursor])).data
    table = find_current_table(code, cursor_pos)

    if table is None:
        return [], []

    # Pick the candidate settings (and bracket style) for this table type
    if "etting" in table:
        settings, bracket = SUITE_SETTINGS, False
    elif "test case" in table or "task" in table:
        settings, bracket = CASE_SETTINGS, True
    elif "keyword" in table:
        settings, bracket = KEYWORD_SETTINGS, True
    else:
        settings, bracket = None, False

    if not settings:
        return [], []

    found = complete_table_settings(completer, settings, tokens[-1], bracket)

    # Trailing text: close the bracket if needed, then a cell separator
    sep = " | " if line.startswith("|") else " "
    if bracket:
        post = "" if line.strip()[-1] == "]" else "]" + sep
    else:
        post = sep

    prefix = line[:line_cursor - len(tokens[-1])]
    matches = [f"{prefix}{item}{post}" for item in found]
    return (
        matches,
        [
            {
                "start": cursor_pos,
                "end": offset + len(line),
                "type": "setting",
                "text": m,
            }
            for m in matches
        ],
    )
def complete_variables(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Complete variable references

        Scans code and history for previously-seen variable names matching
        the open ``${``/``@{``/``&{``/``%{`` fragment before the cursor.
        These aren't particularly clever in terms of scope.
    """
    matches = []
    # Cheap bail-out: no variable sigil anywhere on the line
    if not re.findall(r"[\$&@%]", line):
        return matches, []
    try:
        # The open, unclosed variable fragment immediately before the cursor
        frag = re.findall(r".*([\$%&@]\{[^{}]*$)", line[:line_cursor])[0]
        frag_type = frag[0]
        frag_start = frag[2:]
    except Exception:
        return matches, []
    # Only append a closing brace if one isn't already under the cursor.
    # (Fixed: ``trail`` was previously left unbound -- a NameError -- when
    # the character at the cursor existed but was not ``}``.)
    try:
        trail = "" if line[line_cursor] == "}" else "}"
    except IndexError:
        trail = "}"
    for var in find_all_variable_names(code, history, frag_type):
        if frag_start.lower() in var.lower():
            matches += [line.split(frag)[0] + frag_type + "{" + var + trail]
    return (
        matches,
        [
            {
                "start": cursor_pos,
                "end": offset + len(line),
                "type": "variable",
                "text": m,
            }
            for m in matches
        ],
    )
def complete_libraries(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Complete library names

        Offers Robot standard libraries plus importable root modules after
        a ``Library`` setting. This could do better with sub-modules.
    """
    matches = []
    row = DataRow(RobotReader.split_row(line))
    tokens = row.data
    # Only complete inside a Settings table
    if not re.findall(r"\* *settings", code.lower(), flags=re.I):
        return matches, []
    # Fixed: require a second token before indexing ``tokens[1]`` below;
    # a bare "Library" line previously raised IndexError.
    if len(tokens) < 2 or tokens[0].lower() != "library":
        return matches, []
    for lib in list(STDLIBS) + list(get_root_modules()):
        if tokens[1].lower() in lib.lower():
            pre = line.split(tokens[1])[0]
            if line.startswith("|"):
                matches += [f"""{pre}{lib} | """]
            else:
                matches += [f"""{pre}{lib} """]
    return (
        matches,
        [
            {
                "start": cursor_pos,
                "end": offset + len(line),
                "type": "library",
                "text": m,
            }
            for m in matches
        ],
    )
def complete_keywords(
    completer: Completer,
    line: str,
    code: str,
    cursor_pos: int,
    line_cursor: int,
    offset: int,
    history: List[str],
) -> Tuple[List[str], List[dict]]:
    """ Complete keywords from all imported libraries

        Handles a bare keyword cell, suite-level pre-keyword settings
        (e.g. ``Suite Setup``) and bracket settings (e.g. ``[Setup]``),
        preserving any leading BDD word (given/when/then/and/but).
    """
    matches = []
    row = DataRow(RobotReader.split_row(line))
    tokens = row.data
    if len(tokens) < 2:
        return matches, []
    if len(tokens) == 2 and not tokens[0].strip():
        kw_token = tokens[1]
    elif re.match(RE_PRE_KEYWORD_SUITE, tokens[0], flags=re.I) is not None:
        kw_token = tokens[1]
    # Fixed: guard the tokens[2] access -- "[Setup]" with no keyword cell
    # yet previously raised IndexError.
    elif len(tokens) > 2 and re.match(
        RE_PRE_KEYWORD_BRACKET, tokens[1], flags=re.I
    ) is not None:
        kw_token = tokens[2]
    else:
        return matches, []
    bdd = None
    orig_kw_token = kw_token
    bdd_token = re.match(r"^(given|when|then|and|but)?\b *(.*)", kw_token, flags=re.I)
    if bdd_token is not None:
        bdd, kw_token = bdd_token.groups()
    for doc in completer.docs(history).values():
        for keyword in getattr(doc, "keywords", []):
            if kw_token.lower() in keyword.name.lower():
                suggest_token = f"{bdd} {keyword.name}" if bdd else keyword.name
                pre = line.split(orig_kw_token)[0]
                # ``startswith`` is safe for an all-whitespace line, unlike
                # the previous ``line.strip()[0]`` which could IndexError
                if line.strip().startswith("|"):
                    matches.append(f"""{pre}{suggest_token} | """)
                else:
                    matches.append(f"""{pre}{suggest_token} """)
    return (
        matches,
        [
            {
                "start": cursor_pos,
                "end": offset + len(line),
                "type": "keyword",
                "text": m,
            }
            for m in matches
        ],
    )
# Utility functions
def complete_table_settings(
    completer: Completer, settings: List[str], token: str, bracket: bool = False
) -> List[str]:
    """ Find settings that might be in a table

        When ``bracket`` is set, candidates are offered in their
        ``[Setting`` form (test case / keyword tables).
    """
    candidates = (f"[{s}" for s in settings) if bracket else iter(settings)
    prefix = token.lower()
    return [c for c in candidates if c.lower().startswith(prefix)]
def find_current_table(code: str, cursor_pos: int) -> str:
    """ Given some code, return the lowercased name of the table the cursor
        is currently inside, or None if no table header precedes it.
    """
    chunks = re.split(
        r"^(\| )?(\*+ *[^*]+? *\*+)", code[:cursor_pos], flags=re.I | re.M
    )
    # Walk backwards so the nearest preceding table header wins
    for chunk in reversed(chunks):
        if chunk is None:
            continue
        hit = re.match(
            r"^\*+ *(?P<name>settings?|(user )?keywords?|test cases?"
            r"|variables?|tasks?) *\*+$",
            chunk,
            flags=re.I,
        )
        if hit is not None:
            return hit.group("name").lower()
    return None
def find_all_variable_names(code: str, history: str, frag_type: str):
    """ Find all the variable names in the code and history.

        ``frag_type`` is one of the Robot variable sigils: ``%`` selects
        environment variables, anything else selects scalar/list/dict
        variables.
    """
    corpus = "\n".join(history + [code])
    if frag_type == "%":
        pattern = r"""%\{[^\$%&@}]+}"""
    else:
        pattern = r"""[\$&@]\{[^\$%&@}]+}"""
    # Strip the two-character sigil prefix and the closing brace
    return [hit[2:-1] for hit in re.findall(pattern, corpus)]
| 2.03125 | 2 |
recruiter/api_urls.py | b1pb1p/opensource-job-portal | 199 | 12772554 | <filename>recruiter/api_urls.py
from django.urls import path, re_path
from recruiter import api_views
app_name = "api_recruiter"
urlpatterns = [
    # Authentication / account management
    path("login/", api_views.login_view, name="api_login"),
    path("out/", api_views.getout, name="getout"),
    path("change-password/", api_views.change_password, name="api_change_password"),
    path("profile/", api_views.user_profile, name="api_user_profile"),
    # Job listings
    path("job/list/", api_views.jobs_list, name="api_list"),
    # Reference-data lookups (no reverse names registered for these)
    path("skill/list/", api_views.skill_list),
    path("industry/list/", api_views.industry_list),
    path("city/list/", api_views.city_list),
    path("state/list/", api_views.state_list),
    path("company/list/", api_views.company_list),
    path("functional-area/list/", api_views.functional_area_list),
    path("job/inactive/list/", api_views.inactive_jobs, name="api_inactive_jobs"),
    # Recruiter profile / company pages
    path("profile/edit/", api_views.edit_profile, name="edit_profile"),
    path("company-profile/", api_views.view_company, name="view_company"),
    # Job CRUD; job_type is a slug, job_post_id is alphanumeric
    re_path(r"^job/(?P<job_type>[-\w]+)/new/$", api_views.new_job, name="api_new_job"),
    re_path(
        r"^job/edit/(?P<job_post_id>[a-zA-Z0-9]+)/$",
        api_views.edit_job,
        name="api_edit_job",
    ),
    re_path(
        r"^job/delete/(?P<job_post_id>[a-zA-Z0-9]+)/$",
        api_views.delete_job,
        name="api_delete_job",
    ),
]
| 2 | 2 |
mechroutines/es/newts/_irc.py | keceli/mechdriver | 0 | 12772555 | """ irc x
"""
import automol.reac
import autofile
import elstruct
from mechlib.reaction import grid as rxngrid
from mechlib.amech_io import printer as ioprinter
from mechroutines.es import runner as es_runner
from mechroutines.es.runner import qchem_params
# Intrinsic Reaction Coordinates
def execute_irc(zma, ts_info,
                mod_ini_thy_info, ini_method_dct,
                ini_scn_run_fs, ini_scn_save_fs,
                es_keyword_dct,
                directions=(elstruct.Job.IRCF, elstruct.Job.IRCR)):
    """ Run and save the IRC.

        Runs the IRC in each requested direction (forward and reverse by
        default) starting from the transition-state Z-Matrix, then saves
        any direction whose job output can be read.

        :param zma: transition-state Z-Matrix to start the IRC from
        :param directions: elstruct job types to run
        :return: whether the job read succeeded
        :rtype: bool

        NOTE(review): ``success`` is overwritten on every loop iteration,
        so the returned flag reflects only the *last* direction run --
        confirm this is intended.
    """
    coord_name = 'IRC'
    overwrite = es_keyword_dct['overwrite']
    retryfail = es_keyword_dct['retryfail']
    # Set up run filesys
    run_fs = autofile.fs.run(ini_scn_run_fs[1].path([coord_name]))
    # Run and Read the IRC in the forward and reverse direction
    for direction in directions:
        # Direction-specific script and keyword arguments
        script_str, kwargs = qchem_params(
            ini_method_dct, job=direction)
        run_irc(
            zma,
            direction,
            coord_name,
            run_fs,
            ini_scn_save_fs,
            ts_info,
            mod_ini_thy_info,
            overwrite,
            retryfail,
            script_str,
            **kwargs
        )
        success, _ = es_runner.read_job(
            job=direction,
            run_fs=run_fs,
        )
        if success:
            save_irc(
                direction,
                coord_name,
                run_fs,
                ini_scn_save_fs,
                mod_ini_thy_info
            )
    return success
def run_irc(zma, irc_job, coord_name, run_fs, ini_scn_save_fs,
            ts_info, mod_ini_thy_info, overwrite, retryfail,
            opt_script_str, **opt_kwargs):
    """ Run the irc job.

        Launches the electronic-structure IRC calculation unless points
        for this direction already exist in the SAVE filesystem, or a
        readable finished job already sits in the RUN filesystem.

        :param irc_job: elstruct.Job.IRCF or elstruct.Job.IRCR
    """
    def _irc_ran(ini_scn_save_fs, coord_name, irc_job):
        """ See if coords are available.

            Forward-direction points are saved with positive coordinate
            values, reverse-direction points with negative ones.
        """
        coords = ini_scn_save_fs[-1].existing([coord_name])
        if irc_job == elstruct.Job.IRCF:
            ran_coords = [coord[1][0] for coord in coords if coord[1][0] > 0.0]
        else:
            ran_coords = [coord[1][0] for coord in coords if coord[1][0] < 0.0]
        return bool(ran_coords)
    # Maybe check for positive coords
    if not _irc_ran(ini_scn_save_fs, coord_name, irc_job):
        print('No IRC calculation in save filesystem')
        opt_success, _ = es_runner.read_job(
            job=irc_job,
            run_fs=run_fs,
        )
        # Only launch if there is no finished job to read
        need_irc = not opt_success
    else:
        print('Found IRC directory at '
              f'{ini_scn_save_fs[1].path([coord_name])}')
        need_irc = False
    if need_irc:
        print('Running IRC calculation...')
        es_runner.run_job(
            job=irc_job,
            script_str=opt_script_str,
            run_fs=run_fs,
            # NOTE(review): a Z-Matrix is passed through the ``geo``
            # keyword -- presumably run_job accepts either; confirm.
            geo=zma,
            spc_info=ts_info,
            thy_info=mod_ini_thy_info,
            overwrite=overwrite,
            retryfail=retryfail,
            **opt_kwargs
        )
def save_irc(irc_job, coord_name,
             run_fs, ini_scn_save_fs, mod_ini_thy_info):
    """ Read IRC output and store data in filesystem.

        Saves the geometry, energy, and (when present) gradient and
        Hessian of every IRC point, plus a single-point energy entry at
        the input level of theory, then refreshes the trajectory file.

        :param irc_job: elstruct.Job.IRCF or elstruct.Job.IRCR
        :return: the filesystem locators that were written
        :rtype: list
    """
    opt_success, opt_ret = es_runner.read_job(
        job=irc_job,
        run_fs=run_fs,
    )
    locs_lst = []
    # Fixed: read_job returns a success flag, and ``is not None`` was
    # always True, so failed jobs were parsed anyway; test truthiness.
    if opt_success:
        # Read the IRC output file
        inf_obj, inp_str, out_str = opt_ret
        prog = inf_obj.prog
        geos, gras, hessians = elstruct.reader.irc_points(prog, out_str)
        coord_vals, enes = elstruct.reader.irc_path(prog, out_str)
        # Write the data for each geom along IRC to the filesystem
        save_path = ini_scn_save_fs[1].path([coord_name])
        print(" - Saving...")
        print(f" - Save path: {save_path}")
        for idx, val in enumerate(coord_vals):
            # Set locs idx; for reverse, skip the saddle point (idx 0),
            # which the forward direction already saves
            locs_idx = idx
            if irc_job == elstruct.Job.IRCR:
                if locs_idx == 0:
                    continue
            # Scale the coordinates so rounding to .2f number is non-zero
            locs = [coord_name, [val*100.0]]
            locs_lst.append(locs)
            # Save files
            ini_scn_save_fs[-1].create(locs)
            ini_scn_save_fs[-1].file.energy.write(enes[idx], locs)
            ini_scn_save_fs[-1].file.geometry.write(geos[idx], locs)
            ini_scn_save_fs[-1].file.geometry_input.write(inp_str, locs)
            ini_scn_save_fs[-1].file.geometry_info.write(inf_obj, locs)
            if gras:
                ini_scn_save_fs[-1].file.gradient.write(gras[idx], locs)
                ini_scn_save_fs[-1].file.gradient_info.write(inf_obj, locs)
            if hessians:
                ini_scn_save_fs[-1].file.hessian.write(hessians[idx], locs)
                ini_scn_save_fs[-1].file.hessian_info.write(inf_obj, locs)
            # Mirror the energy into the single-point filesystem
            scn_save_path = ini_scn_save_fs[-1].path(locs)
            sp_save_fs = autofile.fs.single_point(scn_save_path)
            sp_save_fs[-1].create(mod_ini_thy_info[1:4])
            sp_save_fs[-1].file.input.write(inp_str, mod_ini_thy_info[1:4])
            sp_save_fs[-1].file.info.write(inf_obj, mod_ini_thy_info[1:4])
            sp_save_fs[-1].file.energy.write(enes[idx], mod_ini_thy_info[1:4])
    update_traj_file(coord_name, ini_scn_save_fs, mod_ini_thy_info)
    return locs_lst
def update_traj_file(coord_name, ini_scn_save_fs, mod_ini_thy_info):
    """ Rewrite the full IRC trajectory file from everything currently
        stored in the SAVE filesystem.
    """
    existing_locs = ini_scn_save_fs[-1].existing()
    if not existing_locs:
        return
    es_runner.scan.write_traj(
        coord_name, ini_scn_save_fs, mod_ini_thy_info, sorted(existing_locs)
    )
def launch_point_zmatrices(ts_dct, mod_thy_info,
                           scn_alg, scn_fs, cnf_fs, cnf_locs):
    """ Determine the Z-Matrix(es) to launch an IRC from.

        Try to find a saddle point at the input level first; fall back to
        the scan grid maxima when none is found.
            'auto': use sadpt, then max series
            'sadpt': sadpt
            'series': max series

        :return: tuple of Z-Matrices (may be empty if nothing was found)
    """
    # Fixed: initialise so the function cannot raise NameError when the
    # saddle-point Z-Matrix is missing and no fallback applies.
    irc_zmas = ()
    if 'sadpt' in scn_alg:
        _, cnf_save_fs = cnf_fs
        zma_locs = (ts_dct['zma_idx'],)
        zma_fs = autofile.fs.zmatrix(cnf_save_fs[-1].path(cnf_locs))
        if zma_fs[-1].file.zmatrix.exists(zma_locs):
            geo_path = zma_fs[-1].file.zmatrix.path(zma_locs)
            ioprinter.info_message(
                ' - Z-Matrix found.')
            ioprinter.info_message(
                f' - Reading Z-Matrix from path {geo_path}')
            irc_zmas = (zma_fs[-1].file.zmatrix.read(zma_locs),)
    # Fall back to the grid-maximum series when the saddle-point search
    # produced nothing. (A leftover debug print + sys.exit() that killed
    # the process on this path was removed.)
    if not irc_zmas and 'max' in scn_alg:
        _, scn_save_fs = scn_fs
        zma, zrxn = ts_dct['zma'], ts_dct['zrxn']
        scan_inf = automol.reac.build_scan_info(zrxn, zma)
        coord_names, constraint_dct, coord_grids, _ = scan_inf
        irc_zmas = rxngrid.grid_maximum_zmatrices(
            zrxn.class_, zma, coord_grids, coord_names, scn_save_fs,
            mod_thy_info, constraint_dct, series='full-n1')
    return irc_zmas
| 2.265625 | 2 |
tools/reval_discovery.py | AdilSiddiqui131/OIM | 231 | 12772556 | <filename>tools/reval_discovery.py
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Reval = re-eval. Re-evaluate saved detections."""
import _init_paths
from fast_rcnn.config import cfg
from datasets.factory import get_imdb
import cPickle
import os, sys, argparse
import numpy as np
def parse_args():
    """Parse input arguments for re-evaluation; print help and exit when
    no arguments are supplied."""
    parser = argparse.ArgumentParser(description='Re-evaluate results')
    parser.add_argument('output_dir', nargs=1, help='results directory',
                        type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to re-evaluate',
                        default='voc_2007_trainval', type=str)
    parser.add_argument('--comp', dest='comp_mode', help='competition mode',
                        action='store_true')

    # No arguments at all: show usage and bail out
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()
def from_dets(imdb_name, output_dir, args):
    # Load the saved discovery detections and re-run the dataset
    # evaluation on them. NOTE: Python 2 code (print statement, cPickle).
    imdb = get_imdb(imdb_name)
    imdb.competition_mode(args.comp_mode)
    # Detections were pickled by the original evaluation run
    with open(os.path.join(output_dir, 'discovery.pkl'), 'rb') as f:
        dets = cPickle.load(f)
    print 'Evaluating discovery'
    imdb.evaluate_discovery(dets, output_dir)
if __name__ == '__main__':
    # Parse CLI args, resolve the results directory, and re-evaluate.
    args = parse_args()
    output_dir = os.path.abspath(args.output_dir[0])
    imdb_name = args.imdb_name
    from_dets(imdb_name, output_dir, args)
| 2.3125 | 2 |
test/arch/ia32/test_mov_add.py | Flopz-Project/flopz | 7 | 12772557 | <gh_stars>1-10
from flopz.arch.ia32.ia32_generic_arch import IA32GenericArchitecture
from flopz.arch.ia32.auto_instructions import *
import pytest
def test_mov():
    """Check the byte encodings produced for MOV across reg/mem/imm
    operand combinations and operand sizes (16/32/64-bit)."""
    arch = IA32GenericArchitecture()
    """ Reg to Reg """
    mov64 = Mov(arch.rax, arch.rbx)
    assert(mov64.bytes() == b'\x48\x89\xd8')
    # ModRM fields: rm selects rax (0), reg selects rbx (3)
    assert(mov64.expand()[0].modrm._rm == 0)
    assert(mov64.expand()[0].modrm._reg == 3)
    # Extended registers (r8-r15) change the REX prefix byte
    mov64 = Mov(arch.r8, arch.r11)
    assert(mov64.bytes() == b'\x4D\x89\xd8')
    mov32 = Mov(arch.eax, arch.ebx)
    assert(mov32.bytes() == b'\x89\xd8')
    assert(mov32.expand()[0].modrm._rm == 0)
    assert(mov32.expand()[0].modrm._reg == 3)
    # 16-bit operands carry the 0x66 operand-size prefix
    mov16 = Mov(arch.ax, arch.bx)
    assert(mov16.bytes() == b'\x66\x89\xd8')
    """ Reg to Mem """
    # Memory operand: base + index*scale + displacement
    mov = Mov(arch.ma(64, arch.rax + arch.rbx * 4 - 64), arch.r12)
    assert(mov.bytes() == b'\x4c\x89\x64\x98\xc0')
    """ Mem to Reg """
    mov = Mov(arch.si, arch.ma(16, arch.r13 + arch.rbp * 8 - 107))
    assert(mov.bytes() == b'\x66\x41\x8B\x74\xED\x95')
    """ Imm to Reg """
    movi = Mov(arch.r12w, 28500)
    assert (movi.bytes() == b'\x66\x41\xBC\x54\x6f')
    # 64-bit destination widens the immediate to 8 bytes
    movi2 = Mov(arch.rcx, 28500)
    assert (movi2.bytes() == b'\x48\xB9\x54\x6f\x00\x00\x00\x00\x00\x00')
    """ Imm to Mem """
    movi = Mov(arch.ma(32, arch.r13 + arch.rcx * 8 - 75), 0x10000000)
    assert(movi.bytes() == b'\x41\xc7\x44\xcd\xb5\x00\x00\x00\x10')
    movi2 = Mov(arch.ma(64, arch.r11 + arch.rcx*4 - 30), 0x10000000)
    assert(movi2.bytes() == b'\x49\xc7\x44\x8b\xe2\x00\x00\x00\x10')
def test_add():
    """Check the byte encodings produced for ADD across reg/mem/imm
    operand combinations."""
    arch = IA32GenericArchitecture()
    """ Imm to Reg """
    addi = Add(arch.si, 32000)
    assert (addi.bytes() == b'\x66\x81\xc6\x00\x7d')
    addi = Add(arch.rdx, 0x2400)
    assert(addi.bytes() == b'\x48\x81\xc2\x00\x24\x00\00')
    """ Imm to Mem """
    # 8-bit memory destination with a small immediate
    addi = Add(arch.ma(8, arch.r14 + arch.rdi * 4 - 123), 2)
    assert(addi.bytes() == b'\x41\x80\x44\xbe\x85\x02')
    addi = Add(arch.ma(64, arch.r14 + arch.r10 * 8), 0x10203040)
    assert(addi.bytes() == b'\x4b\x81\x04\xd6\x40\x30\x20\x10')
    """ Reg to Reg """
    add = Add(arch.si, arch.r12w)
    assert(add.bytes() == b'\x66\x44\x01\xe6')
    """ Reg to Mem """
    add = Add(arch.ma(32, arch.r12 + arch.rcx * 8 - 99), arch.r8d)
    assert(add.bytes() == b'\x45\x01\x44\xcc\x9d')
    """ Mem to Reg """
    add = Add(arch.rcx, arch.ma(64, arch.r11 + arch.rdx*8 - 88))
    assert(add.bytes() == b'\x49\x03\x4c\xd3\xa8')
| 1.921875 | 2 |
backend/extractbeta.py | rachitnaruzu/resultnotifier | 0 | 12772558 | <gh_stars>0
import re
import requests as rq
from bs4 import BeautifulSoup
'''
return a list of all the currenttly available data (result, notice, schedule, etc.....
implement this method to return a list in this format (ex: a webscrape logic):
[
{
'displayname': '2015 CSE VII SEM', # the name which will be displayed on app
'url': 'http://result.example.com/cse7sem2015',
'fileid' : 'cse7sem2015', # unique identifier
'datatype' : 'result', # could be notice or schedule.. etc.
'filetype' : 'pdf'
},
.
.
.
]
'''
def fetch():
    """Return the list of currently available data items.

    Each item is expected to be a dict with keys ``displayname``, ``url``,
    ``fileid``, ``datatype`` and ``filetype`` (see the module docstring).
    The web-scrape logic is not implemented yet, so an empty list is
    returned instead of ``None`` so callers can iterate safely.
    """
    # TODO: implement the scrape described in the module docstring.
    return []
backend/orders/api.py | hbutau/product-shop | 0 | 12772559 | <reponame>hbutau/product-shop
from typing import List
from django.shortcuts import get_object_or_404
from ninja import Router
from .models import Order, LineItem
from .schema import OrderSchemaIn, OrderSchemaOut
router = Router()
@router.post("")
def create_order(request, payload: OrderSchemaIn):
    """Create an Order plus its nested line items; return the new id."""
    order_data = payload.dict()
    # Line items are created separately through the order's related manager
    lineitems = order_data.pop("order_items")
    order = Order.objects.create(**order_data)
    for order_lines in lineitems:
        order.order_items.create(**order_lines)
    return {"id": order.id}
@router.get("", response=List[OrderSchemaOut])
def list_orders(request):
    """Return every order."""
    return Order.objects.all()
@router.get("/{order_id}", response=OrderSchemaOut)
def get_order(request, order_id: int):
    """Fetch a single order; 404 when it does not exist."""
    return get_object_or_404(Order, id=order_id)
@router.put("/{order_id}")
def update_order(request, order_id: int, payload: OrderSchemaIn):
    """Replace an order's line items with those in the payload.

    Line items on the order whose ids are not referenced by the payload
    are deleted; the remaining ones are updated or created.
    """
    update_data = payload.dict()
    lineitems = update_data.pop("order_items")
    # Product ids that appear in the incoming payload
    wanted_items = [product.get("product_id") for product in lineitems if product.get("product_id", None)]
    # NOTE(review): this compares LineItem primary-key ``id`` against
    # *product* ids collected above -- likely the source of the TODO below.
    LineItem.objects.filter(order=order_id).exclude(id__in=wanted_items).delete()
    for line in lineitems:
        product_id = line.pop('product_id')
        # TODO: Resolve error
        obj, created = LineItem.objects.update_or_create(product_id=product_id, order=Order.objects.get(id=order_id), defaults=line)
    # NOTE(review): response key "succes" is misspelled but may be relied
    # on by clients; fix in a coordinated API change.
    return {"succes": "True"}
@router.delete("/{order_id}")
def destroy_order(request, order_id: int):
    """Delete the order; 404 when it does not exist."""
    get_object_or_404(Order, id=order_id).delete()
    return {"success": "True"}
| 2.171875 | 2 |
tests/integration/unit_test/test_unit_test_python2_7.py | aahung/aws-sam-cli-app-templates | 0 | 12772560 | from tests.integration.base import Base
# Runs the shared Python unit-test base against the python2.7
# "hello" SAM app template.
class UnitTest_python2_7_cookiecutter_aws_sam_hello_python(Base.PythonUnitTestBase):
    python_executable = "python2.7"  # interpreter used to run the template's tests
    directory = "python2.7/cookiecutter-aws-sam-hello-python"
# Runs the shared Python unit-test base against the python2.7
# Step Functions sample-app SAM template.
class UnitTest_python2_7_cookiecutter_aws_sam_step_functions_sample_app(Base.PythonUnitTestBase):
    python_executable = "python2.7"  # interpreter used to run the template's tests
    directory = "python2.7/cookiecutter-aws-sam-step-functions-sample-app"
| 1.609375 | 2 |
termssrv/terms/models.py | rimvaliulin/termssrv | 0 | 12772561 | <filename>termssrv/terms/models.py
from django.db import models
from django.utils.translation import gettext_lazy as _
class Book(models.Model):
    """
    The reference book of terms.

    The version of the reference book must be unique
    within that reference book.

    Name, short name are required.
    Description is optional.
    """
    name = models.CharField(_('name'), max_length=150)
    short_name = models.CharField(_('short name'), max_length=50)
    description = models.TextField(_('description'), blank=True)

    class Meta:
        verbose_name = _('Reference Book')
        verbose_name_plural = _('Reference Books')
        constraints = [
            # No two books may share the same (short_name, name) pair
            models.UniqueConstraint(
                fields=['short_name', 'name'],
                name='unique_names',
            ),
        ]

    def __str__(self):
        return self.name
class Version(models.Model):
    """
    The version of the reference book.

    Name and publication date are required; version names are
    unique per book.
    """
    book = models.ForeignKey(
        Book, on_delete=models.CASCADE, verbose_name=_('version')
    )
    name = models.CharField(_('name'), max_length=50)
    pub_date = models.DateField(_('date'))

    def __str__(self):
        return f'{self.book} ({self.name})'

    class Meta:
        verbose_name = _('Version')
        verbose_name_plural = _('Versions')
        constraints = [
            # A version name may occur only once per book
            models.UniqueConstraint(
                fields=['name', 'book'],
                name='unique_version_name',
            ),
        ]
class Term(models.Model):
    """
    The element of the reference book.

    Code and value are required; codes are unique within a version.
    """
    version = models.ForeignKey(
        Version, on_delete=models.CASCADE, verbose_name=_('version')
    )
    code = models.CharField(_('code'), max_length=50)
    value = models.CharField(_('value'), max_length=100)

    class Meta:
        verbose_name = _('Term')
        verbose_name_plural = _('Terms')
        constraints = [
            # A code may occur only once per reference-book version
            models.UniqueConstraint(
                fields=['code', 'version'],
                name='unique_term_per_version',
            ),
        ]

    def __str__(self):
        return f'{self.code} ({self.version})'
| 2.453125 | 2 |
pysec/core/ctx.py | benhunter/owasp-pysec | 416 | 12772562 | <filename>pysec/core/ctx.py
# Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
import sys
from pysec.core import Object
__all__ = 'Context',
CONTEXTS = {'file', 'cmd', 'html', 'js'}
class Context(Object):
    """A named security context pushed onto the caller's local ``__ctx__``
    stack while a ``with`` block is active.

    NOTE(review): mutating ``f_locals`` of a *function* frame is not
    guaranteed to persist in CPython (it is a snapshot); this mechanism is
    reliable only for module-level frames -- confirm intended usage.
    """

    def __init__(self, name='none', info=None, locs=None):
        name = str(name)
        self.name = name
        self.info = {} if info is None else dict(info)
        # Register the name in the global set of known contexts
        CONTEXTS.add(name)

    def __enter__(self):
        frame = sys._getframe().f_back
        contexts = frame.f_locals.setdefault('__ctx__', [])
        contexts.append(self)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Fixed: frames do not expose locals as attributes; the old
        # ``frame.__ctx__.pop()`` raised AttributeError. Pop from f_locals.
        contexts = sys._getframe().f_back.f_locals.get('__ctx__')
        if contexts:
            contexts.pop()
        return 0  # falsy: never suppress exceptions

    def contexts(self):
        """Yield every Context found on the call stack, innermost first."""
        frame = sys._getframe().f_back
        while frame:
            ls = frame.f_locals.get('__ctx__', None)
            if ls:
                for ctx in ls:
                    yield ctx
            # Fixed: walk *up* the stack; the old code re-read the same
            # frame every iteration and looped forever.
            frame = frame.f_back
| 2.359375 | 2 |
Library/Simulation.py | jonestcharles/physics-simulation | 0 | 12772563 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 18:03:59 2016
@author: jones_000
"""
import copy as cp
import numpy as np
import math
import Solver
import Physics
import Body
import vector
import matplotlib.pyplot as plt
class Simulation(object):
    '''Parent Simulation class.

    Attributes
    ----------
    stop_condition : callable
        called with the list of body snapshots; the sim advances while
        it returns True
    physics : Physics
        the physics being simulated with a solver
    body : array, GravBody
        the array of bodies with position, velocity, and mass
    '''
    def __init__(self, stop_condition=None, physics=None, body=None):
        # Normalise ``body`` to a list so the rest of the code can
        # treat it uniformly.
        self.body = body if type(body) is list else [body]
        self.physics = physics
        self.stop_condition = stop_condition

    def get_results(self):
        '''Advance the sim until stop_condition fails; return (t, bodies).'''
        current = self.body
        clock = 0
        self.bodies = [cp.deepcopy(self.body)]
        self.t = [0]
        while self.stop_condition(self.bodies):
            current, clock = self.physics.advance(current, clock)
            self.bodies.append(cp.deepcopy(current))
            self.t.append(clock)
        return self.t, self.bodies
class OrbitSim(Simulation):
    '''Drives the Central Grav sim for orbits.

    Unlike ``Simulation``, results accumulate across repeated ``advance``
    calls, and stop conditions are called as ``f(time, bodies)``.

    Attributes
    ----------
    stop_condition : callable
        sets the stop condition for simulation
    physics : Physics
        the physics being simulated with a solver
    body : GravBody
        the body with position, velocity, and mass
    apnd : bool
        if True, every step is recorded; if False, only the final state
    '''
    def __init__(self, stop_condition=None, physics=None, body=None, apnd=True):
        Simulation.__init__(self, stop_condition, physics, body)
        self.bodies = [cp.deepcopy(self.body)]
        self.t = [0]
        self.apnd = apnd

    def get_results(self):
        '''Returns time and bodies lists'''
        return self.t, self.bodies

    def advance(self, time=None, step=None):
        '''Advances sim to a certain time or step.

        Parameters
        ----------
        time : float
            the target time for the sim
        step : float
            the number of steps to run
        '''
        if time is not None:
            # Step to one step shy of the target, then take a single
            # shortened step to land exactly on ``time``; restore stepsize.
            dt = self.physics.solver.stepsize
            time = time - dt
            self.run(self.time_stop(time))
            self.physics.solver.stepsize = time + dt - self.t[-1]
            self.run(self.time_stop(time + dt))
            self.physics.solver.stepsize = dt
        if step is not None:
            self.run(self.step_stop(step))
        if time is None and step is None:
            self.run(self.stop_condition)

    def step_stop(self, step):
        '''Reference to stop function to end at a certain step'''
        def stop(time, bodies):
            # Fixed: the solver attribute is ``stepsize`` (as used in
            # ``advance``); ``step_size`` raised AttributeError here.
            steps = math.floor(time / self.physics.solver.stepsize)
            return steps < step
        return stop

    def time_stop(self, goal):
        '''Reference to a stop function to end at a certain time'''
        def stop(time, bodies):
            return time < goal
        return stop

    def run(self, stop_condition):
        '''Internal run function that advances bodies.

        Parameters
        ----------
        stop_condition : callable
            the stop function to end the sim; called as f(time, bodies)
        '''
        time = self.t[-1]
        while stop_condition(time, self.bodies):
            self.body, time = self.physics.advance(self.body, time)
            if self.apnd:
                self.bodies.append(cp.deepcopy(self.body))
                self.t.append(time)
        if not self.apnd:
            # Record only the final state when not appending per-step
            self.bodies.append(cp.deepcopy(self.body))
            self.t.append(cp.deepcopy(time))
class BinarySim(OrbitSim):
    '''Takes in Elliptical Inputs and produces a Binary Sim.

    Attributes
    ----------
    M1 : float
        mass of the first body
    M2 : float
        mass of the second body
    a1 : float
        the semi-major axis of the first body's orbit
    e : float
        the orbits' eccentricity
    '''
    def __init__(self,M1=None,M2=None,a1=1,e=0,apnd=True):
        '''Build GravBodies'''
        # G = 4*pi^2 -- presumably AU / year / solar-mass units; confirm
        self.G = 4*math.pi**2.
        # Perihelion distance of body 1 from the center of mass
        r1p = a1-e*a1
        # Body 2 placed opposite, scaled by the mass ratio, so the
        # center of mass stays at the origin
        r2p = -(M1/M2)*r1p
        # Perihelion speed for the two-body problem at eccentricity e
        v1p = math.sqrt(((self.G*M2**3.)/(a1*(M1+M2)**2.))*((1.+e)/(1.-e)))
        v2p = -(M1/M2)*v1p
        r1 = vector.Vector(r1p,0.,0.)
        r2 = vector.Vector(r2p,0.,0.)
        v1 = vector.Vector(0.,v1p,0.)
        v2 = vector.Vector(0.,v2p,0.)
        body1 = Body.GravBody(M1,r1,v1)
        body2 = Body.GravBody(M2,r2,v2)
        '''Set up Sim'''
        # NOTE: OrbitSim.__init__ is not called; the same attributes are
        # initialised directly here instead.
        self.body = [body1,body2]
        solver = Solver.RK2(0.01)
        self.physics = Physics.NBody(solver,self.G)
        self.bodies = [cp.deepcopy(self.body)]
        self.t = [0]
        self.apnd = apnd
class ExoSim(BinarySim):
    '''Runs a sim for an exoplanet search.

    Builds a star + planet binary rotated into the viewer's frame (by the
    angle of periastron and the inclination) and can produce a transit
    light curve.

    Attributes
    ----------
    Ms : float
        mass of the star
    Mp : float
        mass of the planet
    ap : float
        the semi-major axis of the planet's orbit
    e : float
        the orbits' eccentricity
    Rs : float
        the radius of the star in Solar Radii
    Rp : float
        the radius of the planet in Solar Radii
    omega : float
        the angle of periastron
    i : float
        the angle of inclination
    '''
    def __init__(self,Ms=None,Mp=None,ap=None,e=0,Rs=None,Rp=None,omega=None,i=None,apnd=True):
        '''Save Values'''
        self.apnd = apnd
        self.Rs = Rs
        self.Rp = Rp
        # G = 4*pi^2 -- presumably AU / year / solar-mass units; confirm
        self.G = 4*math.pi**2.
        # Orbital period from Kepler's third law
        self.period = np.sqrt(((ap**3.)*((Ms+Mp)**2.))/Ms**3.)
        '''Set up Vectors'''
        # Periastron separations measured from the center of mass
        rpp = ap-e*ap
        rsp = -(Mp/Ms)*rpp
        # Periastron speeds for the two-body problem
        vpp = math.sqrt(((self.G*Ms**3.)/(ap*(Ms+Mp)**2.))*((1.+e)/(1.-e)))
        vsp = -(Mp/Ms)*vpp
        '''Rotate Vectors into Viewer frame'''
        rs = vector.Vector(rsp,0.,0.)
        rs.rot_z(omega)
        rs.rot_x(i)
        rp = vector.Vector(rpp,0.,0.)
        rp.rot_z(omega)
        rp.rot_x(i)
        vs = vector.Vector(0.,vsp,0.)
        vs.rot_z(omega)
        vs.rot_x(i)
        vp = vector.Vector(0.,vpp,0.)
        vp.rot_z(omega)
        vp.rot_x(i)
        '''Set Up Sim'''
        # NOTE: BinarySim.__init__ is not called; attributes are set here.
        star = Body.GravBody(Ms,rs,vs)
        planet = Body.GravBody(Mp,rp,vp)
        self.body = [star,planet]
        solver = Solver.RK2(0.01)
        self.physics = Physics.NBody(solver,self.G)
        self.bodies = [cp.deepcopy(self.body)]
        self.t = [0]

    def advance(self,time=None):
        '''Advances Sim to a certain time or for one orbital period.

        Parameters
        ----------
        time : float
            the target time for the simulation, defaults to one orbital period
        '''
        if time == None:
            time = self.period
        # Step to one step shy of the target, then take one shortened
        # step to land exactly on ``time``; restore the stepsize after.
        dt = self.physics.solver.stepsize
        time = time - dt
        self.run(self.time_stop(time))
        self.physics.solver.stepsize = time + dt - self.t[-1]
        self.run(self.time_stop(time+dt))
        self.physics.solver.stepsize = dt

    def light_curve(self,time,bodies):
        '''Creates and plots an exoplanet transit light curve for the orbit.

        Parameters
        ----------
        time : list, float
            a list of the independent variable, time
        bodies : list, GravBody
            a list of the GravBodies at each time in time list

        Returns
        -------
        a graph of the light curve
        '''
        # Star-to-planet separation vectors at every saved step
        r_list = np.array([b[0].position - b[1].position for b in bodies])
        p = np.array([r.cart for r in r_list])
        # Projected (sky-plane) separation of the two disks
        d = np.sqrt((p[:,0])**2. + (p[:,1])**2.)
        # Circle-circle overlap ("lens") geometry: chord position/height
        # and the half-angles subtended on each disk
        x = (self.Rp**2. - self.Rs**2. + d**2.)/(2.*d)
        h = np.sqrt(self.Rp**2. - x**2.)
        theta = np.arccos(x/self.Rp)
        psi = np.arccos((d-x)/self.Rs)
        '''Areas of Arcs and Triangles'''
        a1 = 0.5*x*h
        ap = 0.5*theta*(self.Rp**2.)
        A1 = ap - a1
        a2 = 0.5*(d-x)*h
        As = 0.5*psi*(self.Rs**2.)
        A2 = As -a2
        A = 2*(A1 + A2)
        '''Fix Failures'''
        # Disks fully separated: no area blocked
        A[d>=(self.Rp+self.Rs)] = 0.
        # Planet disk entirely inside the stellar disk: full planet area
        A[d<=(self.Rs-self.Rp)] = np.pi*(self.Rp**2.)
        # Planet behind the star (line-of-sight z <= 0): no transit
        A[p[:,2]<=0] = 0
        # Fractional stellar intensity after subtracting the covered area
        I = ((np.pi*self.Rs**2.) - A)/(np.pi*self.Rs**2.)
        plt.figure()
        plt.plot(time,I,'.')
        plt.title('Exo Planet Light Curve')
        plt.xlabel('Time [Years]')
        plt.ylabel('Intensity')
| 3.140625 | 3 |
src/tests/sql_regression/test_python.py | margrit2103/github-actions-for-ci | 0 | 12772564 | from datetime import datetime
import sys
import getopt
import glob
import psycopg2
def get_parameters():
    """Parse command-line long options into a parameters dict.

    Recognized options: --output-dir (defaults to '.'), --db-tag, --test-dir.

    Returns:
        dict mapping the option name (without leading dashes) to its value.
    """
    all_pars, _remainder = getopt.getopt ( sys.argv[1:], '', ['output-dir=','db-tag=','test-dir='] )
    parameters = {'output-dir':'.'}
    print ( f"Parameters {all_pars}" )
    for opt, arg in all_pars:
        print( f"""{opt} , {arg}""" )
        # BUG FIX: the original tested `opt in ('--output-dir')`; the
        # parentheses do NOT make a tuple, so `in` performed a substring
        # test against a plain string. Compare for equality instead.
        if opt == '--output-dir':
            parameters['output-dir'] = arg
        if opt == '--db-tag':
            parameters['db-tag'] = arg
        if opt == '--test-dir':
            parameters['test-dir'] = arg
    return parameters
def do_connection():
    """Open and return a psycopg2 connection to the local goodxweb database."""
    connection_settings = {
        "user": "postgres",
        "password": "<PASSWORD>",
        "host": "localhost",
        "port": "5432",
        "database": "goodxweb",
    }
    return psycopg2.connect(**connection_settings)
def main():
    """Run every SQL regression script in the configured test directory.

    Writes a small test_info.txt marker file, then executes each *.sql file
    against the database, reporting (but not aborting on) per-file failures.
    """
    parameters = get_parameters()
    now = datetime.now()  # current date and time
    date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
    print(f"{parameters['output-dir']}test_info")
    # `with` closes the file even if write() raises (original also called
    # f.close() redundantly inside the with-block).
    with open(f"{parameters['output-dir']}test_info.txt", 'w') as f:
        f.write(f"Test run on {date_time} \nDocker {parameters['db-tag']}")
    # BUG FIX: the original discarded do_connection()'s return value and then
    # referenced an undefined `connection` name (NameError at runtime).
    connection = do_connection()
    cursor = connection.cursor()
    try:
        # BUG FIX: glob.glob already returns paths joined with test-dir; the
        # original re-prefixed the directory, producing a doubled, invalid path.
        for sql_path in glob.glob(f"{parameters['test-dir']}*.sql"):
            try:
                print(sql_path)
                with open(sql_path, "r") as sql_file:
                    cursor.execute(sql_file.read())
            except (Exception, psycopg2.Error):
                print(f"Error while executing {sql_path}")
    finally:
        if connection.closed == 0:
            cursor.close()
            connection.close()
# Script entry point: run the SQL regression suite when executed directly.
if __name__ == "__main__":
    main()
elstruct/elstruct/run.py | sjklipp/autoio | 0 | 12772565 | """ core run function
"""
from autorun import from_input_string
def direct(input_writer, script_str, run_dir, prog,
           geo, charge, mult, method, basis, **kwargs):
    """ Write an electronic structure input file and run it immediately.

    :param input_writer: elstruct writer module function for desired job
    :type input_writer: elstruct function
    :param script_str: string of bash script that contains
        execution instructions for the electronic structure job
    :type script_str: str
    :param run_dir: name of directory to run electronic structure job
    :type run_dir: str
    :param prog: electronic structure program to run
    :type prog: str
    :param geo: cartesian or z-matrix geometry
    :type geo: tuple
    :param charge: molecular charge
    :type charge: int
    :param mult: spin multiplicity
    :type mult: int
    :param method: electronic structure method
    :type method: str
    :returns: the input string and the output string
    :rtype: (str, str)
    """
    # Build the program-specific input deck from the job parameters.
    inp_str = input_writer(
        prog=prog,
        geo=geo, charge=charge, mult=mult, method=method, basis=basis,
        **kwargs)
    # Execute in run_dir; autorun returns one output string per output file.
    out_strs = from_input_string(script_str, run_dir, inp_str)
    return inp_str, out_strs[0]
| 2.875 | 3 |
themis/pwm.py | celskeggs/themis | 1 | 12772566 | <gh_stars>1-10
from collections import namedtuple
import themis.channel
# all in milliseconds
# (the five pulse-width fields are milliseconds; frequency_hz is in hertz)
SpeedControlSpecs = namedtuple("SpeedControlSpecs", "rev_max rev_min rest fwd_min fwd_max frequency_hz")

# from WPILib HAL
# Per-controller PWM calibration: full-reverse, minimum-reverse, neutral,
# minimum-forward, and full-forward pulse widths, plus the output frequency.
TALON_SR = SpeedControlSpecs(0.989, 1.487, 1.513, 1.539, 2.037, 200.0)
JAGUAR = SpeedControlSpecs(0.697, 1.454, 1.507, 1.55, 2.31, 198.0)
VICTOR_OLD = SpeedControlSpecs(1.026, 1.49, 1.507, 1.525, 2.027, 100.0)
SERVO = SpeedControlSpecs(0.6, 1.6, 1.6, 1.6, 2.6, 50.0)  # essentially just linear from 0.6 to 2.6
VICTOR_SP = SpeedControlSpecs(0.997, 1.48, 1.50, 1.52, 2.004, 200.0)
SPARK = SpeedControlSpecs(0.999, 1.46, 1.50, 1.55, 2.003, 200.0)
SD540 = SpeedControlSpecs(0.94, 1.44, 1.50, 1.55, 2.05, 200.0)
TALON_SRX = SpeedControlSpecs(0.997, 1.48, 1.50, 1.52, 2.004, 200.0)
def filter_to(spec: SpeedControlSpecs, out: themis.channel.FloatOutput) -> themis.channel.FloatOutput:
    """Wrap *out* in a "pwm_map" filter calibrated from *spec*'s pulse widths."""
    calibration = (spec.rev_max, spec.rev_min, spec.rest, spec.fwd_min, spec.fwd_max)
    return out.filter("pwm_map", (), calibration)
| 2.25 | 2 |
examples/pytorch/vision/Face_Detection/eval.py | cw18-coder/EdgeML | 719 | 12772567 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import os
import time
import argparse
import numpy as np
from PIL import Image
import cv2
from data.choose_config import cfg
cfg = cfg.cfg
from utils.augmentations import to_chw_bgr
from importlib import import_module
def str2bool(v):
    """Interpret the strings "yes"/"true"/"t"/"1" (any case) as True."""
    return v.lower() in {"yes", "true", "t", "1"}
# Command-line interface for the face-detection demo.
parser = argparse.ArgumentParser(description='face detection demo')
parser.add_argument('--save_dir', type=str, default='results/',
                    help='Directory for detect result')
parser.add_argument('--model', type=str,
                    default='weights/rpool_face_c.pth', help='trained model')
parser.add_argument('--thresh', default=0.17, type=float,
                    help='Final confidence threshold')
parser.add_argument('--multigpu',
                    default=False, type=str2bool,
                    help='Specify whether model was trained with multigpu')
parser.add_argument('--model_arch',
                    default='RPool_Face_C', type=str,
                    choices=['RPool_Face_C', 'RPool_Face_Quant', 'RPool_Face_QVGA_monochrome', 'RPool_Face_M4'],
                    help='choose architecture among rpool variants')
parser.add_argument('--image_folder', default=None, type=str, help='folder containing images')
parser.add_argument('--save_traces',
                    default=False, type=str2bool,
                    help='Specify whether to save input output traces')

args = parser.parse_args()

# Ensure the output directory exists before any detection results are written.
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

use_cuda = torch.cuda.is_available()

# Make newly created tensors default to GPU memory when a GPU is available.
if use_cuda:
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
def detect(net, img_path, thresh, save_traces):
    """Run face detection on one image and write an annotated copy to args.save_dir.

    Returns (x, loc, conf) -- the network input tensor and raw outputs --
    only when save_traces is True; otherwise returns None.
    """
    img = Image.open(img_path)
    img = img.convert('RGB')
    img = np.array(img)
    height, width, _ = img.shape
    # Shrink factor so the image area matches QVGA (320x240) or VGA (640x480).
    if os.environ['IS_QVGA_MONO'] == '1':
        max_im_shrink = np.sqrt(
            320 * 240 / (img.shape[0] * img.shape[1]))
    else:
        max_im_shrink = np.sqrt(
            640 * 480 / (img.shape[0] * img.shape[1]))
    # When tracing, force an exact fixed resolution instead of area-matched scaling.
    if save_traces==True and os.environ['IS_QVGA_MONO'] == '1':
        image = cv2.resize(img, (320, 240))
    elif save_traces==True:
        image = cv2.resize(img, (640, 480))
    else:
        image = cv2.resize(img, None, None, fx=max_im_shrink,
                        fy=max_im_shrink, interpolation=cv2.INTER_LINEAR)
    # HWC RGB -> CHW BGR, mean-subtracted, then channel-reordered back.
    x = to_chw_bgr(image)
    x = x.astype('float32')
    x -= cfg.img_mean
    x = x[[2, 1, 0], :, :]
    if cfg.IS_MONOCHROME == True:
        # Luma conversion (ITU-R 601 weights); adds batch and channel dims.
        x = 0.299 * x[0] + 0.587 * x[1] + 0.114 * x[2]
        x = torch.from_numpy(x).unsqueeze(0).unsqueeze(0)
    else:
        x = torch.from_numpy(x).unsqueeze(0)
    if use_cuda:
        x = x.cuda()
    t1 = time.time()
    y, loc, conf = net(x)
    detections = y.data
    # Scale factors to map normalized box coords back to original pixels.
    scale = torch.Tensor([img.shape[1], img.shape[0],
                          img.shape[1], img.shape[0]])

    img = cv2.imread(img_path, cv2.IMREAD_COLOR)

    for i in range(detections.size(1)):
        j = 0
        # Detections are assumed sorted by score; stop at the first below thresh.
        while detections[0, i, j, 0] >= thresh:
            score = detections[0, i, j, 0]
            pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
            left_up, right_bottom = (pt[0], pt[1]), (pt[2], pt[3])
            j += 1
            cv2.rectangle(img, left_up, right_bottom, (0, 0, 255), 2)
            conf_score = "{:.3f}".format(score)
            point = (int(left_up[0]), int(left_up[1] - 5))
            cv2.putText(img, conf_score, point, cv2.FONT_HERSHEY_COMPLEX,
                        0.6, (0, 255, 0), 1)

    t2 = time.time()
    print('detect:{} timer:{}'.format(img_path, t2 - t1))
    cv2.imwrite(os.path.join(args.save_dir, os.path.basename(img_path)), img)

    if save_traces == True:
        return x, loc, conf
if __name__ == '__main__':
    # Build the requested architecture and load the trained checkpoint into it.
    module = import_module('models.' + args.model_arch)
    net = module.build_s3fd('test', cfg.NUM_CLASSES)

    if args.multigpu == True:
        net = torch.nn.DataParallel(net)

    checkpoint_dict = torch.load(args.model)
    model_dict = net.state_dict()
    model_dict.update(checkpoint_dict)
    net.load_state_dict(model_dict)

    net.eval()

    if use_cuda:
        net.cuda()
        # BUG FIX: was `cudnn.benckmark = True` (typo), which merely created
        # an unused attribute and never enabled cuDNN autotuner benchmarking.
        cudnn.benchmark = True

    img_path = args.image_folder
    img_list = [os.path.join(img_path, x)
                for x in os.listdir(img_path)]

    # Collect per-image traces (inputs and raw outputs) when requested.
    x = []
    loc = []
    conf = []
    for path in img_list:
        if args.save_traces == True:
            x_temp, loc_temp, conf_temp = detect(net, path, args.thresh, args.save_traces)
            x.append(x_temp)
            loc.append(loc_temp)
            conf.append(conf_temp)
        else:
            detect(net, path, args.thresh, args.save_traces)

    if args.save_traces == True:
        np.save('trace_inputs.npy', torch.cat(x).cpu().detach().numpy())
        np.save('trace_outputs.npy', torch.cat([torch.cat(conf), torch.cat(loc)], dim=1).cpu().detach().numpy())
| 2.15625 | 2 |
testoutLF/lanefollowing/ros2_ws/src/lane_following/train/train.py | dr563105/basicad_framework | 0 | 12772568 | import os
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Activation, Flatten, Lambda, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from utils import load_multi_dataset, mkdir_p, HDF5_PATH, MODEL_PATH
from datetime import datetime
import time
from sklearn.model_selection import train_test_split
# Let TensorFlow allocate GPU memory on demand instead of grabbing it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)

print('Loading data from HDF5...')
X_data, Y_data = load_multi_dataset(os.path.join(HDF5_PATH, 'train_h5_list.txt'))
# X_test, Y_test = load_multi_dataset(os.path.join(HDF5_PATH, 'test_h5_list.txt'))
print('Number of images:', X_data.shape[0])
print('Number of labels:', Y_data.shape[0])

print('Splitting data into training set and testing set....')
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, test_size=0.2, random_state=42)
print('X_train shape:', X_train.shape)
print('Y_train shape:', Y_train.shape)
print('X_test shape:', X_test.shape)
print('Y_test shape:', Y_test.shape)

# PilotNet-style CNN: in-model /255 normalization, five conv layers, then
# dense layers regressing a single (steering) value.
model = Sequential()
model.add(Lambda(lambda x: x / 255.0, input_shape=(70, 320, 3)))
model.add(Conv2D(24, (5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), padding='valid', activation='relu'))
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='valid', activation='relu'))
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='valid', activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.summary()
model.compile(optimizer=Adam(lr=1e-04, decay=0.0), loss='mse')

t0 = time.time()
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), shuffle=True, epochs=30, batch_size=128)
t1 = time.time()
print('Total training time:', t1 - t0, 'seconds')

# Save the trained model under a timestamped filename.
mkdir_p(MODEL_PATH)
model_id = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
model_file = os.path.join(MODEL_PATH, '{}.h5'.format(model_id))
model.save(model_file)
print("Training done successfully and model has been saved: {}".format(model_file))
print("Drive safely!")
| 2.703125 | 3 |
tensor2tensor/models/attention_lm_moe.py | anishsingh20/tensor2tensor | 0 | 12772569 | <reponame>anishsingh20/tensor2tensor
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self-attention based language model.
Like transformer.py, but no encoder
decoder: [Self-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# Dependency imports
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor.models import common_attention
from tensor2tensor.models import common_hparams
from tensor2tensor.models import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class AttentionLmMoe(t2t_model.T2TModel):
  """Attention net. See file docstring."""

  def model_fn_body_sharded(self, sharded_features, train):
    """Build the decoder-only body across data-parallel shards.

    Returns (decoder_output, extra_loss) where extra_loss accumulates the
    mixture-of-experts load-balancing losses.
    """
    # Remove dropout if not training
    hparams = copy.copy(self._hparams)
    if not train:
      hparams.attention_dropout = 0.
      hparams.relu_dropout = 0.
      hparams.residual_dropout = 0.
    dp = self._data_parallelism
    targets = sharded_features["targets"]
    targets = dp(tf.squeeze, targets, 2)
    (decoder_input, decoder_self_attention_bias) = dp(
        attention_lm_moe_prepare_decoder, targets, hparams)

    # Residual connection with layer norm and residual dropout.
    def residual_fn(x, y):
      return common_layers.layer_norm(x + tf.nn.dropout(
          y, 1.0 - hparams.residual_dropout))

    x = dp(tf.nn.dropout, decoder_input, 1.0 - hparams.residual_dropout)
    extra_loss = 0.0
    for layer in xrange(hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("attention"):
          y = dp(common_attention.multihead_attention,
                 x,
                 None,
                 decoder_self_attention_bias,
                 hparams.attention_key_channels or hparams.hidden_size,
                 hparams.attention_value_channels or hparams.hidden_size,
                 hparams.hidden_size,
                 hparams.num_heads,
                 hparams.attention_dropout,
                 summaries=True,
                 name="decoder_self_attention")
          x = dp(residual_fn, x, y)
        with tf.variable_scope("ffn"):
          # Layers listed in hparams.moe_layers use a mixture of experts
          # instead of the plain feed-forward block.
          if str(layer) in hparams.moe_layers.split(","):
            y, loss = common_layers.moe_layer(
                dp, self._ps_devices, x, train, hparams.hidden_size,
                hparams.moe_hidden_size, hparams.moe_n1, hparams.moe_n2,
                hparams.moe_loss_coef)
            extra_loss += loss
          else:
            y = dp(common_layers.conv_hidden_relu,
                   x,
                   hparams.filter_size,
                   hparams.hidden_size,
                   dropout=hparams.relu_dropout)
          x = dp(residual_fn, x, y)
    decoder_output = dp(tf.expand_dims, x, 2)
    return decoder_output, extra_loss
def attention_lm_moe_prepare_decoder(targets, hparams):
  """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a Tensor, containing large negative values
    to implement masked attention and possibly biases for diagonal alignments
  """
  # Lower-triangular bias enforces causal (masked) self-attention.
  decoder_self_attention_bias = (
      common_attention.attention_bias_lower_triangle(tf.shape(targets)[1]))
  # Shift targets right by one so each position predicts the next token.
  decoder_input = common_layers.shift_left_3d(targets)
  if hparams.pos == "timing":
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
  return (decoder_input, decoder_self_attention_bias)
@registry.register_hparams
def attention_lm_moe_base():
  """Baseline hyperparameters, suitable for a single GPU.

  on lm1b_16k:
     ~337M params
     1.1 steps/sec on [GeForce GTX TITAN X]

  Returns:
    a hparams object
  """
  hp = common_hparams.basic_params1()
  hp.hidden_size = 1024
  hp.batch_size = 8192
  hp.max_length = 256
  hp.dropout = 0.0
  hp.clip_grad_norm = 0.  # i.e. no gradient clipping
  hp.optimizer_adam_epsilon = 1e-9
  hp.learning_rate_decay_scheme = "noam"
  hp.learning_rate = 0.1
  hp.learning_rate_warmup_steps = 1000
  hp.initializer_gain = 1.0
  hp.num_hidden_layers = 4
  hp.initializer = "uniform_unit_scaling"
  hp.weight_decay = 0.0
  hp.optimizer_adam_beta1 = 0.9
  hp.optimizer_adam_beta2 = 0.98
  hp.num_sampled_classes = 0
  hp.label_smoothing = 0.0
  hp.shared_embedding_and_softmax_weights = int(False)
  # Model-specific hyperparameters are registered with add_hparam.
  hp.add_hparam("filter_size", 2948)
  # Comma-separated list of layer numbers; at each of these layers the
  # feed-forward block is replaced with a mixture of experts.
  hp.add_hparam("moe_layers", "2")
  # If moe_n2 is None, use a flat MoE with moe_n1 experts. If moe_n2 is an
  # integer, use a hierarchical MoE of moe_n1 groups of moe_n2 experts each.
  hp.add_hparam("moe_n1", 64)
  hp.add_hparam("moe_n2", 0)
  hp.add_hparam("moe_hidden_size", 2048)
  hp.add_hparam("moe_loss_coef", 1e-2)
  # Attention-related flags.
  hp.add_hparam("num_heads", 8)
  hp.add_hparam("attention_key_channels", 0)
  hp.add_hparam("attention_value_channels", 0)
  hp.add_hparam("attention_dropout", 0.0)
  hp.add_hparam("relu_dropout", 0.0)
  hp.add_hparam("pos", "timing")  # timing, none
  hp.add_hparam("residual_dropout", 0.1)
  return hp
@registry.register_hparams
def attention_lm_moe_small():
  """Cheap configuration for single-GPU training.

  on lm1b_16k:
     ~295M params
     2 steps/sec on [GeForce GTX TITAN X]

  Returns:
    an hparams object.
  """
  hp = attention_lm_moe_base()
  hp.num_hidden_layers = 4
  hp.hidden_size = 512
  hp.filter_size = 2048
  hp.moe_n1 = 128
  hp.moe_layers = "2"
  hp.moe_hidden_size = 2048
  return hp
@registry.register_hparams
def attention_lm_moe_large():
  """Large configuration for distributed training.

  Over 1B parameters, so multi-GPU training is required due to memory
  requirements.

  Returns:
    an hparams object.
  """
  hp = attention_lm_moe_base()
  hp.num_hidden_layers = 5
  hp.moe_layers = "3"
  hp.hidden_size = 1024
  hp.num_heads = 16
  hp.filter_size = 4096
  hp.moe_hidden_size = 4096
  hp.moe_n1 = 128
  hp.residual_dropout = 0.2
  return hp
| 1.820313 | 2 |
S4/S4 Library/simulation/objects/definition_manager.py | NeonOcean/Environment | 1 | 12772570 | <gh_stars>1-10
from _collections import defaultdict
import weakref
from sims4.tuning.instance_manager import InstanceManager
from sims4.tuning.merged_tuning_manager import UnavailablePackSafeResourceError
from sims4.tuning.tunable import TunableList, TunableReference
import build_buy
import objects.system
import paths
import protocolbuffers.FileSerialization_pb2 as file_serialization
import services
import sims4.core_services
import sims4.log
logger = sims4.log.Logger('DefinitionManager')
class TunableDefinitionList(TunableList):
    """Tunable list whose entries are references to object definitions."""

    def __init__(self, pack_safe=False, class_restrictions=(), **kwargs):
        # Each entry is a TunableReference into the definition manager;
        # pack_safe and class_restrictions are forwarded to the reference.
        super().__init__(TunableReference(description='\n            The definition of the object.\n            ', manager=services.definition_manager(), pack_safe=pack_safe, class_restrictions=class_restrictions), **kwargs)
# Fallback object-tuning instance id used when a definition has no tuning
# file of its own (tuning_file_id == 0) or its tuning fails to load.
PROTOTYPE_INSTANCE_ID = 15013
class DefinitionManager(InstanceManager):
    """Instance manager for object definitions.

    Loads OBJECTDEFINITION resources on demand, binds each to its object
    tuning class, and caches the result (optionally per object state).
    Also maintains a build/buy tag cache and, when resource reloading is
    supported, tracks which live objects depend on which definitions.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # def_id or (def_id, obj_state) -> Definition
        self._definitions_cache = {}
        # sorted tag tuple -> list of matching Definitions
        self._definitions_tag_cache = defaultdict(list)
        if paths.SUPPORT_RELOADING_RESOURCES:
            self._dependencies = {}

    def on_start(self):
        """Start the manager and prime the build/buy tag cache."""
        if paths.SUPPORT_RELOADING_RESOURCES:
            sims4.core_services.file_change_manager().create_set(sims4.resources.Types.OBJECTDEFINITION, sims4.resources.Types.OBJECTDEFINITION)
        super().on_start()
        self.refresh_build_buy_tag_cache(refresh_definition_cache=False)

    def on_stop(self):
        """Tear down the file-change watch set (if any) and stop."""
        if paths.SUPPORT_RELOADING_RESOURCES:
            sims4.core_services.file_change_manager().remove_set(sims4.resources.Types.OBJECTDEFINITION)
        super().on_stop()

    def get_changed_files(self):
        """Extend the base change list with changed definition resources."""
        changed = super().get_changed_files()
        changed.extend(sims4.core_services.file_change_manager().consume_set(sims4.resources.Types.OBJECTDEFINITION))
        return changed

    def get(self, def_id, obj_state=0, pack_safe=False, get_fallback_definition_id=True):
        """Return the cached Definition for def_id, loading it on a miss."""
        def_id = int(def_id)
        if get_fallback_definition_id:
            def_id = build_buy.get_vetted_object_defn_guid(0, def_id)
        if def_id is None:
            return
        key = (def_id, obj_state) if obj_state else def_id
        definition = self._definitions_cache.get(key)
        if definition is not None:
            return definition
        return self._load_definition_and_tuning(def_id, obj_state, pack_safe=pack_safe)

    def get_object_tuning(self, definition_id):
        """Return the tuning class for definition_id, or None if unavailable."""
        definition = self.get(definition_id)
        if definition is not None:
            return definition.cls

    @property
    def loaded_definitions(self):
        # All Definition instances currently cached.
        return self._definitions_cache.values()

    def refresh_build_buy_tag_cache(self, refresh_definition_cache=True):
        """(Re)assign build/buy tags for all definition resources on disk."""
        for key in sorted(sims4.resources.list(type=sims4.resources.Types.OBJECTDEFINITION)):
            definition = self.get(key.instance, get_fallback_definition_id=False)
            if definition is None:
                logger.error('Definition is None for instance id {}. Key: {}', key.instance, key)
            else:
                definition.assign_build_buy_tags()
        if refresh_definition_cache:
            for definition in self._definitions_cache.values():
                definition.assign_build_buy_tags()

    def register_definition(self, def_id, interested_party):
        """Record that interested_party depends on def_id (reload builds only)."""
        if paths.SUPPORT_RELOADING_RESOURCES:
            objects_with_def = self._dependencies.get(def_id)
            if objects_with_def is None:
                # WeakSet so registration does not keep game objects alive.
                objects_with_def = weakref.WeakSet()
                self._dependencies[def_id] = objects_with_def
            objects_with_def.add(interested_party)

    def unregister_definition(self, def_id, interested_party):
        """Remove interested_party from def_id's dependency set."""
        if paths.SUPPORT_RELOADING_RESOURCES:
            objects_with_def = self._dependencies.get(def_id)
            if objects_with_def is not None:
                objects_with_def.remove(interested_party)
                if not objects_with_def:
                    del self._dependencies[def_id]

    def get_definitions_for_tags_gen(self, tag_set):
        """Yield definitions carrying all tags in tag_set (result is memoized)."""
        key = tuple(sorted(tag_set))
        if key not in self._definitions_tag_cache:
            for definition in self.loaded_definitions:
                if definition.has_build_buy_tag(*tag_set):
                    self._definitions_tag_cache[key].append(definition)
        yield from self._definitions_tag_cache[key]

    def get_tuning_file_id(self, def_id):
        """Return the tuning resource id of a cached definition, else None."""
        if def_id in self._definitions_cache:
            return self._definitions_cache[def_id].tuning_file_id

    def reload_by_key(self, key):
        """Reload a definition or tuning resource and refresh dependents.

        NOTE(review): the initial RuntimeError makes everything below it
        unreachable in this build; the dead code is preserved as-is.
        """
        raise RuntimeError('[manus] Reloading tuning is not supported for optimized python builds.')
        if key.type == sims4.resources.Types.OBJECTDEFINITION:
            self._reload_definition(key.instance)
        elif key.type == self.TYPE:
            super().reload_by_key(key)
            object_tuning = super().get(key)
            object_guid64 = getattr(object_tuning, 'guid64', None)
            reload_list = set()
            # Find every cached definition whose tuning class shares the
            # reloaded tuning's guid64, then evict and reload those entries.
            for (definition_key, definition) in self._definitions_cache.items():
                def_cls = definition.cls
                def_cls_guid64 = getattr(def_cls, 'guid64', None)
                if object_guid64 is not None:
                    if def_cls_guid64 is not None:
                        if object_guid64 == def_cls_guid64:
                            reload_list.add(definition_key)
            for cache_key in reload_list:
                del self._definitions_cache[cache_key]
            for definition_key in reload_list:
                self._reload_definition(definition_key)

    def _reload_definition(self, key):
        """Reload one definition and recreate registered dependent objects."""
        if paths.SUPPORT_RELOADING_RESOURCES:
            sims4.resources.purge_cache()
            if isinstance(key, tuple):
                (def_id, state) = key
            else:
                def_id = key
                state = 0
            definition = self._load_definition_and_tuning(def_id, state)
            if definition is not None:
                if def_id in self._dependencies:
                    list_copy = list(self._dependencies.get(def_id))
                    self._dependencies[def_id].clear()
                    for gameobject in list_copy:
                        if gameobject.is_sim:
                            continue
                        # Save the object's state, remove it, then recreate it
                        # against the freshly loaded definition.
                        loc_type = gameobject.item_location
                        object_list = file_serialization.ObjectList()
                        save_data = gameobject.save_object(object_list.objects)
                        try:
                            gameobject.manager.remove(gameobject)
                        except:
                            logger.exception('exception in removing game object {}', gameobject)
                            continue
                        try:
                            dup = objects.system.create_object(definition, obj_id=gameobject.id, loc_type=loc_type, disable_object_commodity_callbacks=True)
                            dup.load_object(save_data, inline_finalize=True)
                            if gameobject.location is not None:
                                dup.location = gameobject.location
                            inventory = dup.get_inventory()
                            if inventory is not None:
                                inventory.system_add_object(dup)
                            logger.error('reloading game object with ID {}', dup.id)
                        except:
                            logger.exception('exception in reinitializing game object {}', gameobject)
            return definition

    def _load_definition_and_tuning(self, def_id, obj_state, pack_safe=False):
        """Load the raw definition, bind its tuning class, and cache it."""
        try:
            definition = self._load_definition(def_id)
        except KeyError:
            if pack_safe:
                raise UnavailablePackSafeResourceError
            logger.error('Failed to load definition with id {}', def_id, owner='tingyul')
            return
        else:
            try:
                tuning_file_id = definition.tuning_file_id
                # Fall back to the prototype tuning when none is specified.
                if tuning_file_id == 0:
                    tuning_file_id = PROTOTYPE_INSTANCE_ID
                cls = super().get(tuning_file_id)
                if cls is None:
                    logger.info('Failed to load object-tuning-id {} for definition {}. This is valid for SP14 objects mimic based on EP04 objects.', tuning_file_id, definition)
                    cls = super().get(PROTOTYPE_INSTANCE_ID)
                    if cls is None:
                        return
                cls = cls.get_class_for_obj_state(obj_state)
            except:
                logger.exception('Unable to create a script object for definition id: {0}', def_id)
                return
            definition.set_class(cls)
            key = (def_id, obj_state) if obj_state else def_id
            self._definitions_cache[key] = definition
            definition.assign_build_buy_tags()
            return definition

    def _load_definition(self, def_id):
        """Read the OBJECTDEFINITION resource and parse it into a Definition."""
        key = sims4.resources.Key(sims4.resources.Types.OBJECTDEFINITION, def_id)
        resource = sims4.resources.load(key)
        properties = sims4.PropertyStreamReader(resource)
        return objects.definition.Definition(properties, def_id)

    def find_first_definition_by_cls(self, cls):
        """Return the first cached definition whose tuning class is exactly cls."""
        for definition in self._definitions_cache.values():
            if definition.cls is cls:
                return definition
| 1.828125 | 2 |
setup.py | AzatAI/pyvcgencmd | 0 | 12772571 | <reponame>AzatAI/pyvcgencmd
#!/usr/bin/env python3
import os
import sys
from codecs import open
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))

about = {}  # an empty dictionary to receive the contents of __version__.py
# Execute __version__.py and capture its module-level names into `about`.
with open(os.path.join(here, "pyvcgencmd", "__version__.py"), "r", "utf-8") as f:
    exec(f.read(), about)
# 'setup.py publish' / 'setup.py publishwin' shortcuts: build an sdist,
# upload it with twine, then clean up the build artifacts.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist")
    os.system("twine upload dist/*")
    os.system("rm -rf dist")
    os.system("rm -rf *.egg-info")
    sys.exit()

if sys.argv[-1] == "publishwin":
    os.system("python3 setup.py sdist")
    os.system("twine upload dist/*")
    # BUG FIX: `rm dist` / `rm *.egg-info` fail on directories without -r;
    # use -rf as the "publish" branch above already does.
    os.system("rm -rf dist")
    os.system("rm -rf *.egg-info")
    sys.exit()
# Runtime dependencies installed alongside the package.
requires = ["pydantic", "psutil"]

# The long description shown on PyPI comes straight from the README.
with open("README.md", "r", "utf-8") as f:
    readme = f.read()

setup(
    name="pyvcgencmd",
    version=about["__version__"],
    description=about["__description__"],
    long_description=readme,
    long_description_content_type="text/markdown",
    author=about["__author__"],
    author_email=about["__author_email__"],
    url=about["__url__"],
    packages=find_packages(),
    package_data={"": ["LICENSE"]},
    # package_dir={'startpkg': 'src'},
    include_package_data=True,
    python_requires=">=3.6",
    install_requires=requires,
    # entry_points='''
    # ''',
    license=about["__license__"],
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    project_urls={
        "Documentation": "https://github.com/AzatAI/pyvcgencmd",
        "Source": "https://github.com/AzatAI/pyvcgencmd",
    },
)
| 1.960938 | 2 |
src/server_app/app.py | colesturza/ECEN5033-Final-Project | 0 | 12772572 | import os
from flask import Flask, request, jsonify, Response
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from prometheus_flask_exporter import PrometheusMetrics
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://mongo:27017/dev"
mongo = PyMongo(app)
db = mongo.db
hostname = os.uname()[1]
PrometheusMetrics(app)
@app.route("/")
def index():
    """Welcome endpoint; reports which pod served the request."""
    greeting = f"Welcome to Movies app! I am running inside {hostname} pod!"
    return jsonify(message=greeting)
@app.route("/message1")
def message1():
    """First demo message endpoint."""
    text = f"You've hit {hostname}. A very interesting message."
    return jsonify(message=text)
@app.route("/message2")
def message2():
    """Second demo message endpoint."""
    text = f"You've hit {hostname}. A second very interesting message."
    return jsonify(message=text)
@app.route("/message3")
def message3():
    """Third demo message endpoint."""
    text = f"You've hit {hostname}. A third very interesting message."
    return jsonify(message=text)
@app.route("/movies", methods=["GET"])
def get_all_movies():
    """Return every movie document as JSON: {"data": [{id, title, year}, ...]}."""
    # Build-list-with-append replaced by a comprehension (same result,
    # more idiomatic and avoids the manual accumulator).
    data = [
        {
            "id": str(movie["_id"]),
            "title": movie["title"],
            "year": movie["year"],
        }
        for movie in db.movies.find()
    ]
    return jsonify(data=data)
@app.route("/movies", methods=["POST"])
def create_movie():
    """Insert a new movie document from the JSON request body."""
    payload = request.get_json(force=True)
    document = {"title": payload["title"], "year": payload["year"]}
    db.movies.insert_one(document)
    return jsonify(message="Movie saved successfully!")
@app.route("/movies/<id>", methods=["GET"])
def get_movie_by_id(id):
    """Return a single movie by its ObjectId, or a 404 if it does not exist."""
    doc = db.movies.find_one_or_404({"_id": ObjectId(id)})
    movie = {"id": str(doc["_id"]), "title": doc["title"], "year": doc["year"]}
    return jsonify(movie=movie)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
build/debian_system_builder/debian_system_builder.py | chaytanyasinha/openr | 0 | 12772573 | <filename>build/debian_system_builder/debian_system_builder.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import, division, print_function, unicode_literals
import distutils.spawn
import os
import fbcode_builder_path
from fbcode_builder import FBCodeBuilder
from shell_builder import ShellFBCodeBuilder
from shell_quoting import ShellQuoted, path_join, raw_shell, shell_comment, shell_join
from utils import recursively_flatten_list
"""
debian_system_builder.py allows running the fbcode_builder logic on the host
rather than in a container and installs libraries and programs to the system.
It emits a bash script with set -exo pipefail configured such that
any failing step will cause the script to exit with failure.
== How to run it? ==
cd build
python debian_system_builder/debian_system_builder.py > ./build_openr.sh
sudo chmod +x build_openr.sh
sudo ./build_openr.sh
"""
class DebianSystemFBCodeBuilder(ShellFBCodeBuilder):
    """Shell-script builder that installs directly to the system.

    Unlike the containerized builders, configure/cmake steps emit no install
    prefix and installation runs under sudo followed by ldconfig.
    """

    # Overwrite configure to remove prefix for system build
    def configure(self, name=None):
        """Emit an autoconf ./configure step (no --prefix for system install)."""
        autoconf_options = {}
        if name is not None:
            autoconf_options.update(
                self.option("{0}:autoconf_options".format(name), {})
            )
        return [
            self.run(
                ShellQuoted(
                    'LDFLAGS="$LDFLAGS" '
                    'CFLAGS="$CFLAGS" '
                    'CPPFLAGS="$CPPFLAGS" '
                    "./configure {args}"
                ).format(
                    args=shell_join(
                        " ",
                        (
                            ShellQuoted("{k}={v}").format(k=k, v=v)
                            for k, v in autoconf_options.items()
                        ),
                    )
                )
            )
        ]

    # Overwrite cmake_configure to remove prefix for system build
    def cmake_configure(self, name, cmake_path=".."):
        """Emit a cmake configure step with shared libs and -fPIC enabled."""
        cmake_defines = {"BUILD_SHARED_LIBS": "ON"}
        cmake_defines.update(self.option("{0}:cmake_defines".format(name), {}))
        return [
            self.run(
                ShellQuoted(
                    'CXXFLAGS="$CXXFLAGS -fPIC" '
                    'CFLAGS="$CFLAGS -fPIC" '
                    "cmake {args} {cmake_path}"
                ).format(
                    args=shell_join(
                        " ",
                        (
                            ShellQuoted("-D{k}={v}").format(k=k, v=v)
                            for k, v in cmake_defines.items()
                        ),
                    ),
                    cmake_path=cmake_path,
                )
            )
        ]

    def github_project_workdir(self, project, path):
        """Clone (or copy) a GitHub project and cd into the given subpath."""
        # Only check out a non-default branch if requested. This especially
        # makes sense when building from a local repo.
        git_hash = self.option(
            "{0}:git_hash".format(project),
            # Any repo that has a hash in deps/github_hashes defaults to
            # that, with the goal of making builds maximally consistent.
            self._github_hashes.get(project, ""),
        )
        maybe_change_branch = (
            [self.run(ShellQuoted("git checkout {hash}").format(hash=git_hash))]
            if git_hash
            else []
        )

        base_dir = self.option("projects_dir")
        local_repo_dir = self.option("{0}:local_repo_dir".format(project), "")
        return self.step(
            "Check out {0}, workdir {1}".format(project, path),
            [
                self.workdir(base_dir),
                # Skip the clone if the checkout already exists, so reruns
                # of the generated script are idempotent.
                self.run(
                    ShellQuoted(
                        "if [[ ! -d {d} ]]; then \n"
                        "\tgit clone https://github.com/{p}\n"
                        "fi"
                    ).format(
                        p=project, d=path_join(base_dir, os.path.basename(project))
                    )
                )
                if not local_repo_dir
                else self.copy_local_repo(local_repo_dir, os.path.basename(project)),
                self.workdir(path_join(base_dir, os.path.basename(project), path)),
            ]
            + maybe_change_branch,
        )

    # Cmake system install
    def make_and_install(self, make_vars=None):
        """Emit parallel make plus a sudo install and ldconfig refresh."""
        return [
            self.parallel_make(make_vars),
            self.run(
                ShellQuoted("sudo make install VERBOSE=1 {vars}").format(
                    vars=self._make_vars(make_vars)
                )
            ),
            self.run(ShellQuoted("sudo ldconfig")),
        ]

    def debian_deps(self):
        """Extend the base Debian package list with Python tooling and ccache."""
        return super(DebianSystemFBCodeBuilder, self).debian_deps() + [
            "python-setuptools",
            "python3-setuptools",
            "python-pip",
            "ccache",
        ]

    def setup(self):
        """Emit the script prologue: strict bash flags and dependency install."""
        steps = [
            ShellQuoted("#!/bin/bash\n"),
            ShellQuoted("set -exo pipefail"),
            self.install_debian_deps(),
        ]
        if self.has_option("ccache_dir"):
            ccache_dir = self.option("ccache_dir")
            steps += [
                ShellQuoted(
                    # Set CCACHE_DIR before the `ccache` invocations below.
                    "export CCACHE_DIR={ccache_dir} "
                    'CC="ccache ${{CC:-gcc}}" CXX="ccache ${{CXX:-g++}}"'
                ).format(ccache_dir=ccache_dir)
            ]
        return steps
return steps
def install_dir():
    """Return the install prefix from $INSTALL_DIR, defaulting to /usr/local.

    An unset or empty variable both fall back to the default.
    """
    return os.environ.get("INSTALL_DIR") or "/usr/local"
def projects_dir():
    """Return the checkout root from $PROJECTS_DIR, defaulting to /usr/local/src.

    An unset or empty variable both fall back to the default.
    """
    return os.environ.get("PROJECTS_DIR") or "/usr/local/src"
def ccache_dir():
    """Return the ccache cache dir from $CCACHE_DIR, defaulting to /ccache.

    An unset or empty variable both fall back to the default.
    """
    return os.environ.get("CCACHE_DIR") or "/ccache"
def gcc_version():
    """Return the GCC version from $GCC_VERSION, defaulting to "5".

    An unset or empty variable both fall back to the default.
    """
    return os.environ.get("GCC_VERSION") or "5"
if __name__ == "__main__":
    from utils import read_fbcode_builder_config, build_fbcode_builder_config

    # NOTE: these assignments deliberately shadow the helper functions of
    # the same names defined above; the functions are not used again.
    install_dir = install_dir()
    projects_dir = projects_dir()

    config_file = os.path.join(
        os.path.dirname(__file__), "debian_system_fbcode_builder_config.py"
    )
    config = read_fbcode_builder_config(config_file)
    builder = DebianSystemFBCodeBuilder(projects_dir=projects_dir)

    # Enable ccache only when the binary is actually on PATH.
    if distutils.spawn.find_executable("ccache"):
        ccache_dir = ccache_dir()
        builder.add_option("ccache_dir", ccache_dir)

    # Option is required by fbcode_builder_spec
    builder.add_option("prefix", install_dir)
    builder.add_option("make_parallelism", 4)
    gcc_version = gcc_version()
    builder.add_option("gcc_version", gcc_version)

    make_steps = build_fbcode_builder_config(config)
    steps = make_steps(builder)

    # Emit the generated bash script on stdout.
    print(builder.render(steps))
| 2 | 2 |
adv/yurius.py | slushiedee/dl | 45 | 12772574 | from core.advbase import *
from slot.a import *
from slot.d import *
def module():
    """Entry point used by the simulator to locate the adventurer class."""
    return Yurius
class Yurius(Adv):
    """Simulation config for the adventurer Yurius (dragondrive kit).

    NOTE(review): numeric values (modifiers, gauge amounts) look like game
    data transcribed from the kit — confirm against current balance notes.
    """

    # A3 passive: 'prep' with value 100 (skills prepared at run start).
    a3 = ('prep', 100)

    conf = {}
    conf['slots.a'] = Primal_Crisis()+Candy_Couriers()
    conf['slots.d'] = Gaibhne_and_Creidhne()
    # Action control list (custom DSL) driving skill/shift priority.
    conf['acl'] = """
        if self.afflics.frostbite.get()
            `dragon, not self.dragondrive_buff.get() and (self.duration<=120 or self.dragonform.dragon_gauge>=2130 or self.dragonform.shift_count>0)
        else
            `dragon, self.dragondrive_buff.get()
        end
        queue prep and self.duration>120
            `s3; s2; s1; s4
        end
        `s3, cancel
        `s2, cancel
        `s4, cancel
        `s1, cancel
        """
    coab = ['Blade','Hunter_Sarisse','Xander']
    share = ['Gala_Elisanne', 'Ranzal']

    # conf['sim_afflict.efficiency'] = 1
    # conf['sim_afflict.type'] = 'frostbite'

    def prerun(self):
        # 3000/1200/75
        # Dragondrive: skill-damage and skill-haste buffs that last while
        # the drive gauge drains (75/unit, per set_dragondrive below).
        self.dragondrive_buff = Selfbuff('dragondrive_sd', 0.35, -1, 's', 'passive')
        self.dragondrive_haste = Selfbuff('dragondrive_sp',0.30, -1, 'sp', 'buff')
        self.dragonform.set_dragondrive(self.dragondrive_buff, drain=75)
        Event('dragon_end').listener(self.dragondrive_on) # cursed
        Event('dragondrive_end').listener(self.dragondrive_off)

    def dragondrive_on(self, e):
        # Haste buff tracks the dragondrive state.
        self.dragondrive_haste.on()

    def dragondrive_off(self, e):
        self.dragondrive_haste.off()

    def s1_proc(self, e):
        # S1: stronger, frostbite-killer hit while dragondrive is active.
        if self.dragondrive_buff.get():
            with KillerModifier('s1_killer', 'hit', 0.6, ['frostbite']):
                self.dmg_make(e.name, 7.92)
        else:
            self.dmg_make(e.name, 7.56)
            self.dragonform.charge_gauge(530, utp=True)
        self.inspiration.add(1, team=False)

    def s2_proc(self, e):
        # S2: killer hit in dragondrive; otherwise applies frostbite and
        # charges the drive gauge.
        if self.dragondrive_buff.get():
            with KillerModifier('s2_killer', 'hit', 0.6, ['frostbite']):
                self.dmg_make(e.name, 10.52)
        else:
            self.dmg_make(e.name, 2.08)
            self.afflics.frostbite(e.name,120,0.287,duration=30)
            self.dmg_make(e.name, 6.24)
            self.dragonform.charge_gauge(530, utp=True)
        self.inspiration.add(2, team=False)

    def s_proc(self, e):
        # Any skill used during dragondrive pauses the drive-gauge drain for
        # the skill's animation time.
        if self.dragondrive_buff.get():
            s = getattr(self, e.name)
            self.dragonform.add_drive_gauge_time(s.ac.getstartup()+s.ac.getrecovery(), skill_pause=True)
if __name__ == '__main__':
    # Fix: ``sys`` was never imported explicitly in this module — the
    # original relied on it leaking from ``from core.advbase import *``,
    # which would raise NameError if that star export changes.
    import sys

    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| 1.96875 | 2 |
debate/api/serializers.py | steventimberman/debateIt | 0 | 12772575 | <filename>debate/api/serializers.py
from rest_framework.serializers import ModelSerializer
from debate.models import DebateTopic
class DebateTopicSerializer(ModelSerializer):
    """DRF serializer exposing the public fields of ``DebateTopic``."""

    class Meta:
        model = DebateTopic
        # Explicit field whitelist; order defines the serialized output order.
        fields = [
            'topic',
            'description',
            'timestamp',
            'photo',
            'article_URL',
            'user'
        ]
] | 2.015625 | 2 |
tests/sympc/tensor/grad/grad_functions_test.py | Boluwatifeh/SyMPC | 0 | 12772576 | <gh_stars>0
# stdlib
from typing import List
# third party
import numpy as np
import pytest
import torch
from sympc.session import Session
from sympc.session import SessionManager
from sympc.tensor import MPCTensor
from sympc.tensor.grads.grad_functions import GradAdd
from sympc.tensor.grads.grad_functions import GradConv2d
from sympc.tensor.grads.grad_functions import GradFlatten
from sympc.tensor.grads.grad_functions import GradFunc
from sympc.tensor.grads.grad_functions import GradMatMul
from sympc.tensor.grads.grad_functions import GradMul
from sympc.tensor.grads.grad_functions import GradPow
from sympc.tensor.grads.grad_functions import GradReshape
from sympc.tensor.grads.grad_functions import GradSigmoid
from sympc.tensor.grads.grad_functions import GradSub
from sympc.tensor.grads.grad_functions import GradSum
from sympc.tensor.grads.grad_functions import GradT
from sympc.utils import parallel_execution
def test_grad_func_abstract_forward_exception() -> None:
    """GradFunc.forward is abstract and must raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        GradFunc.forward({})
def test_grad_func_abstract_backward_exception() -> None:
    """GradFunc.backward is abstract and must raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        GradFunc.backward({})
def test_grad_transpose_forward(get_clients) -> None:
    """Forward of GradT on a shared tensor reconstructs to torch's t()."""
    secret = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    mpc_tensor = secret.share(parties=get_clients(4))

    ctx = {}
    res_mpc = GradT.forward(ctx, mpc_tensor)

    res = res_mpc.reconstruct()
    expected = secret.t()

    assert (res == expected).all()
def test_grad_transpose_backward(get_clients) -> None:
    """Backward of GradT transposes the incoming gradient back."""
    parties = get_clients(4)

    grad = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    # Share the transposed gradient; backward should undo the transpose.
    grad_mpc = grad.t().share(parties=parties)

    ctx = {}
    res_mpc = GradT.backward(ctx, grad_mpc)

    res = res_mpc.reconstruct()
    expected = grad

    assert (res == expected).all()
def test_grad_add_different_dims_forward(get_clients) -> None:
    """GradAdd forward broadcasts operands of different shapes like torch."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    y = torch.Tensor([1, 2, 3])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradAdd.forward(ctx, x_mpc, y_mpc)

    res = res_mpc.reconstruct()
    expected = x + y

    assert (res == expected).all()
def test_grad_add_different_dims_backward(get_clients) -> None:
    """GradAdd backward reduces the gradient to each operand's shape."""
    parties = get_clients(4)

    grad = torch.Tensor([[[2, 4, 6], [5, 7, 9]]])
    grad_x = grad
    # The (1, 3) operand receives the gradient summed over the broadcast axes.
    grad_y = torch.Tensor([[7, 11, 15]])
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": (2, 3), "y_shape": (1, 3)}
    res_mpc_x, res_mpc_y = GradAdd.backward(ctx, grad_mpc)

    assert (res_mpc_x.reconstruct() == grad_x).all()
    assert (res_mpc_y.reconstruct() == grad_y).all()
def test_grad_add_forward(get_clients) -> None:
    """GradAdd forward on same-shape operands matches torch addition."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    y = torch.Tensor([[1, 4, 6], [8, 10, 12]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradAdd.forward(ctx, x_mpc, y_mpc)

    res = res_mpc.reconstruct()
    expected = x + y

    assert (res == expected).all()
def test_grad_add_backward(get_clients) -> None:
    """GradAdd backward passes the gradient through unchanged to both sides."""
    parties = get_clients(4)

    grad = torch.Tensor([1, 2, 3, 4])
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": (4,), "y_shape": (4,)}
    res_mpc_x, res_mpc_y = GradAdd.backward(ctx, grad_mpc)

    assert (res_mpc_x.reconstruct() == grad).all()
    assert (res_mpc_y.reconstruct() == grad).all()
def test_grad_sum_forward(get_clients) -> None:
    """GradSum forward totals the tensor and records its shape in ctx."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    x_mpc = x.share(parties=parties)

    ctx = {}
    res_mpc = GradSum.forward(ctx, x_mpc)

    assert ctx["x_shape"] == (2, 3)

    res = res_mpc.reconstruct()
    expected = x.sum()

    assert (res == expected).all()
def test_grad_sum_backward(get_clients) -> None:
    """GradSum backward broadcasts the scalar gradient to the input shape."""
    parties = get_clients(4)

    grad = torch.tensor(420)
    grad_mpc = grad.share(parties=parties)

    shape = (2, 3)
    ctx = {"x_shape": shape}

    res_mpc = GradSum.backward(ctx, grad_mpc)
    res = res_mpc.reconstruct()
    expected = torch.ones(size=shape) * grad

    assert (res == expected).all()
def test_grad_sub_forward(get_clients) -> None:
    """GradSub forward on same-shape operands matches torch subtraction."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    y = torch.Tensor([[1, 4, 6], [8, 10, 12]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradSub.forward(ctx, x_mpc, y_mpc)

    res = res_mpc.reconstruct()
    expected = x - y

    assert (res == expected).all()
def test_grad_sub_backward(get_clients) -> None:
    """GradSub backward negates the gradient for the subtrahend."""
    parties = get_clients(4)

    grad = torch.Tensor([1, 2, 3, 4])
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": (4,), "y_shape": (4,)}
    res_mpc_x, res_mpc_y = GradSub.backward(ctx, grad_mpc)

    assert (res_mpc_x.reconstruct() == grad).all()
    assert (res_mpc_y.reconstruct() == -grad).all()
def test_grad_sub_different_dims_forward(get_clients) -> None:
    """GradSub forward broadcasts operands of different shapes like torch."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    y = torch.Tensor([1, 2, 3])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradSub.forward(ctx, x_mpc, y_mpc)

    res = res_mpc.reconstruct()
    expected = x - y

    assert (res == expected).all()
def test_grad_sub_different_dims_backward(get_clients) -> None:
    """GradSub backward sum-reduces and negates the subtrahend gradient."""
    parties = get_clients(4)

    grad = torch.Tensor([[[2, 4, 6], [5, 7, 9]]])
    grad_x = grad
    # Broadcast axes are summed, then negated for the subtrahend.
    grad_y = -torch.Tensor([[7, 11, 15]])
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": (2, 3), "y_shape": (1, 3)}
    res_mpc_x, res_mpc_y = GradSub.backward(ctx, grad_mpc)

    assert (res_mpc_x.reconstruct() == grad_x).all()
    assert (res_mpc_y.reconstruct() == grad_y).all()
def test_grad_sigmoid_forward(get_clients) -> None:
    """GradSigmoid forward approximates torch.sigmoid and caches probs."""
    # We need Function Secret Sharing (only for 2 parties) for
    # comparing
    parties = get_clients(2)

    x = torch.Tensor([7, 10, 12])
    x_mpc = x.share(parties=parties)

    ctx = {}
    res_mpc = GradSigmoid.forward(ctx, x_mpc)

    assert "probabilities" in ctx

    res = res_mpc.reconstruct()
    expected = x.sigmoid()

    # Approximate comparison: MPC sigmoid is a fixed-point approximation.
    assert np.allclose(res, expected, rtol=1e-2)
def test_grad_sigmoid_backward(get_clients) -> None:
    """GradSigmoid backward computes grad * p * (1 - p) from cached probs."""
    parties = get_clients(4)

    grad = torch.tensor([0.3, 0.4, 0.7])
    grad_mpc = grad.share(parties=parties)

    # Here the cached probabilities equal the gradient, so the expected
    # value below is grad * grad * (1 - grad).
    ctx = {"probabilities": grad}
    res_mpc = GradSigmoid.backward(ctx, grad_mpc)
    res = res_mpc.reconstruct()
    expected = grad * grad * (1 - grad)

    assert np.allclose(res, expected, rtol=1e-2)
def test_grad_mul_forward(get_clients) -> None:
    """GradMul forward matches elementwise torch multiplication, caching inputs."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2], [3, -4]])
    y = torch.Tensor([[1, -4], [8, 9]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradMul.forward(ctx, x_mpc, y_mpc)

    assert "x" in ctx
    assert "y" in ctx

    res = res_mpc.reconstruct()
    expected = x * y

    assert np.allclose(res, expected, rtol=1e-3)
def test_grad_mul_backward(get_clients) -> None:
    """GradMul backward yields (grad * y, grad * x) via the product rule."""
    parties = get_clients(4)

    grad = torch.Tensor([[1, 2], [3, 4]])
    x = torch.Tensor([[1, 2], [3, -4]])
    y = torch.Tensor([[1, -4], [8, 9]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)
    grad_mpc = grad.share(parties=parties)

    ctx = {"x": x_mpc, "y": y_mpc}
    res_mpc_x, res_mpc_y = GradMul.backward(ctx, grad_mpc)

    assert np.allclose(res_mpc_x.reconstruct(), y * grad, rtol=1e-3)
    assert np.allclose(res_mpc_y.reconstruct(), x * grad, rtol=1e-3)
def test_grad_conv2d_forward(get_clients) -> None:
    """GradConv2d forward matches torch.nn.functional.conv2d and caches args."""
    parties = get_clients(4)

    input_secret = torch.ones(1, 1, 4, 4)
    weight_secret = torch.ones(1, 1, 2, 2)
    input = input_secret.share(parties=parties)
    weight = weight_secret.share(parties=parties)

    kwargs = {"bias": None, "stride": 1, "padding": 0, "dilation": 1, "groups": 1}

    ctx = {}
    res_mpc = GradConv2d.forward(ctx, input, weight, **kwargs)

    # The forward pass must cache everything backward needs.
    assert "input" in ctx
    assert "weight" in ctx
    assert "stride" in ctx
    assert "padding" in ctx
    assert "dilation" in ctx
    assert "groups" in ctx

    res = res_mpc.reconstruct()
    expected = torch.nn.functional.conv2d(input_secret, weight_secret, **kwargs)

    assert np.allclose(res, expected, rtol=1e-3)
def test_grad_conv2d_backward(get_clients) -> None:
    """GradConv2d backward matches torch's conv2d_input / conv2d_weight grads."""
    parties = get_clients(4)
    grad = torch.Tensor([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]])

    input = torch.Tensor(
        [
            [
                [
                    [-2.1805, -1.3338, -0.9718, -0.1335],
                    [-0.5632, 1.2667, 0.9994, -0.0627],
                    [-0.9563, 0.5861, -1.4422, -0.4825],
                    [0.2732, -1.1900, -0.6624, -0.7513],
                ]
            ]
        ]
    )

    weight = torch.Tensor([[[[0.3257, -0.7538], [-0.5773, -0.7619]]]])

    x_mpc = input.share(parties=parties)
    y_mpc = weight.share(parties=parties)
    grad_mpc = grad.share(parties=parties)

    ctx = {
        "input": x_mpc,
        "weight": y_mpc,
        "stride": 1,
        "padding": 0,
        "dilation": 1,
        "groups": 1,
    }
    res_mpc_input, res_mpc_weight = GradConv2d.backward(ctx, grad_mpc)

    # Reference gradients from torch's internal conv-grad helpers.
    expected_input = torch.nn.functional.grad.conv2d_input(input.size(), weight, grad)
    expected_weight = torch.nn.functional.grad.conv2d_weight(input, weight.size(), grad)

    assert np.allclose(res_mpc_input.reconstruct(), expected_input, rtol=1e-3)
    assert np.allclose(res_mpc_weight.reconstruct(), expected_weight, rtol=1e-3)
@pytest.mark.parametrize(
    "common_args", [[(6, 6), 2, 1, (3, 3), 1], [(4, 4), 1, 0, (2, 2), 1]]
)
@pytest.mark.parametrize("nr_parties", [2, 3, 4])
def test_get_grad_input_padding(get_clients, common_args: List, nr_parties) -> None:
    """get_grad_input_padding agrees with torch's _grad_input_padding.

    ``common_args`` is ``[input_size, stride, padding, kernel_size, dilation]``.
    Fix: the original hardcoded ``get_clients(2)``, so the ``nr_parties``
    parametrization was a no-op; the session now really uses it.
    """
    clients = get_clients(nr_parties)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)
    grad = torch.Tensor([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]])
    grad_mpc = MPCTensor(secret=grad, session=session)

    input_size, stride, padding, kernel_size, dilation = common_args
    # Reference value from torch's private helper.
    expected_padding = torch.nn.functional.grad._grad_input_padding(
        grad,
        input_size,
        (stride, stride),
        (padding, padding),
        kernel_size,
        (dilation, dilation),
    )

    # Run the SMPC version on every share in parallel, then reconstruct.
    args = [[el] + common_args + [session] for el in grad_mpc.share_ptrs]
    shares = parallel_execution(
        GradConv2d.get_grad_input_padding, grad_mpc.session.parties
    )(args)
    grad_input_padding = MPCTensor(shares=shares, session=grad_mpc.session)
    output_padding_tensor = grad_input_padding.reconstruct()
    # Each party contributed the same public value, so divide the summed
    # reconstruction by the party count.
    output_padding_tensor /= grad_mpc.session.nr_parties
    calculated_padding = tuple(output_padding_tensor.to(torch.int).tolist())
    assert calculated_padding == expected_padding
def test_grad_reshape_forward(get_clients) -> None:
    """GradReshape forward reshapes like torch and caches the input shape."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2], [3, -4], [-9, 0]])
    x_mpc = x.share(parties=parties)

    ctx = {}
    shape = (3, 2)
    res_mpc = GradReshape.forward(ctx, x_mpc, shape)

    assert "x_shape" in ctx

    res_shape = res_mpc.shape

    assert res_shape == shape
    assert np.allclose(res_mpc.reconstruct(), x.reshape(shape), rtol=1e-3)
def test_grad_reshape_backward(get_clients) -> None:
    """GradReshape backward restores the gradient to the cached input shape."""
    parties = get_clients(4)

    grad = torch.Tensor([[1, 2, 3], [3, 4, 7]])
    x = torch.Tensor([[1, 2], [3, -4], [5, 8]])

    x_mpc = x.share(parties=parties)
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": x_mpc.shape}
    res_mpc_grad = GradReshape.backward(ctx, grad_mpc)
    res_mpc_grad_shape = res_mpc_grad.shape

    assert res_mpc_grad_shape == x_mpc.shape
    assert np.allclose(res_mpc_grad.reconstruct(), grad.reshape(x_mpc.shape), rtol=1e-3)
@pytest.mark.parametrize("args", [[0, -1], [1, -1], [0, 1]])
def test_grad_flatten_forward(get_clients, args: list) -> None:
    """GradFlatten forward matches torch.flatten for several dim ranges."""
    parties = get_clients(4)

    x = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    x_mpc = x.share(parties=parties)

    ctx = {}
    start_dim, end_dim = args
    res_mpc = GradFlatten.forward(ctx, x_mpc, start=start_dim, end=end_dim)

    assert "x_shape" in ctx

    expected = torch.flatten(x, start_dim=start_dim, end_dim=end_dim)
    assert np.allclose(res_mpc.reconstruct(), expected, rtol=1e-3)
def test_grad_flatten_backward(get_clients) -> None:
    """GradFlatten backward restores the flattened gradient to the input shape."""
    parties = get_clients(4)

    grad = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8])
    x = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])

    x_mpc = x.share(parties=parties)
    grad_mpc = grad.share(parties=parties)

    ctx = {"x_shape": x_mpc.shape}
    res_mpc_grad = GradFlatten.backward(ctx, grad_mpc)

    assert np.allclose(res_mpc_grad.reconstruct(), x, rtol=1e-3)
@pytest.mark.parametrize("power", [2, 4, 5])
def test_grad_pow_forward(get_clients, power) -> None:
    """GradPow forward matches torch's elementwise integer power."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    x_mpc = x.share(parties=parties)

    ctx = {}
    res_mpc = GradPow.forward(ctx, x_mpc, power)

    assert "x" in ctx
    assert "y" in ctx

    res = res_mpc.reconstruct()
    expected = x ** power

    assert np.allclose(res, expected, rtol=1e-3)
@pytest.mark.parametrize("power", [1.0, torch.tensor([1, 3])])
def test_grad_pow_forward_exception(get_clients, power) -> None:
    """GradPow forward rejects non-integer exponents with TypeError."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2, 3], [4, 5, 6]])
    x_mpc = x.share(parties=parties)
    ctx = {}

    with pytest.raises(TypeError):
        GradPow.forward(ctx, x_mpc, power)
@pytest.mark.parametrize("power", [2, 4, 5])
def test_grad_pow_backward(get_clients, power) -> None:
    """GradPow backward applies the power rule: n * x**(n-1) * grad."""
    parties = get_clients(4)

    grad = torch.Tensor([1, 2, 3, 4])
    grad_mpc = grad.share(parties=parties)

    x = torch.Tensor([1, 4, 9, 16])
    x_mpc = x.share(parties=parties)

    ctx = {"x": x_mpc, "y": power}
    res_mpc = GradPow.backward(ctx, grad_mpc)
    res = res_mpc.reconstruct()
    expected = power * x ** (power - 1) * grad

    assert np.allclose(res, expected, rtol=1e-3)
def test_grad_matmul_forward(get_clients) -> None:
    """GradMatMul forward matches the torch matrix product, caching inputs."""
    parties = get_clients(4)

    x = torch.Tensor([[1, 2], [3, -4]])
    y = torch.Tensor([[1, -4], [8, 9]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)

    ctx = {}
    res_mpc = GradMatMul.forward(ctx, x_mpc, y_mpc)

    assert "x" in ctx
    assert "y" in ctx

    res = res_mpc.reconstruct()
    expected = x @ y

    assert np.allclose(res, expected, rtol=1e-3)
def test_grad_matmul_backward(get_clients) -> None:
    """GradMatMul backward yields (grad @ y.T, x.T @ grad); shape mismatch raises."""
    parties = get_clients(4)

    grad = torch.Tensor([[1, 2], [3, 4]])
    x = torch.Tensor([[1, 2], [3, -4]])
    y = torch.Tensor([[1, -4], [8, 9]])

    x_mpc = x.share(parties=parties)
    y_mpc = y.share(parties=parties)
    grad_mpc = grad.share(parties=parties)

    ctx = {"x": x_mpc, "y": y_mpc}
    res_mpc_x, res_mpc_y = GradMatMul.backward(ctx, grad_mpc)

    assert np.allclose(res_mpc_x.reconstruct(), grad @ y.T, rtol=1e-3)
    assert np.allclose(res_mpc_y.reconstruct(), x.T @ grad, rtol=1e-3)

    # Test Value Error
    # Incompatible shapes for the cached operands must raise.
    y = torch.Tensor([[1, -4], [8, 9], [10, 11]])
    y_mpc = y.share(parties=parties)
    ctx = {"x": x_mpc, "y": y_mpc}
    with pytest.raises(ValueError):
        GradMatMul.backward(ctx, grad_mpc)
| 1.921875 | 2 |
bamboos/docker/environment/globalregistry.py | onedata/cluster-example | 1 | 12772577 | <filename>bamboos/docker/environment/globalregistry.py
# coding=utf-8
"""Authors: <NAME>, <NAME>
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Brings up a set of Global Registry nodes along with databases.
They can create separate clusters.
"""
import copy
import json
import os
import re
from . import common, docker, dns
LOGFILE = '/tmp/run.log'
def gr_domain(gr_instance, uid):
    """Format the DNS domain for a Global Registry instance.

    ``uid`` scopes the name so concurrent environments do not clash.
    """
    return common.format_hostname(gr_instance, uid)
def gr_hostname(node_name, gr_instance, uid):
    """Format the hostname for a docker hosting a GR node.

    NOTE: Hostnames are also used as docker (container) names!
    """
    return common.format_hostname([node_name, gr_instance], uid)
def gr_db_hostname(db_node_name, gr_instance, uid):
    """Format the hostname for a docker hosting bigcouch for GR.

    NOTE: Hostnames are also used as docker (container) names!
    """
    return common.format_hostname([db_node_name, gr_instance], uid)
def db_erl_node_name(db_node_name, gr_instance, uid):
    """Format the Erlang node name ("bigcouch@<host>") for a GR DB docker."""
    hostname = gr_db_hostname(db_node_name, gr_instance, uid)
    return common.format_erl_node_name('bigcouch', hostname)
def gr_erl_node_name(node_name, gr_instance, uid):
    """Format the Erlang node name ("gr@<host>") for a GR docker."""
    hostname = gr_hostname(node_name, gr_instance, uid)
    return common.format_erl_node_name('gr', hostname)
def _tweak_config(config, gr_node, gr_instance, uid):
    """Return a per-node copy of *config* specialized for one GR node.

    The copy keeps only ``gr_node`` under ``nodes``, rewrites DB node names
    and the HTTP domain to uid-scoped hostnames, and sets the Erlang VM
    node name. The input dict is not mutated (deep copy).
    """
    cfg = copy.deepcopy(config)
    cfg['nodes'] = {'node': cfg['nodes'][gr_node]}

    sys_config = cfg['nodes']['node']['sys.config']
    sys_config['db_nodes'] = [db_erl_node_name(n, gr_instance, uid)
                              for n in sys_config['db_nodes']]
    if 'http_domain' in sys_config:
        sys_config['http_domain'] = {'string': gr_domain(gr_instance, uid)}

    if 'vm.args' not in cfg['nodes']['node']:
        cfg['nodes']['node']['vm.args'] = {}
    vm_args = cfg['nodes']['node']['vm.args']
    vm_args['name'] = gr_erl_node_name(gr_node, gr_instance, uid)

    return cfg
def _node_up(gr_id, domain, gr_ips, dns_ips, dns_config, gen_dev_config):
    """Rewrite the dns.config template and start GR inside container *gr_id*.

    Each ``re.sub`` below replaces one Erlang config tuple
    ({cname, ...}, {ip_addresses, ...}, {ns_servers, ...}, etc.) in the
    template text with values derived from the runtime IPs, then the final
    config and gen_dev args are shipped into the container via heredocs.
    """
    # Map the GR domain and generated ns*/mail names to their IPs.
    ip_addresses = {
        domain: gr_ips
    }
    ns_servers = []
    for i in range(len(dns_ips)):
        ns = 'ns{0}.{1}'.format(i, domain)
        ns_servers.append(ns)
        ip_addresses[ns] = [dns_ips[i]]

    primary_ns = ns_servers[0]
    mail_exchange = 'mail.{0}'.format(domain)
    ip_addresses[mail_exchange] = [gr_ips[0]]

    admin_mailbox = 'dns-admin.{0}'.format(domain)

    cname = '{{cname, "{0}"}},'.format(domain)
    dns_config = re.sub(
        re.compile(r"\{cname,\s*[^\}]*\},", re.MULTILINE),
        cname,
        dns_config)

    ip_addresses_entries = []
    for address in ip_addresses:
        ip_list = '"{0}"'.format('","'.join(ip_addresses[address]))
        ip_addresses_entries.append(' {{"{0}", [{1}]}}'
                                    .format(address, ip_list))
    ip_addresses = '{{ip_addresses, [\n{0}\n ]}},'.format(
        ',\n'.join(ip_addresses_entries))
    dns_config = re.sub(
        re.compile(r"\{ip_addresses,\s*\[(\s*\{[^\}]*\}[,]?\s*)*\]\},",
                   re.MULTILINE),
        ip_addresses,
        dns_config)

    ns_servers = '{{ns_servers, [\n "{0}"\n ]}},'.format(
        '",\n "'.join(ns_servers))
    dns_config = re.sub(
        re.compile(r"\{ns_servers,\s*\[[^\]\}]*\]\},", re.MULTILINE),
        ns_servers,
        dns_config)

    mail_exchange = '{{mail_exchange, [\n {{10, "{0}"}}\n ]}},' \
        .format(mail_exchange)
    dns_config = re.sub(
        re.compile(r"\{mail_exchange,\s*\[[^\]]*\]\},", re.MULTILINE),
        mail_exchange,
        dns_config)

    primary_ns = '{{primary_ns, "{0}"}},'.format(primary_ns)
    dns_config = re.sub(
        re.compile(r"\{primary_ns,\s*[^\}]*\},", re.MULTILINE),
        primary_ns,
        dns_config)

    admin_mailbox = '{{admin_mailbox, "{0}"}},'.format(admin_mailbox)
    dns_config = re.sub(
        re.compile(r"\{admin_mailbox,\s*[^\}]*\},", re.MULTILINE),
        admin_mailbox,
        dns_config)

    # Shell executed inside the GR container: keep log ownership in sync,
    # write gen_dev args + dns.config, generate the release, start GR.
    gr_command = '''set -e
mkdir -p /root/bin/node/log/
echo 'while ((1)); do chown -R {uid}:{gid} /root/bin/node/log; sleep 1; done' > /root/bin/chown_logs.sh
bash /root/bin/chown_logs.sh &
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
cat <<"EOF" > /root/bin/node/resources/dns.config
{dns_config}
EOF
/root/bin/node/bin/globalregistry console >> {logfile}'''
    gr_command = gr_command.format(
        uid=os.geteuid(),
        gid=os.getegid(),
        gen_dev_args=json.dumps({'globalregistry': gen_dev_config}),
        dns_config=dns_config,
        logfile=LOGFILE)

    docker.exec_(
        container=gr_id,
        detach=True,
        interactive=True,
        tty=True,
        command=gr_command)
def _docker_up(image, bindir, config, dns_servers, logdir):
    """Start the DB docker and the GR docker, but do not start GR itself
    (the dns.config update in ``_node_up`` must happen first).

    Returns ``(gr_container_id, output_dict)`` describing started dockers
    and node names.
    """
    node_name = config['nodes']['node']['vm.args']['name']
    cookie = config['nodes']['node']['vm.args']['setcookie']
    db_nodes = config['nodes']['node']['sys.config']['db_nodes']

    (gr_name, sep, gr_hostname) = node_name.partition('@')

    # Start DB node for current GR instance.
    # Currently, only one DB node for GR is allowed, because we are using links.
    # It's impossible to create a bigcouch cluster with docker's links.
    db_node = db_nodes[0]
    (db_name, sep, db_hostname) = db_node.partition('@')

    # Reconfigure bigcouch's bind address, node name and cookie, then run it.
    db_command = '''echo '[httpd]' > /opt/bigcouch/etc/local.ini
echo 'bind_address = 0.0.0.0' >> /opt/bigcouch/etc/local.ini
sed -i 's/-name bigcouch/-name {name}@{host}/g' /opt/bigcouch/etc/vm.args
sed -i 's/-setcookie monster/-setcookie {cookie}/g' /opt/bigcouch/etc/vm.args
/opt/bigcouch/bin/bigcouch'''
    db_command = db_command.format(name=db_name, host=db_hostname,
                                   cookie=cookie)

    bigcouch = docker.run(
        image='onedata/bigcouch',
        name=db_hostname,
        hostname=db_hostname,
        detach=True,
        command=db_command)

    volumes = [(bindir, '/root/build', 'ro')]

    if logdir:
        logdir = os.path.join(os.path.abspath(logdir), gr_hostname)
        volumes.extend([(logdir, '/root/bin/node/log', 'rw')])

    # Just start the docker, GR will be started later when dns.config is updated
    gr = docker.run(
        image=image,
        name=gr_hostname,
        hostname=gr_hostname,
        detach=True,
        interactive=True,
        tty=True,
        workdir='/root/build',
        volumes=volumes,
        dns_list=dns_servers,
        link={db_hostname: db_hostname},
        command=['bash'])

    return gr, {
        'docker_ids': [bigcouch, gr],
        'gr_db_nodes': ['{0}@{1}'.format(db_name, db_hostname)],
        'gr_nodes': ['{0}@{1}'.format(gr_name, gr_hostname)]
    }
def up(image, bindir, dns_server, uid, config_path, logdir=None):
    """Bring up all Global Registry instances described in *config_path*.

    For each GR domain in the config this starts the DB + GR dockers,
    pushes an updated ``dns.config`` to each GR node, records the new
    domain, and finally (re)starts the DNS server so the domains resolve.

    Returns a dict describing started dockers, GR/DB node names and domains.
    """
    config = common.parse_json_file(config_path)
    input_dir = config['dirs_config']['globalregistry']['input_dir']
    dns_servers, output = dns.maybe_start(dns_server, uid)

    for gr_instance in config['globalregistry_domains']:
        gen_dev_cfg = {
            'config': {
                'input_dir': input_dir,
                'target_dir': '/root/bin'
            },
            'nodes': config['globalregistry_domains'][gr_instance]['globalregistry']
        }

        tweaked_configs = [_tweak_config(gen_dev_cfg, gr_node, gr_instance, uid)
                           for gr_node in gen_dev_cfg['nodes']]

        gr_ips = []
        gr_configs = {}
        for cfg in tweaked_configs:
            gr, node_out = _docker_up(image, bindir, cfg, dns_servers, logdir)
            common.merge(output, node_out)
            gr_configs[gr] = cfg
            gr_ips.append(common.get_docker_ip(gr))

        domain = gr_domain(gr_instance, uid)
        dns_cfg_path = os.path.join(os.path.abspath(bindir),
                                    input_dir, 'resources', 'dns.config')
        # Fix: use a context manager so the template file handle is closed
        # (the original ``open(...).read()`` leaked it).
        with open(dns_cfg_path) as dns_cfg_file:
            orig_dns_cfg = dns_cfg_file.read()

        # Update dns.config file on each GR node.
        # (``gr_id`` instead of ``id`` to avoid shadowing the builtin.)
        for gr_id in gr_configs:
            _node_up(gr_id, domain, gr_ips, gr_ips, orig_dns_cfg,
                     gr_configs[gr_id])

        domains = {
            'domains': {
                domain: {
                    'ns': gr_ips,
                    'a': []
                }
            }
        }
        common.merge(output, domains)

    # Make sure domains are added to the dns server
    dns.maybe_restart_with_configuration(dns_server, uid, output)
    return output
| 2 | 2 |
bluebrain/repo-bluebrain/packages/py-bluepy/package.py | BlueBrain/Spack | 0 | 12772578 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBluepy(PythonPackage):
    """Pythonic Blue Brain data access API"""

    homepage = "https://bbpgitlab.epfl.ch/nse/bluepy"
    git = "<EMAIL>:nse/bluepy.git"

    version('develop', branch='main')
    version('2.4.5', tag='bluepy-v2.4.5')
    version('2.4.4', tag='bluepy-v2.4.4')

    # Runtime dependencies; upper bounds pin the next major version.
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-libsonata@0.1.7:0.999', type=('build', 'run'))
    depends_on('py-pandas@1.0.0:1.999', type=('build', 'run'))
    depends_on('py-bluepy-configfile@0.1.18:0.999,develop', type=('build', 'run'))
    depends_on('py-numpy@1.8:', type=('build', 'run'))
    depends_on('py-h5py@3.0.1:3.999', type=('build', 'run'))
    depends_on('py-morph-tool@2.4.3:2.999', type=('build', 'run'))
    depends_on('py-morphio@3.0.1:3.999', type=('build', 'run'))
    depends_on('py-voxcell@3.0.0:3.999,develop', type=('build', 'run'))
    depends_on('py-bluepysnap@0.13.0:0.999,develop', type=('build', 'run'))
    depends_on('py-cached-property@1.0:', type=('build', 'run'))
    depends_on('brion+python@3.3.0:3.999', type=('build', 'run'))

    @property
    def import_modules(self):
        """Modules to import during the install self-test."""
        # bluepy.index requires libFLATIndex, unavailable on spack
        modules = super(PyBluepy, self).import_modules
        return [m for m in modules if m != 'bluepy.index']
| 1.585938 | 2 |
ctapipe/calib/camera/tests/test_calibrator.py | maxnoe/ctapipe | 0 | 12772579 | from numpy.testing import assert_allclose
from ctapipe.calib.camera import (
CameraCalibrator,
HESSIOR1Calibrator,
NullR1Calibrator
)
from ctapipe.image.extractor import LocalPeakWindowSum
from ctapipe.io import SimTelEventSource
from ctapipe.utils import get_dataset_path
from traitlets.config.configurable import Config
def test_camera_calibrator(example_event):
    """Calibrating an event fills the DL1 image for at least one telescope."""
    telid = list(example_event.r0.tel)[0]
    calibrator = CameraCalibrator(r1_product="HESSIOR1Calibrator")
    calibrator.calibrate(example_event)
    image = example_event.dl1.tel[telid].image
    assert image is not None
def test_manual_r1():
    """Selecting the R1 product by name yields a HESSIOR1Calibrator."""
    calibrator = CameraCalibrator(r1_product="HESSIOR1Calibrator")
    assert isinstance(calibrator.r1, HESSIOR1Calibrator)
def test_manual_extractor():
    """Selecting the extractor by name yields a LocalPeakWindowSum."""
    calibrator = CameraCalibrator(extractor_name="LocalPeakWindowSum")
    assert isinstance(calibrator.dl1.extractor, LocalPeakWindowSum)
def test_eventsource_r1():
    """A SimTel event source makes the calibrator pick the HESSIO R1 product."""
    dataset = get_dataset_path("gamma_test_large.simtel.gz")
    eventsource = SimTelEventSource(input_url=dataset)
    calibrator = CameraCalibrator(eventsource=eventsource)
    assert isinstance(calibrator.r1, HESSIOR1Calibrator)
def test_eventsource_override_r1():
    """An explicit r1_product overrides the event-source-derived default."""
    dataset = get_dataset_path("gamma_test_large.simtel.gz")
    eventsource = SimTelEventSource(input_url=dataset)
    calibrator = CameraCalibrator(
        eventsource=eventsource,
        r1_product="NullR1Calibrator"
    )
    assert isinstance(calibrator.r1, NullR1Calibrator)
def test_config():
    """Traitlets Config values propagate into the extractor's parameters."""
    window_shift = 3
    window_width = 9
    config = Config({"LocalPeakWindowSum": {
        "window_shift": window_shift,
        "window_width": window_width,
    }})
    calibrator = CameraCalibrator(
        r1_product='HESSIOR1Calibrator',
        extractor_name='LocalPeakWindowSum',
        config=config
    )
    assert calibrator.dl1.extractor.window_shift == window_shift
    assert calibrator.dl1.extractor.window_width == window_width
| 2.265625 | 2 |
scripts/download_log_archive.py | mmurayama/fujitsu-redfish-samples | 7 | 12772580 | <reponame>mmurayama/fujitsu-redfish-samples<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sample script to download a log archive via iRMC Redfish API
in Fujitsu PRIMERGY
[Usage]
- Download a log archive
$ python download_log_archive.py -i 192.168.10.10 -u admin -p admin -f archive.zip
"""
import sys
import time
import argparse
import requests
import json
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
__author__ = "<NAME>"
__version__ = "0.0.2"
def download_log_archive(irmc, user, password, archive_name):
    """Generate and download an iRMC log archive via the Redfish API.

    Flow: open a Redfish session, sanity-check firmware support, trigger
    archive generation, poll the resulting task for up to 120 s, then
    download the archive into ``archive_name`` and close the session.

    Exits the whole process via sys.exit() on login or request failures.
    """
    session = requests.Session()
    # NOTE(review): certificate verification disabled -- iRMCs commonly use
    # self-signed certs, but confirm this is acceptable for your environment.
    session.verify = False
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}
    payload = {'UserName': user, 'Password': password}
    sessions_url = "https://{}/redfish/v1/SessionService/Sessions".format(irmc)
    response = session.post(
        sessions_url,
        headers=headers,
        auth=(user, password),
        data=json.dumps(payload)
    )
    # 201 Created is the expected response for a new Redfish session.
    if response.status_code != 201:
        print("ERROR: Could not create a session for the iRMC")
        sys.exit()
    # All subsequent requests authenticate via the session token.
    session.headers.update({"X-Auth-Token": response.headers["X-Auth-Token"]})
    session_info = response.headers['Location']  # used later to delete the session
    response = session.get('https://{}/redfish/v1/Managers/iRMC'.format(irmc))
    irmc_fw = response.json()['FirmwareVersion']
    irmc_model = response.json()['Model']
    # The log-archive endpoint appeared in iRMC S4 firmware 9.60; reject older.
    # (The version string is flattened by stripping dots and the last char.)
    if irmc_model == 'iRMC S4' and int(irmc_fw[0:-1].replace('.','')) < 960:
        print("ERROR: The Log Archive endpoint is not supported in {} firmware {}.".format(irmc_model, irmc_fw))
        sys.exit()
    # Generate a log file archive
    url = "https://{}/redfish/v1/Oem/ts_fujitsu/FileDownload/Actions/FTSFileDownload.GenerateLogFileArchive".format(irmc)
    response = session.post(url)
    status_code = response.status_code
    # 202 Accepted: generation runs asynchronously as a Redfish task.
    if status_code != 202:
        print("ERROR: The request failed (url: {0}, error: {1})".format(
            url, response.json()['error']['message']))
        sys.exit()
    task_url = "https://{0}{1}".format(irmc, response.headers['Location'])
    # Will wait for up to 120 seconds for the task to be completed
    max_wait_time = 120
    while max_wait_time:
        response = session.get(task_url)
        if response.json()['TaskState'] == "Completed":
            break
        else:
            print("Generating a log file archive. Please wait...", end='\r')
            # check the task status every second
            time.sleep(1)
            max_wait_time -= 1
    if max_wait_time == 0:
        print("\nERROR: Generating a log archive did not complete in time. Please check the task log at {}/Oem/ts_fujitsu/Logs".format(task_url))
    else:
        # Download the archive from iRMC
        url = "https://{}/redfish/v1/Oem/ts_fujitsu/FileDownload/Actions/FTSFileDownload.DownloadLogFileArchive".format(irmc)
        response = session.post(url)
        status_code = response.status_code
        if status_code != 200:
            print("ERROR: Failed to download a log archive from iRMC. (error: {})".format(response.json()['error']['message']))
        else:
            # Binary archive content is written verbatim to disk.
            with open(archive_name, 'wb') as newfile:
                newfile.write(response.content)
            print("iRMC Log Archive has been saved to {} successfully.".format(archive_name))
    # Tear down the Redfish session (skipped on the sys.exit() paths above).
    session.delete("https://{0}{1}".format(irmc, session_info))
def main():
    """Parse command-line arguments and download the iRMC log archive."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i', '--irmc',
        required=True,
        help="iRMC IP address/hostname/FQDN")
    parser.add_argument(
        '-u', '--user',
        default="admin",
        help="iRMC user name")
    parser.add_argument(
        '-p', '--password',
        # NOTE(review): placeholder default left over from source
        # anonymization -- confirm the intended default password.
        default="<PASSWORD>",
        help="iRMC password")
    parser.add_argument(
        '-f', '--file',
        default="LogArchive.zip",
        help="iRMC log archive file name")
    args = parser.parse_args()
    irmc = args.irmc
    user = args.user
    # BUG FIX: the original line read ``password = <PASSWORD>``, which is not
    # valid Python; the parsed argument was clearly intended.
    password = args.password
    archive_name = args.file
    download_log_archive(irmc, user, password, archive_name)


if __name__ == '__main__':
    main()
| 1.859375 | 2 |
code/Series/pisano.py | mys-anusha/NISB-Rosetta-Code | 4 | 12772581 | <gh_stars>1-10
def pisano(m):
    """Return the Pisano period pi(m): the period of Fibonacci numbers mod m.

    Scans successive Fibonacci pairs mod m until the starting pair (0, 1)
    reappears.  There are at most m*m distinct pairs and the sequence is
    purely periodic, so the scan always terminates for m >= 2.

    BUG FIX: for m == 1 every residue is 0, so the pair (0, 1) never
    reappears and the original loop fell off the end returning None.
    The Pisano period of 1 is 1, handled explicitly.
    """
    if m == 1:
        return 1
    prev, curr = 0, 1
    for i in range(0, m * m):
        prev, curr = curr, (prev + curr) % m
        if prev == 0 and curr == 1:
            return i + 1
def fib(n, m):
    """Return F(n mod pi(m)) -- the Fibonacci number at the Pisano-reduced index.

    Note: the result itself is NOT reduced modulo m; callers apply ``% m``.
    """
    n %= pisano(m)
    if n < 2:
        return n
    # Iterate the pair (F(k-1), F(k)) instead of materialising a full table.
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# Print F(n) % m for two whitespace-separated integers n and m on stdin.
values = [int(tok) for tok in input().split()]
print(fib(values[0], values[1]) % values[1])
plot_histogram.py | rickdberg/global_extrapolation | 0 | 12772582 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 14:52:28 2017
@author: rickdberg
Module for applying machine learning method to flux data
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import savefig
from user_parameters import (ml_outputs_path)
# Machine-learning method whose flux outputs are plotted.
meth = 'gbr'

# Define fonts
mpl.rcParams['mathtext.fontset'] = 'custom'
mpl.rcParams['mathtext.rm'] = 'Verdana'
mpl.rcParams['mathtext.it'] = 'Verdana'
mpl.rc('font',family='sans-serif')
mpl.rcParams['font.sans-serif'] = 'Verdana'
mpl.rcParams['font.cursive'] = 'Verdana'

# Flux estimates (mol m^-2 y^-1) produced by the ML extrapolation step.
margin_fluxes = np.loadtxt(ml_outputs_path + 'margin_fluxes_{}.csv'.format(meth), delimiter='\t')
abyssal_fluxes = np.loadtxt(ml_outputs_path + 'abyssal_fluxes_{}.csv'.format(meth), delimiter='\t')

# Figure sized in inches (values given in mm / 25.4).
mva_fig = plt.figure(figsize=(45/25.4, 35/25.4))
ax1 = mva_fig.add_axes([0.23,0.2,0.7,0.7])
# BUG FIX: ``normed=True`` was deprecated and removed from matplotlib
# (>= 3.1); ``density=True`` is the equivalent, supported keyword.
plt.hist(abyssal_fluxes*1000,
         color='#006f00',
         alpha=0.8,
         density=True,
         bins=100,
         label='>100 km',
         histtype='bar',
         edgecolor='black',
         linewidth=0.1)
plt.hist(margin_fluxes*1000,
         color='#a4ff48',
         alpha=0.65,
         density=True,
         bins=100,
         label='<100 km',
         histtype='bar',
         edgecolor='black',
         linewidth=0.1)
plt.legend(fontsize=6, frameon=False)
plt.ylabel('$Probability\ Density$', fontsize=6, linespacing=0.5, labelpad=1)
plt.xlabel('$Mg^{2+}\ Flux\ (mmol\ m^{-2}\ y^{-1})$', fontsize=6, linespacing=0.5, labelpad=1)
plt.tick_params(axis='both', which='major', labelsize=6)
savefig(ml_outputs_path + 'plot_histogram_{}.pdf'.format(meth), dpi=1000)

# eof
# eof
| 2.515625 | 3 |
cart/urls.py | abhishek593/IITISOC_LaFrescoInPocket | 0 | 12772583 | <reponame>abhishek593/IITISOC_LaFrescoInPocket
from django.urls import path
from . import views
app_name = 'cart'  # URL namespace: reverse these routes as 'cart:<name>'

# Cart endpoints; all views are defined in cart/views.py.
urlpatterns = [
    path('list_items/', views.list_items, name='list_items'),
    path('add_item/', views.add_item, name='add_item'),
    path('show_cart/', views.show_cart, name='show_cart'),
    path('remove_item/', views.remove_item, name='remove_item'),
]
| 2.015625 | 2 |
web/utest/test_news.py | tangjx/migrant | 0 | 12772584 | # -*- coding:utf-8 -*-
"""
author <EMAIL>
"""
from bson import ObjectId
import json
from logic.utility import m_del,m_page,m_update,m_exists
from unittest import TestCase
from logic.news import hot
class NewsCase(TestCase):
    """Smoke tests for logic.news.hot(limit, days) (Python 2 codebase).

    hot() is assumed to return a list of at most ``limit`` news items from
    the last ``days`` days -- TODO confirm against logic/news.py.
    """
    def setUp(self):
        # No fixtures needed; hot() is queried against live data.
        pass
    def test_hot(self):
        # One-day window: output only inspected visually.
        print 'one day news'
        print hot(3,1)
        print
        print
        print 'week hot'
        lst = hot(3,7)
        print len(lst)
        # The limit argument caps the result size.
        assert len(lst) <=3
        print
        print
        print 'month hot'
        lst = hot(10,30)
        print len(lst)
        assert len(lst) <=10
| 2.578125 | 3 |
evap/evaluation/migrations/0041_populate_semester_is_archived.py | felixrindt/EvaP | 29 | 12772585 | # Generated by Django 1.9.1 on 2016-02-20 21:11
from django.db import migrations
def is_course_archived(course):
    """A course counts as archived once its participant/voter counts were cached.

    Both cached counters must agree (either both set or both unset).
    """
    has_participants = course._participant_count is not None
    has_voters = course._voter_count is not None
    assert has_participants == has_voters
    return has_participants
def is_semester_archived(semester):
    """A semester is archived iff all of its courses are (they must agree)."""
    courses = semester.course_set
    if courses.count() == 0:
        return False
    archived = is_course_archived(courses.first())
    # Archiving happens per-semester, so every course must match the first.
    assert all(is_course_archived(course) == archived
               for course in courses.all())
    return archived
def set_is_archived(apps, _schema_editor):
    """Data migration: derive Semester.is_archived from each semester's courses."""
    Semester = apps.get_model('evaluation', 'Semester')
    for sem in Semester.objects.all():
        sem.is_archived = is_semester_archived(sem)
        sem.save()
class Migration(migrations.Migration):
    # Runs after the field was added in 0040; forward fills is_archived,
    # reverse is a no-op (the field is simply left as-is on rollback).
    dependencies = [
        ('evaluation', '0040_add_semester_is_archived'),
    ]
    operations = [
        migrations.RunPython(set_is_archived, reverse_code=migrations.RunPython.noop),
    ]
| 1.851563 | 2 |
codigo/Live173/exemplo_tk.py | BrunoPontesLira/live-de-python | 572 | 12772586 | from tkinter import Tk, Label
# Minimal Tk demo: a single window showing one large label.
window = Tk()
banner = Label(window, text='Live de Python', font=('Arial', 30))
banner.pack()
window.mainloop()
| 3 | 3 |
flightrl/rpg_baselines/ppo/ppo2_test.py | MarioBonse/flightmare | 596 | 12772587 | <reponame>MarioBonse/flightmare<filename>flightrl/rpg_baselines/ppo/ppo2_test.py<gh_stars>100-1000
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
def test_model(env, model, render=False):
    """Roll out a trained PPO policy and plot state/action traces.

    Runs ``num_rollouts`` episodes in ``env`` using deterministic actions
    from ``model`` and plots position, velocity, Euler angles, angular
    rates and the four action channels, one colour per rollout.

    Assumes the 12-dim observation layout [pos(3), euler(3), vel(3),
    euler-rates(3)] and a 4-dim action -- TODO confirm against the env.
    """
    #
    fig = plt.figure(figsize=(18, 12), tight_layout=True)
    gs = gridspec.GridSpec(5, 12)
    # Row 0: position components.
    ax_x = fig.add_subplot(gs[0, 0:4])
    ax_y = fig.add_subplot(gs[0, 4:8])
    ax_z = fig.add_subplot(gs[0, 8:12])
    # Row 1: linear velocity components.
    ax_dx = fig.add_subplot(gs[1, 0:4])
    ax_dy = fig.add_subplot(gs[1, 4:8])
    ax_dz = fig.add_subplot(gs[1, 8:12])
    # Row 2: Euler angles.
    ax_euler_x = fig.add_subplot(gs[2, 0:4])
    ax_euler_y = fig.add_subplot(gs[2, 4:8])
    ax_euler_z = fig.add_subplot(gs[2, 8:12])
    # Row 3: Euler angle rates.
    ax_euler_vx = fig.add_subplot(gs[3, 0:4])
    ax_euler_vy = fig.add_subplot(gs[3, 4:8])
    ax_euler_vz = fig.add_subplot(gs[3, 8:12])
    # Row 4: the four action channels.
    ax_action0 = fig.add_subplot(gs[4, 0:3])
    ax_action1 = fig.add_subplot(gs[4, 3:6])
    ax_action2 = fig.add_subplot(gs[4, 6:9])
    ax_action3 = fig.add_subplot(gs[4, 9:12])

    max_ep_length = env.max_episode_steps
    num_rollouts = 5

    if render:
        env.connectUnity()
    for n_roll in range(num_rollouts):
        pos, euler, dpos, deuler = [], [], [], []
        actions = []
        obs, done, ep_len = env.reset(), False, 0
        while not (done or (ep_len >= max_ep_length)):
            act, _ = model.predict(obs, deterministic=True)
            obs, rew, done, infos = env.step(act)
            #
            ep_len += 1
            # Slice the first environment's observation into its components.
            pos.append(obs[0, 0:3].tolist())
            dpos.append(obs[0, 6:9].tolist())
            euler.append(obs[0, 3:6].tolist())
            deuler.append(obs[0, 9:12].tolist())
            #
            actions.append(act[0, :].tolist())
        pos = np.asarray(pos)
        dpos = np.asarray(dpos)
        euler = np.asarray(euler)
        deuler = np.asarray(deuler)
        actions = np.asarray(actions)
        # One trace per rollout, coloured by rollout index.
        t = np.arange(0, pos.shape[0])
        ax_x.step(t, pos[:, 0], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_y.step(t, pos[:, 1], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_z.step(t, pos[:, 2], color="C{0}".format(
            n_roll), label="pos [x, y, z] -- trail: {0}".format(n_roll))
        #
        ax_dx.step(t, dpos[:, 0], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_dy.step(t, dpos[:, 1], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_dz.step(t, dpos[:, 2], color="C{0}".format(
            n_roll), label="vel [x, y, z] -- trail: {0}".format(n_roll))
        # NOTE(review): the Euler plots use column order [-1, 0, 1] while the
        # position plots use [0, 1, 2] -- presumably a deliberate axis
        # remapping; confirm against the environment's angle convention.
        ax_euler_x.step(t, euler[:, -1], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_euler_y.step(t, euler[:, 0],  color="C{0}".format(
            n_roll), label="trail :{0}".format(n_roll))
        ax_euler_z.step(t, euler[:, 1],  color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        #
        ax_euler_vx.step(t, deuler[:, -1], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_euler_vy.step(t, deuler[:, 0], color="C{0}".format(
            n_roll), label="trail :{0}".format(n_roll))
        ax_euler_vz.step(t, deuler[:, 1], color="C{0}".format(
            n_roll), label=r"$\theta$ [x, y, z] -- trail: {0}".format(n_roll))
        #
        ax_action0.step(t, actions[:, 0], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_action1.step(t, actions[:, 1], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_action2.step(t, actions[:, 2], color="C{0}".format(
            n_roll), label="trail: {0}".format(n_roll))
        ax_action3.step(t, actions[:, 3], color="C{0}".format(
            n_roll), label="act [0, 1, 2, 3] -- trail: {0}".format(n_roll))
    #
    if render:
        env.disconnectUnity()
    # A single legend per row (rightmost subplot carries the row title label).
    ax_z.legend()
    ax_dz.legend()
    ax_euler_z.legend()
    ax_euler_vz.legend()
    ax_action3.legend()
    #
    plt.tight_layout()
    plt.show()
| 2.015625 | 2 |
Aula 03/While.py | IsaacPSilva/LetsCode | 0 | 12772588 | ### Malha de repetição (loop) - While
'''
O while é bastante parecido com um 'if': ele possui uma expressão,
e é executado caso ela seja verdadeira.
Mas o if é executado apenas uma vez e depois o código segue adiante.
O while não: ao final de sua execução, ele torna a testar a expressão,
e caso ela seja verdadeira, ele repete sua execução.
'''
'''
Uma utilidade interessante do while é obrigar o usuário a
digitar apenas entradas válidas.
'''
# o exemplo abaixo não aceita um salário menor do que o mínimo atual:
salario = 0.0
while salario < 998.0:
salario = float(input('Digite o seu salário: '))
print('Você ganha ', salario)
'''
Todo tipo de código que deve se repetir várias vezes pode ser feito
com o while, como somar vários valores, gerar uma sequência etc.
Nestes casos, é normal utilizar um contador:
'''
numero = int(input('Digite quantas provas você fez: '))
contador = 1
soma = 0
while contador <= numero:
nota = float(input('Digite a nota da prova ' + str(contador) + ':'))
soma = soma + nota
contador = contador + 1
media = soma/numero
print('Você fechou com média:', media)
'''
Um jeito de forçar um loop a ser interrompido é utilizando o comando 'break'.
O loop abaixo em tese seria infinito, mas se a condição do if for verificada,
o break é executado e conseguimos escapar do loop:
'''
while True:
resposta = input('Digite SAIR para sair: ')
if resposta == 'SAIR':
break
else:
print('E lá vamos nós de novo...')
| 4.25 | 4 |
configuration/config.py | akhfzz/FastAPI-Shorten-case | 0 | 12772589 | from fastapi import FastAPI
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseSettings
# Root FastAPI application shared by the whole project.
app = FastAPI()
class Settings(BaseSettings):
    """Application settings (pydantic BaseSettings, overridable via env vars)."""
    env: str = 'production'
    # NOTE(review): secret key hard-coded in source -- move to an environment
    # variable or secret store before deploying.
    SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
    JWT_ALGORITHM = 'HS256'
    # Token lifetime -- presumably minutes; confirm with the token issuer.
    EXPIRE_JWT = 25
settings = Settings()
# db setup
# NOTE(review): root user with empty password in the connection string --
# replace with proper credentials / configuration before production use.
DATABASE_URI = 'mysql+mysqlconnector://root:@localhost/shorten_url'
engine = create_engine(DATABASE_URI)
Base = declarative_base()
# CORS: allowed origins depend on the configured environment.
origins = dict(
    development=["http://localhost:8000"],
    production=["http://localhost:3000"]
)
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins[settings.env],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
| 2.1875 | 2 |
tests/test_stream_handlers.py | vforgione/lager | 0 | 12772590 | <gh_stars>0
import codecs
import io
import os.path
from capturer import CaptureOutput
from lager.enums import Verbosity
from lager.handlers import StreamHandler, StdErrHandler, StdOutHandler
class TestStreamHandler:
    """StreamHandler writes entries verbatim to its target stream and
    suppresses entries below its configured verbosity."""

    def setup_method(self, mtd):
        self.stream = io.StringIO()
        self.handler = StreamHandler(stream=self.stream)

    def _written(self):
        # Rewind and return everything written to the stream so far.
        self.stream.seek(0)
        return self.stream.getvalue()

    def test_write_entry(self):
        self.handler.write_entry('Hello, world!', verbosity=Verbosity.info)
        assert self._written() == 'Hello, world!'

    def test_write_entry_non_ascii(self):
        self.handler.write_entry('안녕하세요', verbosity=Verbosity.info)
        assert self._written() == '안녕하세요'

    def test_write_entry_refuses_verbosity_insufficient(self):
        self.handler.verbosity = Verbosity.exception
        for level in (Verbosity.debug, Verbosity.info,
                      Verbosity.warning, Verbosity.error):
            self.handler.write_entry('Hello, world!', level)
            assert self._written() == ''
class TestStdOutHandler:
    """StdOutHandler writes entries verbatim to standard output."""

    def setup_method(self, mtd):
        self.handler = StdOutHandler()

    def _capture(self, entry):
        # Capture process-level stdout around a single write.
        with CaptureOutput() as captured:
            self.handler.write_entry(entry, verbosity=Verbosity.info)
            return captured.get_text()

    def test_write_entry(self):
        assert self._capture('Hello, world!') == 'Hello, world!'

    def test_write_entry_non_ascii(self):
        assert self._capture('안녕하세요') == '안녕하세요'
class TestStdErrHandler:
    """StdErrHandler writes entries verbatim to standard error, without
    truncating even very long lines."""

    def setup_method(self, mtd):
        self.handler = StdErrHandler()

    def _capture(self, entry):
        with CaptureOutput() as captured:
            self.handler.write_entry(entry, verbosity=Verbosity.info)
            return captured.get_text()

    def test_write_entry(self):
        assert self._capture('Hello, world!') == 'Hello, world!'

    def test_write_entry_non_ascii(self):
        assert self._capture('안녕하세요') == '안녕하세요'

    def test_write_massive_entry_doesnt_truncate(self):
        # Regression check: stdout/stderr logging has been seen truncating
        # very long lines (typically json dumps); ensure that does not happen.
        fixture = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'fixtures', 'lorem-ipsum.txt')
        with codecs.open(fixture, 'r', 'utf8') as fh:
            entry = fh.read()
        with CaptureOutput() as captured:
            for _ in range(4):
                self.handler.write_entry(entry, verbosity=Verbosity.info)
            output = captured.get_text()
        lines = output.split('\n')
        assert len(lines) == 4
        for line in lines:
            assert line.endswith('Nunc fermentum elit a dolor rhoncus varius.')
            assert len(line) == 81773
| 2.4375 | 2 |
test_numpy_embedding.py | morpheusthewhite/twitter-sent-dnn | 314 | 12772591 | <gh_stars>100-1000
import theano
import numpy as np
from dcnn import WordEmbeddingLayer
from dcnn_train import WordEmbeddingLayer as TheanoWordEmbeddingLayer
from test_util import assert_matrix_eq
########### NUMPY ###########
# Build a small random embedding table and a batch of 3 sentences of 6
# random word indices each, then compute the lookup with the numpy layer.
vocab_size, embed_dm = 10, 5
embeddings = np.random.rand(vocab_size, embed_dm)
sents = np.asarray(np.random.randint(10, size = (3, 6)),
                   dtype = np.int32)
np_l = WordEmbeddingLayer(embeddings)
actual = np_l.output(sents)
########### THEANO ###########
# Mirror the same lookup through the theano training-time layer, sharing
# the identical embedding matrix so outputs must match exactly.
x_symbol = theano.tensor.imatrix('x') # the word indices matrix
th_l = TheanoWordEmbeddingLayer(rng = np.random.RandomState(1234),
                                input = x_symbol,
                                vocab_size = vocab_size,
                                embed_dm = embed_dm,
                                embeddings = theano.shared(value = embeddings,
                                                           name = "embeddings"
                                                       )
                            )
f = theano.function(inputs = [x_symbol],
                    outputs = th_l.output)
expected = f(sents)
# Cross-check numpy inference path against the theano reference.
assert_matrix_eq(actual, expected, "Embedding")
| 2.453125 | 2 |
pyexamples/SimpleVideo.py | slovak194/Pangolin | 1 | 12772592 | <reponame>slovak194/Pangolin<gh_stars>1-10
import sys
from pathlib import Path
import os
from matplotlib import pyplot as plt
import argparse
import numpy as np
import time
# add pangolin to PYTHONPATH
homeDir = str(Path.home())
sys.path.append(os.path.join(homeDir, 'ws/Pangolin/build/src'))
# import pypangolin
import pypangolin as pango
def main(flags):
    """Read a .pango video frame by frame, re-writing it and showing streams.

    ``flags`` carries .pango (input uri), .pangoOut (optional output uri)
    and .stream (optional single stream index to display).
    """
    vid_uri = flags.pango
    vout_uri = flags.pangoOut
    stream = flags.stream
    if vout_uri is None:
        vout_uri = "pango://demo.pango"
    vid = pango.VideoInput(vid_uri)
    vout = pango.VideoOutput(vout_uri)
    # print metadata
    print("Opened video uri: '{}' with {} x {} dimensions".format(
        vid_uri,vid.Width(),vid.Height()))
    fmt = vid.PixFormat()
    print("format: channels: {}, channel_bits: {}, planar: {}".format(
        fmt.channels, fmt.bpp, fmt.planar))
    # initialize display: one matplotlib row per displayed stream
    allStreams = vid.Grab()
    numstreams = len(allStreams)
    streams = [stream] if stream else list(range(numstreams))
    assert streams[-1] < numstreams, 'specified stream {} is out of bnd'.format(stream)
    fig, axes = plt.subplots(len(streams), 1, figsize=(12, 12*len(streams)), squeeze=False)
    fig.show()
    # show each frame until Grab() returns an empty/falsy result
    frameCounter = 0
    while (allStreams):
        # if frameCounter > 20:
        #     break
        vout.WriteStreams(allStreams);
        for i, s in enumerate(streams):
            arr = allStreams[s]
            # print(arr.shape)
            axes[i,0].cla()
            # Single-channel frames are rendered as greyscale images.
            if arr.shape[-1] == 1:
                axes[i,0].imshow(np.squeeze(arr), cmap='Greys')
            else:
                axes[i,0].imshow(arr)
        # grab the next frame
        allStreams = vid.Grab()
        frameCounter += 1
        # update figures
        fig.canvas.draw()
        # progress printout every 10 frames
        if frameCounter % 10 == 0:
            print('frame: {}'.format(frameCounter))
if __name__ == "__main__":
    # Command-line flags: input/output pango files and an optional stream index.
    parser = argparse.ArgumentParser(
        'Read a .pango file frame by frame.')
    parser.add_argument(
        '--pango', type=str,
        help='path to the input pango file.')
    parser.add_argument(
        '--pangoOut', type=str, default=None,
        help='path to the output pango file.')
    parser.add_argument(
        '--stream', type=int, default=None,
        help='stream to open.')
    FLAGS = parser.parse_args()
    # main function
    main(FLAGS)
| 2.421875 | 2 |
printing/main.py | notatallshaw/Grail-0.6 | 0 | 12772593 | <reponame>notatallshaw/Grail-0.6<gh_stars>0
"""HTML to PostScript translator.
This module uses the AbstractWriter class interface defined by in the
standard formatter module to generate PostScript corresponding to a
stream of HTML text. The HTMLParser class scans the HTML stream,
generating high-level calls to an AbstractWriter object.
Note that this module can be run as a standalone script for command
line conversion of HTML files to PostScript. Use the '-h' option to
see information about all-too-many command-line options.
"""
# standard library
import os
import posixpath
import string
import sys
import traceback
import urllib
import urllib.request
from types import TupleType  # NOTE(review): Python 2 only; gone in Python 3
from urllib.parse import urlparse, urlunparse

# local modules:
import epstools
import fonts  # nested package
import utils
import PSParser
import PSWriter
from grailbase.uricontext import URIContext
# Must stay truthy: run() only uses the page-break branch and the comment
# below records that the alternative path is known broken.
MULTI_DO_PAGE_BREAK = 1 # changing this breaks stuff

# The main program. Really needs to be broken up a bit!
def run(app):
    """Command-line driver: parse options, then convert HTML to PostScript.

    Reads HTML from a file/URL argument or stdin and writes PostScript to
    a file, stdout, or an lpr pipe.  In --multi mode it also walks and
    appends same-site subdocuments.

    NOTE(review): this function still contains Python 2 remnants
    (string.atoi/atof/strip, `printing.paper` used without an
    `import printing` in scope) and shadows the builtins `help` and `vars`-
    style names -- they are flagged inline but left untouched here.
    """
    global logfile
    import getopt
    import paper
    import settings
    settings = settings.get_settings(app.prefs)
    # do this after loading the settings so the user can just call
    # get_settings() w/out an arg to get a usable object.
    load_rcscript()
    # Option defaults; `help` intentionally mirrors the original (shadows builtin).
    context = None
    help = None
    error = 0
    logfile = None
    title = ''
    url = ''
    tabstop = None
    multi = 0
    verbose = 0
    printer = None
    copies = 1
    levels = None
    outfile = None
    #
    try:
        options, args = getopt.getopt(sys.argv[1:],
                                      'mvhdcaUl:u:t:sp:o:f:C:P:T:',
                                      ['color',
                                       'copies=',
                                       'debug',
                                       'fontsize=',
                                       'footnote-anchors',
                                       'help',
                                       'images',
                                       'logfile=',
                                       'multi',
                                       'orientation=',
                                       'output=',
                                       'papersize=',
                                       'paragraph-indent=',
                                       'paragraph-skip=',
                                       'printer=',
                                       'strict-parsing',
                                       'tab-width=',
                                       'tags=',
                                       'title=',
                                       'underline-anchors',
                                       'url=',
                                       'verbose',
                                       ])
    except getopt.error as err:
        error = 1
        help = 1
        options = ()
        sys.stderr.write("option failure: %s\n" % err)
    for opt, arg in options:
        if opt in ('-h', '--help'):
            help = 1
        elif opt in ('-a', '--footnote-anchors'):
            settings.footnoteflag = not settings.footnoteflag
        elif opt in ('-i', '--images'):
            settings.imageflag = not settings.imageflag
        elif opt in ('-d', '--debug'):
            utils.set_debugging(1)
        elif opt in ('-l', '--logfile'):
            logfile = arg
        elif opt in ('-o', '--orientation'):
            settings.orientation = arg
        elif opt in ('-f', '--fontsize'):
            settings.set_fontsize(arg)
        elif opt in ('-t', '--title'):
            title = arg
        elif opt in ('-u', '--url'):
            url = arg
        elif opt in ('-U', '--underline-anchors'):
            settings.underflag = not settings.underflag
        elif opt in ('-c', '--color'):
            settings.greyscale = not settings.greyscale
        elif opt in ('-p', '--papersize'):
            settings.papersize = arg
        elif opt in ('-s', '--strict-parsing'):
            settings.strict_parsing = not settings.strict_parsing
        elif opt in ('-C', '--copies'):
            # NOTE(review): string.atoi was removed in Python 3; int(arg) intended.
            copies = string.atoi(arg)
        elif opt in ('-P', '--printer'):
            printer = arg
        elif opt in ('-T', '--tab-width'):
            # NOTE(review): string.atof was removed in Python 3; float(arg) intended.
            tabstop = string.atof(arg)
        elif opt in ('-m', '--multi'):
            multi = 1
        elif opt in ('-v', '--verbose'):
            verbose = verbose + 1
        elif opt == '--output':
            outfile = arg
        elif opt == '--tags':
            if not load_tag_handler(app, arg):
                error = 2
                help = 1
        elif opt == '--paragraph-indent':
            # negative indents should indicate hanging indents, but we don't
            # do those yet, so force to normal interpretation
            settings.paragraph_indent = max(string.atof(arg), 0.0)
        elif opt == '--paragraph-skip':
            settings.paragraph_skip = max(string.atof(arg), 0.0)
    if help:
        usage(settings)
        sys.exit(error)
    # crack open log file if given
    stderr = sys.stderr
    if logfile:
        try: sys.stderr = open(logfile, 'a')
        except IOError: sys.stderr = stderr
    utils.debug("Using Python version " + sys.version)
    # crack open the input file, or stdin
    outfp = None
    if printer:
        if copies < 1:
            copies = 1
        outfile = "|lpr -#%d -P%s" % (copies, printer)
    if args:
        infile = args[0]
        if args[1:]:
            multi = 1
        infp, outfn = open_source(infile)
        if not outfile:
            outfile = (os.path.splitext(outfn)[0] or 'index') + '.ps'
    else:
        infile = None
        infp = sys.stdin
        outfile = '-'
    #
    # open the output file
    #
    if outfile[0] == '|':
        # NOTE(review): string.strip was removed in Python 3; str.strip intended.
        cmd = string.strip(outfile[1:])
        outfile = '|' + cmd
        outfp = os.popen(cmd, 'w')
    elif outfile == '-':
        outfp = sys.stdout
    else:
        outfp = open(outfile, 'w')
    if outfile != '-':
        print('Outputting PostScript to', outfile)
    if url:
        context = URIContext(url)
    elif infile:
        url = infile
        context = URIContext(url)
    else:
        # BOGOSITY: reading from stdin
        context = URIContext("file:/index.html")
    context.app = app
    # NOTE(review): `printing.paper` -- only `import paper` is in scope here;
    # this looks like it would raise NameError. Confirm against the package
    # layout before relying on this path.
    paper = printing.paper.PaperInfo(settings.papersize,
                                     margins=settings.margins,
                                     rotation=settings.orientation)
    if tabstop and tabstop > 0:
        paper.TabStop = tabstop
    if utils.get_debugging('paper'):
        paper.dump()
    # create the writer & parser
    fontsize, leading = settings.get_fontsize()
    w = PSWriter.PSWriter(outfp, title or None, url or '',
                          #varifamily='Palatino',
                          paper=paper, settings=settings)
    ctype = "text/html"
    mod = app.find_type_extension("printing.filetypes", ctype)
    if not mod.parse:
        sys.exit("cannot load printing support for " + ctype)
    p = mod.parse(w, settings, context)
    if multi:
        # Choose subdocument discovery: explicit list from the command line,
        # or crawl same-site links starting at the base document.
        if args[1:]:
            xform = explicit_multi_transform(args[1:])
        else:
            xform = multi_transform(context, levels)
        p.add_anchor_transform(xform)
        p.feed(infp.read())
        docs = [(context.get_url(), 1, w.ps.get_title(), 1)]
        #
        # This relies on xform.get_subdocs() returning the list used
        # internally to accumulate subdocs. Make a copy to go only one
        # level deep.
        #
        for url in xform.get_subdocs():
            xform.set_basedoc(url)
            # Unwind any open SGML tags before starting the next document.
            while p.sgml_parser.get_depth():
                p.sgml_parser.lex_endtag(p.sgml_parser.get_stack()[0])
            try:
                infp, fn = open_source(url)
            except IOError as err:
                if verbose and outfp is not sys.stdout:
                    print("Error opening subdocument", url)
                    print("  ", err)
            else:
                new_ctype = get_ctype(app, url, infp)
                if new_ctype != ctype:
                    if verbose:
                        print("skipping", url)
                        print("  wrong content type:", new_ctype)
                    continue
                if verbose and outfp is not sys.stdout:
                    print("Subdocument", url)
                w.ps.close_line()
                if MULTI_DO_PAGE_BREAK: # must be true for now, not sure why
                    pageend = w.ps.push_page_end()
                    context.set_url(url)
                    w.ps.set_pageno(w.ps.get_pageno() + 1)
                    w.ps.set_url(url)
                    w.ps.push_page_start(pageend)
                else:
                    context.set_url(url)
                    w.ps.set_url(url)
                pageno = w.ps.get_pageno()
                p.feed(infp.read())
                infp.close()
                title = w.ps.get_title()
                p._set_docinfo(url, pageno, title)
                spec = (url, pageno, title, xform.get_level(url))
                docs.append(spec)
    else:
        p.feed(infp.read())
    p.close()
    w.close()
# Lots of helper functions....
def load_tag_handler(app, arg):
    """Register extra HTML tag handlers from a directory or a '.py' file.

    Returns 1 on success, 0 (after printing an explanation to stderr via a
    redirected stdout) on failure.
    """
    loader = app.get_loader("html.postscript")
    narg = os.path.join(os.getcwd(), arg)
    if os.path.isdir(narg):
        loader.add_directory(narg)
    elif os.path.isfile(narg):
        basename, ext = os.path.splitext(narg)
        if ext != ".py":
            sys.stdout = sys.stderr
            print ("Extra tags must be defined in a"
                   " Python source file with '.py' extension.")
            # BUG FIX: a bare `print` expression is a no-op in Python 3;
            # print() was intended to emit the blank line.
            print()
            return 0
        dirname, modname = os.path.split(basename)
        oldpath = sys.path
        try:
            sys.path = [dirname] + oldpath
            # BUG FIX: exec("import %s ; mod = %s" % ...) cannot reliably bind
            # a function-local `mod` under Python 3; import explicitly instead.
            import importlib
            mod = importlib.import_module(modname)
            loader.load_tag_handlers(mod)
        finally:
            sys.path = oldpath
    else:
        sys.stdout = sys.stderr
        print("Could not locate tag handler", arg)
        print()
        print("Argument to --tags must be a directory to be added to the html")
        print("package or a file containing tag handler functions.  The tag")
        print("handlers defined in the directory or file will take precedence")
        print("over any defined in other extensions.")
        print()
        return 0
    return 1
def get_ctype(app, url, infp):
    """Best-effort MIME type: prefer the stream's own headers, else guess
    from the URL via the application object."""
    try:
        headers = infp.info()
        return headers["content-type"]
    except (AttributeError, KeyError):
        # Plain files have no .info(); URL responses may lack the header.
        return app.guess_type(url)[0]
def load_rcscript():
    """Import the user's optional ~/.grail/user/html2psrc customization module.

    Silently does nothing outside a Grail environment; a crash inside the
    user's rc script is reported but never propagated (the broad except is
    deliberate -- user code must not break the converter).
    """
    try:
        import grailutil
    except ImportError:
        # Not running with Grail installed; no user customization possible.
        return
    graildir = grailutil.getgraildir()
    userdir = os.path.join(graildir, "user")
    if os.path.isdir(userdir):
        sys.path.insert(0, userdir)
    try:
        import html2psrc
    except ImportError:
        # No rc script present -- perfectly normal.
        pass
    except:
        traceback.print_exc()
        sys.stderr.write("[Traceback generated in html2psrc module.]\n")
def open_source(infile):
    """Open *infile* as a local file, falling back to fetching it as a URL.

    Returns (file_object, name) where name is the original path for local
    files, or the URL path's basename for fetched documents.
    """
    try:
        infp = open(infile, 'r')
    except IOError:
        # derive file object via URL; still needs to be HTML.
        # BUG FIX: urllib.urlopen() no longer exists in Python 3;
        # urllib.request.urlopen() is the current spelling.
        infp = urllib.request.urlopen(infile)
        # use posixpath since URLs are expected to be POSIX-like; don't risk
        # that we're running on NT and os.path.basename() doesn't "do the
        # right thing."
        fn = posixpath.basename(urlparse(infile)[2])
    else:
        fn = infile
    return infp, fn
class multi_transform:
    """Anchor transform that collects same-site subdocuments for --multi runs.

    Only URLs sharing the base document's scheme/host and living under its
    directory are collected; each is recorded once together with the tree
    level at which it was discovered.

    BUG FIXES vs. the original: dict.has_key(), string.lower() and
    urllib.urlunparse() are Python 2 only and raise AttributeError /
    NameError under Python 3; replaced with `in`, str.lower() and
    urllib.parse.urlunparse (see the module import block).
    """

    def __init__(self, context, levels=None):
        self.__app = context.app
        baseurl = context.get_baseurl()
        scheme, netloc, path, params, query, frag = urlparse(baseurl)
        self.__scheme = scheme
        self.__netloc = netloc.lower()
        self.__path = os.path.dirname(path)
        self.__subdocs = []
        self.__max_levels = levels
        self.__level = 0
        # Maps canonical URL -> discovery level; the base doc is level 0.
        self.__docs = {baseurl: 0}

    def __call__(self, url, attrs):
        scheme, netloc, path, params, query, frag = urlparse(url)
        if params or query:             # safety restraint
            return url
        netloc = netloc.lower()
        if scheme != self.__scheme or netloc != self.__netloc:
            return url
        # Canonicalize: drop params/query/fragment before dedup checks.
        stored_url = urlunparse((scheme, netloc, path, '', '', ''))
        if stored_url in self.__docs:
            return url
        # Only descend into the base document's directory subtree.
        if len(path) < len(self.__path):
            return url
        if path[:len(self.__path)] != self.__path:
            return url
        if (not self.__max_levels) \
           or (self.__max_levels and self.__level < self.__max_levels):
            self.__docs[stored_url] = self.__level + 1
            self.insert(stored_url)
        return url

    def get_subdocs(self):
        # Deliberately returns the internal list; run() relies on iterating
        # it while new subdocuments are still being appended.
        return self.__subdocs

    __base_index = None

    def set_basedoc(self, url):
        level = 1
        if url in self.__docs:
            level = self.__docs[url]
        self.__level = level
        self.__current_base = url
        try:
            self.__base_index = self.__subdocs.index(url)
        except ValueError:
            self.__base_index = None

    def insert(self, url):
        # Insert after the current base doc's directory group so documents
        # from the same directory stay together; otherwise append.
        if self.__base_index is not None:
            i = self.__base_index + 1
            scheme, netloc, path, x, y, z = urlparse(url)
            basepath = os.path.dirname(path)
            while i < len(self.__subdocs):
                scheme, netloc, path, x, y, z = urlparse(
                    self.__subdocs[i])
                path = os.path.dirname(path)
                i = i + 1
                if path != basepath:
                    break
            self.__subdocs.insert(i, url)
            return
        self.__subdocs.append(url)

    def get_level(self, url):
        return self.__docs[url]
class explicit_multi_transform:
    """Anchor transform over a fixed, caller-supplied list of subdocuments.

    BUG FIX: map(None, xs) was the Python 2 idiom for a shallow list copy;
    under Python 3 iterating it raises TypeError.  list() restores the
    intended copy semantics.
    """

    def __init__(self, subdocs):
        # Defensive copy so later caller mutations do not leak in.
        self.__subdocs = list(subdocs)

    def __call__(self, url, attrs):
        # Explicit mode never rewrites or collects anchors.
        return url

    def get_subdocs(self):
        # Return a copy so callers cannot mutate the internal list.
        return list(self.__subdocs)

    def set_basedoc(self, url):
        pass

    def get_level(self, url):
        # All explicitly-listed documents count as first-level subdocs.
        return 1
def usage(settings):
    """Print the command-line help text, reflecting current defaults."""
    import printing.paper
    #
    progname = os.path.basename(sys.argv[0])
    print('Usage:', progname, '[options] [file-or-url]')
    print('    -u: URL for footer')
    print('    -t: title for header')
    print('    -a: toggle anchor footnotes (default is %s)' \
          % _onoff(settings.footnoteflag))
    print('    -U: toggle anchor underlining (default is %s)' \
          % _onoff(settings.underflag))
    print('    -o: orientation; portrait, landscape, or seascape')
    # BUG FIX: the Python 2 trailing-comma continuation (`print x,`) was
    # lost in conversion, splitting this entry onto two lines; end=' '
    # restores the single-line output.
    print('    -p: paper size; letter, legal, a4, etc.', end=' ')
    print('(default is %s)' % settings.papersize)
    print('    -f: font size, in points (default is %s/%s)' \
          % settings.get_fontsize())
    print('    -d: turn on debugging')
    print('    -l: logfile for debugging, otherwise stderr')
    print('    -s: toggle "advanced" SGML recognition (default is %s)'\
          % _onoff(settings.strict_parsing))
    print('    -T: size of tab stop in points (default is %s)' \
          % printing.paper.PaperInfo.TabStop)
    print('    -P: specify output printer')
    print('    -m: descend tree starting from specified document,')
    print('        printing all HTML documents found')
    print('    -h: this help message')
    print('[file]: file to convert, otherwise from stdin')
def _onoff(bool):
return bool and "ON" or "OFF"
# main() & relations....
import BaseApplication
class Application(BaseApplication.BaseApplication):
    """Minimal application object for command-line use: read-only global
    history, errors reported to stderr instead of a dialog."""

    def __init__(self, prefs=None):
        BaseApplication.BaseApplication.__init__(self, prefs)
        import GlobalHistory
        self.global_history = GlobalHistory.GlobalHistory(self, readonly=1)

    def exception_dialog(self, message='', *args):
        # No GUI here -- dump the traceback and any message to stderr.
        traceback.print_exc()
        if not message:
            return
        sys.stderr.write(message + "\n")
sys.stderr.write(message + "\n")
def main():
    """Command-line entry point: build the app object and run the converter."""
    application = Application()
    try:
        run(application)
    except KeyboardInterrupt:
        # Show the traceback only when debugging; always exit nonzero.
        if utils.get_debugging():
            application.exception_dialog()
        sys.exit(1)
def profile_main(n=18):
    """Run main() under the profiler and print the top *n* hot spots."""
    import profile, pstats
    print("Running under profiler....")
    profiler = profile.Profile()
    try:
        profiler.runctx('main()', globals(), locals())
    finally:
        # NOTE(review): `logfile` holds a path string or None, not a file
        # object -- assigning it to sys.stdout looks wrong; presumably this
        # meant to restore a redirected stream.  Confirm intent.
        sys.stdout = logfile
    profiler.dump_stats('@html2ps.prof')
    p = pstats.Stats('@html2ps.prof')
    # Top-n by own time, then callers, then by cumulative time.
    p.strip_dirs().sort_stats('time').print_stats(n)
    p.print_callers(n)
    p.sort_stats('cum').print_stats(n)
| 2.109375 | 2 |
python-learn/mult_process/process-pool.py | ljm516/python-repo | 0 | 12772594 | import os
import random
import time
from multiprocessing import Pool
def run_task(name):
    """Worker body for the process pool: log start, sleep 0-3 s, log end.

    Fix: the original called ``random.randm()``, which does not exist and
    raised AttributeError in every worker; the intended call is
    ``random.random()``.
    """
    print('Task {task_name} (pid={pid}) is Running...'.format(task_name=name, pid=os.getpid()))
    time.sleep(random.random() * 3)
    print('Task {task_name} end.'.format(task_name=name))
if __name__ == '__main__':
    # Parent spawns a pool of 3 workers and submits 5 tasks, so at most
    # three tasks run concurrently.
    print('Current process {pid}'.format(pid=os.getpid()))
    p = Pool(processes=3)
    for i in range(5):
        p.apply_async(run_task, args=(i,))
    print('waiting for all subprocess done....')
    # close() refuses new submissions; join() blocks until workers finish.
    p.close()
    p.join()
    print('All subprocesses done')
| 3.09375 | 3 |
recipes/structopt/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12772595 | import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class StructoptConan(ConanFile):
    """Conan recipe packaging the header-only `structopt` C++17 library."""
    name = "structopt"
    homepage = "https://github.com/p-ranav/structopt"
    url = "https://github.com/conan-io/conan-center-index"
    description = "Parse command line arguments by defining a struct+"
    license = "MIT"
    settings = "compiler", "os"
    topics = ("conan", "structopt", "argument-parser", "cpp17", "header-only",
            "single-header-lib", "header-library", "command-line", "arguments",
            "mit-license", "modern-cpp", "structopt", "lightweight", "reflection",
            "cross-platform", "library", "type-safety", "type-safe", "argparse",
            "clap", "visit-struct-library", "magic-enum")
    # Header-only package: sources never need to be copied into the build tree.
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # Conventional conan-center name for the extracted upstream sources.
        return "source_subfolder"

    @property
    def _supported_compiler(self):
        # Minimum compiler versions accepted by this recipe; unknown
        # compilers fall through to a warning and are treated as unsupported.
        compiler = str(self.settings.compiler)
        version = tools.Version(self.settings.compiler.version)
        if compiler == "Visual Studio" and version >= "15":
            return True
        elif compiler == "gcc" and version >= "9":
            return True
        elif compiler == "clang" and version >= "5":
            return True
        elif compiler == "apple-clang" and version >= "10":
            return True
        else:
            self.output.warn("{} recipe lacks information about the {} compiler standard version support".format(self.name, compiler))
            return False

    def configure(self):
        # Enforce C++17 and reject compilers this recipe cannot vouch for.
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "17")
        if not self._supported_compiler:
            raise ConanInvalidConfiguration("structopt: Unsupported compiler: {}-{} "
                                            "(https://github.com/p-ranav/structopt#compiler-compatibility).".format(self.settings.compiler, self.settings.compiler.version))

    def source(self):
        # Fetch the release tarball and normalize its folder name.
        tools.get(**self.conan_data["sources"][self.version])
        os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)

    def package(self):
        # Header-only: ship the license and headers, nothing to build.
        self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
        self.copy(pattern="*.h", src=os.path.join(self._source_subfolder, "include"), dst="include")
        self.copy(pattern="*.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")

    def package_id(self):
        # Package content is identical for every compiler/OS combination.
        self.info.header_only()
| 2.28125 | 2 |
classes/Text.py | JeanExtreme002/Aim-Coach | 1 | 12772596 | <filename>classes/Text.py
from pygame import Surface
from pygame import font
class Text(object):
    """
    Draw a text string onto a pygame surface at a fixed position.
    """

    DEFAULT_FONT = ('Comic Sans MS', 30)
    DEFAULT_COLOR = (255, 0, 0)

    def __init__(self, surface, x, y, text=None, text_font=DEFAULT_FONT, text_color=DEFAULT_COLOR):
        """Bind the target surface and initial text attributes.

        :param surface: pygame Surface to draw on
        :param x, y: top-left position of the rendered text
        :param text: initial string (may be None until setText is called)
        :param text_font: (font name, size) pair for pygame.font.SysFont
        :param text_color: RGB tuple used when rendering
        :raises TypeError: if *surface* is not a Surface
        """
        # isinstance (instead of the original `type(...) is Surface`) also
        # accepts Surface subclasses, which blit identically.
        if isinstance(surface, Surface):
            self.__surface = surface
        else:
            raise TypeError('The argument "surface" must be a Surface object.')
        # Fix: removed the redundant `myfont` alias and the duplicate
        # second assignment of self.__surface present in the original.
        self.__font = font.SysFont(*text_font)
        self.__text = text
        self.__color = text_color
        self.__area = [x, y]

    def drawText(self):
        """Render the current text (no antialiasing) and blit it at (x, y)."""
        textSurface = self.__font.render(self.__text, False, self.__color)
        self.__surface.blit(textSurface, self.__area)

    def setFont(self, text_font):
        """Replace the font; *text_font* is a (name, size) pair."""
        self.__font = font.SysFont(*text_font)

    def setText(self, text):
        """Replace the string drawn by drawText()."""
        self.__text = text
| 3.734375 | 4 |
Aeneas/aeneas/aeneas/executejob.py | yalhaizaey/Dreich | 25 | 12772597 | <gh_stars>10-100
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, <NAME> (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, <NAME> (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.executejob.ExecuteJob`, a class to process a job;
* :class:`~aeneas.executejob.ExecuteJobExecutionError`,
* :class:`~aeneas.executejob.ExecuteJobInputError`, and
* :class:`~aeneas.executejob.ExecuteJobOutputError`,
representing errors generated while processing jobs.
"""
from __future__ import absolute_import
from __future__ import print_function
from aeneas.analyzecontainer import AnalyzeContainer
from aeneas.container import Container
from aeneas.container import ContainerFormat
from aeneas.executetask import ExecuteTask
from aeneas.job import Job
from aeneas.logger import Loggable
from aeneas.runtimeconfiguration import RuntimeConfiguration
import aeneas.globalfunctions as gf
class ExecuteJobExecutionError(Exception):
    """
    Error raised when the execution of the job fails for internal reasons.

    Raised by :func:`ExecuteJob.execute` when the job is missing or empty,
    exceeds the configured maximum number of tasks, or a task fails.
    """
    pass
class ExecuteJobInputError(Exception):
    """
    Error raised when the input parameters of the job are invalid or missing.

    Raised by :func:`ExecuteJob.load_job` and
    :func:`ExecuteJob.load_job_from_container` when the given object or
    container does not yield a valid :class:`~aeneas.job.Job`.
    """
    pass
class ExecuteJobOutputError(Exception):
    """
    Error raised when the creation of the output container failed.

    Raised by :func:`ExecuteJob.write_output_container`.
    """
    pass
class ExecuteJob(Loggable):
    """
    Execute a job, that is, execute all of its tasks
    and generate the output container
    holding the generated sync maps.

    If you do not provide a job object in the constructor,
    you must manually set it later, or load it from a container
    with :func:`~aeneas.executejob.ExecuteJob.load_job_from_container`.

    In the first case, you are responsible for setting
    the absolute audio/text/sync map paths of each task of the job,
    to their actual absolute location on the computing machine.
    Moreover, you are responsible for cleaning up
    any temporary files you might have generated around.

    In the second case, you are responsible for
    calling :func:`~aeneas.executejob.ExecuteJob.clean`
    at the end of the job execution,
    to delete the working directory
    created by :func:`~aeneas.executejob.ExecuteJob.load_job_from_container`
    when creating the job object.

    :param job: the job to be executed
    :type  job: :class:`~aeneas.job.Job`
    :param rconf: a runtime configuration
    :type  rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
    :param logger: the logger object
    :type  logger: :class:`~aeneas.logger.Logger`
    :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if ``job`` is not an instance of ``Job``
    """

    TAG = u"ExecuteJob"

    def __init__(self, job=None, rconf=None, logger=None):
        super(ExecuteJob, self).__init__(rconf=rconf, logger=logger)
        self.job = job
        self.working_directory = None
        self.tmp_directory = None
        if job is not None:
            self.load_job(self.job)

    def load_job(self, job):
        """
        Load the job from the given ``Job`` object.

        :param job: the job to load
        :type  job: :class:`~aeneas.job.Job`
        :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if ``job`` is not an instance of :class:`~aeneas.job.Job`
        """
        if not isinstance(job, Job):
            self.log_exc(u"job is not an instance of Job", None, True, ExecuteJobInputError)
        self.job = job

    def load_job_from_container(self, container_path, config_string=None):
        """
        Load the job from the given :class:`aeneas.container.Container` object.

        If ``config_string`` is ``None``,
        the container must contain a configuration file;
        otherwise use the provided config string
        (i.e., the wizard case).

        :param string container_path: the path to the input container
        :param string config_string: the configuration string (from wizard)
        :raises: :class:`~aeneas.executejob.ExecuteJobInputError`: if the given container does not contain a valid :class:`~aeneas.job.Job`
        """
        self.log(u"Loading job from container...")

        # create working directory where the input container
        # will be decompressed
        self.working_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH])
        self.log([u"Created working directory '%s'", self.working_directory])

        try:
            self.log(u"Decompressing input container...")
            input_container = Container(container_path, logger=self.logger)
            input_container.decompress(self.working_directory)
            self.log(u"Decompressing input container... done")
        except Exception as exc:
            self.clean()
            self.log_exc(u"Unable to decompress container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError)

        try:
            self.log(u"Creating job from working directory...")
            working_container = Container(
                self.working_directory,
                logger=self.logger
            )
            analyzer = AnalyzeContainer(working_container, logger=self.logger)
            self.job = analyzer.analyze(config_string=config_string)
            self.log(u"Creating job from working directory... done")
        except Exception as exc:
            self.clean()
            self.log_exc(u"Unable to analyze container '%s': %s" % (container_path, exc), None, True, ExecuteJobInputError)

        if self.job is None:
            self.log_exc(u"The container '%s' does not contain a valid Job" % (container_path), None, True, ExecuteJobInputError)

        try:
            # set absolute path for text file and audio file
            # for each task in the job
            self.log(u"Setting absolute paths for tasks...")
            for task in self.job.tasks:
                task.text_file_path_absolute = gf.norm_join(
                    self.working_directory,
                    task.text_file_path
                )
                task.audio_file_path_absolute = gf.norm_join(
                    self.working_directory,
                    task.audio_file_path
                )
            self.log(u"Setting absolute paths for tasks... done")
            self.log(u"Loading job from container: succeeded")
        except Exception as exc:
            self.clean()
            self.log_exc(u"Error while setting absolute paths for tasks", exc, True, ExecuteJobInputError)

    def execute(self):
        """
        Execute the job, that is, execute all of its tasks.

        Each produced sync map will be stored
        inside the corresponding task object.

        :raises: :class:`~aeneas.executejob.ExecuteJobExecutionError`: if there is a problem during the job execution
        """
        self.log(u"Executing job")
        if self.job is None:
            self.log_exc(u"The job object is None", None, True, ExecuteJobExecutionError)
        if len(self.job) == 0:
            self.log_exc(u"The job has no tasks", None, True, ExecuteJobExecutionError)
        job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
        if (job_max_tasks > 0) and (len(self.job) > job_max_tasks):
            self.log_exc(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (len(self.job), job_max_tasks), None, True, ExecuteJobExecutionError)
        self.log([u"Number of tasks: '%d'", len(self.job)])
        for task in self.job.tasks:
            # Fix: custom_id is read *before* the try block; in the original
            # it was assigned inside it, so a failure on that very line made
            # the except handler below raise NameError on the unbound name.
            custom_id = task.configuration["custom_id"]
            try:
                self.log([u"Executing task '%s'...", custom_id])
                executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger)
                executor.execute()
                self.log([u"Executing task '%s'... done", custom_id])
            except Exception as exc:
                self.log_exc(u"Error while executing task '%s'" % (custom_id), exc, True, ExecuteJobExecutionError)
            self.log(u"Executing task: succeeded")
        self.log(u"Executing job: succeeded")

    def write_output_container(self, output_directory_path):
        """
        Write the output container for this job.

        Return the path to output container,
        which is the concatenation of ``output_directory_path``
        and of the output container file or directory name.

        :param string output_directory_path: the path to a directory where
                                             the output container must be created
        :rtype: string
        :raises: :class:`~aeneas.executejob.ExecuteJobOutputError`: if there is a problem while writing the output container
        """
        self.log(u"Writing output container for this job")

        if self.job is None:
            self.log_exc(u"The job object is None", None, True, ExecuteJobOutputError)
        if len(self.job) == 0:
            self.log_exc(u"The job has no tasks", None, True, ExecuteJobOutputError)
        self.log([u"Number of tasks: '%d'", len(self.job)])

        # create temporary directory where the sync map files
        # will be created
        # this temporary directory will be compressed into
        # the output container
        self.tmp_directory = gf.tmp_directory(root=self.rconf[RuntimeConfiguration.TMP_PATH])
        self.log([u"Created temporary directory '%s'", self.tmp_directory])

        for task in self.job.tasks:
            custom_id = task.configuration["custom_id"]

            # check if the task has sync map and sync map file path
            if task.sync_map_file_path is None:
                self.log_exc(u"Task '%s' has sync_map_file_path not set" % (custom_id), None, True, ExecuteJobOutputError)
            if task.sync_map is None:
                self.log_exc(u"Task '%s' has sync_map not set" % (custom_id), None, True, ExecuteJobOutputError)

            try:
                # output sync map
                self.log([u"Outputting sync map for task '%s'...", custom_id])
                task.output_sync_map_file(self.tmp_directory)
                self.log([u"Outputting sync map for task '%s'... done", custom_id])
            except Exception as exc:
                # Fix: forward the caught exception to log_exc (the original
                # passed None, discarding the failure details).
                self.log_exc(u"Error while outputting sync map for task '%s'" % (custom_id), exc, True, ExecuteJobOutputError)

        # get output container info
        output_container_format = self.job.configuration["o_container_format"]
        self.log([u"Output container format: '%s'", output_container_format])
        output_file_name = self.job.configuration["o_name"]
        if ((output_container_format != ContainerFormat.UNPACKED) and
                (not output_file_name.endswith(output_container_format))):
            self.log(u"Adding extension to output_file_name")
            output_file_name += "." + output_container_format
        self.log([u"Output file name: '%s'", output_file_name])
        output_file_path = gf.norm_join(
            output_directory_path,
            output_file_name
        )
        self.log([u"Output file path: '%s'", output_file_path])

        try:
            self.log(u"Compressing...")
            container = Container(
                output_file_path,
                output_container_format,
                logger=self.logger
            )
            container.compress(self.tmp_directory)
            self.log(u"Compressing... done")
            self.log([u"Created output file: '%s'", output_file_path])
            self.log(u"Writing output container for this job: succeeded")
            self.clean(False)
            return output_file_path
        except Exception as exc:
            self.clean(False)
            self.log_exc(u"Error while compressing", exc, True, ExecuteJobOutputError)
            return None

    def clean(self, remove_working_directory=True):
        """
        Remove the temporary directory.
        If ``remove_working_directory`` is ``True``
        remove the working directory as well,
        otherwise just remove the temporary directory.

        :param bool remove_working_directory: if ``True``, remove
                                              the working directory as well
        """
        # Fix: the original tested ``is not None``, so passing ``False``
        # (as write_output_container() does via ``self.clean(False)``)
        # still deleted the working directory, contradicting the
        # documented contract above.
        if remove_working_directory:
            self.log(u"Removing working directory... ")
            gf.delete_directory(self.working_directory)
            self.working_directory = None
            self.log(u"Removing working directory... done")
        self.log(u"Removing temporary directory... ")
        gf.delete_directory(self.tmp_directory)
        self.tmp_directory = None
        self.log(u"Removing temporary directory... done")
| 1.90625 | 2 |
Container/rm.py | qazbnm456/toolbox | 0 | 12772598 | """This module contains `docker container rm` class"""
from docker.errors import APIError
from tsaotun.lib.Docker.Container.command import Command
from tsaotun.cli import Tsaotun
class Rm(Command):
    """This class implements `docker container rm` command"""
    name = "container rm"
    require = []

    def __init__(self):
        Command.__init__(self)
        # Result slot read back by final(); holds the newline-joined Ids
        # of the containers that were removed.
        self.settings[self.name] = None

    def eval_command(self, args):
        # Removes the containers named in args["containers"], or -- when
        # args["clear"] is truthy -- every container reported by `ps -a`.
        try:
            containers = args["containers"]
            clear = args["clear"]
            # The remaining keys in `args` are forwarded verbatim to
            # remove_container(), so strip our two control keys first.
            del args["containers"]
            del args["clear"]
            Ids = []
            if clear:
                cli = Tsaotun()
                cli.send('ps -a --format {{Id}}')
                ress = cli.recv()
                if ress:
                    ress = ress.split('\n')
                    # NOTE(review): assumes the first 4 characters of each
                    # `ps` output line form a usable short container Id --
                    # confirm against Tsaotun's `ps --format` output.
                    ress = [res[0:4] for res in ress]
                    for Id in ress:
                        Ids.append(Id)
                        args['container'] = Id
                        self.client.remove_container(**args)
            else:
                for Id in containers:
                    Ids.append(Id)
                    args['container'] = Id
                    self.client.remove_container(**args)
            self.settings[self.name] = '\n'.join(Ids)
        except APIError as e:
            # NOTE(review): a bare `raise` would preserve the original
            # traceback; `raise e` is kept here to avoid a behavior change.
            raise e

    def final(self):
        # Return the newline-joined Ids recorded by eval_command().
        return self.settings[self.name]
| 2.828125 | 3 |
sonic-pde-tests/sonic_pde_tests/test_cpld.py | wbschwar/sonic-platform-pdk-pde | 1 | 12772599 | import pytest
import sys
import imp
import subprocess
import time
# Global platform-specific cpldutil class instance
platform_cpldutil = None
# Loads platform specific cpldutil module from source
def load_platform_cpldutil(json_config_data):
    """Load the platform-specific CPLD plugin described in the config JSON.

    The instantiated plugin is cached in the module-level
    ``platform_cpldutil``, so repeated calls are no-ops.  The test is
    skipped when the platform JSON marks the CPLD module as unsupported.

    Fix: ``except AttributeError, e`` is Python-2-only syntax and a
    SyntaxError under Python 3 (the rest of this file already uses the
    Python 3 ``print()`` function); rewritten as ``except ... as e``.
    """
    global platform_cpldutil
    if platform_cpldutil is not None:
        return
    try:
        if json_config_data['PLATFORM']['modules']['CPLD']['support'] == "false":
            pytest.skip("Skip the testing due to the module is not supported in BSP")
        modules_dir = json_config_data['PLATFORM']['modules']['CPLD']['path']
        modules_name = json_config_data['PLATFORM']['modules']['CPLD']['name']
        class_name = json_config_data['PLATFORM']['modules']['CPLD']['class']
        cpld_module = "/usr/share/sonic/classes/" + modules_dir + "/" + modules_name + '.py'
        platform_cpldutil_module = imp.load_source(modules_name, cpld_module)
        platform_cpldutil_class = getattr(platform_cpldutil_module, class_name)
        platform_cpldutil = platform_cpldutil_class()
    except AttributeError as e:
        # NOTE(review): the trailing True looks like a leftover second
        # argument from a logging helper; print() just renders it.
        print("Failed to instantiate '%s' class: %s" % (class_name, str(e)), True)
    return
def test_for_num_cpld(json_config_data):
    """Test Purpose:  Verify that the number of CPLD reported as supported
       by the CPLD plugin matches what the platform supports

       Args:
            arg1 (json): platform-<sonic_platform>-config.json

       Example:
            For a system that physically supports 2 CPLD

            platform-<sonic_platform>-config.json
            {
                "PLATFORM": {
                    "num_cplds": 2
                }
            }
    """
    load_platform_cpldutil(json_config_data)
    # The plugin-reported CPLD count must equal the platform JSON's num_cplds.
    assert platform_cpldutil.get_num_cplds() == json_config_data['PLATFORM']['num_cplds'],\
    "verify CPLD numbers {} not matching with platform JSON".format(platform_cpldutil.get_num_cplds())
def test_for_cpld_read(json_config_data,json_test_data):
    """Test Purpose:  Verify that the CPLD version is readable and that the
       value matches the value defined in the test config JSON

       Args:
            arg1 (json): platform-<sonic_platform>-config.json
            arg2 (json): test-<sonic_platform>-config.json

       Example:
            For a system that physically supports 2 CPLD, the CPLD version defined in the
            test-<sonic_platform>-config.json

            "CPLD": {
               "CPLD1": {
                    "version": "2"
               },
               "CPLD2": {
                    "version": "1"
               }
            },
    """
    load_platform_cpldutil(json_config_data)
    for key in json_config_data:
        for x in range(json_config_data[key]['num_cplds']):
            # Compare the stripped plugin-reported version string against the
            # expected version for CPLD<x+1> in the test JSON.
            assert platform_cpldutil.get_cpld_version(x+1).strip() == json_test_data[key]['CPLD']['CPLD'+str(x+1)]['version'], \
            "verify" + " CPLD"+str(x+1)+" version={} is False".format(platform_cpldutil.get_cpld_version(x+1))
if __name__ == '__main__':
    # Fix: the original called unittest.main(), but unittest was never
    # imported (NameError on direct execution) and these are pytest-style
    # module-level test functions that unittest would not collect anyway.
    # pytest is already imported at the top of this file.
    pytest.main([__file__])
| 2.171875 | 2 |
tests/test_LazyJSON.py | jameswenzel/jwp | 6 | 12772600 | <gh_stars>1-10
import unittest
import os
from LazyScripts.LazyJSON import *
class jsonTest(unittest.TestCase):
    """Round-trip tests for LazyJSON's load_json/write_json helpers."""

    def setUp(self):
        # Scratch file lives under the test module's own testdata/ folder.
        self.cwd = os.path.dirname(os.path.realpath(__file__))
        self.testpath = self.cwd + '/testdata/test.json'
        # Reference payload for the write/load round-trip tests.
        self.js = {
            '1992-12-17': {'name': 'James', 'age': 69},
            '1969-06-09': {'name': 'Hugh', 'age': 420}
        }

    def tearDown(self):
        # Keep tests independent: drop the scratch file if a test wrote it.
        if os.path.isfile(self.testpath):
            os.remove(self.testpath)

    def test_load_json(self):
        # example.json ships with the repo and is UTF-16 encoded.
        example = load_json(self.cwd + '/testdata/example.json',
                            encoding='utf-16')
        self.assertTrue(len(example) == 2)
        self.assertTrue(all(x in example for x in ('1992-12-17',
                                                   '1969-06-09')))

    def test_write_json(self):
        # write_json should create the file and round-trip the payload.
        write_json(self.testpath, self.js)
        self.assertTrue(os.path.isfile(self.testpath))
        read = load_json(self.testpath)
        self.assertEqual(read, self.js)

    def test_load_json_touch(self):
        # touch=True should create a missing file and return a usable mapping.
        test = load_json(self.testpath, touch=True)
        self.assertTrue(os.path.isfile(self.testpath))
        test['x'] = '42'
        self.assertTrue(test['x'])

    def test_write_load_unicode(self):
        # Non-ASCII keys must survive an explicit UTF-8 round trip.
        self.js['🍆'] = '666'
        write_json(self.testpath, self.js, encoding='utf-8')
        test = load_json(self.testpath, encoding='utf-8')
        self.assertEqual(self.js, test)
if __name__ == '__main__':
    # Discover and run the TestCase above when executed directly.
    unittest.main()
| 2.75 | 3 |
ports/devel/ninja/dragonfly/patch-configure.py | liweitianux/DeltaPorts | 31 | 12772601 | --- configure.py.intermediate 2014-12-12 13:05:20 UTC
+++ configure.py
@@ -41,6 +41,8 @@ class Platform(object):
self._platform = sys.platform
if self._platform.startswith('linux'):
self._platform = 'linux'
+ elif self._platform.startswith('dragonfly'):
+ self._platform = 'freebsd'
elif self._platform.startswith('freebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('gnukfreebsd'):
| 2.078125 | 2 |
bytesCryptorMain.py | ZeroxTM/IDEA-HMK-Cryptor-1 | 0 | 12772602 | <filename>bytesCryptorMain.py<gh_stars>0
__author__ = "<NAME>, <NAME>"
__copyright__ = "2020 HMK-IDEA-Cryptor"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__email__ = "<EMAIL>"
import binascii
from pckgIDEA.IDEA_bytestream import IDEA
# 128-bit IDEA key (hex).  NOTE(review): the literal below is a redacted
# placeholder; it must be a 32-digit hex string for the script to run.
KEY = int('<KEY>', 16)

cryptor = IDEA(KEY)  # Initialize cryptor with 128bit key

########ENCRYPTION########
# Read the source file in 8-byte (64-bit) blocks, encrypt each block, and
# append the hex ciphertext to the output file.  `with` guarantees both
# files are closed even if encryption fails (the original leaked them on
# error).
with open("files/song.mp3", "rb") as in_file, \
     open("files/encrypted.mp3", "w", encoding="utf-8") as out_file:
    bytes8 = in_file.read(8)
    while bytes8:
        # bytes.hex() replaces the original str(binascii.b2a_hex(...))[2:-1]
        # hack; both produce the same lowercase hex string for the block.
        res = cryptor.encrypt(bytes8.hex(), is_hex=True)
        print('Text: ' + str(bytes8.decode('latin-1')) + ' \ Encrypted: ' + res)
        out_file.write(res)
        bytes8 = in_file.read(8)

########DECRYPTION########
# Each 64-bit block was written as 16 hex characters, so read the
# ciphertext back in 16-character chunks and decrypt each one.
with open("files/encrypted.mp3", "r", encoding="utf-8") as in_file, \
     open("files/decrypted.mp3", "wb") as out_file:
    chunk16 = in_file.read(16)
    while chunk16:
        res = cryptor.decrypt(chunk16)
        print('Decrypted: ' + str(res))
        out_file.write(res)
        chunk16 = in_file.read(16)
| 2.84375 | 3 |
Exceptions.py | Pratyaksh7/PythonPrograms-Hackerrank | 0 | 12772603 | T = int(input())
for i in range(T):
try:
a, b = map(str, input().split())
print(int(int(a)//int(b)))
except Exception as e:
print("Error Code:", e)
| 3.078125 | 3 |
Diplomacy/commands.py | CScarame/DiplomacyBot | 2 | 12772604 | <filename>Diplomacy/commands.py
#############
# Diplomacy turn.py
# <NAME>
# Set of possible commands that can be sent.
#############
from Diplomacy.types import UnitType, OrderValidationError
from Diplomacy.province import ProvinceBase, LandProvince, WaterProvince, CoastProvince, TwoCoastProvince
from Diplomacy.unit import Unit
class Order():
    """Base class for all Diplomacy orders issued to a single unit."""
    unit: Unit
    msg: str

    def __init__(self, unit):
        self.unit = unit

    def msgBase(self):
        """Return the standard unit prefix, e.g. "A Par" or "F Lon"."""
        if self.unit.typ == UnitType.ARMY:
            return "A " + self.unit.province.abr.capitalize()
        elif self.unit.typ == UnitType.FLEET:
            return "F " + self.unit.province.abr.capitalize()
        else:
            return " "

    def validate(self):
        # Fix: the original merely *instantiated* NotImplementedError()
        # without raising it, so a subclass that forgot to override
        # validate() would silently pass validation.
        raise NotImplementedError()

    def get_msg(self):
        """Return the order message built by the subclass's set_msg()."""
        return self.msg
class Hold(Order):
    """Order a unit to hold its current position."""

    def __init__(self, unit):
        super().__init__(unit)
        self.set_msg()

    def set_msg(self):
        # A hold order is just the unit descriptor followed by "holds".
        self.msg = "{} holds".format(self.msgBase())
class Move(Order):
    """Order a unit to move into an adjacent (or convoy-reachable) province."""
    province: ProvinceBase

    def __init__(self, unit: Unit, prov: ProvinceBase):
        super().__init__(unit)
        self.province = prov
        # Validation happens eagerly: constructing an illegal Move raises
        # OrderValidationError before any message is built.
        self.validate()
        self.set_msg()

    def validate(self):
        # Move Order requirements:
        #   -provinces are adjacent OR
        #   -both provinces are Coastal and can be connected by water
        #   -unit type must correspond with the destination(armies can't move to water, fleets can't move to land)
        #    note line above is already checked by is_adjacent I think
        # note: black sea is only water province that is not connected to all other water provinces
        typ = self.unit.typ
        unit = self.unit
        prov = self.province
        if not unit.province.is_adjacent(prov,unit):
            # Non-adjacent moves are only possible by convoying an army
            # between two coastal provinces; fleets cannot be convoyed.
            if typ == UnitType.FLEET:
                raise OrderValidationError()
            if not (isinstance(unit.province, CoastProvince) or isinstance(unit.province, TwoCoastProvince)):
                raise OrderValidationError()
            if not (isinstance(prov, CoastProvince) or isinstance(prov, TwoCoastProvince)): # Can be convoyed
                raise OrderValidationError()
        return

    def set_msg(self):
        # Standard move notation, e.g. "A Par-Bur".
        self.msg = self.msgBase() + "-" + self.province.abr.capitalize()
        return
class Support(Order):
    """Abstract base for support orders (support-hold and support-move)."""
    supported: ProvinceBase

    def __init__(self, unit: Unit, supported: ProvinceBase):
        super().__init__(unit)
        self.supported = supported

    def validate(self):
        # Fix: the original instantiated NotImplementedError() without
        # raising it, making this abstract method a silent no-op.
        raise NotImplementedError()

    def set_msg(self):
        # Fix: same un-raised NotImplementedError() defect as validate().
        raise NotImplementedError()

    def support_msg(self):
        """Return the shared support clause, e.g. "A Par S Bur"."""
        return self.msgBase() + " S " + self.supported.abr.capitalize()
class SupportHold(Support):
    """Order a unit to support another unit holding in *supported*."""

    def __init__(self, unit: Unit, supported: ProvinceBase):
        super().__init__(unit, supported)
        self.validate()
        self.set_msg()

    def validate(self):
        # A unit may only support-hold a province it is itself adjacent to.
        reachable = self.unit.province.is_adjacent(self.supported, self.unit)
        if not reachable:
            raise OrderValidationError()

    def set_msg(self):
        # A support-hold message is just the bare support clause.
        self.msg = self.support_msg()
class SupportMove(Support):
    """Order a unit to support another unit's move into *target*."""
    target: ProvinceBase

    def __init__(self, unit: Unit, supported: ProvinceBase, target: ProvinceBase):
        super().__init__(unit, supported)
        self.target = target
        # Fix (consistency): every sibling order (Move, SupportHold,
        # Convoy) validates itself and builds its message on construction;
        # SupportMove omitted both, leaving self.msg unset so get_msg()
        # raised AttributeError.
        self.validate()
        self.set_msg()

    def validate(self):
        # Support Move must be adjacent to target province
        if not self.unit.province.is_adjacent(self.target, self.unit):
            raise OrderValidationError()

    def set_msg(self):
        # e.g. "A Par S Mar-Bur".
        self.msg = self.support_msg() + "-" + self.target.abr.capitalize()
        return
class Convoy(Order):
    """Order a fleet to convoy an army from *c_start* to *c_end*."""
    start: ProvinceBase
    end: ProvinceBase

    def __init__(self, unit: Unit, c_start: ProvinceBase, c_end: ProvinceBase):
        super().__init__(unit)
        self.start = c_start
        self.end = c_end
        # Constructing an illegal Convoy raises OrderValidationError.
        self.validate()
        self.set_msg()

    def validate(self):
        # Convoy Order Requirements:
        #   Only fleets can convoy
        #   Start and end must be coastal
        # TODO: Black sea separated from others
        if not self.unit.typ == UnitType.FLEET:
            raise OrderValidationError()
        if not (isinstance(self.start, CoastProvince) or isinstance(self.start, TwoCoastProvince)):
            raise OrderValidationError()
        if not (isinstance(self.end, CoastProvince) or isinstance(self.end, TwoCoastProvince)):
            raise OrderValidationError()

    def set_msg(self):
        # e.g. "F Eng C Lon-Bre".
        self.msg = self.msgBase() + " C " + self.start.abr.capitalize() + "-" + self.end.abr.capitalize()
datasets/utils/batch_collator.py | Nik-V9/AirObject | 9 | 12772605 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import re
import collections
from torch._six import string_classes
class BatchCollator(object):
    '''
    Collate a batch of per-sample dicts into one dict of per-key lists.
    '''
    def __init__(self):
        super(BatchCollator, self).__init__()

    def __call__(self, batch):
        # Keys are taken from the first sample; every sample is assumed to
        # share the same keys.
        collated = {}
        for key in batch[0]:
            collated[key] = [sample[key] for sample in batch]
        return collated
# def vis_custom_collate(batch):
# r"""Puts each tensor data field into a tensor with outer dimension batch size
# and Puts list data into list with length batch size"""
# tensors = []
# if len(batch[0]) == 3:
# list_1 = []
# list_2 = []
# elif len(batch[0]) == 2:
# list_1 = []
# for i in range(len(batch)):
# tensors.append(batch[i][0])
# if len(batch[0]) == 3:
# list_1.append(batch[i][1])
# list_2.append(batch[i][2])
# elif len(batch[0]) == 2:
# list_1.append(batch[i][1])
# tensor = torch.stack(tensors, 0)
# if len(batch[0]) == 3:
# return tensor, list_1, list_2
# elif len(batch[0]) == 2:
# return tensor, list_1
# else:
# return tensor
def vis_custom_collate(batch):
    r"""Puts each tensor data field into a tensor with outer dimension batch size
    and Puts list data into list with length batch size"""
    width = len(batch[0])
    # Transpose the batch: one column (list) per field position.
    columns = [list(col) for col in zip(*batch)]
    if width == 3:
        # (tensor, meta1, meta2): stack the tensors, keep the rest as lists.
        return torch.stack(columns[0], 0), columns[1], columns[2]
    elif width == 2:
        return columns[0], columns[1]
    elif width == 1:
        return columns[0]
def eval_custom_collate(batch):
    r"""Puts each tensor data field into a tensor with outer dimension batch size
    and Puts list data into list with length batch size"""
    width = len(batch[0])
    # Transpose the batch: one column (list) per field position.
    columns = [list(col) for col in zip(*batch)]
    if width == 4:
        # (tensor, m1, m2, m3): stack the tensors, keep the rest as lists.
        return torch.stack(columns[0], 0), columns[1], columns[2], columns[3]
    elif width == 3:
        return columns[0], columns[1], columns[2]
    elif width == 2:
        return columns[0], columns[1]
    elif width == 1:
        return columns[0]
module/text_converter.py | 5l1v3r1/saydog-framework | 2 | 12772606 | <reponame>5l1v3r1/saydog-framework<gh_stars>1-10
import os,sys,time
from time import *
# ANSI escape sequences used for colored terminal output.
r='\x1b[00m\x1b[91m'  # reset + bright red
g='\x1b[00m\x1b[32m'  # reset + green
y='\x1b[00m\x1b[33m'  # reset + yellow
c='\x1b[00m\x1b[36m'  # reset + cyan
w='\x1b[00m'          # reset (default color)
u='\033[4m'           # underline
b='\033[5m'           # blink
def sprint(s):
    """Print *s* one character at a time (typewriter effect), then newline.

    Fix: ``from time import *`` at the top of this file rebinds the name
    ``time`` to the ``time()`` function, so the original ``time.sleep``
    raised AttributeError; call the star-imported ``sleep`` directly.
    """
    for ch in s + '\n':
        sys.stdout.write(ch)
        sys.stdout.flush()
        sleep(0.1 / 100)
def corrupt():
    """Print the standard "unknown command" error message.

    Fix: corrected the user-facing typo "pleade" -> "please".
    """
    print(r+'[?]'+w+' Command not found, please type help')
def exit():
    """Print the shutdown banner when the user force-stops the tool.

    Fix: ``primt`` was a typo for ``print`` and raised NameError every
    time exit() was called.  (The name intentionally shadows the builtin
    ``exit``; kept for interface compatibility with existing callers.)
    """
    print(r+'[!]'+w+' The user forces it to stop')
    print(r+'[!]'+w+' Exiting tool')
def help():
    """Print the command/usage table for the text2ascii module."""
    print('')
    print('command                 example')
    print('-------                 -------')
    print('set text [your text]    set text iqbalmh18')
    print('run, go, create         create')
    print('')
def all_main():
    """Interactive prompt that renders the user's text in every figlet font.

    Commands:
      * ``set text <text>`` -- remember the last word as the banner text
      * ``run`` / ``go`` / ``create`` -- render the text in all fonts
      * ``help`` -- show usage, ``back`` -- leave, ``exit`` -- quit the tool
    """
    import shlex  # local import: escape user text before handing it to a shell

    # Every figlet font the original code invoked with its own duplicated
    # os.system() line (~290 of them). Iterating this tuple replaces that
    # wall of copy/paste. The original also ended with `figlet -f "" ...`,
    # which is an invalid (empty) font name and always errored, so it was
    # dropped.
    fonts = (
        '1Row', '3-D', '3D Diagonal', '3D-ASCII', '3x5', '4Max',
        '5 Line Oblique', 'AMC 3 Line', 'AMC 3 Liv1', 'AMC AAA01',
        'AMC Neko', 'AMC Razor', 'AMC Razor2', 'AMC Slash', 'AMC Slider',
        'AMC Thin', 'AMC Tubes', 'AMC Untitled', 'ANSI Shadow',
        'ASCII New Roman', 'Acrobatic', 'Alligator', 'Alligator2', 'Alpha',
        'Alphabet', 'Arrows', 'Avatar', 'B1FF', 'Banner', 'Banner3-D',
        'Banner3', 'Banner4', 'Barbwire', 'Basic', 'Bear', 'Bell',
        'Benjamin', 'Big Chief', 'Big Money-ne', 'Big Money-nw',
        'Big Money-se', 'Big Money-sw', 'Big', 'Bigfig', 'Binary', 'Block',
        'Blocks', 'Bloody', 'Bolger', 'Braced', 'Bright', 'Broadway KB',
        'Broadway', 'Bubble', 'Bulbhead', 'Caligraphy', 'Caligraphy2',
        'Calvin S', 'Cards', 'Catwalk', 'Chiseled', 'Chunky', 'Coinstak',
        'Cola', 'Colossal', 'Computer', 'Contessa', 'Contrast', 'Cosmike',
        'Crawford', 'Crawford2', 'Crazy', 'Cricket', 'Cursive',
        'Cyberlarge', 'Cybermedium', 'Cybersmall', 'Cygnet', 'DANC4',
        'DWhistled', 'Dancing Font', 'Decimal', 'Def Leppard',
        'Delta Corps Priest 1', 'Diamond', 'Diet Cola', 'Digital', 'Doh',
        'Doom', 'Dot Matrix', 'Double Shorts', 'Double', 'Dr Pepper',
        'Efti Chess', 'Efti Font', 'Efti Italic', 'Efti Piti', 'Efti Robot',
        'Efti Wall', 'Efti Water', 'Electronic', 'Elite', 'Epic', 'Fender',
        'Filter', 'Fire Font-k', 'Fire Font-s', 'Flipped', 'Flower Power',
        'Four Tops', 'Fraktur', 'Fun Face', 'Fun Faces', 'Fuzzy',
        'Georgi16', 'Georgia11', 'Ghost', 'Ghoulish', 'Glenyn', 'Goofy',
        'Gothic', 'Graceful', 'Gradient', 'Graffiti', 'Greek', 'Heart Left',
        'Heart Right', 'Henry 3D', 'Hex', 'Hieroglyphs', 'Hollywood',
        'Horizontal Left', 'Horizontal Right', 'ICL-1900', 'Impossible',
        'Invita', 'Isometric1', 'Isometric2', 'Isometric3', 'Isometric4',
        'Italic', 'Ivrit', 'JS Block Letters', 'JS Bracket Letters',
        'JS Capital Curves', 'JS Cursive', 'JS Stick Letters', 'Jacky',
        'Jazmine', 'Jerusalem', 'Katakana', 'Kban', 'Keyboard', 'Knob',
        'Konto Slant', 'Konto', 'LCD', 'Larry 3D 2', 'Larry 3D', 'Lean',
        'Letters', 'Lil Devil', 'Line Blocks', 'Linux', 'Lockergnome',
        'Madrid', 'Marquee', 'Maxfour', 'Merlin1', 'Merlin2', 'Mike',
        'Mini', 'Mirror', 'Mnemonic', 'Modular', 'Morse', 'Morse2',
        'Moscow', 'Mshebrew210', 'Muzzle', 'NScript', 'NT Greek',
        'NV Script', 'Nancyj-Fancy', 'Nancyj-Improved', 'Nancyj-Underlined',
        'Nancyj', 'Nipples', 'O8', 'OS2', 'Octal', 'Ogre', 'Old Banner',
        'Patorjk-HeX', 'Pawp', 'Peaks Slant', 'Peaks', 'Pebbles', 'Pepper',
        'Poison', 'Puffy', 'Puzzle', 'Pyramid', 'Rammstein', 'Rectangles',
        'Red Phoenix', 'Relief', 'Relief2', 'Reverse', 'Roman', 'Rot13',
        'Rotated', 'Rounded', 'Rowan Cap', 'Rozzo', 'Runic', 'Runyc',
        'S Blood', 'SL Script', 'Santa Clara', 'Script', 'Serifcap',
        'Shadow', 'Shimrod', 'Short', 'Slant Relief', 'Slant', 'Slide',
        'Small Caps', 'Small Isometric1', 'Small Keyboard', 'Small Poison',
        'Small Script', 'Small Shadow', 'Small Slant', 'Small Tengwar',
        'Small', 'Soft', 'Speed', 'Spliff', 'Stacey', 'Stampate',
        'Stampatello', 'Standard', 'Star Strips', 'Star Wars', 'Stellar',
        'Stforek', 'Stick Letters', 'Stop', 'Straight',
        'Stronger Than All', 'Sub-Zero', 'Swamp Land', 'Swan', 'Sweet',
        'THIS', 'Tanja', 'Tengwar', 'Term', 'Test1', 'The Edge', 'Thick',
        'Thin', 'Thorned', 'Three Point', 'Ticks Slant', 'Ticks', 'Tiles',
        'Tinker-Toy', 'Tombstone', 'Train', 'Trek', 'Tsalagi', 'Tubular',
        'Twisted', 'Two Point', 'USA Flag', 'Univers', 'Varsity', 'Wavy',
        'Weird', 'Wet Letter', 'Whimsy', 'Wow',
    )
    global text
    while True:
        am = input(w + 'saydog(' + r + 'text2ascii/all' + w + ') > ' + w)
        if am == 'help':
            help()
        elif am == 'back':
            sys.exit(0)
        elif am == 'exit':
            exit()
            sys.exit(1)
        elif 'set text' in am:
            text = am.split()[-1]
            print('text > ' + text)
        elif am in ('run', 'create', 'go'):
            # Fixed: running before `set text` used to crash with NameError.
            try:
                banner = text
            except NameError:
                print(r + '[!]' + w + ' No text set, use: set text <your text>')
                continue
            # Fixed: user text was interpolated into a shell command inside
            # double quotes, allowing shell injection; quote it properly.
            quoted = shlex.quote(banner)
            try:
                for font in fonts:
                    os.system('figlet -f ' + shlex.quote(font) + ' ' + quoted)
            except KeyboardInterrupt:
                exit()
        else:
            corrupt()


# Launch the interactive loop as soon as the module is executed.
all_main()
src/TulsiGenerator/Scripts/install_genfiles_tests.py | comius/tulsi | 511 | 12772607 | <gh_stars>100-1000
# Copyright 2018 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for install_genfiles.py."""
import os
import unittest
import install_genfiles
# Install data whose source path exists in this repo; installing it should
# create an 'install_genfiles.py' symlink under the output root.
DOES_EXIST_DATA = {
    'generated_sources': [
        ('src/TulsiGenerator/Scripts/install_genfiles.py',
         'install_genfiles.py'),
    ],
}

# Install data referencing a source file that does not exist; installing it
# should create nothing.
DOES_NOT_EXIST_DATA = {
    'generated_sources': [('src/does/not/exist.txt',
                           'exist.txt')],
}
class TestInstallForData(unittest.TestCase):
  """Exercises Installer.InstallForData for existing and missing sources."""

  def _install(self, data):
    # Run an install into this test's scratch directory and return that
    # directory so the caller can assert on its contents.
    out_dir = os.environ['TEST_TMPDIR']
    install_genfiles.Installer('.', output_root=out_dir).InstallForData(data)
    return out_dir

  def testSrcDoeNotExist(self):
    out_dir = self._install(DOES_NOT_EXIST_DATA)
    self.assertFalse(os.path.lexists(
        os.path.join(out_dir, 'bazel-tulsi-includes/x/x/exist.txt')))

  def testSrcDoesExist(self):
    out_dir = self._install(DOES_EXIST_DATA)
    # Must use lexists: the link is created against the wrong exec root, so
    # the symlink exists but does not resolve to a real file.
    self.assertTrue(os.path.lexists(
        os.path.join(out_dir, 'bazel-tulsi-includes/x/x/install_genfiles.py')))
# Allow running this test module directly (python install_genfiles_tests.py).
if __name__ == '__main__':
  unittest.main()
| 2.03125 | 2 |
pycroft/model/session.py | JuKu/pycroft | 0 | 12772608 | <filename>pycroft/model/session.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
"""
pycroft.model.session
~~~~~~~~~~~~~~
This module contains the session stuff for db actions.
:copyright: (c) 2011 by AG DSN.
"""
from werkzeug.local import LocalProxy
import wrapt
from sqlalchemy import func
class NullScopedSession(object):
    """Placeholder installed before `set_scoped_session` provides a real one.

    Any attribute access or call fails loudly so accidental database use
    before initialization is caught immediately; `remove()` is a no-op so
    teardown code can always run safely.
    """

    _MESSAGE = "Session has not been initialized."

    def __getattr__(self, item):
        raise AttributeError(self._MESSAGE)

    def __call__(self, *args, **kwargs):
        raise AttributeError(self._MESSAGE)

    def remove(self):
        # Nothing to tear down yet -- deliberately a no-op.
        pass
# Process-wide lazy handles: `Session` is the scoped-session factory and
# `session` the session it produces. Both initially resolve to a
# NullScopedSession and are rebound in place by set_scoped_session().
Session = LocalProxy(lambda: NullScopedSession())
session = LocalProxy(lambda: Session())
def set_scoped_session(scoped_session):
    """Install *scoped_session* as the process-wide `Session` factory.

    Tears down whatever the proxy currently points to, then rebinds the
    proxy's internal lookup so `Session`/`session` resolve to the new
    scoped session from now on.
    """
    Session.remove()
    # noinspection PyCallByClass
    # LocalProxy keeps its lookup callable in the name-mangled attribute
    # `_LocalProxy__local`; write through object.__setattr__ to bypass the
    # proxy's own __setattr__, which would forward to the proxied object.
    object.__setattr__(Session, '_LocalProxy__local', lambda: scoped_session)
@wrapt.decorator
def with_transaction(wrapped, instance, args, kwargs):
    """Run *wrapped* inside a (sub)transaction of the scoped session.

    Commits after a successful call; on any failure (including commit
    failure) rolls the transaction back and re-raises, so the session is
    never left with a dangling transaction.
    """
    transaction = session.begin(subtransactions=True)
    try:
        rv = wrapped(*args, **kwargs)
        transaction.commit()
        return rv
    # Fixed: was a bare `except:`; made explicit that *everything* --
    # including KeyboardInterrupt/SystemExit -- triggers a rollback before
    # being re-raised.
    except BaseException:
        transaction.rollback()
        raise
def utcnow():
    """Return the database server's current timestamp as a single scalar.

    Using the DB clock (CURRENT_TIMESTAMP) keeps timestamps consistent with
    server-side defaults instead of the application host's clock.
    """
    return session.query(func.current_timestamp()).scalar()
| 2.328125 | 2 |
pivot_based_eccv2018/misc/optimizer.py | gujiuxiang/unpaired_im2text_iccv19 | 18 | 12772609 | import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
import misc.utils as utils
class Optim(object):
def __init__(self, opt):
self.last_ppl = None
self.init_i2t(opt)
self.init_nmt(opt)
self._step = 0
self.opt = opt
def init_i2t(self, opt):
self.i2t_train_flag = opt.i2t_train_flag
self.i2t_eval_flag = opt.i2t_eval_flag
self.i2t_method = opt.i2t_optim
self.i2t_lr = opt.i2t_learning_rate
self.i2t_current_lr = self.i2t_lr
self.i2t_learning_rate_decay_start = opt.i2t_learning_rate_decay_start
self.i2t_learning_rate_decay_every = opt.i2t_learning_rate_decay_every
self.i2t_learning_rate_decay_rate = opt.i2t_learning_rate_decay_rate
self.i2t_optim_alpha = opt.i2t_optim_alpha
self.i2t_optim_beta = opt.i2t_optim_beta
self.i2t_optim_epsilon = opt.i2t_optim_epsilon
self.i2t_momentum = opt.i2t_momentum
self.i2t_max_grad_norm = opt.i2t_max_grad_norm
self.i2t_grad_clip = opt.i2t_grad_clip
self.i2t_start_decay = False
self.i2t_decay_method = opt.i2t_decay_method
self.i2t_weight_decay = opt.i2t_weight_decay
def init_nmt(self, opt):
self.nmt_train_flag = opt.nmt_train_flag
self.nmt_eval_flag = opt.nmt_eval_flag
self.nmt_method = opt.nmt_optim
self.nmt_lr = opt.nmt_learning_rate
self.nmt_current_lr = self.nmt_lr
self.nmt_learning_rate_decay_start = opt.nmt_learning_rate_decay_start
self.nmt_learning_rate_decay_every = opt.nmt_learning_rate_decay_every
self.nmt_learning_rate_decay_rate = opt.nmt_learning_rate_decay_rate
self.nmt_optim_alpha = opt.nmt_optim_alpha
self.nmt_optim_beta = opt.nmt_optim_beta
self.nmt_optim_epsilon = opt.nmt_optim_epsilon
self.nmt_momentum = opt.nmt_momentum
self.nmt_max_grad_norm = opt.nmt_max_grad_norm
self.nmt_grad_clip = opt.nmt_grad_clip
self.nmt_start_decay = False
self.nmt_decay_method = opt.nmt_decay_method
self.nmt_weight_decay = opt.nmt_weight_decay
self.nmt_warmup_steps = opt.nmt_warmup_steps
self.nmt_betas = [0.9, 0.98]
def create_optimizer(self, method, parameters, lr, alpha, beta, epsilon, weight_decay):
if method == 'rmsprop':
optimizer = optim.RMSprop(parameters, lr, alpha, epsilon, weight_decay=weight_decay)
elif method == 'adagrad':
optimizer = optim.Adagrad(parameters, lr, weight_decay=weight_decay)
elif method == 'sgd':
optimizer = optim.SGD(parameters, lr, weight_decay=weight_decay)
elif method == 'sgdm':
optimizer = optim.SGD(parameters, lr, alpha, weight_decay=weight_decay)
elif method == 'sgdmom':
optimizer = optim.SGD(parameters, lr, alpha, weight_decay=weight_decay, nesterov=True)
elif method == 'adam':
optimizer = optim.Adam(parameters, lr, (alpha, beta), epsilon, weight_decay=weight_decay)
else:
raise RuntimeError("Invalid optim method: " + method)
return optimizer
    def set_parameters(self, i2t_model, nmt_model):
        """Build the optimizer(s) for whichever model(s) are provided.

        When `opt.start_from` names a checkpoint directory, previously saved
        optimizer state (i2t_optimizer.pth / nmt_optimizer.pth) is restored.
        Either model may be None, in which case that side is skipped.
        """
        if i2t_model is not None:
            self.i2t_params = i2t_model.parameters()
            self.i2t_optimizer = self.create_optimizer(self.i2t_method, self.i2t_params, self.i2t_lr, self.i2t_optim_alpha, self.i2t_optim_beta, self.i2t_optim_epsilon, self.i2t_weight_decay)
            # vars(...).get guards against option namespaces without 'start_from'.
            if vars(self.opt).get('start_from', None) is not None and os.path.isfile(os.path.join(self.opt.start_from, "i2t_optimizer.pth")):
                self.i2t_optimizer.load_state_dict(torch.load(os.path.join(self.opt.start_from, 'i2t_optimizer.pth')))
        if nmt_model is not None:
            self.nmt_params = list(nmt_model.parameters()) # careful: params may be a generator
            self.nmt_optimizer = self.create_optimizer(self.nmt_method, self.nmt_params, self.nmt_lr, self.nmt_optim_alpha, self.nmt_optim_beta, self.nmt_optim_epsilon, self.nmt_weight_decay)
            if vars(self.opt).get('start_from', None) is not None and os.path.isfile(os.path.join(self.opt.start_from, "nmt_optimizer.pth")):
                self.nmt_optimizer.load_state_dict(torch.load(os.path.join(self.opt.start_from, 'nmt_optimizer.pth')))
    def step(self):
        """Advance one optimization step for each model currently training.

        For NMT with the "noam" decay method, the learning rate is updated
        with the inverse-sqrt warmup schedule before stepping.
        """
        self._step += 1
        if self.i2t_train_flag:
            # NOTE(review): clip_grad_norm is the pre-0.4 PyTorch name (it
            # was renamed clip_grad_norm_) -- assumes an old torch; confirm.
            if self.i2t_max_grad_norm: clip_grad_norm(self.i2t_params, self.i2t_max_grad_norm)
            self.i2t_optimizer.step()
        if self.opt.nmt_train_flag:
            if self.opt.nmt_decay_method == "noam":
                # lr = base * rnn_size^-0.5 * min(step^-0.5, step * warmup^-1.5)
                self.nmt_current_lr = self.nmt_lr * (self.opt.rnn_size ** (-0.5) * min(self._step ** (-0.5), self._step * self.nmt_warmup_steps ** (-1.5)))
                for group in self.nmt_optimizer.param_groups:
                    group['lr'] = self.nmt_current_lr
            if self.nmt_max_grad_norm: clip_grad_norm(self.nmt_params, self.nmt_max_grad_norm)
            self.nmt_optimizer.step()
def zero_grad(self):
if self.i2t_train_flag:
self.i2t_optimizer.zero_grad()
if self.nmt_train_flag:
self.nmt_optimizer.zero_grad()
def update_ScheduledSampling_prob(self, opt, epoch, dp_i2t_model):
if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
dp_i2t_model.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
return dp_i2t_model
def update_LearningRate(self, type, epoch):
if type == 'i2t':
if epoch > self.i2t_learning_rate_decay_start and self.i2t_learning_rate_decay_start >= 0:
frac = (epoch - self.i2t_learning_rate_decay_start) // self.i2t_learning_rate_decay_every
decay_factor = self.i2t_learning_rate_decay_rate ** frac
self.i2t_current_lr = self.i2t_lr * decay_factor
for group in self.i2t_optimizer.param_groups:
group['lr'] = self.i2t_current_lr
else:
self.i2t_current_lr = self.i2t_lr
if type == 'nmt':
if epoch > self.nmt_learning_rate_decay_start and self.nmt_learning_rate_decay_start >= 0:
self.nmt_current_lr = self.nmt_lr * self.nmt_learning_rate_decay_rate
for group in self.nmt_optimizer.param_groups:
group['lr'] = self.nmt_current_lr
else:
self.nmt_current_lr = self.nmt_lr
| 2.3125 | 2 |
api/test/conftest.py | avikalpg/IndianVotingAssistant | 8 | 12772610 | import pytest
@pytest.fixture
def get_base_url():
    """Base URL of the API server the tests run against (local dev server)."""
    base_url = "http://localhost:5000/"
    return base_url
employee.py | vinayak1809/Python-Billing-Software | 7 | 12772611 | #==================imports===================
import sqlite3
import re
import random
import string
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from time import strftime
from datetime import date
from tkinter import scrolledtext as tkst
#============================================
# --- Application-wide Tk / database setup -----------------------------------
root = Tk()                      # main (login) window
root.geometry("1366x768")
root.title("Billing Software")
# Tk StringVars bound to the entry widgets of the login/billing windows.
user = StringVar()               # login: employee id
passwd = StringVar()             # login: password
fname = StringVar()
lname = StringVar()
new_user = StringVar()
new_passwd = StringVar()
cust_name = StringVar()          # billing: customer name
cust_num = StringVar()           # billing: customer phone number
cust_new_bill = StringVar()
cust_search_bill = StringVar()   # billing: bill number to look up
bill_date = StringVar()
# NOTE: a sqlite3 connection used as a context manager commits/rolls back on
# exit but is NOT closed, so `db`/`cur` remain usable for the whole program.
with sqlite3.connect("./Database/store.db") as db:
    cur = db.cursor()
def random_bill_number(stringLength):
    """Return a bill number: the prefix 'CC' plus stringLength-2 random chars.

    Characters are drawn uniformly from uppercase letters and digits.
    (Fixed: `string.ascii_letters.upper()` listed A-Z twice, which made
    letters twice as likely to be picked as digits.)
    """
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(stringLength - 2))
    return 'CC' + suffix
def valid_phone(phn):
    """Validate an Indian mobile number: ten digits beginning with 7, 8 or 9."""
    return re.match(r"[789]\d{9}$", phn) is not None
def login(Event=None):
    """Validate the entered credentials against the employee table.

    On success, hides the login window and opens the billing window; on
    failure, shows an error dialog and clears the password field.
    """
    global username
    username = user.get()
    # Fixed: this line had been mangled to `<PASSWORD>.get()` (a scrubbing
    # artifact that was a syntax error); the password comes from the
    # `passwd` StringVar bound to the login entry.
    password = passwd.get()
    # NOTE(review): credentials appear to be stored and compared in plain
    # text -- consider hashing on both the write and read paths.
    with sqlite3.connect("./Database/store.db") as db:
        cur = db.cursor()
        cur.execute("SELECT * FROM employee WHERE emp_id = ? and password = ?", [username, password])
        results = cur.fetchall()
    if results:
        messagebox.showinfo("Login Page", "The login is successful")
        page1.entry1.delete(0, END)
        page1.entry2.delete(0, END)
        root.withdraw()
        global biller
        global page2
        biller = Toplevel()
        page2 = bill_window(biller)
        page2.time()
        # Route the window-close button through the confirm-and-exit flow.
        biller.protocol("WM_DELETE_WINDOW", exitt)
        biller.mainloop()
    else:
        messagebox.showerror("Error", "Incorrect username or password.")
        page1.entry2.delete(0, END)
def logout():
    """Confirm, then close the billing window and return to the login screen."""
    confirmed = messagebox.askyesno("Logout", "Are you sure you want to logout?", parent=biller)
    if not confirmed:
        return
    biller.destroy()
    root.deiconify()  # bring the hidden login window back
    page1.entry1.delete(0, END)
    page1.entry2.delete(0, END)
class login_page:
    """The employee login screen: background image, two entries, one button."""

    def __init__(self, top=None):
        # Fixed-size, non-resizable window so the background art lines up.
        top.geometry("1366x768")
        top.resizable(0, 0)
        top.title("Employee login")
        # Full-window label holding the background image.
        self.label1 = Label(root)
        self.label1.place(relx=0, rely=0, width=1366, height=768)
        self.img = PhotoImage(file="./images/employee_login.png")
        self.label1.configure(image=self.img)
        # Username entry (bound to the module-level `user` StringVar).
        self.entry1 = Entry(root)
        self.entry1.place(relx=0.373, rely=0.273, width=374, height=24)
        self.entry1.configure(
            font="-family {Poppins} -size 10",
            relief="flat",
            textvariable=user
        )
        # Password entry; show="*" masks the typed characters.
        self.entry2 = Entry(root)
        self.entry2.place(relx=0.373, rely=0.384, width=374, height=24)
        self.entry2.configure(
            font="-family {Poppins} -size 10",
            relief="flat",
            show="*",
            textvariable=passwd
        )
        # Login button wired to the module-level login() handler.
        self.button1 = Button(root)
        self.button1.place(relx=0.366, rely=0.685, width=356, height=43)
        self.button1.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#D2463E",
            cursor="hand2",
            foreground="#ffffff",
            background="#D2463E",
            font="-family {Poppins SemiBold} -size 20",
            borderwidth="0",
            text="Login",
            command=login
        )
class Item:
    """One cart line: a product name, its unit price, and the quantity bought."""

    def __init__(self, name, price, qty):
        # Attribute names are part of the public interface (Cart reads them).
        self.product_name, self.price, self.qty = name, price, qty
class Cart:
    """An in-memory shopping cart of Item-like objects.

    `dictionary` aggregates quantities per product name and is filled by
    `allCart()`. NOTE: successive allCart() calls accumulate on top of the
    existing dictionary (preserved behavior -- callers may rely on it).
    """

    def __init__(self):
        self.items = []        # Item objects in insertion order
        self.dictionary = {}   # product_name -> total qty (built by allCart)

    def add_item(self, item):
        """Append *item* (must expose product_name, price and qty) to the cart."""
        self.items.append(item)

    def remove_item(self):
        """Drop the most recently added item.

        Fixed: no longer raises IndexError when the cart is already empty.
        """
        if self.items:
            self.items.pop()

    def remove_items(self):
        """Empty the cart entirely."""
        self.items.clear()

    def total(self):
        """Return the grand total (sum of price * qty) as a float."""
        # float() preserves the original behavior of starting from 0.0 even
        # when every line's price and qty happen to be ints.
        return float(sum(item.price * item.qty for item in self.items))

    def isEmpty(self):
        """Return True when the cart holds no items.

        Fixed: previously returned None (not False) for a non-empty cart.
        """
        return len(self.items) == 0

    def allCart(self):
        """Aggregate quantities per product name into `self.dictionary`."""
        for item in self.items:
            if item.product_name in self.dictionary:
                self.dictionary[item.product_name] += item.qty
            else:
                self.dictionary[item.product_name] = item.qty
def exitt():
    """Ask for confirmation, then tear down both windows and quit the app."""
    if messagebox.askyesno("Exit", "Are you sure you want to exit?", parent=biller):
        biller.destroy()
        root.destroy()
class bill_window:
    def __init__(self, top = None):
        """Build the billing window: header, customer fields, action buttons,
        the category/sub-category/product selection cascade, and the receipt
        area. Product categories are loaded from the raw_inventory table."""
        top.geometry("1366x768")
        top.resizable(0, 0)
        top.title("Billing Software")
        # Full-window background image.
        self.label = Label(biller)
        self.label.place(relx=0, rely=0, width=1366, height=768)
        self.img = PhotoImage(file="./images/bill_window.png")
        self.label.configure(image=self.img)
        # Shows the logged-in employee id (module global set by login()).
        self.message = Label(biller)
        self.message.place(relx=0.038, rely=0.055, width=136, height=30)
        self.message.configure(
            font="-family {Poppins} -size 10",
            foreground="#000000",
            background="#ffffff",
            text=username,
            anchor="w"
        )
        # Live clock label, updated elsewhere (see the time() method).
        self.clock = Label(biller)
        self.clock.place(relx=0.9, rely=0.065, width=102, height=36)
        self.clock.configure(
            font="-family {Poppins Light} -size 12",
            foreground="#000000",
            background="#ffffff"
        )
        # Customer name entry.
        self.entry1 = Entry(biller)
        self.entry1.place(relx=0.509, rely=0.23, width=240, height=24)
        self.entry1.configure(
            font="-family {Poppins} -size 12",
            relief="flat",
            textvariable=cust_name
        )
        # Customer phone-number entry.
        self.entry2 = Entry(biller)
        self.entry2.place(relx=0.791, rely=0.23, width=240, height=24)
        self.entry2.configure(
            font="-family {Poppins} -size 12",
            relief="flat",
            textvariable=cust_num
        )
        # Bill-number search entry.
        self.entry3 = Entry(biller)
        self.entry3.place(relx=0.102, rely=0.23, width=240, height=24)
        self.entry3.configure(
            font="-family {Poppins} -size 12",
            relief="flat",
            textvariable=cust_search_bill
        )
        # Logout button (top-left).
        self.button1 = Button(biller)
        self.button1.place(relx=0.031, rely=0.104, width=76, height=23)
        self.button1.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 12",
            borderwidth="0",
            text="Logout",
            command=logout
        )
        # Search an existing bill by number.
        self.button2 = Button(biller)
        self.button2.place(relx=0.315, rely=0.234, width=76, height=23)
        self.button2.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 12",
            borderwidth="0",
            text="Search",
            command=self.search_bill
        )
        # Bottom row of bill actions: Total / Generate / Clear / Exit.
        self.button3 = Button(biller)
        self.button3.place(relx=0.048, rely=0.885, width=86, height=25)
        self.button3.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="Total",
            command=self.total_bill
        )
        self.button4 = Button(biller)
        self.button4.place(relx=0.141, rely=0.885, width=84, height=25)
        self.button4.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="Generate",
            command=self.gen_bill
        )
        self.button5 = Button(biller)
        self.button5.place(relx=0.230, rely=0.885, width=86, height=25)
        self.button5.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="Clear",
            command=self.clear_bill
        )
        self.button6 = Button(biller)
        self.button6.place(relx=0.322, rely=0.885, width=86, height=25)
        self.button6.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="Exit",
            command=exitt
        )
        # Cart actions: Add To Cart / Remove / Clear selection.
        self.button7 = Button(biller)
        self.button7.place(relx=0.098, rely=0.734, width=86, height=26)
        self.button7.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="Add To Cart",
            command=self.add_to_cart
        )
        self.button8 = Button(biller)
        self.button8.place(relx=0.274, rely=0.734, width=84, height=26)
        self.button8.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="""Clear""",
            command=self.clear_selection
        )
        self.button9 = Button(biller)
        self.button9.place(relx=0.194, rely=0.734, width=68, height=26)
        self.button9.configure(
            relief="flat",
            overrelief="flat",
            activebackground="#CF1E14",
            cursor="hand2",
            foreground="#ffffff",
            background="#CF1E14",
            font="-family {Poppins SemiBold} -size 10",
            borderwidth="0",
            text="""Remove""",
            command=self.remove_product
        )
        text_font = ("Poppins", "8")
        # Category combo: seeded from distinct product_cat values in the DB.
        self.combo1 = ttk.Combobox(biller)
        self.combo1.place(relx=0.035, rely=0.408, width=477, height=26)
        find_category = "SELECT product_cat FROM raw_inventory"
        cur.execute(find_category)
        result1 = cur.fetchall()
        cat = []
        # Manual de-duplication preserving first-seen order.
        for i in range(len(result1)):
            if(result1[i][0] not in cat):
                cat.append(result1[i][0])
        self.combo1.configure(
            values=cat,
            state="readonly",
            font="-family {Poppins} -size 8",
        )
        self.combo1.option_add("*TCombobox*Listbox.font", text_font)
        self.combo1.option_add("*TCombobox*Listbox.selectBackground", "#D2463E")
        # Sub-category combo: disabled until a category is chosen.
        self.combo2 = ttk.Combobox(biller)
        self.combo2.place(relx=0.035, rely=0.479, width=477, height=26)
        self.combo2.configure(font="-family {Poppins} -size 8")
        self.combo2.option_add("*TCombobox*Listbox.font", text_font)
        self.combo2.configure(state="disabled")
        # Product combo: disabled until a sub-category is chosen.
        self.combo3 = ttk.Combobox(biller)
        self.combo3.place(relx=0.035, rely=0.551, width=477, height=26)
        self.combo3.configure(
            state="disabled",
            font="-family {Poppins} -size 8"
        )
        self.combo3.option_add("*TCombobox*Listbox.font", text_font)
        # Quantity entry: disabled until a product is chosen.
        self.entry4 = ttk.Entry(biller)
        self.entry4.place(relx=0.035, rely=0.629, width=477, height=26)
        self.entry4.configure(
            font="-family {Poppins} -size 8",
            foreground="#000000",
            state="disabled"
        )
        # Read-only receipt area on the right.
        self.Scrolledtext1 = tkst.ScrolledText(top)
        self.Scrolledtext1.place(relx=0.439, rely=0.586, width=695, height=275)
        self.Scrolledtext1.configure(
            borderwidth=0,
            font="-family {Podkova} -size 8",
            state="disabled"
        )
        # Selecting a category cascades into the sub-category combo.
        self.combo1.bind("<<ComboboxSelected>>", self.get_category)
    def get_category(self, Event):
        """Combobox handler: populate combo2 with the sub-categories of the selected category."""
        self.combo2.configure(state="readonly")
        # Reset the dependent selections so stale values are not shown.
        self.combo2.set('')
        self.combo3.set('')
        find_subcat = "SELECT product_subcat FROM raw_inventory WHERE product_cat = ?"
        cur.execute(find_subcat, [self.combo1.get()])  # `cur`: module-level sqlite cursor
        result2 = cur.fetchall()
        subcat = []
        for j in range(len(result2)):
            if(result2[j][0] not in subcat):  # de-duplicate sub-category names
                subcat.append(result2[j][0])
        self.combo2.configure(values=subcat)
        self.combo2.bind("<<ComboboxSelected>>", self.get_subcat)
        # Product box stays disabled until a sub-category is picked.
        self.combo3.configure(state="disabled")
    def get_subcat(self, Event):
        """Combobox handler: populate combo3 with products of the selected (category, sub-category)."""
        self.combo3.configure(state="readonly")
        self.combo3.set('')
        find_product = "SELECT product_name FROM raw_inventory WHERE product_cat = ? and product_subcat = ?"
        cur.execute(find_product, [self.combo1.get(), self.combo2.get()])
        result3 = cur.fetchall()
        pro = []
        for k in range(len(result3)):
            pro.append(result3[k][0])
        self.combo3.configure(values=pro)
        self.combo3.bind("<<ComboboxSelected>>", self.show_qty)
        # Quantity entry stays disabled until a product is picked.
        self.entry4.configure(state="disabled")
    def show_qty(self, Event):
        """Combobox handler: enable the quantity entry and display current stock of the product."""
        self.entry4.configure(state="normal")
        # `biller`: module-level window the billing widgets live on.
        self.qty_label = Label(biller)
        self.qty_label.place(relx=0.033, rely=0.664, width=82, height=26)
        self.qty_label.configure(
            font="-family {Poppins} -size 8",
            anchor="w"
        )
        product_name = self.combo3.get()
        find_qty = "SELECT stock FROM raw_inventory WHERE product_name = ?"
        cur.execute(find_qty, [product_name])
        # NOTE(review): fetchone() returns None if the product row is missing —
        # the subscript below would then raise; confirm products are always present.
        results = cur.fetchone()
        self.qty_label.configure(
            text=f"In Stock: {results[0]}",
            background="#ffffff",
            foreground="#333333"
        )
    # Shared cart (class attribute) holding the items of the bill in progress.
    cart = Cart()
    def add_to_cart(self):
        """Validate the current selection and append it to the cart and the bill text.

        Two paths: if a 'Total' section is already printed, the bill text is
        rebuilt without the total section before the new item is appended.
        """
        self.Scrolledtext1.configure(state="normal")
        strr = self.Scrolledtext1.get('1.0', END)
        if strr.find('Total') == -1:
            # No total printed yet: simply append the new line.
            product_name = self.combo3.get()
            if(product_name != ""):
                product_qty = self.entry4.get()
                find_mrp = "SELECT mrp, stock FROM raw_inventory WHERE product_name = ?"
                cur.execute(find_mrp, [product_name])
                results = cur.fetchall()
                stock = results[0][1]
                mrp = results[0][0]
                if product_qty.isdigit() == True:
                    if (stock - int(product_qty)) >= 0:
                        sp = mrp * int(product_qty)  # selling price for the requested quantity
                        item = Item(product_name, mrp, int(product_qty))
                        self.cart.add_item(item)
                        self.Scrolledtext1.configure(state="normal")
                        bill_text = f"{product_name}\t\t\t\t\t\t\t{product_qty}\t\t\t\t\t\t{sp}\n"
                        self.Scrolledtext1.insert('insert', bill_text)
                        self.Scrolledtext1.configure(state="disabled")
                    else:
                        messagebox.showerror("Oops!", "Out of stock. Check quantity.", parent=biller)
                else:
                    messagebox.showerror("Oops!", "Invalid quantity.", parent=biller)
            else:
                messagebox.showerror("Oops!", "Choose a product.", parent=biller)
        else:
            # A total was already printed: rebuild the item lines (dropping the
            # divider + total section), then append the new item as above.
            self.Scrolledtext1.delete('1.0', END)
            new_li = []
            li = strr.split("\n")
            for i in range(len(li)):
                if len(li[i]) != 0:
                    if li[i].find('Total') == -1:
                        new_li.append(li[i])
                    else:
                        break
            # range(len - 1) intentionally drops the divider line kept in new_li.
            for j in range(len(new_li)-1):
                self.Scrolledtext1.insert('insert', new_li[j])
                self.Scrolledtext1.insert('insert','\n')
            product_name = self.combo3.get()
            if(product_name != ""):
                product_qty = self.entry4.get()
                find_mrp = "SELECT mrp, stock, product_id FROM raw_inventory WHERE product_name = ?"
                cur.execute(find_mrp, [product_name])
                results = cur.fetchall()
                stock = results[0][1]
                mrp = results[0][0]
                if product_qty.isdigit() == True:
                    if (stock - int(product_qty)) >= 0:
                        sp = results[0][0] * int(product_qty)
                        item = Item(product_name, mrp, int(product_qty))
                        self.cart.add_item(item)
                        self.Scrolledtext1.configure(state="normal")
                        bill_text = f"{product_name}\t\t\t\t\t\t\t{product_qty}\t\t\t\t\t\t{sp}\n"
                        self.Scrolledtext1.insert('insert', bill_text)
                        self.Scrolledtext1.configure(state="disabled")
                    else:
                        messagebox.showerror("Oops!", "Out of stock. Check quantity.", parent=biller)
                else:
                    messagebox.showerror("Oops!", "Invalid quantity.", parent=biller)
            else:
                messagebox.showerror("Oops!", "Choose a product.", parent=biller)
    def remove_product(self):
        """Remove the most recently added item from the cart and from the bill text."""
        if(self.cart.isEmpty() != True):
            self.Scrolledtext1.configure(state="normal")
            strr = self.Scrolledtext1.get('1.0', END)
            if strr.find('Total') == -1:
                # No total printed: drop the last item line from the text widget.
                try:
                    self.cart.remove_item()
                except IndexError:
                    messagebox.showerror("Oops!", "Cart is empty", parent=biller)
                else:
                    self.Scrolledtext1.configure(state="normal")
                    get_all_bill = (self.Scrolledtext1.get('1.0', END).split("\n"))
                    # Slice off the trailing item line plus the widget's trailing newlines.
                    new_string = get_all_bill[:len(get_all_bill)-3]
                    self.Scrolledtext1.delete('1.0', END)
                    for i in range(len(new_string)):
                        self.Scrolledtext1.insert('insert', new_string[i])
                        self.Scrolledtext1.insert('insert','\n')
                    self.Scrolledtext1.configure(state="disabled")
            else:
                # Total already printed: rebuild item lines without the total
                # section, then drop the last item.
                try:
                    self.cart.remove_item()
                except IndexError:
                    messagebox.showerror("Oops!", "Cart is empty", parent=biller)
                else:
                    self.Scrolledtext1.delete('1.0', END)
                    new_li = []
                    li = strr.split("\n")
                    for i in range(len(li)):
                        if len(li[i]) != 0:
                            if li[i].find('Total') == -1:
                                new_li.append(li[i])
                            else:
                                break
                    new_li.pop()  # drop the removed item's line
                    # range(len - 1) intentionally drops the divider line kept in new_li.
                    for j in range(len(new_li) - 1):
                        self.Scrolledtext1.insert('insert', new_li[j])
                        self.Scrolledtext1.insert('insert','\n')
                    self.Scrolledtext1.configure(state="disabled")
        else:
            messagebox.showerror("Oops!", "Add a product.", parent=biller)
    def wel_bill(self):
        """Create (or re-create) the four borderless Text widgets that show the
        customer name, phone number, bill number and bill date on the bill header."""
        self.name_message = Text(biller)
        self.name_message.place(relx=0.514, rely=0.452, width=176, height=30)
        self.name_message.configure(
            font="-family {Podkova} -size 10",
            borderwidth=0,
            background="#ffffff"
        )
        self.num_message = Text(biller)
        self.num_message.place(relx=0.894, rely=0.452, width=90, height=30)
        self.num_message.configure(
            font="-family {Podkova} -size 10",
            borderwidth=0,
            background="#ffffff"
        )
        self.bill_message = Text(biller)
        self.bill_message.place(relx=0.499, rely=0.477, width=176, height=26)
        self.bill_message.configure(
            font="-family {Podkova} -size 10",
            borderwidth=0,
            background="#ffffff"
        )
        self.bill_date_message = Text(biller)
        self.bill_date_message.place(relx=0.852, rely=0.477, width=90, height=26)
        self.bill_date_message.configure(
            font="-family {Podkova} -size 10",
            borderwidth=0,
            background="#ffffff"
        )
    def total_bill(self):
        """Append the divider + grand-total section to the bill (only once)."""
        if self.cart.isEmpty():
            messagebox.showerror("Oops!", "Add a product.", parent=biller)
        else:
            self.Scrolledtext1.configure(state="normal")
            strr = self.Scrolledtext1.get('1.0', END)
            if strr.find('Total') == -1:
                self.Scrolledtext1.configure(state="normal")
                divider = "\n\n\n" + ("─"*84)
                self.Scrolledtext1.insert('insert', divider)
                total = f"\nTotal\t\t\t\t\t\t\t\t\t\t\t\tRs. {self.cart.total()}"
                self.Scrolledtext1.insert('insert', total)
                divider2 = "\n" + ("─"*84)
                self.Scrolledtext1.insert('insert', divider2)
                self.Scrolledtext1.configure(state="disabled")
            else:
                # Total already present — nothing to do.
                return
    # 1 while a bill is being edited; 0 once it has been generated / loaded.
    state = 1
    def gen_bill(self):
        """Validate customer inputs, persist the bill to sqlite, decrement stock,
        and freeze the form. Recurses once to add the total section if missing."""
        if self.state == 1:
            strr = self.Scrolledtext1.get('1.0', END)
            self.wel_bill()
            if(cust_name.get() == ""):
                messagebox.showerror("Oops!", "Please enter a name.", parent=biller)
            elif(cust_num.get() == ""):
                messagebox.showerror("Oops!", "Please enter a number.", parent=biller)
            elif valid_phone(cust_num.get()) == False:
                messagebox.showerror("Oops!", "Please enter a valid number.", parent=biller)
            elif(self.cart.isEmpty()):
                messagebox.showerror("Oops!", "Cart is empty.", parent=biller)
            else:
                if strr.find('Total') == -1:
                    # Append the total section first, then re-enter with it present.
                    self.total_bill()
                    self.gen_bill()
                else:
                    self.name_message.insert(END, cust_name.get())
                    self.name_message.configure(state="disabled")
                    self.num_message.insert(END, cust_num.get())
                    self.num_message.configure(state="disabled")
                    cust_new_bill.set(random_bill_number(8))
                    self.bill_message.insert(END, cust_new_bill.get())
                    self.bill_message.configure(state="disabled")
                    bill_date.set(str(date.today()))
                    self.bill_date_message.insert(END, bill_date.get())
                    self.bill_date_message.configure(state="disabled")
                    # NOTE(review): `with sqlite3.connect(...)` commits/rolls back the
                    # transaction but does NOT close the connection.
                    with sqlite3.connect("./Database/store.db") as db:
                        cur = db.cursor()
                        insert = (
                            "INSERT INTO bill(bill_no, date, customer_name, customer_no, bill_details) VALUES(?,?,?,?,?)"
                        )
                        cur.execute(insert, [cust_new_bill.get(), bill_date.get(), cust_name.get(), cust_num.get(), self.Scrolledtext1.get('1.0', END)])
                        db.commit()
                        print(self.cart.allCart())
                        # Decrement stock for every item of the cart.
                        for name, qty in self.cart.dictionary.items():
                            update_qty = "UPDATE raw_inventory SET stock = stock - ? WHERE product_name = ?"
                            cur.execute(update_qty, [qty, name])
                            db.commit()
                    messagebox.showinfo("Success!!", "Bill Generated", parent=biller)
                    self.entry1.configure(
                        state="disabled",
                        disabledbackground="#ffffff",
                        disabledforeground="#000000"
                    )
                    self.entry2.configure(
                        state="disabled",
                        disabledbackground="#ffffff",
                        disabledforeground="#000000"
                    )
                    self.state = 0
        else:
            # Bill already generated — ignore further clicks until cleared.
            return
    def clear_bill(self):
        """Reset the whole bill form: clear all entries/texts, empty the cart,
        and re-arm the form for a new bill (state back to 1)."""
        self.wel_bill()
        self.entry1.configure(state="normal")
        self.entry2.configure(state="normal")
        self.entry1.delete(0, END)
        self.entry2.delete(0, END)
        self.entry3.delete(0, END)
        # Text widgets must be enabled before their content can be deleted.
        self.name_message.configure(state="normal")
        self.num_message.configure(state="normal")
        self.bill_message.configure(state="normal")
        self.bill_date_message.configure(state="normal")
        self.Scrolledtext1.configure(state="normal")
        self.name_message.delete(1.0, END)
        self.num_message.delete(1.0, END)
        self.bill_message.delete(1.0, END)
        self.bill_date_message.delete(1.0, END)
        self.Scrolledtext1.delete(1.0, END)
        self.name_message.configure(state="disabled")
        self.num_message.configure(state="disabled")
        self.bill_message.configure(state="disabled")
        self.bill_date_message.configure(state="disabled")
        self.Scrolledtext1.configure(state="disabled")
        self.cart.remove_items()
        self.state = 1
    def clear_selection(self):
        """Clear the product-selection widgets (category/sub-category/product/quantity)."""
        self.entry4.delete(0, END)
        self.combo1.configure(state="normal")
        self.combo2.configure(state="normal")
        self.combo3.configure(state="normal")
        self.combo1.delete(0, END)
        self.combo2.delete(0, END)
        self.combo3.delete(0, END)
        self.combo2.configure(state="disabled")
        self.combo3.configure(state="disabled")
        self.entry4.configure(state="disabled")
        try:
            # Hide the stock label by painting it white; it may not exist yet
            # if no product was ever selected.
            self.qty_label.configure(foreground="#ffffff")
        except AttributeError:
            pass
    def search_bill(self):
        """Look up a stored bill by its number and display it read-only."""
        cur.execute("SELECT * FROM bill WHERE bill_no = ?", [cust_search_bill.get().rstrip()])
        results = cur.fetchall()
        if results:
            self.clear_bill()
            self.wel_bill()
            # Row layout: (bill_no, date, customer_name, customer_no, bill_details).
            self.name_message.insert(END, results[0][2])
            self.name_message.configure(state="disabled")
            self.num_message.insert(END, results[0][3])
            self.num_message.configure(state="disabled")
            self.bill_message.insert(END, results[0][0])
            self.bill_message.configure(state="disabled")
            self.bill_date_message.insert(END, results[0][1])
            self.bill_date_message.configure(state="disabled")
            self.Scrolledtext1.configure(state="normal")
            self.Scrolledtext1.insert(END, results[0][4])
            self.Scrolledtext1.configure(state="disabled")
            self.entry1.configure(
                state="disabled",
                disabledbackground="#ffffff",
                disabledforeground="#000000"
            )
            self.entry2.configure(
                state="disabled",
                disabledbackground="#ffffff",
                disabledforeground="#000000"
            )
            self.state = 0
        else:
            messagebox.showerror("Error!!", "Bill not found.", parent=biller)
        self.entry3.delete(0, END)
    def time(self):
        """Refresh the clock label with the current time and re-schedule itself every second."""
        string = strftime("%H:%M:%S %p")
        self.clock.config(text=string)
        self.clock.after(1000, self.time)
# Build the login page and let Enter trigger the login handler.
page1 = login_page(root)
root.bind("<Return>", login)
root.mainloop() | 3.265625 | 3 |
evaluate/store_classifier_accuracy.py | shenkev/Exact-Inference-VAE-Robustness | 3 | 12772612 | <filename>evaluate/store_classifier_accuracy.py<gh_stars>1-10
import torch
import numpy as np
from PIL import Image
from torchvision import datasets, transforms
from mnist_model import Net
from torch.autograd import Variable
import os
import pickle
import logging
import pdb
def trim_to_28_from_32(data, num_data):
    """Crop flattened 32x32 images to their central 28x28 region.

    `data` is reshaped to (num_data, 32, 32), a 2-pixel border is trimmed on
    every side, and the result is returned flattened as (num_data, 784).
    """
    images = np.array(data).reshape(num_data, 32, 32)
    center_crop = images[:, 2:30, 2:30]
    return center_crop.reshape(num_data, 784)
def evaluate(dataset_name, dataset, ground_truth, adversarial_target, model, num_classes):
    """Log accuracy and adversarial-success statistics for one dataset.

    Runs `model` on `dataset`, takes the arg-max predictions and compares them
    against `ground_truth`.  Samples whose true label equals
    `adversarial_target` are excluded from all statistics.  Two sections are
    logged: "Correct Metrics" (prediction == truth) and "Adversarial Metrics"
    (prediction == adversarial_target).

    Returns:
        list: predicted label (length-1 array) for every sample, in order.
    """
    logging.info('')
    logging.info("Evaluating accuracy for " + dataset_name)

    # Arg-max class indices, shape (N, 1).
    predictions = model(dataset).data.max(1, keepdim=True)[1].numpy()

    predicted_labels = []
    correct_per_class = [0] * num_classes
    adversarial_success_per_class = [0] * num_classes
    number_per_class = [0] * num_classes
    for true_label, predicted in zip(ground_truth, predictions):
        if true_label != adversarial_target:
            number_per_class[true_label] += 1
            if predicted == true_label:
                correct_per_class[true_label] += 1
            if predicted == adversarial_target:
                adversarial_success_per_class[true_label] += 1
        predicted_labels.append(predicted)

    def _log_section(title, hits_per_class):
        # Overall and per-class ratios, skipping the adversarial target class.
        logging.info('')
        logging.info(title)
        logging.info('')
        hits = sum(hits_per_class) - hits_per_class[adversarial_target]
        count = sum(number_per_class) - number_per_class[adversarial_target]
        logging.info('Overall: {}/{} ({})'.format(hits, count, float(hits) / count))
        for class_num in range(num_classes):
            if class_num != adversarial_target:
                logging.info('Class {}: {}/{} ({})'.format(
                    class_num, hits_per_class[class_num], number_per_class[class_num],
                    float(hits_per_class[class_num]) / number_per_class[class_num]))
            else:
                logging.info('No metrics for {} as it same as adversarial target'.format(class_num))

    _log_section("Correct Metrics", correct_per_class)
    _log_section("Adversarial Metrics", adversarial_success_per_class)
    return predicted_labels
# Constants
NUM_CLASSES = 10
ADVERSARIAL_TARGET = 7  # the class the attack tries to force predictions into
LOG_FILE = 'pixel_loss_results.log'
DATA_DIR = 'pixel_loss_results'
# Set-up logging
logging.basicConfig(filename=LOG_FILE, format='%(message)s', level=logging.INFO)
logging.info('Begin Evaluation')
# Load data: file names start with a sample index and encode the data kind
# (best_l2, mcmcMean, x_gt, ...); .pckl files are skipped.
all_data_files = [ (int(data_file.split('_')[0]), os.path.join(DATA_DIR, data_file)) for data_file in os.listdir(DATA_DIR) if 'pckl' not in data_file]
best_l2 = []
best_latent = []
best_recon = []
mcmc_random = []
mcmc_mean = []
vae_recon = []
adversarial = []
ground_truth = []
labels = []
for num, data_file in all_data_files:
    with open(data_file, 'rb') as f:
        # Each file holds one whitespace-separated flattened image on its first line.
        np_data = np.array(f.readline().split()).astype(float)
    if 'best_l2' in data_file:
        best_l2.append((num, np_data))
    if 'best_latent' in data_file:
        best_latent.append((num, np_data))
    if 'best_recon' in data_file:
        best_recon.append((num, np_data))
    if 'mcmc_sample_1' in data_file:
        mcmc_random.append((num, np_data))
    if 'mcmcMean' in data_file:
        mcmc_mean.append((num, np_data))
    if 'vae_recon' in data_file:
        vae_recon.append((num, np_data))
    if 'x_adversarial' in data_file:
        adversarial.append((num, np_data))
    if 'x_gt' in data_file:
        ground_truth.append((num, np_data))
        # Ground-truth class is encoded in the 7th underscore-separated field.
        labels.append((num, int(data_file.split('_')[6])))
# Sort every collection by sample index so all arrays are aligned row-wise.
best_l2 = np.array([y[1] for y in sorted(best_l2, key=lambda x: x[0])])
best_latent = np.array([y[1] for y in sorted(best_latent, key=lambda x: x[0])])
best_recon = np.array([y[1] for y in sorted(best_recon, key=lambda x: x[0])])
mcmc_random = np.array([y[1] for y in sorted(mcmc_random, key=lambda x: x[0])])
mcmc_mean = np.array([y[1] for y in sorted(mcmc_mean, key=lambda x: x[0])])
vae_recon = np.array([y[1] for y in sorted(vae_recon, key=lambda x: x[0])])
adversarial = np.array([y[1] for y in sorted(adversarial, key=lambda x: x[0])])
ground_truth = np.array([y[1] for y in sorted(ground_truth, key=lambda x: x[0])])
labels = np.array([y[1] for y in sorted(labels, key=lambda x: x[0])])
data_size = len(labels)
# Modify data for evaluation: apply MNIST normalization and reshape to NCHW.
normalize = transforms.Compose([transforms.Normalize((0.1307,), (0.3081,))])
ground_truth = Variable(normalize(torch.FloatTensor(ground_truth)).view(data_size, 1, 28, 28), volatile=True)
adversarial = Variable(normalize(torch.FloatTensor(adversarial)).view(data_size, 1, 28, 28), volatile=True)
vae_recon = Variable(normalize(torch.FloatTensor(vae_recon)).view(data_size, 1, 28, 28), volatile=True)
best_l2 = Variable(normalize(torch.FloatTensor(best_l2)).view(data_size, 1, 28, 28), volatile=True)
best_latent = Variable(normalize(torch.FloatTensor(best_latent)).view(data_size, 1, 28, 28), volatile=True)
best_recon = Variable(normalize(torch.FloatTensor(best_recon)).view(data_size, 1, 28, 28), volatile=True)
mcmc_mean = Variable(normalize(torch.FloatTensor(mcmc_mean)).view(data_size, 1, 28, 28), volatile=True)
mcmc_random = Variable(normalize(torch.FloatTensor(mcmc_random)).view(data_size, 1, 28, 28), volatile=True)
# Load model (pre-trained MNIST classifier) and put it in inference mode.
model = Net()
model.load_state_dict(torch.load("checkpoint.pth"))
model.eval()
# Evaluate every dataset variant with the same classifier.
gt_class = evaluate(dataset_name="Ground Truth", dataset=ground_truth, ground_truth=labels,
                    adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
ad_class = evaluate(dataset_name="Adversarial", dataset=adversarial, ground_truth=labels,
                    adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
vae_class = evaluate(dataset_name="VAE Reconstructions", dataset=vae_recon, ground_truth=labels,
                     adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
l2_class = evaluate(dataset_name="Best L2", dataset=best_l2, ground_truth=labels,
                    adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
latent_class = evaluate(dataset_name="Best Latent", dataset=best_latent, ground_truth=labels,
                        adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
recon_class = evaluate(dataset_name="Best Recon", dataset=best_recon, ground_truth=labels,
                       adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
mcmcmean_class = evaluate(dataset_name="MCMC Mean", dataset=mcmc_mean, ground_truth=labels,
                          adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
mcmcrandom_class = evaluate(dataset_name="MCMC Random", dataset=mcmc_random, ground_truth=labels,
                            adversarial_target=ADVERSARIAL_TARGET, model=model, num_classes=NUM_CLASSES)
# Persist all predicted labels for later analysis.
with open("v0_target_7_old_class.pckl", 'wb') as f:
    pickle.dump([labels, gt_class, ad_class, vae_class, l2_class, latent_class, recon_class, mcmcmean_class, mcmcrandom_class], f)
| 2.46875 | 2 |
app/views.py | BamX/dota2-matches-statistic | 0 | 12772613 | <filename>app/views.py
from flask import jsonify, abort, request, make_response, render_template, redirect
from app import app, auth, db, models
from sqlalchemy.orm import aliased
BASE_URL = "http://www.dotabuff.com"
def allTeams(fameous):
    """Return every team; when `fameous` is truthy, only teams that have an image."""
    teams = models.Team.query
    if fameous:
        teams = teams.filter(models.Team.imageUrl != None)
    return teams.all()
@app.errorhandler(404)
def not_found(error):
    # Render a friendly page for any 404 raised anywhere in the app.
    return render_template('not-found.html')
@app.route('/')
def index():
    # Landing page with the team-versus search form.
    return render_template('index.html')
@app.route('/open/match/<int:matchId>')
def openMatch(matchId):
    """Redirect to the Dotabuff page of the given match."""
    target = "%s/matches/%s" % (BASE_URL, matchId)
    return redirect(target)
@app.route('/open/team/<int:teamId>')
def openTeam(teamId):
    """Redirect to the Dotabuff esports page of the given team."""
    target = "%s/esports/teams/%s" % (BASE_URL, teamId)
    return redirect(target)
@app.route('/teams')
def getTeams():
    """JSON autocomplete endpoint: teams whose name contains `query` and that have an image."""
    query = request.args['query']
    matching = models.Team.query.filter(
        models.Team.name.contains(query),
        models.Team.imageUrl != None,
    ).order_by('name').all()
    teams = [{'id': team.id, 'name': team.name} for team in matching]
    return jsonify(items = teams)
@app.route('/<int:firstTeamId>/vs/<int:secondTeamId>')
def versus(firstTeamId, secondTeamId):
    """Show all matches in which both given teams participated, newest first."""
    if firstTeamId == secondTeamId:
        abort(404)
    firstTeam = models.Team.query.filter_by(id = firstTeamId).first_or_404()
    secondTeam = models.Team.query.filter_by(id = secondTeamId).first_or_404()
    # Two aliases of Participant so the same match can be joined once per team.
    p1alias = aliased(models.Participant)
    p2alias = aliased(models.Participant)
    matches = models.Match.query.\
        join(p1alias, models.Match.participants).\
        join(p2alias, models.Match.participants).\
        filter(p1alias.team_id == firstTeamId).\
        filter(p2alias.team_id == secondTeamId).\
        order_by(models.Match.date.desc())
    return render_template('versus.html', matches = matches, firstTeam = firstTeam, secondTeam = secondTeam)
| 2.484375 | 2 |
settings.py | guoqiao/graphvizweb | 0 | 12772614 | <reponame>guoqiao/graphvizweb<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
import sys
# Resolve project-relative paths from this file's location.
BASE_DIR = dirname(abspath(__file__))
# Fix: `print BASE_DIR` was a Python-2 print statement (SyntaxError on py3);
# print() works on both interpreters.
print(BASE_DIR)
APPS_DIR = join(BASE_DIR, 'apps')
# Make the bundled apps importable without a package prefix.
sys.path.insert(0, APPS_DIR)
WWW_DIR = join(BASE_DIR, 'www')
STATIC_ROOT = join(WWW_DIR, 'static')
SECRET_KEY = 'aoaoas)a@k5=hax0o!*e3$ec(+3llxc_1_n5*sirg)wa)lp=*-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(BASE_DIR, 'db.sqlite3'),
    }
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = (
    join(BASE_DIR, "static"),
)
# Fix: same py2 print-statement conversion as above.
print(STATICFILES_DIRS)
| 1.789063 | 2 |
src/restic_compose_backup/containers_minecraft.py | Silthus/restic-compose-backup | 0 | 12772615 | <gh_stars>0
import os
import logging
from pathlib import Path
from restic_compose_backup.containers import Container
from restic_compose_backup.config import config, Config
from restic_compose_backup import (
commands,
restic,
rcon
)
from restic_compose_backup import utils
logger = logging.getLogger(__name__)
class MinecraftContainer(Container):
    """Backup handler for a Minecraft server container.

    Uses rcon to flush and freeze world saves while the mounted world
    directories are backed up with restic.
    """
    container_type = 'minecraft'

    def get_credentials(self) -> dict:
        """dict: rcon host/password/port read from the container's config env."""
        return {
            'host': self.hostname,
            'password': self.get_config_env('RCON_PASSWORD'),
            'port': self.get_config_env('RCON_PORT'),
        }

    def prepare_mc_backup(self) -> bool:
        """Disable auto-saving and flush all world data to disk via rcon."""
        creds = self.get_credentials()
        with utils.environment('RCON_PASSWORD', creds['password']):
            rcon.save_off(creds['host'], creds['port'])
            rcon.save_all(creds['host'], creds['port'])
            rcon.sync(creds['host'], creds['port'])
        return True

    def ping(self) -> bool:
        """Check the availability of the service.

        Returns True when the server answers rcon, False when it is
        unreachable or an error occurs.
        """
        creds = self.get_credentials()
        try:
            logger.debug("[rcon-cli] checking if minecraft server %s is online...", self.service_name)
            with utils.environment('RCON_PASSWORD', creds['password']):
                return rcon.is_online(
                    creds['host'],
                    creds['port']
                )
        except Exception as ex:
            logger.error('[rcon-cli] unable to contact minecraft server %s', self.service_name)
            # Bug fix: previously this returned 1 (truthy), which reported an
            # unreachable server as online.
            return False

    def backup(self) -> bool:
        """Back up all mounted world volumes with restic.

        Returns True if any error occurred, False on full success
        (error-flag convention used by the callers of this framework).
        """
        config = Config()
        creds = self.get_credentials()
        errors = False
        with utils.environment('RCON_PASSWORD', creds['password']):
            try:
                # turn off auto-save and sync all data to the disk before backing up worlds
                self.prepare_mc_backup()
                for mount in self.filter_mounts():
                    backup_data = self.get_volume_backup_destination(mount, '/minecraft')
                    logger.info('Backing up %s', mount.source)
                    vol_result = restic.backup_files(config.repository, source=backup_data, tags=self.tags)
                    logger.debug('Minecraft backup exit code: %s', vol_result)
                    if vol_result != 0:
                        logger.error('Minecraft backup exited with non-zero code: %s', vol_result)
                        errors = True
            except Exception as ex:
                logger.error('Exception raised during minecraft backup')
                logger.exception(ex)
                errors = True
            # Always turn saving back on, even if the backup failed.
            rcon.save_on(creds['host'], creds['port'])
        return errors
derrida/common/solr_backend.py | making-books-ren-today/test_eval_4_derrmar | 11 | 12772616 | '''
Haystack does not yet support range facets on Solr. This module
provides subclasses of SolrSearchQuery and SolrSearchBackend to
patch in range facet functionality.
'''
from haystack import connections
from haystack.backends.solr_backend import SolrSearchQuery, SolrSearchBackend, \
SolrEngine
from unidecode import unidecode
class RangeSolrSearchQuery(SolrSearchQuery):
    """SolrSearchQuery subclass that adds Solr range-facet support on top of
    the normal field facets."""

    def __init__(self, *args, **kwargs):
        super(RangeSolrSearchQuery, self).__init__(*args, **kwargs)
        # field name -> range facet options (start / end / gap / hardend)
        self.range_facets = {}

    def add_field_facet(self, field, **options):
        # extend default facet field method to handle a special
        # range=True case
        if options.get('range', None):
            self.add_range_facet(field, **options)
        else:
            return super(RangeSolrSearchQuery, self).add_field_facet(field, **options)

    def add_range_facet(self, field, **options):
        """Adds a solr range facet on a field. Options must include
        start, end, and gap."""
        # using same logic as normal facets; for range facets this
        # is probably unnecessary since they have to be numeric anyway
        field_name = connections[self._using].get_unified_index() \
                                             .get_facet_fieldname(field)
        self.range_facets[field_name] = options.copy()

    def build_params(self, *args, **kwargs):
        """Extend default build params logic to include any facet range options."""
        search_kwargs = super(RangeSolrSearchQuery, self).build_params(*args, **kwargs)
        if not self.range_facets:
            return search_kwargs
        range_kwargs = {
            'facet.range': list(self.range_facets.keys())
        }
        for field, opts in self.range_facets.items():
            # NOTE: not exposing other range facet params for now
            for solr_opt in ['start', 'end', 'gap']:
                if solr_opt in opts:
                    range_kwargs['f.%s.facet.range.%s' % (field, solr_opt)] \
                        = opts[solr_opt]
            # support hard end option; convert python boolean to solr bool
            if 'hardend' in opts:
                val = 'true' if bool(opts['hardend']) else 'false'
                range_kwargs['f.%s.facet.range.hardend' % field] = val
        search_kwargs.update(range_kwargs)
        return search_kwargs

    def post_process_facets(self, results):
        '''
        Extend post processing logic to include facet range data in returned
        facets.
        '''
        facets = super(RangeSolrSearchQuery, self).post_process_facets(results)
        if 'facet_ranges' in results:
            # copy facet range data into existing facet data;
            # [0] unwraps the 1-tuple produced by SolrRangeSearchBackend.
            facets['ranges'] = results['facet_ranges'][0]
            for data in facets['ranges'].values():
                # possible to get no counts, in which case we can't calculate a max
                if data['counts']:
                    # find the max value for the facet_ranges
                    data['max'] = max(data['counts'][1::2])
                # solr returns a flat list of value, count, value, count;
                # use zip to convert it into a list of (value, count) pairs
                data['counts'] = list(zip(data['counts'][::2], data['counts'][1::2]))
        return facets

    def _clone(self, *args, **kwargs):
        # extend clone to ensure range facets are preserved
        clone = super(RangeSolrSearchQuery, self)._clone(klass=self.__class__,
                                                         *args, **kwargs)
        clone.range_facets = self.range_facets.copy()
        return clone
class SolrRangeSearchBackend(SolrSearchBackend):
    # extend default solr backend to ensure facet ranges are accessible
    # in the result for processing by RangeSolrSearchQuery

    def _process_results(self, raw_results, *args, **kwargs):
        """Copy Solr's facet_ranges data into the processed result dict."""
        results = super(SolrRangeSearchBackend, self)._process_results(raw_results,
            *args, **kwargs)
        if hasattr(raw_results, 'facets'):
            # NOTE(review): the trailing comma wraps the value in a 1-tuple;
            # RangeSolrSearchQuery.post_process_facets unwraps it with [0], so
            # the two quirks cancel out — confirm before "fixing" either side.
            results['facet_ranges'] = raw_results.facets.get('facet_ranges', {}),
        return results

    def build_schema(self, fields):
        # haystack doesn't have any customization points for schema generation
        # or types, and Solr won't allow tokenization/customization on
        # the built string field; customize the generated schema here
        # to use local 'string_en' solr field for fields ending in "_isort"
        schema = super(SolrRangeSearchBackend, self).build_schema(fields)
        for field_cfg in schema[1]:
            if field_cfg['field_name'].endswith('_isort'):
                field_cfg['type'] = 'string_en'
        return schema
class RangeSolrEngine(SolrEngine):
    """SolrEngine wired to the range-aware backend and query classes above."""
    backend = SolrRangeSearchBackend
    query = RangeSolrSearchQuery
def facet_sort_ignoreaccents(facets, *fields):
    """Sort the named alpha facet fields in place, ignoring accents.

    Sorting happens client-side: doing it in Solr would require stripping
    accents from the displayed facet values as well.
    """
    if not facets:
        return facets
    field_data = facets['fields']
    for name in fields:
        if name in field_data:
            field_data[name].sort(key=lambda entry: unidecode(entry[0]))
    return facets
| 2.4375 | 2 |
figs12_13.py | asl-epfl/asl-it-2021 | 0 | 12772617 | <gh_stars>0
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
from functions import *
# %%
getcontext().prec = 200  # high-precision Decimal context for the traditional SL runs
mpl.style.use('seaborn-deep')
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['text.latex.preamble'] = r'\usepackage{bm}'
# %% Figure path
FIG_PATH = 'figs/'
if not os.path.isdir(FIG_PATH):
    os.makedirs(FIG_PATH)
# %% Setup
N = 10  # network size
M = 3  # number of hypotheses
np.random.seed(0)
SETUP = 'SLOW'
# SETUP = 'FAST'
# Transition matrices: the SLOW/FAST setup selects horizon, Markov step dx,
# output file names, RNG seeds and plot limits for figures 13 / 12.
if SETUP == 'SLOW':
    N_ITER = 10000
    dx = 1e-4
    fig_name = ['13a.pdf', '13b.pdf', '13c.pdf']
    seed = [1234, 4, 12]
    ylim_plot = [0.18, .5]
else:
    N_ITER = 1000
    dx = 1e-3
    fig_name = ['12a.pdf', '12b.pdf', '12c.pdf']
    seed = [100, 15, 6]
    ylim_plot = [0.16, .54]
# Markov transition matrices for the hypothesis, combination matrix and noise state.
T_hyp = np.array([[1-5*dx, 5*dx, 0], [5*dx, 1-10*dx, 5*dx], [0, 5*dx, 1-5*dx]])
T_mat = np.array([[1-dx, dx], [dx, 1-dx]])
T_var = np.array([[1-dx, dx, 0], [dx, 1-2*dx, dx], [0, dx, 1-dx]])
# ASL step size
delta = 1e2 * dx
# Noise profiles
var_list = np.array([0, 5, 500])
# %% Graph generation: random symmetric adjacency with self-loops
G = np.random.choice([0.0, 1.0], size=(N, N), p=[0.5, 0.5])
G = G + G.T + np.eye(N)
G = (G > 0) * 1.0
# %% Averaging Rule -> Left-stochastic matrix
A_ls = np.zeros((N, N))
A_ls = G / G.sum(axis = 0)
A_ls_dec = np.array([[Decimal(x) for x in y] for y in A_ls])
# %% Laplacian Rule -> Doubly-stochastic matrix
A_ds = np.zeros((N, N))
deg = G.sum(axis = 0)
dmax = np.max(deg)
A_ds[G > 0] = 1 / dmax
deg_n = deg - np.diag(G)  # degree excluding self-loop
np.fill_diagonal(A_ds, 1 - deg_n * 1/ dmax)
A_ds_dec = decimal_array(A_ds)
# %% Check that A^i converges (primitivity: powers of A stabilize)
print('Left-stochastic A is primitive: ',
      np.all(np.isclose(np.linalg.matrix_power(A_ls, 100), np.linalg.matrix_power(A_ls, 101))),
      ' Doubly-stochastic A is primitive: ',
      np.all(np.isclose(np.linalg.matrix_power(A_ds, 100), np.linalg.matrix_power(A_ds, 101))))
# %% Hypotheses: Laplace means 1, 2, 3 with scale b
theta = np.arange(1, 4) * 1.0
theta_dec = decimal_array(theta)
b = 1
x = np.linspace(-10, 10, 1000)
x_dec = decimal_array(x)
dt = (max(x)-min(x))/len(x)
dt_dec = (max(x_dec)-min(x_dec))/len(x_dec)
# %% Initialization: random beliefs normalized to sum to one per agent
np.random.seed(0)
mu_0 = np.random.rand(N, M)
mu_0 = mu_0 / np.sum(mu_0, axis = 1)[:, None]
# %% Markov sequences for the hypothesis, comb. matrix and noise profile:
# Seed values are chosen empirically to allow a clear illustration of the changing variables
# Hypothesis chain (states 0..2, transitions T_hyp)
np.random.seed(seed[0])
theta_vector = []
for i in range(N_ITER):
    if i == 0:
        theta_vector.append(np.random.randint(0, 3))
    else:
        theta_vector.append(np.random.choice([0, 1, 2], 1, p = list(T_hyp[theta_vector[i-1]]))[0])
# Comb. matrix chain (alternates between left- and doubly-stochastic A)
np.random.seed(seed[1])
A_list = [A_ls, A_ds]
A_vector, state_mat = [], []
for i in range(N_ITER):
    if i == 0:
        state_mat.append(np.random.randint(0, 2))
    else:
        state_mat.append(np.random.choice([0, 1], 1, p = list(T_mat[state_mat[i-1]]))[0])
    A_vector.append(A_list[state_mat[i]])
# Noise profile chain (selects the variance from var_list)
np.random.seed(seed[2])
state_var = []
for i in range(N_ITER):
    if i == 0:
        state_var.append(np.random.randint(0, 3))
    else:
        state_var.append(np.random.choice([0, 1, 2], 1, p = list(T_var[state_var[i-1]]))[0])
# %% Generate observations: Laplace likelihood plus Gaussian noise of the active profile
csi = []
for l in range(0, N):
    csi.append(np.random.laplace(theta[theta_vector], b) +
               np.sqrt(var_list[state_var]) * np.random.randn(len(theta_vector)))
csi = np.array(csi)
csidec = decimal_array(csi)
# %% Simulate ASL (adaptive social learning) under the Markov-switching setup
MU = asl_markov(mu_0, csi, A_vector, N_ITER, theta, b, delta)
# %% Plot ASL belief evolution of agent index 4
f, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(np.array([MU[k][4] for k in range(len(MU))]))
ax.set_xlim([0, N_ITER])
ax.set_ylim(ylim_plot)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel(r'$i$', fontsize=16)
ax.set_ylabel(r'$\bm{\mu}_{1,i}(\theta)$', fontsize=16)
plt.legend([r'$\theta=1$', r'$\theta=2$', r'$\theta=3$'],
           fontsize=16, ncol=3, handlelength=1, loc='center', bbox_to_anchor=[0.5, -0.47])
plt.subplots_adjust(bottom=0.35)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig(FIG_PATH + fig_name[0])
# %% Simulate traditional SL (Decimal precision) for comparison
MU_sl = sl_markov(mu_0, csidec, A_vector, N_ITER, theta_dec, b)
# %% Plot SL belief evolution of agent 0
f, ax = plt.subplots(1,1, figsize=(8,3))
ax.plot(np.array([MU_sl[k][0] for k in range(len(MU_sl))]))
ax.set_xlim([0, N_ITER])
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel(r'$i$', fontsize=16)
ax.set_ylabel(r'$\bm{\mu}_{1,i}(\theta)$', fontsize=16)
plt.legend([r'$\theta=1$', r'$\theta=2$', r'$\theta=3$'],
           fontsize=16, ncol=3, handlelength=1, loc = 'center', bbox_to_anchor=[0.5,-0.47])
plt.subplots_adjust(bottom=0.35)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig(FIG_PATH + fig_name[2])
# %% Prob. of error: Monte Carlo over fresh observation realizations
N_MC = 1000
MU_all = []
for n in range(N_MC):
    csi = []
    for l in range(0, N):
        csi.append(np.random.laplace(theta[theta_vector], b) +
                   np.sqrt(var_list[state_var]) * np.random.randn(len(theta_vector)))
    csi = np.array(csi)
    # NOTE(review): this run calls `asl` while the earlier single run uses
    # `asl_markov` — confirm the intended variant (both take A_vector here).
    MU_all.append(asl(mu_0, csi, A_vector, N_ITER, theta, b, delta))
    if n % 100 == 0:
        print(n)
# Empirical accuracy of agent 0's arg-max decision against the true hypothesis chain.
mu_pe = np.array([[MU_all[j][k][0] for k in range(len(MU))] for j in range(N_MC)])
acc = np.sum(np.equal(np.argmax(mu_pe, axis=2)[:, :-1], np.array(theta_vector)), axis=0) / N_MC
# %% Plot prob. of error
f, ax = plt.subplots(1, 1, figsize=(8,2))
ax.plot(1 - acc)
ax.set_xlim([0, N_ITER])
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel(r'$i$', fontsize=16)
ax.set_ylabel(r'$p^{(0.01)}_{1,i}(\theta)$', fontsize=16)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig(FIG_PATH + fig_name[1], bbox_inches='tight')
| 2 | 2 |
control-api/app/api/endpoints/sensors.py | Towed-ROV/control-api | 1 | 12772618 | <reponame>Towed-ROV/control-api<gh_stars>1-10
import asyncio
import json
import queue
import threading
import time
from multiprocessing import Queue
from communication.data_saver_connection import DataSaverConnection
from communication.payload_receiver import PayloadReceiver
from communication.sensor_subscriber import SensorSubscriber
from fastapi import APIRouter, Request
from sse_starlette.sse import EventSourceResponse
router = APIRouter()
# Subscriber for sensor data published by the remote Towed-ROV.
# NOTE(review): host/IP is hard-coded — consider moving to configuration.
data_queue_1 = Queue(maxsize=15)
sensor_sub_1 = SensorSubscriber(data_queue_1, host="192.168.1.118", port=8001)
sensor_sub_1.start()
# Subscriber for sensor data from the local suitcase box (same host, port 8002).
data_queue_2 = Queue(maxsize=15)
sensor_sub_2 = SensorSubscriber(data_queue_2, host="192.168.1.118", port=8002)
sensor_sub_2.start()
# Merges and processes the payloads arriving from both publisher queues.
payload_receiver = PayloadReceiver()
payload_receiver.add_queue(data_queue_1)
payload_receiver.add_queue(data_queue_2)
# Shared recording state, mutated by toggle_csv_recorder() and read by the
# SSE stream below.
save_queue = None
is_recording = False
exit_flag = threading.Event()
saver_connection = DataSaverConnection()
@router.get("/toggle_recording")
def toggle_csv_recorder():
"""Starts recording the sensordata in csv""
Returns:
dict: wether the recording is running or not
"""
global is_recording
global save_queue
is_recording = not is_recording
if is_recording:
# enforce new queue everytime (cus of slow garbage collection or something?)
save_queue = queue.Queue()
saver_connection.start(save_queue, exit_flag)
else:
saver_connection.stop(save_queue, exit_flag)
save_queue = None
return {"is_recording": is_recording}
@router.get('/live')
async def live_sensor_data_feed(request: Request):
    """SSE endpoint: stream merged sensor payloads to the connected client."""
    async def sensor_data_generator():
        """Yield sensor payloads received through the two ZMQ subscribers
        as server-sent events.

        Yields:
            dict: SSE event with the JSON-encoded sensor payload
        """
        print("[OPEN] SSE")
        global is_recording
        skips = 0  # NOTE(review): unused — counter_skip is used instead
        counter_skip = 0
        counter_sent = 0
        # start = time.time()
        while True:
            # Stop streaming as soon as the client disconnects.
            if await request.is_disconnected():
                break
            payload = payload_receiver.get_all()
            if payload is not None:
                if is_recording:
                    save_to_csv(payload)
                yield {"event": "stream", "data": json.dumps(payload)}
                counter_sent = counter_sent + 1
            else:
                counter_skip = counter_skip + 1
            # DEBUGG HELP
            # TODO: remove before production release
            # if ((time.time() - start) > 5):
            #     print("TIME________: ", str(time.time() - start))
            #     print("Times sent : ", str(counter_sent))
            #     print("Times skips : ", str(counter_skip))
            #     counter_sent = 0
            #     counter_skip = 0
            #     start = time.time()
            # Throttle to roughly 10 events per second.
            await asyncio.sleep(0.09)
        print("[CLOSE] SSE")
    return EventSourceResponse(sensor_data_generator())
def save_to_csv(payload):
    """Enqueue a sensor-data payload for the CSV writer thread.

    Args:
        payload (dict): sensor payload; only "sensor_data" payloads are
            persisted (e.g. "response" payloads are ignored).
    """
    if payload is None:
        return
    # Guard against saving payloads with an invalid name, such as
    # "response" payloads.
    if payload["payload_name"] != "sensor_data":
        return
    # Snapshot the global: toggle_csv_recorder() may set save_queue to None
    # (or has not created it yet) between the caller's is_recording check
    # and this call, which previously raised AttributeError.
    q = save_queue
    if q is None:
        return
    try:
        q.put_nowait(payload)
    except queue.Full:
        print("FULL QUEUE")
| 2.375 | 2 |
ddf_utils/chef/procedure/merge_entity.py | semio/ddf_utils | 2 | 12772619 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""merge_entity procedure for recipes"""
import logging
from typing import List
from .. helpers import debuggable
from .. model.ingredient import DataPointIngredient
from .. model.chef import Chef
logger = logging.getLogger('merge_entity')
@debuggable
def merge_entity(chef: Chef, ingredients: List[DataPointIngredient], dictionary,
                 target_column, result, merged='drop') -> DataPointIngredient:
    """Merge entity values of a datapoint ingredient into broader groups.

    The mapping in ``dictionary`` is applied to ``target_column`` of every
    indicator dataframe; ``merged`` controls what happens to the source
    entities (e.g. ``'drop'``).
    """
    from ...transformer import merge_keys

    assert len(ingredients) == 1, "procedure only support 1 ingredient for now."
    ingredient = ingredients[0]

    merged_data = {
        indicator: merge_keys(frame.set_index(ingredient.key),
                              dictionary,
                              target_column=target_column,
                              merged=merged).reset_index()
        for indicator, frame in ingredient.compute().items()
    }
    return DataPointIngredient.from_procedure_result(result, ingredient.key, merged_data)
| 2.234375 | 2 |
quex/engine/state_machine/construction/TEST/test-repeat.py | smmckay/quex-mirror | 0 | 12772620 | #! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.engine.state_machine.core import *
import quex.engine.state_machine.construction.repeat as repeat
from quex.engine.state_machine.TEST_help.some_dfas import *
if "--hwut-info" in sys.argv:
print "DFA Operations: Repetition with min and max repetition numbers"
sys.exit(0)
print "-------------------------------------------------------------------------------"
print "##sm0", sm3
print "## repeat.do(sm3) "
sm3r = repeat.do(sm3)
print "##result = ", sm3r
print "-------------------------------------------------------------------------------"
print "##sm0", sm3
print "## repeat.do(sm3, 2) "
sm3r = repeat.do(sm3, 2)
print "##result = ", sm3r
print "-------------------------------------------------------------------------------"
print "##sm0", sm3
print "## repeat.do(sm3, 0, 2) "
sm3r = repeat.do(sm3, 0, 2)
print "##result = ", sm3r
print "-------------------------------------------------------------------------------"
print "##sm0", sm3
print "## repeat.do(sm3, 2, 2) "
sm3r = repeat.do(sm3, 2, 2)
print "##result = ", sm3r
| 2.328125 | 2 |
main/forms.py | olnikiforov/djangoProject | 0 | 12772621 | <filename>main/forms.py
"""Forms for site."""
from django import forms
from django.forms import ModelForm, Select, Textarea, TextInput
from .models import Author, Comments, Post, Subscriber
class PostForm(ModelForm):
    """Form for creating or editing a blog post (title, description, content)."""
    class Meta:
        """Bind the form to the Post model and style its widgets."""
        model = Post
        fields = ['title', 'description', 'content']
        # Bootstrap styling; placeholders are user-facing (Russian) strings.
        widgets = {
            "title": TextInput(attrs={
                "class": "form-control",
                "placeholder": "Название статьи"
            }),
            "description": TextInput(attrs={
                "class": "form-control",
                "placeholder": "Описание статьи"
            }),
            "content": Textarea(attrs={
                "class": "form-control",
                "placeholder": "Содержимое"
            })
        }
class SubscriberForm(ModelForm):
    """Form for subscribing an email address to an author's posts."""
    # Author picker, sorted by name, with an explicit empty choice.
    author_id = forms.ModelChoiceField(
        queryset=Author.objects.all().order_by('name'),
        empty_label="Choose Author",
        widget=forms.Select(
            attrs={"class": "form-control"}
        )
    )
    class Meta:
        """Bind the form to the Subscriber model and style its widgets."""
        model = Subscriber
        fields = ['email_to', 'author_id']
        widgets = {
            "email_to": TextInput(attrs={
                "class": "form-control",
                "placeholder": "Email подписчика "
            }),
        }
class CommentsForm(ModelForm):
    """Form for posting a comment tied to a subscriber."""
    class Meta:
        """Bind the form to the Comments model and style its widgets."""
        model = Comments
        fields = ['body', 'subs_id']
        widgets = {
            "body": TextInput(attrs={
                "class": "form-control",
                "placeholder": "Comment"
            }
            ),
            "subs_id": Select(attrs={
                "class": "form-control",
                "placeholder": "subscriber ID"
            }),
        }
| 2.6875 | 3 |
learning_logs/apps.py | kevinbowen777/learning_log | 0 | 12772622 | from django.apps import AppConfig
class LearningLogsConfig(AppConfig):
    """Django application configuration for the learning_logs app."""
    default_auto_field = "django.db.models.BigAutoField"
    name = "learning_logs"
| 1.367188 | 1 |
handlers/qr.py | Cratosart/test_disc | 0 | 12772623 | <reponame>Cratosart/test_disc<gh_stars>0
import qrcode
import secrets
import string
import os
from PIL import Image, ImageDraw
from aiogram import types
from aiogram.dispatcher.filters import Command
from utils import get_qr
from utils import delete_user_qr
from loader import dp, bot
@dp.message_handler(Command(commands='qr'))
async def send_qr(message: types.Message):
    """Handle /qr: generate the user's QR image, send it, then delete the file."""
    photo = get_qr(message.from_user.id)
    await bot.send_photo (
        chat_id=message.from_user.id,
        photo=photo,
    )
    # Clean up the generated image once it has been delivered.
    delete_user_qr(message.from_user.id)
    return
| 2.015625 | 2 |
model/npi.py | nienjiuntai/pytorch-npi | 0 | 12772624 | <reponame>nienjiuntai/pytorch-npi
from torch.nn.modules.sparse import Embedding
from tasks.add.env import AdditionEnv
from tasks.add.core import StateEncoder, program_embedding
import torch
from torch import optim, tensor
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import activation
from tasks.add.config import config
import numpy as np
class NPITerm(nn.Module):
    """Termination head: maps the core's hidden state to a stop probability."""

    def __init__(self, input_dim: int):
        super().__init__()
        # Single affine projection to a scalar logit.
        self._fcn = nn.Linear(in_features=input_dim, out_features=1)

    def forward(self, x):
        # Squash the logit into (0, 1).
        return torch.sigmoid(self._fcn(x))
class NPIProg(nn.Module):
    """Program head: maps the hidden state to log-probabilities over programs."""

    def __init__(self, input_dim: int, prog_key_dim: int, prog_num: int,):
        super().__init__()
        # Hidden state -> program key -> program logits.
        self._fcn1 = nn.Linear(in_features=input_dim, out_features=prog_key_dim)
        self._fcn2 = nn.Linear(in_features=prog_key_dim, out_features=prog_num)

    def forward(self, x):
        key = self._fcn1(x)
        logits = self._fcn2(F.relu(key))
        # Flatten to a single (1, prog_num) distribution.
        return F.log_softmax(logits.view(1, -1), dim=1)
class NPIArg(nn.Module):
    """Argument head: maps the hidden state to log-probabilities for one
    argument slot."""

    def __init__(self, input_dim: int, arg_dim: int):
        super().__init__()
        self.f_arg = nn.Linear(input_dim, arg_dim)

    def forward(self, x):
        logits = self.f_arg(x)
        # One flat distribution over the argument alphabet.
        return F.log_softmax(logits.view(1, -1), dim=1)
class NPICore(nn.Module):
    """Stateful LSTM core of the NPI: carries (h, c) across forward() calls."""

    def __init__(self, input_dim: int, hidden_dim: int, layers: int = 1):
        super(NPICore, self).__init__()
        self.input_dim, self.hidden_dim = input_dim, hidden_dim
        # NOTE(review): h_0/c_0 are plain tensors, not registered buffers, so
        # they are excluded from state_dict() and module .to() — confirm intended.
        self.h_0, self.c_0 = torch.zeros(1, self.hidden_dim).to(config.device), torch.zeros(1, self.hidden_dim).to(config.device)
        self.cell_inpt = nn.LSTMCell(input_size=input_dim, hidden_size=hidden_dim)
        self.cells = nn.ModuleList([nn.LSTMCell(input_size=hidden_dim, hidden_size=hidden_dim) for _ in range(layers)])

    def forward(self, x):
        h_x, c_x = self.cell_inpt(x.view(1, -1), (self.h_0, self.c_0))
        for cell in self.cells:
            h_x, c_x = cell(F.relu(h_x), (h_x, c_x))
        # Carry the state to the next call, detached from the autograd graph.
        self.h_0, self.c_0 = h_x.clone().detach(), c_x.clone().detach()
        return F.relu(h_x)

    def reset_states(self):
        """Zero the carried (h, c) state, e.g. before a new episode."""
        self.h_0, self.c_0 = torch.zeros(1, self.hidden_dim).to(config.device), torch.zeros(1, self.hidden_dim).to(config.device)
        # NOTE(review): nn.LSTMCell has no reset_states(); the loops below
        # appear to be defensive no-ops.
        for layer in self.cell_inpt.modules():
            if hasattr(layer, 'reset_states'):  # could use `isinstance` instead to check if it's an RNN layer
                layer.reset_states()
        for cell in self.cells:
            for layer in cell.modules():
                if hasattr(layer, 'reset_states'):  # could use `isinstance` instead to check if it's an RNN layer
                    layer.reset_states()
class NPI(nn.Module):
    """Neural Programmer-Interpreter: encoder + LSTM core + termination,
    program and argument heads, with a recursive step() interpreter loop."""

    def __init__(self, state_encoder: StateEncoder, model_path: str = None, max_depth: int = 10, max_steps: int = 1000):
        # NOTE(review): model_path is accepted but never used — confirm
        # whether weights were meant to be loaded here.
        super(NPI, self).__init__()
        self.steps = 0
        self.max_depth, self.max_steps = max_depth, max_steps
        self.prog_dim, self.prog_key_size, self.prog_num = config.prog_embedding_size, config.prog_key_size, config.prog_num
        self.arg_num, self.arg_depth = config.arg_shape
        self.arg_dim = self.arg_num * self.arg_depth
        state_in, prog_in = state_encoder.state_dim, state_encoder.prog_dim
        self.f_enc, self.prog_embedding = state_encoder, program_embedding
        # LSTM configs
        self.lstm_input_dim, self.lstm_hidden_dim, self.lstm_hidden_layer = state_in + prog_in, 256, 1
        self.f_lstm = NPICore(self.lstm_input_dim, self.lstm_hidden_dim, self.lstm_hidden_layer)
        self.f_term = NPITerm(self.lstm_hidden_dim)
        self.f_prog = NPIProg(self.lstm_hidden_dim, self.prog_key_size, self.prog_num)
        self.f_args: list = []
        for _ in range(self.arg_num):
            self.f_args.append(NPIArg(self.lstm_hidden_dim, self.arg_depth))
        # try nn.ModuleDict
        self.f_args = nn.ModuleList(self.f_args)

    def forward(self, state, prog, args_embedding):
        """One core step: returns (termination prob, program log-probs,
        per-slot argument log-probs)."""
        prog_embedding = self.prog_embedding(prog)
        # prog_embedding = tensor(to_one_hot(prog, self.prog_dim)).to(config.device)
        z_state = self.f_enc(torch.cat([state, args_embedding]))
        inpt = torch.cat([z_state, prog_embedding])
        h_state = self.f_lstm(inpt.view(1, -1))  # find out how to use program embedding
        t_state = self.f_term(h_state.clone())
        p_state = self.f_prog(h_state.clone())
        args_state: list = []
        for f_arg in self.f_args:
            a_state = f_arg(h_state.clone())
            args_state.append(a_state)
        return t_state.view([]), p_state.view(1, -1), args_state

    def reset(self):
        """Reset the step counter and the LSTM core state."""
        # reset LSTM states
        self.steps: int = 0
        self.f_lstm.reset_states()

    def predict(self, env, pgid, args):
        """Run one forward pass on raw env state and integer args; returns
        (term prob, predicted program id, predicted argument values)."""
        args = [to_one_hot(arg, self.arg_depth) for arg in args]
        # Pad unused argument slots with the sentinel value (arg_depth - 1).
        args += [to_one_hot(self.arg_depth - 1, self.arg_depth) for _ in range(len(args) + 1, self.arg_num + 1)]  # fill with 10
        term, prog_key, args_out = self(tensor(env).flatten().type(torch.FloatTensor).to(config.device), tensor(pgid).to(config.device), tensor(args).flatten().type(torch.FloatTensor).to(config.device))
        return term.item(), torch.argmax(prog_key).item(), [torch.argmax(arg).item() for arg in args_out]

    # TODO:disable limit
    def step(self, env: AdditionEnv, pgid: int, args: list, term_thresh: float = 0.5, depth: int = 0, display: bool = False):
        """Recursively execute program *pgid*: primitive programs (ids 0/1)
        act on the environment, others recurse on the predicted subprogram
        until the termination head exceeds *term_thresh* or limits are hit."""
        term_prob: float = 0
        if depth > self.max_depth:
            return
        while term_prob < term_thresh:
            self.steps += 1
            if self.steps > self.max_steps:
                return
            env_state = env.get_observation()
            # term_prob, pgid_out, args_out = self(env_state, pgid, args)
            term_prob, pgid_pred, args_pred = self.predict(env_state, pgid, args)
            if display:
                env.display_step(pgid, args, term_prob)
            if pgid in [0, 1]:
                env.exec(pgid, args)
                if display:
                    env.display(pgid, args, term_prob)
            else:
                if pgid_pred != 6:  # here need modify
                    self.step(env, pgid_pred, args_pred, depth=depth + 1, display=display)

    def save(self, modelpath: str = f'{config.outdir}/weights/npi.weights'):
        """Persist the model weights; default path is read from config at
        class-definition time."""
        torch.save(self.state_dict(), modelpath)
def to_one_hot(x: int, dim: int, dtype=np.int8):
    """Return a length-*dim* one-hot vector for *x*.

    Out-of-range values of *x* yield an all-zero vector.
    """
    vec = np.zeros(dim, dtype=dtype)
    if 0 <= x < dim:
        vec[x] = 1
    return vec
| 2.484375 | 2 |
scripts/bus_factor/plot_normalized_commit_rates.py | a-paxton/oss-community-health | 0 | 12772625 | from glob import glob
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
"""
This is a reproduction of Fernando's 2011 normalized commit rate plot. This
shows roughly the bus factor
"""
parser = argparse.ArgumentParser()
parser.add_argument("--outname", "-o")
args = parser.parse_args()
outname = args.outname

filenames = glob("data/raw_data/*/commits.tsv")
filenames.sort()

fig, ax = plt.subplots()
for i, filename in enumerate(filenames):
    # Parse project name from data/raw_data/<project>_*/commits.tsv.
    project = filename.split("/")[2].split("_")[0]

    commits = pd.read_csv(filename, sep="\t", keep_default_na=False)
    # Bug fix: the original chained indexing
    #   commits[mask]["author_name"] = ""
    # assigned into a temporary copy and silently changed nothing;
    # .loc assigns in place.
    commits.loc[commits["author_name"].isnull(), "author_name"] = ""

    _, ticket_counts = np.unique(commits["author_name"], return_counts=True)
    ticket_counts.sort()
    # Descending order, normalized so the busiest contributor is 1.0.
    ticket_counts = ticket_counts[::-1] / ticket_counts.max()
    ax.plot(ticket_counts[:15] * 100,
            label=project,
            marker=".", color="C%d" % i,
            linewidth=2)

ax.set_xlim(0, 20)
ax.legend()
ax.set_title("Normalized commit rates", fontweight="bold",
             fontsize="large")
ax.set_xticks(np.arange(0, 21, 5))
ax.set_yticks([0, 50, 100])
[ax.axhline(i, color="0", alpha=0.3, linewidth=1, zorder=-1)
 for i in (0, 50, 100)]
ax.set_ylim(-1, 105)
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-',
        color='0.5')
# Bug fix: labels must match the 5 ticks set above; the original generated
# only 4 labels (np.arange(0, 20, 5)), which newer matplotlib rejects.
ax.set_xticklabels(["%d" % i for i in np.arange(0, 21, 5)], fontsize="medium",
                   fontweight="bold", color="0.5")
ax.set_yticklabels(["%d%%" % i for i in (0, 50, 100)], fontsize="medium",
                   fontweight="bold", color="0.5")
ax.set_xlabel("Contributors", fontsize="medium", fontweight="bold",
              color="0.5")

if outname is not None:
    try:
        os.makedirs(os.path.dirname(outname))
    except OSError:
        # Directory already exists (or outname has no directory component).
        pass
    fig.savefig(outname)
| 2.5 | 2 |
pyrocov/sketch.py | corneliusroemer/pyro-cov | 22 | 12772626 | <reponame>corneliusroemer/pyro-cov
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import logging
import re
from collections import Counter, namedtuple
import torch
logger = logging.getLogger(__name__)
MeanStd = namedtuple("MeanStd", ("mean", "std"))
def murmur64(h):
    """
    A cheap simple hash function : int64 -> int64.

    Accepts either a Python int (masked to 64 bits) or an int64 tensor
    (mixed in place, element-wise).
    """
    if isinstance(h, torch.Tensor):
        # Signed-int64 equivalents of the MurmurHash3 finalizer constants.
        for mult in (-49064778989728563, -4265267296055464877):
            h ^= h >> 33
            h *= mult
        h ^= h >> 33
    else:
        mask = 0xFFFFFFFFFFFFFFFF
        for mult in (0xFF51AFD7ED558CCD, 0xC4CEB9FE1A85EC53):
            h = (h ^ (h >> 33)) * mult & mask
        h ^= h >> 33
    return h
def count_bits(bits):
    """
    Return a float lookup table mapping each integer in [0, 2**bits) to
    its population count (number of set bits).
    """
    assert isinstance(bits, int) and bits > 0
    table = [bin(value).count("1") for value in range(2 ** bits)]
    return torch.tensor(table, dtype=torch.float)
class KmerCounter(Counter):
    """
    Counter of 32-mers of DNA sequences (k is hard-coded to 32).
    """

    def __init__(self, *, backend="cpp"):
        super().__init__()
        self.backend = backend
        self._pending = None  # lazily created C++ counter for the "cpp" backend

    def update(self, iterable=None, **kwds):
        """Count *iterable*; strings are split into packed 32-mers first."""
        if not isinstance(iterable, str):
            super().update(iterable, **kwds)
            return
        if self.backend == "python":
            super().update(get_32mers(iterable).tolist())
        elif self.backend == "cpp":
            if self._pending is None:
                self._pending = _get_cpp_module().KmerCounter()
            self._pending.update(iterable)
        else:
            raise NotImplementedError(f"Unsupported backend: {self.backend}")

    def flush(self, truncate_below=None):
        """Fold counts accumulated by the C++ backend into this Counter."""
        if self._pending is None:
            return
        if truncate_below is not None:
            self._pending.truncate_below(truncate_below)
        self.update(self._pending.to_dict())
        self._pending = None
class AMSSketcher:
    """
    Clustering via AMS sketching of k-mer counts followed by LSH.
    """

    def __init__(self, *, min_k=2, max_k=12, bits=16, backend="cpp"):
        assert 1 <= min_k <= max_k
        assert max_k * 2 <= 64, "max_k is too large"
        assert bits <= 24, "too many bits for float storage"
        self.min_k = min_k
        self.max_k = max_k
        self.bits = bits
        self.backend = backend

    def string_to_soft_hash(self, strings, out):
        """Accumulate a soft (signed-count) hash of all ACGT substrings of
        *strings* into the 1-D float tensor *out* (overwritten)."""
        assert out.dim() == 1
        assert out.dtype == torch.float
        if isinstance(strings, str):
            strings = re.findall("[ACGT]+", strings)
        if self.backend == "python":
            impl = string_to_soft_hash
        elif self.backend == "cpp":
            impl = _get_cpp_module().string_to_soft_hash
        else:
            raise NotImplementedError(f"Unsupported backend: {self.backend}")
        out.fill_(0)
        for string in strings:
            impl(self.min_k, self.max_k, string, out)

    def soft_to_hard_hashes(self, soft_hashes):
        """Binarize each row against the per-column median and pack the
        sign bits into integer LSH codes."""
        assert soft_hashes.dim() == 2
        soft_hashes = soft_hashes[:, : self.bits]
        signs = (soft_hashes > soft_hashes.median(0).values).float()
        powers_of_two = 2 ** torch.arange(self.bits, dtype=torch.float)
        hard_hashes = (signs @ powers_of_two).long()
        return hard_hashes

    def find_clusters(self, hard_hashes, *, radius=4):
        """Return cluster-center hash codes by smoothing the hash histogram
        over 1- and 2-bit flips, then greedily picking maxima and
        suppressing codes within Hamming distance *radius*."""
        assert hard_hashes.dim() == 1
        assert radius >= 1
        # Aggregate hash counts.
        counts = torch.zeros(2 ** self.bits, dtype=torch.float)
        ones = torch.ones(()).expand_as(hard_hashes)
        counts.scatter_add_(-1, hard_hashes, ones)
        # Add counts from single and double bit flips via inclusion-exclusion.
        B = self.bits
        B2 = B * (B - 1) / 2
        counts = counts.reshape((2,) * B)
        conv = counts.clone()
        for b in range(B):
            conv -= counts.sum(b, True) / B
            for b2 in range(b):
                conv += counts.sum([b, b2], True) / B2
        counts = conv.reshape(-1)
        # Greedily detect clusters, suppressing nearby maxima.
        mask = count_bits(self.bits) <= radius
        k = torch.arange(2 ** self.bits)
        clusters = []
        while counts.max() > 0:
            c = counts.max(0).indices.item()
            counts[mask[k ^ c]] = 0
            clusters.append(c)
        return torch.tensor(clusters)

    def cdist(self, hard_hashes, clusters):
        """Pairwise Hamming distances between hash codes and cluster codes."""
        assert hard_hashes.dim() == 1
        assert clusters.dim() == 1
        return count_bits(self.bits)[hard_hashes[:, None] ^ clusters]
class ClockSketch:
    """Container pairing per-item clock banks (int8) with k-mer totals (int16)."""

    def __init__(self, clocks, count):
        assert clocks.shape[:-1] == count.shape
        assert clocks.dtype == torch.int8
        assert count.dtype == torch.int16
        self.clocks, self.count = clocks, count

    def __len__(self):
        return len(self.count)

    @property
    def shape(self):
        # Batch shape, i.e. everything but the clock dimension.
        return self.count.shape

    def __getitem__(self, i):
        return ClockSketch(self.clocks[i], self.count[i])

    def __setitem__(self, i, other):
        self.clocks[i], self.count[i] = other.clocks, other.count

    def clone(self):
        """Deep copy of both tensors."""
        return ClockSketch(self.clocks.clone(), self.count.clone())
class ClockSketcher:
    """
    Sketches each bag of k-mers as bank of 8-bit clocks plus a single total
    k-mer counter.
    """

    def __init__(self, k, *, num_clocks=256, backend="cpp"):
        # num_clocks must be a multiple of 64 because the hash kernels fill
        # clocks in 64-bit words.
        assert num_clocks > 0 and num_clocks % 64 == 0
        self.k = k
        self.num_clocks = num_clocks
        self.backend = backend

    def init_sketch(self, *shape):
        """Return an empty ClockSketch with batch dimensions *shape*."""
        clocks = torch.zeros(shape + (self.num_clocks,), dtype=torch.int8)
        count = torch.zeros(shape, dtype=torch.int16)
        return ClockSketch(clocks, count)

    def string_to_hash(self, strings, sketch):
        """Accumulate all ACGT substrings of *strings* into a scalar
        *sketch*, then recenter the clocks to signed values (2*c - count)."""
        assert sketch.shape == ()
        if isinstance(strings, str):
            strings = re.findall("[ACGT]+", strings)
        if self.backend == "python":
            impl = string_to_clock_hash
        elif self.backend == "cpp":
            impl = _get_cpp_module().string_to_clock_hash
        else:
            raise NotImplementedError(f"Unsupported backend: {self.backend}")
        for string in strings:
            impl(self.k, string, sketch.clocks, sketch.count)
        sketch.clocks.mul_(2).sub_(sketch.count)

    def cdiff(self, x, y):
        """Pairwise sketch differences (all x against all y)."""
        clocks = x.clocks.unsqueeze(-2) - y.clocks.unsqueeze(-3)
        count = x.count.unsqueeze(-1) - y.count.unsqueeze(-2)
        return ClockSketch(clocks, count)

    def estimate_set_difference(self, x, y):
        r"""
        Estimates the multiset difference ``|x\y|``.
        Returns the mean and standard deviation of the estimate.
        """
        clocks = x.clocks.unsqueeze(-2) - y.clocks.unsqueeze(-3)
        count = x.count.unsqueeze(-1) - y.count.unsqueeze(-2)
        # Use a moment-matching estimator with Gaussian approximation.
        V_hx_minus_hy = clocks.float().square_().mean(-1)
        E_x_minus_y = count.float().add_(V_hx_minus_hy).clamp_(min=0).mul_(0.5)
        std_x_minus_y = V_hx_minus_hy.mul_((0.5 / self.num_clocks) ** 0.5)
        # Give up in case of numerical overflow (clock deltas near int8 range).
        overflow = (count.abs_() > 85).any(-1)
        std_x_minus_y[overflow] = float(x.count.max())
        return MeanStd(E_x_minus_y, std_x_minus_y)

    def set_difference(self, x, y):
        r"""
        Estimates the multiset difference ``|x\y|``.
        """
        # An optimized version of estimate_set_difference().
        clocks = x.clocks.unsqueeze(-2) - y.clocks.unsqueeze(-3)
        count = x.count.unsqueeze(-1) - y.count.unsqueeze(-2)
        V_hx_minus_hy = clocks.float().square_().mean(-1)
        E_x_minus_y = V_hx_minus_hy.add_(count).clamp_(min=0).mul_(0.5)
        return E_x_minus_y
_cpp_module = None  # cache for the JIT-compiled extension module


def _get_cpp_module():
    """
    JIT compiles the cpp module (once; the result is cached in _cpp_module).
    """
    global _cpp_module
    if _cpp_module is None:
        from torch.utils.cpp_extension import load

        # The C++ sources live next to this file, with a .cpp extension.
        assert __file__.endswith(".py")
        path = __file__[:-3] + ".cpp"
        _cpp_module = load(
            name="cpp_sketch", sources=[path], extra_cflags=["-O2"], verbose=False
        )
    return _cpp_module
def get_32mers(seq):
    """Pack every 32-mer of DNA string *seq* into an int64 (2 bits per base).

    The first base of each window occupies the most-significant bit pair.
    Sequences shorter than 32 yield an empty tensor.
    """
    encoding = {"A": 0, "C": 1, "G": 2, "T": 3}
    if len(seq) < 32:
        return torch.empty([0], dtype=torch.long)
    codes = torch.tensor([encoding[base] for base in seq], dtype=torch.long)
    shifts = torch.arange(62, -1, -2, dtype=torch.long)
    return (codes.unfold(0, 32, 1) << shifts).sum(-1)
def string_to_soft_hash(min_k, max_k, seq, out):
    """Reference Python implementation of the C++ soft-hash kernel.

    For every k in [min_k, max_k] and every k-mer of *seq*, hash the packed
    k-mer and accumulate +/-1 per output bit into *out* (AMS-style sketch).
    """
    to_bits = {"A": 0, "C": 1, "G": 2, "T": 3}
    bits = out.size(-1)
    for k in range(min_k, max_k + 1):
        salt = murmur64(1 + k)  # distinct hash stream per k
        for pos in range(len(seq) - k + 1):
            hash_ = salt
            for i in range(k):
                hash_ ^= to_bits[seq[pos + i]] << (i + i)  # 2 bits per base
            hash_ = murmur64(hash_)
            for b in range(bits):
                out[b] += 1 if (hash_ & (1 << b)) else -1
def string_to_clock_hash(k, seq, clocks, count):
    """Reference Python implementation of the C++ clock-hash kernel.

    Adds the number of k-mers in *seq* to *count* and, for every k-mer,
    increments one bit-clock per hash bit across the clock bank.
    """
    to_bits = {"A": 0, "C": 1, "G": 2, "T": 3}
    num_kmers = len(seq) - k + 1
    count.add_(num_kmers)
    for pos in range(num_kmers):
        hash_ = 0
        for i in range(k):
            hash_ ^= to_bits[seq[pos + i]] << (i + i)
        # Each 64-clock word uses an independently salted hash.
        for w in range(len(clocks) // 64):
            hash_w = murmur64(murmur64(1 + w) ^ hash_)
            for b in range(64):
                wb = w * 64 + b
                b_ = (b // 8) + 8 * (b % 8)  # for vectorized math in C++
                clocks[wb] += (hash_w >> b_) & 1
                # Keep clocks in 7 bits to avoid int8 overflow.
                clocks[wb] &= 0x7F
| 2.265625 | 2 |
fate/repeat.py | Mattias1/fate | 0 | 12772627 | <gh_stars>0
"""This module provides a repeat mechanism for user input."""
from .document import Document
from . import commands
from functools import wraps
class RepeatData:
    """Holds the input/command of the last completed and current recording."""

    def __init__(self):
        # Completed recording, replayed by repeat().
        self.last_user_input, self.last_command = [], None
        # Recording currently in progress.
        self.current_user_input, self.current_command = [], None
        # Nesting depth of start_recording() calls; > 0 means recording.
        self.recording_level = 0
def init(document):
    """Per-document setup: hook input recording and attach fresh repeat state."""
    document.ui.OnUserInput.add(record_input)
    document.ui.repeat_data = RepeatData()
Document.OnDocumentInit.add(init)
def record_input(ui, key):
    """Append *key* to the in-progress recording, if one is active."""
    recording = ui.repeat_data
    if recording.recording_level <= 0:
        return
    recording.current_user_input.append(key)
def start_recording(document):
    """Enter one level of input recording for *document*."""
    document.ui.repeat_data.recording_level += 1
def stop_recording(document):
    """Leave one level of input recording; publish the recording at level 0.

    When the outermost recording ends, the captured input and command are
    moved to the ``last_*`` fields for repeat() to replay.

    Raises:
        ValueError: if called more often than start_recording().
    """
    data = document.ui.repeat_data
    data.recording_level -= 1
    if data.recording_level < 0:
        # ValueError (still an Exception subclass, so existing handlers keep
        # working); the old message referred to a nonexistent
        # "input_recording_level" attribute.
        raise ValueError('recording_level must not be < 0')
    if data.recording_level == 0:
        data.last_user_input = data.current_user_input
        data.last_command = data.current_command
        data.current_user_input = []
        data.current_command = None
def repeat(document):
    """Replay the last recorded command, feeding back its recorded input."""
    data = document.ui.repeat_data
    if data.last_command:
        document.ui.feedinput(data.last_user_input)
        data.last_command(document)
# Expose repeat as a regular command.
commands.repeat = repeat
def repeatable(command):
    """Action decorator which stores command in last_command field in document."""
    @wraps(command)
    def wrapper(document):
        # Remember the command and record the input it consumes so that
        # repeat() can replay both.
        document.ui.repeat_data.current_command = command
        start_recording(document)
        result = command(document)
        stop_recording(document)
        return result
    return wrapper
| 3.40625 | 3 |
relativism.py | pau4o/quantumnik | 16 | 12772628 | # -*- coding: utf-8 -*-
import os
import sys
from xml.dom import minidom
from os.path import abspath, dirname, normcase, normpath, splitdrive
# http://code.activestate.com/recipes/302594/
def commonpath(a, b):
    """Returns the longest common to 'paths' path.

    Unlike the strange os.path.commonprefix:
    - this returns valid path
    - accepts only two arguments

    Returns None when the (normalized) paths share no common ancestor.
    """
    a = normpath(normcase(a))
    b = normpath(normcase(b))
    # Strip directory components from the longer path until they meet.
    while a:
        if a == b:
            return a
        if len(a) > len(b):
            a = dirname(a)
        else:
            b = dirname(b)
    return None
def relpath(target, base_path=os.curdir):
    """
    Return a relative path to the target from either the current directory
    or an optional base directory.

    Base can be a directory specified either as absolute or relative
    to current directory.

    Returns '.' when the paths are equal, and None when the target lives on
    a different Windows drive (no relative path exists).
    """
    base_path = normcase(abspath(normpath(base_path)))
    target = normcase(abspath(normpath(target)))
    if base_path == target:
        return '.'
    # On the windows platform the target may be on a different drive.
    if splitdrive(base_path)[0] != splitdrive(target)[0]:
        return None
    # Delegate the dirs-up/common-prefix arithmetic to the standard library
    # instead of the previous hand-rolled (and fiddly) reimplementation.
    return os.path.relpath(target, base_path)
def fix_file_params(name, element, base_path):
    """Rewrite the value of every ``file`` parameter named *name* under
    *element* so that it is relative to *base_path*."""
    params = element.getElementsByTagName(name)
    if params:
        for param in params:
            attr = param.getAttribute('name')
            if attr == 'file':
                # NOTE(review): assumes the parameter element has a text
                # child — an empty <Parameter name="file"/> would raise here.
                path = param.firstChild.nodeValue
                param.firstChild.nodeValue = relpath(path, base_path)
def fix_sym_file(element, base_path):
    """Rewrite a symbolizer's ``file`` attribute relative to *base_path*."""
    path = element.getAttribute('file')
    if path:
        element.setAttribute('file', relpath(path, base_path))
def fix_paths(mapfile, base_path):
    """Rewrite all file references in the Mapnik XML *mapfile* so they are
    relative to *base_path*, then write the document back in place."""
    doc = minidom.parse(mapfile)
    datasources = doc.getElementsByTagName("Datasource")
    for ds in datasources:
        fix_file_params('Parameter', ds, base_path)
    pnt_syms = doc.getElementsByTagName("PointSymbolizer")
    for pnt in pnt_syms:
        fix_sym_file(pnt, base_path)
    # Bug fix: doc.toxml() without an encoding returns str, which cannot be
    # written to a 'wb' handle on Python 3; requesting an encoding yields
    # bytes on both Python 2 and 3. Also close the handle deterministically
    # instead of leaking it.
    with open(mapfile, 'wb') as out:
        out.write(doc.toxml(encoding='utf-8'))
#def move_files(mapfile,base_path): pass
| 2.84375 | 3 |
byceps/services/metrics/service.py | homeworkprod/byceps | 23 | 12772629 | <gh_stars>10-100
"""
byceps.metrics.service
~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Iterator
from ...services.brand import service as brand_service
from ...services.board import (
board_service,
topic_query_service as board_topic_query_service,
posting_query_service as board_posting_query_service,
)
from ...services.consent import consent_service
from ...services.metrics.models import Label, Metric
from ...services.party.transfer.models import Party
from ...services.party import service as party_service
from ...services.seating import seat_service
from ...services.shop.order import service as order_service
from ...services.shop.article import service as shop_article_service
from ...services.shop.shop import service as shop_service
from ...services.shop.shop.transfer.models import Shop, ShopID
from ...services.ticketing import ticket_service
from ...services.user import stats_service as user_stats_service
from ...typing import BrandID, PartyID
def serialize(metrics: Iterator[Metric]) -> Iterator[str]:
    """Serialize metric objects to newline-terminated text lines."""
    for metric in metrics:
        yield f'{metric.serialize()}\n'
def collect_metrics() -> Iterator[Metric]:
    """Yield all application metrics: boards, consents, shops, seating,
    tickets, and users."""
    brand_ids = [brand.id for brand in brand_service.get_all_brands()]
    active_parties = party_service.get_active_parties()
    active_party_ids = [p.id for p in active_parties]
    active_shops = shop_service.get_active_shops()
    active_shop_ids = {shop.id for shop in active_shops}
    yield from _collect_board_metrics(brand_ids)
    yield from _collect_consent_metrics()
    yield from _collect_shop_ordered_article_metrics(active_shop_ids)
    yield from _collect_shop_order_metrics(active_shops)
    yield from _collect_seating_metrics(active_party_ids)
    yield from _collect_ticket_metrics(active_parties)
    yield from _collect_user_metrics()
def _collect_board_metrics(brand_ids: list[BrandID]) -> Iterator[Metric]:
    """Yield topic and posting counts for every board of the given brands."""
    for brand_id in brand_ids:
        boards = board_service.get_boards_for_brand(brand_id)
        board_ids = [board.id for board in boards]
        for board_id in board_ids:
            labels = [Label('board', board_id)]
            topic_count = board_topic_query_service.count_topics_for_board(
                board_id
            )
            yield Metric('board_topic_count', topic_count, labels=labels)
            posting_count = (
                board_posting_query_service.count_postings_for_board(board_id)
            )
            yield Metric('board_posting_count', posting_count, labels=labels)
def _collect_consent_metrics() -> Iterator[Metric]:
    """Yield the number of consents given, per consent subject."""
    consents_per_subject = consent_service.count_consents_by_subject()
    for subject_name, consent_count in consents_per_subject.items():
        yield Metric(
            'consent_count',
            consent_count,
            labels=[Label('subject_name', subject_name)],
        )
def _collect_shop_ordered_article_metrics(
    shop_ids: set[ShopID],
) -> Iterator[Metric]:
    """Provide ordered article quantities for shops."""
    stats = shop_article_service.sum_ordered_articles_by_payment_state(shop_ids)
    # One metric per (shop, article, payment state) combination.
    for shop_id, article_number, description, payment_state, quantity in stats:
        yield Metric(
            'shop_ordered_article_quantity',
            quantity,
            labels=[
                Label('shop', shop_id),
                Label('article_number', article_number),
                Label('description', description),
                Label('payment_state', payment_state.name),
            ],
        )
def _collect_shop_order_metrics(shops: list[Shop]) -> Iterator[Metric]:
    """Provide order counts grouped by payment state for shops."""
    for shop in shops:
        order_counts_per_payment_state = (
            order_service.count_orders_per_payment_state(shop.id)
        )
        for payment_state, quantity in order_counts_per_payment_state.items():
            yield Metric(
                'shop_order_quantity',
                quantity,
                labels=[
                    Label('shop', shop.id),
                    Label('payment_state', payment_state.name),
                ],
            )
def _collect_seating_metrics(
    active_party_ids: list[PartyID],
) -> Iterator[Metric]:
    """Provide seat occupation counts per party and category."""
    for party_id in active_party_ids:
        occupied_seat_counts_by_category = (
            seat_service.count_occupied_seats_by_category(party_id)
        )
        for category, count in occupied_seat_counts_by_category:
            yield Metric(
                'occupied_seat_count',
                count,
                labels=[
                    Label('party', party_id),
                    Label('category_title', category.title),
                ],
            )
def _collect_ticket_metrics(active_parties: list[Party]) -> Iterator[Metric]:
    """Yield ticket quantity metrics for each active party."""
    for party in active_parties:
        labels = [Label('party', party.id)]

        # The maximum is optional per party; only report it when configured.
        if party.max_ticket_quantity is not None:
            yield Metric('tickets_max', party.max_ticket_quantity, labels=labels)

        revoked = ticket_service.count_revoked_tickets_for_party(party.id)
        yield Metric('tickets_revoked_count', revoked, labels=labels)

        sold = ticket_service.count_sold_tickets_for_party(party.id)
        yield Metric('tickets_sold_count', sold, labels=labels)

        checked_in = ticket_service.count_tickets_checked_in_for_party(party.id)
        yield Metric('tickets_checked_in_count', checked_in, labels=labels)
def _collect_user_metrics() -> Iterator[Metric]:
    """Yield site-wide user account counts by account status."""
    # Insertion order of this mapping preserves the original emission order.
    counts = {
        'users_active_count': user_stats_service.count_active_users(),
        'users_uninitialized_count': user_stats_service.count_uninitialized_users(),
        'users_suspended_count': user_stats_service.count_suspended_users(),
        'users_deleted_count': user_stats_service.count_deleted_users(),
        'users_total_count': user_stats_service.count_users(),
    }
    for metric_name, count in counts.items():
        yield Metric(metric_name, count)
| 1.671875 | 2 |
telegram_bot/config.py | YulLeo/meme-and-gifs-generator-telegram-bot | 0 | 12772630 | import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()  # Populate os.environ from a local .env file, if present.

# --- Credentials, read from the environment ---
API_TOKEN = os.getenv("TELEGRAM_API_TOKEN")  # Telegram Bot API token
SECRET_KEY = os.getenv("SECRET_KEY")  # object-storage secret key
ACCESS_KEY = os.getenv("ACCESS_KEY")  # object-storage access key

# --- Object storage endpoint ---
# IN_DOCKER, when set, supplies the storage host name inside the container
# network; defaults to localhost for local runs.
OBJECT_STORAGE_IP = os.getenv("IN_DOCKER", "127.0.0.1")
OBJECT_STORAGE_PORT = os.getenv("OBJECT_STORAGE_PORT", "9000")

# Repository root, resolved relative to this file's location.
REPOSITORY_ROOT = Path(__file__).parent.parent

# --- Image/GIF output formats and file names ---
PNG = "PNG"
GIF = "GIF"
GIF_FILE_NAME = "water_gif.gif"
PNG_IMAGE = "image_with_text.png"
ZIP_FILE_NAME = "gifs.zip"
RGBA = "RGBA"

# --- Text rendering parameters ---
FILL_COLOR = "white"
STROKE_COLOR = "black"
DURATION = 350  # GIF frame duration — presumably milliseconds; TODO confirm
STROKE_WIDTH = 4
SIZE = 20  # base font size
HEIGHT_PROPORTION = 0.9
WIDTH_PROPORTION = 0.7
REGULAR_TTF = REPOSITORY_ROOT / "telegram_bot" / "fonts" / "AbyssinicaSIL-Regular.ttf"  # noqa: E501

# --- Object-storage metadata header names ---
USER_ID = "X-Amz-Meta-User_id"
PRIVATE = "X-Amz-Meta-Private"
| 1.789063 | 2 |
packages/core/src/RPA/core/webdriver.py | FormulatedAutomation/rpaframework | 2 | 12772631 | <filename>packages/core/src/RPA/core/webdriver.py<gh_stars>1-10
import logging
import os
import platform
import re
import stat
import subprocess
import tempfile
from pathlib import Path
from typing import Any, List
from selenium import webdriver
from webdrivermanager import AVAILABLE_DRIVERS
LOGGER = logging.getLogger(__name__)
DRIVER_DIR = Path(tempfile.gettempdir()) / "drivers"
DRIVER_PREFERENCE = {
"Windows": ["Chrome", "Firefox", "Edge", "IE", "Opera"],
"Linux": ["Chrome", "Firefox", "Opera"],
"Darwin": ["Chrome", "Safari", "Firefox", "Opera"],
"default": ["Chrome", "Firefox"],
}
CHROME_VERSION_PATTERN = r"(\d+\.\d+.\d+)(\.\d+)"
CHROME_VERSION_COMMANDS = {
"Windows": [
[
"reg",
"query",
r"HKEY_CURRENT_USER\Software\Google\Chrome\BLBeacon",
"/v",
"version",
],
[
"reg",
"query",
r"HKEY_CURRENT_USER\Software\Chromium\BLBeacon",
"/v",
"version",
],
],
"Linux": [
["chromium", "--version"],
["chromium-browser", "--version"],
["google-chrome", "--version"],
],
"Darwin": [
["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome", "--version"],
["/Applications/Chromium.app/Contents/MacOS/Chromium", "--version"],
],
}
def executable(browser: str, download: bool = False) -> str:
    """Get path to webdriver executable, and download it if requested.

    Returns the driver path as a string, or None when the browser is
    unknown or the driver should instead be resolved from PATH.

    :param browser: name of browser to get webdriver for
    :param download: download driver binaries if they don't exist
    """
    LOGGER.debug(
        "Webdriver initialization for '%s' (download: %s)",
        browser,
        download,
    )

    browser = browser.lower().strip()
    factory = AVAILABLE_DRIVERS.get(browser)
    if not factory:
        # Unknown browser name: signal the caller to fall back.
        return None

    driver_path = _driver_path(factory, download)

    if driver_path.exists() and not download:
        LOGGER.debug("Attempting to use existing driver: %s", driver_path)
        return str(driver_path)
    elif driver_path.exists() and download:
        # TODO: Implement version check for all browsers
        if browser == "chrome":
            # Re-download only when the installed Chrome and the cached
            # chromedriver report different versions.
            chrome_version = _chrome_version()
            driver_version = _chromedriver_version(driver_path)
            if chrome_version != driver_version:
                _download_driver(factory)
        else:
            LOGGER.debug(
                "Driver download skipped, because it exists at '%s'", driver_path
            )
        return str(driver_path)
    elif not driver_path.exists() and download:
        # No cached binary yet: fetch it, then return the expected path.
        _download_driver(factory)
        return str(driver_path)
    else:
        # Not cached and download not requested; rely on PATH lookup.
        LOGGER.debug("Attempting to use driver from PATH")
        return None
def start(name: str, **options):
    """Instantiate and return a Selenium webdriver by class name.

    :param name: webdriver class name on the ``selenium.webdriver`` module
    :param options: keyword arguments forwarded to the driver constructor
    :raises RuntimeError: if no driver class with that name exists
    """
    cleaned = name.strip()
    try:
        driver_cls = getattr(webdriver, cleaned)
    except AttributeError as exc:
        raise RuntimeError(f"'{cleaned}' is not a valid driver name") from exc
    return driver_cls(**options)
def _driver_path(factory: Any, download: bool) -> Any:
    """Resolve the expected location of the driver binary.

    Prefers the temp-dir cache when it exists or a download is planned,
    otherwise falls back to the manager's link path.
    """
    if platform.system() == "Windows":
        manager = factory()
    else:
        manager = factory(link_path="/usr/bin")

    filename = manager.get_driver_filename()
    cached = Path(DRIVER_DIR) / filename
    if download or cached.exists():
        return cached
    return Path(manager.link_path) / filename
def _chrome_version() -> str:
    """Detect the installed Chrome/Chromium version, or None if unknown."""
    system = platform.system()
    candidates = CHROME_VERSION_COMMANDS.get(system)
    if not candidates:
        LOGGER.warning("Unsupported system: %s", system)
        return None

    # Try each known probe command until one yields a parseable version.
    for command in candidates:
        text = _run_command(command)
        if text:
            match = re.search(CHROME_VERSION_PATTERN, text)
            if match:
                return match.group(1)

    return None
def _chromedriver_version(path: Path) -> str:
    """Parse the version reported by the chromedriver binary at *path*."""
    text = _run_command([str(path), "--version"])
    match = re.search(CHROME_VERSION_PATTERN, text) if text else None
    return match.group(1) if match else None
def _download_driver(factory: Any, version: str = None) -> None:
    """Download and install a webdriver binary into DRIVER_DIR.

    Failures are non-fatal: callers fall back to whatever driver is
    already on PATH, so errors are logged rather than raised.

    :param factory: webdrivermanager driver-manager class
    :param version: specific driver version to fetch, or None for latest
    """
    path = str(DRIVER_DIR)
    manager = factory(download_root=path, link_path=path)
    try:
        if version:
            bin_path, _ = manager.download_and_install(version, show_progress_bar=False)
        else:
            bin_path, _ = manager.download_and_install(show_progress_bar=False)
        if platform.system() == "Darwin" and bin_path:
            # TODO: Required for linux also?
            _set_executable_permissions(bin_path)
        LOGGER.debug(
            "%s downloaded to %s",
            manager.get_driver_filename(),
            bin_path,
        )
    except RuntimeError as err:
        # Previously this failure was silently swallowed (`pass`), hiding
        # download problems entirely. Keep the best-effort behavior but
        # record the reason for debugging.
        LOGGER.debug("Driver download failed: %s", err)
def _set_executable_permissions(path: str) -> None:
st = os.stat(path)
os.chmod(
path,
st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,
)
def _run_command(args: List[str]) -> str:
try:
output = subprocess.check_output(args)
return output.decode().strip()
except (FileNotFoundError, subprocess.CalledProcessError) as err:
LOGGER.debug("Command failed: %s", err)
return None
| 2.34375 | 2 |
test/tests/chirc/tests/test_whois.py | Jusot/irc | 2 | 12772632 | import pytest
from chirc import replies
import chirc.tests.fixtures as fixtures
class TestWHOIS(object):
    """Tests for the IRC WHOIS command against a chirc server.

    Replies are consumed in the exact order the server must emit them
    (RPL_WHOISUSER, optional RPL_WHOISCHANNELS, RPL_WHOISSERVER,
    optional RPL_AWAY / RPL_WHOISOPERATOR, then RPL_ENDOFWHOIS).
    """

    @pytest.mark.category("WHOIS")
    def test_whois1(self, irc_session):
        """
        Test doing a WHOIS on a user (user2) that is not in any channels.
        """
        client1 = irc_session.connect_user("user1", "User One")
        # client2 is unused directly, but connecting it registers user2
        # on the server so the WHOIS below has a target.
        client2 = irc_session.connect_user("user2", "User Two")

        client1.send_cmd("WHOIS user2")

        reply = irc_session.get_reply(client1, expect_code = replies.RPL_WHOISUSER,
                                      expect_nparams = 5, long_param_re = "User Two")

        reply = irc_session.get_reply(client1, expect_code = replies.RPL_WHOISSERVER,
                                      expect_nparams = 3)

        reply = irc_session.get_reply(client1, expect_code = replies.RPL_ENDOFWHOIS,
                                      expect_nparams = 2, long_param_re = "End of WHOIS list")

    @pytest.mark.category("WHOIS")
    def test_whois_nonick(self, irc_session):
        """
        Test doing a WHOIS on a user (user2) that does not exist in the server.
        """
        client1 = irc_session.connect_user("user1", "User One")

        client1.send_cmd("WHOIS user2")
        reply = irc_session.get_reply(client1, expect_code = replies.ERR_NOSUCHNICK, expect_nick = "user1",
                                      expect_nparams = 2, expect_short_params = ["user2"],
                                      long_param_re = "No such nick/channel")

    @pytest.mark.category("WHOIS")
    def test_whois_params(self, irc_session):
        """
        Test sending a WHOIS without parameters..
        """
        client1 = irc_session.connect_user("user1", "User One")

        # A parameterless WHOIS must be silently ignored (no reply at all).
        client1.send_cmd("WHOIS")
        irc_session.get_reply(client1, expect_timeout = True)

    def _test_userchannels(self, irc_session, channels, nick, channelstring):
        """Validate an RPL_WHOISCHANNELS channel string against *channels*.

        Each entry may be prefixed with '@' (operator) or '+' (voice);
        the prefixed nick must appear in the corresponding channel's
        user list in the *channels* fixture.
        """
        whois_channels = channelstring.strip().split()

        for qchannel in whois_channels:
            if qchannel[0] in ('@', '+'):
                modchar = qchannel[0]
                channel = qchannel[1:]
            else:
                modchar = ""
                channel = qchannel

            assert channel in channels, "RPL_WHOISCHANNELS: Includes unexpected channel {}".format(channel)

            users = channels[channel]

            assert modchar + nick in users, "RPL_WHOISCHANNELS: Expected {} to be in {} (for channels '{}')".format(modchar + nick, channel, channelstring)

        # Per RFC 2812, the channel list ends with a trailing space.
        if channelstring[-1] != " ":
            pytest.fail("You may want to *very carefully* reread the specification for RPL_WHOISCHANNELS...")

    @pytest.mark.category("UPDATE_ASSIGNMENT2")
    def test_whois2(self, irc_session):
        """
        Given the following users and channels (@ denotes channel
        operators, and + denotes a user with voice privileges):

        #test1: @user1, user2, user3
        #test2: @user2
        #test3: @user3, @user4, user5, user6
        #test4: @user7, +user8, +user9, user1, user2
        #test5: @user1, @user5
        Not in a channel: user10, user11

        Test doing a WHOIS on user2
        """

        users = irc_session.connect_and_join_channels(fixtures.channels3)

        users["user1"].send_cmd("WHOIS user2")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISUSER,
                                      expect_nparams = 5, long_param_re = "user2")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISCHANNELS,
                                      expect_nparams = 2)
        # The channel list is a single trailing parameter; [1:] strips the
        # leading ':' marker before validation.
        self._test_userchannels(irc_session, fixtures.channels3, "user2", reply.params[2][1:])

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISSERVER,
                                      expect_nparams = 3)

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_ENDOFWHOIS,
                                      expect_nparams = 2, long_param_re = "End of WHOIS list")

    @pytest.mark.category("UPDATE_ASSIGNMENT2")
    def test_whois3(self, irc_session):
        """
        Given the following users and channels (@ denotes channel
        operators, and + denotes a user with voice privileges):

        #test1: @user1, user2, user3
        #test2: @user2
        #test3: @user3, @user4, user5, user6
        #test4: @user7, +user8, +user9, user1, user2
        #test5: @user1, @user5
        Not in a channel: user10, user11

        Where, additionally, user8 is an IRCop and is away.

        Test doing a WHOIS on user8
        """

        users = irc_session.connect_and_join_channels(fixtures.channels3, aways=["user8"], ircops=["user8"])

        users["user1"].send_cmd("WHOIS user8")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISUSER,
                                      expect_nparams = 5, long_param_re = "user8")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISCHANNELS,
                                      expect_nparams = 2)
        self._test_userchannels(irc_session, fixtures.channels3, "user8", reply.params[2][1:])

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISSERVER,
                                      expect_nparams = 3)

        # Away status and operator status are reported before ENDOFWHOIS.
        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_AWAY, expect_nick = "user1",
                                      expect_nparams = 2, expect_short_params = ["user8"],
                                      long_param_re = "I'm away")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_WHOISOPERATOR,
                                      expect_nparams = 2, expect_short_params = ["user8"],
                                      long_param_re = "is an IRC operator")

        reply = irc_session.get_reply(users["user1"], expect_code = replies.RPL_ENDOFWHOIS,
                                      expect_nparams = 2, long_param_re = "End of WHOIS list")
| 2.140625 | 2 |
test/test_dtypes.py | fjarri/grunnur | 1 | 12772633 | import numpy
import pytest
from grunnur import dtypes
from grunnur.modules import render_with_modules
def test_normalize_type():
    """normalize_type() returns an equivalent numpy.dtype instance."""
    normalized = dtypes.normalize_type(numpy.int32)
    assert type(normalized) == numpy.dtype
    assert normalized == numpy.int32
def test_ctype_builtin():
    """A builtin scalar dtype maps to its C type name."""
    assert dtypes.ctype(numpy.int32) == 'int'
def test_is_complex():
    for complex_dtype in (numpy.complex64, numpy.complex128):
        assert dtypes.is_complex(complex_dtype)
    assert not dtypes.is_complex(numpy.float64)
def test_is_double():
    # Both real and complex double-precision types count as "double".
    assert dtypes.is_double(numpy.float64)
    assert dtypes.is_double(numpy.complex128)
    assert not dtypes.is_double(numpy.complex64)
def test_is_integer():
    assert not dtypes.is_integer(numpy.float32)
    assert dtypes.is_integer(numpy.int32)
def test_is_real():
    assert dtypes.is_real(numpy.float32)
    # Neither complex nor integer types qualify as real.
    for non_real in (numpy.complex64, numpy.int32):
        assert not dtypes.is_real(non_real)
def test_promote_type():
    """Small scalar types are promoted to at least 32-bit width."""
    promotions = {
        numpy.int8: numpy.int32,
        numpy.uint8: numpy.uint32,
        numpy.float16: numpy.float32,
        numpy.int32: numpy.int32,
    }
    for source, expected in promotions.items():
        assert dtypes._promote_type(source) == expected
def test_result_type():
    """Mixed int/float operands promote to double precision."""
    assert dtypes.result_type(numpy.int32, numpy.float32) == numpy.float64
def test_min_scalar_type():
    assert dtypes.min_scalar_type(1) == numpy.uint32
    assert dtypes.min_scalar_type(-1) == numpy.int32
    assert dtypes.min_scalar_type(1.) == numpy.float32
    # The largest int32 value still fits when a signed type is forced...
    assert dtypes.min_scalar_type(2**31 - 1, force_signed=True) == numpy.int32
    # ...but 2**31 overflows int32 and must widen to int64.
    assert dtypes.min_scalar_type(2**31, force_signed=True) == numpy.int64
def test_detect_type():
    # numpy scalars narrower than 32 bits are widened; wider ones kept.
    assert dtypes.detect_type(numpy.int8(-1)) == numpy.int32
    assert dtypes.detect_type(numpy.int64(-1)) == numpy.int64
    # Plain Python scalars map to the default 32-bit types.
    assert dtypes.detect_type(-1) == numpy.int32
    assert dtypes.detect_type(-1.) == numpy.float32
def test_complex_for():
    assert dtypes.complex_for(numpy.float32) == numpy.complex64
    assert dtypes.complex_for(numpy.float64) == numpy.complex128
    # Already-complex and integer inputs are rejected.
    for invalid in (numpy.complex64, numpy.int32):
        with pytest.raises(ValueError):
            assert dtypes.complex_for(invalid)
def test_real_for():
    assert dtypes.real_for(numpy.complex64) == numpy.float32
    assert dtypes.real_for(numpy.complex128) == numpy.float64
    # Real and integer inputs are rejected.
    for invalid in (numpy.float32, numpy.int32):
        with pytest.raises(ValueError):
            assert dtypes.real_for(invalid)
def test_complex_ctr():
    """Complex dtypes render through the COMPLEX_CTR macro."""
    assert dtypes.complex_ctr(numpy.complex64) == "COMPLEX_CTR(float2)"
def test_cast():
    to_uint64 = dtypes.cast(numpy.uint64)
    # Any integer-like source should come out as a uint64 with equal value.
    for source in (1, numpy.int32(1), numpy.uint64(1)):
        result = to_uint64(source)
        assert result.dtype == numpy.uint64
        assert result == 1
def test_c_constant():
    """c_constant() renders Python/numpy values as C literals."""
    # Scalars carry width/sign suffixes.
    assert dtypes.c_constant(1) == "1"
    assert dtypes.c_constant(numpy.uint64(1)) == "1UL"
    assert dtypes.c_constant(numpy.int64(-1)) == "-1L"
    assert dtypes.c_constant(numpy.float64(1.)) == "1.0"
    assert dtypes.c_constant(numpy.float32(1.)) == "1.0f"
    # Complex values go through the COMPLEX_CTR macro.
    assert dtypes.c_constant(numpy.complex64(1 + 2j)) == "COMPLEX_CTR(float2)(1.0f, 2.0f)"
    assert dtypes.c_constant(numpy.complex128(1 + 2j)) == "COMPLEX_CTR(double2)(1.0, 2.0)"

    # Arrays render as initializer lists.
    assert dtypes.c_constant(numpy.array([1, 2, 3], numpy.float32)) == "{1.0f, 2.0f, 3.0f}"

    # Struct dtypes render field by field.
    struct_dtype = numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)])
    value = numpy.empty((), struct_dtype)
    value['val1'] = 1
    value['val2'] = 2
    assert dtypes.c_constant(value) == "{1, 2.0f}"

    # An explicitly supplied dtype overrides the value's own type.
    assert dtypes.c_constant(1, numpy.float32) == "1.0f"
def test__align_simple():
    """A scalar dtype wraps with its own itemsize as alignment."""
    int_dtype = numpy.dtype('int32')
    expected = dtypes.WrappedType(int_dtype, int_dtype.itemsize)
    assert dtypes._align(int_dtype) == expected
def test__align_array():
    """An array dtype aligns on its element's itemsize, not the total size."""
    element = numpy.dtype('int32')
    array_dtype = numpy.dtype((element, 3))
    expected = dtypes.WrappedType(array_dtype, element.itemsize)
    assert dtypes._align(array_dtype) == expected
def test__align_non_aligned_struct():
    """Aligning a packed struct inserts natural offsets and padding."""
    source = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32]))

    aligned = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 2, 4],
        itemsize=8,
        aligned=True))
    fields = dict(
        x=dtypes.WrappedType(numpy.dtype('int8'), 1),
        y=dtypes.WrappedType(numpy.dtype('int16'), 2),
        z=dtypes.WrappedType(numpy.dtype('int32'), 4))
    expected = dtypes.WrappedType(
        aligned, 4, explicit_alignment=None, wrapped_fields=fields,
        field_alignments=dict(x=None, y=None, z=None))

    assert dtypes._align(source) == expected
def test__align_aligned_struct():
    """An already-aligned struct passes through _align() unchanged."""
    aligned = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 2, 4],
        itemsize=8,
        aligned=True))

    fields = dict(
        x=dtypes.WrappedType(numpy.dtype('int8'), 1),
        y=dtypes.WrappedType(numpy.dtype('int16'), 2),
        z=dtypes.WrappedType(numpy.dtype('int32'), 4))
    expected = dtypes.WrappedType(
        aligned, 4, explicit_alignment=None, wrapped_fields=fields,
        field_alignments=dict(x=None, y=None, z=None))

    assert dtypes._align(aligned) == expected
def test__align_aligned_struct_custom_itemsize():
    """A non-natural itemsize forces an explicit struct alignment."""
    aligned = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 2, 4],
        itemsize=16,
        aligned=True))

    fields = dict(
        x=dtypes.WrappedType(numpy.dtype('int8'), 1),
        y=dtypes.WrappedType(numpy.dtype('int16'), 2),
        z=dtypes.WrappedType(numpy.dtype('int32'), 4))
    expected = dtypes.WrappedType(
        aligned, 16, explicit_alignment=16, wrapped_fields=fields,
        field_alignments=dict(x=None, y=None, z=None))

    assert dtypes._align(aligned) == expected
def test__align_custom_field_offsets():
    """Non-natural field offsets are reflected in per-field alignments.

    Fix: the original built an unaligned ``dtype`` variable that was never
    used (only ``dtype_aligned`` was passed to ``_align``); it is removed.
    """
    dtype_aligned = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 4, 16],
        itemsize=32,
        aligned=True))

    res = dtypes._align(dtype_aligned)

    wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
    wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
    wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
    ref = dtypes.WrappedType(
        dtype_aligned, 16, explicit_alignment=None, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
        field_alignments=dict(x=None, y=4, z=16))

    assert res == ref
def test__align_aligned_struct_invalid_itemsize():
    """An itemsize that is not a power of 2 must be rejected."""
    bad_itemsize = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 2, 4],
        itemsize=20,  # not a power of 2, an error should be raised
        aligned=True))
    with pytest.raises(ValueError):
        dtypes._align(bad_itemsize)
def test_align_nested():
    """align() recurses into nested struct and array fields."""
    inner = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))
    outer = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((inner, 2)), numpy.dtype((numpy.int16, 3))]))

    expected = numpy.dtype(dict(
        names=['pad','struct_arr','regular_arr'],
        formats=[numpy.int32, (inner, (2,)), (numpy.int16, (3,))],
        offsets=[0,4,8],
        itemsize=16))

    result = dtypes.align(outer)
    assert result.isalignedstruct
    assert result == expected
def test_align_preserve_nested_aligned():
    """align() keeps the explicit itemsize of an aligned nested dtype."""
    int3 = numpy.dtype(dict(names=['x'], formats=[(numpy.int32, 3)], itemsize=16, aligned=True))
    outer = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int32, int3, numpy.int32]))

    expected = numpy.dtype(dict(
        names=['x','y','z'],
        formats=[numpy.int32, int3, numpy.int32],
        offsets=[0,16,32],
        itemsize=48,
        aligned=True))

    result = dtypes.align(outer)
    assert result.isalignedstruct
    assert result == expected
def test_lcm():
    """_lcm() accepts one or more arguments."""
    cases = [((10,), 10), ((15, 20), 60), ((16, 32, 24), 96)]
    for args, expected in cases:
        assert dtypes._lcm(*args) == expected
def test_find_minimum_alignment():
    # Simple case: base alignment suffices, since 12 is the next
    # multiple of 4 after 9.
    assert dtypes._find_minimum_alignment(12, 4, 9) == 4

    # The next multiple of 4 is 12, but offset 16 is wanted; alignment 8
    # works because 16 is the next multiple of 8 after 9.
    assert dtypes._find_minimum_alignment(16, 4, 9) == 8

    # An offset that is not a multiple of the base alignment is invalid.
    with pytest.raises(ValueError):
        dtypes._find_minimum_alignment(13, 4, 9)

    # Offset too large and not a power of 2: unattainable via alignment
    # alone, explicit padding would be required.
    with pytest.raises(ValueError):
        dtypes._find_minimum_alignment(24, 4, 9)
def test_wrapped_type_repr():
    """repr() of a WrappedType round-trips through eval()."""
    dtype_aligned = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 4, 16],
        itemsize=32,
        aligned=True))
    fields = dict(
        x=dtypes.WrappedType(numpy.dtype('int8'), 1),
        y=dtypes.WrappedType(numpy.dtype('int16'), 2),
        z=dtypes.WrappedType(numpy.dtype('int32'), 4))
    wrapped = dtypes.WrappedType(
        dtype_aligned, 16, explicit_alignment=None, wrapped_fields=fields,
        field_alignments=dict(x=None, y=4, z=16))

    namespace = dict(
        numpy=numpy, WrappedType=dtypes.WrappedType,
        int8=numpy.int8, int16=numpy.int16, int32=numpy.int32)
    assert eval(repr(wrapped), namespace) == wrapped
def test_ctype_struct_simple():
    """Render a simple aligned struct dtype to a C typedef.

    Renamed from ``test_ctype_struct``: another function with that exact
    name is defined later in this module, which silently shadowed this
    one so pytest never collected it.
    """
    dtype = dtypes.align(numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)]))
    ctype = dtypes.ctype(dtype)
    src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
    assert src == (
        'typedef struct _mod__module_0__ {\n'
        ' int val1;\n'
        ' float val2;\n'
        '} _mod__module_0_;\n\n\n'
        '_mod__module_0_')
def test_ctype_struct_nested():
    """Nested struct dtypes render as separate, referenced typedefs."""
    inner = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))
    outer = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((inner, 2)), numpy.dtype((numpy.int16, 3))]))
    outer = dtypes.align(outer)

    ctype = dtypes.ctype(outer)
    src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
    assert src == (
        'typedef struct _mod__module_1__ {\n'
        ' char val1;\n'
        ' char pad;\n'
        '} _mod__module_1_;\n\n\n'
        'typedef struct _mod__module_0__ {\n'
        ' int pad;\n'
        ' _mod__module_1_ struct_arr[2];\n'
        ' short regular_arr[3];\n'
        '} _mod__module_0_;\n\n\n'
        '_mod__module_0_')
def test_ctype_to_ctype_struct():
    """ctype() on an unknown (struct) dtype delegates to ctype_struct()."""
    aligned = dtypes.align(numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)]))
    ctype = dtypes.ctype(aligned)
    src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
    assert src == (
        'typedef struct _mod__module_0__ {\n'
        ' int val1;\n'
        ' float val2;\n'
        '} _mod__module_0_;\n\n\n'
        '_mod__module_0_')
def test_ctype_struct():
    """Explicit offsets/itemsize translate into ALIGN() attributes."""
    custom_layout = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 4, 16],
        itemsize=64,
        aligned=True))
    ctype = dtypes.ctype_struct(custom_layout)
    src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
    assert src == (
        'typedef struct _mod__module_0__ {\n'
        ' char x;\n'
        ' short ALIGN(4) y;\n'
        ' int ALIGN(16) z;\n'
        '} ALIGN(64) _mod__module_0_;\n\n\n'
        '_mod__module_0_')
def test_ctype_struct_ignore_alignment():
    """ignore_alignment=True drops all ALIGN() attributes from the typedef."""
    custom_layout = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32],
        offsets=[0, 4, 16],
        itemsize=64,
        aligned=True))
    ctype = dtypes.ctype_struct(custom_layout, ignore_alignment=True)
    src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
    assert src == (
        'typedef struct _mod__module_0__ {\n'
        ' char x;\n'
        ' short y;\n'
        ' int z;\n'
        '} _mod__module_0_;\n\n\n'
        '_mod__module_0_')
def test_ctype_struct_checks_alignment():
    """A packed (non-aligned) struct dtype is rejected by ctype_struct()."""
    packed = numpy.dtype(dict(
        names=['x', 'y', 'z'],
        formats=[numpy.int8, numpy.int16, numpy.int32]))
    with pytest.raises(ValueError):
        dtypes.ctype_struct(packed)
def test_ctype_struct_for_non_struct():
    """ctype_struct() only applies to struct dtypes."""
    # An array dtype is not a struct...
    with pytest.raises(ValueError):
        dtypes.ctype_struct(numpy.dtype((numpy.int32, 3)))
    # ...and neither is a simple scalar type.
    with pytest.raises(ValueError):
        dtypes.ctype_struct(numpy.int32)
def test_flatten_dtype():
    """flatten_dtype() expands nested structs/arrays into (path, dtype) pairs.

    Fix: the original bound the result to ``res`` and then called
    ``flatten_dtype`` a second time inside the assert; the bound result
    is now used directly.
    """
    dtype_nested = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))
    dtype = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))

    ref = [
        (['pad'], numpy.dtype('int32')),
        (['struct_arr', 0, 'val1'], numpy.dtype('int8')),
        (['struct_arr', 0, 'pad'], numpy.dtype('int8')),
        (['struct_arr', 1, 'val1'], numpy.dtype('int8')),
        (['struct_arr', 1, 'pad'], numpy.dtype('int8')),
        (['regular_arr', 0], numpy.dtype('int16')),
        (['regular_arr', 1], numpy.dtype('int16')),
        (['regular_arr', 2], numpy.dtype('int16'))]

    assert dtypes.flatten_dtype(dtype) == ref
def test_c_path():
    """A flattened field path renders as a C member-access expression."""
    assert dtypes.c_path(['struct_arr', 0, 'val1']) == 'struct_arr[0].val1'
def test_extract_field():
    """extract_field() follows a path through nested structs and arrays."""
    inner = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))
    outer = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((inner, 2)), numpy.dtype((numpy.int16, 3))]))

    nested_arr = numpy.empty(16, outer)
    nested_arr['struct_arr']['val1'][:,1] = numpy.arange(16)
    assert (dtypes.extract_field(nested_arr, ['struct_arr', 1, 'val1']) == numpy.arange(16)).all()

    flat_arr = numpy.empty(16, inner)
    flat_arr['val1'] = numpy.arange(16)
    assert (dtypes.extract_field(flat_arr, ['val1']) == numpy.arange(16)).all()
| 2.375 | 2 |
pyapprox/examples/sparse_grid_examples.py | ConnectedSystems/pyapprox | 26 | 12772634 | <filename>pyapprox/examples/sparse_grid_examples.py
from pyapprox.sparse_grid import *
from pyapprox.univariate_quadrature import *
def plot_2d_isotropic_clenshaw_curtis_sparse_grid():
    """Plot a level-3 isotropic Clenshaw-Curtis sparse grid in 2D."""
    num_vars, level = 2, 3
    samples, weights, data_structures = get_sparse_grid_samples_and_weights(
        num_vars, level, clenshaw_curtis_in_polynomial_order,
        clenshaw_curtis_rule_growth)
    plot_sparse_grid_2d(
        samples, weights, poly_indices=data_structures[1],
        subspace_indices=data_structures[2])
    plt.xlabel(r'$z_1$')
    plt.ylabel(r'$z_2$')
    plt.show()
def plot_3d_isotropic_mixed_rule_sparse_grid():
    """Plot a level-3 3D sparse grid mixing Leja and Clenshaw-Curtis rules."""
    num_vars, level = 3, 3
    # Leja rule driven by the Clenshaw-Curtis growth schedule.
    leja_cc_rule = partial(
        gaussian_leja_quadrature_rule, growth_rule=clenshaw_curtis_rule_growth)
    quad_rules = [gaussian_leja_quadrature_rule, leja_cc_rule,
                  clenshaw_curtis_in_polynomial_order]
    growth_rules = [leja_growth_rule, clenshaw_curtis_rule_growth,
                    clenshaw_curtis_rule_growth]
    samples, weights, data_structures = get_sparse_grid_samples_and_weights(
        num_vars, level, quad_rules, growth_rules)
    axs = plot_sparse_grid_3d(samples, weights, poly_indices=data_structures[1],
                              subspace_indices=data_structures[2])
    axs[0].set_title('samples')
    axs[1].set_title('polynomial indices')
    axs[2].set_title('subspace indices')
    plt.show()
| 2.5 | 2 |
theanet/layer/convpool.py | rakeshvar/theanet | 55 | 12772635 | <reponame>rakeshvar/theanet<gh_stars>10-100
import math
import theano as th
import theano.tensor as tt
import theano.tensor.nnet as nnconv
from theano.tensor.signal import pool
from .layer import Layer, activation_by_name
from .weights import init_wb
float_x = th.config.floatX
# ############################## ConvPool Layer ################################
class ConvLayer(Layer):
    """2D convolutional layer with a nonlinearity.

    Wraps theano's conv2d with 'valid', 'full', or 'same' border handling
    ('same' is implemented as 'full' followed by center cropping).
    """

    def __init__(self, inpt, wts, rand_gen,
                 batch_sz, num_prev_maps, in_sz,
                 num_maps, filter_sz, stride,
                 mode='valid',
                 actvn='relu50',
                 reg=()):
        """
        :param inpt: symbolic input tensor (batch, maps, rows, cols)
        :param wts: pre-trained (W, b) pair, or None to initialize randomly
        :param rand_gen: RNG used for initialization when wts is None
        :param batch_sz:
        :param num_prev_maps:
        :param in_sz:
        :param num_maps:
        :param filter_sz:
        :param stride:
        :param mode: "valid", "full", "same"
        :param actvn:
        :param reg:
        :raise NotImplementedError:
        TODO: Add support for rectangular input
        """
        assert (wts is not None or rand_gen is not None)
        assert mode in ("valid", "full", "same")

        image_shape = (batch_sz, num_prev_maps, in_sz, in_sz)
        filter_shape = (num_maps, num_prev_maps, filter_sz, filter_sz)

        # Assign Weights (scaled by fan-in/fan-out for the chosen activation)
        fan_in = num_prev_maps * filter_sz * filter_sz
        fan_out = num_maps * filter_sz * filter_sz
        self.W, self.b = init_wb(wts, rand_gen,
                                 filter_shape, (filter_shape[0], ),
                                 fan_in, fan_out, actvn, 'Conv')

        # Add Conv2D Operation to the graph; 'same' mode uses a 'full'
        # convolution that is cropped back to the input size below.
        border_mode = "valid" if mode == "valid" else "full"
        conv_out = nnconv.conv2d(inpt, self.W, image_shape, filter_shape,
                                 subsample=(stride, stride),
                                 border_mode=border_mode)
        if mode == 'same':
            assert stride == 1, "For Same mode stride should be 1"
            shift = (filter_sz - 1) // 2
            conv_out = conv_out[:, :, shift:in_sz + shift, shift:in_sz + shift]
            self.out_sz = in_sz
        elif mode == "full":
            # BUG FIX: full convolution output is in + filter - 1
            # (was erroneously in + filter + 1).
            self.out_sz = in_sz + filter_sz - 1
        elif mode == "valid":
            self.out_sz = in_sz - filter_sz + 1

        # TODO: Remove stride support OR make more robust
        self.out_sz //= stride

        self.output = activation_by_name(actvn)(conv_out +
                                                self.b.dimshuffle('x', 0, 'x', 'x'))

        # Store Parameters
        self.params = [self.W, self.b]
        self.inpt = inpt
        self.num_maps = num_maps
        self.mode = mode
        self.n_out = num_maps * self.out_sz ** 2
        # Regularization defaults, overridable via the reg mapping.
        self.reg = {"L1": 0, "L2": 0,
                    "momentum": .95,
                    "rate": 1,
                    "maxnorm": 0,}
        self.reg.update(reg)
        self.args = (batch_sz, num_prev_maps, in_sz, num_maps, filter_sz,
                     stride, mode, actvn, reg)
        self.representation = (
            "Conv Maps:{:2d} Filter:{} Stride:{} Mode:{} Output:{:2d} "
            "Act:{}\n\t L1:{L1} L2:{L2} Momentum:{momentum} Rate:{rate} Max Norm:{maxnorm}"
            "".format(num_maps, filter_sz, stride, mode, self.out_sz,
                      actvn, **self.reg))

    def TestVersion(self, inpt):
        """Build an identical layer sharing this layer's trained weights."""
        return ConvLayer(inpt, (self.W, self.b), None, *self.args)
class PoolLayer(Layer):
    """Pooling layer that typically follows a convolutional layer."""

    def __init__(self, inpt, num_maps, in_sz, pool_sz, ignore_border=False):
        """
        :param inpt: symbolic input tensor
        :param pool_sz: side length of the square pooling window
        :param ignore_border: When True, (5,5) input with ds=(2,2)
            will generate a (2,2) output. (3,3) otherwise.
        """
        self.output = pool.pool_2d(inpt, (pool_sz, pool_sz),
                                   ignore_border=ignore_border)
        self.out_sz = (in_sz // pool_sz if ignore_border
                       else math.ceil(in_sz / pool_sz))

        self.inpt = inpt
        self.params = []
        self.num_maps = num_maps
        self.ignore_border = ignore_border
        self.args = (num_maps, in_sz, pool_sz, ignore_border)
        self.n_out = num_maps * self.out_sz ** 2
        border_txt = "Ignore" if ignore_border else "Keep"
        self.representation = (
            "Pool Maps:{:2d} Pool_sz:{} Border:{} Output:{:2d}"
            "".format(num_maps, pool_sz, border_txt, self.out_sz))

    def TestVersion(self, inpt):
        """Build an identical pooling layer over a different input."""
        return PoolLayer(inpt, *self.args)
class MeanLayer(Layer):
    """Layer averaging each feature map over its spatial dimensions."""

    def __init__(self, inpt, num_maps, in_sz):
        # Collapse the two spatial axes, leaving (batch, maps).
        self.output = tt.mean(inpt, axis=(2, 3))

        self.inpt = inpt
        self.params = []
        self.num_maps = num_maps
        self.in_sz = in_sz
        self.out_sz = 1
        self.n_out = num_maps
        self.representation = (
            "Mean Maps:{:2d} Output:{:2d}"
            "".format(num_maps, self.out_sz))

    def TestVersion(self, inpt):
        """Build an identical mean layer over a different input."""
        return MeanLayer(inpt, self.num_maps, self.in_sz)
| 2.1875 | 2 |
Tests/Marketplace/Tests/test_pack_dependencies.py | satyakidroid/content | 799 | 12772636 | <reponame>satyakidroid/content<gh_stars>100-1000
# type: ignore[attr-defined]
from unittest.mock import patch
import networkx as nx
from Tests.Marketplace.packs_dependencies import calculate_single_pack_dependencies
def find_pack_display_name_mock(pack_folder_name):
return pack_folder_name
class TestCalculateSinglePackDependencies:
    @classmethod
    def setup_class(cls):
        # BUG FIX: mock.patch() only takes effect once the patcher is
        # started; the previous code created the patcher objects and
        # immediately discarded them, so the mocks were never applied.
        cls._patchers = [
            patch('demisto_sdk.commands.find_dependencies.find_dependencies.find_pack_display_name',
                  side_effect=find_pack_display_name_mock),
            patch('Tests.scripts.utils.log_util.install_logging'),
        ]
        for patcher in cls._patchers:
            patcher.start()

        graph = nx.DiGraph()
        graph.add_node('pack1', mandatory_for_packs=[])
        graph.add_node('pack2', mandatory_for_packs=[])
        graph.add_node('pack3', mandatory_for_packs=[])
        graph.add_node('pack4', mandatory_for_packs=[])
        graph.add_node('pack5', mandatory_for_packs=[])
        graph.add_edge('pack1', 'pack2')
        graph.add_edge('pack2', 'pack3')
        graph.add_edge('pack1', 'pack4')
        graph.nodes()['pack4']['mandatory_for_packs'].append('pack1')

        dependencies = calculate_single_pack_dependencies('pack1', graph)
        cls.first_level_dependencies, cls.all_level_dependencies, _ = dependencies

    @classmethod
    def teardown_class(cls):
        # Undo the patches applied in setup_class.
        for patcher in cls._patchers:
            patcher.stop()

    def test_calculate_single_pack_dependencies_first_level_dependencies(self):
        """
        Given
            - A full dependency graph where:
                - pack1 -> pack2 -> pack3
                - pack1 -> pack4
                - pack4 is mandatory for pack1
                - pack5 and pack1 are not a dependency for any pack
        When
            - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
        Then
            - Ensure first level dependencies for pack1 are only pack2 and pack4
        """
        all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
        expected_first_level_dependencies = {'pack2', 'pack4'}
        for node in expected_first_level_dependencies:
            assert node in self.first_level_dependencies
        for node in all_nodes - expected_first_level_dependencies:
            assert node not in self.first_level_dependencies

    def test_calculate_single_pack_dependencies_all_levels_dependencies(self):
        """
        Given
            - A full dependency graph where:
                - pack1 -> pack2 -> pack3
                - pack1 -> pack4
                - pack4 is mandatory for pack1
                - pack5 and pack1 are not a dependency for any pack
        When
            - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
        Then
            - Ensure all levels dependencies for pack1 are pack2, pack3 and pack4 only
        """
        all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
        expected_all_level_dependencies = {'pack2', 'pack3', 'pack4'}
        for node in expected_all_level_dependencies:
            assert node in self.all_level_dependencies
        for node in all_nodes - expected_all_level_dependencies:
            assert node not in self.all_level_dependencies

    def test_calculate_single_pack_dependencies_mandatory_dependencies(self):
        """
        Given
            - A full dependency graph where:
                - pack1 -> pack2 -> pack3
                - pack1 -> pack4
                - pack4 is mandatory for pack1
                - pack5 and pack1 are not a dependency for any pack
        When
            - Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
        Then
            - pack4 is mandatory for pack1 and that there are no other mandatory dependencies
        """
        expected_mandatory_dependency = 'pack4'
        assert self.first_level_dependencies[expected_mandatory_dependency]['mandatory']
        for node in self.first_level_dependencies:
            if node != expected_mandatory_dependency:
                assert not self.first_level_dependencies[node]['mandatory']
| 2.09375 | 2 |
depthwise_graphviz.py | vladimirtomic/anari-ai | 0 | 12772637 | <reponame>vladimirtomic/anari-ai<filename>depthwise_graphviz.py
##############################################
# #
# Depthwise convolutional layer #
# #
##############################################
# Implort pygears types
from pygears import gear, datagear, sim, find, reg
# Import pygears types
from pygears.typing import Array, Fixp, Queue, Tuple, Uint
# Import pygears built-in modules
from pygears.lib import accum, ccat, collect, czip, dreg, drv, flatten, mul, qdeal, qrange, qround, queuemap, replicate, saturate, sdp, when
# Packages used for verification and visualization
import matplotlib.image as mpimg
import numpy as np
import matplotlib.pyplot as plt
import pygearsviz
reg['gear/memoize'] = False
##############################################
# #
# Design #
# #
##############################################
# Dot product implementation with saturation and rounding
# Dot product implementation with saturation and rounding
@gear
def dot(din):
    """Dot product over a queued stream of (pixel, weight) pairs.

    Each pair is multiplied, the products are accumulated into a
    Fixp[10, 18] register, and the total is rounded then saturated
    down to Uint[8].
    """
    products = queuemap(din, f=mul)
    total = accum(products, init=Fixp[10, 18](0))
    rounded = qround(total)
    return saturate(rounded, t=Uint[8])
# Reorganizes data on the bus for proper distribution among 3 dot product
# modules
# Reorganizes data on the bus for proper distribution among 3 dot product
# modules
@datagear
def reorder(din: Queue[Tuple['pixel', 'weight']]
            ) -> Array[Queue[Tuple['pixel[0]', 'weight[0]']], 3]:
    # Split the 3-wide pixel/weight vectors into three independent
    # (pixel, weight) queues, duplicating the end-of-transaction flag
    # (``din.eot``) onto each of them.
    p = din.data[0]
    w = din.data[1]

    return (
        ((p[0], w[0]), din.eot),
        ((p[1], w[1]), din.eot),
        ((p[2], w[2]), din.eot),
    )
# Generates write addresses based on filter weights stream. This is used for
# caching of filter weights.
# Generates write addresses based on filter weights stream. This is used for
# caching of filter weights.
@gear(hdl={'compile': True})
async def wr_req(weights: Queue) -> Tuple[Uint[4], 'weights.data']:
    # Pairs every incoming weight with an incrementing 4-bit address so the
    # weights can be written into the sdp cache.  NOTE: this coroutine is
    # compiled to HDL ('compile': True), so its statement structure maps
    # directly onto generated logic -- keep it as written.
    cnt = Uint[4](0)

    async for w, last in weights:
        yield cnt, w
        cnt += 1
# Implements:
# 1. filter weights caching for a single CNN filter
# 2. filter weights readout synchronized with the input image segment
# 3. 3 dot product modules in operating in parallel on different slices
# along the image segment depth
# 4. outputs the result as a vector of 3 output feature map elements
# Implements:
#   1. filter weights caching for a single CNN filter
#   2. filter weights readout synchronized with the input image segment
#   3. 3 dot product modules in operating in parallel on different slices
#      along the image segment depth
#   4. outputs the result as a vector of 3 output feature map elements
@gear
def filter(
        img: Queue[Array[Uint, 3]],  # Image segment stream
        weights: Queue,  # Filter weights stream
) -> b'img.data':

    # - Performs the readout of the cached filter weights out of a simple
    #   dual-port (sdp) memory
    # - Before readout waits for the last of the filter weigts to be streamed in
    #   "weights['eot']"
    # - Kernel weights will be read out 30*30=900 times for each of the image
    #   segments
    # NOTE: the constants 9 (kernel size) and 30*30 (output map size) are
    # hard-coded here -- presumably tied to a fixed 3x3 kernel over a 32x32
    # segment; confirm before reusing with other geometries.
    w = when(weights['eot'] | dreg, 9) \
        | replicate(30 * 30) \
        | flatten \
        | qrange \
        | flatten \
        | sdp(wr_req(weights))

    # Pair up corresponding slices of the kernel and image segment and send
    # them for processing to a set of "dot" modules, one for each slice along
    # the tensor depth
    res = [dot(d) for d in reorder(czip(img, w))]

    # Synchronize outputs of the "dot" modules and combine them into a vector
    return ccat(*res)
# Top level design module - distributes image segments for processing on "num"
# filters in parallel
# Top level design module - distributes image segments for processing on "num"
# filters in parallel
@gear
def depthwise(
        img,  # Image segment stream
        weights,  # Filter weights stream
        *,
        num,  # Number of parallel filters available
):
    """Fan the weight stream out over ``num`` filters and combine results."""
    per_filter_weights = qdeal(weights, num=num, lvl=1)

    outputs = []
    for w in per_filter_weights:
        outputs.append(filter(img, w))

    return ccat(*outputs)
##############################################
# #
# Simulation #
# #
##############################################
# Collected simulation results (stays empty here: the drivers below are
# given empty sequences, so the graph is only built, not exercised).
res = []

# Driver that outputs image segments
img_drv = drv(t=Queue[Array[Uint[8], 3]], seq=[])

# Driver that outputs filter weights
w_drv = drv(t=Queue[Array[Fixp[3, 8], 3]], seq=[])

# Top level connection between drivers, dut and a monitor
# - "depthwise" module will be first converted to SystemVerilog and simulated
#   using "verilator" HDL simulator
depthwise(img_drv, w_drv, num=2) \
    | Array[Array[int, 3], 2] \
    | collect(result=res)

##############################################
#                                            #
#        Graphviz hierarchy visualization    #
#                                            #
##############################################

# Traverse the module hierarchy from the root and render it with graphviz,
# both as a hierarchy tree and as a dataflow DAG (SVG output).
top = find('/')

# Traverse hierarchy starting from the 'top' and generate graphviz graph
pygearsviz.render_hierarchy_tree(gear=top, name='depthwise_graphviz_hierarchy_tree', format='svg')
pygearsviz.render_dag(gear=top, name='depthwise_graphviz_dag', format='svg')
| 2.171875 | 2 |
test/test_edit_contact.py | Gargamellos/python_training | 0 | 12772638 | <reponame>Gargamellos/python_training<filename>test/test_edit_contact.py
from model.contact import Contact
def test_edit_first_contact(app):
    """Overwrite every field of the first contact with the value "edit"."""
    fields = ("name", "middlename", "surname", "nick", "title", "company",
              "address", "homephone", "mobilephone", "workphone", "fax",
              "email", "email2", "email3", "homepage", "address2", "phone2",
              "notes")
    edited = Contact(**{field: "edit" for field in fields})
    app.contact.edit_first_contact(edited)
| 2.703125 | 3 |
average_water_vapour.py | ASVincent/swfo | 0 | 12772639 | <reponame>ASVincent/swfo<filename>average_water_vapour.py
#!/bin/bash
"""
Create timeseries averages for the NOAA water vapour data.
"""
from datetime import datetime
from pathlib import Path
import numpy
import h5py
import pandas
from wagl.geobox import GriddedGeoBox
from wagl.hdf5.compression import H5CompressionFilter
from wagl.hdf5 import read_h5_table, write_h5_image
def build_index(indir):
    """
    Read the INDEX table for each file and build a full history
    index.

    :param indir:
        Directory containing the ``pr_wtr.eatm.*.h5`` files to scan.
    :return:
        A `pandas.DataFrame` indexed by ``timestamp`` (ascending,
        earliest to most recent) with at least the columns
        ``filename`` and ``band_name``.
    """
    # Collect one frame per file and concatenate once at the end;
    # repeated DataFrame.append is quadratic and was removed in pandas 2.0.
    frames = []
    for fname in Path(indir).glob("pr_wtr.eatm.*.h5"):
        with h5py.File(str(fname), 'r') as fid:
            tmp_df = read_h5_table(fid, 'INDEX')
            tmp_df['filename'] = fid.filename
            frames.append(tmp_df)

    if frames:
        df = pandas.concat(frames, sort=False)
    else:
        # keep the original (empty) schema when no files are found
        df = pandas.DataFrame(columns=['filename', 'band_name', 'timestamp'])

    df.sort_values('timestamp', inplace=True)
    df.set_index('timestamp', inplace=True)

    return df
def calculate_average(dataframe):
    """
    Given a dataframe with the columns:

    * filename
    * band_name

    Calculate the 3D/timeseries average from all input records.
    Each 2D dataset has dimensions (73y, 144x), and type float32.

    Returns a ``(mean, geobox, chunks)`` tuple, where ``mean`` is the
    per-pixel nan-mean over all records.
    """
    # one (73, 144) slice per record
    dims = (dataframe.shape[0], 73, 144)
    data = numpy.zeros(dims, dtype="float32")

    # load all data into 3D array (dims are small so just read all)
    for i, rec in enumerate(dataframe.iterrows()):
        row = rec[1]
        with h5py.File(row.filename, "r") as fid:
            ds = fid[row.band_name]
            ds.read_direct(data[i])
            no_data = float(ds.attrs['missing_value'])

        # check for nodata and convert to nan
        # do this for each dataset in case the nodata value changes
        data[i][data[i] == no_data] = numpy.nan

    # get the geobox, chunks
    # NOTE(review): this deliberately(?) reuses ``row`` from the last loop
    # iteration, and reads ``row.dataset_name`` while everything else uses
    # ``row.band_name`` -- verify the INDEX table really carries a
    # ``dataset_name`` column, otherwise this raises AttributeError.
    with h5py.File(row.filename, "r") as fid:
        ds = fid[row.dataset_name]
        geobox = GriddedGeoBox.from_dataset(ds)
        chunks = ds.chunks

    mean = numpy.nanmean(data, axis=0)

    return mean, geobox, chunks
def prwtr_average(indir, outdir, compression=H5CompressionFilter.LZF,
                  filter_opts=None):
    """
    Take the 4 hourly daily average from all files.

    :param indir: directory containing the ``pr_wtr.eatm.*.h5`` inputs
    :param outdir: directory that ``pr_wtr_average.h5`` is written into
        (created if missing)
    :param compression: HDF5 compression filter for the output datasets
    :param filter_opts: optional dict of filter options; ``chunks``
        defaults to the source dataset's chunking when not supplied
    """
    df = build_index(indir)

    # group all records sharing the same (month, day, hour) across years
    groups = df.groupby([df.index.month, df.index.day, df.index.hour])

    # create directories as needed
    out_fname = Path(outdir).joinpath("pr_wtr_average.h5")

    if not out_fname.parent.exists():
        out_fname.parent.mkdir(parents=True)

    # create output file
    with h5py.File(str(out_fname), 'w') as fid:

        # the data is ordered so we can safely use BAND-1 = Jan-1
        for band_index, item in enumerate(groups):
            grp_name, grp_df = item

            # synthesised leap year timestamp (use year 2000)
            fmt = "2000 {:02d} {:02d} {:02d}"
            dtime = datetime.strptime(fmt.format(*grp_name), "%Y %m %d %H")

            # mean
            mean, geobox, chunks = calculate_average(grp_df)

            # dataset name format "%B-%d/%H%M" eg FEBRUARY-06/1800 for Feb 6th 1800 hrs
            dname = "AVERAGE/{}".format(dtime.strftime("%B-%d/%H%M").upper())

            # dataset description
            description = ("Average data for {year_month} {hour}00 hours, "
                           "over the timeperiod {dt_min} to {dt_max}")
            description = description.format(
                year_month=dtime.strftime("%B-%d"),
                hour=dtime.strftime("%H"),
                dt_min=grp_df.index.min(),
                dt_max=grp_df.index.max()
            )

            # dataset attributes
            attrs = {
                "description": description,
                "timestamp": dtime,
                "date_format": "2000 %B-%d/%H%M",
                "band_name": "BAND-{}".format(band_index +1),
                "geotransform": geobox.transform.to_gdal(),
                "crs_wkt": geobox.crs.ExportToWkt()
            }

            # create empty or copy the user supplied filter options
            if not filter_opts:
                f_opts = dict()
            else:
                f_opts = filter_opts.copy()

            # use original chunks if none are provided
            if 'chunks' not in f_opts:
                f_opts['chunks'] = chunks

            # write
            write_h5_image(mean, dname, fid, attrs=attrs,
                           compression=compression, filter_opts=f_opts)
| 2.78125 | 3 |
gans/building_blocks/attention.py | christophstach/research-project-gan | 1 | 12772640 | import torch
import torch.nn as nn
from .convolution import Conv2d
class SelfAttention2d(nn.Module):
    """Self-attention block for 2D feature maps.

    ``k`` sets the channel reduction factor of the 1x1 projections
    (C -> C // k); ``gamma`` is initialised to zero, so the block starts
    out as an identity (pure residual) mapping and learns how much
    attention to mix in during training.
    """

    def __init__(self, in_channels, k=8, bias=False, eq_lr=False, spectral_normalization=False):
        super().__init__()

        # f/g/h project C -> C // k channels; v maps C // k back to C.
        self.wf = Conv2d(in_channels, in_channels // k, kernel_size=1, stride=1, padding=0, bias=bias, eq_lr=eq_lr, spectral_normalization=spectral_normalization)
        self.wg = Conv2d(in_channels, in_channels // k, kernel_size=1, stride=1, padding=0, bias=bias, eq_lr=eq_lr, spectral_normalization=spectral_normalization)
        self.wh = Conv2d(in_channels, in_channels // k, kernel_size=1, stride=1, padding=0, bias=bias, eq_lr=eq_lr, spectral_normalization=spectral_normalization)
        self.wv = Conv2d(in_channels // k, in_channels, kernel_size=1, stride=1, padding=0, bias=bias, eq_lr=eq_lr, spectral_normalization=spectral_normalization)

        # learnable residual gate, starts at 0 (identity mapping)
        self.gamma = nn.Parameter(torch.zeros(1), requires_grad=True)

    def forward(self, x):
        # flatten the spatial dims: each is (B, C // k, H*W)
        f = self.wf(x).view(x.size(0), -1, x.size(2) * x.size(3))
        g = self.wg(x).view(x.size(0), -1, x.size(2) * x.size(3))
        h = self.wh(x).view(x.size(0), -1, x.size(2) * x.size(3))

        # attention map: (B, H*W, H*W), normalised over the last axis
        s = torch.bmm(f.transpose(1, 2), g)
        beta = torch.softmax(s, 2)

        # attention-weighted h-features, reshaped back to (B, C // k, H, W)
        v = torch.bmm(h, beta).view(x.size(0), -1, x.size(2), x.size(3))
        o = self.wv(v)

        return self.gamma * o + x
| 2.53125 | 3 |
pymultimatic/model/__init__.py | oliverpell/pymultiMATIC | 30 | 12772641 | <gh_stars>10-100
"""Mapped model from the API."""
from .mode import ( # noqa: F401
Mode,
OperatingMode,
OperatingModes,
QuickVeto,
ActiveMode,
SettingMode,
SettingModes,
)
from .timeprogram import TimeProgram, TimeProgramDay, TimePeriodSetting # noqa: F401
from .common import Component, Function # noqa: F401
from .zone import ZoneCooling, ZoneHeating, Zone, ActiveFunction # noqa: F401
from .room import Device, Room # noqa: F401
from .status import HvacStatus, BoilerStatus, Error # noqa: F401
from .syncstate import SyncState # noqa: F401
from .dhw import Dhw, HotWater, Circulation # noqa: F401
from .report import Report, EmfReport # noqa: F401
from .ventilation import Ventilation # noqa: F401
from .quick_mode import QuickMode, QuickModes, HolidayMode # noqa: F401
from .info import FacilityDetail # noqa: F401
from .system import System # noqa: F401
| 1.53125 | 2 |
libs/utils/android/workloads/uibench.py | MIPS/external-lisa | 0 | 12772642 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import logging
from subprocess import Popen, PIPE
from time import sleep
from android import Screen, System, Workload
class UiBench(Workload):
    """
    Android UiBench workload

    Drives the UiBench jank test APK on a target device via adb: it
    prepares the device (screen, airplane mode, brightness), launches the
    requested instrumentation test, synchronises on a logcat marker, lets
    the benchmark run for a fixed duration and finally collects gfxinfo
    frame statistics.
    """

    # Package required by this workload
    package = 'com.android.test.uibench'

    # Instrumentation required to run tests
    test_package = 'com.android.uibench.janktests'

    # Supported tests list
    test_ClippedListView = 'UiBenchJankTests#testClippedListView'
    test_DialogListFling = 'UiBenchJankTests#testDialogListFling'
    test_FadingEdgeListViewFling = 'UiBenchJankTests#testFadingEdgeListViewFling'
    test_FullscreenOverdraw = 'UiBenchJankTests#testFullscreenOverdraw'
    test_GLTextureView = 'UiBenchJankTests#testGLTextureView'
    test_InflatingListViewFling = 'UiBenchJankTests#testInflatingListViewFling'
    test_Invalidate = 'UiBenchJankTests#testInvalidate'
    test_InvalidateTree = 'UiBenchJankTests#testInvalidateTree'
    test_OpenNavigationDrawer = 'UiBenchJankTests#testOpenNavigationDrawer'
    test_OpenNotificationShade = 'UiBenchJankTests#testOpenNotificationShade'
    test_ResizeHWLayer = 'UiBenchJankTests#testResizeHWLayer'
    test_SaveLayerAnimation = 'UiBenchJankTests#testSaveLayerAnimation'
    test_SlowBindRecyclerViewFling = 'UiBenchJankTests#testSlowBindRecyclerViewFling'
    test_SlowNestedRecyclerViewFling = 'UiBenchJankTests#testSlowNestedRecyclerViewFling'
    test_SlowNestedRecyclerViewInitialFling = 'UiBenchJankTests#testSlowNestedRecyclerViewInitialFling'
    test_TrivialAnimation = 'UiBenchJankTests#testTrivialAnimation'
    test_TrivialListViewFling = 'UiBenchJankTests#testTrivialListViewFling'
    test_TrivialRecyclerListViewFling = 'UiBenchJankTests#testTrivialRecyclerListViewFling'
    test_BitmapUploadJank = 'UiBenchRenderingJankTests#testBitmapUploadJank'
    test_ShadowGridListFling = 'UiBenchRenderingJankTests#testShadowGridListFling'
    test_EditTextTyping = 'UiBenchTextJankTests#testEditTextTyping'
    test_LayoutCacheHighHitrateFling = 'UiBenchTextJankTests#testLayoutCacheHighHitrateFling'
    test_LayoutCacheLowHitrateFling = 'UiBenchTextJankTests#testLayoutCacheLowHitrateFling'
    test_ActivityTransitionsAnimation = 'UiBenchTransitionsJankTests#testActivityTransitionsAnimation'
    test_WebViewFling = 'UiBenchWebView#testWebViewFling'

    def __init__(self, test_env):
        super(UiBench, self).__init__(test_env)

        self._log = logging.getLogger('UiBench')
        self._log.debug('Workload created')

        # Set of output data reported by UiBench (path to the gfxinfo dump,
        # populated by run())
        self.db_file = None

    def run(self, out_dir, test_name, duration_s, collect=''):
        """
        Run single UiBench workload.

        :param out_dir: Path to experiment directory where to store results.
        :type out_dir: str

        :param test_name: Name of the test to run
        :type test_name: str

        :param duration_s: Run benchmak for this required number of seconds
        :type duration_s: int

        :param collect: Specifies what to collect. Possible values:
            - 'energy'
            - 'systrace'
            - 'ftrace'
            - any combination of the above
        :type collect: list(str)
        """
        activity = '.' + test_name

        # Keep track of mandatory parameters
        self.out_dir = out_dir
        self.collect = collect

        # Unlock device screen (assume no password required)
        Screen.unlock(self._target)

        # Close and clear application
        System.force_stop(self._target, self.package, clear=True)

        # Set airplane mode
        System.set_airplane_mode(self._target, on=True)

        # Set min brightness
        Screen.set_brightness(self._target, auto=False, percent=0)

        # Start the main view of the app which must be running
        # to reset the frame statistics.
        System.monkey(self._target, self.package)

        # Force screen in PORTRAIT mode
        Screen.set_orientation(self._target, portrait=True)

        # Reset frame statistics
        System.gfxinfo_reset(self._target, self.package)
        sleep(1)

        # Clear logcat (NOTE(review): stray trailing semicolon kept as-is)
        os.system(self._adb('logcat -c'));

        # Regexps for benchmark synchronization
        start_logline = r'TestRunner: started'
        UIBENCH_BENCHMARK_START_RE = re.compile(start_logline)
        self._log.debug("START string [%s]", start_logline)

        # Parse logcat output lines
        # NOTE(review): the .format(self._target.adb_name) below is a no-op --
        # the filter string has no '{}' placeholder; presumably leftover from
        # an earlier '-s {}' variant.
        logcat_cmd = self._adb(
                'logcat TestRunner:* System.out:I *:S BENCH:*'\
                .format(self._target.adb_name))
        self._log.info("%s", logcat_cmd)

        # Run benchmark with a lot of iterations to avoid finishing before duration_s elapses
        command = "nohup am instrument -e iterations 1000000 -e class {}{} -w {}".format(
            self.test_package, activity, self.test_package)
        self._target.background(command)

        # shell=True on a locally built command string; blocks below until the
        # start marker appears in logcat (readline is blocking).
        logcat = Popen(logcat_cmd, shell=True, stdout=PIPE)
        while True:

            # read next logcat line (up to max 1024 chars)
            message = logcat.stdout.readline(1024)

            # Benchmark start trigger
            match = UIBENCH_BENCHMARK_START_RE.search(message)
            if match:
                self.tracingStart()
                self._log.debug("Benchmark started!")
                break

        # Run the workload for the required time
        self._log.info('Benchmark [%s] started, waiting %d [s]',
                       activity, duration_s)
        sleep(duration_s)

        self._log.debug("Benchmark done!")
        self.tracingStop()

        # Get frame stats
        self.db_file = os.path.join(out_dir, "framestats.txt")
        System.gfxinfo_get(self._target, self.package, self.db_file)

        # Close and clear application
        System.force_stop(self._target, self.package, clear=True)

        # Go back to home screen
        System.home(self._target)

        # Switch back to original settings
        Screen.set_orientation(self._target, auto=True)
        System.set_airplane_mode(self._target, on=False)
        Screen.set_brightness(self._target, auto=True)
# vim :set tabstop=4 shiftwidth=4 expandtab
| 1.96875 | 2 |
blood/migrations/0001_initial.py | Timoh97/Blood-G | 0 | 12772643 | <reponame>Timoh97/Blood-G
# Generated by Django 3.2.9 on 2022-01-21 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the ``blood`` app:
    #   * Stock        -- units available per blood group
    #   * BloodRequest -- a request raised by either a donor or a patient
    # Do not edit generated field definitions by hand.

    initial = True

    dependencies = [
        ('donor', '0002_auto_20210213_1602'),
        ('patient', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bloodgroup', models.CharField(max_length=10)),
                ('unit', models.PositiveIntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='BloodRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('patient_name', models.CharField(max_length=30)),
                ('patient_age', models.PositiveIntegerField()),
                ('reason', models.CharField(max_length=500)),
                ('bloodgroup', models.CharField(max_length=10)),
                ('unit', models.PositiveIntegerField(default=0)),
                ('status', models.CharField(default='Pending', max_length=20)),
                ('date', models.DateField(auto_now=True)),
                ('request_by_donor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='donor.donor')),
                ('request_by_patient', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='patient.patient')),
            ],
        ),
    ]
| 1.84375 | 2 |
setup.py | anmolmalik01/package-installer | 1 | 12772644 | from setuptools import setup,find_packages
# Use the README as the long description shown on the package index page.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    # NOTE(review): distribution names conventionally avoid spaces;
    # packaging tools will normalise "python installer" -- confirm intended.
    name='python installer',
    version='0.0.1',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A package installer for python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    install_requires=[
            'click',
            'pipreqs'
        ],
    keywords=['python', 'package installer', 'json'],
    classifiers= [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Programming Language :: Python'
    ],
    # exposes the CLI entry point ``package`` -> python_installer.main:cli
    entry_points={
        'console_scripts': [
            'package = python_installer.main:cli',
        ]
    },
)
| 1.492188 | 1 |
make_superpixel_dl.py | alsmeirelles/QuickAnnotator | 0 | 12772645 | <reponame>alsmeirelles/QuickAnnotator
# +
import argparse
import glob
import os
import sys
import traceback
import cv2
import numpy as np
import sklearn.feature_extraction.image
import torch
from skimage.segmentation import find_boundaries
from skimage.segmentation import slic
import json
# -
class LayerActivations():
    """Capture the output of a single module via a PyTorch forward hook.

    After every forward pass through the hooked layer, ``features`` holds
    that layer's most recent output.  Call :meth:`remove` to detach the
    hook when done.
    """

    # most recent output of the hooked layer (None until first forward pass)
    features = None

    def __init__(self, layer):
        # keep the handle so the hook can be detached later
        self.hook = layer.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        # called by PyTorch after each forward() of the hooked module
        self.features = output

    def remove(self):
        self.hook.remove()
from QA_utils import get_torch_device
from unet import UNet
# -----helper function to split data into batches
# -----helper function to split data into batches
def divide_batch(l, n):
    """Yield consecutive chunks of at most ``n`` rows from array ``l``."""
    total = l.shape[0]
    start = 0
    while start < total:
        yield l[start:start + n, ::]
        start += n
try:
    # ----- parse command line arguments
    print("USER: Generating DL-Superpixel output with latest model", flush=True)
    parser = argparse.ArgumentParser(description='Make output for entire image using Unet')
    parser.add_argument('input_pattern',
                        help="input filename pattern. try: *.png, or tsv file containing list of files to analyze",
                        nargs="*")

    #--- gpu params
    parser.add_argument('-p', '--patchsize', help="patchsize, default 256", default=256, type=int)
    parser.add_argument('-x', '--batchsize', help="batchsize for controlling GPU memory usage, default 10", default=10,
                        type=int)
    parser.add_argument('-m', '--model', help="DL model location, default None", default=None, type=str)
    parser.add_argument('-i', '--gpuid', help="id of gpu to use, using -2 will use the CPU", default=0, type=int)

    #--- super pixel params
    parser.add_argument('-s', '--superdir', help="output dir for superpixel, default ./output/", default="./output/", type=str)
    parser.add_argument('-o', '--boundarydir', help="output dir for superpixel boundary, default ./output/", default="./output/", type=str)
    parser.add_argument('-b', '--basepath', help="base path to add to file names, helps when producing data using tsv file as input", default="", type=str)
    parser.add_argument('-a', '--approxcellsize', help="approximate width of cell in pixels, will be used to determine number of segments", default=20, type=int)
    parser.add_argument('-c', '--compactness', help="compactness of slic, default .01", default=.01, type=float)
    parser.add_argument('-f', '--force', help="force regeneration of output even if it exists", default=False,
                        action="store_true")

    args = parser.parse_known_args()[0]
    print(f"args: {args}")

    if not (args.input_pattern):
        parser.error('No images selected with input pattern')

    batch_size = args.batchsize
    patch_size = args.patchsize
    # tiles overlap by half a patch; only the centre of each tile is kept
    stride_size = patch_size // 2
    superoutdir = args.superdir
    boutdir = args.boundarydir

    if not os.path.exists(superoutdir):
        os.makedirs(superoutdir)
    if not os.path.exists(boutdir):
        os.makedirs(boutdir)

    # exit cleanly (code 0) when no trained model exists yet
    if not os.path.isfile(args.model):
        print("INFO: No model ready in {}".format(args.model),flush=True)
        sys.exit(0)

    # ----- load network
    device = get_torch_device(args.gpuid)

    checkpoint = torch.load(args.model, map_location=lambda storage,
                                                            loc: storage)  # load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666
    model = UNet(n_classes=checkpoint["n_classes"], in_channels=checkpoint["in_channels"],
                 padding=checkpoint["padding"], depth=checkpoint["depth"], wf=checkpoint["wf"],
                 up_mode=checkpoint["up_mode"], batch_norm=checkpoint["batch_norm"]).to(device)
    model.load_state_dict(checkpoint["model_dict"])
    model.eval()

    # hook the last conv of the final up-path block so that dr.features
    # exposes its activations after each forward pass
    dr=LayerActivations(model.up_path[-1].conv_block.block[-1])

    print(f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")

    # ----- get file list
    files = []
    basepath = args.basepath  #
    basepath = basepath + os.sep if len(
        basepath) > 0 else ""  # if the user supplied a different basepath, make sure it ends with an os.sep

    if len(args.input_pattern) > 1:  # bash has sent us a list of files
        files = args.input_pattern
    elif args.input_pattern[0].endswith("tsv"):  # user sent us an input file
        # load first column here and store into files
        with open(args.input_pattern[0], 'r') as f:
            for line in f:
                if line[0] == "#":
                    continue
                files.append(basepath + line.strip().split("\t")[0])
    else:  # user sent us a wildcard, need to use glob to find files
        files = glob.glob(args.basepath + args.input_pattern[0])

    # ------ work on files
    output_files=[]
    nfiles = len(files)
    for ii,fname in enumerate(files):
        print(f"PROGRESS: File {ii}/{nfiles}")

        fname = fname.strip()
        sfname= "%s/%s_superpixels.png" % (superoutdir, os.path.basename(fname)[0:-4])
        bfname= "%s/%s_superpixels_boundary.png" % (boutdir, os.path.basename(fname)[0:-4])

        print(f"working on file: \t {fname}", flush=True)
        print(f"saving superpixel to : \t {sfname}", flush=True)
        print(f"saving boundary to : \t {bfname}", flush=True)

        if not args.force and os.path.exists(sfname):
            print("Skipping as output file exists", flush=True)
            continue

        io = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
        io_shape_orig = np.array(io.shape)

        # add half the stride as padding around the image, so that we can crop it away later
        io = np.pad(io, [(stride_size // 2, stride_size // 2), (stride_size // 2, stride_size // 2), (0, 0)],
                    mode="reflect")

        io_shape_wpad = np.array(io.shape)

        # pad to match an exact multiple of unet patch size, otherwise last row/column are lost
        npad0 = int(np.ceil(io_shape_wpad[0] / patch_size) * patch_size - io_shape_wpad[0])
        npad1 = int(np.ceil(io_shape_wpad[1] / patch_size) * patch_size - io_shape_wpad[1])

        io = np.pad(io, [(0, npad0), (0, npad1), (0, 0)], mode="constant")

        # NOTE(review): _extract_patches is a *private* sklearn API and may
        # break on upgrade -- confirm sklearn version pinning.
        arr_out = sklearn.feature_extraction.image._extract_patches(io, (patch_size, patch_size, 3), stride_size)
        arr_out_shape = arr_out.shape
        arr_out = arr_out.reshape(-1, patch_size, patch_size, 3)

        # in case we have a large network, lets cut the list of tiles into batches
        output = np.zeros((0, 4, patch_size, patch_size))
        for batch_arr in divide_batch(arr_out, batch_size):
            print(f"PROGRESS: Superpixel Chunk {output.shape[0]}/{arr_out.shape[0]}")

            arr_out_gpu = torch.from_numpy(batch_arr.transpose(0, 3, 1, 2) / 255).type('torch.FloatTensor').to(device)

            # ---- get results
            # NOTE(review): the model's return value is intentionally
            # discarded -- the forward pass only serves to populate
            # dr.features (the hooked layer's activations), which is what
            # gets collected below.
            output_batch = model(arr_out_gpu)

            # --- pull from GPU and append to rest of output
            output_batch=dr.features.detach().cpu().numpy().astype(np.double)

            output = np.append(output, output_batch, axis=0)

        output = output.transpose((0, 2, 3, 1))

        # turn from a single list into a matrix of tiles
        output = output.reshape(arr_out_shape[0], arr_out_shape[1], patch_size, patch_size, output.shape[3])

        # remove the padding from each tile, we only keep the center
        output = output[:, :, stride_size // 2:-stride_size // 2, stride_size // 2:-stride_size // 2, :]

        # turn all the tiles into an image
        output = np.concatenate(np.concatenate(output, 1), 1)

        # incase there was extra padding to get a multiple of patch size, remove that as well
        output = output[0:io_shape_orig[0], 0:io_shape_orig[1], :]  # remove paddind, crop back

        # --- super pixel work
        # one superpixel per (approxcellsize x approxcellsize) region
        number_segments = (output.shape[0]//args.approxcellsize)**2
        print(f"Using {number_segments} superpixels")
        segs_dl = slic(output, n_segments=number_segments, compactness=args.compactness, multichannel=True, slic_zero=True)  # <--- slic_zero?

        colors = np.array(  #make random colors. its okay if some are the same, just as long as they're not touching which is unlikely
            [(np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)) for i in
             range(segs_dl.max() + 1)])

        cv2.imwrite(sfname, colors[segs_dl])

        boundary = find_boundaries(segs_dl, connectivity=1, mode='outer', background=0)
        boundary = boundary.astype(np.uint8) * 255
        cv2.imwrite(bfname, boundary)
        output_files.append(fname)

    print("USER: Done generating output", flush=True)
    print(f"RETVAL: {json.dumps({'model': args.model,'output_file': output_files})}", flush=True)
except SystemExit as ex:
if ex.code != 0:
track = traceback.format_exc()
track = track.replace("\n","\t")
print(f"ERROR: {track}", flush=True)
sys.exit(1)
else:
sys.exit(ex.code)
else:
track = traceback.format_exc()
track = track.replace("\n","\t")
print(f"ERROR: {track}", flush=True)
sys.exit(1)
| 2.28125 | 2 |
FamilyFood/lower_card/apps.py | ZST-Devs/HomeFood | 2 | 12772646 | from django.apps import AppConfig
class LowerCardConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'lower_card'
| 1.445313 | 1 |
pichtest.py | PeterWinzell/RadarRPISwitch | 0 | 12772647 | import os
import subprocess
import sys
import RPi.GPIO as GPIO
import time
# Poll BCM pin 6 until it reads high, printing each sample.
GPIO.setmode(GPIO.BCM)
GPIO.setup(6, GPIO.IN)

while True:
    level = GPIO.input(6)
    print(level)
    if level == 1:
        break
    time.sleep(0.2)

print(' finished ' )
| 2.65625 | 3 |
dataset/Venice/3.make_3d_dataset.py | LeeJAJA/PHNet-pytorch | 5 | 12772648 | <filename>dataset/Venice/3.make_3d_dataset.py
import os,os.path,re,shutil,json
# Number of consecutive frames stacked per sample.
IMAGE_NUM = 2
SOURCE_PATH = '/.Venice/img_roi'  # cropped input frames
EXPORT_PATH = '/.Venice/ablation' + str(IMAGE_NUM)  # per-frame output folders
ROI_PATH = '/.Venice/density_map_init'  # appears unused in this script
def mycopyfile(srcfile,dstfile):
    """Copy *srcfile* to *dstfile*, creating the destination directory.

    A missing source file is reported and skipped rather than raising.
    """
    if not os.path.isfile(srcfile):
        # Bug fix: the original "not exist!"%(srcfile) raised TypeError
        # because the format string had no %s placeholder.
        print("%s not exist!" % srcfile)
    else:
        fpath, fname = os.path.split(dstfile)
        if fpath:
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists()/os.makedirs() pair
            os.makedirs(fpath, exist_ok=True)
        shutil.copyfile(srcfile, dstfile)
        #print("copy %s -> %s"%( srcfile,dstfile))
def searchFile(pathname, filename):
    """Walk *pathname* and return all file names matching the regex *filename*."""
    matched = []
    for _root, _dirs, files in os.walk(pathname):
        matched.extend(name for name in files if re.match(filename, name))
    return matched
# Accumulators for the train/test split manifests written at the end.
train_json = []
test_json = []

# All cropped frames, sorted so consecutive frames of a scene are adjacent.
list1 = []
list1 = searchFile(SOURCE_PATH,'(.*).jpg')
list1.sort()
def normal(ind1, ind2):
    # Decide whether the frames at indices ind1..ind2 of the global
    # ``list1`` are a contiguous run from the same scene.
    # File names look like "<scene>_<frame>.jpg"; consecutive frames are
    # assumed to be 60 apart -- TODO confirm against the dataset naming.
    if ind1 < 0:
        return False
    if list1[ind1].split('_')[0] != list1[ind2].split('_')[0]:
        # different scene/camera prefix
        return False
    i1 = int(list1[ind1].split('.')[0].split('_')[1])
    i2 = int(list1[ind2].split('.')[0].split('_')[1])
    if (i2-i1) > 60*(ind2-ind1+1):
        print('{} lost more than one frame to {}'.format(list1[ind1], list1[ind2]))
        return False
    return True
# For every frame, copy the previous IMAGE_NUM-1 frames into a per-frame
# folder; when the sequence is broken, duplicate the current frame under a
# suffixed name instead.  Record each folder in the train or test manifest.
for index in range(len(list1)):
    for place in range(IMAGE_NUM-1,-1,-1):
        if normal(index-place,index):
            mycopyfile(os.path.join(SOURCE_PATH, list1[index-place]), os.path.join(EXPORT_PATH, list1[index], list1[index-place]))
        else:
            # fall back to duplicating the current frame
            mycopyfile(os.path.join(SOURCE_PATH, list1[index]), os.path.join(EXPORT_PATH, list1[index], list1[index].split('.')[0]+str(place)+'.jpg'))
    # scene prefix '4896' is routed to the training split -- TODO confirm
    if list1[index].split('_')[0] == '4896':
        train_json.append(os.path.join(EXPORT_PATH, list1[index]))
    else:
        test_json.append(os.path.join(EXPORT_PATH, list1[index]))

with open('./jsons/train' + str(IMAGE_NUM) + '.json', 'w', encoding='utf-8') as json_file:
    json.dump(train_json, json_file, indent=1)
print(len(train_json))

with open('./jsons/test' + str(IMAGE_NUM) + '.json', 'w', encoding='utf-8') as json_file:
    json.dump(test_json, json_file, indent=1)
print(len(test_json))
| 2.5625 | 3 |
util/rev_info.py | ccwanggl/smartknob | 3,836 | 12772649 | <reponame>ccwanggl/smartknob
import datetime
import subprocess
def git_short_rev():
    """Return the abbreviated git revision of HEAD."""
    cmd = ['git', 'rev-parse', '--short', 'HEAD']
    try:
        return subprocess.check_output(cmd).decode('utf-8').strip()
    except Exception:
        raise RuntimeError("Could not read git revision. Make sure you have git installed and you're working with a git clone of the repository.")
def current_date():
    """Return today's date formatted as YYYY-MM-DD."""
    return '{:%Y-%m-%d}'.format(datetime.date.today())
def git_date(short=True):
    """Return the committer date of HEAD.

    Full ISO timestamp by default semantics of git's %ci format; only the
    date portion when *short* is true.
    """
    cmd = ['git', 'log', '-1', '--format=%ci', 'HEAD']
    try:
        iso = subprocess.check_output(cmd).decode('utf-8').strip()
        return iso.split(' ')[0] if short else iso
    except Exception:
        raise RuntimeError("Could not read git commit date. Make sure you have git installed and you're working with a git clone of the repository.")
def git_release_version(search_prefix):
    """Return the release version encoded in a git tag on HEAD.

    Scans the tags pointing at the current HEAD commit and returns the
    remainder of the first tag that begins with ``search_prefix`` (e.g.
    prefix ``'releases/v'`` on tag ``'releases/v1.2'`` yields ``'1.2'``).
    Returns None when no tag on HEAD matches the prefix.

    Raises:
        RuntimeError: If the tag list cannot be read from git.
    """
    try:
        tags = subprocess.check_output([
            'git',
            'tag',
            '--points-at',
            'HEAD',
        ]).decode('utf-8').splitlines()
    except Exception as exc:
        # Chain the original failure so the root cause stays visible.
        raise RuntimeError("Could not read git release tags. Make sure you have git installed and you're working with a git clone of the repository.") from exc
    for tag in tags:
        if tag.startswith(search_prefix):
            # Strip the prefix; the remainder is the version string.
            return tag[len(search_prefix):]
    return None
| 2.6875 | 3 |
test/pytest/test_anonymous_group.py | showipintbri/ttp | 254 | 12772650 | <filename>test/pytest/test_anonymous_group.py
import sys
sys.path.insert(0, "../..")
import pprint
from ttp import ttp
def test_simple_anonymous_template():
    """Smoke-test parsing with a template that is a single anonymous group.

    Checks that data handed to the ttp() constructor is registered with a
    template input and that parsing completes and yields a result.
    """
    template_1 = """interface {{ interface }}
description {{ description | ORPHRASE }}"""
    data_1 = """
interface Port-Chanel11
  description Storage Management
interface Loopback0
  description RID
    """
    parser = ttp(template=template_1, data=data_1)
    # check that data added:
    datums_added = {
        "{}:{}".format(template.name, input_name): input_obj.data
        for template in parser._templates
        for input_name, input_obj in template.inputs.items()
    }
    # pprint.pprint(datums_added)
    # Previously this dict was built but never checked, leaving the test with
    # no active assertions; verify the constructor data reached an input.
    assert datums_added
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Parsing must complete and return a non-empty result structure.
    assert res
    # assert res == [[[{'description': 'Storage Management', 'interface': 'Port-Chanel11'}, {'description': 'RID', 'interface': 'Loopback0'}]]]
# test_simple_anonymous_template()
def test_anonymous_group_with_vars():
    """An anonymous group combined with a <vars> tag: the matched items and
    the variables dictionary are emitted into the same per-input list."""
    tpl = """
<input load="text">
interface Port-Chanel11
  description Storage Management
interface Loopback0
  description RID
</input>
<vars name="my.var.s">
a = 1
b = 2
</vars>
<group>
interface {{ interface }}
  description {{ description | ORPHRASE }}
</group>
    """
    parser = ttp(template=tpl)
    parser.parse()
    res = parser.result()
    expected_matches = [
        {"description": "Storage Management", "interface": "Port-Chanel11"},
        {"description": "RID", "interface": "Loopback0"},
        {"my": {"var": {"s": {"a": 1, "b": 2}}}},
    ]
    assert res == [[expected_matches]]
# test_anonymous_group_with_vars()
def test_anonymous_group_with_child_group_empty_absolute_path():
    """Anonymous parent group voided via void="", with a child group promoted
    to the results root via name="/": only the child's ip/mask matches should
    remain, merged across both inputs (results="per_template")."""
    template = """
<template results="per_template">
<input name="Cisco_ios" load="text">
r2#show interfaces | inc line protocol
interface GigabitEthernet1
 vrf forwarding MGMT
 ip address 10.123.89.55 255.255.255.0
</input>
<input name="Cisco_ios" load="text">
r1#show interfaces | inc line protocol:
interface GigabitEthernet1
 description some info
 vrf forwarding MGMT
 ip address 10.123.89.56 255.255.255.0
interface GigabitEthernet2
 ip address 10.123.89.55 255.255.255.0
</input>
<group void="">
interface {{ interface }}
 description {{ description | ORPHRASE }}
<group name="/">
 ip address {{ ip }} {{ mask }}
</group>
</group>
</template>
    """
    parser = ttp(template=template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Only dicts produced by the name="/" child survive; the voided parent
    # contributes nothing of its own to the results.
    assert res == [
        [
            {"ip": "10.123.89.55", "mask": "255.255.255.0"},
            {"ip": "10.123.89.56", "mask": "255.255.255.0"},
            {"ip": "10.123.89.55", "mask": "255.255.255.0"},
        ]
    ]
# test_anonymous_group_with_child_group_empty_absolute_path()
def test_anonymous_group_with_per_template_mode():
    """results="per_template": matches from every added input are merged into
    one flat list, and record/set carry the hostname captured by the voided
    group into each interface match."""
    template = """
<template results="per_template">
<group void="">
hostname {{ hostname | record(hostname_abc) }}
</group>
<group>
interface {{ interface }}
 description {{ description | ORPHRASE }}
 ip address {{ ip }} {{ mask }}
 {{ hostname | set(hostname_abc) }}
</group>
</template>
    """
    datum_1 = """
hostname r2
!
interface GigabitEthernet1
 vrf forwarding MGMT
 ip address 10.123.89.55 255.255.255.0
    """
    datum_2 = """
hostname r1
!
interface GigabitEthernet1
 description some info
 vrf forwarding MGMT
 ip address 10.123.89.56 255.255.255.0
interface GigabitEthernet2
 ip address 10.123.89.55 255.255.255.0
    """
    parser_a = ttp(template=template)
    for datum in (datum_1, datum_2):
        parser_a.add_input(datum)
    parser_a.parse()
    res = parser_a.result()
    expected = [
        [
            {
                "hostname": "r2",
                "interface": "GigabitEthernet1",
                "ip": "10.123.89.55",
                "mask": "255.255.255.0",
            },
            {
                "description": "some info",
                "hostname": "r1",
                "interface": "GigabitEthernet1",
                "ip": "10.123.89.56",
                "mask": "255.255.255.0",
            },
            {
                "hostname": "r1",
                "interface": "GigabitEthernet2",
                "ip": "10.123.89.55",
                "mask": "255.255.255.0",
            },
        ]
    ]
    assert res == expected
# test_anonymous_group_with_per_template_mode()
| 2.59375 | 3 |