#!/usr/bin/env python
import os
from os import environ
from os.path import dirname, join, isdir
from uuid import uuid4 as random_uuid
from netmiko import utilities
from netmiko._textfsm import _clitable as clitable
RESOURCE_FOLDER = join(dirname(dirname(__file__)), "etc")
CONFIG_FILENAME = join(RESOURCE_FOLDER, ".netmiko.yml")
def test_load_yaml_file():
"""Read a YAML file successfully"""
filename = join(RESOURCE_FOLDER, "yaml_test.yml")
expected = {
"answer": 42,
"hello": "world",
"complex": {"truth": False, "key": "value"},
}
assert utilities.load_yaml_file(filename) == expected
def test_invalid_yaml_file():
"""Try to read an invalid YAML file"""
filename = join(RESOURCE_FOLDER, "this_should_not_exist.yml")
try:
utilities.load_yaml_file(filename)
except SystemExit as exc:
assert isinstance(exc, SystemExit)
return
assert False
def test_find_cfg_file():
"""Try to find a configuration file"""
assert utilities.find_cfg_file(CONFIG_FILENAME) == CONFIG_FILENAME
def test_load_cfg_file():
"""Try to load a configuration file"""
expected = {
"rtr1": {
"device_type": "cisco_ios",
"ip": "10.10.10.1",
"username": "admin",
"password": "cisco123",
"secret": "cisco123",
},
"rtr2": {
"device_type": "cisco_ios",
"ip": "10.10.10.2",
"username": "admin",
"password": "cisco123",
"secret": "cisco123",
},
"cisco": ["rtr1", "rtr2"],
}
assert utilities.load_devices(CONFIG_FILENAME) == expected
def test_obtain_all_devices():
"""Dynamically create 'all' group."""
netmiko_tools_load = utilities.load_devices(CONFIG_FILENAME)
expected = {
"rtr1": {
"device_type": "cisco_ios",
"ip": "10.10.10.1",
"username": "admin",
"password": "cisco123",
"secret": "cisco123",
},
"rtr2": {
"device_type": "cisco_ios",
"ip": "10.10.10.2",
"username": "admin",
"password": "cisco123",
"secret": "cisco123",
},
}
result = utilities.obtain_all_devices(netmiko_tools_load)
assert result == expected
def test_find_netmiko_dir():
"""Try to get the netmiko_dir"""
folder = dirname(__file__)
environ["NETMIKO_DIR"] = folder
result = utilities.find_netmiko_dir()
assert result[0] == folder
assert result[1].endswith("/tmp")
def test_invalid_netmiko_dir():
"""Try with an invalid netmiko_base_dir"""
environ["NETMIKO_DIR"] = "/"
try:
utilities.find_netmiko_dir()
except ValueError as exc:
assert isinstance(exc, ValueError)
return
assert False
def test_string_to_bytes():
"""Convert string to bytes"""
assert utilities.write_bytes("test") == b"test"
def test_bytes_to_bytes():
"""Convert bytes to bytes"""
result = b"hello world"
assert utilities.write_bytes(result) == result
def test_invalid_data_to_bytes():
"""Convert an invalid data type to bytes"""
try:
utilities.write_bytes(456779)
except ValueError as exc:
assert isinstance(exc, ValueError)
return
assert False
def test_ensure_resource_dir_exists():
"""Ensure that the resource folder exists"""
utilities.ensure_dir_exists(RESOURCE_FOLDER)
def test_ensure_file_exists():
"""Ensure that a file makes ensure_dir_exists raise an error"""
try:
utilities.ensure_dir_exists(__file__)
except ValueError as exc:
assert isinstance(exc, ValueError)
return
assert False
def test_clitable_to_dict():
"""Converts TextFSM cli_table object to list of dictionaries"""
table = clitable.CliTable(template_dir=RESOURCE_FOLDER)
text_filename = join(RESOURCE_FOLDER, "textfsm.txt")
template_filename = join(RESOURCE_FOLDER, "cisco_ios_show_version.template")
with open(text_filename) as data_file:
text = data_file.read()
with open(template_filename) as template_file:
table = table._ParseCmdItem(text, template_file)
result = utilities.clitable_to_dict(table)
assert result == [{"model": "4500"}]
def test_get_structured_data():
"""Convert raw CLI output to structured data using TextFSM template"""
environ["NET_TEXTFSM"] = RESOURCE_FOLDER
raw_output = "Cisco IOS Software, Catalyst 4500 L3 Switch Software"
result = utilities.get_structured_data(
raw_output, platform="cisco_ios", command="show version"
)
assert result == [{"model": "4500"}]
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------------
# stress test scopez_server
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import sys
import argparse
import time
import signal
import json
import random
import base64
import datetime
import requests
from urllib.request import urlopen
from urllib.request import Request
# ------------------------------------------------------------------------------
# Globals
# ------------------------------------------------------------------------------
g_run = True
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def signal_handler(signal, frame):
global g_run
g_run = False
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def print_banner():
print('+-----------------------------------------------------------------------------+')
print('| SCOPEZ S E R V E R S T R E S S T E S T E R |')
print('+------------+------------+------------+------------+------------+------------+')
print('| Req/s | 200s | 300s | 400s | 500s | Confs/s |')
print('+------------+------------+------------+------------+------------+------------+')
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def print_stats_line(a_time_delta_ms, a_num_reqs, a_num_configs, a_results):
if '200' not in a_results:
a_results['200'] = 0
if '300' not in a_results:
a_results['300'] = 0
if '400' not in a_results:
a_results['400'] = 0
if '500' not in a_results:
a_results['500'] = 0
print('| %10.2f | %10d | %10d | %10d | %10d | %10d |' % (
(a_num_reqs*1000/a_time_delta_ms),
a_results['200'],
a_results['300'],
a_results['400'],
a_results['500'],
(a_num_configs*1000/a_time_delta_ms)))
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def get_rqst(a_host, a_id, a_vectors, a_idx, a_results):
l_url = a_host
l_v = a_vectors[a_idx]
l_headers = {'x-ec-scopes-id': str(a_id)}
l_body = ''
if 'uri' in l_v:
l_url = '%s/%s'%(a_host, l_v['uri'])
if 'query_string' in l_v:
l_url += '?'
l_url += l_v['query_string']
if 'headers' in l_v and len(l_v['headers']):
l_headers.update(l_v['headers'])
if 'body' in l_v:
l_body = base64.b64decode(l_v['body'])
else:
l_body = l_body.encode()
l_r = requests.get(l_url, headers = l_headers)
l_code = l_r.status_code
if l_code >= 200 and l_code < 300:
if '200' in a_results:
a_results['200'] += 1
else:
a_results['200'] = 1
if l_code >= 300 and l_code < 400:
if '300' in a_results:
a_results['300'] += 1
else:
a_results['300'] = 1
if l_code >= 400 and l_code < 500:
if '400' in a_results:
a_results['400'] += 1
else:
a_results['400'] = 1
if l_code >= 500 and l_code < 600:
if '500' in a_results:
a_results['500'] += 1
else:
a_results['500'] = 1
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def post_config(a_host, a_template, a_type, a_idx):
if isinstance(a_template, list):
for l_instance in a_template:
if 'last_modified_date' in l_instance:
l_instance['last_modified_date'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if 'name' in l_instance:
l_instance['name'] = str(a_idx)
else:
if "last_modified_date" in a_template:
a_template['last_modified_date'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if 'name' in a_template:
a_template['name'] = str(a_idx)
l_headers = {}
l_headers['Content-type'] = 'application/json'
l_url = '%s/update_%s'%(a_host, a_type)
l_body = json.dumps(a_template)
# ------------------------------------------------------
# POST
# ------------------------------------------------------
try:
l_rq = Request(l_url, l_body.encode(), l_headers)
l_r = urlopen(l_rq, timeout=20.0)
except Exception as l_e:
print('error: performing POST to %s. Exception: %s' % (l_url, l_e))
sys.exit(1)
l_body = l_r.read().decode()
if l_r.getcode() != 200:
print('error: performing POST to %s -status: %d. Response: %s' % (l_url, l_r.getcode(), l_body))
sys.exit(1)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def scopez_server_stress(a_verbose,
a_port,
a_template,
a_type,
a_vector_file,
a_num_ids):
global g_run
l_host = 'http://127.0.0.1:%d'%(a_port)
# ------------------------------------------------------
# read template
# ------------------------------------------------------
l_template = []
try:
with open(a_template) as l_f:
l_template = json.load(l_f)
except Exception as l_e:
print('error opening template file: %s. Reason: %s error: %s, doc: %s' % (
a_template, type(l_e), l_e, l_e.__doc__))
sys.exit(-1)
l_time_ms_last = time.time()*1000
i_c = 0
while g_run:
i_c += 1
post_config(l_host, l_template, a_type, i_c)
l_time_ms_cur = time.time()*1000
if l_time_ms_cur > (l_time_ms_last + 100):
l_time_ms_last = time.time()*1000
l_time_ms_next = l_time_ms_last + 100
print('%6.2f done' % (float(i_c) / a_num_ids * 100.0))
if i_c == a_num_ids:
break
if not g_run:
return
print_banner()
# ------------------------------------------------------
# read vector file
# ------------------------------------------------------
l_vectors = []
try:
with open(a_vector_file) as l_f:
l_vectors = json.load(l_f)
except Exception as l_e:
print('error opening vector file: %s. Reason: %s error: %s, doc: %s' % (
a_vector_file, type(l_e), l_e, l_e.__doc__))
sys.exit(-1)
# print(l_vectors)
# ------------------------------------------------------
# setup
# ------------------------------------------------------
l_v_size = len(l_vectors)
l_v_idx = 0
l_time_ms_last = time.time()*1000
l_num_reqs = 0
l_num_reqs_total = 0
l_num_confs = 0
l_num_confs_total = 0
l_results = {}
# ------------------------------------------------------
# run...
# ------------------------------------------------------
while g_run:
l_id = random.randint(1, a_num_ids)
get_rqst(l_host, l_id, l_vectors, l_v_idx, l_results)
l_v_idx += 1
if l_v_idx >= l_v_size:
l_v_idx = 0
l_num_reqs += 1
l_num_reqs_total += 1
if l_num_reqs_total % 100 == 0:
post_config(l_host, l_template, a_type, int(l_id))
l_num_confs += 1
l_num_confs_total += 1
l_time_ms_cur = time.time()*1000
if l_time_ms_cur > (l_time_ms_last + 100):
print_stats_line(l_time_ms_cur - l_time_ms_last, l_num_reqs, l_num_confs, l_results)
l_time_ms_last = time.time()*1000
l_time_ms_next = l_time_ms_last + 100
l_num_reqs = 0
l_num_confs = 0
l_results = {}
# ------------------------------------------------------
# done...
# ------------------------------------------------------
print('...shutting down...')
# ------------------------------------------------------------------------------
# main
# ------------------------------------------------------------------------------
def main(argv):
l_arg_parser = argparse.ArgumentParser(
description='scopez_server stress tester.',
usage='%(prog)s -t <any one template file (acl, rules, profile, scopes, limit)> -a <template type> -x <request vector file>',
epilog='')
l_arg_parser.add_argument(
'-v',
'--verbose',
dest='verbose',
help='Verbosity.',
action='store_true',
default=False,
required=False)
l_arg_parser.add_argument(
'-t',
'--template',
dest='template',
help='acl/rules/profile/scopes template(REQUIRED).',
required=True)
l_arg_parser.add_argument(
'-a',
'--template_type',
dest='type',
help='type of template - should be acl, rules, scopes or profile',
required=True)
l_arg_parser.add_argument(
'-x',
'--vectors',
dest='vector_file',
help='request vector file.',
required=True)
l_arg_parser.add_argument(
'-p',
'--port',
dest='port',
help='scopez_server port (default: 12345).',
default=12345,
type=int,
required=False)
l_arg_parser.add_argument(
'-n',
'--num_ids',
dest='num_ids',
help='number of account ids to cycle through (default: 1).',
type=int,
default=1,
required=False)
l_args = l_arg_parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
scopez_server_stress(a_verbose=l_args.verbose,
a_port=l_args.port,
a_template=l_args.template,
a_type=l_args.type,
a_vector_file=l_args.vector_file,
a_num_ids=l_args.num_ids)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
if __name__ == "__main__":
main(sys.argv[1:])
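# ------------------------------------------------------------------------------
# example invocation (a sketch: the file names and values below are illustrative,
# not taken from the original repo):
#   ./scopez_stress.py -t scopes_template.json -a scopes -x request_vectors.json -p 12345 -n 10
# ------------------------------------------------------------------------------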
|
"""
Created on 13 Jul 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdDisplay(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-u UDS] [-v]", version="%prog 1.0")
# optional...
self.__parser.add_option("--uds", "-u", type="string", nargs=1, action="store", dest="uds",
help="receive messages from Unix domain socket (instead of stdin)")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
@property
def uds(self):
return self.__opts.uds
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdDisplay:{uds:%s, verbose:%s}" % (self.uds, self.verbose)
|
from typing import Tuple, Optional, List
import warnings
import ctypes
import operator
from functools import reduce
import torch
import numpy as np
from dqc.hamilton.intor.lcintwrap import LibcintWrapper
from dqc.hamilton.intor.utils import np2ctypes, int2ctypes, CGTO, CPBC, \
c_null_ptr
from dqc.hamilton.intor.pbcintor import PBCIntOption, _check_and_set_pbc, \
_get_default_options, _get_default_kpts, \
_concat_atm_bas_env
from dqc.utils.types import get_complex_dtype
from dqc.utils.pbc import estimate_ovlp_rcut
from dqc.hamilton.intor.lattice import Lattice
from dqc.hamilton.intor.namemgr import IntorNameManager
__all__ = ["pbcft_int1e", "pbcft_overlap"]
# Fourier transform integrals
def pbcft_int1e(shortname: str, wrapper: LibcintWrapper,
other: Optional[LibcintWrapper] = None,
gvgrid: Optional[torch.Tensor] = None,
kpts: Optional[torch.Tensor] = None,
options: Optional[PBCIntOption] = None):
r"""
Performing the periodic boundary condition (PBC) on 1-electron Fourier
Transform integrals, i.e.
$$
\sum_\mathbf{T} e^{-i \mathbf{k}\cdot\mathbf{T}} \int \exp(-i\mathbf{G}\cdot\mathbf{r})
\phi_i(\mathbf{r}) \phi_j(\mathbf{r}-\mathbf{T})\ \mathrm{d}\mathbf{r}
$$
Arguments
---------
shortname: str
The shortname of the integral (i.e. without the prefix `int1e_` or else)
wrapper: LibcintWrapper
The environment wrapper containing the basis
other: Optional[LibcintWrapper]
Another environment wrapper containing the basis. This environment
must have the same complete environment as `wrapper` (e.g. `other` can be
a subset of `wrapper`). If unspecified, then `other = wrapper`.
gvgrid: Optional[torch.Tensor]
The reciprocal coordinate of $\mathbf{G}$ with shape `(nggrid, ndim)`.
If unspecified, then it is assumed to be all zeros.
kpts: Optional[torch.Tensor]
k-points where the integration is supposed to be performed. If specified,
it should have the shape of `(nkpts, ndim)`. Otherwise, it is assumed
to be all zeros.
options: Optional[PBCIntOption]
The integration options. If unspecified, then just use the default
value of `PBCIntOption`.
Returns
-------
torch.Tensor
A complex tensor representing the 1-electron integral with shape
`(nkpts, *ncomp, nwrapper, nother, nggrid)` where `ncomp` is the Cartesian
components of the integral, e.g. `"ipovlp"` integral will have 3
components each for x, y, and z.
"""
# check and set the default values
other1 = _check_and_set_pbc(wrapper, other)
options1 = _get_default_options(options)
kpts1 = _get_default_kpts(kpts, dtype=wrapper.dtype, device=wrapper.device)
gvgrid1 = _get_default_kpts(gvgrid, dtype=wrapper.dtype, device=wrapper.device)
assert isinstance(wrapper.lattice, Lattice) # check if wrapper has a lattice
return _PBCInt2cFTFunction.apply(
*wrapper.params,
*wrapper.lattice.params,
gvgrid1,
kpts1,
[wrapper, other1],
IntorNameManager("int1e", shortname), options1)
# shortcuts
def pbcft_overlap(wrapper: LibcintWrapper,
other: Optional[LibcintWrapper] = None,
gvgrid: Optional[torch.Tensor] = None,
kpts: Optional[torch.Tensor] = None,
options: Optional[PBCIntOption] = None):
return pbcft_int1e("ovlp", wrapper, other, gvgrid, kpts, options)
################# torch autograd function wrappers #################
class _PBCInt2cFTFunction(torch.autograd.Function):
# wrapper class for the periodic boundary condition 2-centre integrals
@staticmethod
def forward(ctx, # type: ignore
# basis params
allcoeffs: torch.Tensor, allalphas: torch.Tensor, allposs: torch.Tensor,
# lattice params
alattice: torch.Tensor,
# other parameters
gvgrid: torch.Tensor,
kpts: torch.Tensor,
# non-tensor parameters
wrappers: List[LibcintWrapper], int_nmgr: IntorNameManager,
options: PBCIntOption) -> torch.Tensor:
# allcoeffs: (ngauss_tot,)
# allalphas: (ngauss_tot,)
# allposs: (natom, ndim)
out_tensor = PBCFTIntor(int_nmgr, wrappers, gvgrid, kpts, options).calc()
ctx.save_for_backward(allcoeffs, allalphas, allposs, alattice, gvgrid, kpts)
ctx.other_info = (wrappers, int_nmgr, options)
return out_tensor
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[Optional[torch.Tensor], ...]: # type: ignore
raise NotImplementedError("gradients of PBC 2-centre FT integrals are not implemented")
################# integrator object (direct interface to lib*) #################
class PBCFTIntor(object):
def __init__(self, int_nmgr: IntorNameManager, wrappers: List[LibcintWrapper],
gvgrid_inp: torch.Tensor, kpts_inp: torch.Tensor, options: PBCIntOption):
# This class performs the integration once only.
# It is written as a class (rather than a function) for refactoring reasons,
# because the integrals share some parameters.
# No gradients are propagated in the methods of this class.
assert len(wrappers) > 0
wrapper0 = wrappers[0]
kpts_inp_np = kpts_inp.detach().numpy() # (nk, ndim)
GvT = np.asarray(gvgrid_inp.detach().numpy().T, order="C") # (ng, ndim)
opname = int_nmgr.get_ft_intgl_name(wrapper0.spherical)
lattice = wrapper0.lattice
assert isinstance(lattice, Lattice)
# get the output's component shape
comp_shape = int_nmgr.get_intgl_components_shape()
ncomp = reduce(operator.mul, comp_shape, 1)
# estimate the rcut and the lattice translation vectors
coeffs, alphas, _ = wrapper0.params
rcut = estimate_ovlp_rcut(options.precision, coeffs, alphas)
ls = np.asarray(lattice.get_lattice_ls(rcut=rcut))
self.int_type = int_nmgr.int_type
self.wrappers = wrappers
self.GvT = GvT
self.kpts_inp_np = kpts_inp_np
self.opname = opname
self.dtype = wrapper0.dtype
self.device = wrapper0.device
self.comp_shape = comp_shape
self.ncomp = ncomp
self.ls = ls
self.options = options
# this class is meant to be used once
self.integral_done = False
def calc(self) -> torch.Tensor:
assert not self.integral_done
self.integral_done = True
if self.int_type == "int1e":
return self._int2c()
else:
raise ValueError("Unknown integral type: %s" % self.int_type)
def _int2c(self) -> torch.Tensor:
# 2-centre integral
# this function works mostly in numpy
# no gradients propagated in this function (and it's OK)
# this function mostly replicates the `ft_aopair_kpts` function in pyscf
# https://github.com/pyscf/pyscf/blob/master/pyscf/pbc/df/ft_ao.py
# https://github.com/pyscf/pyscf/blob/c9aa2be600d75a97410c3203abf35046af8ca615/pyscf/pbc/df/ft_ao.py#L52
assert len(self.wrappers) == 2
# if ls is too large, the integration might produce a segfault
if self.ls.shape[0] > 1e6:
warnings.warn("The number of neighbours in the integral is very large, "
"it might cause a segfault")
# libpbc will do in-place shift of the basis of one of the wrappers, so
# we need to make a concatenated copy of the wrapper's atm_bas_env
atm, bas, env, ao_loc = _concat_atm_bas_env(self.wrappers[0], self.wrappers[1])
i0, i1 = self.wrappers[0].shell_idxs
j0, j1 = self.wrappers[1].shell_idxs
nshls0 = len(self.wrappers[0].parent)
shls_slice = (i0, i1, j0 + nshls0, j1 + nshls0)
# get the lattice translation vectors and the exponential factors
expkl = np.asarray(np.exp(1j * np.dot(self.kpts_inp_np, self.ls.T)), order='C')
# prepare the output
nGv = self.GvT.shape[-1]
nkpts = len(self.kpts_inp_np)
outshape = (nkpts,) + self.comp_shape + tuple(w.nao() for w in self.wrappers) + (nGv,)
out = np.empty(outshape, dtype=np.complex128)
# do the integration
cintor = getattr(CGTO(), self.opname)
eval_gz = CPBC().GTO_Gv_general
fill = CPBC().PBC_ft_fill_ks1
drv = CPBC().PBC_ft_latsum_drv
p_gxyzT = c_null_ptr()
p_mesh = (ctypes.c_int * 3)(0, 0, 0)
p_b = (ctypes.c_double * 1)(0)
drv(cintor, eval_gz, fill,
np2ctypes(out), # ???
int2ctypes(nkpts),
int2ctypes(self.ncomp),
int2ctypes(len(self.ls)),
np2ctypes(self.ls),
np2ctypes(expkl),
(ctypes.c_int * len(shls_slice))(*shls_slice),
np2ctypes(ao_loc),
np2ctypes(self.GvT),
p_b, p_gxyzT, p_mesh,
int2ctypes(nGv),
np2ctypes(atm), int2ctypes(len(atm)),
np2ctypes(bas), int2ctypes(len(bas)),
np2ctypes(env))
out_tensor = torch.as_tensor(out, dtype=get_complex_dtype(self.dtype),
device=self.device)
return out_tensor
|
import olympe
import os
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing
from olympe.messages.ardrone3.PilotingState import FlyingStateChanged
DRONE_IP = os.environ.get("DRONE_IP", "10.202.0.1")
def test_moveby2():
drone = olympe.Drone(DRONE_IP)
drone.connect()
assert drone(
TakeOff()
>> FlyingStateChanged(state="hovering", _timeout=5)
).wait().success()
assert drone(
moveBy(10, 0, 0, 0)
>> FlyingStateChanged(state="hovering", _timeout=5)
).wait().success()
assert drone(Landing()).wait().success()
drone.disconnect()
if __name__ == "__main__":
test_moveby2()
|
# -*- coding: utf-8 -*-
"""
This module defines the lexer of the QL language. It uses the PLY library to
turn a string into a stream of tokens. PLY is often used as a single script,
but in this case we use a class for better encapsulation and code
organisation.
"""
from ply import lex
from ply.lex import TOKEN
class QLLexer(object):
"""The QLLexer parser shaped as a class"""
# Reserved words. The documentation
# (http://www.dabeaz.com/ply/ply.html#ply_nn6) advises to store them
# separately in order to substitute common string for reserved words.
reserved = {
'boolean': 'BOOLEAN',
'decimal': 'DECIMAL',
'else': 'ELSE',
'false': 'FALSE',
'form': 'FORM',
'if': 'IF',
'string': 'STRING',
'true': 'TRUE'
}
# List of tokens used in the lexer.
tokens = (
'LBRACK', # {
'RBRACK', # }
'LPAREN', # (
'RPAREN', # )
'COLON', # :
'AND', # &&
'OR', # ||
'NOT', # !
'LT', # <
'GT', # >
'LET', # <=
'GET', # >=
'EQ', # ==
'NEQ', # !=
'PLUS', # +
'MINUS', # -
'MULT', # *
'DIV', # /
'ASSIGN', # =
'ID', # anyString
'STR' # "A question?"
) + tuple(reserved.values()) # Added also the reserved words.
# Regular expressions that define the tokens.
t_LBRACK = r'\{'
t_RBRACK = r'\}'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COLON = r'\:'
t_AND = r'&&'
t_OR = r'\|\|'
t_LT = r'<'
t_GT = r'>'
t_LET = r'<='
t_GET = r'>='
t_EQ = r'=='
t_NEQ = r'!='
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_ASSIGN = r'='
t_NOT = r'!'
t_STR = r'\".+\"'
# Regular expressions for tokens that require special interactions.
ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
COMMENT = r'\/\/.*'
NEW_LINE = r'\n+'
WHITESPACE = r'\s+'
DECIMAL = r'\d+(\.\d+)?'
@TOKEN(ID)
def t_ID(self, t):
"""
Identifies the ID tokens. It also identifies all the reserved words by
looking them up in a dictionary and substituting the ID type for the
reserved word.
"""
t.type = self.reserved.get(t.value, 'ID') # Check for reserved words
return t
@TOKEN(COMMENT)
def t_COMMENT(self, t):
"""Comments are ignored"""
pass
@TOKEN(WHITESPACE)
def t_WHITESPACE(self, t):
"""White spaces are ignored"""
pass
@TOKEN(NEW_LINE)
def t_newline(self, t):
"""Tracks the line number for error reporting purposes."""
t.lexer.lineno += len(t.value)
@TOKEN(DECIMAL)
def t_DECIMAL(self, t):
"""We make sure the value returned is an float"""
t.value = float(t.value)
return t
def t_error(self, t):
"""
Error handler. It logs when a character is not recognised by any token
of the lexer.
"""
msg = 'Illegal character {}'.format(t.value[0])
self.checker.register_error(t.lineno, msg)
t.lexer.skip(1)
def __init__(self, checker):
"""Initialises the lexer. It complies with PLY requirements."""
self.checker = checker
self.lexer = lex.lex(module=self, debug=0)
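# A minimal usage sketch (not part of the original module): any object exposing a
# register_error(lineno, message) method can act as the checker, and the QL form
# below is purely illustrative.
if __name__ == '__main__':
    class _PrintChecker(object):
        def register_error(self, lineno, msg):
            print('line {}: {}'.format(lineno, msg))

    ql_lexer = QLLexer(_PrintChecker())
    ql_lexer.lexer.input('form taxOffice { "Did you sell a house?" hasSoldHouse: boolean }')
    for tok in ql_lexer.lexer:
        print(tok.type, tok.value)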
|
from datetime import datetime
import pytest
from sqlalchemy.orm import Session
from itunesdb.web import crud
from itunesdb.web import models
from itunesdb.web import schemas
@pytest.fixture
def track_1_ambient_1(db: Session, album_ambient_1: models.Album) -> models.Track:
return crud.create_track(
schemas.TrackCreate(
size=34687093,
total_time=1041520,
disc_number=1,
disc_count=1,
track_number=1,
track_count=4,
year=1978,
play_count=10,
persistent_id="737B36D75E4131F0",
name="1/1",
artist="Brian Eno",
album_id=album_ambient_1.id,
date_added=datetime.now(),
),
db,
)
@pytest.fixture
def track_2_ambient_1(db: Session, album_ambient_1: models.Album) -> models.Track:
return crud.create_track(
schemas.TrackCreate(
size=19405063,
total_time=534213,
disc_number=1,
disc_count=1,
track_number=2,
track_count=4,
year=1978,
play_count=10,
persistent_id="D4F23EFFB06007FD",
name="2/1",
artist="Brian Eno",
album_id=album_ambient_1.id,
date_added=datetime.now(),
),
db,
)
@pytest.fixture
def track_1_ambient_2(db: Session, album_ambient_2: models.Album) -> models.Track:
return crud.create_track(
schemas.TrackCreate(
size=15480610,
total_time=426533,
disc_number=1,
disc_count=1,
track_number=1,
track_count=10,
year=1980,
play_count=9,
persistent_id="9F2A4665120C2477",
name="First Light",
artist="Harold Budd / Brian Eno",
album_id=album_ambient_2.id,
date_added=datetime.now(),
),
db,
)
@pytest.fixture
def track_2_ambient_2(db: Session, album_ambient_2: models.Album) -> models.Track:
return crud.create_track(
schemas.TrackCreate(
size=3674249,
total_time=89813,
disc_number=1,
disc_count=1,
track_number=2,
track_count=10,
year=1980,
play_count=9,
persistent_id="113F9FC294A88D65",
name="Steal Away",
artist="Harold Budd / Brian Eno",
album_id=album_ambient_2.id,
date_added=datetime.now(),
),
db,
)
|
import os
import flask
import pytest
import uuid
from kaos_backend.controllers.tests import create_job_service, t_any, create_train_zip
from kaos_backend.controllers.train import TrainController
from kaos_backend.exceptions.exceptions import InvalidBundleError
from kaos_backend.util.tests import create_zip
from kaos_model.common import TrainJobListing, SubmissionInfo
def generate_submission_info():
return SubmissionInfo(
job_id=uuid.uuid4().hex,
state="aaa",
started="aa",
duration=1,
hyperopt="pp",
progress="It's ain't much, but it's honest progress"
)
def test_list_train(mocker):
job_service = create_job_service(mocker, workspaces=["pippo"])
submission_info_1 = generate_submission_info()
submission_info_2 = generate_submission_info()
submission_info_3 = generate_submission_info()
job_service.list_training_jobs = mocker.Mock(return_value=[submission_info_1])
job_service.list_build_train_jobs = mocker.Mock(return_value=[submission_info_2])
job_service.list_ingestion_jobs = mocker.Mock(return_value=[submission_info_3])
reference_listing = TrainJobListing(training=[submission_info_1],
building=[submission_info_2],
ingesting=[submission_info_3])
with flask.Flask("Test").app_context():
train_controller = TrainController(job_service)
print(train_controller.list_training_jobs("pippo"))
assert train_controller.list_training_jobs("pippo") == reference_listing
def test_invalid_submit_training(mocker):
service = create_job_service(mocker)
with pytest.raises(InvalidBundleError, match="Bundle is malformed"):
with flask.Flask("Test").app_context():
controller = TrainController(service)
controller.submit_training("test_workspace", "test_user", data_bytes=b'0x1', cpu=None, memory=None)
def test_submit_training_missing_root(mocker):
service = create_job_service(mocker)
data_bytes, _, temp_dir, zip_filename = create_zip()
with pytest.raises(InvalidBundleError, match="Missing root directory in source-code bundle"):
with flask.Flask("Test").app_context():
controller = TrainController(service)
controller.submit_training("test_workspace", "test_user", data_bytes=data_bytes, cpu=None, memory=None)
temp_dir.cleanup()
os.remove(zip_filename)
def test_submit_training(mocker):
service = create_job_service(mocker)
mocker.patch.object(service, 'submit_training_code')
data_bytes, temp_dir, zip_filename = create_train_zip()
service.submit_training_data = mocker.Mock()
with flask.Flask("Test").app_context():
controller = TrainController(service)
controller.submit_training("test_workspace", "test_user", data_bytes=data_bytes, cpu=None, memory=None)
service.submit_training_code.assert_called_with("test_workspace",
"test_user",
t_any(str),
gpu=0,
cpu=None,
memory=None)
temp_dir.cleanup()
os.remove(zip_filename)
|
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import json
import requests
class Segwit(callbacks.Plugin):
threaded = True
def segwit(self, irc, msg, args):
timeout = False
segwit_usage = '0.0%'
str_out = ''
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'
sess = requests.session()
sess.headers = headers
# Take Segwit data from transactionfee.info
url = 'https://transactionfee.info/static/data/paymentsSegwit.csv'
try:
response = sess.get(url, headers=headers, timeout=30)
except requests.exceptions.RequestException:
timeout = True
if not timeout:
if response.status_code == 200:
# Segwit data comes in CSV format.
# Segwit usage for today is the last column of the last row.
csv_data = response.text
last_row = csv_data.split('\n')[-2]
segwit_usage = last_row.split(',')[-1]
irc.reply(segwit_usage, prefixNick=False)
else:
str_out = 'Error ' + str(response.status_code)
irc.reply(str_out, prefixNick=False)
else:
str_out = 'Connection timed out'
irc.reply(str_out, prefixNick=False)
Class = Segwit
# vim:set shiftwidth=4 tabstop=4 expandtab:
|
# Generated by Django 2.2.5 on 2019-09-04 00:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('announcement', '0004_comment'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='last_seen_city',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='announcements', to='location.City'),
),
]
|
import copy
import random
# Consider using the modules imported above.
class Hat:
def __init__(self, **kwargs):
self.contents = []
for k, v in kwargs.items():
for _ in range(v):
self.contents.append(k)
def draw(self, num):
if num > len(self.contents):
drawn_balls = [i for i in self.contents]
self.contents.clear()
return drawn_balls
idx_drawn_balls = random.sample(range(len(self.contents)), k=num)
drawn_balls = [self.contents[i] for i in idx_drawn_balls]
self.contents = [self.contents[i] for i in range(len(self.contents)) if i not in idx_drawn_balls]
return drawn_balls
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
success_cnt = 0
for _ in range(num_experiments):
copy_hat = copy.deepcopy(hat)
drawn_balls = copy_hat.draw(num_balls_drawn)
for color, number in expected_balls.items():
if drawn_balls.count(color) < number:
break
else:
success_cnt += 1
return success_cnt / num_experiments
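# A minimal usage sketch (the colours and counts below are illustrative):
if __name__ == "__main__":
    hat = Hat(blue=4, red=2, green=6)
    probability = experiment(hat=hat,
                             expected_balls={"blue": 2, "green": 1},
                             num_balls_drawn=4,
                             num_experiments=1000)
    print(probability)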
|
import os
import sys
import copy
import torch
import platform
import numpy as np
import pandas as pd
from ray import tune
from tqdm import tqdm
from pathlib import Path
from torchvision import transforms
from torch.utils.data import DataLoader
from sklearn.utils.class_weight import compute_sample_weight
if platform.system() == 'Linux':
from accimage import Image
else:
from PIL import Image
class Dataset(torch.utils.data.Dataset):
def __init__(self, file_paths, labels,
transform=None):
self.file_paths = file_paths
self.labels = labels
self.transform = transform
assert len(file_paths) == len(labels)
if platform.system() == 'Linux':
self.load = load_acc
else:
self.load = load_PIL
def __len__(self):
return len(self.file_paths)
def __getitem__(self, index):
path_img = self.file_paths[index]
assert os.path.isfile(path_img), 'Image path does not exist: {}'.format(path_img)
image = self.load(path_img)
target = self.labels[index]
if self.transform is not None:
image = self.transform(image)
return image, target
def load_acc(path_img):
return Image(path_img)
def load_PIL(path_img):
return Image.open(path_img)
def predict_rna(config):
# Get paths
path_dfs_base = config['path_dfs_base']
path_data_base = config['path_data_base']
path_models_base = config['path_models_base']
# Make sure we are at correct subfolder level with model path
if os.listdir(path_models_base)[0] == 'train':
path_models_base = os.path.join(path_models_base, 'train')
path_df_tile = os.path.join(path_dfs_base, 'df_tile.pkl')
path_df_clusters = os.path.join(path_dfs_base, 'df_clusters.csv')
print('Loading dataframes...')
df_cluster_ids = pd.read_csv(path_df_clusters)
df_tile = pd.read_pickle(path_df_tile)
# Get genes
genes = df_cluster_ids['ensemble_id'].loc[df_cluster_ids['cluster_id'] == config['cluster_id']].values
# Drop non-cancer tiles, select validation or test data
if config['set'] == 'valid':
df_tile = df_tile.loc[~df_tile['Test'].astype(bool)].reset_index(drop=True)
df_tile = df_tile.loc[df_tile['CV'] == config['fold']].reset_index(drop=True)
elif config['set'] == 'test':
df_tile = df_tile.loc[df_tile['Test'].astype(bool)].reset_index(drop=True)
print('Final number of ids: ', len(set(df_tile['case_id'])))
# Set up transform for images with augmentations and normalisations
trans_valid = transforms.Compose([transforms.CenterCrop(config['img_shape']),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# Create dataloader
labels = np.arange(len(df_tile))
filenames = [os.path.join(path_data_base, row['slide_filename'], row['tile_filename'])
for _, row in tqdm(df_tile.iterrows(), total=len(df_tile))]
dataset = Dataset(filenames, labels,
transform=trans_valid)
dataloader = DataLoader(dataset, batch_size=config['batch_size'],
sampler=None,
num_workers=config['n_workers'],
pin_memory=True)
df_out = df_tile.copy()
df_out = df_out.drop(labels='tile_filename', axis=1)
# Find model checkpoint path, load model
search_str = '*cluster_id={},fold={}*/**/*.pth'.format(config['cluster_id'], config['fold'])
device = 'cuda:0'
path_model = str(list(Path(path_models_base).rglob(search_str))[0])
model = torch.load(path_model, map_location=device)
model.eval()
# Collect all prediction in dataset
predictions = list()
labels = list()
for images, labels_mock in tqdm(dataloader):
images = images.to(device)
outputs = model(images)
outputs = outputs.detach().cpu().numpy()
predictions.append(copy.deepcopy(outputs))
labels.append(copy.deepcopy(labels_mock))
# Convert filenames from bytes to string, only take filename, not path
predictions = np.concatenate(predictions)
labels = np.concatenate(labels)
# Make sure that order is correct
assert (labels == np.arange(len(labels))).all()
# Collect predictions, scale with model train mean and variance
for idx_pred, gene in enumerate(genes):
df_out[gene] = predictions[:, idx_pred]*model.vars[idx_pred] + model.means[idx_pred]
# Save as .pkl file
path_df_preds = os.path.join(path_dfs_base,
'predictions_{}/df_pred_{}_{}.pkl'.format(config['set'],
config['cluster_id'],
config['fold']))
df_out.to_pickle(path_df_preds)
tune.report(complete=True)
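# A sketch of the expected config keys (paths and values below are illustrative,
# not from the original project). predict_rna() calls tune.report() at the end,
# so it is assumed to run inside a Ray Tune session, e.g.:
#     tune.run(predict_rna, config=example_config)
example_config = {
    'path_dfs_base': '/data/dfs',        # folder with df_tile.pkl and df_clusters.csv
    'path_data_base': '/data/tiles',     # folder with the tile images
    'path_models_base': '/data/models',  # folder with the trained model checkpoints
    'cluster_id': 0,
    'fold': 0,
    'set': 'valid',                      # 'valid' or 'test'
    'img_shape': 224,
    'batch_size': 64,
    'n_workers': 4,
}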
|
from enum import Enum
import os
class Mode(Enum):
NONE = 0
DEBUG = 1
LOG = 2
class Level(Enum):
ERROR = 0
INFO = 1
TRACE = 2
class Config:
def __new__(cls):
if not hasattr(cls, 'instance') or not cls.instance:
cls.instance = super().__new__(cls)
cls.instance.__initialised = False
return cls.instance
def __init__(self):
if not self.__initialised:
self.__initialised = True
self.__log_path = "log"
self.__level = None
self.__mode = None
self.__rotate_log = True
self.__log_time = False  # assumed default; log_time() would otherwise raise AttributeError
def log_path(self):
env_var = os.environ.get("CVLOG_PATH")
if env_var is not None and env_var.strip():
return env_var
return self.__log_path
def log_time(self):
return self.__log_time
def rotate_log(self):
return self.__rotate_log
def set_curent_mode(self, mode):
self.__mode = self.__get_enum(mode, Mode)
def set_curent_level(self, mode):
self.__level = self.__get_enum(mode, Level)
def set_log_path(self, path):
self.__log_path = path
def set_rotate_log(self, is_rotate_log):
self.__rotate_log = is_rotate_log
def curent_mode(self):
return self.__osenv_or_else("CVLOG_MODE", Mode, self.__mode)
def curent_level(self):
return self.__osenv_or_else("CVLOG_LEVEL", Level, self.__level)
def __osenv_or_else(self, name, enum, current_value):
env_var = os.environ.get(name)
if env_var is not None and env_var.strip():
current_value = self.__get_enum(env_var, enum)
if current_value is None:
current_value = enum(0)
return current_value
def __get_enum(self, value, enum):
if type(value) == enum:
return value
elif type(value) == str and value in enum.__members__:
return enum[value]
raise Exception("Invalid " + value)
def set_mode(mode):
return Config().set_curent_mode(mode)
def set_level(level):
return Config().set_curent_level(level)
def set_path(path):
return Config().set_log_path(path)
def set_rotate_log(is_rotate_log: bool = True):
return Config().set_rotate_log(is_rotate_log)
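# A minimal usage sketch (values are illustrative). Environment variables
# CVLOG_MODE / CVLOG_LEVEL / CVLOG_PATH take precedence over these setters.
if __name__ == "__main__":
    set_mode(Mode.LOG)
    set_level(Level.INFO)
    print(Config().curent_mode(), Config().curent_level(), Config().log_path())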
|
import sys
import os
sys.path.insert(1, os.getcwd())
|
import cv2
import numpy as np
from aip import AipOcr
from PIL import Image, ImageDraw, ImageFont
import os, math
def crop_image(src_img, x_start, x_end, y_start, y_end):
"""
图片裁剪
:param src_img: 原始图片
:param x_start: x 起始坐标
:param x_end: x 结束坐标
:param y_start: y 开始坐标
:param y_end: y 结束坐标
:return:
"""
tmp_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB)
tmp_img = tmp_img[y_start:y_end, x_start:x_end] # 长,宽
return cv2.cvtColor(tmp_img, cv2.COLOR_RGB2BGR)
def adjust_lightness(src_img, lightness_value):
"""
:param src_img: 待调整亮度的图片
:param lightness_value: 亮度值
:return:
"""
height, width, channel = src_img.shape # 获取shape的数值,height和width、通道
# 新建全零图片数组src2,将height和width,类型设置为原图片的通道类型(色素全为零,输出为全黑图片)
src2 = np.zeros([height, width, channel], src_img.dtype)
# new_img = cv2.addWeighted(src_img, a, src2, 1 - a, lightnessValue) # 处理后的图片
new_img = cv2.addWeighted(src_img, 1, src2, 1, lightness_value) # 处理后的图片
return new_img
def add_watermark(src_img, water_text, position, color):
"""
添加水印
:param src_img: 原始图片
:param water_text: 水印文字
:param position: 水印位置
:param color: 水印文字颜色
:return:
"""
# 根据选择的位置,确定水印的起始位置
height, width, channel = src_img.shape
x_padding, y_padding = width * 0.05, height * 0.05 # 与边缘的间距
scale = min((width / 1000), (height / 1000)) # 按照图片的长宽大小对字体进行一个放大,scale 即为放大倍数
font_size = 20 + int(scale) * 5 # 根据 scale 增加字体的大小,从而使得字体大小适应图片的大小
font_path = "{0}/ui/font.ttf".format(os.getcwd())
font = ImageFont.truetype(font_path, font_size, encoding="utf-8") # 获取自定义的字体
(text_width, text_height) = font.getsize(water_text)
x_start, y_start = 0, 0 # 水印文字的左下角坐标
if position == "左上角":
x_start = x_padding
y_start = y_padding
elif position == "右上角":
x_start = width - text_width - x_padding
y_start = y_padding
elif position == "中间":
x_start = (width - text_width) / 2
y_start = (height - text_height) / 2
elif position == "左下角":
x_start = x_padding
y_start = height - y_padding - text_height
elif position == "右下角":
x_start = width - text_width - x_padding
y_start = height - y_padding - text_height
img_pil = Image.fromarray(cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB)) # 将 OpenCV 的 BGR 色彩转换成 PIL 需要的 RGB 色彩
draw = ImageDraw.Draw(img_pil)
draw.text((x_start, y_start), water_text, color, font=font)
return cv2.cvtColor(np.asarray(img_pil), cv2.COLOR_RGB2BGR) # 将 PIL 的 RGB 色彩转换成 OpenCV 的 BGR 色彩
def gaussian_blur(src_img, x_start, x_end, y_start, y_end, ksize, sigmaX):
"""
高斯模糊
"""
blur = src_img[y_start:y_end, x_start:x_end]
blur = cv2.GaussianBlur(blur, ksize, sigmaX)
src_img[y_start:y_end, x_start:x_end] = blur
return src_img
def compress_img(src_img, size):
"""
调整图片到指定大小
"""
return cv2.resize(src_img, size, interpolation=cv2.INTER_AREA)
def img_stitching(images):
"""
图片拼接
"""
stitcher = cv2.Stitcher_create()
status, stitch_img = stitcher.stitch(images)
if status != cv2.Stitcher_OK:
print(f"合拼图片失败,status = {status}")
return stitch_img
def img_encoding(image, dir_path):
"""
图片加密
:return:
"""
height, width, channel = image.shape
# 随机创建密钥文件
img_key = np.random.randint(0, 256, size=[height, width, channel], dtype=np.uint8)
# 保存密钥
np.save(dir_path + "/" + "img_key2", img_key)
# 返回加密后的图片
return cv2.bitwise_xor(image, img_key)
def img_decoding(image, key_file_path):
"""
图片解密
"""
img_key = np.load(key_file_path)
return cv2.bitwise_xor(image, img_key)
def img_ocr(image):
"""
OCR 文字识别
"""
APP_ID = '你的 App ID'
API_KEY = '你的 Api Key'
SECRET_KEY = '你的 Secret Key'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
text = client.basicGeneral(image)
words_result = text["words_result"]
result_str = "" # 存储最终的结果
for w in words_result:
result_str = result_str + w["words"] + "\n"
return result_str
# Filter effects
def black_white_filter(src_img):
"""
Black-and-white filter.
"""
return cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)  # simply convert the image to greyscale
def sketch_filter(src_img):
"""
素描滤镜
"""
# 图像灰度处理
gray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
# 高斯滤波降噪
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
# Canny算子
canny = cv2.Canny(gaussian, 50, 150)
# 阈值化处理
ret, result = cv2.threshold(canny, 100, 255, cv2.THRESH_BINARY_INV)
return result
def embossment_filter(src_img):
"""
浮雕滤镜
"""
# 获取图像行和列
height, width = src_img.shape[:2]
# 图像灰度处理
gray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
result = np.zeros(gray_img.shape, np.uint8)
for w in range(0, width - 1):
new_value = np.int32(gray_img[:, w]) - np.int32(gray_img[:, w + 1]) + 120
new_value[new_value > 255] = 255
new_value[new_value < 0] = 0
result[:, w] = new_value
return result
def reminiscence_filter(src_img):
"""
怀旧滤镜
"""
# 图像怀旧特效
B = 0.272 * src_img[:, :, 2] + 0.534 * src_img[:, :, 1] + 0.131 * src_img[:, :, 0]
G = 0.349 * src_img[:, :, 2] + 0.686 * src_img[:, :, 1] + 0.168 * src_img[:, :, 0]
R = 0.393 * src_img[:, :, 2] + 0.769 * src_img[:, :, 1] + 0.189 * src_img[:, :, 0]
# 像素值大于 255 的,则直接赋值为 255
B[B > 255] = 255
G[G > 255] = 255
R[R > 255] = 255
filter_result = np.dstack((B, G, R)) # 加了滤镜效果后的图片
return np.uint8(filter_result) # 将像素值从 numpy.float64 类型转换成 np.uint8 类型,从而可以正常显示
# for i in range(rows):
# for j in range(cols):
# B = 0.272 * src_img[i, j][2] + 0.534 * src_img[i, j][1] + 0.131 * src_img[i, j][0]
# G = 0.349 * src_img[i, j][2] + 0.686 * src_img[i, j][1] + 0.168 * src_img[i, j][0]
# R = 0.393 * src_img[i, j][2] + 0.769 * src_img[i, j][1] + 0.189 * src_img[i, j][0]
# if B > 255:
# B = 255
# if G > 255:
# G = 255
# if R > 255:
# R = 255
# dst[i, j] = np.uint8((B, G, R))
# return dst
def add_filter(src_img, filter_type):
"""
为图片添加滤镜效果
:param src_img: 原始图片
:param filter_type: 滤镜类型
"""
if filter_type == "黑白":
return black_white_filter(src_img)
elif filter_type == "素描":
return sketch_filter(src_img)
elif filter_type == "浮雕":
return embossment_filter(src_img)
elif filter_type == "怀旧":
return reminiscence_filter(src_img)
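# A minimal usage sketch (the file names are illustrative):
if __name__ == "__main__":
    img = cv2.imread("input.jpg")
    cv2.imwrite("sketch.jpg", add_filter(img, "素描"))
    cv2.imwrite("watermarked.jpg", add_watermark(img, "demo", "右下角", (255, 255, 255)))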
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for slice_sampler_utils.py and slice_sampler_kernel.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class SliceSamplerTest(tf.test.TestCase):
def testOneDimNormal(self):
"""Sampling from the Standard Normal Distribution."""
dtype = np.float32
with self.test_session() as sess:
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
samples, _ = tfp.mcmc.sample_chain(
num_results=500,
current_state=dtype(1),
kernel=tfp.mcmc.SliceSampler(
target.log_prob,
step_size=1.0,
max_doublings=5,
seed=1234),
num_burnin_steps=500,
parallel_iterations=1) # For determinism.
sample_mean = tf.reduce_mean(samples, axis=0)
sample_std = tf.sqrt(
tf.reduce_mean(tf.squared_difference(samples, sample_mean),
axis=0))
[sample_mean, sample_std] = sess.run([sample_mean, sample_std])
self.assertAllClose(0., b=sample_mean, atol=0.1, rtol=0.1)
self.assertAllClose(1., b=sample_std, atol=0.1, rtol=0.1)
def testTwoDimNormal(self):
"""Sampling from a 2-D Multivariate Normal distribution."""
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
num_results = 200
num_chains = 75
with self.test_session() as sess:
# Target distribution is defined through the Cholesky decomposition.
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of 1-d tensors `x` and `y`.
# Then the target log-density is defined as follows:
def target_log_prob(x, y):
# Stack the input tensors together
z = tf.stack([x, y], axis=-1) - true_mean
return target.log_prob(z)
# Initial state of the chain
init_state = [np.ones([num_chains, 1], dtype=dtype),
np.ones([num_chains, 1], dtype=dtype)]
# Run the Slice Sampler for `num_results` iterations for `num_chains`
# independent chains:
[x, y], _ = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=init_state,
kernel=tfp.mcmc.SliceSampler(
target_log_prob_fn=target_log_prob,
step_size=1.0,
max_doublings=5,
seed=47),
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
states = tf.stack([x, y], axis=-1)
sample_mean = tf.reduce_mean(states, axis=[0, 1])
z = states - sample_mean
sample_cov = tf.reduce_mean(tf.matmul(z, z, transpose_a=True),
axis=[0, 1])
[sample_mean, sample_cov] = sess.run([
sample_mean, sample_cov])
self.assertAllClose(true_mean, b=np.squeeze(sample_mean),
atol=0.1, rtol=0.1)
self.assertAllClose(true_cov, b=np.squeeze(sample_cov), atol=0.1, rtol=0.1)
def testFourDimNormal(self):
"""Sampling from a 4-D Multivariate Normal distribution."""
dtype = np.float32
true_mean = dtype([0, 4, -8, 2])
true_cov = dtype([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
num_results = 25
num_chains = 500
with self.test_session() as sess:
# Target distribution is defined through the Cholesky decomposition
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Initial state of the chain
init_state = [np.ones([num_chains, 4], dtype=dtype)]
# Run the Slice Sampler for `num_results` iterations for `num_chains`
# independent chains:
states, _ = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=init_state,
kernel=tfp.mcmc.SliceSampler(
target_log_prob_fn=target.log_prob,
step_size=1.0,
max_doublings=5,
seed=47),
num_burnin_steps=300,
num_steps_between_results=1,
parallel_iterations=1)
result = states[0]
sample_mean = tf.reduce_mean(result, axis=[0, 1])
deviation = tf.reshape(result - sample_mean, shape=[-1, 4])
sample_cov = tf.matmul(deviation, b=deviation, transpose_a=True)
sample_cov /= tf.to_float(tf.shape(deviation)[0])
sample_mean_err = sample_mean - true_mean
sample_cov_err = sample_cov - true_cov
[sample_mean_err, sample_cov_err] = sess.run([sample_mean_err,
sample_cov_err])
self.assertAllClose(np.zeros_like(sample_mean_err), b=sample_mean_err,
atol=0.1, rtol=0.1)
self.assertAllClose(np.zeros_like(sample_cov_err), b=sample_cov_err,
atol=0.1, rtol=0.1)
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=no-member
"""
elmo_config.py is a module for elmo model config
"""
import torch
__author__ = "Ehsan Tavan"
__project__ = "Persian Emoji Prediction"
__credits__ = ["Ehsan Tavan"]
__license__ = "Public Domain"
__version__ = "1.0.0"
__maintainer__ = "Ehsan Tavan"
__email__ = "tavan.ehsan@gmail.com"
__status__ = "Production"
__date__ = "01/08/2021"
RAW_NO_MENTION_DATA_PATH = "../data/Raw/first_categori_no_mention_tweets.csv"
RAW_DATA_PATH = "../data/Raw/first_categori_tweets.csv"
TRAIN_NORMAL_NO_MENTION_DATA_PATH = "../data/Processed/" \
"train_first_categori_no_mention_tweets_normal.csv"
TEST_NORMAL_NO_MENTION_DATA_PATH = "../data/Processed/" \
"test_first_categori_no_mention_tweets_normal.csv"
TRAIN_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
TEST_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
VALIDATION_NORMAL_DATA_PATH = "../data/Processed/" \
"train_first_categori_tweets_normal_5.csv"
GLOVE_NEWS_300D = "../data/Embeddings/news_glove_300d_e10.txt"
SKIPGRAM_NEWS_300D = "../data/Embeddings/skipgram_news_300d_30e.txt"
CBOW_NEWS_300D = "../data/Embeddings/cbow_news_300d_30e.txt"
EMOTION_EMBEDDING_PATH = "../data/Embeddings/word_emotion_dict.pkl"
ELMO_MODEL_PATH = "../models/elmo/"
LOSS_CURVE_PATH = "../models/tmp/Curves/loss_curve.png"
ACC_CURVE_PATH = "../models/tmp/Curves/accuracy_curve.png"
TEXT_FIELD_PATH = "../models/tmp/Fields/text_field"
LABEL_FIELD_PATH = "../models/tmp/Fields/label_field"
LOG_PATH = "../models/tmp/Logs/log.txt"
TEST_AUG_LOG_PATH = "../models/tmp/Logs/test_aug_log.txt"
MODEL_PATH = "../models/tmp/"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N_EPOCHS = 20
BATCH_SIZE = 8
ELMO_OUTPUT_DIM = 1024
LSTM_HIDDEN_DIM = 128
LSTM_LAYERS = 2
DROPOUT = 0.2
BIDIRECTIONAL = True
ADDING_NOISE = False
LR_DECAY = False
TRAIN_AUGMENTATION = False
TEST_AUGMENTATION = False
USE_EMOTION = False
|
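# A minimal TreeNode definition (an assumption: on LeetCode this class is supplied
# by the judge) so that the anonymised Solution snippet below is self-contained;
# a small usage example follows the class.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right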
class Solution:
def dfs(self, root):
if not root:
return 0
return max(self.dfs(root.left),self.dfs(root.right)) + 1
def XXX(self, root: TreeNode) -> bool:
if not root:
return True
if abs(self.dfs(root.left) - self.dfs(root.right)) <= 1:
return self.XXX(root.left) and self.XXX(root.right)
else:
return False
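# Usage sketch: the tree below is height-balanced (subtree depths differ by at
# most 1 at every node), so the check returns True.
if __name__ == "__main__":
    root = TreeNode(1, TreeNode(2, TreeNode(3)), TreeNode(2))
    print(Solution().XXX(root))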
|
import altair as alt
from vega_datasets import data
from rpcjs import Dashboard, Page, set_attribute
import rpcjs.elements as html
import rpcjs.binded as forms
source = data.cars()
columns = list(source.columns)
class MyDynamicPage(Page):
def routes(self):
return '/'
def __init__(self):
self.title = 'MyDynamicPage'
self.xlabel = None
self.ylabel = None
def make_form(self):
"""Make a simple form so the user can input the x and y axis"""
self.xlabel, xlabel_html = forms.select_dropdown(columns, callback=self.make_graph)
self.ylabel, ylabel_html = forms.select_dropdown(columns, callback=self.make_graph)
form = html.div(
html.div(
html.header('X axis', level=5),
xlabel_html),
html.div(
html.header('Y axis', level=5),
ylabel_html))
return form
def make_graph(self):
"""Generate the graph when all the inputs are ready"""
xlabel = self.xlabel.get()
ylabel = self.ylabel.get()
if xlabel is None or ylabel is None:
return
chart = alt.Chart(source).mark_circle().encode(
alt.X(xlabel, type='quantitative'),
alt.Y(ylabel, type='quantitative'),
color='Origin:N'
).properties(
width=500,
height=500
).interactive()
# send our graph back to the page
set_attribute('graph_id', 'srcdoc', html.altair_plot(chart, with_iframe=False))
def main(self):
return html.div(
self.make_form(),
# where our graph will be populated
html.iframe("", id='graph_id'))
if __name__ == '__main__':
# go to http://127.0.0.1:5000/
with Dashboard(__name__) as dash:
dash.add_page(MyDynamicPage())
dash.run()
|
from django.urls import path, include
from .views import dashboard, register_user, edit_user_details, ProfileList, ProfileDetail, start_following
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
urlpatterns = [
## USER MANAGED LOGIN.
# path("login/", account_login, name="login")
# path("login/", auth_views.LoginView.as_view(), name="login"),
# path("logout/", auth_views.LogoutView.as_view(), name="logout"),
# path("password_change/", auth_views.PasswordChangeView.as_view(), name="password_change"),
# path("password_change/done/", auth_views.PasswordChangeDoneView.as_view(), name="password_change_done"),
# path("password_reset/", auth_views.PasswordResetView.as_view(), name="password_reset"),
# path("password_reset/done/", auth_views.PasswordResetDoneView.as_view(), name="password_reset_done"),
# path("reset/<uidb64>/<token>", auth_views.PasswordResetConfirmView.as_view(), name="password_reset_confirm"),
# path("reset/done/", auth_views.PasswordResetCompleteView.as_view(), name="password_reset_complete"),
path("", include("django.contrib.auth.urls")),
path("register/", register_user, name="register"),
path("edit/",edit_user_details, name="edit_user_details"),
path("", dashboard, name="dashboard"),
path("profiles/", login_required(ProfileList.as_view()), name="profiles"),
path("profiles/<pk>/", login_required(ProfileDetail.as_view()), name="profile_details"),
path("start_following/", start_following, name="start_following"),
]
|
import mock
import mopidy_funkwhale
from mopidy_funkwhale import models
from mopidy_funkwhale.client import (convert_uri,
favorites_playlist)
from tests import factories
@mock.patch('mopidy_funkwhale.client.translator')
def test_client_convert_uri_uri(translator):
@convert_uri
def fn(uri=None):
return uri
fn(uri='test')
translator.get_id.assert_called_once_with('test')
@mock.patch('mopidy_funkwhale.client.translator')
def test_client_convert_uri_uris(translator):
@convert_uri
def fn(uris=None):
return uris
fn(uris=['test', 'test2'])
translator.get_id.assert_has_calls([mock.call('test'), mock.call('test2')])
def test_client_get_playlists_refs(client):
playlist = factories.PlaylistJSONFactory()
client.api.get_playlists = lambda: [playlist, favorites_playlist]
client.api.load_all = lambda _: [playlist]
actual = client.get_playlists_refs()
assert actual == [models.playlist_ref(playlist),
models.playlist_ref(favorites_playlist)]
def test_client_get_playlist_ref(client):
playlist = factories.PlaylistJSONFactory()
client.convert_uri = lambda: ''
client.api.get_playlist = lambda _: playlist
actual = client.get_playlist_ref(uri='')
assert actual == models.playlist_ref(playlist)
def test_client_get_playlist(client):
playlist = factories.PlaylistJSONFactory()
tracks = [{
'id': t['id'],
'track': t
} for t in [factories.TrackJSONFactory() for _ in
range(playlist['tracks_count'])]
]
client.convert_uri = lambda: ''
client.api.get_playlist = lambda _: playlist
client.api.get_playlist_tracks = lambda _: tracks
actual = client.get_playlist(uri='')
assert actual == models.playlist(playlist, tracks)
def test_client_get_playlist_items_ref(client):
track = factories.TrackJSONFactory()
client.convert_uri = lambda: ''
client.api.get_playlist_tracks = lambda _: [track]
actual = client.get_playlist_items_refs(uri='')
assert actual == [models.track_ref(track)]
def test_client_get_track(client):
track = factories.TrackJSONFactory()
client.convert_uri = lambda: ''
client.api.get_track = lambda _: track
actual = client.get_track(uri='')
assert actual == models.track(track)
def test_client_get_tracks_list(client):
track = factories.TrackJSONFactory()
client.convert_uri = lambda: ''
client.api.get_track = lambda _: track
actual = client.get_tracks_list(uris=[''])
assert actual == [models.track(track)]
def test_client_get_playback(client):
client.convert_uri = lambda: ''
client.api.get_playback = lambda _: 'uri'
actual = client.get_playback(uri='uri')
assert actual == 'uri'
def test_client_save_playlist_add_track(client, requests_mock):
playlist = factories.PlaylistFactory()
playlist_id = int(playlist.uri)
json_tracklist = [{
'id': t['id'],
'track': t
} for t in [factories.TrackJSONFactory() for _ in
range(0, 10)]
]
new_track = mopidy_funkwhale.models.track(factories.TrackJSONFactory())
# mopidy models are immutable, so we need to make a new playlist
tracklist = tuple(mopidy_funkwhale.models.track(json['track'])
for json in json_tracklist)
local_playlist = models.Playlist(
uri=mopidy_funkwhale.translator.get_playlist_uri(playlist_id),
name=playlist.name,
tracks=tracklist + (new_track, ),
last_modified=playlist.last_modified)
add_mock = mock.Mock()
del_mock = mock.Mock()
favorite_mock = mock.Mock()
unfavorite_mock = mock.Mock()
client.api.add_track_to_playlist = add_mock
client.api.remove_track_from_playlist = del_mock
client.api.favorite_track = favorite_mock
client.api.unfavorite_track = unfavorite_mock
client.get_playlist = lambda uri=None: local_playlist
client.api.get_playlist_tracks = lambda _: json_tracklist
actual = client.save_playlist(local_playlist)
assert actual == local_playlist
add_mock.assert_called_with(str(playlist_id),
mopidy_funkwhale.translator.get_id(
new_track.uri))
del_mock.assert_not_called()
favorite_mock.assert_not_called()
unfavorite_mock.assert_not_called()
def test_client_save_playlist_remove_track(client, requests_mock):
playlist = factories.PlaylistFactory()
playlist_id = int(playlist.uri)
json_tracklist = [{
'id': t['id'],
'track': t
} for t in [factories.TrackJSONFactory() for _ in
range(0, 10)]
]
track_to_del = json_tracklist.pop(-1)
tracklist = tuple(mopidy_funkwhale.models.track(json['track'])
for json in json_tracklist)
local_playlist = models.Playlist(
uri=mopidy_funkwhale.translator.get_playlist_uri(playlist_id),
name=playlist.name,
tracks=tracklist,
last_modified=playlist.last_modified)
add_mock = mock.Mock()
del_mock = mock.Mock()
favorite_mock = mock.Mock()
unfavorite_mock = mock.Mock()
client.api.add_track_to_playlist = add_mock
client.api.remove_track_from_playlist = del_mock
client.api.favorite_track = favorite_mock
client.api.unfavorite_track = unfavorite_mock
client.get_playlist = lambda uri=None: local_playlist
client.api.get_playlist_tracks = lambda _: json_tracklist + [track_to_del]
actual = client.save_playlist(local_playlist)
assert actual == local_playlist
add_mock.assert_not_called()
del_mock.assert_called_with(str(playlist_id), str(track_to_del['id']))
favorite_mock.assert_not_called()
unfavorite_mock.assert_not_called()
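# Taken together, the two save_playlist tests above pin down the sync
# behaviour: the client diffs the local tracklist against the remote one and
# only issues add_track_to_playlist / remove_track_from_playlist calls for the
# differences, leaving the favorite/unfavorite endpoints untouched.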
|
"""Asyncio S3 file operations."""
|
from model.group import Group
from random import randrange
from model.contact import Contact
def test_add_contact_in_group(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
if len(db.get_contact_list()) == 0:
app.contact.add_new(Contact(firstname="random", home_number="87326352378", work="73652363281",
mobile="328744653263", phone2="87439847362"))
old_contact_in_db = db.count_contact_in_group()
if len(app.contact.get_contact_list_in_none_group()) == 0:
app.contact.add_new(Contact(firstname="random", home_number="87326352378", work="73652363281",
mobile="328744653263", phone2="87439847362"))
app.contact.open_none_group()
old_contacts = app.contact.get_contact_list_in_none_group()
index = randrange(len(old_contacts))
contacts = old_contacts[index]
app.contact.add_in_group(contacts.id)
new_contact_in_db = db.count_contact_in_group()
    assert old_contact_in_db + 1 == new_contact_in_db
|
import torch
from typing import Union, Sequence, List, Tuple
import builtins
# Convenience aliases for common composite types that we need
# to talk about in PyTorch
_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]]
# In some cases, these basic types are shadowed by corresponding
# top-level values. The underscore variants let us refer to these
# types. See https://github.com/python/mypy/issues/4146 for why these
# workarounds are necessary
_int = builtins.int
_float = builtins.float
_bool = builtins.bool
_dtype = torch.dtype
_device = torch.device
_qscheme = torch.qscheme
_size = Union[torch.Size, List[_int], Tuple[_int, ...]]
_layout = torch.layout
# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float, builtins.bool]
# Meta-type for "device-like" things. Not to be confused with 'device' (a
# literal device object). This nomenclature is consistent with PythonArgParser.
# None means use the default device (typically CPU)
Device = Union[_device, str, None]
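# A minimal illustrative sketch (not part of the original stubs): the aliases
# above are intended for type annotations. The helper below is hypothetical
# and only shows how _TensorOrTensors and Device might be used together.
def _example_to_device(tensors: _TensorOrTensors, device: Device = None) -> List[torch.Tensor]:
    # Normalise a single tensor into a list, then move each tensor to the
    # requested device; None keeps the tensors where they are.
    if isinstance(tensors, torch.Tensor):
        tensors = [tensors]
    return [t if device is None else t.to(device) for t in tensors]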
|
import threading
import time
import logging
from Core.Logger import log
# URL queue scheduler
class Scheduler(threading.Thread):
__slots__ = ('redis_connection', 'url_rate', 'follow_info_url_queue', 'user_info_url_queue', 'url_queue_name')
def __init__(self, redis_connection, url_rate):
threading.Thread.__init__(self)
        # Set up the Redis connection
self.redis_connection = redis_connection
        # Set the URL scheduling ratio
self.url_rate = url_rate
        # Name of the following/follower info URL queue
self.follow_info_url_queue = 'followInfoURLQueue'
        # Name of the user info URL queue
self.user_info_url_queue = 'userInfoURLQueue'
        # Name of the queue of URLs waiting to be downloaded
self.url_queue_name = 'urlQueue'
if log.isEnabledFor(logging.INFO):
            log.info('Scheduler module initialized')
def run(self):
if log.isEnabledFor(logging.INFO):
            log.info('Scheduler module started successfully')
while True:
            # Stop feeding urlQueue while it already holds too many elements
while self.redis_connection.llen(self.url_queue_name) > 500:
time.sleep(180)
            # Pause when both source queues are empty
follow_info_queue_length = self.redis_connection.llen(self.follow_info_url_queue)
user_info_queue_length = self.redis_connection.llen(self.user_info_url_queue)
if follow_info_queue_length == 0 and user_info_queue_length == 0:
time.sleep(20)
continue
            # Move elements from the two queues into the download URL queue according to the configured ratio
for i in range(self.url_rate):
url_info = self.redis_connection.lpop(self.user_info_url_queue)
if url_info is not None:
self.redis_connection.rpush(self.url_queue_name, url_info)
del url_info
for i in range(10 - self.url_rate):
url_info = self.redis_connection.lpop(self.follow_info_url_queue)
if url_info is not None:
self.redis_connection.rpush(self.url_queue_name, url_info)
del url_info
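# A minimal usage sketch (assumptions: a Redis server is reachable on
# localhost and the `redis` package is installed; the queue names match the
# constants above):
#
#     import redis
#     scheduler = Scheduler(redis.StrictRedis(host='localhost', port=6379), url_rate=7)
#     scheduler.daemon = True
#     scheduler.start()
#
# With url_rate=7, each pass moves up to 7 user-info URLs and 3 follow-info
# URLs into 'urlQueue'.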
|
from enum import Enum
class Exchange(Enum):
Binance = 1
Huobi = 2
Gemini = 3
class BaseDataAPI:
    def __init__(self, exchange: Exchange, api_key: str, secret_key: str):
        self.api_key = api_key
        self.secret_key = secret_key
        self.exchange = exchange
    def set_exchange(self, exchange: Exchange):
        self.exchange = exchange
        return self
    def set_api_key(self, api_key: str):
        self.api_key = api_key
        return self
    def set_secret_key(self, secret_key: str):
        self.secret_key = secret_key
        return self
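# Illustrative usage of the fluent setters (the key strings below are
# placeholders, not real credentials):
if __name__ == "__main__":
    api = BaseDataAPI(Exchange.Binance, api_key="demo-key", secret_key="demo-secret")
    api.set_exchange(Exchange.Huobi).set_api_key("rotated-key").set_secret_key("rotated-secret")
    print(api.exchange, api.api_key)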
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.model_migrate.converter.converter_base import AutoReplace
from federatedml.protobuf.model_migrate.converter.converter_base import ProtoConverterBase
class HeteroSBTConverter(ProtoConverterBase):
def convert(self, param: BoostingTreeModelParam, meta: BoostingTreeModelMeta,
guest_id_mapping: Dict,
host_id_mapping: Dict,
arbiter_id_mapping: Dict
):
feat_importance_list = list(param.feature_importances)
tree_list = list(param.trees_)
replacer = AutoReplace(guest_id_mapping, host_id_mapping, arbiter_id_mapping)
# fp == feature importance
for fp in feat_importance_list:
fp.sitename = replacer.replace(fp.sitename)
fp.fullname = replacer.replace(fp.fullname)
for tree in tree_list:
tree_nodes = list(tree.tree_)
for node in tree_nodes:
node.sitename = replacer.replace(node.sitename)
return param, meta
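# A hedged usage sketch (illustrative only; real party ids and the shape of
# the id mappings depend on the deployment):
#
#     converter = HeteroSBTConverter()
#     new_param, new_meta = converter.convert(
#         param, meta,
#         guest_id_mapping={9999: 10000},
#         host_id_mapping={10000: 10001},
#         arbiter_id_mapping={},
#     )
#
# Every sitename recorded in the feature importances and tree nodes is
# rewritten through AutoReplace according to these mappings.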
|
from botocore.credentials import Credentials
from aiodynamo.credentials import Key
from aiodynamo.sign import make_default_endpoint
KEY = Key("AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
CREDENTIALS = Credentials(KEY.id, KEY.secret)
SERVICE_NAME = "dynamodb"
REGION = "us-east-1"
URL = make_default_endpoint(REGION)
URL_STRING = str(URL)
ACTION = "Query"
PAYLOAD = {
"TableName": "table-name",
"KeyConditionExpression": "#n0.#n1.#n2.#n3.#n4.#n5.#n3.#n2.#n6.#n7.#n1 = :v0 AND size(#n8) <= :v1",
"ScanIndexForward": True,
"ProjectionExpression": "#n9,#n1,#n10[4].#n11",
"FilterExpression": "(begins_with(#n12, :v2) AND contains(#n13, :v3))",
"ExclusiveStartKey": {
"my-hash-key": {"S": "some-value"},
"my-range-key": {"S": "other-value"},
},
"Select": "SPECIFIC_ATTRIBUTES",
"ExpressionAttributeNames": {
"#n0": "m",
"#n1": "y",
"#n2": "-",
"#n3": "h",
"#n4": "a",
"#n5": "s",
"#n6": "k",
"#n7": "e",
"#n8": "my-range-key",
"#n9": "x",
"#n10": "z",
"#n11": "alpha",
"#n12": "foo",
"#n13": "hoge",
},
"ExpressionAttributeValues": {
":v0": {"S": "some-value"},
":v1": {"N": "200"},
":v2": {"S": "bar"},
":v3": {"B": "aGVsbG8gd29ybGQ="},
},
}
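# Note (added for context): the constants above appear to serve as a
# request-signing fixture. PAYLOAD is a representative DynamoDB Query body
# with placeholder names and values; the single-character
# ExpressionAttributeNames are joined with dots in KeyConditionExpression,
# so the condition addresses a deeply nested document path rather than a
# real attribute.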
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
import textwrap
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(
os.path.join(RUNTESTS_DIR, os.pardir, os.pardir, os.pardir))
DEPOT_TOOLS_DIR = os.path.join(BASE_DIR, os.pardir, 'depot_tools')
# Load our common Infra environment.
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
import common.env
common.env.Install(hermetic=False)
def ensure_coverage_importable():
# We want to use the compiled coverage if we can
from pkg_resources import get_build_platform
try:
# Python 2.7 or >= 3.2
from sysconfig import get_python_version
except ImportError:
from distutils.sysconfig import get_python_version
cov_dir = os.path.join(BASE_DIR, 'third_party', 'coverage-3.7.1')
cov_egg = os.path.join(cov_dir, 'dist', 'coverage-3.7.1-py%s-%s.egg' % (
get_python_version(), get_build_platform()))
if cov_egg not in sys.path:
sys.path.insert(0, cov_egg)
try:
from distutils.version import StrictVersion
import coverage
if (StrictVersion(coverage.__version__) < StrictVersion('3.7') or
not coverage.collector.CTracer):
toDel = set()
for m in sys.modules:
if m == 'coverage' or m.startswith('coverage.'):
toDel.add(m)
for k in toDel:
del sys.modules[k]
del coverage
else:
return
except ImportError:
if sys.platform.startswith('win'):
# In order to compile the coverage module on Windows we need to set the
      # 'VS90COMNTOOLS' environment variable. This usually points to the
# installation folder of VS2008 but we can fake it to make it point to the
# version of the toolchain checked in depot_tools.
#
      # This variable usually points to the $(VsInstallDir)\Common7\Tools but is
# only used to access %VS90COMNTOOLS%/../../VC/vcvarsall.bat and therefore
# any valid directory respecting this structure can be used.
vc_path = os.path.join(DEPOT_TOOLS_DIR, 'win_toolchain', 'vs2013_files',
'VC', 'bin')
# If the toolchain isn't available then ask the user to fetch chromium in
# order to install it.
if not os.path.isdir(vc_path):
print textwrap.dedent("""
You probably don't have the Windows toolchain in your depot_tools
checkout. Install it by running:
fetch chromium
""")
sys.exit(1)
os.environ['VS90COMNTOOLS'] = vc_path
try:
import setuptools # pylint: disable=W0612
except ImportError:
print textwrap.dedent("""
No compatible system-wide python-coverage package installed, and
setuptools is not installed either. Please obtain setuptools by:
Debian/Ubuntu:
sudo apt-get install python-setuptools python-dev
OS X:
https://pypi.python.org/pypi/setuptools#unix-including-mac-os-x-curl
Other:
https://pypi.python.org/pypi/setuptools#installation-instructions
""")
sys.exit(1)
# The C-compiled coverage engine is WAY faster (and less buggy) than the pure
# python version, so we build the dist_egg if necessary.
if not os.path.exists(cov_egg):
import subprocess
print 'Building Coverage 3.7.1'
p = subprocess.Popen([sys.executable, 'setup.py', 'bdist_egg'], cwd=cov_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print 'Error while building :('
print stdout
print stderr
if sys.platform.startswith('linux'):
print textwrap.dedent("""
You probably don't have the 'python-dev' package installed. Install
it by running:
sudo apt-get install python-dev
""")
else:
print textwrap.dedent("""
I'm not sure what's wrong, but your system seems incapable of building
python extensions. Please fix that by installing a Python with headers
        and the appropriate command-line build tools for your platform.
""")
sys.exit(1)
ensure_coverage_importable()
from common import find_depot_tools # pylint: disable=W0611
|
import math
import time
class vector:
x = 0.0
y = 0.0
def __init__(self):
        self.x = 0.0
self.y = 0.0
def __str__(self):
return '{x:%s, y:%s}' % (self.x, self.y)
class car:
# given by system
id = None
token = None
# given by system - end
# given by user
name = None
color = None
type = None
# given by user - end
# external control variables
steer_angle = 0.0
throttle = 0
brake = 0
rear_slip = 0
front_slip = 0
# external control variables - end
# external read variables - begin
angle = 0.0
position_wc = vector()
# external read variables - end
# car simulation variables
velocity_wc = vector()
angular_velocity = 0.0
velocity = vector()
acceleration_wc = vector()
rot_angle = 0.0
side_slip = 0.0
slipangle_front = 0.0
slipangle_rear = 0.0
force = vector()
resistance = vector()
acceleration = vector()
torque = 0.0
angular_acceleration = 0.0
sn = 0.0
cs = 0.0
yawspeed = 0.0
weight = 0.0
ftraction = vector()
flatf = vector()
flatr = vector()
# car simulation variables - end
## car configuration
b = 1.0
c = 1.0
    wheel_base = 2.0 # = b + c
h = 1.0
mass = 1500
inertia = 1500
width = 1.5
length = 3.0 # must be > wheel_base
wheel_length = 0.7
wheel_width = 0.3
## car configuration - end
## constants
PI = 3.14159265359
DRAG = 5.0 # factor for air resistance (drag)
RESISTANCE = 30.0 # factor for rolling resistance
CA_R = -5.20 # cornering stiffness
CA_F = -5.0 # cornering stiffness
MAX_GRIP = 2.0 # maximum (normalised) friction force, =diameter of friction circle
## constants - end
def __init__(self, name, color, type):
self.name = name
self.color = color
self.type = type
self.steer_angle = 0.0
self.throttle = 0
self.brake = 0
self.rear_slip = 0
self.front_slip = 0
self.angle = 0.0
self.position_wc = vector()
self.velocity_wc = vector()
self.angular_velocity = 0.0
self.velocity = vector()
self.acceleration_wc = vector()
self.rot_angle = 0.0
self.side_slip = 0.0
self.slipangle_front = 0.0
self.slipangle_rear = 0.0
self.force = vector()
self.resistance = vector()
self.acceleration = vector()
self.torque = 0.0
self.angular_acceleration = 0.0
self.sn = 0.0
self.cs = 0.0
self.yawspeed = 0.0
self.weight = 0.0
self.ftraction = vector()
self.flatf = vector()
self.flatr = vector()
def get_info(self):
return (self.name, self.color, self.type)
def get_pos(self):
return (self.position_wc.x, self.position_wc.y, self.angle, self.steer_angle, self.throttle)
def sign(self, val):
if val >= 0.0:
return 1.0
return -1.0
def move_tick(self, delta_t):
self.sn = math.sin(self.angle)
self.cs = math.cos(self.angle)
if self.steer_angle != 0.0:
breakme = 1
# SAE convention: x is to the front of the car, y is to the right, z is down
# bangz: Velocity of Car. Vlat and Vlong
# transform velocity in world reference frame to velocity in car reference frame
self.velocity.x = self.cs * self.velocity_wc.y + self.sn * self.velocity_wc.x
self.velocity.y = -self.sn * self.velocity_wc.y + self.cs * self.velocity_wc.x
# Lateral force on wheels
#
# Resulting velocity of the wheels as result of the yaw rate of the car body
# v = yawrate * r where r is distance of wheel to CG (approx. half wheel base)
# yawrate (ang.velocity) must be in rad/s
#
self.yawspeed = self.wheel_base * 0.5 * self.angular_velocity
#bangz: velocity.x = fVLong_, velocity.y = fVLat_
if self.velocity.x == 0: # TODO: fix math.singularity
self.rot_angle = 0
else:
self.rot_angle = math.atan2(self.yawspeed, self.velocity.x)
# Calculate the side slip angle of the car (a.k.a. beta)
if self.velocity.x == 0: # TODO: fix math.singularity
self.side_slip = 0
else:
self.side_slip = math.atan2(self.velocity.y, self.velocity.x)
# Calculate slip angles for front and rear wheels (a.k.a. alpha)
self.slipangle_front = self.side_slip + self.rot_angle - self.steer_angle
self.slipangle_rear = self.side_slip - self.rot_angle
# weight per axle = half car mass times 1G (=9.8m/s^2)
self.weight = self.mass * 9.8 * 0.5
# lateral force on front wheels = (Ca * slip angle) capped to friction circle * load
self.flatf.x = 0
self.flatf.y = self.CA_F * self.slipangle_front
self.flatf.y = min(self.MAX_GRIP, self.flatf.y)
self.flatf.y = max(-self.MAX_GRIP, self.flatf.y)
self.flatf.y *= self.weight
if self.front_slip != 0:
self.flatf.y *= 0.5
# lateral force on rear wheels
self.flatr.x = 0
self.flatr.y = self.CA_R * self.slipangle_rear
self.flatr.y = min(self.MAX_GRIP, self.flatr.y)
self.flatr.y = max(-self.MAX_GRIP, self.flatr.y)
self.flatr.y *= self.weight
if self.rear_slip != 0:
self.flatr.y *= 0.5
        # longitudinal force on rear wheels - very simple traction model
self.ftraction.x = 100 * (self.throttle - self.brake * self.sign(self.velocity.x))
self.ftraction.y = 0
if self.rear_slip != 0:
self.ftraction.x *= 0.5
# Forces and torque on body
# drag and rolling resistance
self.resistance.x = -(self.RESISTANCE * self.velocity.x + self.DRAG * self.velocity.x * abs(self.velocity.x))
self.resistance.y = -(self.RESISTANCE * self.velocity.y + self.DRAG * self.velocity.y * abs(self.velocity.y))
# sum forces
self.force.x = self.ftraction.x + math.sin(self.steer_angle) * self.flatf.x + self.flatr.x + self.resistance.x
self.force.y = self.ftraction.y + math.cos(self.steer_angle) * self.flatf.y + self.flatr.y + self.resistance.y
# torque on body from lateral forces
self.torque = self.b * self.flatf.y - self.c * self.flatr.y
# Acceleration
# Newton F = m.a, therefore a = F/m
self.acceleration.x = self.force.x / self.mass
self.acceleration.y = self.force.y / self.mass
self.angular_acceleration = self.torque / self.inertia
# Velocity and position
# transform acceleration from car reference frame to world reference frame
self.acceleration_wc.x = self.cs * self.acceleration.y + self.sn * self.acceleration.x
self.acceleration_wc.y = -self.sn * self.acceleration.y + self.cs * self.acceleration.x
# velocity is integrated acceleration
#
self.velocity_wc.x += delta_t * self.acceleration_wc.x
self.velocity_wc.y += delta_t * self.acceleration_wc.y
# position is integrated velocity
#
self.position_wc.x += delta_t * self.velocity_wc.x
self.position_wc.y += delta_t * self.velocity_wc.y
# Angular velocity and heading
# integrate angular acceleration to get angular velocity
#
self.angular_velocity += delta_t * self.angular_acceleration
# integrate angular velocity to get angular orientation
#
self.angle += delta_t * self.angular_velocity
def __str__(self):
return str(self.__dict__)
def main():
my_car = car('john', 'red', 'truck')
my_car.throttle = 10
my_car.steer_angle = my_car.PI / 256.0
for i in range(0, 100):
my_car.move_tick(16.0 / 1000.0)
print my_car.steer_angle
print my_car.throttle
print my_car.brake
print my_car.rear_slip
print my_car.front_slip
print my_car.angle
print my_car.position_wc
print my_car.velocity_wc
print my_car.angular_velocity
print my_car.velocity
print my_car.acceleration_wc
print my_car.rot_angle
print my_car.side_slip
print my_car.slipangle_front
print my_car.slipangle_rear
print my_car.force
print my_car.resistance
print my_car.acceleration
print my_car.torque
print my_car.angular_acceleration
print my_car.sn
print my_car.cs
print my_car.yawspeed
print my_car.weight
print my_car.ftraction
print my_car.flatf
print my_car.flatr
return
print car.PI
while True:
print
my_car.move_tick(16.0 / 1000)
time.sleep(1)
print my_car.get_pos()
if __name__ == '__main__':
main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Optional
from opsgenie_sdk import (
AlertApi,
ApiClient,
CloseAlertPayload,
Configuration,
CreateAlertPayload,
OpenApiException,
SuccessResponse,
)
from airflow.hooks.base import BaseHook
class OpsgenieAlertHook(BaseHook):
"""
This hook allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This hook sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this hook.
:param opsgenie_conn_id: The name of the Opsgenie connection to use
"""
conn_name_attr = 'opsgenie_conn_id'
default_conn_name = 'opsgenie_default'
conn_type = 'opsgenie'
hook_name = 'Opsgenie'
def __init__(self, opsgenie_conn_id: str = 'opsgenie_default') -> None:
super().__init__() # type: ignore[misc]
self.conn_id = opsgenie_conn_id
configuration = Configuration()
conn = self.get_connection(self.conn_id)
configuration.api_key['Authorization'] = conn.password
configuration.host = conn.host or 'https://api.opsgenie.com'
self.alert_api_instance = AlertApi(ApiClient(configuration))
def _get_api_key(self) -> str:
"""
Get the API key from the connection
:return: API key
:rtype: str
"""
conn = self.get_connection(self.conn_id)
return conn.password
def get_conn(self) -> AlertApi:
"""
Get the underlying AlertApi client
:return: AlertApi client
:rtype: opsgenie_sdk.AlertApi
"""
return self.alert_api_instance
def create_alert(self, payload: Optional[dict] = None) -> SuccessResponse:
"""
Create an alert on Opsgenie
:param payload: Opsgenie API Create Alert payload values
See https://docs.opsgenie.com/docs/alert-api#section-create-alert
:return: api response
:rtype: opsgenie_sdk.SuccessResponse
"""
payload = payload or {}
try:
create_alert_payload = CreateAlertPayload(**payload)
api_response = self.alert_api_instance.create_alert(create_alert_payload)
return api_response
except OpenApiException as e:
self.log.exception('Exception when sending alert to opsgenie with payload: %s', payload)
raise e
def close_alert(
self,
identifier: str,
identifier_type: Optional[str] = 'id',
payload: Optional[dict] = None,
**kwargs: Optional[dict],
) -> SuccessResponse:
"""
Close an alert in Opsgenie
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param payload: Request payload of closing alert action.
see https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/AlertApi.md#close_alert
:param kwargs: params to pass to the function
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
:rtype: opsgenie_sdk.SuccessResponse
"""
payload = payload or {}
try:
close_alert_payload = CloseAlertPayload(**payload)
api_response = self.alert_api_instance.close_alert(
identifier=identifier,
identifier_type=identifier_type,
close_alert_payload=close_alert_payload,
**kwargs,
)
return api_response
except OpenApiException as e:
self.log.exception('Exception when closing alert in opsgenie with payload: %s', payload)
raise e
def delete_alert(
self,
identifier: str,
identifier_type: Optional[str] = None,
user: Optional[str] = None,
source: Optional[str] = None,
) -> SuccessResponse:
"""
Delete an alert in Opsgenie
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias.
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner.
:param source: Display name of the request source
:return: SuccessResponse
:rtype: opsgenie_sdk.SuccessResponse
"""
try:
api_response = self.alert_api_instance.delete_alert(
identifier=identifier,
identifier_type=identifier_type,
user=user,
source=source,
)
return api_response
except OpenApiException as e:
self.log.exception('Exception when calling AlertApi->delete_alert: %s\n', e)
raise e
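# A minimal usage sketch (assumption: an Airflow connection named
# 'opsgenie_default' exists whose password holds an Opsgenie API key):
#
#     hook = OpsgenieAlertHook(opsgenie_conn_id='opsgenie_default')
#     hook.create_alert({'message': 'Job failed', 'priority': 'P3'})
#     hook.close_alert(identifier='my-alert-alias', identifier_type='alias')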
|
# Generated by Django 2.2.9 on 2020-01-01 17:07
from django.db import migrations, models
import django.db.models.deletion
import survey_app_repo.survey.models
class Migration(migrations.Migration):
dependencies = [
('survey', '0003_auto_20200101_1301'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=255)),
('description', models.CharField(max_length=400)),
('question_type', models.CharField(choices=[(survey_app_repo.survey.models.QuestionTypes('Description'), 'Description'), (survey_app_repo.survey.models.QuestionTypes('Text'), 'Text')], max_length=4)),
('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='survey.Survey')),
],
),
]
|
from setuptools import setup
setup(
name='microconfig',
version='0.0.1',
packages=['microconfig'],
url='',
license='',
author='haimcohen',
author_email='hcloli@gmail.com',
description=''
)
|
# All the ways of invalidating getattr
# TODO should test getclsattr as well
# TODO should also test some crazier stuff, like descriptors with inheritance
def get(self, obj, typ):
print '__get__ called'
print type(self)
print type(obj)
print typ
return self.elem
def set(self, obj, typ):
    print '__set__ called'
print type(self)
print type(obj)
print typ
class Descriptor(object):
def __init__(self, elem):
self.elem = elem
def __str__(self):
return 'Descriptor object'
class C(object):
a = Descriptor(0)
b = Descriptor(lambda : 0)
c = C()
def f():
print c.a
print C.a
def g():
try:
print c.b()
except TypeError:
print 'got TypeError'
try:
print C.b()
except TypeError:
print 'got TypeError'
def h():
c.c = 10
for i in xrange(2000):
f()
g()
h()
if i == 50:
Descriptor.__get__ = get
if i == 100:
Descriptor.__set__ = set
if i == 150:
del Descriptor.__get__
if i == 200:
del Descriptor.__set__
if i == 250:
Descriptor.__set__ = set
if i == 300:
Descriptor.__get__ = get
if i == 350:
del Descriptor.__set__
if i == 400:
del Descriptor.__get__
if i == 450:
Descriptor.__get__ = get
Descriptor.__set__ = set
if i == 500:
del Descriptor.__get__
del Descriptor.__set__
if i == 550:
Descriptor.__get__ = get
if i == 600:
Descriptor.__set__ = set
del Descriptor.__get__
if i == 650:
Descriptor.__get__ = get
del Descriptor.__set__
if i == 700:
c.a = 5
c.b = lambda : 5
if i == 750:
del c.a
del c.b
if i == 800:
Descriptor.__set__ = set
if i == 850:
del Descriptor.__set__
c.a = 5
c.b = lambda : 5
Descriptor.__set__ = set
if i == 900:
del Descriptor.__set__
del c.a
del c.b
Descriptor.__set__ = set
if i == 950:
del Descriptor.__get__
if i == 1000:
del Descriptor.__set__
c.a = 5
c.b = lambda : 5
Descriptor.__set__ = set
if i == 1050:
del Descriptor.__set__
del c.a
del c.b
Descriptor.__set__ = set
if i == 1100:
del Descriptor.__set__
if i == 1150:
c.a = 5
c.b = lambda : 5
if i == 1200:
del c.a
del c.b
if i == 1250:
c.a = 5
c.b = lambda : 5
if i == 1350:
Descriptor.__get__ = get
if i == 1400:
Descriptor.__set__ = set
if i == 1450:
del Descriptor.__get__
if i == 1500:
del Descriptor.__set__
if i == 1550:
Descriptor.__set__ = set
if i == 1600:
Descriptor.__get__ = get
if i == 1650:
del Descriptor.__set__
if i == 1700:
del Descriptor.__get__
if i == 1750:
Descriptor.__get__ = get
Descriptor.__set__ = set
if i == 1800:
del Descriptor.__get__
del Descriptor.__set__
if i == 1850:
Descriptor.__get__ = get
if i == 1900:
Descriptor.__set__ = set
del Descriptor.__get__
if i == 1950:
Descriptor.__get__ = get
del Descriptor.__set__
|
from nonebot import (CommandSession, IntentCommand, NLPSession, get_bot, log,
on_command, on_natural_language)
from nonebot import permission as perm
from nonebot.permission import *
import random
import datetime
import asyncio
import time
import json
from os import path
from bot_config import GROUP_USE
bot = get_bot()
THIS_PATH = path.join(path.dirname(__file__))
ENVE_PATH = f"{THIS_PATH}\\user_get_enve.json"
ENVELOPES_PATH = f"{THIS_PATH}\\envelopes.json"
PACK_PATH = f"{THIS_PATH}\\user_package.json"
@on_command('summer2021', aliases=('夏活背包', '查看夏活背包', '祭典背包'), permission=GROUP, only_to_me=False)
async def summer2021(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
session.finish()
user_id = str(session.event.user_id)
msg = summer2021_find_package(user_id)
await session.send(msg)
@on_command('summer2021_b', aliases=('抢',), permission=GROUP, only_to_me=False)
async def summer2021_b(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
session.finish()
user_id = str(session.event.user_id)
new_user_pack(user_id)
boolean, flg = summer2021_get_envelopes(user_id)
if boolean == "False":
session.finish("你已经抢过一次了,不能再抢了哦~")
elif boolean == "True":
session.finish("这个红包已经被抢完了,下次再来吧~")
else:
session.finish(f"恭喜你抢到了{boolean}个夏日冰棒🍧")
# Red-envelope grabbing handler
def summer2021_get_envelopes(user_id):
enve = json.load(
open(ENVE_PATH, 'r', encoding='utf8'))
data = json.load(
open(ENVELOPES_PATH, 'r', encoding='utf8'))
pack = json.load(
open(PACK_PATH, 'r', encoding='utf8'))
new_user_pack(user_id)
    time.sleep(0.5)
    # Check whether this red envelope has anything left
if data[str(len(data))]["e_total"] <= 0:
return "True",0
    # Check whether this user has already grabbed this red envelope
if data[str(len(data))]["e_id"] in enve[user_id]:
return "False",0
enve[user_id].append(data[str(len(data))]["e_id"])
    # Work out the amount for this grab
flg = random.randint(2, 7)
this_get = random.randint(200, 600)
if this_get > data[str(len(data))]["e_total"]:
this_get = data[str(len(data))]["e_total"]
data[str(len(data))]["e_total"] = 0
else:
data[str(len(data))]["e_total"] -= this_get
    # Increase the user's popsicle count (to be optimized)
for item in pack[user_id]:
if pack[user_id][item]["item_name"] == "夏日冰棒":
pack[user_id][item]["item_num"] += this_get
    # Increase the user's dream-fragment count
flag = "0"
for item in pack[user_id]:
if pack[user_id][item]["item_name"] == "梦境碎片":
pack[user_id][item]["item_num"] += flg
flag = "1"
    # If the user has no dream fragments yet, create the entry
if flag == "0":
new_obj = {
"3": {
"item_name": "梦境碎片",
"item_num": flg
}
}
pack[user_id].update(new_obj)
    # Write the updated data back to the files
with open(ENVE_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(enve, ensure_ascii=False))
f.close()
with open(ENVELOPES_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(data, ensure_ascii=False))
f.close()
with open(PACK_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(pack, ensure_ascii=False))
f.close()
return this_get, flg
# Backpack inspection handler
def summer2021_find_package(user_id):
data = json.load(
open(PACK_PATH, 'r', encoding='utf8'))
if user_id not in data:
new_pack = {
user_id: {
"1": {
"item_name": "2021夏日祭典通行证",
"item_num": 1
},
"2": {
"item_name": "夏日冰棒",
"item_num": 0
}
}
}
data.update(new_pack)
    # Iterate over this user's backpack
msg = '🎐你有以下物品🎐\n__________________\n'
item_num = 1
for item in data[user_id]:
name = data[user_id][item]["item_name"]
num = str(data[user_id][item]["item_num"])
msg += f"{item_num}.{name}({num}个)\n"
item_num += 1
with open(PACK_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(data, ensure_ascii=False))
f.close()
return msg
# New-user initialization
def new_user_pack(user_id):
enve = json.load(
open(ENVE_PATH, 'r', encoding='utf8'))
pack = json.load(
open(PACK_PATH, 'r', encoding='utf8'))
if user_id not in enve:
new_obj = {
user_id: []
}
enve.update(new_obj)
with open(ENVE_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(enve, ensure_ascii=False))
f.close()
if user_id not in pack:
new_obj = {
user_id: {"1": {
"item_name": "2021夏日祭典通行证",
"item_num": 1
},
"2": {
"item_name": "夏日冰棒",
"item_num": 0
}}
}
pack.update(new_obj)
with open(PACK_PATH, 'w', encoding='utf-8') as f:
f.write(json.dumps(pack, ensure_ascii=False))
f.close()
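# Usage summary (illustrative): in a group listed in GROUP_USE, the command
# '夏活背包' (and its aliases) replies with the caller's backpack contents,
# while '抢' attempts to grab from the current red envelope, awarding a random
# 200-600 summer popsicles (capped by the envelope's remaining total) plus
# 2-7 dream fragments.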
|
from evawiz_basic import *
def request(handler,*args):
#def request_connect_user(self):
#handler.contact_server('mod_connect_user')
dprint('step into disconnect_user')
    # clear the saved auth information
handler.clear_auth_info()
    print('You\'ve disconnected from the evawiz server.')
pass
|
from .__plugin__ import Plugin as _P
from .__plugin__ import publicFun
from PySide2 import QtCore, QtWidgets
import re
from functools import partial
import os.path as op
class Plugin(_P):
def __init__(self, app):
super().__init__(app)
self.widget = Widget(app)
@publicFun(guishortcut="Ctrl+^")
def toggle(self):
self.widget.togglehide()
@publicFun(guishortcut="Ctrl+Space")
def autocomplete(self):
self.widget.autocomplete()
    def start(self):
        self.widget.start()
def parse(self, cmd):
if cmd:
self.widget.parse(cmd)
class Widget(QtWidgets.QDockWidget):
def __init__(self, app):
super().__init__()
self.app = app
self.txt = QtWidgets.QLineEdit(self)
empty = QtWidgets.QWidget(self)
self.setTitleBarWidget(empty)
self.setWindowTitle("cmd")
self.setWidget(self.txt)
self.txt.returnPressed.connect(self.parse)
app.gui.addDockWidget(QtCore.Qt.TopDockWidgetArea, self)
self.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
self.setAllowedAreas(QtCore.Qt.TopDockWidgetArea)
self.setVisible(False)
self.comp = None
def autocomplete(self):
        if not self.comp:
            return
        if self.txt.text() == "":
            self.comp.setCompletionPrefix("")
self.comp.complete()
def togglehide(self):
self.setVisible(self.isHidden())
self.txt.setFocus()
def start(self):
        names = sorted(self.app.publicfuns.keys())
        comp = QtWidgets.QCompleter(names)
comp.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.txt.setCompleter(comp)
self.comp = comp
def parse(self, txt=None):
if txt is None:
txt = self.txt.text()
self.txt.clear()
self.app.plugins["log"].logwidget.setVisible(True)
if op.isfile(fp := op.abspath(txt)):
filecmds = open(fp,"r").readlines()
if filecmds:
self.app.log.info(f">>> executing {fp}")
for idx, cmd in enumerate(filecmds):
x = cmd.strip()
if not x:continue
QtCore.QTimer.singleShot(idx+1, partial(self.parse,x) )
return
cmdmatches = re.findall(r"(.*?)\((.*)\)",txt)
if cmdmatches: cmdmatches = cmdmatches[0]
self.app.log.info(f">>> {txt}")
if len(cmdmatches)==1:
cmd = cmdmatches[0]
args = []
elif len(cmdmatches)>1:
cmd = cmdmatches[0]
args = cmdmatches[1]
else:
cmd = "?"
args = [txt.replace("?","")]
args = [x for x in args if x]
try:
if cmd == "?":
if args:
targetfun = self.app.publicfuns[args[0]]
docstring = targetfun.getDescr()
self.app.log.info("="*30)
self.app.log.info(args[0]+":")
for line in docstring.split("\n"):
self.app.log.info(line)
#get inputs (TODO)
#get outputs (TODO)
self.app.log.info("="*30)
else:
#print general help
self.app.log.info(f"{self.app.info['name']} help:")
self.app.log.info(f"{self.app.info['description']}")
self.app.log.info("="*30)
self.app.log.info(f"Ctrl+Space for autocomplete.")
self.app.log.info(f"enter <function name> for help on function (eg: 'log.toggle' shows help on log.toggle).")
self.app.log.info(f"enter <function name>(args) to call a function (eg: 'log.toggle()' toggles log.")
self.app.log.info("="*30)
self.app.log.info(f"Functions available:")
for fn in sorted(self.app.publicfuns.keys()):
self.app.log.info(fn)
self.app.execNextCmd()
else:
p = partial(self.app.publicfuns[cmd].trigger, args)
self.app.cmdbacklog.append(p)
self.app.execNextCmd()
except KeyError:
self.app.log.error(f"<<< invalid command")
#self.app.log.info("")
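# Illustrative commands accepted by Widget.parse (assuming the host app
# exposes a public function called 'log.toggle'):
#
#     ?               -> prints general help and lists all public functions
#     log.toggle      -> no parentheses, so it is treated as help for log.toggle
#     log.toggle()    -> queues log.toggle for execution via execNextCmd
#     /path/to/file   -> if the path is an existing file, each non-empty line
#                        is scheduled and parsed as a command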
|
import os
import pickle
def dump(name, value, end=".pickle"):
full_name = check_name(name, end)
create(name)
with open(full_name, "wb") as file:
pickle.dump(value, file)
return value
def create(name, end=".pickle", only=None):
full_name = check_name(name, end)
    # create the file if it does not already exist
try:
open(full_name, "xb").close()
except Exception: pass
    # if the file is still empty, seed it
if not file_size(name) > 0:
with open(full_name, "wb") as file:
# create main array
if only is None:
pickle.dump([], file)
def load(name, end=".pickle"):
full_name = check_name(name, end)
with open(full_name, 'rb') as file:
return pickle.load(file)
def check_len(name, end=".pickle"):
return len(load(name, end))
def file_size(name, end=".pickle"):
full_name = check_name(name, end)
return os.path.getsize(full_name)
def check_name(name, end):
    if name.endswith(end):
return name
else:
return name + end
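# A small usage sketch (writes 'scores.pickle' into the working directory;
# the filename and values are placeholders):
if __name__ == "__main__":
    dump("scores", [1, 2, 3])
    assert check_len("scores") == 3
    print(load("scores"))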
|
#!/usr/bin/python -u
import sys,os,re
from datetime import datetime
from stat import *
import tempfile
from cStringIO import StringIO
from cPickle import Pickler, Unpickler
import subprocess as sub
import string
reload(sys)
sys.setdefaultencoding("utf-8") # Needs Python Unicode build !
try:
import json
except:
import simplejson as json
class ElementMissing(Exception):
pass
class TooFewArgs(Exception):
pass
class NoFilesError(Exception):
pass
class FileNotFound(Exception):
pass
class MissingHumansFile(Exception):
pass
class NoLicense(Exception):
pass
def fixEndings(str):
str = string.replace(str, '\r\n', '\n')
str = string.replace(str, '\r', '\n')
return str
def path(name):
if name == "std":
return os.path.join("tests", "fixtures", "std", "processor-tests", "humans")
elif name == "styles":
return os.path.join("tests", "fixtures", "std", "styles")
elif name == "run":
return os.path.join("tests", "fixtures", "run")
elif name == "bundled":
return os.path.join("tests", "bundled")
elif name == "styletests":
return os.path.join("tests", "styletests")
elif name == "local":
return os.path.join("tests", "fixtures", "local")
elif name == "config":
return os.path.join("tests", "config")
elif name == "citeproc-js":
return os.path.join("tests", "citeproc-js")
elif name == "runners":
return os.path.join("tests", "runners")
elif name == "demo":
return os.path.join("demo")
class ApplyLicense:
def __init__(self):
self.rex = "(?sm)^^(/\*.*?^\s*\*/\n*)(.*)"
lines = fixEndings(open(os.path.join("LICENSE")).read()).strip()
lines = lines.split('\n')
for pos in range(0,len(lines),1):
lines[pos] = " * %s" % lines[pos]
self.license = '/*\n%s\n */\n' % '\n'.join(lines)
def apply(self,suppressConsole=False):
if not suppressConsole:
print self.license
#for p in [".", "src", path("std"), path("local"), path("bundled"), path("styletests"), path("citeproc-js"), path("demo")]:
for p in ["."]:
for file in os.listdir(p):
if not file.startswith("citeproc"):
continue
self.process_file(p,file)
def process_file(self,p,file):
filepath = os.path.join(p, file)
if not filepath.endswith(".js") and not filepath.endswith(".txt") and not filepath.endswith(".json") and not filepath.endswith("README.txt"): return
text = fixEndings(open(filepath, "rb").read())
oldtext = text
m = re.match(self.rex,text)
if m:
text = "%s\n%s" % (self.license, m.group(2))
#text = "%s" % (m.group(2),)
else:
text = "%s%s" % (self.license, text)
#pass
if text.strip() != oldtext.strip():
open(filepath,"w+b").write(text)
class Bundle:
def __init__(self, mode=None):
self.citeprocs = [
{
"bundle_name": "citeproc.js",
"e4x": False,
"note": "without e4x support"
},
{
"bundle_name": "citeproc_with_e4x.js",
"e4x": True,
"note": "with e4x support"
}
]
f = ["load"]
f.extend(["print"])
f.extend(["xmljson","xmldom","xmle4xLoad","system","sort","util_disambig","util_nodes","util_dateparser","build"]);
f.extend(["util_static_locator","util_processor","util_citationlabel","api_control"]);
f.extend(["queue","state","api_cite","api_bibliography","util_integration","api_update"]);
f.extend(["util_locale","node_bibliography","node_choose","node_citation","node_comment"]);
f.extend(["node_date","node_datepart","node_elseif","node_else","node_etal","node_group"]);
f.extend(["node_if","node_conditions","node_condition","util_conditions","node_info"]);
f.extend(["node_institution","node_institutionpart","node_key"]);
f.extend(["node_label","node_layout","node_macro","util_names_output","util_names_tests"]);
f.extend(["util_names_truncate","util_names_divide","util_names_join","util_names_common"]);
f.extend(["util_names_constraints","util_names_disambig","util_names_etalconfig","util_names_etal"]);
f.extend(["util_names_render","util_publishers","util_label","node_name","node_namepart"]);
f.extend(["node_names","node_number","node_sort","node_substitute","node_text","attributes"]);
f.extend(["stack","util_parallel","util","util_transform","obj_token","obj_ambigconfig"]);
f.extend(["obj_blob","obj_number","util_datenode","util_date","util_names","util_dates"]);
f.extend(["util_sort","util_substitute","util_number","util_page","util_flipflop"]);
f.extend(["formatters","formats","registry","disambig_names","disambig_citations"]);
f.extend(["disambig_cites", "util_modules","util_name_particles"]);
self.files = f
def deleteOldBundles(self):
for citeproc in self.citeprocs:
if os.path.exists(citeproc["bundle_name"]):
os.unlink(citeproc["bundle_name"])
def cleanFile(self, subfile, e4xSupport):
subfile = fixEndings(subfile)
subfile = re.sub("(?m)^(\/\*.*?\*\/)$", "", subfile)
subfile = re.sub("(?sm)^\s*\/\*.*?^\s*\*\/","",subfile)
subfile = re.sub("(?sm)^\s*//SNIP-START.*?^\s*//SNIP-END","",subfile)
subfile = re.sub("(?sm)^\s*//.*?$","",subfile)
if not e4xSupport:
subfile = re.sub("(?sm)^\s*load.*?$","",subfile)
subfile = re.sub("(?sm)^\s*\n","",subfile)
return subfile
def createNewBundles(self):
for citeproc in self.citeprocs:
file = ""
for f in self.files:
filename = os.path.join( "src", "%s.js" % f)
ifh = open(filename, "rb")
file += self.cleanFile(ifh.read(), citeproc["e4x"])
open(citeproc["bundle_name"],"w+b").write(file)
print "Wrote %s (processor %s)" % (citeproc["bundle_name"], citeproc["note"])
class Params:
def __init__(self,opt,args,category,force=None):
self.opt = opt
self.args = args
self.script = os.path.split(sys.argv[0])[1]
self.pickle = ".".join((os.path.splitext( self.script )[0], "pkl"))
self.force = force
self.category = category
self.files = {}
self.files['humans'] = {}
self.files['machines'] = []
mypath = os.path.split(sys.argv[0])[0]
self.base = os.path.join( mypath )
if len(self.base):
os.chdir(self.base)
self.tests = os.path.join( os.getcwd(), "tests")
self.runners = os.path.join( self.tests, "runners")
self.makedirs()
self.initConfig()
self.getFilePointers()
def makedirs(self):
for dir in ("std", "citeproc-js", "runners"):
p = path(dir)
if not os.path.exists(p):
os.makedirs(p)
for subdir in ("humans","machines"):
p = os.path.join(path("run"), subdir)
if not os.path.exists(p):
os.makedirs(p)
def getSourcePaths(self):
#if self.opt.processor:
if False:
print "opt.processor is WIP"
else:
if len(self.args) == 2:
filename = "%s_%s.txt" % tuple(self.args)
self.files['humans'][filename] = os.path.split(self.file_pointers[filename])
else:
for filename in self.file_pointers:
self.files['humans'][filename] = os.path.split(self.file_pointers[filename])
def clearSource(self):
for subdir in ["run"]:
for file in os.listdir(os.path.join(path(subdir), "machines")):
if not file.endswith(".json"): continue
os.unlink(os.path.join(path(subdir), "machines", file))
def refreshSource(self,force=False):
groups = {}
for filename in self.files['humans'].keys():
hpath = self.files['humans'][filename]
mpath = os.path.join("tests", "fixtures", "run", "machines", "%s.json" % filename[:-4] )
hp = os.path.sep.join(hpath)
mp = os.path.join(mpath)
#if force:
# self.grindFile(hpath,filename,mp)
if not os.path.exists(mp):
self.grindFile(hpath,filename,mp)
if self.opt.verbose:
print "Created: %s" % mp
hmod = os.stat(hp)[ST_MTIME]
mmod = os.stat(mp)[ST_MTIME]
if hmod > mmod:
if self.opt.verbose:
print "Old: %s" % mp
self.grindFile(hpath,filename,mp)
#if not self.opt.processor:
if True:
m = re.match("([-a-z]*)_.*",filename)
if m:
groupkey = m.group(1)
if not groups.has_key(groupkey):
groups[groupkey] = {"mtime":0,"tests":[]}
groups[groupkey]["tests"].append(filename)
if hmod > groups[groupkey]["mtime"]:
groups[groupkey]["mtime"] = mmod
if len(self.args) < 2:
for group in groups.keys():
gp = os.path.join(path("bundled"), "%s.js"%group)
needs_gp = True
if os.path.exists( gp ):
needs_gp = False
gt = os.stat(gp)[ST_MTIME]
# if force or needs_gp or groups[group]["mtime"] > gt:
if needs_gp or groups[group]["mtime"] > gt:
if self.opt.verbose:
sys.stdout.write("!")
ofh = open( os.path.join(path("bundled"), "%s.js" % group), "w+b" )
group_text = '''dojo.provide("%s.%s");
doh.register("%s.%s", [
''' % (self.category,group,self.category,group)
ofh.write(group_text)
for filename in [x[:-4] for x in groups[group]["tests"]]:
if self.opt.verbose:
sys.stdout.write("+")
entry_text = ''' function(){
var test = new StdRhinoTest("%s", "%s");
doh.assertEqual(test.result, test.run());
},
''' % (filename, self.opt.engine)
ofh.write(entry_text)
ofh.write("]);\n")
def buildRunner(self):
has_files = False
ofh = open( os.path.join(path("runners"), "run.js"), "w+b")
header = 'dojo.require("doh.runner");\n'
ofh.write(header)
#if self.opt.processor:
if False:
testpath = path("citeproc-js")
self.category = "citeproc_js"
else:
testpath = path("bundled")
self.category = "std"
if len(args) == 2:
keys = self.files['humans'].keys()
if len(keys):
file = keys[0]
set = os.path.split( self.files['humans'][file][0] )[-1]
body = '''doh.register("%s.%s", [
function(){
var test = new StdRhinoTest("%s","%s");
doh.assertEqual(test.result, test.run());
},
])
''' % (set, file[:-4], file[:-4], self.opt.engine)
ofh.write(body)
has_files = True
else:
count = 0
for file in [x for x in os.listdir(testpath)]:
if not file.endswith('.js'): continue
if len(self.args) and not file.startswith('%s.'%args[0]): continue
has_files = True
ofh.write('dojo.require("%s.%s");\n' % (self.category,file[:-3]))
ofh.write("tests.run();")
if not has_files:
raise NoFilesError
def grindFile(self,hpath,filename,mp):
if self.opt.verbose:
sys.stdout.write(".")
test = CslTest(opt,hpath,filename)
test.parse()
test.repair()
test.dump(mp)
def runTests(self,bundle=False):
cp = ConfigParser()
cp.read(os.path.join(path("config"), "test.cnf"))
if self.opt.engine == "mozjs":
engine = cp.get("mozjs", "command")
nick = "mozjs"
elif self.opt.engine == "v8":
engine = cp.get("v8", "command")
nick = "v8"
elif self.opt.engine == "jsc":
engine = cp.get("jsc", "command")
nick = "jsc"
else:
engine = cp.get("rhino","command")
nick = "rhino"
bundleext = ""
if bundle:
bundleext = "-bundled"
runpath = os.path.join(path("runners"), "%s%s.js" %(nick,bundleext))
command = "%s %s" % (engine,runpath)
ifh = sub.Popen(command,shell=True, stdout=sub.PIPE).stdout
while 1:
line = ifh.readline()
if not line: break
line = fixEndings(line)
sys.stdout.write(line)
def validateSource(self):
skip_to_pos = 0
if os.path.exists(self.pickle):
upfh = open(self.pickle, "rb")
unpickler = Unpickler(upfh)
old_opt,old_pos = unpickler.load()
if self.opt == old_opt:
skip_to_pos = old_pos
for i in range(0,skip_to_pos,1):
sys.stdout.write(".")
pos = -1
files = self.files['humans'].keys()
files.sort()
cp = ConfigParser()
cp.read(os.path.join(path("config"), "test.cnf"))
validator_path = cp.get("validation", "validator")
csl_schema_path = cp.get("validation", "schema")
cslm_schema_path = cp.get("validation", "schema-m")
for filename in files:
pos += 1
if pos < skip_to_pos: continue
p = self.files['humans'][filename]
test = CslTest(opt,p,filename,pos=pos)
test.parse()
test.validate(validator_path, csl_schema_path, cslm_schema_path)
if os.path.exists( self.pickle ):
os.unlink(self.pickle)
def initConfig(self):
if not os.path.exists(path("bundled")):
os.makedirs(path("bundled"))
if not os.path.exists(path("styletests")):
os.makedirs(path("styletests"))
if not os.path.exists(path("std")):
os.makedirs(path("std"))
if not os.path.exists(os.path.join(path("run"))):
os.makedirs(path("run"))
if not os.path.exists(os.path.join(os.path.join(path("run"), "machines"))):
os.makedirs(os.path.join(path("run"), "machines"))
if not os.path.exists(os.path.join(os.path.join(path("run"), "humans"))):
os.makedirs(os.path.join(path("run"), "humans"))
if not os.path.exists(path("config")):
os.makedirs(path("config"))
if not os.path.exists(os.path.join(path("config"), "test.cnf")):
test_template = '''[rhino]
command: java -client -jar ./rhino/js-1.7R3.jar -opt 8
'''
ofh = open(os.path.join(path("config"), "test.cnf"), "w+b" )
ofh.write(test_template)
ofh.close()
def getFilePointers(self):
self.file_pointers = {}
sourcedirs = [path("local"), path("std")]
for sourcedir in sourcedirs:
filenames = os.listdir(sourcedir)
filenames.sort()
for filename in filenames:
if not filename.endswith(".txt"):
continue
if self.file_pointers.has_key(filename):
print "WARNING: duplicate fixture name \"%s\"" % filename
self.file_pointers[filename] = os.path.join(sourcedir, filename)
class CslTest:
def __init__(self,opt,hpath,testname,pos=0):
self.opt = opt
self.pos = pos
self.testname = testname
self.hpath = hpath
self.hp = os.path.sep.join(hpath)
self.CREATORS = ["author","editor","translator","recipient","interviewer"]
self.CREATORS += ["composer","original-author","container-author","collection-editor"]
self.RE_ELEMENT = '(?sm)^(.*>>=[^\n]*%s[^\n]+)\n(.*)(\n<<=.*%s.*)'
self.RE_FILENAME = '^[-a-z]+_[a-zA-Z0-9]+\.txt$'
self.script = os.path.split(sys.argv[0])[1]
self.pickle = ".".join((os.path.splitext( self.script )[0], "pkl"))
self.data = {}
self.raw = fixEndings(unicode(open(os.path.sep.join(hpath), "rb").read()))
def parse(self):
## print "kkk: %s" % (self.testname,)
for element in ["MODE","CSL"]:
self.extract(element,required=True,is_json=False)
if element == "MODE" and self.opt.engine == "rhino":
self.data['mode'] = "%s-rhino" % self.data['mode']
if element == "CSL" and self.data['csl'].endswith('.csl'):
stylepath = os.path.join(os.path.join(path("styles")), self.data['csl'])
self.data['csl'] = fixEndings(open(stylepath, "rb").read())
self.extract("RESULT",required=True,is_json=False,rstrip=True)
self.extract("INPUT",required=True,is_json=True)
self.extract("CITATION-ITEMS",required=False,is_json=True)
self.extract("CITATIONS",required=False,is_json=True)
self.extract("BIBENTRIES",required=False,is_json=True)
self.extract("BIBSECTION",required=False,is_json=True)
self.extract("ABBREVIATIONS",required=False,is_json=True)
self.extract("OPTIONS",required=False,is_json=True)
self.extract("MULTIAFFIX",required=False,is_json=True)
self.extract("LANGPARAMS",required=False,is_json=True)
self.extract("INPUT2",required=False,is_json=True)
def extract(self,tag,required=False,is_json=False,rstrip=False):
m = re.match(self.RE_ELEMENT %(tag,tag),self.raw)
data = False
if m:
if rstrip:
data = m.group(2).rstrip()
else:
data = m.group(2).strip()
elif required:
raise ElementMissing(self.script,tag,self.testname)
if data != False:
if is_json:
data = json.loads(data)
self.data[tag.lower().replace('-','_')] = data
else:
self.data[tag.lower().replace('-','_')] = False
def repair(self):
self.fix_dates()
input_str = json.dumps(self.data["input"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("INPUT", "INPUT"),self.raw)
newraw = m.group(1) + "\n" + input_str + m.group(3)
if self.data["options"]:
options_str = json.dumps(self.data["options"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("OPTIONS", "OPTIONS"),self.raw)
newraw = m.group(1) + "\n" + options_str + m.group(3)
if self.data["multiaffix"]:
multiaffix_str = json.dumps(self.data["multiaffix"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("MULTIAFFIX", "MULTIAFFIX"),self.raw)
newraw = m.group(1) + "\n" + multiaffix_str + m.group(3)
if self.data["langparams"]:
langparams_str = json.dumps(self.data["langparams"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("LANGPARAMS", "LANGPARAMS"),self.raw)
newraw = m.group(1) + "\n" + langparams_str + m.group(3)
if self.data["input2"]:
input2_str = json.dumps(self.data["input2"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("INPUT2", "INPUT2"),self.raw)
newraw = m.group(1) + "\n" + input2_str + m.group(3)
if self.data["citation_items"]:
citations_str = json.dumps(self.data["citation_items"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("CITATION-ITEMS", "CITATION-ITEMS"),self.raw)
newraw = m.group(1) + "\n" + citations_str + m.group(3)
if self.data["citations"]:
citations_str = json.dumps(self.data["citations"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("CITATIONS", "CITATIONS"),self.raw)
newraw = m.group(1) + "\n" + citations_str + m.group(3)
if self.data["abbreviations"]:
abbreviations_str = json.dumps(self.data["abbreviations"],indent=4,sort_keys=True,ensure_ascii=False)
m = re.match(self.RE_ELEMENT % ("ABBREVIATIONS", "ABBREVIATIONS"),self.raw)
newraw = m.group(1) + "\n" + abbreviations_str + m.group(3)
if self.raw != newraw:
open(self.hp,"w+b").write(newraw)
def fix_dates(self):
for pos in range(0, len(self.data["input"]),1):
for k in ["issued", "event-date", "accessed", "container", "original-date"]:
if self.data["input"][pos].has_key(k):
newdate = []
if not self.data["input"][pos][k].has_key("date-parts"):
start = []
for e in ["year","month","day"]:
if self.data["input"][pos][k].has_key(e):
start.append( self.data["input"][pos][k][e] )
self.data["input"][pos][k].pop(e)
else:
break
if start:
newdate.append(start)
end = []
for e in ["year_end","month_end","day_end"]:
if self.data["input"][pos][k].has_key(e):
end.append( self.data["input"][pos][k][e] )
self.data["input"][pos][k].pop(e)
else:
break
if end:
newdate.append(end)
self.data["input"][pos][k]["date-parts"] = newdate
def dump(self, mpath):
s = json.dumps(self.data, indent=4, sort_keys=True, ensure_ascii=False )
s = s.replace(u"\u200b", "\\u200b")
open(mpath,"w+b").write(s)
def validate(self, validator_path, csl_schema_path, cslm_schema_path):
if self.opt.verbose:
print self.testname
m = re.match("(?sm).*version=\"1.1mlz1\".*",self.data["csl"])
if m:
rnc_path = cslm_schema_path
else:
rnc_path = csl_schema_path
tfd,tfilename = tempfile.mkstemp(dir=".")
os.write(tfd,self.data["csl"])
os.close(tfd)
jfh = os.popen("%s %s %s" % (validator_path,rnc_path,tfilename))
success = True
plural = ""
while 1:
line = jfh.readline()
if not line: break
line = line.strip()
e = re.match("^fatal:",line)
if e:
print line
sys.exit()
m = re.match(".*:([0-9]+):([0-9]+): *error:(.*)",line)
if m:
if success:
print "\n##"
print "#### Error%s in CSL for test: %s" % (plural,self.hp)
print "##\n"
success = False
print " %s @ line %s" %(m.group(3).upper(),m.group(1))
plural = "s"
jfh.close()
os.unlink(tfilename)
if not success:
print ""
io = StringIO()
io.write(self.data["csl"])
io.seek(0)
linepos = 1
while 1:
cslline = io.readline()
if not cslline: break
cslline = cslline.rstrip()
print "%3d %s" % (linepos,cslline)
linepos += 1
pfh = open( self.pickle,"w+b")
pickler = Pickler( pfh )
pickler.dump( (opt, self.pos) )
sys.exit()
sys.stdout.write(".")
sys.stdout.flush()
if __name__ == "__main__":
from ConfigParser import ConfigParser
from optparse import OptionParser
os.environ['LANG'] = "en_US.UTF-8"
usage = '\n%prog [options] [<group> [testname]]\n%prog -p [options] [testname]'
description="This script."
parser = OptionParser(usage=usage,description=description,epilog="Happy testing!")
parser.add_option("-s", "--standard", dest="testrun",
default=False,
action="store_true",
help='Run tests.')
parser.add_option("-r", "--release", dest="bundle",
default=False,
action="store_true",
help='Bundle processor, apply license to files, and test with bundled code.')
parser.add_option("-c", "--cranky", dest="cranky",
default=False,
action="store_true",
help='Attempt to validate style code for testing against the CSL schema.')
parser.add_option("-e", "--engine", dest="engine",
default="rhino",
help='Valid entries are "rhino" (default), "mozjs", "jsc" or "v8."')
parser.add_option("-g", "--grind", dest="grind",
default=False,
action="store_true",
help='Grind human-readable test code into machine-readable form (used only for debugging the test framework).')
#parser.add_option("-p", "--processor", dest="processor",
# default=False,
# action="store_true",
# help='Run processor tests (cannot be used with -c, -g or -s opts, takes only test name as single argument).')
parser.add_option("-v", "--verbose", dest="verbose",
default=False,
action="store_true",
help='Display test names during processing.')
parser.add_option("-B", "--bundle-only", dest="makebundle",
default=False,
action="store_true",
help='Create the citeproc.js bundle and exit.')
(opt, args) = parser.parse_args()
if len(args) == 1:
m = re.match("^(?:.*/)*(.*)_(.*?)(?:\.txt)*$", args[0])
if m:
args = [m.group(1), m.group(2)]
bundlecount = 0
if opt.makebundle:
bundlecount += 1
if bundlecount > 1:
        parser.print_help()
print "\nError: Only one of the -B, -G and -Z options can be used at one time."
sys.exit()
if opt.makebundle:
bundler = Bundle()
bundler.deleteOldBundles()
bundler.createNewBundles()
license = ApplyLicense()
license.apply()
sys.exit()
# Testing sequence:
# + Get single tests working
# Get automatic grinding for single tests working
# Get forced grinding for single tests working
# Get forced grinding and testing for single tests working
# Get CSL integrity check working for single tests
# Check running of all tests
# Check grinding of all tests followed by testing
# Check CSL integrity check of all tests
#
# Validation
#
#if opt.bundle and (opt.processor or opt.grind or opt.cranky or opt.testrun or len(args)):
if opt.bundle and (opt.grind or opt.cranky or opt.testrun or len(args)):
        parser.print_help()
print "\nError: Option -r must be used alone"
sys.exit()
#if opt.processor and (opt.grind or opt.cranky or opt.testrun):
# parser.print_help()
# print "\nError: Option -p cannot be used with options -c, -g, -s or -S.\n"
# sys.exit()
#elif opt.processor and len(args) and len(args) != 1:
#if len(args) and len(args) != 1:
# parser.print_help()
# print "\nError: Use only one argument (the test name) with the -p option.\n"
# sys.exit()
#elif (opt.grind or opt.cranky or opt.testrun) and len(args) and len(args) != 2 and len(args) != 1:
if (opt.grind or opt.cranky or opt.testrun) and len(args) and len(args) != 2 and len(args) != 1:
parser.print_help()
print "\nError: Use one or two arguments with the -c, -g, -s or -S options (group name plus"
print " optionally the test name).\n"
sys.exit()
#
# Set up paths engine
#
category = "std"
#if opt.processor:
# params = Params(opt,args,"citeproc_js",force="citeproc_js")
#elif len(args) < 2:
if len(args) < 2:
params = Params(opt,args,category,force="std")
else:
params = Params(opt,args,category)
#
# Will do something, so issue date stamp
#
start = datetime.now()
START="%s:%s:%s <--------------START" % (start.hour,start.minute,start.second)
print START
if opt.bundle:
opt.grind = True
opt.verbose = True
opt.testrun = True
try:
if opt.cranky or opt.grind or opt.testrun:
params.getSourcePaths()
if opt.grind or ((opt.testrun) and opt.bundle):
params.clearSource()
params.refreshSource(force=True)
print ""
else:
params.refreshSource()
if opt.cranky:
params.validateSource()
if opt.bundle:
bundle = Bundle()
bundle.deleteOldBundles()
bundle.createNewBundles()
license = ApplyLicense()
license.apply()
if opt.testrun:
params.buildRunner()
params.runTests(bundle=opt.bundle)
#elif opt.processor:
# params.buildRunner()
# params.runTests()
except (KeyboardInterrupt, SystemExit):
for file in os.listdir("."):
if not file.startswith("tmp") or not len(file) == 9: continue
os.unlink(file)
sys.exit()
except MissingHumansFile, error:
parser.print_help()
print '''\nError: File \"%s\" not found.
Looked in:''' % error[0]
for path in error[1]:
print ' %s' % path
except NoFilesError:
print '\nError: No files to process!\n'
except NoLicense:
print '\nError: No license found in load.js'
end = datetime.now()
END="%s:%s:%s <--------------END" % (end.hour,end.minute,end.second)
print END
diff = end-start
print "Time: %s seconds" % (diff.seconds)
|
# coding=UTF8
import os
import shutil
import pyexiv2
import time
import utils
# noinspection PyBroadException
class PictureArchiver:
_verbose = True
    _debug_enabled = False
def __init__(self, src_path, dest_path):
self._srcPath = src_path
self._destPath = dest_path
self._move_files = False
self._diagnostics = False
self._imgCount = 0
self._currImgIndex = 0
self._success_count = 0
self._currImgFileName = None
self._correct_dates_only = False
self._start_size = 0
self.onAdvance = None
def _do_advance(self):
try:
if self.onAdvance is not None:
self.onAdvance(self)
except Exception:
pass
def _log(self, text):
if self._verbose:
print text
    def _debug(self, text):
        if self._debug_enabled:
            print text
def _error(self, msg):
print "ERROR:", msg
def _correct_exif_date(self, filename, date):
if not utils.is_picture(filename):
return
try:
exif_data = pyexiv2.ImageMetadata(filename)
need_write = False
exif_data.read()
if utils.get_exif_value(exif_data, 'Exif.Image.DateTime') is None:
exif_data['Exif.Image.DateTime'] = date
need_write = True
if utils.get_exif_value(exif_data, 'Exif.Photo.DateTimeOriginal') is None:
exif_data['Exif.Photo.DateTimeOriginal'] = date
need_write = True
if utils.get_exif_value(exif_data, 'Exif.Photo.DateTimeDigitized') is None:
exif_data['Exif.Photo.DateTimeDigitized'] = date
need_write = True
if need_write:
exif_data.write(True)
except Exception as e:
self._error(e)
def _is_valid_backup_file(self, file_name):
fname, fext = os.path.splitext(file_name)
return fext.lower().lstrip(".") in utils.MIME_TYPES.keys()
def _get_dest_folder_name(self, obj_date):
if obj_date is not None:
year = obj_date.strftime("%Y")
month = obj_date.strftime("%m")
day = obj_date.strftime("%Y-%m-%d")
destpath = os.path.join(year, month, day)
return destpath
else:
return ""
def _correct_picture_date(self, picture_path, datetime):
self._correct_exif_date(picture_path, datetime)
filetime = time.mktime(datetime.timetuple())
os.utime(picture_path, (filetime, filetime))
self._debug("Corrected: " + picture_path)
def _walk_dir_correct_date(self, root_dir):
dir_list = os.listdir(root_dir)
for filename in dir_list:
src_file = os.path.join(root_dir, filename)
if os.path.isdir(src_file):
self._walk_dir_correct_date(src_file)
continue
if os.path.isfile(src_file):
picture_date = utils.get_picture_date(src_file)
self._correct_picture_date(src_file, picture_date)
def _walk_dir(self, root_dir):
dir_list = os.listdir(root_dir)
self._imgCount += len(dir_list)
files_left = len(dir_list)
for filename in dir_list:
self._currImgIndex += 1
self._currImgFileName = filename
self._do_advance()
src_file = os.path.join(root_dir, filename)
if os.path.isdir(src_file):
self._walk_dir(src_file)
continue
if not os.path.isfile(src_file):
continue
if not self._is_valid_backup_file(src_file):
self._log("SKIPING: '" + src_file + "' is not a picture or video")
continue
if self._start_size > 0:
fs = os.path.getsize(src_file)
if self._start_size > fs:
self._log("SKIPING: " + src_file + " is not larger than " + utils.sizeof_fmt(self._start_size) + " bytes (" +
utils.sizeof_fmt(fs) + ")")
continue
picture_date = utils.get_picture_date(src_file)
if picture_date is None:
self._log("SKIPING: '" + src_file + "' Couldn't determine file date")
continue
dest_folder_name = self._get_dest_folder_name(picture_date)
dest_folder = os.path.join(self._destPath, dest_folder_name)
dest_file = os.path.join(dest_folder, filename)
move = self._move_files or src_file.startswith(u"/home/hm/Imágenes/Camara")
try:
if os.path.isfile(dest_file) and os.path.samefile(src_file, dest_file):
self._log("SKIPING: '" + dest_file + "' Source and destination are the same.")
continue
src_size = os.path.getsize(src_file)
if os.path.isfile(dest_file):
dest_size = os.path.getsize(dest_file)
if dest_size >= src_size:
self._log("SKIPING: '" + dest_file + "' already exists.")
continue
if not os.path.isdir(dest_folder):
self._log("CREATING: Folder '" + dest_folder + "'")
if not self._diagnostics:
os.makedirs(dest_folder)
if move:
self._log("MOVING: '" + src_file + "' to '" + dest_file + "'")
if not self._diagnostics:
shutil.move(src_file, dest_folder)
files_left -= 1
if files_left == 0:
try:
if not self._diagnostics:
os.rmdir(root_dir)
except:
self._error("Error removing dir")
else:
self._log("COPING: '" + src_file + "' to '" + dest_file + "'")
if not self._diagnostics:
shutil.copy(src_file, dest_folder)
success = (not move or not os.path.isfile(src_file)) and os.path.isfile(dest_file) and src_size == os.path.getsize(dest_file)
if success:
if not self._diagnostics:
self._correct_picture_date(dest_file, picture_date)
self._success_count += 1
except Exception as exp:
self._error(exp)
continue
def archive_pictures(self):
self._imgCount = 0
self._currImgIndex = 0
self._success_count = 0
self._walk_dir(self._srcPath)
self._log(str(self._success_count) + " of " + str(self._currImgIndex) + " files copied.")
@classmethod
def do(cls, src_path, dest_path, diagnostics, move, start_size):
obj = cls(src_path, dest_path)
obj._diagnostics = diagnostics
obj._move_files = move
obj._start_size = int(start_size) * 1024 * 1024
print obj._start_size
if obj._diagnostics:
obj._log("WARING: Diagnostics mode activated.")
obj.archive_pictures()
@classmethod
def correct_dates(cls, src_path):
obj = cls(src_path, src_path)
obj._walk_dir_correct_date(src_path)
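# --- A minimal usage sketch (added for illustration; not part of the original
# module). The source and destination paths are hypothetical; with
# diagnostics=True the archiver only logs the copy/move actions it would take.
if __name__ == "__main__":
    PictureArchiver.do(
        src_path="/tmp/camera_uploads",      # hypothetical source folder
        dest_path="/tmp/photo_archive",      # files land in <year>/<month>/<date> subfolders
        diagnostics=True,                    # dry run: log only, do not touch files
        move=False,                          # copy files instead of moving them
        start_size=0,                        # minimum file size filter in MB (0 = no filter)
    )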
|
from tqdm import tqdm
from time import sleep
import logging
from pynonymizer.database.provider import DatabaseProvider, SEED_TABLE_NAME
from pynonymizer.database.exceptions import UnsupportedTableStrategyError
from pynonymizer.database.mysql import execution, query_factory
from pynonymizer.database.basic.input import resolve_input
from pynonymizer.database.basic.output import resolve_output
from pynonymizer.strategy.table import TableStrategyTypes
class MySqlProvider(DatabaseProvider):
"""
    A command-line based MySQL provider. Uses the `mysql` and `mysqldump` binaries,
    because piping large amounts of SQL through the command-line client is efficient.
Unfortunately, this implementation provides limited feedback when things go wrong.
"""
__CHUNK_SIZE = 8192
__DUMPSIZE_ESTIMATE_INFLATION = 1.15
logger = logging.getLogger(__name__)
def __init__(self, db_host, db_user, db_pass, db_name, db_port=None, seed_rows=None, cmd_opts=None, dump_opts=None):
if db_host is None:
db_host = "127.0.0.1"
if db_port is None:
db_port = "3306"
if cmd_opts is None:
cmd_opts = ""
if dump_opts is None:
dump_opts = ""
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.db_name = db_name
self.db_port = db_port
if seed_rows is None:
seed_rows = 150
self.seed_rows = int(seed_rows)
self.__runner = execution.MySqlCmdRunner(db_host, db_user, db_pass, db_name, db_port, additional_opts=cmd_opts)
self.__dumper = execution.MySqlDumpRunner(db_host, db_user, db_pass, db_name, db_port, additional_opts=dump_opts)
def __seed(self, qualifier_map):
"""
        'Seed' the database with a set of pre-generated random records so that anonymization updates can later be applied in batches
"""
for i in tqdm(range(0, self.seed_rows), desc="Inserting seed data", unit="rows"):
self.logger.debug(f"Inserting seed row {i}")
self.__runner.db_execute(query_factory.get_insert_seed_row(SEED_TABLE_NAME, qualifier_map))
def __estimate_dumpsize(self):
"""
Makes a guess on the dump size using internal database metrics
:return: A value in bytes, or None (unknown)
"""
statement = query_factory.get_dumpsize_estimate(self.db_name)
process_output = self.__runner.get_single_result(statement)
try:
return int(process_output) * self.__DUMPSIZE_ESTIMATE_INFLATION
except ValueError:
# Value unparsable, likely NULL
return None
def __read_until_empty_byte(self, data):
return iter(lambda: data.read(self.__CHUNK_SIZE), b'')
def __run_scripts(self, script_list, title=""):
for i, script in enumerate(script_list):
self.logger.info(f"Running {title} script #{i} \"{script[:50]}\"")
self.logger.info(self.__runner.db_execute(script))
def create_database(self):
"""Create the working database"""
self.__runner.execute(query_factory.get_create_database(self.db_name))
def drop_database(self):
"""Drop the working database"""
self.__runner.execute(query_factory.get_drop_database(self.db_name))
def anonymize_database(self, database_strategy):
"""
Anonymize a restored database using the passed database strategy
:param database_strategy: a strategy.DatabaseStrategy configuration
:return:
"""
qualifier_map = database_strategy.fake_update_qualifier_map
if len(qualifier_map) > 0:
self.logger.info("creating seed table with %d columns", len(qualifier_map))
create_seed_table_sql = query_factory.get_create_seed_table(SEED_TABLE_NAME, qualifier_map)
self.__runner.db_execute(create_seed_table_sql)
self.logger.info("Inserting seed data")
self.__seed(qualifier_map)
self.__run_scripts(database_strategy.before_scripts, "before")
table_strategies = database_strategy.table_strategies
self.logger.info("Anonymizing %d tables", len(table_strategies))
with tqdm(desc="Anonymizing database", total=len(table_strategies)) as progressbar:
for table_strategy in table_strategies:
if table_strategy.schema is not None:
self.logger.warning(
"%s: MySQL provider does not support table schema. This option will be ignored.",
table_strategy.table_name
)
if table_strategy.strategy_type == TableStrategyTypes.TRUNCATE:
progressbar.set_description("Truncating {}".format(table_strategy.table_name))
self.__runner.db_execute(query_factory.get_truncate_table(table_strategy.table_name))
elif table_strategy.strategy_type == TableStrategyTypes.DELETE:
progressbar.set_description("Deleting {}".format(table_strategy.table_name))
self.__runner.db_execute(query_factory.get_delete_table(table_strategy.table_name))
elif table_strategy.strategy_type == TableStrategyTypes.UPDATE_COLUMNS:
progressbar.set_description("Anonymizing {}".format(table_strategy.table_name))
statements = query_factory.get_update_table(SEED_TABLE_NAME, table_strategy)
self.__runner.db_execute(statements)
else:
raise UnsupportedTableStrategyError(table_strategy)
progressbar.update()
self.__run_scripts(database_strategy.after_scripts, "after")
self.logger.info("dropping seed table")
self.__runner.db_execute(query_factory.get_drop_seed_table(SEED_TABLE_NAME))
# Wait an arbitrary amount of time here to prevent this step from interacting with
# transactional dump operations
self.logger.debug("Waiting for trailing operations to complete...")
sleep(0.2)
def restore_database(self, input_path):
"""
Feed a mysqldump dumpfile to the mysql binary on stdin.
:param input_path:
:return:
"""
input_obj = resolve_input(input_path)
dumpsize = input_obj.get_size()
batch_processor = self.__runner.open_batch_processor()
with input_obj.open() as dumpfile_data:
with tqdm(desc="Restoring", total=dumpsize, unit='B', unit_scale=True, unit_divisor=1000) as bar:
for chunk in self.__read_until_empty_byte(dumpfile_data):
batch_processor.write(chunk)
batch_processor.flush()
bar.update(len(chunk))
def dump_database(self, output_path):
"""
Feed an output with stdout from the mysqldump binary
:param output_path:
:return:
"""
output_obj = resolve_output(output_path)
dumpsize_estimate = self.__estimate_dumpsize()
dump_process = self.__dumper.open_dumper()
with output_obj.open() as output_file:
with tqdm(desc="Dumping", total=dumpsize_estimate, unit='B', unit_scale=True, unit_divisor=1000) as bar:
for chunk in self.__read_until_empty_byte(dump_process):
output_file.write(chunk)
bar.update(len(chunk))
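# --- A minimal usage sketch (added for illustration; not part of the original
# module). The connection details and file paths are hypothetical, a reachable
# MySQL server is required, and `database_strategy` stands in for a parsed
# pynonymizer strategy object.
if __name__ == "__main__":
    provider = MySqlProvider(
        db_host="127.0.0.1",
        db_user="root",
        db_pass="secret",
        db_name="anon_working_db",
        seed_rows=150,
    )
    provider.create_database()                   # create the working schema
    provider.restore_database("dump.sql.gz")     # pipe the dump into mysql
    # provider.anonymize_database(database_strategy)
    provider.dump_database("anonymized.sql.gz")  # stream mysqldump output to a file
    provider.drop_database()                     # remove the working schema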
|
from numpy import *
from scipy import *
from scipy.signal import remez, resample
from .halfbandfir import halfbandfir
from fractions import gcd
from .upfirdn import upfirdn
def resample_cascade(x, fs_start, fs_end, N=42):
"""
Resample a signal from one sampling frequency to another, using a halfband
filter cascade in the case of drastic resampling ratios, and using polyphase
implementations whenever possible. See halfbandfir() for an explanation of
halfband filters and their application to downsampling, and upfirdn() for an
explanation of polyphase filtering.
Parameters
----------
x : 1-D signal array
The input signal array
fs_start : int
The starting sampling frequency
fs_end : int
The resultant sampling frequency
Returns
-------
y : 1-D signal array
        The resampled signal, of length approximately len(x)*(fs_end/fs_start)
"""
# I'd be very surprised if this is actually ever hit, but let's cover our
# bases
fs_start = int(fs_start)
fs_end = int(fs_end)
if fs_start == fs_end:
return x
fs_start = float(fs_start)
fs_end = float(fs_end)
# Generate our halfband fir filter just in case
h = halfbandfir(N)
# Let's walk through the filter cascade
num_steps = int(abs(log2(fs_start / fs_end)))
# Are we upsampling or downsampling?
if fs_start < fs_end:
for step in range(num_steps):
x = 2 * upfirdn(x, h, uprate=2)[len(h) // 2:-len(h) // 2 + 1]
fs = fs_start * (2.0**num_steps)
else:
for step in range(num_steps):
x = upfirdn(x, h, downrate=2)[len(h) // 4:-len(h) // 4 + 1]
fs = fs_start / (2.0**num_steps)
if fs != fs_end:
# Now that we're less than a power of two off, we use the typical resample filter
        # to finish off, since it works just fine for short filters
x = resample(x, int(round(fs_end / fs * len(x))))
return x
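# --- A small usage sketch (added for illustration; not part of the original
# module). Downsampling a 1 kHz tone from 48 kHz to 16 kHz spans more than one
# octave, so one halfband stage runs before the final polyphase resample.
# Because this module uses relative imports, run it via `python -m <package>.<module>`.
if __name__ == "__main__":
    fs_start, fs_end = 48000, 16000
    t = arange(fs_start) / float(fs_start)   # one second of signal
    x = sin(2 * pi * 1000 * t)               # 1 kHz test tone
    y = resample_cascade(x, fs_start, fs_end)
    print(len(x), "->", len(y))              # roughly fs_end samples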
|
# Python if Statements
# if Statement
sandwich_order = "Ham Roll"
if sandwich_order == "Ham Roll":
print("Price: $1.75")
# if else Statement
tab = 29.95
if tab > 20:
print("This user has a tab over $20 that needs to be paid.")
else:
print("This user's tab is below $20 that does not require immediate payment.")
# elif Statement
sandwich_order = "Bacon Roll"
if sandwich_order == "Ham Roll":
print("Price: $1.75")
elif sandwich_order == "Cheese Roll":
print("Price: $1.80")
elif sandwich_order == "Bacon Roll":
print("Price: $2.10")
else:
print("Price: $2.00")
# Nested if Statement
sandwich_order = "Other Filled Roll"
if sandwich_order != "Other Filled Roll":
if sandwich_order == "Ham Roll":
print("Price: $1.75")
if sandwich_order == "Cheese Roll":
print("Price: $1.80")
elif sandwich_order == "Bacon Roll":
print("Price: $2.10")
else:
print("Price: $2.00")
|
"""
This is the base file of the friendly computing machine!
"""
from . import math
from .math import mult
|
# -*- coding: utf-8 -*-
import os
import sys
from news_crawler.spiders import BaseSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from datetime import datetime
sys.path.insert(0, os.path.join(os.getcwd(), "..",))
from news_crawler.items import NewsCrawlerItem
from news_crawler.utils import remove_empty_paragraphs
class JungleSpider(BaseSpider):
"""Spider for jungle.world"""
name = 'jungle'
rotate_user_agent = True
allowed_domains = ['jungle.world']
start_urls = ['https://jungle.world/']
# Exclude pages without relevant articles
rules = (
Rule(
LinkExtractor(
allow=(r'jungle\.world\/artikel\/.*'),
deny=(r'jungle\.world\/abo')
),
callback='parse_item',
follow=True
),
)
def parse_item(self, response):
"""
Checks article validity. If valid, it parses it.
"""
        title_meta = response.xpath('//meta[@name="dcterms.title"]/@content').get()
        if title_meta and 'Anmeldung erforderlich' in title_meta:
            return
# Check if page is duplicate
if '?page=' in response.url:
return
# Check date validity
creation_date = response.xpath('//div/span[@class="date"]/text()').get()
if not creation_date:
return
creation_date = creation_date.strip()
if creation_date == '':
return
creation_date = datetime.strptime(creation_date, '%d.%m.%Y')
if self.is_out_of_date(creation_date):
return
# Extract the article's paragraphs
paragraphs = [node.xpath('string()').get().strip() for node in response.xpath('//div[@class="lead"] | //p[not(ancestor::div[@class="caption"]) and not(descendant::a[@class="btn btn-default scrollTop"])]')]
paragraphs = remove_empty_paragraphs(paragraphs)
text = ' '.join([para for para in paragraphs])
# Check article's length validity
if not self.has_min_length(text):
return
# Check keywords validity
if not self.has_valid_keywords(text):
return
# Parse the valid article
item = NewsCrawlerItem()
item['news_outlet'] = 'jungle'
item['provenance'] = response.url
item['query_keywords'] = self.get_query_keywords()
# Get creation, modification, and crawling dates
item['creation_date'] = creation_date.strftime('%d.%m.%Y')
item['last_modified'] = creation_date.strftime('%d.%m.%Y')
item['crawl_date'] = datetime.now().strftime('%d.%m.%Y')
# Get authors
authors = response.xpath('//meta[@name="dcterms.publisher"]/@content').get()
item['author_person'] = authors.split(', ') if authors else list()
item['author_organization'] = list()
# Extract keywords
news_keywords = response.xpath('//meta[@name="keywords"]/@content').get()
item['news_keywords'] = news_keywords.split(', ') if news_keywords else list()
# Get title, description, and body of article
title = response.xpath('//meta[@property="og:title"]/@content').get()
description = response.xpath('//meta[@property="og:description"]/@content').get().split(' • ')[0]
# Body as dictionary: key = headline (if available, otherwise empty string), values = list of corresponding paragraphs
body = dict()
# The articles have no headlines, just paragraphs
body[''] = paragraphs
item['content'] = {'title': title, 'description': description, 'body':body}
# No recommendations related to the current article available
item['recommendations'] = list()
item['response_body'] = response.body
yield item
|
# Generated by Django 2.2.7 on 2019-12-11 08:37
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(default='', max_length=150, unique=True)),
('logged_on', models.BooleanField(default=False)),
('friends', models.CharField(default=None, max_length=500, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
('bio', models.TextField()),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# coding: utf-8
# flake8: noqa
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from embyapi.models.activity_log_entry import ActivityLogEntry
from embyapi.models.all_theme_media_result import AllThemeMediaResult
from embyapi.models.authenticate_user import AuthenticateUser
from embyapi.models.authenticate_user_by_name import AuthenticateUserByName
from embyapi.models.authentication_authentication_result import AuthenticationAuthenticationResult
from embyapi.models.base_item_dto import BaseItemDto
from embyapi.models.base_item_person import BaseItemPerson
from embyapi.models.branding_branding_options import BrandingBrandingOptions
from embyapi.models.chapter_info import ChapterInfo
from embyapi.models.client_capabilities import ClientCapabilities
from embyapi.models.collections_collection_creation_result import CollectionsCollectionCreationResult
from embyapi.models.configuration_access_schedule import ConfigurationAccessSchedule
from embyapi.models.configuration_codec_configuration import ConfigurationCodecConfiguration
from embyapi.models.configuration_image_option import ConfigurationImageOption
from embyapi.models.configuration_library_options import ConfigurationLibraryOptions
from embyapi.models.configuration_media_path_info import ConfigurationMediaPathInfo
from embyapi.models.configuration_path_substitution import ConfigurationPathSubstitution
from embyapi.models.configuration_server_configuration import ConfigurationServerConfiguration
from embyapi.models.configuration_type_options import ConfigurationTypeOptions
from embyapi.models.configuration_user_configuration import ConfigurationUserConfiguration
from embyapi.models.connect_connect_authentication_exchange_result import ConnectConnectAuthenticationExchangeResult
from embyapi.models.connect_user_link_result import ConnectUserLinkResult
from embyapi.models.create_user_by_name import CreateUserByName
from embyapi.models.default_directory_browser_info import DefaultDirectoryBrowserInfo
from embyapi.models.devices_content_upload_history import DevicesContentUploadHistory
from embyapi.models.devices_device_info import DevicesDeviceInfo
from embyapi.models.devices_device_options import DevicesDeviceOptions
from embyapi.models.devices_local_file_info import DevicesLocalFileInfo
from embyapi.models.display_preferences import DisplayPreferences
from embyapi.models.dlna_codec_profile import DlnaCodecProfile
from embyapi.models.dlna_container_profile import DlnaContainerProfile
from embyapi.models.dlna_device_identification import DlnaDeviceIdentification
from embyapi.models.dlna_device_profile import DlnaDeviceProfile
from embyapi.models.dlna_device_profile_info import DlnaDeviceProfileInfo
from embyapi.models.dlna_direct_play_profile import DlnaDirectPlayProfile
from embyapi.models.dlna_http_header_info import DlnaHttpHeaderInfo
from embyapi.models.dlna_profile_condition import DlnaProfileCondition
from embyapi.models.dlna_response_profile import DlnaResponseProfile
from embyapi.models.dlna_subtitle_profile import DlnaSubtitleProfile
from embyapi.models.dlna_transcoding_profile import DlnaTranscodingProfile
from embyapi.models.dlna_xml_attribute import DlnaXmlAttribute
from embyapi.models.emby_notifications_api_notification import EmbyNotificationsApiNotification
from embyapi.models.emby_notifications_api_notification_result import EmbyNotificationsApiNotificationResult
from embyapi.models.emby_notifications_api_notifications_summary import EmbyNotificationsApiNotificationsSummary
from embyapi.models.emby_reports_api_model_report_group import EmbyReportsApiModelReportGroup
from embyapi.models.emby_reports_api_model_report_header import EmbyReportsApiModelReportHeader
from embyapi.models.emby_reports_api_model_report_item import EmbyReportsApiModelReportItem
from embyapi.models.emby_reports_api_model_report_result import EmbyReportsApiModelReportResult
from embyapi.models.emby_reports_api_model_report_row import EmbyReportsApiModelReportRow
from embyapi.models.external_id_info import ExternalIdInfo
from embyapi.models.external_url import ExternalUrl
from embyapi.models.forgot_password import ForgotPassword
from embyapi.models.forgot_password_pin import ForgotPasswordPin
from embyapi.models.game_system_summary import GameSystemSummary
from embyapi.models.general_command import GeneralCommand
from embyapi.models.globalization_country_info import GlobalizationCountryInfo
from embyapi.models.globalization_culture_dto import GlobalizationCultureDto
from embyapi.models.globalization_localizaton_option import GlobalizationLocalizatonOption
from embyapi.models.io_file_system_entry_info import IOFileSystemEntryInfo
from embyapi.models.image_by_name_info import ImageByNameInfo
from embyapi.models.image_info import ImageInfo
from embyapi.models.image_provider_info import ImageProviderInfo
from embyapi.models.item_counts import ItemCounts
from embyapi.models.library_add_media_path import LibraryAddMediaPath
from embyapi.models.library_add_virtual_folder import LibraryAddVirtualFolder
from embyapi.models.library_delete_info import LibraryDeleteInfo
from embyapi.models.library_library_option_info import LibraryLibraryOptionInfo
from embyapi.models.library_library_options_result import LibraryLibraryOptionsResult
from embyapi.models.library_library_type_options import LibraryLibraryTypeOptions
from embyapi.models.library_media_folder import LibraryMediaFolder
from embyapi.models.library_media_update_info import LibraryMediaUpdateInfo
from embyapi.models.library_post_updated_media import LibraryPostUpdatedMedia
from embyapi.models.library_rename_virtual_folder import LibraryRenameVirtualFolder
from embyapi.models.library_sub_folder import LibrarySubFolder
from embyapi.models.library_update_library_options import LibraryUpdateLibraryOptions
from embyapi.models.library_update_media_path import LibraryUpdateMediaPath
from embyapi.models.live_tv_get_programs import LiveTvGetPrograms
from embyapi.models.live_tv_guide_info import LiveTvGuideInfo
from embyapi.models.live_tv_listing_provider_type_info import LiveTvListingProviderTypeInfo
from embyapi.models.live_tv_listings_provider_info import LiveTvListingsProviderInfo
from embyapi.models.live_tv_live_tv_info import LiveTvLiveTvInfo
from embyapi.models.live_tv_live_tv_service_info import LiveTvLiveTvServiceInfo
from embyapi.models.live_tv_series_timer_info_dto import LiveTvSeriesTimerInfoDto
from embyapi.models.live_tv_set_channel_mapping import LiveTvSetChannelMapping
from embyapi.models.live_tv_timer_info_dto import LiveTvTimerInfoDto
from embyapi.models.live_tv_tuner_host_info import LiveTvTunerHostInfo
from embyapi.models.log_file import LogFile
from embyapi.models.media_encoding_codecs_common_types_bit_rate import MediaEncodingCodecsCommonTypesBitRate
from embyapi.models.media_encoding_codecs_common_types_level_information import MediaEncodingCodecsCommonTypesLevelInformation
from embyapi.models.media_encoding_codecs_common_types_profile_information import MediaEncodingCodecsCommonTypesProfileInformation
from embyapi.models.media_encoding_codecs_common_types_profile_level_information import MediaEncodingCodecsCommonTypesProfileLevelInformation
from embyapi.models.media_encoding_codecs_common_types_resolution import MediaEncodingCodecsCommonTypesResolution
from embyapi.models.media_encoding_codecs_common_types_resolution_with_rate import MediaEncodingCodecsCommonTypesResolutionWithRate
from embyapi.models.media_encoding_codecs_video_codecs_video_codec_base import MediaEncodingCodecsVideoCodecsVideoCodecBase
from embyapi.models.media_info_live_stream_request import MediaInfoLiveStreamRequest
from embyapi.models.media_info_live_stream_response import MediaInfoLiveStreamResponse
from embyapi.models.media_info_playback_info_request import MediaInfoPlaybackInfoRequest
from embyapi.models.media_info_playback_info_response import MediaInfoPlaybackInfoResponse
from embyapi.models.media_source_info import MediaSourceInfo
from embyapi.models.media_stream import MediaStream
from embyapi.models.media_url import MediaUrl
from embyapi.models.metadata_editor_info import MetadataEditorInfo
from embyapi.models.name_id_pair import NameIdPair
from embyapi.models.name_long_id_pair import NameLongIdPair
from embyapi.models.name_value_pair import NameValuePair
from embyapi.models.net_end_point_info import NetEndPointInfo
from embyapi.models.news_news_item import NewsNewsItem
from embyapi.models.notifications_notification_type_info import NotificationsNotificationTypeInfo
from embyapi.models.parental_rating import ParentalRating
from embyapi.models.play_request import PlayRequest
from embyapi.models.playback_progress_info import PlaybackProgressInfo
from embyapi.models.playback_reporting_api_custom_query import PlaybackReportingApiCustomQuery
from embyapi.models.playback_start_info import PlaybackStartInfo
from embyapi.models.playback_stop_info import PlaybackStopInfo
from embyapi.models.player_state_info import PlayerStateInfo
from embyapi.models.playlists_playlist_creation_result import PlaylistsPlaylistCreationResult
from embyapi.models.playstate_request import PlaystateRequest
from embyapi.models.plugins_plugin_info import PluginsPluginInfo
from embyapi.models.providers_album_info import ProvidersAlbumInfo
from embyapi.models.providers_artist_info import ProvidersArtistInfo
from embyapi.models.providers_book_info import ProvidersBookInfo
from embyapi.models.providers_box_set_info import ProvidersBoxSetInfo
from embyapi.models.providers_game_info import ProvidersGameInfo
from embyapi.models.providers_movie_info import ProvidersMovieInfo
from embyapi.models.providers_music_video_info import ProvidersMusicVideoInfo
from embyapi.models.providers_person_lookup_info import ProvidersPersonLookupInfo
from embyapi.models.providers_remote_search_query_providers_album_info import ProvidersRemoteSearchQueryProvidersAlbumInfo
from embyapi.models.providers_remote_search_query_providers_artist_info import ProvidersRemoteSearchQueryProvidersArtistInfo
from embyapi.models.providers_remote_search_query_providers_book_info import ProvidersRemoteSearchQueryProvidersBookInfo
from embyapi.models.providers_remote_search_query_providers_box_set_info import ProvidersRemoteSearchQueryProvidersBoxSetInfo
from embyapi.models.providers_remote_search_query_providers_game_info import ProvidersRemoteSearchQueryProvidersGameInfo
from embyapi.models.providers_remote_search_query_providers_movie_info import ProvidersRemoteSearchQueryProvidersMovieInfo
from embyapi.models.providers_remote_search_query_providers_music_video_info import ProvidersRemoteSearchQueryProvidersMusicVideoInfo
from embyapi.models.providers_remote_search_query_providers_person_lookup_info import ProvidersRemoteSearchQueryProvidersPersonLookupInfo
from embyapi.models.providers_remote_search_query_providers_series_info import ProvidersRemoteSearchQueryProvidersSeriesInfo
from embyapi.models.providers_remote_search_query_providers_trailer_info import ProvidersRemoteSearchQueryProvidersTrailerInfo
from embyapi.models.providers_series_info import ProvidersSeriesInfo
from embyapi.models.providers_song_info import ProvidersSongInfo
from embyapi.models.providers_trailer_info import ProvidersTrailerInfo
from embyapi.models.public_system_info import PublicSystemInfo
from embyapi.models.query_filters import QueryFilters
from embyapi.models.query_filters_legacy import QueryFiltersLegacy
from embyapi.models.query_result_activity_log_entry import QueryResultActivityLogEntry
from embyapi.models.query_result_base_item_dto import QueryResultBaseItemDto
from embyapi.models.query_result_devices_device_info import QueryResultDevicesDeviceInfo
from embyapi.models.query_result_live_tv_series_timer_info_dto import QueryResultLiveTvSeriesTimerInfoDto
from embyapi.models.query_result_live_tv_timer_info_dto import QueryResultLiveTvTimerInfoDto
from embyapi.models.query_result_news_news_item import QueryResultNewsNewsItem
from embyapi.models.query_result_sync_model_sync_job_item import QueryResultSyncModelSyncJobItem
from embyapi.models.query_result_sync_sync_job import QueryResultSyncSyncJob
from embyapi.models.query_result_user_library_official_rating_item import QueryResultUserLibraryOfficialRatingItem
from embyapi.models.query_result_user_library_tag_item import QueryResultUserLibraryTagItem
from embyapi.models.queue_item import QueueItem
from embyapi.models.recommendation_dto import RecommendationDto
from embyapi.models.remote_image_info import RemoteImageInfo
from embyapi.models.remote_image_result import RemoteImageResult
from embyapi.models.remote_search_result import RemoteSearchResult
from embyapi.models.remote_subtitle_info import RemoteSubtitleInfo
from embyapi.models.roku_metadata_api_thumbnail_info import RokuMetadataApiThumbnailInfo
from embyapi.models.roku_metadata_api_thumbnail_set_info import RokuMetadataApiThumbnailSetInfo
from embyapi.models.search_search_hint import SearchSearchHint
from embyapi.models.search_search_hint_result import SearchSearchHintResult
from embyapi.models.session_session_info import SessionSessionInfo
from embyapi.models.session_user_info import SessionUserInfo
from embyapi.models.sync_model_item_file_info import SyncModelItemFileInfo
from embyapi.models.sync_model_sync_data_request import SyncModelSyncDataRequest
from embyapi.models.sync_model_sync_data_response import SyncModelSyncDataResponse
from embyapi.models.sync_model_sync_dialog_options import SyncModelSyncDialogOptions
from embyapi.models.sync_model_sync_job_creation_result import SyncModelSyncJobCreationResult
from embyapi.models.sync_model_sync_job_item import SyncModelSyncJobItem
from embyapi.models.sync_model_sync_job_request import SyncModelSyncJobRequest
from embyapi.models.sync_model_sync_profile_option import SyncModelSyncProfileOption
from embyapi.models.sync_model_sync_quality_option import SyncModelSyncQualityOption
from embyapi.models.sync_model_synced_item import SyncModelSyncedItem
from embyapi.models.sync_model_synced_item_progress import SyncModelSyncedItemProgress
from embyapi.models.sync_sync_job import SyncSyncJob
from embyapi.models.sync_sync_target import SyncSyncTarget
from embyapi.models.system_info import SystemInfo
from embyapi.models.tasks_task_info import TasksTaskInfo
from embyapi.models.tasks_task_result import TasksTaskResult
from embyapi.models.tasks_task_trigger_info import TasksTaskTriggerInfo
from embyapi.models.theme_media_result import ThemeMediaResult
from embyapi.models.transcoding_info import TranscodingInfo
from embyapi.models.tuple_double_double import TupleDoubleDouble
from embyapi.models.update_user_easy_password import UpdateUserEasyPassword
from embyapi.models.update_user_password import UpdateUserPassword
from embyapi.models.updates_installation_info import UpdatesInstallationInfo
from embyapi.models.updates_package_info import UpdatesPackageInfo
from embyapi.models.updates_package_version_info import UpdatesPackageVersionInfo
from embyapi.models.user_dto import UserDto
from embyapi.models.user_item_data_dto import UserItemDataDto
from embyapi.models.user_library_official_rating_item import UserLibraryOfficialRatingItem
from embyapi.models.user_library_tag_item import UserLibraryTagItem
from embyapi.models.users_forgot_password_result import UsersForgotPasswordResult
from embyapi.models.users_pin_redeem_result import UsersPinRedeemResult
from embyapi.models.users_user_action import UsersUserAction
from embyapi.models.users_user_policy import UsersUserPolicy
from embyapi.models.validate_path import ValidatePath
from embyapi.models.virtual_folder_info import VirtualFolderInfo
from embyapi.models.wake_on_lan_info import WakeOnLanInfo
from embyapi.models.web_dashboard_api_configuration_page_info import WebDashboardApiConfigurationPageInfo
|
from typing import Any, Callable, Dict, List, Optional, Tuple
from concurrent.futures import Executor, Future
from itertools import repeat
from freak.models.response import EngineResponse
from networkx import DiGraph
from networkx.algorithms.dag import is_directed_acyclic_graph
def submit_and_execute_single_job(
executor: Executor,
func: Callable[..., Tuple[EngineResponse, Dict[str, Any]]],
job_args: Tuple[str, Any, Dict[str, Any]],
) -> Future: # type: ignore
future = executor.submit(func, *job_args)
return future
def validate_flow(step_graph: Dict[Optional[str], List[str]]) -> bool:
"""
flow must be a DAG.
"""
edges: List[str] = []
for key, value in step_graph.items():
if key is None or not value:
continue
edges.extend(zip(repeat(key, len(value)), value)) # type: ignore
graph = DiGraph()
graph.add_edges_from(edges)
is_dag: bool = is_directed_acyclic_graph(graph)
return is_dag
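# --- A small usage sketch (added for illustration; not part of the original
# module). The step names are hypothetical: a linear flow is a DAG, while a
# back-edge makes validate_flow return False.
if __name__ == "__main__":
    linear_flow = {None: ["collect"], "collect": ["transform"], "transform": ["publish"]}
    cyclic_flow = {"collect": ["transform"], "transform": ["collect"]}
    print(validate_flow(linear_flow))   # True
    print(validate_flow(cyclic_flow))   # False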
|
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from pickle import load
# map an integer to a word
def word_for_id(integer, tokenizer):
return tokenizer.index_word.get(integer)
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'startseq'
# iterate over the whole length of the sequence
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
#print("sequence after tok: ", sequence)
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
if i==0:
photo = np.expand_dims(photo, axis=0)
#print("photo: ", photo)
#print("sequence: ", sequence)
yhat = model.predict([photo, sequence], verbose=0)
# convert probability to integer
yhat = np.argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
def image_to_feat_vec(imagePath):
img1 = image.load_img(imagePath, target_size=(224, 224))
x = image.img_to_array(img1)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
resnet = tf.keras.models.load_model('/mnt/f/Projects/OcuHelp/backend/resnet50.h5')
fea_x = resnet.predict(x)
fea_x1 = np.reshape(fea_x , fea_x.shape[1])
return fea_x1
def generate_captions(photo_path):
tokenizer = load(open('/mnt/f/Projects/OcuHelp/backend/tokenizer.pkl', 'rb'))
max_length = 34
model = tf.keras.models.load_model('/mnt/f/Projects/OcuHelp/backend/model.h5')
photo = image_to_feat_vec(photo_path)
description = generate_desc(model, tokenizer, photo, max_length)
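    # strip the leading 'startseq ' marker (9 chars) and trailing 'endseq' marker (6 chars)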
description = description[9:-6]
return description
# print(generate_captions("/home/jkt/Downloads/image-captioning-app/backend/IMG-20210502-WA0003 (1).jpg"))
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from threading import Thread
import time
class KeepAlive(Thread):
running = False
call_every = 60
time = 0
func = None
__active = False
def __init__(self, interval, func):
super(KeepAlive, self).__init__()
self.call_every = interval
self.func = func
@property
def active(self):
""" activated """
return self.__active
@active.setter
def active(self, state):
self.__active = state
if state:
self.func()
self.time = 0
def run(self):
self.time = 0
self.running = True
while self.running:
time.sleep(1)
self.time = self.time + 1
if self.time > self.call_every:
self.time = self.time - self.call_every
if self.__active:
self.func()
def stop(self):
self.running = False
self.join()
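# --- A small usage sketch (added for illustration; not part of the original
# module). The heartbeat function is hypothetical; setting `active = True`
# fires it immediately and then roughly every `call_every` seconds.
if __name__ == "__main__":
    def heartbeat():
        print("ping")

    keepalive = KeepAlive(2, heartbeat)
    keepalive.start()
    keepalive.active = True
    time.sleep(5)        # expect the immediate ping plus one or two more
    keepalive.stop()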
|
from py21cmmc_fg.c_wrapper import stitch_and_coarsen_sky
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 10, 23)
X, Y = np.meshgrid(x,x)
z = X**2 + Y**2
z = np.atleast_3d(z)
z = np.tile(z ,(1,1,6))
print(z.shape)
plt.imshow(z[:,:,0])
plt.savefig("derp1.png")
z = np.atleast_3d(z)
out = stitch_and_coarsen_sky(z, 1, 2.3, 27)
plt.clf()
print(out)
plt.imshow(out[:,:,0])
plt.savefig("derp2.png")
|
"""
Provides :py:class:`UnaryMixin` mixin class
It provides unary operator support to the :py:class:`Lazy` class
"""
from .core import Lazy, LazyMixin
from .ops import lazy_operator
class UnaryMixin(LazyMixin):
"""
    Unary operator support
"""
@lazy_operator
def __neg__(self: Lazy) -> Lazy:
pass # pragma: no cover
@lazy_operator
def __pos__(self: Lazy) -> Lazy:
pass # pragma: no cover
@lazy_operator
def __abs__(self: Lazy) -> Lazy:
pass # pragma: no cover
@lazy_operator
def __invert__(self: Lazy) -> Lazy:
pass # pragma: no cover
|
'''
Questions
1. Can the statue list be given sorted?
Observations
1. All sizes are non negative integers, unique. No repeats
2. kind of like sorting an array and seeing how many elements need to be in between
3. What is the minimum number of additional statues needed? 0. The maximum? The range of list, or largest-smallest
4. all statues are used.
5. statue list can be sorted or unsorted.
Solution 1: Sort the array using counting sort (it doesn't have to be stable), which is O(n). Then walk the sorted list and count how many statues are missing by checking whether each element is exactly 1 greater than the previous one; if not, add the gap to a counter. Uses O(n) additional space.
Solution 2: Similar to solution 1, but use a dictionary/map instead. The keys are the sizes and the values are 0 or 1. Find the largest and smallest elements, then iterate over that range and check the map for each size. O(n) time with O(n) space.
Solution 3: Is there a way to do it with constant space?
Yes you can. If you know the largest and smallest elements in the list, you know the size of the full consecutive range (largest - smallest + 1). Subtract the length of the list from that range and you get the number of missing statues. O(n) time and constant space.
2, 3, 4, 5, 6, 7, 8
I I I I
'''
def makeArrayConsecutive2(statues):
#largest possible answer
answer = max(statues) - min(statues) - 1
return answer - len(statues) + 2
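# --- Quick sanity checks (added for illustration; not part of the original
# solution). The formula simplifies to (max - min + 1) - len(statues): the size
# of the full consecutive range minus the statues already present.
if __name__ == "__main__":
    assert makeArrayConsecutive2([6, 2, 3, 8]) == 3   # need 4, 5 and 7
    assert makeArrayConsecutive2([5, 4, 6]) == 0      # already consecutive
    assert makeArrayConsecutive2([1]) == 0            # a single statue needs nothing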
|
from collections import defaultdict
import numpy as np
# from itertools import zip_longest as zip
def return_dim_ndarray(value):
""" helper function to always have at least 1d numpy array returned """
if isinstance(value, list):
return np.array(value)
elif isinstance(value, np.ndarray):
return value
else:
return np.array([value])
class PhydraModel:
"""Backend model class
- collects all things relevant to the model instance (i.e. variables, parameters, ...)
- can be solved by passing it to the SolverABC class (that's where conversion (if necessary) happens)
"""
def __init__(self):
self.time = None
self.variables = defaultdict()
self.parameters = defaultdict()
self.forcing_func = defaultdict()
self.forcings = defaultdict()
self.fluxes = defaultdict()
self.flux_values = defaultdict()
self.fluxes_per_var = defaultdict(list)
self.var_dims = defaultdict()
self.flux_dims = defaultdict()
self.full_model_dims = defaultdict()
def __repr__(self):
return (f"Model contains: \n"
f"Variables:{[var for var in self.variables]} \n"
f"Parameters:{[par for par in self.parameters]} \n"
f"Forcings:{[forc for forc in self.forcings]} \n"
f"Fluxes:{[flx for flx in self.fluxes]} \n"
f"Full Model Dimensions:{[(state,dim) for state,dim in self.full_model_dims.items()]} \n")
def unpack_flat_state(self, flat_state):
""" """
state_dict = defaultdict()
index = 0
for key, dims in self.full_model_dims.items():
if dims is None:
state_dict[key] = flat_state[index]
index += 1
elif isinstance(dims, int):
state_dict[key] = flat_state[index:index+dims]
index += dims
else:
_length = np.prod(dims)
state_dict[key] = flat_state[index:index+_length].reshape(dims) # np.array( )
index += _length
return state_dict
def model_function(self, current_state, time=None, forcing=None):
""" general model function that matches fluxes to state variables
:param current_state:
:param time: argument is necessary for odeint solve
:param forcing:
:return:
"""
state = self.unpack_flat_state(current_state)
# Return forcings for time point:
if time is not None:
forcing_now = defaultdict()
for key, func in self.forcing_func.items():
forcing_now[key] = func(time)
forcing = forcing_now
elif forcing is None:
forcing = self.forcings
# Compute fluxes:
flux_values = defaultdict()
fluxes_out = []
for flx_label, flux in self.fluxes.items():
_value = return_dim_ndarray(flux(state=state, parameters=self.parameters, forcings=forcing))
flux_values[flx_label] = _value
fluxes_out.append(_value)
if flx_label in state:
state.update({flx_label: _value})
# Route list input fluxes:
list_input_fluxes = defaultdict(list)
for flux_var_dict in self.fluxes_per_var["list_input"]:
flux_label, negative, list_input = flux_var_dict.values()
flux_val = flux_values[flux_label]
flux_dims = self.full_model_dims[flux_label]
list_var_dims = []
for var in list_input:
_dim = self.full_model_dims[var]
list_var_dims.append(_dim or 1)
if len(list_input) == flux_dims:
for var, flux in zip(list_input, flux_val):
if negative:
list_input_fluxes[var].append(-flux)
else:
list_input_fluxes[var].append(flux)
elif sum(list_var_dims) == flux_dims:
_dim_counter = 0
for var, dims in zip(list_input, list_var_dims):
flux = flux_val[_dim_counter:_dim_counter+dims]
_dim_counter += dims
if negative:
list_input_fluxes[var].append(-flux)
else:
list_input_fluxes[var].append(flux)
else:
raise Exception("ERROR: list input vars dims and flux output dims do not match")
# Assign fluxes to variables:
state_out = []
for var_label, value in self.variables.items():
var_fluxes = []
dims = self.full_model_dims[var_label]
flux_applied = False
if var_label in self.fluxes_per_var:
flux_applied = True
for flux_var_dict in self.fluxes_per_var[var_label]:
flux_label, negative, list_input = flux_var_dict.values()
if dims:
_flux = flux_values[flux_label]
else:
_flux = np.sum(flux_values[flux_label])
if negative:
var_fluxes.append(-_flux)
else:
var_fluxes.append(_flux)
if var_label in list_input_fluxes:
flux_applied = True
for flux in list_input_fluxes[var_label]:
if dims:
_flux = flux
else:
_flux = np.sum(flux)
var_fluxes.append(_flux)
if not flux_applied:
dims = self.full_model_dims[var_label]
if dims:
var_fluxes.append(np.array([0 for i in range(dims)]))
else:
var_fluxes.append(0)
state_out.append(np.sum(var_fluxes, axis=0))
full_output = np.concatenate([[v for val in state_out for v in val.ravel()],
[v for val in fluxes_out for v in val.ravel()]], axis=None)
return full_output
|
import os
import slivka.conf.logging
import slivka.server
home = os.path.dirname(os.path.abspath(__file__))
os.environ.setdefault('SLIVKA_HOME', home)
slivka.conf.logging.configure_logging()
application = app = slivka.server.create_app()
|
from typing import TypedDict
class IAppConfig(TypedDict):
appDbConStr: str
dumpFolder: str
flaskSecret: str
flaskPort: str
logstashHost: str
logstashPort: int
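# --- A hypothetical example value (added for illustration; not part of the
# original module). TypedDicts are plain dicts at runtime, so a config can be
# written as a literal and type-checked against IAppConfig.
EXAMPLE_APP_CONFIG: IAppConfig = {
    "appDbConStr": "postgresql://user:password@localhost:5432/app",
    "dumpFolder": "/tmp/dumps",
    "flaskSecret": "change-me",
    "flaskPort": "5000",
    "logstashHost": "localhost",
    "logstashPort": 5044,
}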
|
# ResNet-18, 5 classes, 25,000 images, 100 linear probing epochs, no bottleneck, classical.
def results():
tepochs = [11, 21, 51, 101, 201, 300]
pepochs = range(0, 101)
accs = [[68.14, 69.1, 70.64, 70.04, 71.34, 70.32, 71.78, 71.52, 72.12, 71.1, 72.26, 71.9, 72.56, 72.64, 73.0, 72.26,
72.98, 72.56, 73.64, 73.24, 72.34, 73.92, 72.24, 73.42, 72.68, 73.38, 72.7, 73.34, 73.6, 73.9, 73.92,
73.34, 74.12, 74.1, 73.32, 73.98, 74.4, 73.92, 74.12, 73.82, 74.68, 74.0, 73.88, 74.68, 74.48, 74.58,
74.44, 74.06, 74.32, 74.14, 74.98, 74.6, 74.62, 75.02, 74.38, 75.0, 74.24, 74.74, 74.9, 74.42, 75.06,
75.08, 75.28, 75.42, 74.96, 75.3, 75.16, 75.18, 75.18, 75.22, 75.24, 75.12, 75.16, 75.14, 75.24, 75.08,
75.26, 75.18, 75.22, 75.16, 75.12, 75.18, 75.14, 75.08, 75.04, 75.12, 75.04, 75.14, 75.04, 75.08, 75.14,
75.06, 75.1, 75.1, 75.2, 75.14, 75.16, 75.08, 75.08, 75.1],
[68.84, 72.6, 73.34, 74.14, 74.94, 75.24, 75.24, 75.52, 75.88, 75.0, 76.16, 75.44, 76.08, 75.82, 76.24,
76.46, 76.76, 76.56, 76.6, 76.66, 76.9, 76.72, 76.34, 77.2, 76.72, 77.06, 77.28, 77.56, 77.34, 76.98,
77.32, 76.64, 77.0, 77.72, 77.52, 76.7, 77.3, 77.76, 77.74, 77.8, 77.98, 77.38, 77.78, 77.38, 77.82, 77.7,
78.24, 77.94, 77.98, 77.86, 77.94, 78.04, 77.48, 77.9, 77.98, 78.04, 77.92, 77.74, 77.28, 77.38, 78.3,
78.38, 78.32, 78.12, 78.28, 78.24, 78.32, 78.22, 78.3, 78.24, 78.36, 78.12, 78.24, 78.2, 78.26, 78.14,
78.48, 77.94, 78.34, 78.3, 78.26, 78.34, 78.22, 78.32, 78.28, 78.38, 78.3, 78.36, 78.22, 78.42, 78.38,
78.44, 78.34, 78.24, 78.32, 78.32, 78.44, 78.3, 78.34, 78.34],
[73.78, 76.18, 76.2, 77.76, 78.02, 77.48, 78.72, 78.5, 78.4, 78.74, 79.04, 79.34, 79.12, 79.18, 79.42,
79.06, 79.52, 79.9, 79.4, 80.0, 80.24, 79.86, 79.88, 79.94, 80.16, 80.26, 80.06, 80.46, 79.8, 80.58, 80.66,
80.66, 80.14, 80.7, 80.22, 79.76, 80.22, 80.46, 80.7, 80.3, 79.96, 80.72, 80.6, 80.84, 80.96, 80.94, 80.02,
81.12, 80.96, 80.3, 80.38, 81.12, 81.08, 80.98, 81.02, 80.84, 79.94, 80.92, 79.94, 80.54, 81.28, 81.16,
81.2, 81.3, 81.24, 80.84, 81.26, 81.34, 81.02, 81.2, 81.2, 81.1, 81.28, 81.12, 81.3, 81.1, 81.08, 80.78,
81.34, 81.32, 81.26, 81.16, 81.14, 81.18, 81.16, 81.2, 81.14, 81.16, 81.26, 81.16, 81.16, 81.26, 81.18,
81.12, 81.16, 81.14, 81.2, 81.14, 81.14, 81.12],
[75.82, 78.42, 80.06, 80.26, 80.8, 80.46, 81.16, 81.04, 81.26, 81.44, 81.66, 82.18, 81.7, 81.24, 82.06,
81.86, 82.68, 82.62, 82.1, 82.56, 81.48, 82.62, 82.66, 82.2, 82.9, 82.58, 82.7, 82.3, 82.86, 82.48, 82.72,
82.86, 82.44, 82.72, 82.32, 82.32, 82.54, 81.34, 82.7, 82.26, 82.22, 81.76, 82.8, 82.32, 82.42, 82.2,
83.16, 82.8, 82.3, 83.16, 82.88, 82.64, 83.48, 83.1, 83.14, 83.18, 82.54, 83.24, 83.4, 83.02, 83.48, 83.28,
83.22, 82.94, 83.1, 82.92, 83.2, 82.74, 83.2, 83.14, 83.24, 83.0, 83.02, 82.84, 83.38, 83.1, 83.04, 83.26,
83.0, 83.28, 83.22, 83.24, 83.22, 83.22, 83.18, 83.16, 83.2, 83.16, 83.26, 83.16, 83.12, 83.18, 83.18,
83.12, 83.22, 83.18, 83.1, 83.2, 83.2, 83.12],
[78.66, 80.32, 82.3, 82.62, 83.26, 83.62, 83.84, 83.98, 83.96, 84.14, 84.22, 84.4, 84.86, 84.92, 84.82,
85.18, 84.88, 85.1, 84.88, 85.34, 85.4, 84.6, 85.36, 85.62, 85.46, 85.44, 85.56, 84.72, 85.26, 84.7, 85.42,
85.0, 85.16, 85.74, 85.5, 85.32, 85.0, 84.96, 85.84, 85.56, 85.2, 85.66, 85.64, 85.84, 85.44, 85.34, 85.46,
85.4, 85.52, 85.6, 85.84, 85.64, 85.3, 85.62, 85.5, 85.28, 85.84, 85.62, 85.52, 85.84, 85.76, 85.74, 85.8,
85.74, 85.8, 85.78, 85.78, 85.8, 85.8, 85.8, 85.84, 85.74, 85.74, 85.7, 85.7, 85.82, 85.64, 85.8, 85.76,
85.78, 85.76, 85.78, 85.78, 85.74, 85.76, 85.78, 85.76, 85.74, 85.68, 85.74, 85.78, 85.78, 85.74, 85.74,
85.74, 85.74, 85.74, 85.76, 85.76, 85.72],
[79.7, 82.4, 83.54, 84.64, 84.58, 84.82, 85.36, 85.54, 85.36, 85.14, 85.9, 85.66, 85.8, 86.02, 85.96, 86.06,
85.76, 85.72, 86.2, 86.32, 86.14, 86.06, 86.26, 86.24, 85.84, 86.48, 86.26, 86.46, 86.1, 86.12, 86.5,
86.56, 86.36, 86.42, 85.86, 86.4, 86.42, 86.68, 86.34, 86.38, 86.52, 86.54, 86.36, 86.46, 86.78, 86.78,
86.44, 86.76, 86.62, 86.42, 86.36, 86.46, 86.64, 86.28, 86.64, 86.64, 86.6, 86.66, 86.48, 86.74, 86.58,
86.46, 86.6, 86.6, 86.64, 86.66, 86.74, 86.58, 86.64, 86.6, 86.58, 86.64, 86.6, 86.4, 86.48, 86.6, 86.56,
86.66, 86.6, 86.52, 86.58, 86.54, 86.54, 86.58, 86.52, 86.56, 86.58, 86.54, 86.56, 86.56, 86.54, 86.58,
86.54, 86.54, 86.6, 86.54, 86.56, 86.56, 86.58, 86.6]]
max_accs = [max(acc) for acc in accs]
return tepochs, max_accs
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RWatermelon(RPackage):
"""Illumina 450 methylation array normalization and metrics.
15 flavours of betas and three performance metrics, with methods for
objects produced by methylumi and minfi packages."""
bioc = "wateRmelon"
version('2.0.0', commit='f6a331bdf50e0e5c94009fb67be873d996348ade')
version('1.34.0', commit='3fa2745535c22068a438747b41b9d793196098d4')
version('1.30.0', commit='66d7579fe49206d965832288df7937c3d43ed578')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r@3.5.0:', type=('build', 'run'), when='@2.0.0:')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-methylumi', type=('build', 'run'))
depends_on('r-lumi', type=('build', 'run'))
depends_on('r-roc', type=('build', 'run'))
depends_on('r-illuminahumanmethylation450kanno-ilmn12-hg19', type=('build', 'run'))
depends_on('r-illuminaio', type=('build', 'run'))
|
# -*- coding: utf-8 -*-
# Notification Template types
REPLY_TYPE = 'reply'
DOCUMENT_OWNER = 'document_owner'
ROOT_PATH = 'h:notification/templates/'
def includeme(config):
pass
|
import datetime
import uuid
from app.domains.users.validators import is_user_name_valid, is_user_email_valid
from database import db
class Users(db.Document):
    _id = db.StringField(default=lambda: str(uuid.uuid4()), primary_key=True)  # callable default so each document gets a fresh id
name = db.StringField(required=True, validation=is_user_name_valid)
last_name = db.StringField(required=True, validation=is_user_name_valid)
email = db.StringField(required=True, validation=is_user_email_valid)
    created_at = db.DateTimeField(default=datetime.datetime.utcnow)  # pass the callable so the timestamp is evaluated per document
    updated_at = db.DateTimeField(default=datetime.datetime.utcnow)
def serialize(self):
return {
"_id": self._id,
"name": self.name,
"last_name": self.last_name,
"email": self.email,
"created_at": self.created_at,
"updated_at": self.updated_at
}
|
"""
checks to make sure that the challenge_set is internally consistent
usage: python check.py
"""
import json
stats = {
"tests": 0,
"errors": 0,
}
required_playlist_fields = ["num_holdouts", "pid", "num_tracks", "tracks", "num_samples"]
optional_playlist_fields = ["name"] + required_playlist_fields
track_fields = set(['pos', 'artist_name', 'artist_uri', 'track_uri', 'track_name', 'album_uri', 'album_name',
'duration_ms'])
def check_challenge_set():
f = open("challenge_set.json")
js = f.read()
challenge_set = json.loads(js)
f.close()
tassert(challenge_set['version'] == "v1", "proper version")
tassert(len(challenge_set['playlists']) == 10000, "proper number of playlists")
known_ids = set()
unique_tracks = set()
unique_albums = set()
unique_artists = set()
total_tracks = 0
for playlist in challenge_set['playlists']:
ntracks = playlist['num_samples'] + playlist['num_holdouts']
tassert(playlist['pid'] not in known_ids, "unique pid")
tassert(ntracks == playlist['num_tracks'], "consistent num_tracks")
tassert(playlist['num_samples'] == len(playlist['tracks']), "consistent num_samples")
known_ids.add(playlist['pid'])
for field, val in playlist.items():
tassert(field in optional_playlist_fields, "valid playlist field")
for f in required_playlist_fields:
            tassert(f in playlist.keys(), "missing required playlist field " + f)
for track in playlist['tracks']:
for field, val in track.items():
tassert(field in track_fields, "valid track field")
for f in track_fields:
tassert(f in track.keys(), "missing required track field " + f)
unique_tracks.add(track['track_uri'])
unique_albums.add(track['album_uri'])
unique_artists.add(track['artist_uri'])
total_tracks += 1
    tassert(len(known_ids) == 10000, "proper number of unique IDs")
print()
print("stats:")
for k, v in stats.items():
print("%s: %d" % (k, v))
print()
print("total playlists:", len(challenge_set["playlists"]))
print("total tracks: ", total_tracks)
print("unique tracks: ", len(unique_tracks))
print("unique albums: ", len(unique_albums))
print("unique artists: ", len(unique_artists))
print()
if stats['errors'] == 0:
print("challenge_set.json is OK")
else:
print("challenge_set.json has errors")
def tassert(cond, text):
stats['tests'] += 1
if not cond:
stats['errors'] += 1
        print("error: " + text)
if __name__ == '__main__':
check_challenge_set()
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class MULESSolver(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'alpha_correctors': 'int',
'alpha_sub_cycles': 'int',
'compression_coefficient': 'float',
'isotropic_compression_coefficient': 'float',
'semi_implicit': 'OneOfMULESSolverSemiImplicit'
}
attribute_map = {
'type': 'type',
'alpha_correctors': 'alphaCorrectors',
'alpha_sub_cycles': 'alphaSubCycles',
'compression_coefficient': 'compressionCoefficient',
'isotropic_compression_coefficient': 'isotropicCompressionCoefficient',
'semi_implicit': 'semiImplicit'
}
def __init__(self, type='MULES_V7', alpha_correctors=None, alpha_sub_cycles=None, compression_coefficient=None, isotropic_compression_coefficient=None, semi_implicit=None, local_vars_configuration=None): # noqa: E501
"""MULESSolver - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._alpha_correctors = None
self._alpha_sub_cycles = None
self._compression_coefficient = None
self._isotropic_compression_coefficient = None
self._semi_implicit = None
self.discriminator = None
self.type = type
if alpha_correctors is not None:
self.alpha_correctors = alpha_correctors
if alpha_sub_cycles is not None:
self.alpha_sub_cycles = alpha_sub_cycles
if compression_coefficient is not None:
self.compression_coefficient = compression_coefficient
if isotropic_compression_coefficient is not None:
self.isotropic_compression_coefficient = isotropic_compression_coefficient
if semi_implicit is not None:
self.semi_implicit = semi_implicit
@property
def type(self):
"""Gets the type of this MULESSolver. # noqa: E501
Schema name: MULESSolver # noqa: E501
:return: The type of this MULESSolver. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MULESSolver.
Schema name: MULESSolver # noqa: E501
:param type: The type of this MULESSolver. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def alpha_correctors(self):
"""Gets the alpha_correctors of this MULESSolver. # noqa: E501
:return: The alpha_correctors of this MULESSolver. # noqa: E501
:rtype: int
"""
return self._alpha_correctors
@alpha_correctors.setter
def alpha_correctors(self, alpha_correctors):
"""Sets the alpha_correctors of this MULESSolver.
:param alpha_correctors: The alpha_correctors of this MULESSolver. # noqa: E501
:type: int
"""
self._alpha_correctors = alpha_correctors
@property
def alpha_sub_cycles(self):
"""Gets the alpha_sub_cycles of this MULESSolver. # noqa: E501
:return: The alpha_sub_cycles of this MULESSolver. # noqa: E501
:rtype: int
"""
return self._alpha_sub_cycles
@alpha_sub_cycles.setter
def alpha_sub_cycles(self, alpha_sub_cycles):
"""Sets the alpha_sub_cycles of this MULESSolver.
:param alpha_sub_cycles: The alpha_sub_cycles of this MULESSolver. # noqa: E501
:type: int
"""
self._alpha_sub_cycles = alpha_sub_cycles
@property
def compression_coefficient(self):
"""Gets the compression_coefficient of this MULESSolver. # noqa: E501
:return: The compression_coefficient of this MULESSolver. # noqa: E501
:rtype: float
"""
return self._compression_coefficient
@compression_coefficient.setter
def compression_coefficient(self, compression_coefficient):
"""Sets the compression_coefficient of this MULESSolver.
:param compression_coefficient: The compression_coefficient of this MULESSolver. # noqa: E501
:type: float
"""
self._compression_coefficient = compression_coefficient
@property
def isotropic_compression_coefficient(self):
"""Gets the isotropic_compression_coefficient of this MULESSolver. # noqa: E501
:return: The isotropic_compression_coefficient of this MULESSolver. # noqa: E501
:rtype: float
"""
return self._isotropic_compression_coefficient
@isotropic_compression_coefficient.setter
def isotropic_compression_coefficient(self, isotropic_compression_coefficient):
"""Sets the isotropic_compression_coefficient of this MULESSolver.
:param isotropic_compression_coefficient: The isotropic_compression_coefficient of this MULESSolver. # noqa: E501
:type: float
"""
self._isotropic_compression_coefficient = isotropic_compression_coefficient
@property
def semi_implicit(self):
"""Gets the semi_implicit of this MULESSolver. # noqa: E501
:return: The semi_implicit of this MULESSolver. # noqa: E501
:rtype: OneOfMULESSolverSemiImplicit
"""
return self._semi_implicit
@semi_implicit.setter
def semi_implicit(self, semi_implicit):
"""Sets the semi_implicit of this MULESSolver.
:param semi_implicit: The semi_implicit of this MULESSolver. # noqa: E501
:type: OneOfMULESSolverSemiImplicit
"""
self._semi_implicit = semi_implicit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MULESSolver):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MULESSolver):
return True
return self.to_dict() != other.to_dict()
|
# -*- coding: utf-8 -*-
import numpy as np
from ensemble_model import Ensemble_Model
import cv2
COLLISION_RANGE = 0.3
COLLISION_NUM = 10
ARRIVAL_RANGE = 0.48
class PredictEnv:
def __init__(self, model):
self.model = model
def _termination_fn(self, image, goal):
collision = False
arrival = False
num_in_range = np.sum(image[:, 75:80, 75:85], axis=(1,2))
collision = num_in_range > COLLISION_NUM
# print("collision : ", collision.shape)
goal_distance = goal[:, 0] * 25
arrival = goal_distance < ARRIVAL_RANGE
# print("arrival : ", arrival.shape)
return collision, arrival, collision | arrival
def step(self, states, images, transformations, num_pre, action, deterministic=False):
        # Predict
ensemble_out_before, ensemble_out, ensemble_transformation_out, ensemble_vel_ang_out, ensemble_goal_out, ensemble_reward = \
self.model.predict(states, images, transformations, num_pre, action, batch_size=32)
num_models, batch_size, _, _ = ensemble_out.shape
# model_idxes = np.random.choice(self.model.elite_model_idxes, size=batch_size)
model_idxes = 0
batch_idxes = np.arange(0, batch_size)
# ========================= costmap =========================
model_out = ensemble_out[model_idxes, batch_idxes]
model_out_before = ensemble_out_before[model_idxes, batch_idxes]
# ========================= vel_ang & goal & reward =========================
model_transformation = ensemble_transformation_out[model_idxes, batch_idxes]
model_vel_ang = ensemble_vel_ang_out[model_idxes, batch_idxes]
model_goal = ensemble_goal_out[model_idxes, batch_idxes]
model_reward = ensemble_reward[model_idxes, batch_idxes]
next_states = states.copy()
for i in range(states.shape[0]):
next_states[i, 3630:3632] = model_vel_ang[i][:]
next_states[i, 3632:3634] = model_goal[i][:]
images_pre = images.copy()
transformations_pre = transformations.copy()
for i in range(states.shape[0]):
images_pre[i, num_pre[i,0], :, :] = model_out[i][:]
big = images_pre[i, num_pre[i,0]] > 0.36
small = images_pre[i, num_pre[i,0]] < 0.36
images_pre[i, num_pre[i,0]][big] = 1
images_pre[i, num_pre[i,0]][small] = 0
images_pre[i, num_pre[i,0]] = cv2.erode(images_pre[i, num_pre[i,0]], np.ones((3,3), np.uint8), iterations=1)
images_pre[i, num_pre[i,0]] = cv2.dilate(images_pre[i, num_pre[i,0]], np.ones((3,3), np.uint8), iterations=1)
# cv2.imshow('pre_after', images_pre[i, num_pre[i,0]])
# cv2.waitKey(0)
transformations_pre[i, num_pre[i,0], :] = model_transformation[i][:]
num_pre = num_pre + 1
rewards = model_reward
        # Check terminal conditions and adjust rewards accordingly
collision, arrival, terminals = self._termination_fn(model_out, model_goal)
rewards[collision] = -20
rewards[arrival] = 20
info = {}
return next_states, images_pre, transformations_pre, num_pre, rewards, terminals, info
def main():
print("try")
models = Ensemble_Model(3,2)
env_pre = PredictEnv(models)
test_input = np.random.randn(2, 3635)
test_actions = np.random.randn(2, 2)
test_images = np.random.randn(2, 2, 80, 160)
    test_numpre = np.array([[1], [0]], dtype=int)  # np.int is removed in modern NumPy
test_state_labels = np.random.randn(2, 3635)
test_reward_labels = np.random.randn(2, 1)
    models.train(test_input, test_images, test_numpre, test_actions, test_state_labels, test_reward_labels, batch_size=1)
    # step() also expects a `transformations` array; a placeholder with an assumed
    # shape (batch, frames, 3) is passed here so the call matches the signature.
    test_transformations = np.random.randn(2, 2, 3)
    env_pre.step(test_input, test_images, test_transformations, test_numpre, test_actions)
if __name__ == '__main__':
main()
|
# A multivariate normal distribution can be viewed as a distribution that a vector follows.
# Therefore, when handling it in Stan, it is expressed with a vector and a matrix.
import numpy as np
import seaborn as sns
import pandas
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect
from matplotlib.gridspec import GridSpec
import mcmc_tools
from scipy.stats import norm
import time
# data-mvn
# one row per person
# Y1: 50 m sprint time (sec)
# Y2: long jump distance (m)
mvn = pandas.read_csv('data-mvn.txt')
print(mvn.head())
print(mvn.describe())
# Draw a scatter plot
sns.scatterplot(
x='Y1',
y='Y2',
data=mvn
)
plt.show()
# Faster runners (fewer seconds) tend to jump farther:
# a negative correlation is visible.
# Modeling
# Here we assume that each person's 2-dimensional vector (Y1, Y2) is observed
# from a multivariate normal distribution with a mean vector and a covariance matrix.
Y1 = mvn['Y1']
Y2 = mvn['Y2']
N = len(Y1)
D = 2
stan_data = {
'N': N,
'D': D,
'Y': mvn
}
# Compile and sample
filename = '../model/model9-3-1'
start_1 = time.time()
mcmc_result = mcmc_tools.sampling(filename, stan_data, n_jobs=4)
elapsed_time_1 = time.time() - start_1
# Speed-up through vectorization
filename = '../model/model9-3-1-vec'
start_2 = time.time()
mcmc_result = mcmc_tools.sampling(filename, stan_data, n_jobs=4)
elapsed_time_2 = time.time() - start_2
print("elapsed_time:{0}".format(elapsed_time_1) + "[sec]")
print("elapsed_time:{0}".format(elapsed_time_2) + "[sec]")
|
import os
import urllib.request
from bioconda_utils import recipe
from ruamel.yaml import YAML
"""
This script reads the metadata from the bioconda repo and annotates the metadata from the recipes in the tools
"""
yaml = YAML()
yaml_recipe = YAML(typ="rt") # pylint: disable=invalid-name
with open('../annotations.yaml', 'r') as read_file:
file_annotations = yaml.load(read_file)
recipes_path = "../../bioconda-recipes/recipes/"
tools = {}
### Annotate bioconductor package
for key in file_annotations:
tool = file_annotations[key]
bioconda_recipes_path = os.path.split(recipes_path)[0]
meta_yaml_path = "%s/%s/meta.yaml" % (recipes_path, key)
try:
current_recipe = recipe.Recipe.from_file(recipes_path, meta_yaml_path)
if 'about' in current_recipe.meta:
if 'summary' in current_recipe.meta['about'] and (
tool['description'] is None or len(tool['description']) == 0):
tool['description'] = current_recipe.meta['about']['summary']
if 'license' in current_recipe.meta['about'] and (tool['license'] is None or len(tool['license']) == 0):
tool['license'] = current_recipe.meta['about']['license']
if 'home' in current_recipe.meta['about'] and (tool['home_url'] is None or len(tool['home_url']) == 0):
tool['home_url'] = current_recipe.meta['about']['home']
if 'extra' in current_recipe.meta:
if 'identifiers' in current_recipe.meta['extra']:
tool["identifiers"] = current_recipe.meta['extra']["identifiers"]
except Exception as e:
try:
bioconda_recipes_path = os.path.split(recipes_path)[0]
versions = next(os.walk(bioconda_recipes_path + "/" + key + '/.'))[1]
version = versions[len(versions) - 1]
bioconda_recipes_path = bioconda_recipes_path + "/" + key + "/" + version
meta_yaml_path = bioconda_recipes_path + "/meta.yaml"
current_recipe = recipe.Recipe.from_file(recipes_path, meta_yaml_path)
if 'about' in current_recipe.meta:
if 'summary' in current_recipe.meta['about'] and (tool['description'] is None or len(tool['description']) ==0):
tool['description'] = current_recipe.meta['about']['summary']
if 'license' in current_recipe.meta['about'] and (tool['license'] is None or len(tool['license']) ==0):
tool['license'] = current_recipe.meta['about']['license']
if 'home' in current_recipe.meta['about'] and (tool['home_url'] is None or len(tool['home_url']) ==0):
tool['home_url'] = current_recipe.meta['about']['home']
if 'extra' in current_recipe.meta:
if 'identifiers' in current_recipe.meta['extra']:
tool["identifiers"] = current_recipe.meta['extra']["identifiers"]
except Exception as e:
print("Error reading -- " + key + " Error -- " + str(e))
if 'bioconductor' in key:
key_bio = key.replace("bioconductor-", "")
url = "https://www.bioconductor.org/packages/release/bioc/html/" + key_bio + ".html"
try:
            with urllib.request.urlopen(url) as response:
                s = response.read()
tool['home_url'] = key_bio
print(s)
except Exception as e:
print("Error reading -- " + key + " Error -- " + str(url))
print(tool['home_url'])
tools[key] = tool
# writing missing
yaml.indent(mapping=4, sequence=6, offset=2)
with open('annotations.yaml.previous.previous', 'w') as outfile:
yaml.dump(tools, outfile)
|
"""Definitions for the primitive `scalar_bit_rshift` x >> y."""
from ..lib import UniformPrimitiveInferrer, assert_scalar
from ..xtype import Integral
from . import primitives as P
def pyimpl_scalar_bit_rshift(x: Integral, y: Integral) -> Integral:
"""Implement `scalar_bit_rshift`."""
assert_scalar(x, y)
return x >> y
infer_scalar_bit_rshift = UniformPrimitiveInferrer.partial(
prim=P.scalar_bit_rshift, impl=pyimpl_scalar_bit_rshift, infer_value=False
)
__operation_defaults__ = {
"name": "scalar_bit_rshift",
"registered_name": "scalar_bit_rshift",
"mapping": P.scalar_bit_rshift,
"python_implementation": pyimpl_scalar_bit_rshift,
}
__primitive_defaults__ = {
"name": "scalar_bit_rshift",
"registered_name": "scalar_bit_rshift",
"type": "backend",
"python_implementation": pyimpl_scalar_bit_rshift,
"inferrer_constructor": infer_scalar_bit_rshift,
"grad_transform": None,
}
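# Illustrative behaviour of the Python implementation (comment only):
#   pyimpl_scalar_bit_rshift(12, 2) -> 3   # 0b1100 >> 2 == 0b11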
|
from databases import Database
from dotenv import load_dotenv
from google.cloud import storage
from starlette.applications import Starlette
from starlette.responses import JSONResponse
import uvicorn
from config import Config
import loader
import pubsub
load_dotenv()
# Load configuration from sources
cfg = Config()
# Connect to database
database = Database(cfg.database_url)
# Connect to Google Cloud services
storage_client = storage.Client()
bucket = storage_client.bucket(cfg.gcp_bucket)
# Create API server
app = Starlette(
routes=[],
exception_handlers={
404: lambda req, exc: JSONResponse({"success": False, "reason": "not found"}),
405: lambda req, exc: JSONResponse({"success": False, "reason": "method not allowed"}),
500: lambda req, exc: JSONResponse({"success": False, "reason": "internal server error"})
},
on_startup=[database.connect],
on_shutdown=[database.disconnect]
)
# Add redis configuration to startup and shutdown
app.router.on_startup.append(pubsub.configure(app, cfg.redis))
app.router.on_shutdown.append(pubsub.shutdown(app))
# Load initial routes
app.router.on_startup.append(loader.load_routes(app, database))
# Attach services to state
app.state.database = database
app.state.bucket = bucket
if __name__ == "__main__":
uvicorn.run("main:app", **cfg.app)
|
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
def get_combined_data():
names = ('wavelength', 'element', 'EP', 'loggf', 'EW')
df1 = pd.read_table('linelist1.moog', delimiter=r'\s+', names=names, skiprows=1)
df2 = pd.read_table('linelist2.moog', delimiter=r'\s+', names=names, skiprows=1)
df1['wavelength'] = [round(w, 2) for w in df1['wavelength']]
df2['wavelength'] = [round(w, 2) for w in df2['wavelength']]
df = pd.merge(df1, df2, how='outer',
left_on='wavelength',
right_on='wavelength',
suffixes=('_1', '_2'))
df['diff'] = df['EW_1'] - df['EW_2']
return df
if __name__ == '__main__':
df = get_combined_data()
m, s = np.nanmedian(df['diff']), np.nanstd(df['diff'])
plt.figure()
plt.plot(df['EW_1'], df['diff'], '.')
plt.hlines([m, m+s, m-s], 4, 200)
plt.xlabel(r'EW$_1$ [m$\AA$]')
plt.ylabel(r'EW$_1$ - EW$_2$ [m$\AA$]')
# plt.savefig('../linelist_comparison.pdf')
plt.show()
|
# -*- coding: utf-8 -*-
# @Time : 2021/4/7 5:38 PM
# @Author : anonymous
# @File : urls.py
# @Software: PyCharm
# @Description:
from django.urls import path, include
from rest_framework import routers
from .views import TasksViewSet
router = routers.DefaultRouter()
router.register(prefix=r'tasks', viewset=TasksViewSet, basename='task')
urlpatterns = [path('', include(router.urls)), ]
|
# Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
This sub-module provides a collection of filters for providing linq-style
programming (inspired by RxPy).
Each function appears as a method on the Publisher base class, allowing for
easy chaining of calls. For example:
sensor.where(lambda x: x > 100).select(lambda x: x*2)
If the @filtermethod decorator is used, then a standalone function is also
defined that takes all the arguments except the publisher and returns a
function which, when called, takes a publisher and subscribes to the publisher.
We call this returned function a "thunk". Thunks can be used with combinators
(like compose(), parallel(), and passthrough(), all defined in combinators.py)
as well as directly with the scheduler. For example:
scheduler.schedule_sensor(sensor, where(lambda x: x> 100),
select(lambda x: x*2))
The implementation code for a linq-style filter typically looks like the
following:
@filtermethod(Publisher)
def example(this, ...):
def on_next(self, x):
....
self._dispatch_next(...)
return Filter(this, on_next, name="example")
Note that, by convention, we use `this` as the first argument of the function,
rather than self. The `this` parameter corresponds to the previous element in
the chain, while the `self` parameter used in the on_next() function represents
the current element in the chain. If you get these mixed up, you can get an
infinite loop!
In general, a linq-style filter takes the previous publisher/filter in a
chain as its first input, parameters to the filter as subsequent inputs, and
returns a publisher/filter that should be used as the input to the next step
in the filter chain.
"""
from . import buffer
from . import first
from . import never
from . import output
from . import scan
from . import select
from . import skip
from . import some
from . import take
from . import transducer
from . import timeout
from . import where
from . import combinators
|
# mlt2/core.py
# Copyright 2019 Matthias Lesch <ml@matthiaslesch.de>
# MIT License: http://www.opensource.org/licenses/mit-license.php
"""
mlt2.core
---------
The core functionality.
:copyright: 2014-2020 by Matthias Lesch <ml@matthiaslesch.de>
:license: MIT license
"""
import os,re,sys
from collections import deque
from .reflection import myexec as _myexec,myeval as _myeval,MltReflectionError
from .fixed import Fixed2, formateuro, parse_decimal
from .runtime import Matrix
from .mlt_vars import MltVars, Var, Dec, DEC, INT, STR
import pudb
#debug=pudb.set_trace
# Parsing #
## Regexes ##
# The RE must contain a group, because of the 3-way split.
RE_py = re.compile(r'<\?(.*?)\?>[\t ]*?\n{0,1}', re.S|re.M)
RE_eval = re.compile(r'<\!(.*?)\!([^\!]*?)>',re.S|re.M)
RE_comment = re.compile(r'^<\#(.*?)^\#>.*?\n', re.S|re.M)
RE_aux = re.compile(r'<\$(.*?)\$>', re.S|re.M)
RE_aux_simple=re.compile(r'(\w*?[+-]{0,1})\$(\S*)_*')
RE_ipol = re.compile(r'#\{(.*?)\}')
RE_bezeichner = re.compile(r'\$([a-zA-Z_]\w*)')
RE_start = re.compile(r'<(\w*?[+-]{0,1})([\?\!\#\$])',re.S|re.M)
RE_endpy = re.compile(r'(\?>)[\t ]*?\n{0,1}', re.S|re.M)
RE_endeval = re.compile(r'(\!>)',re.S|re.M)
RE_endcomment = re.compile(r'(\#>).*?\n{0,1}', re.S|re.M)
RE_endaux = re.compile(r'(\${0,1}>)', re.M) # re.S removed, must be on a line
redict={
'!': RE_endeval,
'?': RE_endpy,
'#': RE_endcomment,
'$': RE_endaux
}
## Tokenizer
## new stacked tokenizer pattern
#def pyblock_tokenizer(s):
# token types
TOKRAW='0'
TOKPY='1'
TOKEVAL='2'
TOKTXT='3'
TOKAUX='$'
#borrowed from interpolate.py
def partition(s):
"""partitions string s into left, token, right
left: string
token: mlt token
right: remainder
mlt symbols hardcoded
"""
mo = RE_start.search(s)
if not mo:
return (s,'', TOKTXT), ("",'',TOKTXT), ""
left = s[:mo.start()]
end_RE = redict[ mo.group(2) ]
mo1 = end_RE.search(s,mo.end() )
# it is an error if mo1 is None ....
return (left, '',TOKTXT),\
( s[mo.end():mo1.start()],mo.group(1), mo.group(2) ),s[mo1.end():]
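# Illustrative example (comment only): with the markers defined above,
#   partition("Hello <?x = 1?> world")
# returns (("Hello ", '', TOKTXT), ("x = 1", '', '?'), " world"),
# i.e. the leading text, the marked block with its label and symbol, and the remainder.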
def tokenize(s):
remainder=s
toks=deque()
push=toks.append
while remainder:
tokleft,tokmid,remainder = partition(remainder)
push( tokleft )
push( tokmid )
return toks
def simpletok(s):
i = 0
curser = 0
for mo in RE_aux_simple.finditer(s):
yield (s[curser:mo.start()],'',TOKTXT)
tmp = mo.group(2); tmp = tmp.rstrip('_')
yield ( tmp, mo.group(1), TOKAUX)
curser = mo.end()
yield (s[curser:],'',TOKTXT)
def tokenize2(s):
toks = tokenize(s)
for tok,label,ttype in toks:
if not ttype == TOKTXT: yield (tok,label,ttype)
else:
for t in simpletok(tok): yield t
# Runtime
matrix = Matrix()
def myexec(s,env,*args,**kwargs):
return _myexec(s,env)
def myeval(s,env,*args,**kwargs):
return _myeval(s,env)
def nothing(s,env,**args):
return ''
def noop(s,env,**args):
return s
def verb(s): return s
def insert(s):
with open(s,'r') as f:
return f.read()
def include(s):
# run s in a sandbox
with open(s,'r') as f:
return mltminimal( f.read(), {} )
## For reasons I don't understand, the following function cannot be
## defined inside a script, too much fiddling with the environment
## dict ...
## conveniences for numeval
def idfunc(x): return x
## currently unused
class Alu():
def __init__(self):
self.list = list()
def clear(self):
self.list.clear()
@property
def last(self):
return self.list[-1]
def append(self,x):
return self.list.append(x)
def sum(self):
return sum(self.list)
alu=Alu()
class Runtime():
# exchange vars between script and mlt
runtime="""
<?
from mlt2.fixed import Fixed2 as Euro
?>"""
#def setsumvar(s):
# env=Runtime.env
# env['sumvar']=s
# env[s] = Fixed2(0)
def parselabel(label,res,env):
env['Vars']._lastlabel_ = None
if not label: return res
env['Vars']._lastlabel_ = label
symbol = label[-1]
if not symbol in '+-':
env[label]=res
return res
label=label[:-1]
if symbol=='+':
env[label] = env[label] + res
elif symbol=='-':
env[label] = env[label] - res
env['Vars']._lastlabel_ = label
return res
def numeval(s,env,**args):
# evaluates to a currency format ...
s=s.strip()
width=env.get('__width__',10)
numfilter = env.get('numfilter')
fmt = '{:>' + str(width) + '.2f}'
try:
s = parse_decimal(s)
res=Fixed2( eval(s,env) )
matrix.append(res)
except Exception as e:
print("Caught:",repr(e))
raise MltReflectionError( "Code:\n" + s )
#alu.append(res)
    # take care of the summation
sumvar = env['sumvar']
if sumvar is not None and s!=sumvar:
env[sumvar] = env[sumvar]+res
if 'label' in args:
#pudb.set_trace()
label=args['label']
parselabel(label,res,env)
lastlbl = env['Vars']._lastlabel_
if lastlbl in env['Vars'].md:
targetvar = env['Vars'].md[lastlbl]
return targetvar.format(res)
return numfilter(res,env)
def numfilter(x,env):
x = formateuro(x)
#return '{}'.format(repr(env['numlatex']))
if env['numlatex'] is True:
return '\\texttt{{{}}}'.format(x.strip())
else:
return x
def ipol(s,env):
#debug()
method=env.get('__ML_subst','')
if not method:
return s
elif method=='$':
RE=RE_bezeichner
def _sub(mo):
#return myeval(mo.group(1),env)
return env[mo.group(1)]
elif method=='#{}':
RE=RE_ipol
def _sub(mo):
return myeval(mo.group(1),env)
# substitution of variables in s using in env
# syntax "Hello #{var}" substitutes var for its value
# if var is undefined ERROR
return RE.sub(_sub,s)
'''
from dbm.gnu import open as gopen
from pickle import loads,dumps
class VarStore():
def myinit(self,pth):
self.dbm = gopen(pth,"c")
tmp = self.dbm.get("numvars",None)
if tmp is not None:
self.numvars = loads(tmp)
else:
self.numvars = {}
def load(self):
self.env.update( self.numvars )
def save(self):
'''
symboltable={
'?': 'exec',
'!': 'eval',
'#': 'comment',
'$': 'aux',
TOKTXT: 'txt'
}
rtenv = {}
rtenv['__ML_'] = {}
rtenv['__ML_']['exec'] = myexec
rtenv['__ML_']['eval'] = myeval
rtenv['__ML_']['comment'] = nothing
rtenv['__ML_']['aux'] = numeval
rtenv['__ML_']['txt'] = noop
rtenv['__ML_']['symboltable'] = symboltable
rtenv['__ML_']['numhook'] = idfunc
#rtenv['alu'] = alu
rtenv['sumvar'] = None
#rtenv['setsumvar'] = setsumvar
rtenv['insert'] = insert # insert file verbatim
rtenv['include'] = include # insert file verbatim
rtenv['verb'] = verb
rtenv['matrix']=matrix # matrix from runtime is now available
rtenv['numlatex']=False
rtenv['numfilter']=numfilter
def mltminimal(s,env):
res = deque()
puts=res.append
runtime=Runtime()
runtime.env = env
s = runtime.runtime + '\n' + s
toks=tokenize2(s)
rtenv['runtime'] = runtime
env.update(rtenv)
env['Vars']=MltVars( env ) # new variables.
def process(s):
# processes file s in current environ
with open(s,'r') as f:
s = f.read()
return mltminimal(s,env)
env['process'] = process
#debug()
for tok,label,ttype in toks:
f = env['__ML_'][ symboltable[ttype] ]
tok = ipol(tok,env) # PREPROCESSING
puts( f(tok,env,label=label) )
res=''.join( res )
res=res.strip()
return res
# -*- coding: utf-8 -*-
## vim: tabstop=4 softtabstop=4 shiftwidth=4 expandtab autoindent tw=79 ft=python fenc=utf-8
|
# flake8: noqa
# TODO(vorj): Once we require flake8 3.7.0+,
# we should ignore only W291 for the whole file
# using --per-file-ignores .
import clpy
import unittest
class TestUltimaCIndexer(unittest.TestCase):
def test_cindexer_argument_mutation(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(CIndexer_2 ind)
{
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CIndexer<2> ind){}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
def test_cindexer_member_function(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(CIndexer_2 ind)
{
ind_size;
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CIndexer<2> ind){
ind.size();
}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
if __name__ == "__main__":
unittest.main()
|
from .package import template
|
from tuprolog import logger
from ._ktadapt import *
# noinspection PyUnresolvedReferences
import jpype
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.core.operators as _operators
from tuprolog.jvmutils import jiterable
from tuprolog.pyutils import iterable_or_varargs
from tuprolog.core import Atom, Integer, Struct, Term
from functools import singledispatch
Operator = _operators.Operator
OperatorSet = _operators.OperatorSet
Specifier = _operators.Specifier
@singledispatch
def operator(functor: str, specifier: Specifier, priority: int) -> Operator:
return Operator(functor, specifier, priority)
@operator.register
def _(priority: Integer, specifier: Atom, functor: Atom) -> Operator:
return Operator.fromTerms(priority, specifier, functor)
@operator.register
def _(term: Struct) -> Operator:
return Operator.fromTerm(term)
def operator_set(*operators) -> OperatorSet:
return iterable_or_varargs(operators, lambda os: OperatorSet(jiterable(os)))
@singledispatch
def specifier(name: str) -> Specifier:
return Specifier.valueOf(name.upper())
@specifier.register
def _(term: Term) -> Specifier:
return Specifier.fromTerm(term)
EMPTY_OPERATORS: OperatorSet = OperatorSet.EMPTY
DEFAULT_OPERATORS: OperatorSet = OperatorSet.DEFAULT
STANDARD_OPERATORS: OperatorSet = OperatorSet.STANDARD
XF: Specifier = Specifier.XF
YF: Specifier = Specifier.YF
FX: Specifier = Specifier.FX
FY: Specifier = Specifier.FY
XFX: Specifier = Specifier.XFX
XFY: Specifier = Specifier.XFY
YFX: Specifier = Specifier.YFX
logger.debug("Loaded JVM classes from it.unibo.tuprolog.core.operators.*")
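# Usage sketch (illustrative only; requires the JVM started via jpype):
#   plus_op = operator("+", YFX, 500)      # build an Operator from raw values
#   ops = operator_set(plus_op)            # wrap it in an OperatorSet
#   xfx = specifier("xfx")                 # resolve a Specifier by name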
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pathlib import Path
from pants.engine.console import Console
from pants.engine.fs import (
Digest,
DirectoryToMaterialize,
FileContent,
InputFilesContent,
MaterializeDirectoriesResult,
MaterializeDirectoryResult,
Workspace,
)
from pants.engine.goal import Goal
from pants.engine.rules import RootRule, console_rule
from pants.engine.selectors import Get
from pants.testutil.console_rule_test_base import ConsoleRuleTestBase
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_dir
@dataclass(frozen=True)
class MessageToConsoleRule:
tmp_dir: str
input_files_content: InputFilesContent
class MockWorkspaceGoal(Goal):
name = 'mock-workspace-goal'
@console_rule
def workspace_console_rule(console: Console, workspace: Workspace, msg: MessageToConsoleRule) -> MockWorkspaceGoal:
digest = yield Get(Digest, InputFilesContent, msg.input_files_content)
output = workspace.materialize_directories((
DirectoryToMaterialize(path=msg.tmp_dir, directory_digest=digest),
))
output_path = output.dependencies[0].output_paths[0]
console.print_stdout(str(Path(msg.tmp_dir, output_path)), end='')
yield MockWorkspaceGoal(exit_code=0)
class WorkspaceInConsoleRuleTest(ConsoleRuleTestBase):
"""This test is meant to ensure that the Workspace type successfully
invokes the rust FFI function to write to disk in the context of a @console_rule,
without crashing or otherwise failing."""
goal_cls = MockWorkspaceGoal
@classmethod
def rules(cls):
return super().rules() + [RootRule(MessageToConsoleRule), workspace_console_rule]
def test(self):
with temporary_dir() as tmp_dir:
input_files_content = InputFilesContent((
FileContent(path='a.txt', content=b'hello'),
))
msg = MessageToConsoleRule(tmp_dir=tmp_dir, input_files_content=input_files_content)
output_path = str(Path(tmp_dir, 'a.txt'))
self.assert_console_output_contains(output_path, additional_params=[msg])
contents = open(output_path).read()
self.assertEqual(contents, 'hello')
#TODO(gshuflin) - it would be nice if this test, which tests that the MaterializeDirectoryResults value
# is valid, could be subsumed into the above @console_rule-based test, but it's a bit awkward
# to get the MaterializeDirectoriesResult out of a @console_rule at the moment.
class FileSystemTest(TestBase):
def test_workspace_materialize_directories_result(self):
        #TODO(#8336): at some point, this test should require that Workspace only be invoked from a console_rule
workspace = Workspace(self.scheduler)
input_files_content = InputFilesContent((
FileContent(path='a.txt', content=b'hello'),
FileContent(path='subdir/b.txt', content=b'goodbye'),
))
digest, = self.scheduler.product_request(Digest, [input_files_content])
with temporary_dir() as tmp_dir:
path1 = Path(tmp_dir, 'a.txt')
path2 = Path(tmp_dir, 'subdir', 'b.txt')
self.assertFalse(path1.is_file())
self.assertFalse(path2.is_file())
output = workspace.materialize_directories((
DirectoryToMaterialize(path=tmp_dir, directory_digest=digest),
))
self.assertEqual(type(output), MaterializeDirectoriesResult)
materialize_result = output.dependencies[0]
self.assertEqual(type(materialize_result), MaterializeDirectoryResult)
self.assertEqual(materialize_result.output_paths,
(str(Path(tmp_dir, 'a.txt')), str(Path(tmp_dir, 'subdir/b.txt')),)
)
|
# $Id: SuperGlobal.py 1047 2009-01-15 14:48:58Z graham $
import __main__
class SuperGlobal:
"""
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/457667
    Included here so it can be used in testing.
    Creates globals.
    i.e.
    superglobal = SuperGlobal()
    superglobal.data = ....
    However many times you create SuperGlobal, it accesses the same data.
"""
def __getattr__(self, name):
return __main__.__dict__.get(name, None)
def __setattr__(self, name, value):
__main__.__dict__[name] = value
def __delattr__(self, name):
        if name in __main__.__dict__:
del __main__.__dict__[name]
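# --- Illustrative demo (added example, not part of the original module) ---
# Two separate instances share the same underlying __main__ globals.
if __name__ == "__main__":
    a = SuperGlobal()
    b = SuperGlobal()
    a.data = 42
    assert b.data == 42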
|
import matplotlib.pyplot as plt
from plotnine import *
import pandas as pd
df_penduduk = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/datakependudukandki-dqlab.csv')
df_penduduk_luas_jumlah = df_penduduk.groupby(['NAMA KELURAHAN', 'LUAS WILAYAH (KM2)'])[['JUMLAH']].agg('sum').reset_index()
(ggplot(data=df_penduduk_luas_jumlah)
+ aes(y='LUAS WILAYAH (KM2)', x='JUMLAH')
+ geom_point()
).draw()
plt.show()
# FIX ERROR on the site: geom_point() must come AFTER aes().
# The site's hard-coded solutions are sometimes frustrating...
|
import os
import re
import sys
import gzip
import pickle
import ftputil
import argparse
import datetime
import tempfile
import urllib.request
server = 'ftp.seismo.nrcan.gc.ca'
tmpdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','metadata','INTERMAGNET')
parser = argparse.ArgumentParser()
parser.add_argument('--id', default='ams/definitive/minute')
parser.add_argument('--start', default='2013-12-01T00:00:00.000000000Z')
parser.add_argument('--stop', default='2013-12-02T00:00:00.000000000Z')
#parser.add_argument('--id', default='bou/definitive/minute')
#parser.add_argument('--start', default='2017-12-01T00:00:00.000000000Z')
#parser.add_argument('--stop', default='2017-12-02T00:00:00.000000000Z')
parser.add_argument('--tmpdir', default=tmpdir)
parser.add_argument('--refresh', default=False)
args = vars(parser.parse_args())
id = args['id']
start = args['start']
stop = args['stop']
def download(url, start, stop):
start = re.sub(r"T"," ", start[0:19])
stop = re.sub(r"T"," ", stop[0:19])
p = id.split("/")
path = os.path.join(args['tmpdir'], server,
'intermagnet', p[2], p[1],
'IAGA2002', start[0:4], start[5:7])
if not os.path.exists(path):
os.makedirs(path)
found = False
filename = os.path.join(path + "/" + url.split("/")[-1])
if os.path.exists(filename):
#print("Found " + filename)
found = True
else:
#print("Downloading " + url)
try:
urllib.request.urlretrieve(url, filename)
found = True
except Exception as e:
with open('bin/INTERMAGNET-error.log','at') as f:
                f.write(str(e) + ": " + url + "\n")
if not found:
# Try again with ".gz" removed.
url = url[0:-3]
filename = os.path.join(path, url.split("/")[-1])
if os.path.exists(filename):
#print("Found " + filename)
found = True
else:
#print("Downloading " + url)
try:
urllib.request.urlretrieve(url, filename)
found = True
except Exception as e:
with open('bin/INTERMAGNET-error.log','at') as f:
                    f.write(str(e) + ": " + url + "\n")
if args['refresh'] and found:
# Very slow.
host = ftputil.FTPHost(server, "anonymous", "anonymous")
host.download_if_newer(url.split(server)[-1], filename)
if not found:
return
#print("Reading " + filename)
try:
if re.search(r'\.gz$', filename):
#with gzip.open(filename, 'rt', errors='replace') as f: lines = f.readlines()
with gzip.open(filename, 'rb') as f: lines = f.readlines()
else:
#with open(filename, 'rt', errors='replace') as f: lines = f.readlines()
            # open as bytes so the later line.decode() calls work for both branches
            with open(filename, 'rb') as f: lines = f.readlines()
except Exception as e:
#print("Problem reading " + filename)
with open('bin/INTERMAGNET-error.log','at') as f:
            f.write(str(e) + ": " + filename + "\n")
f.close()
return
# TODO: Similar array appears in INTERMAGNET2HAPI.py
keys = [
['Source_of_Data', "string", None, None, 70],
['Station_Name', "string", None, None, 70],
['Geodetic_Latitude', "double", "degrees", None, None],
['Geodetic_Longitude', "double", "degrees", None, None],
['Elevation', "string", "double", "meters", None],
['Reported', "string", None, None, 70],
['Sensor_Orientation', "string", None, None, 70],
['Digital_Sampling', "string", None, None, 70],
['Data_Interval_Type', "string", None, None, 70],
['Publication_Date', "string", None, None, 70],
['Header', "string", None, None, 70*40],
]
# TODO: Similar code appears in INTERMAGNET2HAPI.py
if '/metadata' in id:
meta = {}
comment = '\n'
for line in lines:
line = line.decode()
if line[1] == '#':
comment = comment + line.rstrip() + "\n"
elif not re.match(r'DATE', line):
comment = comment + line.rstrip() + "\n"
name = line[0:23]
value = line[24:-2]
meta[name.strip()] = value.strip()
else:
meta['comment'] = comment
meta['parameters'] = re.sub(r"\s+", ",", line[0:-2].rstrip()).split(",")
break
date = url.split("/")[-1][3:11]
linel = line.split(" ")
outline = date[0:4] + "-" + date[4:6] + "-" + date[6:8] + "Z,"
#print(",".join(meta['parameters']))
for i in range(3,len(meta['parameters'])):
if len(meta['parameters'][i]) != 4:
meta['parameters'][i] = "?"
else:
meta['parameters'][i] = meta['parameters'][i][-1]
outline = outline + ','.join(meta['parameters'][3:])
for i in range(len(keys)):
key = keys[i][0].replace("_"," ")
if key in meta:
if "," in meta[key]:
outline = outline + ',' + '"' + meta[key] + '"'
else:
outline = outline + "," + meta[key]
else:
outline = outline + ","
print(outline + "," + '"' + comment + '"')
else:
for line in lines:
line = line.decode()
if re.match(r"[0-9]{4}",line):
if line[0:18] >= start and line[0:18] < stop:
# Make comma separated
line = re.sub(r"\s+", ",", line.strip())
# Replace space in 'YYYY-MM-DD HH:MM:SS.FFF' with T
line = line[0:10] + "T" + line[11:23] + "Z" + line[23:]
print(line)
if False:
# Read list of available files. File is large so this slows down process.
# Instead, create filenames and if file does not exist, return. This will be slow
# if there are large time gaps.
path = os.path.dirname(os.path.realpath(sys.argv[0]))
fnamepkl = os.path.join(path,'..','metadata','INTERMAGNET','INTERMAGNET-manifest.pkl')
fnamepkl = os.path.realpath(fnamepkl)
f = open(fnamepkl, 'rb')
S = pickle.load(f)
f.close()
id_l = id.split("/")
ext = id_l[1][0] + id_l[2][0:3] + "." + id_l[2][0:3] + ".gz"
startdt = datetime.datetime.strptime(start[0:10], '%Y-%m-%d')
stopdt = datetime.datetime.strptime(stop[0:10], '%Y-%m-%d')
stepdt = datetime.timedelta(days=1)
# If last date is midnight, last date to look for file is before this
    # (stop date is exclusive)
if stop[10:] == "T00:00:00.000000000Z":
stopdt = stopdt - stepdt
while startdt <= stopdt:
date = startdt.strftime('%Y%m%d')
url = "ftp://" + server + "/intermagnet/" + id_l[2] + "/" + id_l[1] \
+ "/IAGA2002/" + date[0:4] + "/" + date[4:6] + "/" \
+ id_l[0] + date + ext
download(url, start, stop)
startdt += stepdt
|
"""Hight cipher."""
from arxpy.bitvector.core import Constant
from arxpy.bitvector.operation import RotateLeft as ROL
from arxpy.primitives.primitives import KeySchedule, Encryption, Cipher
class HightKeySchedule(KeySchedule):
"""Key schedule function."""
rounds = 34 # key whitening seen as a round
input_widths = [8 for _ in range(16)]
output_widths = [8 for _ in range(4 * 34)]
@classmethod
def set_rounds(cls, new_rounds):
cls.rounds = new_rounds
cls.output_widths = [8 for _ in range(4 * cls.rounds)]
@classmethod
def eval(cls, *mk):
mk = list(reversed(mk)) # mk[i] = mki
d = [
0x5a, 0x6d, 0x36, 0x1b, 0x0d, 0x06, 0x03, 0x41,
0x60, 0x30, 0x18, 0x4c, 0x66, 0x33, 0x59, 0x2c,
0x56, 0x2b, 0x15, 0x4a, 0x65, 0x72, 0x39, 0x1c,
0x4e, 0x67, 0x73, 0x79, 0x3c, 0x5e, 0x6f, 0x37,
0x5b, 0x2d, 0x16, 0x0b, 0x05, 0x42, 0x21, 0x50,
0x28, 0x54, 0x2a, 0x55, 0x6a, 0x75, 0x7a, 0x7d,
0x3e, 0x5f, 0x2f, 0x17, 0x4b, 0x25, 0x52, 0x29,
0x14, 0x0a, 0x45, 0x62, 0x31, 0x58, 0x6c, 0x76,
0x3b, 0x1d, 0x0e, 0x47, 0x63, 0x71, 0x78, 0x7c,
0x7e, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x43, 0x61,
0x70, 0x38, 0x5c, 0x6e, 0x77, 0x7b, 0x3d, 0x1e,
0x4f, 0x27, 0x53, 0x69, 0x34, 0x1a, 0x4d, 0x26,
0x13, 0x49, 0x24, 0x12, 0x09, 0x04, 0x02, 0x01,
0x40, 0x20, 0x10, 0x08, 0x44, 0x22, 0x11, 0x48,
0x64, 0x32, 0x19, 0x0c, 0x46, 0x23, 0x51, 0x68,
0x74, 0x3a, 0x5d, 0x2e, 0x57, 0x6b, 0x35, 0x5a
]
d = [Constant(d_i, 8) for d_i in d]
def sk_round_i(round_i):
assert round_i <= 31
sk = []
for i in range(8):
for j in range(8):
if 4*round_i <= 16 * i + j < 4*round_i + 4:
sk.append(mk[(j - i) % 8] + d[16 * i + j])
elif 4*round_i <= 16 * i + j + 8 < 4*round_i + 4:
sk.append(mk[((j - i) % 8) + 8] + d[16 * i + j + 8])
return sk
rk = []
for r in range(cls.rounds):
if hasattr(cls, "skip_rounds") and r in cls.skip_rounds:
rk.extend(mk[:4]) # cte outputs not supported
continue
if r == 0:
wk0, wk1, wk2, wk3 = [mk[i + 12] for i in range(4)]
rk.extend([wk0, wk1, wk2, wk3])
elif r < cls.rounds - 1:
sk0, sk1, sk2, sk3 = sk_round_i(r - 1)
rk.extend([sk0, sk1, sk2, sk3])
else:
assert r == cls.rounds - 1
if r == 33:
wk4, wk5, wk6, wk7 = [mk[i - 4] for i in range(4, 8)]
rk.extend([wk4, wk5, wk6, wk7])
else:
sk0, sk1, sk2, sk3 = sk_round_i(r - 1)
rk.extend([sk0, sk1, sk2, sk3])
return rk
class HightEncryption(Encryption):
"""Encryption function."""
rounds = 34
input_widths = [8 for _ in range(8)]
output_widths = [8 for _ in range(8)]
round_keys = None
@classmethod
def set_rounds(cls, new_rounds):
cls.rounds = new_rounds
@classmethod
def initial_transformation(cls, p, wk3, wk2, wk1, wk0):
x = [None for _ in range(len(p))]
x[0] = p[0] + wk0
x[1] = p[1]
x[2] = p[2] ^ wk1
x[3] = p[3]
x[4] = p[4] + wk2
x[5] = p[5]
x[6] = p[6] ^ wk3
x[7] = p[7]
return x
@classmethod
def round_function(cls, x, sk3, sk2, sk1, sk0): # SK4i+3,SK4i+2,SK4i+1,SK4i
def f0(bv):
return ROL(bv, 1) ^ ROL(bv, 2) ^ ROL(bv, 7)
def f1(bv):
return ROL(bv, 3) ^ ROL(bv, 4) ^ ROL(bv, 6)
# there is a typo in Section 2.4 of Hight paper; using Fig. 3 instead
y = [None for _ in range(len(x))]
y[1] = x[0]
y[3] = x[2]
y[5] = x[4]
y[7] = x[6]
y[0] = x[7] ^ (f0(x[6]) + sk3)
y[2] = x[1] + (f1(x[0]) ^ sk0) # sk2
y[4] = x[3] ^ (f0(x[2]) + sk1)
y[6] = x[5] + (f1(x[4]) ^ sk2) # sk0
return y
@classmethod
def final_transformation(cls, x, wk7, wk6, wk5, wk4):
c = [None for _ in range(len(x))]
c[0] = x[1] + wk4
c[1] = x[2]
c[2] = x[3] ^ wk5
c[3] = x[4]
c[4] = x[5] + wk6
c[5] = x[6]
c[6] = x[7] ^ wk7
c[7] = x[0]
return c
@classmethod
def eval(cls, *p): # p7,...,p0
x = list(reversed(p))
cls.round_inputs = []
for r in range(cls.rounds): # due to round_inputs, better all logic in for loop
cls.round_inputs.append(x)
if hasattr(cls, "skip_rounds") and r in cls.skip_rounds:
continue
if r == 0:
wk0, wk1, wk2, wk3 = cls.round_keys[4*r: 4*r + 4]
x = cls.initial_transformation(x, wk3, wk2, wk1, wk0)
elif r < cls.rounds - 1:
sk0, sk1, sk2, sk3 = cls.round_keys[4*r: 4*r + 4]
x = cls.round_function(x, sk3, sk2, sk1, sk0)
else:
assert r == cls.rounds - 1
if r == 33:
wk4, wk5, wk6, wk7 = cls.round_keys[4*r: 4*r + 4]
x = cls.final_transformation(x, wk7, wk6, wk5, wk4)
else:
sk0, sk1, sk2, sk3 = cls.round_keys[4*r: 4*r + 4]
x = cls.round_function(x, sk3, sk2, sk1, sk0)
return list(reversed(x))
class HightCipher(Cipher):
key_schedule = HightKeySchedule
encryption = HightEncryption
rounds = 34
max_rounds = 34
@classmethod
def set_rounds(cls, new_rounds):
cls.rounds = new_rounds
cls.key_schedule.set_rounds(new_rounds)
cls.encryption.set_rounds(new_rounds)
@classmethod
def set_skip_rounds(cls, skip_rounds):
cls.encryption.skip_rounds = skip_rounds
cls.key_schedule.skip_rounds = skip_rounds
@classmethod
def test(cls):
"""Test Hight with official test vectors."""
# https://tools.ietf.org/html/draft-kisa-hight-00#section-5
old_rounds = cls.rounds
cls.set_rounds(34)
plaintext = (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
key = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)
assert cls(plaintext, key) == (0x00, 0xf4, 0x18, 0xae, 0xd9, 0x4f, 0x03, 0xf2)
plaintext = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)
key = (0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00)
assert cls(plaintext, key) == (0x23, 0xce, 0x9f, 0x72, 0xe5, 0x43, 0xe6, 0xd8)
cls.set_rounds(old_rounds)
|
"""
file_name
@author: Gregory Kramida
Copyright: (c) Gregory Kramida 2016
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from lxml import etree # @UnresolvedImport
import numpy as np
def make_opencv_matrix_xml_element(root, mat, name):
"""
Construct an xml element out of a numpy matrix formatted for OpenCV XML input
@type root: lxml.etree.SubElement
@param root: root xml element to build under
@type mat: numpy.ndarray
@param mat: the numpy matrix to convert
@type name: str
@param name: name of the matrix XML element
"""
mat_element = etree.SubElement(root, name, attrib={"type_id": "opencv-matrix"})
rows_elem = etree.SubElement(mat_element, "rows")
rows_elem.text = str(mat.shape[0])
cols_elem = etree.SubElement(mat_element, "cols")
cols_elem.text = str(mat.shape[1])
dt_elem = etree.SubElement(mat_element, "dt")
if mat.dtype == np.dtype('float64'):
dt_elem.text = "d"
elif mat.dtype == np.dtype("float32"):
dt_elem.text = "f"
else:
raise ValueError("dtype " + str(mat.dtype) + "not supported. Expecting float64 or float32.")
data_elem = etree.SubElement(mat_element, "data")
data_string = str(mat.flatten()).replace("\n", "").replace("[", "").replace("]", "")
    data_string = re.sub(r"\s+", " ", data_string)
data_elem.text = data_string
return mat_element
def make_opencv_size_xml_element(root, sizelike, name):
if len(sizelike) != 2:
        raise ValueError("Expecting a tuple of length 2. Got length {:d}".format(len(sizelike)))
size_element = etree.SubElement(root, name)
size_element.text = str(sizelike[0]) + " " + str(sizelike[1])
return size_element
def parse_xml_matrix(mat_element):
"""
Generate numpy matrix from opencv-formatted xml of a 2d matrix
"""
rows = int(mat_element.find("rows").text)
cols = int(mat_element.find("cols").text)
type_flag = mat_element.find("dt").text
if type_flag == "f":
dtype = np.float32
elif type_flag == "d":
dtype = np.float64
else:
raise ValueError("dtype flag " + type_flag + " not supported.")
data_string = mat_element.find("data").text
data = np.array([float(part) for part in data_string.strip().split(" ") if len(part) > 0])
return data.reshape((rows, cols)).astype(dtype)
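# --- Illustrative round trip (added example, not part of the original module) ---
# A minimal sketch: serialize a small matrix to the OpenCV XML layout and parse it
# back; the names below ("opencv_storage", "camera_matrix") are only illustrative.
if __name__ == "__main__":
    root = etree.Element("opencv_storage")
    mat = np.eye(3, dtype=np.float64)
    make_opencv_matrix_xml_element(root, mat, "camera_matrix")
    recovered = parse_xml_matrix(root.find("camera_matrix"))
    assert np.allclose(mat, recovered)
    print(etree.tostring(root, pretty_print=True).decode())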
|
"""
Configuration Parser for V2D
"""
import json
import threading
import os
lock = threading.Lock()
def get_config():
with lock:
if hasattr(get_config, "config"):
return get_config.config
fname = os.environ.get("V2D_CONFIG_FILE", "config.json")
with open(fname, "r") as fin:
get_config.config = json.load(fin)
return get_config.config
def clear():
with lock:
if hasattr(get_config, "config"):
delattr(get_config, "config")
def get_app_config():
return get_config()["app"]
def get_rpc_config():
return get_config()["rpc"]
def get_vpreprocess_config():
return get_config()["vpreprocess"]
def get_vocab_config():
return get_config()["vocab"]
def get_tests_config():
return get_config()["tests"]
|
import json
import requests
import os
import sql
def call_crawlers() -> bool:
"""
Fetches the list of all shops, does some load balancing magic and calls all registered crawler
instances to start them
:return: If the calls have been successful
"""
product_ids = sql.getProductsToCrawl()
# crawler_urls = ['crawl.p4ddy.com', 'crawl.betterzon.xyz']
crawler_urls = ['http://localhost:22026']
balanced_lists = []
products_per_crawler = len(product_ids) // len(crawler_urls)
rest = len(product_ids) % len(crawler_urls)
    # Distribute available products over available crawler instances
for crawler_id in range(len(crawler_urls)):
amount_of_prods = products_per_crawler
# If we e.g. have 7 products but 2 crawlers, the first needs to crawl 4 products and the 2nd 3
if crawler_id < rest:
amount_of_prods += 1
# Assign the required amount of product ids to the current crawler and remove them from the
# list of all product ids
balanced_lists.append(product_ids[:amount_of_prods])
product_ids = product_ids[amount_of_prods:]
# Make the callouts to the instances
successful = 0
for crawler_id in range(len(crawler_urls)):
prods = balanced_lists[crawler_id]
url = crawler_urls[crawler_id]
# Send request
data = {
'key': os.environ['CRAWLER_ACCESS_KEY'],
'products': prods
}
headers = {'content-type': 'application/json', 'accept': 'application/json'}
resp = requests.post(url=url, data=json.dumps(data), headers=headers)
if resp.status_code == 200:
successful += 1
return successful == len(crawler_urls)
if __name__ == '__main__':
call_crawlers()
|
from django import forms
from django.forms import widgets
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from accounts.models import Profile
class UserRegisterForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = get_user_model()
fields = ["username", "email", "first_name", "last_name"]
labels = {
"username": "Nazwa użytkownika",
"email": "Adres email",
"first_name": "Imię",
"last_name": "Nazwisko",
}
class ProfileForm(forms.ModelForm):
profile_photo = forms.ImageField(label="Zdjęcie profilowe", required=False)
first_name = forms.CharField(label="Imię", required=True)
last_name = forms.CharField(label="Nazwisko", required=True)
email = forms.CharField(label="Adres email", required=False)
bio = forms.CharField(label="O sobie", required=False)
# birth_date = forms.DateField(widget=forms.DateInput(attrs={'class':'datepicker'}))
class Meta:
model = Profile
fields = [
"profile_photo",
"first_name",
"last_name",
"email",
"bio",
"birth_date",
]
labels = {
"birth_date": "Data urodzin",
}
widgets = {"birth_date": widgets.DateInput(attrs={"type": "date"})}
|
db_entity.zaruba_field_name = zaruba_entity_name_data.zaruba_field_name
|
from abc import ABC, abstractmethod
class AbstractWriter(ABC):
@abstractmethod
def write(self, data):
pass
@abstractmethod
def release(self):
pass
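# Illustrative concrete implementation (added example, not part of the original API):
# a writer that simply collects items in memory, showing the minimal surface a
# subclass must provide.
class InMemoryWriter(AbstractWriter):
    def __init__(self):
        self.items = []

    def write(self, data):
        self.items.append(data)

    def release(self):
        self.items.clear()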
|
import unittest
from katas.kyu_8.grasshopper_summation import summation
class SummationTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(summation(1), 1)
def test_equals_2(self):
self.assertEqual(summation(8), 36)
|
import pygame
import numpy as np
import random
import copy
from astar import astar
from constants import DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_SIZE, BLACK, GREEN
class Snake:
def __init__(self):
self.x = round(random.randrange(0, DISPLAY_WIDTH - PIXEL_SIZE) / 20.0) * 20
self.y = round(random.randrange(0, DISPLAY_HEIGHT - PIXEL_SIZE) / 20.0) * 20
self.snake_body = [[self.x, self.y]]
self.prev = [[self.x, self.y]]
        self.snake_len = 1
        self.score = 0  # incremented in eat(); must be initialised here
self.speed_x = 0
self.speed_y = 0
self.eaten = True
self.path = []
def draw(self, game_display):
l = 0
for snake in self.snake_body:
if(l == len(self.snake_body) - 1):
pygame.draw.rect(game_display, GREEN, [snake[0], snake[1], PIXEL_SIZE, PIXEL_SIZE])
else:
pygame.draw.rect(game_display, BLACK, [snake[0], snake[1], PIXEL_SIZE, PIXEL_SIZE])
l += 1
def eat(self, apple_x, apple_y):
if(self.x == apple_x and self.y == apple_y):
self.eaten = True
self.snake_len += 1
self.score += 1
return True
return False
def dead(self):
if self.x >= DISPLAY_WIDTH or self.x <= 0 or self.y >= DISPLAY_HEIGHT or self.y <= 0:
print('Snake killed! Rip!')
self.eaten = True
return True
for each in self.snake_body[:-1]:
if each == self.snake_body[-1]:
print('Snake killed! Rip!')
self.eaten = True
return True
return False
def update(self):
snakehead = []
snakehead.append(self.x)
snakehead.append(self.y)
self.snake_body.append(snakehead)
if len(self.snake_body) > self.snake_len:
del self.snake_body[0]
def move(self, grid, food, game_display):
if(len(self.path) == 0 and self.eaten == False):
self.snake_len += 1
for body in self.prev:
grid.remove_snake_on_grid(body)
self.prev = []
self.update()
food.update(self.snake_body)
food.draw(game_display)
self.eaten = True
if(self.eaten):
self.eaten = False
for body in self.snake_body:
grid.put_snake_on_grid(body)
self.prev = copy.deepcopy(self.snake_body)
self.path = astar(grid.grid, (self.y // PIXEL_SIZE, self.x // PIXEL_SIZE), (food.y // PIXEL_SIZE, food.x // PIXEL_SIZE))
self.speed_x = 0
self.speed_y = 0
if(self.path[0][1] * PIXEL_SIZE < self.x):
self.speed_x = - PIXEL_SIZE
elif(self.path[0][1] * PIXEL_SIZE > self.x):
self.speed_x = PIXEL_SIZE
if(self.path[0][0] * PIXEL_SIZE > self.y):
self.speed_y = PIXEL_SIZE
elif(self.path[0][0] * PIXEL_SIZE < self.y):
self.speed_y = - PIXEL_SIZE
self.x += self.speed_x
self.y += self.speed_y
self.update()
self.path.pop(0)
|
'''
Units tests for the cpptraj wrappers for nasqm
'''
import os
import pytest
import numpy as np
from pynasqm.nmr.nmrgroupsingle import NMRGroupSingle
import pynasqm.userinput as nasqm_user_input
import pynasqm.inputceon as inputceon
def setup_module(module):
'''
Switch to test directory
'''
os.chdir("tests/nmrTests/nmrGroupSingle")
def teardown_module(module):
'''
Return to main directory
'''
os.chdir("../../..")
def test_nmr_writer():
restricted_atoms1 = [[4055, 4056]]
restricted_atoms2 = [[4065]]
desired_distance = [4]
writer = NMRGroupSingle(restricted_atoms1, restricted_atoms2, desired_distance)
result_file = "nmr_groupsingle.dist"
answer_file = "nmr_groupsingle_test.dist"
writer.write_to(result_file)
    with open(result_file, 'r') as f_result, open(answer_file, 'r') as f_answer:
        result = f_result.read()
        answer = f_answer.read()
assert result == answer
|
"""Utility functions & decorators for dealing with FOOOF, as a module."""
from importlib import import_module
from functools import wraps
###################################################################################################
###################################################################################################
def safe_import(*args):
"""Try to import a module, with a safety net for if the module is not available.
Parameters
----------
*args : str
Module to import.
Returns
-------
mod : module or False
Requested module, if successfully imported, otherwise boolean (False).
Notes
-----
The input, `*args`, can be either 1 or 2 strings, as pass through inputs to import_module:
- To import a whole module, pass a single string, ex: ('matplotlib').
- To import a specific package, pass two strings, ex: ('.pyplot', 'matplotlib')
"""
try:
mod = import_module(*args)
except ImportError:
mod = False
# Prior to Python 3.5.4, import module could throw a SystemError
# Older approach requires the parent module be imported first
# If triggered, re-check for module after first importing the parent
except SystemError:
try:
_ = import_module(args[-1])
mod = import_module(*args)
except ImportError:
mod = False
return mod
def docs_drop_param(docstring):
"""Drop the first parameter description for a string representation of a docstring.
Parameters
----------
docstring : str
Docstring to drop first parameter from.
Returns
-------
str
New docstring, with first parameter dropped.
Notes
-----
This function assumes numpy docs standards.
It also assumes the parameter description to be dropped is only 2 lines long.
"""
sep = '----------\n'
ind = docstring.find(sep) + len(sep)
front, back = docstring[:ind], docstring[ind:]
for loop in range(2):
back = back[back.find('\n')+1:]
return front + back
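# A small illustration with a hypothetical numpydoc Parameters section, matching
# the layout this function expects: given
#
#   Parameters
#   ----------
#   data : 1d array        <- first parameter, described on exactly two lines
#       Input data.
#   freq : float
#       Frequency value.
#
# docs_drop_param returns the same docstring with the two `data` lines removed,
# so the copied description starts at `freq`.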
def docs_append_to_section(docstring, section, add):
"""Append extra information to a specified section of a docstring.
Parameters
----------
docstring : str
Docstring to update.
section : str
Name of the section within the docstring to add to.
add : str
Text to append to specified section of the docstring.
Returns
-------
str
Updated docstring.
Notes
-----
This function assumes numpydoc documentation standard.
"""
return '\n\n'.join([split + add if section in split else split \
for split in docstring.split('\n\n')])
def copy_doc_func_to_method(source):
"""Decorator that copies method docstring from function, dropping first parameter.
Parameters
----------
source : function
Source function to copy docstring from.
Returns
-------
wrapper : function
The decorated function, with updated docs.
"""
def wrapper(func):
func.__doc__ = docs_drop_param(source.__doc__)
return func
return wrapper
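# A hedged usage sketch (the function and class names are illustrative): copy a
# module-level function's docstring onto a method that supplies the first (data)
# argument from the instance, so that parameter is dropped from the copied docs.
#
#   def compute_thing(data, n_steps=10):
#       """..."""
#
#   class Model:
#       @copy_doc_func_to_method(compute_thing)
#       def compute_thing(self, n_steps=10):
#           return compute_thing(self.data, n_steps)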
def copy_doc_class(source, section='Attributes', add=''):
"""Decorator that copies method docstring from class, to another class, adding extra info.
Parameters
----------
source : cls
Source class to copy docstring from.
section : str, optional, default: 'Attributes'
Name of the section within the docstring to add to.
add : str, optional
Text to append to specified section of the docstring.
Returns
-------
wrapper : cls
The decorated class, with updated docs.
"""
def wrapper(func):
func.__doc__ = docs_append_to_section(source.__doc__, section, add)
return func
return wrapper
def check_dependency(dep, name):
"""Decorator that checks if an optional dependency is available.
Parameters
----------
dep : module or False
Module, if successfully imported, or boolean (False) if not.
name : str
Full name of the module, to be printed in message.
Returns
-------
wrap : callable
The decorated function.
Raises
------
ImportError
If the requested dependency is not available.
"""
def wrap(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
if not dep:
raise ImportError("Optional FOOOF dependency " + name + \
" is required for this functionality.")
func(*args, **kwargs)
return wrapped_func
return wrap
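# A hedged sketch of how safe_import and check_dependency are meant to combine
# (the plotting function below is illustrative, not part of this module):
#
#   plt = safe_import('.pyplot', 'matplotlib')
#
#   @check_dependency(plt, 'matplotlib')
#   def plot_spectrum(freqs, powers):
#       plt.plot(freqs, powers)
#
# Calling plot_spectrum without matplotlib installed then raises the ImportError
# defined above instead of failing inside the plotting code.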
|
import random
from time import sleep
from magicgui import magicgui
from magicgui.tqdm import tqdm, trange
# If magicgui.tqdm.tqdm or trange are used outside of a @magicgui function (such as in
# interactive use in IPython), they fall back to the standard terminal output.
# If used inside a magicgui-decorated function,
# a progress bar widget will be added to the magicgui container.
@magicgui(call_button=True, layout="vertical")
def long_function(
steps=10, repeats=4, choices="ABCDEFGHIJKLMNOP12345679", char="", delay=0.05
):
"""Long running computation with nested iterators."""
# trange and tqdm accept all the kwargs from tqdm itself, as well as any
# valid kwargs for magicgui.widgets.ProgressBar, (such as "label")
for r in trange(repeats, label="repeats"):
letters = [random.choice(choices) for _ in range(steps)]
        # magicgui's `tqdm`, like standard `tqdm`, accepts any iterable
# this progress bar is nested and will be run & reset multiple times
for letter in tqdm(letters, label="steps"):
long_function.char.value = letter
sleep(delay)
long_function.show(run=True)
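# A hedged sketch of the fallback mentioned above: calling the same iterators
# outside of a @magicgui-decorated function (for example at the IPython prompt)
# produces a plain console progress bar instead of a widget:
#
#   for _ in trange(10, label="repeats"):
#       sleep(0.05)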
|
#!/usr/bin/env python
"""FunCLI root execution function."""
import logging
import click
from .constants import CONTEXT_SETTINGS, PASS_CONTEXT
from .classes import FunCLI
@click.command(cls=FunCLI, context_settings=CONTEXT_SETTINGS)
@click.option('-d', '--debug', is_flag=True, help='Enable DEBUG mode.')
@click.option('-v', '--verbose', is_flag=True, help='Enable VERBOSE mode.')
@PASS_CONTEXT
def main(ctx, debug, verbose):
"""A playful command line interface."""
ctx.verbose = verbose
if debug:
logging.root.setLevel(logging.DEBUG)
elif verbose:
logging.root.setLevel(logging.INFO)
logging.info('INFO')
logging.debug('DEBUG')
if __name__ == '__main__':
logging.basicConfig(
format='[%(levelname)s] %(module)s:%(funcName)s - %(message)s')
main() # pylint: disable=E1120
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2012, 2013, 2014 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import unicode_literals
from zope.interface import Interface
from zope.schema import TextLine
from . import GSMessageFactory as _
class IGSSiteName(Interface):
name = TextLine(
title=_('site-name-entry-label', 'Site name'),
description=_('site-name-entry-description',
'The name of your site. It will be used in the body '
'of pages, email messages, and at the top of every '
'page.'),
required=True)
|
from django.db import models
from data_refinery_common.models.computational_result import ComputationalResult
from data_refinery_common.models.sample import Sample
class SampleResultAssociation(models.Model):
sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
)
class Meta:
db_table = "sample_result_associations"
unique_together = ("result", "sample")
|
"""Streets _dags file."""
from __future__ import print_function
from airflow.operators.python_operator import PythonOperator
from trident.operators.s3_file_transfer_operator import S3FileTransferOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from trident.operators.poseidon_email_operator import PoseidonEmailFileUpdatedOperator
from trident.operators.poseidon_sonar_operator import PoseidonSonarCreator
from airflow.models import DAG
from trident.util import general
from trident.util.notifications import notify
from trident.util.seaboard_updates import *
from dags.streets.streets_jobs import *
# All times in Airflow UTC. Set Start Time in PST?
args = general.args
conf = general.config
schedule = general.schedule['streets']
start_date = general.start_date['streets']
#: Dag spec
dag = DAG(dag_id='streets', default_args=args, start_date=start_date, schedule_interval=schedule)
#: Latest Only Operator for imcat
streets_latest_only = LatestOnlyOperator(task_id='streets_latest_only', dag=dag)
#: Get streets data from DB
get_streets_data = PythonOperator(
task_id='get_streets_paving_data',
python_callable=get_streets_paving_data,
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
#: Process data for public
process_data_sdif = PythonOperator(
task_id='process_sdif',
python_callable=process_paving_data,
op_kwargs={'mode': 'sdif'},
provide_context=True,
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
#: Process data for imcat
process_data_imcat = PythonOperator(
task_id='process_imcat',
python_callable=process_paving_data,
op_kwargs={'mode': 'imcat'},
provide_context=True,
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
#: Upload imcat streets file to S3
upload_imcat_data = S3FileTransferOperator(
task_id='upload_streets_data_imcat',
source_base_path=conf['prod_data_dir'],
source_key='sd_paving_imcat_datasd_v1.csv',
dest_s3_conn_id=conf['default_s3_conn_id'],
dest_s3_bucket=conf['dest_s3_bucket'],
dest_s3_key='tsw/sd_paving_imcat_datasd_v1.csv',
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
replace=True,
dag=dag)
#: Upload sdif streets file to S3
upload_sdif_data = S3FileTransferOperator(
task_id='upload_streets_data_sdif',
source_base_path=conf['prod_data_dir'],
source_key='sd_paving_datasd_v1.csv',
dest_s3_conn_id=conf['default_s3_conn_id'],
dest_s3_bucket=conf['dest_s3_bucket'],
dest_s3_key='tsw/sd_paving_datasd_v1.csv',
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
replace=True,
dag=dag)
update_json_date = PythonOperator(
task_id='update_json_date',
python_callable=update_json_date,
provide_context=True,
op_kwargs={'ds_fname': 'streets_repair_projects'},
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
send_esri_file = PythonOperator(
task_id='upload_streets_gis',
python_callable=send_arcgis,
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
#: send file update email to interested parties
#send_last_file_updated_email = PoseidonEmailFileUpdatedOperator(
#task_id='send_last_file_updated',
#to='chudson@sandiego.gov',
#subject='IMCAT Streets File Updated',
#file_url='http://{}/{}'.format(conf['dest_s3_bucket'],
#'tsw/sd_paving_imcat_datasd_v1.csv'),
#on_failure_callback=notify,
#on_retry_callback=notify,
#on_success_callback=notify,
#dag=dag)
#: Update portal modified date
update_streets_md = get_seaboard_update_dag('streets-repair-projects.md', dag)
#for i in ['total', 'overlay', 'slurry']:
#sonar_task = PoseidonSonarCreator(
#task_id='create_sdif_{}_miles_paved_sonar'.format(i),
#range_id='days_30',
#value_key='sdif_{}_miles'.format(i),
#value_desc='Miles Paved {}'.format(i),
#python_callable=build_sonar_miles_aggs,
#op_kwargs={'mode': 'sdif',
#'pav_type': i},
#on_failure_callback=notify,
#on_retry_callback=notify,
#on_success_callback=notify,
#dag=dag)
#: Depends on successful run of get_streets_data
#sonar_task.set_upstream(process_data_sdif)
#: Execution order
streets_latest_only >> get_streets_data >> [process_data_sdif,process_data_imcat]
process_data_sdif >> [upload_sdif_data,send_esri_file]
process_data_imcat >> upload_imcat_data
[update_json_date,update_streets_md] << upload_sdif_data
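#: Reading the bitshift operators: `a >> b` runs b after a, and `x << y` makes y
#: an upstream dependency of x, so upload_sdif_data must finish before
#: update_json_date and update_streets_md run.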
#: email notification is sent after the data was uploaded to S3
#send_last_file_updated_email.set_upstream(upload_imcat_data)
|
#!/usr/bin/env python3
# rewritten by me from C code
# C code author: @andreyorst
for i in range(1, 101):
if not i % 3:
print('Fizz', end='')
if not i % 5:
print('Buzz', end='')
if i % 3 and i % 5:
print(i, end='')
print()
|
# -*- encoding: utf-8 -*-
import os
from colorama import Fore
import nltk
import re
import sys
import tempfile
from utilities.GeneralUtilities import print_say
from CmdInterpreter import CmdInterpreter
# register hist path
HISTORY_FILENAME = tempfile.TemporaryFile('w+t')
PROMPT_CHAR = '~>'
"""
AUTHORS' SCOPE:
We thought that the source code of Jarvis would
be more organized if we treat Jarvis as Object.
So we decided to create this Jarvis Class which
implements the core functionality of Jarvis in a
simpler way than the original __main__.py.
HOW TO EXTEND JARVIS:
In progress..
DETECTED ISSUES:
    * The "near me" command is unable to find
      the actual location of our laptops.
"""
class Jarvis(CmdInterpreter, object):
# variable used at Breakpoint #1.
# allows Jarvis say "Hi", only at the first interaction.
first_reaction_text = ""
first_reaction_text += Fore.BLUE + \
'Jarvis\' sound is by default disabled.' + Fore.RESET
first_reaction_text += "\n"
first_reaction_text += Fore.BLUE + 'In order to let Jarvis talk out loud type: '
first_reaction_text += Fore.RESET + Fore.RED + 'enable sound' + Fore.RESET
first_reaction_text += "\n"
first_reaction_text += Fore.BLUE + \
"Type 'help' for a list of available actions." + Fore.RESET
first_reaction_text += "\n"
prompt = (
Fore.RED
+ "{} Hi, what can I do for you?\n".format(PROMPT_CHAR)
+ Fore.RESET)
# Used to store user specific data
def __init__(self, first_reaction_text=first_reaction_text,
prompt=prompt, first_reaction=True,
directories=["jarviscli/plugins", "custom"]):
directories = self._rel_path_fix(directories)
if sys.platform == 'win32':
self.use_rawinput = False
self.regex_dot = re.compile('\\.(?!\\w)')
CmdInterpreter.__init__(self, first_reaction_text, prompt,
directories, first_reaction)
def _rel_path_fix(self, dirs):
dirs_abs = []
work_dir = os.path.dirname(__file__)
# remove 'jarviscli/' from path
work_dir = os.path.dirname(work_dir)
# fix nltk path
nltk.data.path.append(os.path.join(work_dir, "jarviscli/data/nltk"))
# relative -> absolute paths
for directory in dirs:
if not directory.startswith(work_dir):
directory = os.path.join(work_dir, directory)
dirs_abs.append(directory)
return dirs_abs
def default(self, data):
"""Jarvis let's you know if an error has occurred."""
print_say("I could not identify your command...", self, Fore.RED)
def precmd(self, line):
"""Hook that executes before every command."""
words = line.split()
HISTORY_FILENAME.write(line + '\n')
        # prepend the 'calculate' keyword when the line starts with a digit or '-'
if words and (words[0].isdigit() or line[0] == "-"):
line = "calculate " + line
words = line.split()
if line.startswith("help"):
return line
if line.startswith("status"):
return line
if not words:
line = "None"
else:
line = self.parse_input(line)
return line
def postcmd(self, stop, line):
"""Hook that executes after every command."""
if self.first_reaction:
self.prompt = (
Fore.RED
+ "{} What can I do for you?\n".format(PROMPT_CHAR)
+ Fore.RESET)
self.first_reaction = False
if self.enable_voice:
self.speech.text_to_speech("What can I do for you?\n")
def speak(self, text):
if self.enable_voice:
self.speech.text_to_speech(text)
def parse_input(self, data):
"""This method gets the data and assigns it to an action"""
data = data.lower()
# say command is better if data has punctuation marks
if "say" not in data:
data = data.replace("?", "")
data = data.replace("!", "")
data = data.replace(",", "")
# input sanitisation to not mess up urls / numbers
data = self.regex_dot.sub("", data)
# Check if Jarvis has a fixed response to this data
if data in self.fixed_responses:
output = self.fixed_responses[data]
else:
# if it doesn't have a fixed response, look if the data corresponds
# to an action
output = self.find_action(
data, self._plugin_manager.get_plugins().keys())
return output
def find_action(self, data, actions):
"""Checks if input is a defined action.
:return: returns the action"""
output = "None"
if not actions:
return output
action_found = False
words = data.split()
actions = list(actions)
# return longest matching word
# TODO: Implement real and good natural language processing
# But for now, this code returns acceptable results
actions.sort(key=lambda l: len(l), reverse=True)
# check word by word if exists an action with the same name
for action in actions:
words_remaining = data.split()
for word in words:
words_remaining.remove(word)
# For the 'near' keyword, the words before 'near' are also needed
if word == "near":
initial_words = words[:words.index('near')]
output = word + " " +\
" ".join(initial_words + ["|"] + words_remaining)
elif word == action: # command name exists
action_found = True
output = word + " " + " ".join(words_remaining)
break
if action_found:
break
return output
def executor(self, command):
"""
If command is not empty, we execute it and terminate.
Else, this method opens a terminal session with the user.
We can say that it is the core function of this whole class
and it joins all the function above to work together like a
clockwork. (Terminates when the user send the "exit", "quit"
or "goodbye command")
:return: Nothing to return.
"""
if command:
self.execute_once(command)
else:
self.cmdloop()
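# A hedged usage sketch (the real entry point lives in __main__.py; the command
# string below is only illustrative):
#   jarvis = Jarvis()
#   jarvis.executor(None)         # no command given -> start the interactive loop
#   jarvis.executor("say hello")  # run a single command and terminate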
|
# -*- coding: utf-8 -*-
"""
Contains the mode that control the external changes of file.
"""
import os
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
from pyqode.qt import QtCore, QtWidgets
from pyqode.core.cache import Cache
class FileWatcherMode(Mode, QtCore.QObject):
""" Watches the current file for external modifications.
    The FileWatcher mode checks whether the opened file has changed externally.
"""
#: Signal emitted when the file has been deleted. The Signal is emitted
    #: with the current editor instance so that the user has a chance to close
#: the editor.
file_deleted = QtCore.Signal(object)
#: Signal emitted when the file has been reloaded in the editor.
file_reloaded = QtCore.Signal()
@property
def auto_reload(self):
"""
Automatically reloads changed files
"""
return self._auto_reload
@auto_reload.setter
def auto_reload(self, value):
self._auto_reload = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(FileWatcherMode).auto_reload = value
except KeyError:
# this should never happen since we're working with clones
pass
def __init__(self):
QtCore.QObject.__init__(self)
Mode.__init__(self)
self._auto_reload = False
self._flg_notify = False
self._data = (None, None)
self._timer = QtCore.QTimer()
self._timer.setInterval(1000)
self._timer.timeout.connect(self._check_file)
self._mtime = 0
self._notification_pending = False
self._processing = False
def on_state_changed(self, state):
if state:
self.editor.new_text_set.connect(self._update_mtime)
self.editor.new_text_set.connect(self._timer.start)
self.editor.text_saving.connect(self._cancel_next_change)
self.editor.text_saved.connect(self._update_mtime)
self.editor.text_saved.connect(self._restart_monitoring)
self.editor.focused_in.connect(self._check_for_pending)
else:
self._timer.stop()
            self.editor.new_text_set.disconnect(self._update_mtime)
            self.editor.new_text_set.disconnect(self._timer.start)
self.editor.text_saving.disconnect(self._cancel_next_change)
self.editor.text_saved.disconnect(self._restart_monitoring)
self.editor.focused_in.disconnect(self._check_for_pending)
self._timer.stop()
def _cancel_next_change(self):
self._timer.stop()
for e in self.editor.clones:
try:
w = e.modes.get(self.__class__)
except KeyError:
pass
else:
w._cancel_next_change()
def _restart_monitoring(self):
self._update_mtime()
for e in self.editor.clones:
try:
w = e.modes.get(self.__class__)
except KeyError:
pass
else:
w._restart_monitoring()
self._timer.start()
def _update_mtime(self):
""" Updates modif time """
try:
self._mtime = os.path.getmtime(self.editor.file.path)
except OSError:
# file_path does not exists.
self._mtime = 0
self._timer.stop()
except (TypeError, AttributeError):
# file path is none, this happen if you use setPlainText instead of
# openFile. This is perfectly fine, we just do not have anything to
# watch
try:
self._timer.stop()
except AttributeError:
pass
def _check_file(self):
"""
        Checks the watched file's modification time and permission changes.
"""
try:
self.editor.toPlainText()
except RuntimeError:
self._timer.stop()
return
if self.editor and self.editor.file.path:
if not os.path.exists(self.editor.file.path) and self._mtime:
self._notify_deleted_file()
else:
mtime = os.path.getmtime(self.editor.file.path)
if mtime > self._mtime:
self._mtime = mtime
self._notify_change()
# check for permission change
writeable = os.access(self.editor.file.path, os.W_OK)
self.editor.setReadOnly(not writeable)
def _notify(self, title, message, expected_action=None):
"""
        Notify the user of an external event.
"""
if self.editor is None:
return
        initial_value = self.editor.save_on_focus_out
self.editor.save_on_focus_out = False
self._flg_notify = True
dlg_type = (QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
expected_action = (
lambda *x: None) if not expected_action else expected_action
if (self._auto_reload or QtWidgets.QMessageBox.question(
self.editor, title, message, dlg_type,
QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes):
expected_action(self.editor.file.path)
self._update_mtime()
        self.editor.save_on_focus_out = initial_value
def _notify_change(self):
"""
        Notify the user of an external change and reload the file in the
        editor (after confirmation, unless auto_reload is enabled).
"""
def inner_action(*args):
""" Inner action: open file """
# cache cursor position before reloading so that the cursor
# position is restored automatically after reload has finished.
# See OpenCobolIDE/OpenCobolIDE#97
Cache().set_cursor_position(
self.editor.file.path,
self.editor.textCursor().position())
if os.path.exists(self.editor.file.path):
self.editor.file.open(self.editor.file.path)
self.file_reloaded.emit()
else:
# file moved just after a change, see OpenCobolIDE/OpenCobolIDE#337
self._notify_deleted_file()
args = (_("File changed"),
_("The file <i>%s</i> has changed externally.\nDo you want to "
"reload it?") % os.path.basename(self.editor.file.path))
kwargs = {"expected_action": inner_action}
if self.editor.hasFocus() or self.auto_reload:
self._notify(*args, **kwargs)
else:
# show the reload prompt as soon as the editor has focus
self._notification_pending = True
self._data = (args, kwargs)
def _check_for_pending(self, *args, **kwargs):
"""
Checks if a notification is pending.
"""
if self._notification_pending and not self._processing:
self._processing = True
args, kwargs = self._data
self._notify(*args, **kwargs)
self._notification_pending = False
self._processing = False
def _notify_deleted_file(self):
"""
        Notify the user of an external file deletion.
"""
self.file_deleted.emit(self.editor)
# file deleted, disable file watcher
self.enabled = False
def clone_settings(self, original):
self.auto_reload = original.auto_reload
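# A hedged usage sketch (assumes the usual pyqode pattern of appending modes to a
# CodeEdit instance; adapt to the host application):
#
#   editor = api.CodeEdit()
#   watcher = FileWatcherMode()
#   watcher.auto_reload = True          # reload silently instead of prompting
#   editor.modes.append(watcher)
#   watcher.file_deleted.connect(lambda ed: ed.close())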
|
import unittest
from tests.recipes.recipe_lib_test import BaseTestForMakeRecipe
class TestLibx264Recipe(BaseTestForMakeRecipe, unittest.TestCase):
"""
An unittest for recipe :mod:`~pythonforandroid.recipes.libx264`
"""
recipe_name = "libx264"
sh_command_calls = ["./configure"]
|
import json
import numpy as np
with open('../data/inceptionV3.json', 'r') as fp:
std = json.load(fp)
print("std read finish")
with open('../data/inceptionV3-check.json', 'r') as fp:
out = json.load(fp)
print("out read finish")
for key in std.keys():
print("checking", key, "...")
    if key not in out.keys():
        print('[Error] Key', key, 'missing from the checked output.')
        continue
    for key2 in std[key].keys():
        if key2 not in out[key].keys():
            print('[Error] Secondary key', key2, 'missing from the checked output.')
            continue
        std_val = std[key][key2]
        out_val = out[key][key2]
        if not isinstance(std_val, list):
            std_val = [std_val]
        if not isinstance(out_val, list):
            out_val = [out_val]
        std_np = np.array(std_val).flatten()
        out_np = np.array(out_val).flatten()
        if std_np.shape[0] != out_np.shape[0]:
            print('[Error] Shape mismatch.')
            continue
        length = std_np.shape[0]
        for k in range(length):
            if abs(std_np[k] - out_np[k]) > 1e-5:
                print('[Error] value differs!')
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""postprocess for 310 inference"""
import os
import argparse
import numpy as np
from mindspore.nn import Top1CategoricalAccuracy
parser = argparse.ArgumentParser("vgg16 quant postprocess")
parser.add_argument("--result_path", type=str, required=True, help="path to inference results.")
parser.add_argument("--label_path", type=str, required=True, help="path to label.npy.")
args, _ = parser.parse_known_args()
def calculate_acc(result_path, label_path):
"""
Calculate accuracy of VGG16 inference.
Args:
result_path (str): the directory or inference result.
label_path (str): the path of data label in .npy format.
"""
top1_acc = Top1CategoricalAccuracy()
labels = np.load(label_path, allow_pickle=True)
batch_size = 1
for idx, _ in enumerate(labels):
f_name = os.path.join(result_path, "VGG16_data_bs" + str(batch_size) + "_" + str(idx) + "_output_0.bin")
pred = np.fromfile(f_name, np.float32)
pred = pred.reshape(batch_size, int(pred.shape[0] / batch_size))
top1_acc.update(pred, labels[idx])
print("acc: ", top1_acc.eval())
if __name__ == '__main__':
calculate_acc(args.result_path, args.label_path)
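# Example invocation (a sketch; the script and file names are assumptions that
# depend on how the 310 inference step exported its outputs):
#   python postprocess.py --result_path ./result_files --label_path ./label.npy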
|