| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
LeetCode/leetcode 691.py
|
Xi-Plus/OJ-Code
| 0
|
12780751
|
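# LeetCode 691 (Stickers to Spell Word): brute-force search that grows a pool of
# partial character counts, applying one sticker at a time, and finally returns the
# cheapest pool entry covering every character of the target (or -1 if none does).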
class Solution:
def minStickers(self, stickers, target):
targetdict = {}
for char in target:
if char not in targetdict:
targetdict[char] = 0
targetdict[char] += 1
pool = [{'cost': 0}]
for sticker in stickers:
for i in range(len(pool)):
print(pool)
temp = pool[i].copy()
for j in range(1, 16):
temp['cost'] += 1
ok = False
for char in sticker:
if char not in targetdict:
continue
if char not in temp:
temp[char] = 0
temp[char] += 1
if char in targetdict and temp[char] <= targetdict[char]:
ok = True
if ok:
pool.append(temp.copy())
else:
break
ans = 1e10
for pack in pool:
if pack['cost'] >= ans:
continue
ok = True
for char in targetdict:
if char not in pack or pack[char] < targetdict[char]:
ok = False
break
if ok:
ans = pack['cost']
if ans == 1e10:
return -1
return ans
if __name__ == "__main__":
print(Solution().minStickers(["with", "example", "science"], "thehat"))
print(Solution().minStickers(["notice", "possible"], "basicbasic"))
# print(Solution().minStickers([
# "control", "heart", "interest", "stream", "sentence", "soil", "wonder", "them", "month", "slip", "table", "miss", "boat", "speak", "figure", "no", "perhaps", "twenty", "throw", "rich", "capital", "save", "method", "store", "meant", "life", "oil", "string", "song", "food", "am", "who", "fat", "if", "put", "path", "come", "grow", "box", "great", "word", "object", "stead", "common", "fresh", "the", "operate", "where", "road", "mean"], "stoodcrease"))
| 3.453125
| 3
|
tests/unit/streaming_hmm/utils.py
|
cylance/perturbed-sequence-model
| 4
|
12780752
|
import numpy as np
import pytest
def get_argmax_of_matrix_as_tuple(mat):
return np.unravel_index(mat.argmax(), mat.shape)
| 2.171875
| 2
|
client/sdk/python/network/network_pb2_grpc.py
|
gofortwos/micro
| 37
|
12780753
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from network import network_pb2 as network_dot_network__pb2
class NetworkStub(object):
    """Network service is used to gain visibility into networks
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Connect = channel.unary_unary(
'/network.Network/Connect',
request_serializer=network_dot_network__pb2.ConnectRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.ConnectResponse.FromString,
)
self.Graph = channel.unary_unary(
'/network.Network/Graph',
request_serializer=network_dot_network__pb2.GraphRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.GraphResponse.FromString,
)
self.Nodes = channel.unary_unary(
'/network.Network/Nodes',
request_serializer=network_dot_network__pb2.NodesRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.NodesResponse.FromString,
)
self.Routes = channel.unary_unary(
'/network.Network/Routes',
request_serializer=network_dot_network__pb2.RoutesRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.RoutesResponse.FromString,
)
self.Services = channel.unary_unary(
'/network.Network/Services',
request_serializer=network_dot_network__pb2.ServicesRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.ServicesResponse.FromString,
)
self.Status = channel.unary_unary(
'/network.Network/Status',
request_serializer=network_dot_network__pb2.StatusRequest.SerializeToString,
response_deserializer=network_dot_network__pb2.StatusResponse.FromString,
)
class NetworkServicer(object):
    """Network service is used to gain visibility into networks
"""
def Connect(self, request, context):
"""Connect to the network
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Graph(self, request, context):
"""Returns the entire network graph
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Nodes(self, request, context):
"""Returns a list of known nodes in the network
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Routes(self, request, context):
"""Returns a list of known routes in the network
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Services(self, request, context):
"""Returns a list of known services based on routes
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Status(self, request, context):
"""Status returns network status
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NetworkServicer_to_server(servicer, server):
rpc_method_handlers = {
'Connect': grpc.unary_unary_rpc_method_handler(
servicer.Connect,
request_deserializer=network_dot_network__pb2.ConnectRequest.FromString,
response_serializer=network_dot_network__pb2.ConnectResponse.SerializeToString,
),
'Graph': grpc.unary_unary_rpc_method_handler(
servicer.Graph,
request_deserializer=network_dot_network__pb2.GraphRequest.FromString,
response_serializer=network_dot_network__pb2.GraphResponse.SerializeToString,
),
'Nodes': grpc.unary_unary_rpc_method_handler(
servicer.Nodes,
request_deserializer=network_dot_network__pb2.NodesRequest.FromString,
response_serializer=network_dot_network__pb2.NodesResponse.SerializeToString,
),
'Routes': grpc.unary_unary_rpc_method_handler(
servicer.Routes,
request_deserializer=network_dot_network__pb2.RoutesRequest.FromString,
response_serializer=network_dot_network__pb2.RoutesResponse.SerializeToString,
),
'Services': grpc.unary_unary_rpc_method_handler(
servicer.Services,
request_deserializer=network_dot_network__pb2.ServicesRequest.FromString,
response_serializer=network_dot_network__pb2.ServicesResponse.SerializeToString,
),
'Status': grpc.unary_unary_rpc_method_handler(
servicer.Status,
request_deserializer=network_dot_network__pb2.StatusRequest.FromString,
response_serializer=network_dot_network__pb2.StatusResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'network.Network', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Network(object):
    """Network service is used to gain visibility into networks
"""
@staticmethod
def Connect(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Connect',
network_dot_network__pb2.ConnectRequest.SerializeToString,
network_dot_network__pb2.ConnectResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Graph(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Graph',
network_dot_network__pb2.GraphRequest.SerializeToString,
network_dot_network__pb2.GraphResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Nodes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Nodes',
network_dot_network__pb2.NodesRequest.SerializeToString,
network_dot_network__pb2.NodesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Routes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Routes',
network_dot_network__pb2.RoutesRequest.SerializeToString,
network_dot_network__pb2.RoutesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Services(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Services',
network_dot_network__pb2.ServicesRequest.SerializeToString,
network_dot_network__pb2.ServicesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Status(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/network.Network/Status',
network_dot_network__pb2.StatusRequest.SerializeToString,
network_dot_network__pb2.StatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
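# ---------------------------------------------------------------------------
# Editor's note (not part of the generated module, which should not be edited):
# a minimal client sketch, assuming a server is listening on localhost:8080 and
# that ConnectRequest can be constructed without arguments.
#
#   import grpc
#   from network import network_pb2, network_pb2_grpc
#
#   with grpc.insecure_channel('localhost:8080') as channel:
#       stub = network_pb2_grpc.NetworkStub(channel)
#       reply = stub.Connect(network_pb2.ConnectRequest())
# ---------------------------------------------------------------------------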
| 2.21875
| 2
|
src/steps/step_configure.py
|
RLogik/phpytex
| 0
|
12780754
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# IMPORTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from src.local.misc import *;
from src.local.system import *;
from src.local.typing import *;
from src.core.log import *;
from src.core.utils import createNewFileName;
from src.core.utils import formatPath;
from src.core.utils import getAttribute;
from src.core.utils import getFilesByPattern;
from src.core.utils import lengthOfWhiteSpace;
from src.core.utils import readYamlFile;
from src.core.utils import restrictDictionary;
from src.core.utils import toPythonKeysDict;
from src.customtypes.exports import ProjectTree;
from src.setup import appconfig;
from src.setup.userconfig import setupYamlReader;
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# GLOBAL VARIABLES
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# METHOD: step get config
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def step(fnameConfig: str):
logInfo('READ CONFIG STARTED');
## get configuration file
config = getPhpytexConfig(fnameConfig);
## get main parts of config
config_compile = getAttribute(config, 'compile', 'options', expectedtype=dict, default=None) \
or getAttribute(config, 'compile', expectedtype=dict, default={});
config_compile = preProcessCompileConfig({ **restrictDictionary(config, ['ignore']), **config_compile });
config_stamp = getAttribute(config, 'stamp', expectedtype=dict, default={});
config_parameters = getAttribute(config, 'parameters', expectedtype=dict, default={});
## set app config
setCompileConfig(**config_compile);
setStampConfig(**toPythonKeysDict(config_stamp));
setParamsConfig(**toPythonKeysDict(config_parameters));
setConfigFilesAndFolders(toPythonKeysDict(config));
logInfo('READ CONFIG COMPLETE');
return;
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# SECONDARY METHODS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def getPhpytexConfig(fnameConfig: str) -> Dict[str, Any]:
setupYamlReader();
try:
if not isinstance(fnameConfig, str) or fnameConfig == '':
fnameConfig = getFilesByPattern(
path = appconfig.getPathRoot(),
filepattern = appconfig.getPatternConfig()
)[0];
except:
raise Exception('Could not find or read any phpytex configuration files.');
return readYamlFile(fnameConfig);
def preProcessCompileConfig(config: Dict[str, Any]) -> Dict[str, Any]:
return dict(
ignore = getAttribute(config, 'ignore', expectedtype=bool, default=False),
legacy = getAttribute(config, 'legacy', expectedtype=bool, default=False),
startfile = getAttribute(config, ['root', 'input'], expectedtype=str),
outputfile = getAttribute(config, 'output', expectedtype=str, default='main.tex'),
debug = getAttribute(config, 'debug', expectedtype=bool, default=False),
compile = getAttribute(config, ['compile-latex', 'compile'], expectedtype=bool, default=False),
insert_bib = getAttribute(config, 'insert-bib', expectedtype=bool, default=True),
comments = getAttribute(config, 'comments', expectedtype=(str,bool), default='auto'),
show_tree = getAttribute(config, ['show-structure', 'show-tree'], expectedtype=bool, default=False),
max_length = getAttribute(config, 'max-length', expectedtype=int, default=10000),
tabs = getAttribute(config, 'tabs', expectedtype=bool, default=False),
spaces = getAttribute(config, 'spaces', expectedtype=int, default=4),
## NOTE: Do not force seed to be set if not given. Allow user to decide to NOT seed the rng.
seed = getAttribute(config, 'seed', expectedtype=int, default=None),
offset = getAttribute(config, 'offset', expectedtype=str, default=''),
);
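## Illustrative config fragment (editor's sketch, inferred from the keys read above;
## not taken from the phpytex documentation, and the file names are placeholders):
##
##   compile:
##     options:
##       root: root.tex       # mapped to `startfile`
##       output: main.tex     # mapped to `outputfile`
##       comments: auto
##       spaces: 4
##       seed: 42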
def setCompileConfig(
ignore: bool,
legacy: bool,
startfile: str,
outputfile: str,
debug: bool,
compile: bool,
insert_bib: bool,
comments: Union[str, bool],
show_tree: bool,
max_length: int,
tabs: bool,
spaces: int,
seed: Any,
offset: str
):
root = appconfig.getPathRoot();
appconfig.setOptionLegacy(legacy);
appconfig.setOptionIgnore(ignore);
appconfig.setOptionDebug(debug);
appconfig.setOptionCompileLatex(compile);
appconfig.setOptionInsertBib(insert_bib);
appconfig.setOptionShowTree(show_tree);
if isinstance(comments, str):
appconfig.setOptionCommentsAuto(comments in [ 'auto', 'default' ]);
appconfig.setOptionCommentsOn(comments in [ 'on', 'default' ] or not comments in [ 'off' ]);
else:# elif isinstance(comments, bool):
appconfig.setOptionCommentsAuto(False);
appconfig.setOptionCommentsOn(comments);
if isinstance(seed, int):
appconfig.setSeed(seed);
appconfig.setOffsetSymbol(offset);
appconfig.setMaxLengthOutput(max_length);
if tabs:
appconfig.setIndentCharacter('\t');
appconfig.setIndentCharacterRe(r'\t');
else:
appconfig.setIndentCharacter(' '*spaces);
appconfig.setIndentCharacterRe(' '*spaces);
indentsymb = appconfig.getIndentCharacter();
if lengthOfWhiteSpace(indentsymb) == 0:
raise AttributeError('Indentation symbol cannot be the empty string!');
if legacy:
appconfig.setOffsetSymbol(indentsymb);
fileStart = formatPath(startfile, root=root, relative=False);
fileOutput = formatPath(outputfile, root=root, relative=False, ext_if_empty='.tex');
assert os.path.dirname(fileOutput) == root, 'The output file can only be set to be in the root directory!';
assert not (fileStart == fileOutput), 'The output and start (\'root\'-attribute in config) paths must be different!';
appconfig.setFileStart(fileStart);
appconfig.setFileOutput(fileOutput);
# file = createNewFileName(dir=root, nameinit='phpytex_transpiled.py', namescheme='phpytex_transpiled_{}.py');
file = 'phpytex_transpiled.py';
appconfig.setFileTranspiled(formatPath(file, root=root, relative=False));
return;
def setStampConfig(
file: str = '',
overwrite: bool = True,
options: Dict[str, Any] = dict()
):
root = appconfig.getPathRoot();
if not isinstance(file, str) or file == '':
file = 'stamp.tex';
file = os.path.relpath(path=file, start=root);
if not isinstance(options, dict) or len(options) == 0:
appconfig.setWithFileStamp(False);
else:
appconfig.setWithFileStamp(True);
appconfig.setFileStamp(formatPath(file, root=root, relative=False));
appconfig.setOptionOverwriteStamp(overwrite);
appconfig.setDictionaryStamp(options);
return;
def setParamsConfig(
file: str = '',
overwrite: bool = True,
options: Dict[str, Any] = dict(),
):
appconfig.setOptionOverwriteParams(overwrite);
appconfig.setDictionaryParams(options);
root = appconfig.getPathRoot();
modulename = file if isinstance(file, str) else '';
if re.match(r'^[^\.\s]*(\.[^\.\s]*)+$', file):
path = re.sub(r'([^\.]+)\.', r'\1/', file) + '.py';
else:
        logWarn('\033[1mparameters > file\033[0m option must be a python-like import path (relative to the root of the project).');
path = 'parameters.py';
path = os.path.relpath(path, root);
modulename = re.sub(r'\/', '.', os.path.splitext(path)[0]);
if not isinstance(options, dict) or len(options) == 0:
appconfig.setWithFileParamsPy(False);
else:
appconfig.setWithFileParamsPy(True);
appconfig.setImportParamsPy(modulename);
appconfig.setFileParamsPy(formatPath(path, root=root, relative=False));
return;
def setConfigFilesAndFolders(config: Dict[str, Any]):
appconfig.setProjectTree(ProjectTree(**config));
return;
| 1.570313
| 2
|
OpenFOAM-wrapper/beam_solver.py
|
Lenferd/ANSYS-OpenFOAM
| 0
|
12780755
|
from executor.executor import Executor
from argparse import ArgumentParser
from configs.mesh import add_mesh_switch_arguments
from configs.mesh import SimpleBlockMeshConfig, SimpleBlockMeshArguments
from configs.mesh import RailMeshArguments, RailMeshConfig
from configs.fragmentation import FragmentationConfig, FragmentationArguments
from configs.execution import ExecutionConfig, ExecutionArguments
from mesh_generator.simple_generator import SimpleBlockMeshGenerator
from mesh_generator.rail_generator import RailMeshGenerator
from sys import argv
# TODO use subprocess.getoutput()
# @brief Beam end load task, only two configurable parameters and two restrictions (3 functions)
# @restrictions
# 1) Stress is not more than specified value
# 2) Deformation is not more than specified value
# @criterion
# 1) Weight should be minimum
class BeamSolver:
def __init__(self):
self.k_max_deformation = 2.139e-6
self.k_max_stress = 775900
self.k_density = 7850
self.k_mm_to_m = 0.001
# Create default mesh generator config and fragmentation config
self.mesh_config = SimpleBlockMeshConfig()
self.fragmentation_config = FragmentationConfig()
self.execution_config = ExecutionConfig()
self.execution_config.execution_folder = "/home/lenferd/OpenFOAM/lenferd-v1906/run/beamEndLoad-20-04-25/"
self.execution_config.output_dir = self.execution_config.execution_folder + "out/"
self.execution_config.prepare_env_script = "$HOME/prog/OpenFOAM/OpenFOAM-dev/etc/bashrc_modified"
def set_plane_sizes(self, height, width):
self.mesh_config.length_mm = 1000
self.mesh_config.height_mm = height
self.mesh_config.width_mm = width
mesh = SimpleBlockMeshGenerator(self.mesh_config, self.fragmentation_config, self.execution_config)
mesh.create()
mesh.generate()
    # Deformation not more than
def constraint_0(self):
deformation_name = "D"
        # FIXME: the solver is re-run for every constraint evaluation; reuse results (e.g. via a hash) if possible
executor = Executor(self.execution_config, self.mesh_config, self.fragmentation_config)
executor.run()
results = executor.get_results()
print("==== D constraint_0")
print(results)
print(results[deformation_name])
print(results[deformation_name] < self.k_max_deformation)
print(results[deformation_name] - self.k_max_deformation)
return results[deformation_name] - self.k_max_deformation
    # Stress not more than
    def constraint_1(self):
        stress_name = "D"  # NOTE: reads the same "D" result key as constraint_0
        executor = Executor(self.execution_config, self.mesh_config, self.fragmentation_config)
        executor.run()
        results = executor.get_results()
        print("==== stress constraint_1")
        print(results)
        print(results[stress_name])
        print(results[stress_name] < self.k_max_stress)
        print(results[stress_name] - self.k_max_stress)
        return results[stress_name] - self.k_max_stress
# Weight (minimum should be)
def criterion_0(self):
print("==== mass criterion_0")
weight = self.k_density * \
self.mesh_config.width_mm * self.k_mm_to_m \
* self.mesh_config.height_mm * self.k_mm_to_m \
* self.mesh_config.length_mm * self.k_mm_to_m
print(weight)
return weight
if __name__ == '__main__':
# print("BEAM SOLVER")
# print("args: {}".format(argv))
    parameters = argv[1]
    param_list = parameters.split(";")
    params = {}  # renamed from `dict` to avoid shadowing the built-in
    for param in param_list:
        split_result = param.split(":")
        params[split_result[0]] = split_result[1]
    # print(params)
    params["Points"] = params["Points"].split(",")
    params["Points"] = [float(i) for i in params["Points"]]
    # print(params)
    function = params["Function"]
    points = params["Points"]
# Create BeamSolver
beamSolver = BeamSolver()
# first - height, second - width
beamSolver.set_plane_sizes(points[0], points[1])
result = None
if function == "constraint.0":
result = beamSolver.constraint_0()
if function == "constraint.1":
result = beamSolver.constraint_1()
if function == "criterion.0":
result = beamSolver.criterion_0()
print("BeamSolver:[{}]".format(result))
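# Example invocation (editor's sketch; the numeric values are made up):
#   python beam_solver.py "Function:criterion.0;Points:25,40"
# argv[1] is a ';'-separated list of 'key:value' pairs; 'Points' carries
# 'height,width' in millimetres, as parsed above.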
| 2.125
| 2
|
apps/tasks/templatetags/taskfilter.py
|
raiots/CHRDITools
| 0
|
12780756
|
<reponame>raiots/CHRDITools
from django import template
from datetime import datetime
from dateutil.relativedelta import relativedelta
register = template.Library()
@register.filter(name='quarter_cate')
def quarter_cate(value, year_quarter):
year_now = datetime.now().strftime('%Y')
month = value.deadline.strftime('%m')
year = value.deadline.strftime('%Y')
month = int(month)
# year = int(year)
    # year_now = int(year)  # not sure why, but converting these to int made 2021 and 2022 compare as equal
# print(quarter)
req_year = str(year_quarter[1])
quarter = int(year_quarter[0])
    # Possible performance cost: each call fetches from the database every task/work package matching the given year and checks them one by one
if year == req_year:
if quarter == 1 and 1 <= month <= 3:
return str(value) + ' '
elif quarter == 2 and 4 <= month <= 6:
return str(value) + ' '
elif quarter == 3 and 7 <= month <= 9:
return str(value) + ' '
elif quarter == 4 and 10 <= month <= 12:
return str(value) + ' '
else:
return ''
else:
return ''
@register.filter(name='last_month')
def last_month(value):
curent_date = datetime.strptime(value, '%Y年%m月')
last_date = curent_date - relativedelta(months=+1)
last_month = last_date.strftime('%Y/%m')
return last_month
@register.filter(name='next_month')
def next_month(value):
curent_date = datetime.strptime(value, '%Y年%m月')
next_date = curent_date + relativedelta(months=+1)
next_month = next_date.strftime('%Y/%m')
return next_month
@register.filter(name='this_month')
def this_month(value):
curent_date = datetime.strptime(value, '%Y年%m月')
return curent_date.strftime('%m')
@register.filter(name='last_year')
def last_year(value):
curent_year = value[1]
last_year = curent_year - 1
return last_year
@register.filter(name='next_year')
def next_year(value):
curent_year = value[1]
next_year = curent_year + 1
return next_year
@register.filter(name='this_year')
def this_year(value):
curent_year = value[1]
return curent_year
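# Template usage sketch (editor's addition; variable names are illustrative):
#   {{ task|quarter_cate:year_quarter }}   where year_quarter is (quarter, year)
#   {{ "2021年05月"|last_month }}           renders as "2021/04"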
| 2.671875
| 3
|
tests/test_remix.py
|
audeering/audresample
| 2
|
12780757
|
from glob import glob
from os import path
import pytest
import audiofile as af
import numpy as np
import audresample
def set_ones(signal, channels):
signal[channels, :] = 1
return signal
def mixdown(signal):
return np.atleast_2d(np.mean(signal, axis=0))
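# Each tuple below is (signal, channels, mixdown, upmix, always_copy, expected result),
# matching the argument names in the parametrize decorator.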
@pytest.mark.parametrize(
'signal, channels, mixdown, upmix, always_copy, expect',
[
# empty signal
(
np.zeros(0, dtype=np.float32),
None,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
None,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
0,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
1,
False,
'repeat',
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
1,
False,
'zeros',
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
[0, 2],
False,
'zeros',
False,
np.zeros((2, 0), dtype=np.float32),
),
# single channel
(
np.zeros((16000,)),
None,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
None,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
None,
True,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
0,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
0,
True,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
0,
True,
'zeros',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
1,
True,
'repeat',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
1,
True,
'zeros',
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
-2,
True,
'zeros',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
[0, 2],
False,
'zeros',
False,
np.concatenate(
[
np.ones((1, 16000), dtype=np.float32),
np.zeros((1, 16000), dtype=np.float32),
]
),
),
(
np.ones((1, 16000), np.float32),
[0, 2],
True,
'zeros',
False,
0.5 * np.ones((1, 16000), dtype=np.float32),
),
# multiple channels
(
set_ones(np.zeros((4, 16000), np.float32), 2),
2,
False,
None,
False,
np.ones((1, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), -1),
-1,
False,
None,
False,
np.ones((1, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [1, 3]),
[1, 3],
False,
None,
False,
np.ones((2, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [0, 1, 2, 3]),
[0, 1, 2, 3],
False,
None,
False,
np.ones((4, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [0, 1, 2]),
range(3),
False,
None,
False,
np.ones((3, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[1, 0, 0],
False,
None,
False,
set_ones(np.zeros((3, 16000), np.float32), [1, 2]),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[3, 0, 0],
False,
'zeros',
False,
set_ones(np.zeros((3, 16000), np.float32), [1, 2]),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[3, 0, 0],
False,
'repeat',
False,
np.ones((3, 16000), np.float32),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[-6, 0, 0],
False,
'repeat',
False,
np.ones((3, 16000), np.float32),
),
# multiple channels with mixdown
(
audresample.am_fm_synth(16000, 2, 16000),
None,
True,
None,
False,
mixdown(audresample.am_fm_synth(16000, 2, 16000)),
),
(
audresample.am_fm_synth(16000, 3, 16000),
[0, 1],
True,
None,
False,
mixdown(audresample.am_fm_synth(16000, 2, 16000)),
),
# always copy
(
np.zeros((1, 16000), dtype=np.float32),
None,
False,
None,
True,
np.zeros((1, 16000), dtype=np.float32),
),
# wrong channel index
pytest.param(
np.zeros((2, 16000)),
2,
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
np.zeros((2, 16000)),
[0, 1, 2],
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
# wrong input shape
pytest.param(
np.zeros((16000, 2, 3)),
None,
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# wrong upmix type
pytest.param(
np.zeros((2, 16000)),
2,
False,
'fancy',
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
]
)
def test_remix_signal(
signal,
channels,
mixdown,
upmix,
always_copy,
expect,
):
result = audresample.remix(
signal,
channels,
mixdown,
upmix=upmix,
always_copy=always_copy,
)
np.testing.assert_equal(result, expect)
if signal.size > 0 and\
channels is None and\
not mixdown and\
signal.dtype == np.float32:
if always_copy:
assert id(signal) != id(result)
else:
assert id(signal) == id(result)
| 2.328125
| 2
|
notebooks/helpers.py
|
thomasfrederikhoeck/ml_tooling
| 7
|
12780758
|
<gh_stars>1-10
import numpy as np
import re
title_map = {
"Mr.": "Mr",
"Miss.": "Miss",
"Mrs.": "Mrs",
"Master.": "Master",
"Dr.": "Other",
"Rev.": "Other",
"Major.": "Other",
"Col.": "Other",
"Mlle.": "Miss",
"Lady.": "Other",
"Jonkheer.": "Other",
"Ms.": "Miss",
"Capt.": "Other",
"Don.": "Other",
"Sir.": "Other",
"Countess": "Other",
"Mme.": "Mrs",
}
match = re.compile(r"([A-Z][a-z]*\.)")
def binarize_na(series):
return np.where(series.isna(), 1, 0)
def convert_to_title(series):
return series.str.extract(match, expand=False).map(title_map)
def create_ticket_letters(series):
return (
series.str.extract(r"(.*\s)", expand=False)
        .str.replace(".", "", regex=False)  # treat the dot literally, not as a regex
.str.replace(" ", "")
.str.strip()
.fillna("None")
)
def consolidate_ticket_letters(series):
return series.where(series.isin(["FCC", "PC"]), "Other")
def is_missing(series):
return series.isna() * 1
def get_cabin_letter(value):
if isinstance(value, list):
return value[0][0]
return "X"
def extract_cabin_letter(series):
return series.str.split(" ").apply(get_cabin_letter)
def get_num_cabins(value):
if isinstance(value, list):
return len(value)
return 0
def extract_num_cabins(series):
return series.str.split(" ").apply(get_num_cabins)
| 2.6875
| 3
|
ons_ras_common/ons_rest_exercise.py
|
ONSdigital/ras-common
| 1
|
12780759
|
<reponame>ONSdigital/ras-common
"""
Generic Configuration tool for Micro-Service environment discovery
License: MIT
Copyright (c) 2017 Crown Copyright (Office for National Statistics)
ONSExercise wraps routines used to access the collection exercise service
"""
class ONSExercise(object):
"""
This class is designed to take all the work out of accessing the case service. Initially it
should be able to validate and log events against the case service and also query the event
service for specific combinations of events. (for example to determine case status)
"""
def __init__(self, env):
self._env = env
def activate(self):
""""""
pass
def get_by_id(self, exercise_id):
"""
Recover an exercise by exercise_id
:param exercise_id: The id of the exercise in question
:return: An exercise record
"""
exercise = self._env.asyncio.access_endpoint('/collectionexercises/{}'.format(exercise_id))
if not exercise:
return 404, {'code': 404, 'text': 'unable to find exercise for this exercise_id'}
return 200, {'code': 200, 'case': exercise}
| 2.40625
| 2
|
src/units/_identificator.py
|
ferrocactus/cellar
| 7
|
12780760
|
<gh_stars>1-10
import json
from abc import abstractmethod
from functools import reduce
import os
import numpy as np
from scipy.stats import hypergeom
from ..log import setup_logger
from ..utils.tools import parse
from ._unit import Unit
this_dir = os.path.dirname(__file__)
def join_root(path):
return os.path.abspath(os.path.join(this_dir, path))
def _get_dict(path):
"""
    Parameters
    ----------
    path: string or array
        Path to json file. In case a list of paths is provided instead,
        read them all and merge them into a single dict. Assumes depth two.

    Returns
    -------
    d: dict
        Dictionary containing marker information.
d = {
key: {
subkey: [...],
...
},
...
}
"""
# TODO straighten up the spaghetti
if isinstance(path, str):
with open(path, "r") as f:
return json.load(f)
else:
d = {}
        for single_path in path:
            with open(single_path, "r") as f:
d_part = json.load(f)
for key in d_part:
if key in d:
for subkey in d_part[key]:
if subkey in d[key]:
# to remove duplicates
d[key][subkey] = list(set().union(
d[key][subkey], d_part[key][subkey]))
else:
d[key][subkey] = d_part[key][subkey]
else:
d[key] = d_part[key]
return d
class Ide_HyperGeom(Unit):
"""
Runs hypergeom to find matching populations. Compute for every label
in x, the pop in pops where x is most likely to have been drawn from.
It is assumed that the dictionary that is passed has two levels of
hierarchy of types. First determine the lvl1 type, then the lvl2 subtype.
"""
def __init__(self, path=join_root('../markers/cell_type_marker.json'), tissue='all'):
self.logger = setup_logger("HyperGeom")
self.path = path
self.tissue = tissue
def get(self, x):
"""
Extended keys are: lvl1_type, lvl1_sv, lvl1_intersec, lvl1_total,
lvl2_type, lvl2_sv, lvl2_intersec, lvl2_total
type (string): identified type
sv (float): survival value from Hypergeometric Test
intersec (np.ndarray): array of names that overlap
total (int): total number of names in dict[type]
Returns the types of cells in x.
Args:
x (dict): x = {
label_1: {
outp_names: [name_1, ...],
...
},
...
}
Returns:
(dict): Extends x with new keys (returns copy).
"""
x = x.copy()
lvl2 = _get_dict(self.path)
# Construct lvl1 dict by merging all lvl2 dicts
lvl1 = {}
for pop in lvl2:
lvl1[pop] = parse(
np.array(reduce(lambda a, b: a+b, lvl2[pop].values()))
)
if self.tissue == 'all':
self.process_level(x, lvl1, level=1)
self.process_level(x, lvl2, level=2)
else:
self.logger.info(
"Running HyperGeom for {0} only.".format(self.tissue))
self.process_tissue(x, tissue=self.tissue, level_dict=lvl2)
return x
def process_level(self, x, level_dict, level):
for key in x:
if level > 1 and x[key]['lvl{0}_type'.format(level-1)] == 'None':
tp, sv, intersec, total = "None", 1, np.array([]), 0
all_pops = {'svs': np.array([]),
'intersecs': np.array([]),
'lens': np.array([])}
else:
if level > 1:
tp, sv, intersec, total, all_pops = self.find_population(
# x[key]['outp_names'],
x[key]['lvl{0}_intersec'.format(level-1)],
level_dict[x[key]['lvl{0}_type'.format(level-1)]]
)
else:
tp, sv, intersec, total, all_pops = self.find_population(
x[key]['outp_names'],
level_dict
)
x[key]['lvl{0}_type'.format(level)] = tp
x[key]['lvl{0}_sv'.format(level)] = sv
x[key]['lvl{0}_intersec'.format(level)] = intersec
x[key]['lvl{0}_total'.format(level)] = total
x[key]['lvl{0}_all'.format(level)] = all_pops
self.logger.info("Finished finding lvl{0} types.".format(level))
def process_tissue(self, x, tissue, level_dict):
for key in x:
tp, sv, intersec, total, all_pops = self.find_population(
x[key]['outp_names'],
level_dict[tissue]
)
x[key]['lvl1_type'] = "User Defined"
x[key]['lvl1_sv'] = 1
x[key]['lvl1_intersec'] = np.array([])
x[key]['lvl1_total'] = 0
x[key]['lvl1_all'] = {}
x[key]['lvl2_type'] = tp
x[key]['lvl2_sv'] = sv
x[key]['lvl2_intersec'] = intersec
x[key]['lvl2_total'] = total
x[key]['lvl2_all'] = all_pops
self.logger.info("Finished finding lvl2 types.")
def find_population(self, x, pops):
"""
See find_populations. Assumes x is a single list.
Args:
x (np.ndarray): 1D list of names.
pops (dict): Dictionary of populations: pops = {
type: [name_1, name_2, ...],
...
}
Returns:
(string): population name
(float): survival value
(np.ndarray): common names
(int): total number of names in matched population
"""
M = sum([len(pops[pop]) for pop in pops])
N = len(x)
survival_values = []
intersections = []
lens = []
rsv, rpop, rk = 2, -1, 0
for pop in pops:
n = len(pops[pop])
intersec = np.intersect1d(x, pops[pop])
k = len(intersec)
sv = hypergeom.sf(k-1, M=M, n=n, N=N) if k > 0 else 1
survival_values.append(sv)
intersections.append(intersec)
lens.append(len(pops[pop]))
if sv <= rsv or (rsv == 2 and k > 0):
rsv, rpop, rk = sv, pop, k
all_pops = {'svs': np.array(survival_values),
'intersecs': np.array(intersections),
'lens': np.array(lens)}
if rk == 0: # in case of no intersection, return -1
return "None", 1, np.array([]), 0, all_pops
else:
return rpop, rsv, np.intersect1d(x, pops[rpop]), len(pops[rpop]), all_pops
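# Editor's note on the statistics above: for each candidate population,
# hypergeom.sf(k-1, M=M, n=n, N=N) is the probability of observing at least k
# matching marker names when N names are drawn from a pool of M names, n of which
# belong to that population; the population with the smallest survival value
# (and a non-empty intersection) is reported as the best match.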
| 2.53125
| 3
|
jass/logic/player.py
|
gregunz/JassAI
| 0
|
12780761
|
from typing import List, Dict, Optional
from jass.agents.agent import Agent
from jass.agents.state import PlayCardState, ChooseTrumpState
from jass.logic.card import Card, Suit
from jass.logic.exceptions import IllegalMoveError
from jass.logic.hand import Hand
class Player:
def __init__(self, name: str, agent: Agent):
self.__name: str = name
self.__agent: Agent = agent
        self.__hand: Optional[Hand] = None
@property
def hand_cards(self) -> List[Card]:
return self.__hand.cards
def give(self, hand: Hand) -> None:
self.__hand = hand
def play(self, trump: Suit, trump_chooser: 'Player', players: List['Player'], trick_cards: Dict['Player', Card],
round_tricks: List[Dict['Player', Card]]) -> Card:
assert self.__hand is not None
assert self == players[0]
cards_on_table = [trick_cards[p] for p in players if p in trick_cards]
cards_playable = self.__hand.playable_cards(cards_played=cards_on_table, trump=trump)
state = PlayCardState(
trick_trump=trump,
trump_chooser_idx=players.index(trump_chooser),
player_hand=self.__hand.cards,
playable_cards=cards_playable,
trick_history=cards_on_table,
round_history=[[trick[p] for p in players] for trick in round_tricks]
)
card = self.__agent.play_card(state).card_to_play
self.__hand.play(card, cards_played=cards_on_table, trump=trump)
return card
def choose_trump(self, can_chibre) -> Optional[Suit]:
if self.__hand is None:
raise IllegalMoveError('Cannot choose trump before having cards')
state = ChooseTrumpState(self.__hand.cards, can_chibre=can_chibre) # todo: allow chibre
return self.__agent.choose_trump(state).suit
def reward(self, points: int, is_last_trick: bool) -> None:
self.__agent.trick_end(reward=points, done=is_last_trick)
def has_7_diamonds(self) -> bool:
return self.__hand.has(Card(7, Suit.diamonds))
def __eq__(self, other: 'Player') -> bool:
return self.__name == other.__name
def __hash__(self) -> int:
return hash(self.__name)
def __repr__(self) -> str:
return self.__name
| 2.84375
| 3
|
tests/fixtures/abaco.py
|
SD2E/python-datacatalog
| 0
|
12780762
|
<reponame>SD2E/python-datacatalog<gh_stars>0
import pytest
from datacatalog.identifiers import abaco
__all__ = ['nonce_id', 'manager_actor_id',
'actor_id', 'exec_id', 'worker_id']
@pytest.fixture(scope='session')
def nonce_id():
return abaco.nonceid.generate()
@pytest.fixture(scope='session')
def manager_actor_id():
return abaco.actorid.generate()
@pytest.fixture(scope='session')
def actor_id():
return abaco.actorid.generate()
@pytest.fixture(scope='session')
def exec_id():
return abaco.execid.generate()
@pytest.fixture(scope='session')
def worker_id():
return abaco.execid.generate()
| 2.234375
| 2
|
next_release_problem/problems.py
|
mandriv/next-release-problem
| 0
|
12780763
|
<reponame>mandriv/next-release-problem
from abc import ABCMeta, abstractmethod
from platypus import Problem, Binary, Real, RandomGenerator
class NRP_Problem(metaclass=ABCMeta):
def __init__(self, requirements, clients, budget_constraint):
self.requirements = requirements.copy()
self.clients = clients.copy()
self.max_budget = self.get_max_budget(budget_constraint)
    def get_max_budget(self, budget_constraint):
        total_cost = 0
        for req in self.requirements:
            total_cost += req
        if budget_constraint is None:
            return total_cost
        return total_cost / budget_constraint
def get_requirements_met(self, candidate):
requirements_met = []
for i in range(len(candidate)):
if candidate[i]:
requirements_met.append(i + 1)
return requirements_met
def get_score(self, candidate):
# score is a sum of all customer weighted scores
requirements_met = self.get_requirements_met(candidate)
score = 0
for requirement_number in requirements_met:
for client in self.clients:
client_value = client[0]
client_requirements = client[1]
client_requirement_weight = 0.0
for req in client_requirements:
if req[1] == requirement_number:
client_requirement_weight = req[0]
break
score += client_value * client_requirement_weight
return score
def get_cost(self, candidate):
        cost = 0
for i in range(len(candidate)):
if candidate[i]:
cost -= self.requirements[i]
return cost
@abstractmethod
def get_problem_function(self):
raise NotImplementedError('Method not implemented')
@abstractmethod
def generate_problem(self):
raise NotImplementedError('Method not implemented')
class NRP_MOO(NRP_Problem):
def get_problem_function(self, x):
score = self.get_score(x[0])
cost = self.get_cost(x[0])
number_of_requirements_met = len(self.get_requirements_met(x[0]))
max_budget_constraint = cost - self.max_budget
return [score, cost], [number_of_requirements_met, max_budget_constraint]
def generate_problem(self):
# 1 decision variables, 2 objectives, 2 constraint
problem = Problem(1, 2, 2)
problem.types[:] = Binary(len(self.requirements))
problem.directions[:] = Problem.MAXIMIZE
problem.constraints[0] = "!=0"
problem.constraints[1] = "<=0"
problem.function = self.get_problem_function
return problem
class NRP_SOO(NRP_Problem):
def __init__(self, requirements, clients, budget_constraint, score_weight, cost_weight):
super(NRP_SOO, self).__init__(requirements, clients, budget_constraint)
self.score_weight = score_weight
self.cost_weight = cost_weight
def get_problem_function(self, x):
score = self.get_score(x[0])
cost = self.get_cost(x[0])
weighted_score = self.score_weight * score
weighted_cost = self.cost_weight * cost
fitness = weighted_score + weighted_cost
number_of_requirements_met = len(self.get_requirements_met(x[0]))
max_budget_constraint = cost - self.max_budget
return [fitness], [number_of_requirements_met, max_budget_constraint]
def generate_problem(self):
# 1 decision variables, 1 objectives, 2 constraints
problem = Problem(1, 1, 2)
problem.types[:] = Binary(len(self.requirements))
problem.directions[:] = Problem.MAXIMIZE
problem.constraints[0] = "!=0"
problem.constraints[1] = "<=0"
problem.function = self.get_problem_function
return problem
class NRP_Random(NRP_MOO):
def generate_solutions(self):
problem = super(NRP_Random, self).generate_problem()
random_generator = RandomGenerator()
solutions = []
for _ in range(1000):
solutions.append(random_generator.generate(problem))
return solutions
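# Usage sketch (editor's addition; the data values are made up for illustration):
#   requirements = [10, 20, 15]                    # cost of each requirement
#   clients = [(5, [(0.7, 1), (0.3, 3)]),          # (client value, [(weight, requirement number), ...])
#              (3, [(1.0, 2)])]
#   problem = NRP_MOO(requirements, clients, 2).generate_problem()
#   # then optimise it, e.g. with platypus: NSGAII(problem).run(10000)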
| 2.71875
| 3
|
calculator.py
|
savrus/algory
| 2
|
12780764
|
#!/usr/bin/env python3
import sys
all_tokens = dict([
["_", -1],
["!", 0], ["~", 0], ["^", 1], ["&", 1], ["|", 1],
["**", 2], ["*", 3], ["/", 3], ["%", 3], ["+", 4], ["-", 4],
["&&", 6], ["||", 6], ["==", 7], ["!=", 7], ["<", 7], [">", 7], ["<=", 7], [">=", 7],
["=", 8], ["(", 99], [")", 99]])
unary = set(["!", "~", "_"])
class Token:
OPERATOR=0
NUMERIC=1
VARIABLE=2
def __init__(self, kind, token):
self.kind = kind
self.token = token
def __repr__(self):
return str(self.token)
def make_token(s):
if s in all_tokens: return Token(Token.OPERATOR, s)
elif s.isdigit(): return Token(Token.NUMERIC, int(s))
elif s.isalpha(): return Token(Token.VARIABLE, s)
    else: raise Exception("Error: invalid token: '{}'".format(s))
def tokenize(s):
tokens = []
e = b = 0
while e < len(s):
if s[e] == ' ' or s[e] in all_tokens or s[e:e+2] in all_tokens:
if b < e: tokens.append(make_token(s[b:e]))
b = e + 1
if s[e:e+2] in all_tokens:
tokens.append(make_token(s[e:e+2]))
e, b = e + 1, e + 2
elif s[e] in all_tokens: tokens.append(make_token(s[e]))
e += 1
if b < len(s): tokens.append(make_token(s[b:]))
return tokens
def parse(tokens):
result = []
stack = []
expectExpr = True
for t in tokens:
if t.kind == Token.NUMERIC or t.kind == Token.VARIABLE:
result.append(t)
expectExpr = False
else:
if t.token == "(":
if not expectExpr: raise Exception("Error: expected a binary operator, but got '('")
stack.append(t)
elif t.token == ")":
if expectExpr: raise Exception("Error: expected an expression, but got ')'")
while len(stack) > 0 and stack[-1].token != "(": result.append(stack.pop())
if len(stack) == 0 or stack[-1].token != "(": raise Exception("Error: unmatched ')'")
stack.pop()
expectExpr = False
elif t.token in unary:
if not expectExpr: raise Exception("Error: expected a binary operator, but got unary operator '{}'". format(t.token))
stack.append(t)
elif expectExpr and t.token == "-":
stack.append(Token(Token.OPERATOR, "_"))
else:
if expectExpr: raise Exception("Error: expected an expression, but got binary operator '{}'". format(t.token))
while len(stack) > 0 and all_tokens[stack[-1].token] < all_tokens[t.token]: result.append(stack.pop())
stack.append(t)
expectExpr = True
while len(stack) > 0 and stack[-1].token != "(": result.append(stack.pop())
if len(stack) > 0: raise Exception("Error: unmatched '('")
return result
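# Editor's note: parse() above is a shunting-yard style infix-to-postfix conversion
# (a lower number in all_tokens means the operator binds tighter), and
# Calculator.execute() below evaluates the resulting postfix stream with a value stack.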
class Calculator:
def __init__(self):
self.variables = {}
def value(self, token):
if token.kind == Token.VARIABLE:
if token.token not in self.variables:
raise Exception("Error: variable '{}' is referenced before assignment".format(token.token))
else: return self.variables[token.token]
else: return token.token
def execute(self, tokens):
stack = []
tmp = {}
def append(val):
stack.append(Token(Token.NUMERIC, val))
for t in tokens:
if t.kind == Token.OPERATOR:
if t.token == "=":
val = self.value(stack.pop())
v = stack.pop()
if v.kind != Token.VARIABLE:
raise Exception("Error: left hand of the assignment is not an l-value")
tmp[v.token] = val
append(val)
elif t.token in unary:
val = self.value(stack.pop())
if t.token == "!": append(0 if val != 0 else 1)
elif t.token == "~": append(~val)
elif t.token == "_": append(-val)
                    else: raise Exception("Unknown operator '{}'".format(t.token))
else:
rhs = self.value(stack.pop())
lhs = self.value(stack.pop())
if t.token == "+": append(lhs + rhs)
elif t.token == "-": append(lhs - rhs)
elif t.token == "*": append(lhs * rhs)
                    elif t.token == "/": append(lhs // rhs)  # floor division, preserving the original Python 2 integer behaviour
elif t.token == "%": append(lhs % rhs)
elif t.token == "&": append(lhs & rhs)
elif t.token == "|": append(lhs | rhs)
elif t.token == "^": append(lhs ^ rhs)
elif t.token == "**": append(int(lhs ** rhs))
elif t.token == "&&": append(1 if (lhs != 0 and rhs != 0) else 0)
elif t.token == "||": append(0 if (lhs == 0 and rhs == 0) else 1)
elif t.token == "==": append(1 if lhs == rhs else 0)
elif t.token == "!=": append(0 if lhs == rhs else 1)
elif t.token == ">": append(1 if lhs > rhs else 0)
elif t.token == "<": append(1 if lhs < rhs else 0)
elif t.token == ">=": append(1 if lhs >= rhs else 0)
elif t.token == "<=": append(1 if lhs <= rhs else 0)
                    else: raise Exception("Unknown operator '{}'".format(t.token))
else: stack.append(t)
if len(stack) > 2: raise Exception("Unfinished expression")
self.variables.update(tmp)
return self.value(stack[0]) if len(stack) == 1 else 0
if __name__ == "__main__":
    c = Calculator()
    while True:
        try:
            line = input("> ")  # Python 3: was raw_input
            tokens = tokenize(line)
            if len(tokens) > 0:
                parsed = parse(tokens)
                result = c.execute(parsed)
                print(result)
        except EOFError:
            print()
            break
        except Exception as e:
            print(e)
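# Example session (editor's illustration, using the precedence table defined above):
#   > x = 2 + 3 * 4
#   14
#   > x ** 2
#   196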
| 3.375
| 3
|
build/lib/agent/mapping.py
|
SAEONData/textfile-harvester
| 0
|
12780765
|
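# Editor's note: the keys below are Landsat MTL metadata field names; entries mapped
# to "XXXXX"/"ZZZZZ" appear to be unfilled placeholders awaiting a real target key
# and default value.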
mapping = {
"ORIGIN": {"key": "XXXXX", "default": "ZZZZZ"},
"REQUEST_ID": {"key": "XXXXX", "default": "ZZZZZ"},
"LANDSAT_SCENE_ID": {"key": "XXXXX", "default": "ZZZZZ"},
"COLLECTION_NUMBER": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_DATE": {"key": "publicationYear", "default": "2018", 'slice': (0, 4)},
"STATION_ID": {"key": "XXXXX", "default": "ZZZZZ"},
"PROCESSING_SOFTWARE_VERSION": {"key": "XXXXX", "default": "ZZZZZ"},
"DATA_TYPE": {"key": "XXXXX", "default": "ZZZZZ"},
"COLLECTION_CATEGORY": {"key": "XXXXX", "default": "ZZZZZ"},
"ELEVATION_SOURCE": {"key": "XXXXX", "default": "ZZZZZ"},
"OUTPUT_FORMAT": {"key": "XXXXX", "default": "ZZZZZ"},
"SPACECRAFT_ID": {"key": "XXXXX", "default": "ZZZZZ"},
"SENSOR_ID": {"key": "XXXXX", "default": "ZZZZZ"},
"WRS_PATH": {"key": "XXXXX", "default": "ZZZZZ"},
"WRS_ROW": {"key": "XXXXX", "default": "ZZZZZ"},
"NADIR_OFFNADIR": {"key": "XXXXX", "default": "ZZZZZ"},
"TARGET_WRS_PATH": {"key": "XXXXX", "default": "ZZZZZ"},
"TARGET_WRS_ROW": {"key": "XXXXX", "default": "ZZZZZ"},
"DATE_ACQUIRED": {"key": "XXXXX", "default": "ZZZZZ"},
"SCENE_CENTER_TIME": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UL_LAT_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UL_LON_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UR_LAT_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UR_LON_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LL_LAT_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LL_LON_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LR_LAT_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LR_LON_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UL_PROJECTION_X_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UL_PROJECTION_Y_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UR_PROJECTION_X_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_UR_PROJECTION_Y_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LL_PROJECTION_X_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LL_PROJECTION_Y_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LR_PROJECTION_X_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"CORNER_LR_PROJECTION_Y_PRODUCT": {"key": "XXXXX", "default": "ZZZZZ"},
"PANCHROMATIC_LINES": {"key": "XXXXX", "default": "ZZZZZ"},
"PANCHROMATIC_SAMPLES": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTIVE_LINES": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTIVE_SAMPLES": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"FILE_NAME_BAND_QUALITY": {"key": "XXXXX", "default": "ZZZZZ"},
"METADATA_FILE_NAME": {"key": "XXXXX", "default": "ZZZZZ"},
"CPF_NAME": {"key": "XXXXX", "default": "ZZZZZ"},
"BPF_NAME_OLI": {"key": "XXXXX", "default": "ZZZZZ"},
"RLUT_FILE_NAME": {"key": "XXXXX", "default": "ZZZZZ"},
"CLOUD_COVER": {"key": "XXXXX", "default": "ZZZZZ"},
"CLOUD_COVER_LAND": {"key": "XXXXX", "default": "ZZZZZ"},
"IMAGE_QUALITY_OLI": {"key": "XXXXX", "default": "ZZZZZ"},
"ROLL_ANGLE": {"key": "XXXXX", "default": "ZZZZZ"},
"SUN_AZIMUTH": {"key": "XXXXX", "default": "ZZZZZ"},
"SUN_ELEVATION": {"key": "XXXXX", "default": "ZZZZZ"},
"EARTH_SUN_DISTANCE": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"SATURATION_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"TRUNCATION_OLI": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MAXIMUM_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MINIMUM_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MAXIMUM_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MINIMUM_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MAX_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"QUANTIZE_CAL_MIN_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_MULT_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"RADIANCE_ADD_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_MULT_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_1": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_2": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_3": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_4": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_5": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_6": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_7": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_8": {"key": "XXXXX", "default": "ZZZZZ"},
"REFLECTANCE_ADD_BAND_9": {"key": "XXXXX", "default": "ZZZZZ"},
"MAP_PROJECTION": {"key": "XXXXX", "default": "ZZZZZ"},
"DATUM": {"key": "XXXXX", "default": "ZZZZZ"},
"ELLIPSOID": {"key": "XXXXX", "default": "ZZZZZ"},
"UTM_ZONE": {"key": "XXXXX", "default": "ZZZZZ"},
"GRID_CELL_SIZE_PANCHROMATIC": {"key": "XXXXX", "default": "ZZZZZ"},
"GRID_CELL_SIZE_REFLECTIVE": {"key": "XXXXX", "default": "ZZZZZ"},
"ORIENTATION": {"key": "XXXXX", "default": "ZZZZZ"},
"RESAMPLING_OPTION": {"key": "XXXXX", "default": "ZZZZZ"},
}
| 1.515625
| 2
|
portfolio/migrations/0008_auto_20190225_1927.py
|
sarath196/Portfolio-Web-Theme
| 3
|
12780766
|
# Generated by Django 2.0 on 2019-02-25 19:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0007_auto_20190225_1849'),
]
operations = [
migrations.AddField(
model_name='portfoliopage',
name='git_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='portfoliopage',
name='linkedin_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='portfoliopage',
name='source',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 1.53125
| 2
|
oct_26_2019/myproject.py
|
SoundBoySelecta/BDT_1160_homework
| 0
|
12780767
|
<filename>oct_26_2019/myproject.py
import sys
import pandas as pd
import matplotlib.pyplot as plt
import os
file_name = sys.argv[1]
#print(file_name)
df = pd.read_csv(file_name,
sep=',',
header=None)
df.columns = ['id', 'diagnosis', 'mean radius', 'mean texture', 'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness', 'mean concavity',
'mean concave points', 'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error', 'perimeter error', 'area error',
'smoothness error', 'compactness error', 'concavity error',
'concave points error', 'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture', 'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness', 'worst concavity',
'worst concave points', 'worst symmetry', 'worst fractal dimension']
df.drop('id', axis=1, inplace=True)
os.makedirs('plots', exist_ok=True)
# Plotting line plot
df_mean_area = df['mean area']
plt.plot(df_mean_area)
plt.title('Linear Plot')
plt.xlabel('Instance')
plt.ylabel('Mean Area')
plt.savefig('plots/mean_area_by_each_instance_plot.png', dpi=300)
plt.show()
# Plotting scatterplot
df_mean_area = df['mean area']
df_mean_symmetry = df['mean symmetry']
plt.scatter(df_mean_area , df_mean_symmetry, color='b')
plt.title('Mean Area vs Mean Symmetry')
plt.xlabel('Mean Area')
plt.ylabel('Mean Symmetry')
plt.savefig('plots/mean_area_vs_mean_symmetry.png', format='png')
plt.show()
plt.close()
| 2.84375
| 3
|
trainer/rename.py
|
Originofamonia/pylon
| 16
|
12780768
|
def rename(newname):
"""define the name of the function"""
def decorator(f):
f.__name__ = newname
return f
return decorator
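# Illustrative usage (added sketch, not part of the original module): shows the
# decorator giving a dynamically defined function a readable __name__.
if __name__ == "__main__":
    @rename("answer")
    def _generated():
        return 42

    print(_generated.__name__)  # -> "answer"
    print(_generated())         # -> 42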
| 3.640625
| 4
|
Mundo1/aula7c.py
|
OliveiraVasconcelos/Python-CursoemVideo
| 0
|
12780769
|
<gh_stars>0
### create an algorithm that reads a number and shows its double, triple and square root
num = int(input('Enter a value: '))
dobro = num * 2
triplo = num * 3
raizq = num ** (1/2)
print('{} was received, the double of num is {}, the triple is {} and the square root is {}'.format(num, dobro, triplo, raizq))
| 3.90625
| 4
|
katachi/tools/assign_landmarks.py
|
WhoIsJack/data-driven-analysis-lateralline
| 3
|
12780770
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 11:04:07 2017
@author: <NAME> @ Gilmour group @ EMBL Heidelberg
@descript: Generates a sparse 3D point cloud representation for each cell in
a segmented stack based on the intensity distribution of a second
stack within the cell's segmentation region.
"""
#------------------------------------------------------------------------------
# IMPORTS
# External
from __future__ import division
import os, pickle
import numpy as np
import scipy.ndimage as ndi
import matplotlib.pyplot as plt
from tifffile import imread
# Internal
from katachi.utilities.ISLA import isla
#------------------------------------------------------------------------------
# FUNCTION: ASSIGN LANDMARKS TO EACH CELL OF A SEGMENTATION
def assign_landmarks(fpath_seg, fpath_int, num_LMs, save_centroids=False,
fpath_out=None, show_cells=None, verbose=False,
global_prep_func=None, global_prep_params=None,
local_prep_func=None, local_prep_params=None,
landmark_func='default', landmark_func_params=None):
"""Generate a sparse 3D point cloud representation of a segmented cell.
For each segmented object (cell) specified by a labeled segmentation stack,
a point cloud is generated that represents the intensity distribution of a
second stack in that cell's region.
Custom preprocessing functions can be applied both to the global image and
locally to each cell crop-out. The function used for determining landmark
coordinates may also be specified by the user. By default, no preprocessing
is done and landmarks are extracted using ISLA (for more information, see
`katachi.utilities.ISLA`).
The results are written as a .npy file containing an array of shape (N,M,3)
where N is the number of cells, M the number of landmarks per cell and 3
are the three spatial dimensions of the input stack. The origin of the
coordinate system is set to the centroid of the cell's segmentation region
and the axes are scaled according to the specified pixel resolution.
Parameters
----------
fpath_seg : string
The path (either local from cwd or global) to a tif file containing
the single-cell segmentation stack. A point cloud will be generated for
each labeled object in this segmentation.
fpath_int : string
The path (either local from cwd or global) to a tif file containing
intensity values. This stack must have the same shape as the fpath_seg
stack. The point clouds generated for each cell in the segmentation
represent the intensity distribution of this stack within that cell.
num_LMs : int
The number of landmarks to extract for each cell.
save_centroids : bool, optional, default False
If True, the centroid coordinates (in the image space) of each labeled
object will be added to the stack_metadata file (with key "centroids").
fpath_out : string, optional, default None
A path (either local from cwd or global) specifying a file to which the
results should be written. If None, fpath_int is used but with the
suffix `_LMs.npy` instead of the original suffix `.tif`.
show_cells : int, optional, default None
If an int `i` is given, a slice from each cell up to `i` is displayed
with an overlay of the extracted landmarks. Once `i` cells have been
processed, the rest of the cells is processed without displaying.
verbose : bool, optional, default False
If True, more information is printed.
global_prep_func : callable, optional, default None
A user-defined function for custom preprocessing. This function will
be applied to the entire intensity stack prior to landmark extraction.
The callable must accept the intensity stack and the segmentation stack
as the first two arguments and it may accept a third argument that
packages additional parameters (see just below). The function must
return another valid intensity stack that is of the same shape as the
original.
global_prep_params : any type, optional, default None
Third argument (after intensity stack and segmentation stack) to the
global_prep_func (see just above). This could be anything, but will
usually be a list of additional parameters for the function.
local_prep_func : callable, optional, default None
A user-defined function for custom preprocessing. This function will
be applied to the bounding box crop of each cell just prior to landmark
extraction. The callable must accept the crops of the intensity stack
and the segmentation stack as well as the cell index as the first three
arguments. It may accept a fourth argument that packages additional
parameters (see just below). The function must return another valid
cropped intensity stack that is of the same shape as the original.
local_prep_params : any type, optional, default None
4th argument (after intensity crop, segmentation crop and cell index)
to the local_prep_func (see just above). This could be anything, but
will usually be a list of additional parameters for the function.
landmark_func : callable or 'default', optional, default 'default'
Function used to extract landmark coordinates from the bounding box
crop of each cell after the region around the cell (outside the cell's
segmentation region) have been set to zero. The function must accept
the (masked) intensity crop and num_LMs as its first two arguments and
may accept a third argument that packages additional parameters (see
just below). The function must return an array of shape (M,3), where M
is the number of landmarks and 3 are the three spatial dimensions of
the input cell.
landmark_func_params : any type, optional, default None
Third argument (after masked intensity crop and num_LMs) to the
landmark_func (see just above). This could be anything, but will
usually be a list of additional parameters for the function.
"""
#--------------------------------------------------------------------------
### Load data
if verbose: print "Loading stacks..."
# Try loading the segmentation stack
try:
img_seg = imread(fpath_seg)
except:
print "Attempting to load segmentation stack failed with this error:"
raise
# Check dimensionality
if not img_seg.ndim == 3:
raise IOError("Expected a 3D segmentation stack, got " +
str(img_seg.ndim) + "D instead.")
# Try loading the intensity stack
try:
img_int = imread(fpath_int)
except:
print "Attempting to load intensity stack failed with this error:"
raise
# Check dimensionality
if not img_int.ndim == 3:
raise IOError("Expected a 3D intensity stack, got " +
str(img_int.ndim) + "D instead.")
# Double-check that show_cells is an integer
if show_cells is not None and type(show_cells) != int:
raise IOError("Argument show_cells expects int or None, got " +
str(type(show_cells)))
# Load the metadata and get the resolution from it
try:
dirpath, fname = os.path.split(fpath_int)
fpath_meta = os.path.join(dirpath, fname[:10]+"_stack_metadata.pkl")
with open(fpath_meta, 'rb') as metafile:
meta_dict = pickle.load(metafile)
res = meta_dict['resolution']
except:
print "Getting resolution from metadata failed with this error:"
raise
#--------------------------------------------------------------------------
### Apply a user-defined global preprocessing function
if global_prep_func is not None:
if verbose: print "Applying global preprocessing function..."
if global_prep_params:
img_int = global_prep_func(img_int, img_seg, global_prep_params)
else:
img_int = global_prep_func(img_int, img_seg)
#--------------------------------------------------------------------------
### Run landmark extraction for each cell
if verbose: print "Performing landmark assignments..."
# Get bounding boxes
bboxes = ndi.find_objects(img_seg)
# Prep
cell_indices = np.unique(img_seg)[1:]
landmarks = np.zeros((cell_indices.size, num_LMs, 3))
if save_centroids: centroids = np.zeros((cell_indices.size, 3))
# For each cell...
for c, cell_idx in enumerate(cell_indices):
# Crop the cell
cell_int = np.copy(img_int[bboxes[c][0], bboxes[c][1], bboxes[c][2]])
cell_seg = img_seg[bboxes[c][0], bboxes[c][1], bboxes[c][2]]
# Apply a user-defined local preprocessing function
if local_prep_func is not None:
if local_prep_params:
cell_int = local_prep_func(cell_int, cell_seg, cell_idx,
local_prep_params)
else:
cell_int = local_prep_func(cell_int, cell_seg, cell_idx)
# Mask the signal
cell_int[cell_seg!=cell_idx] = 0
# Assign landmarks
if landmark_func == 'default':
landmarks[c,:,:] = isla(cell_int, num_LMs, seed=42)
elif landmark_func_params is None:
landmarks[c,:,:] = landmark_func(cell_int, num_LMs)
else:
landmarks[c,:,:] = landmark_func(cell_int, num_LMs,
params=landmark_func_params)
# Show the first few cells
if show_cells is not None:
plt.imshow(cell_int[cell_int.shape[0]//2,:,:],
interpolation='none', cmap='gray')
lms_show = landmarks[c,landmarks[c,:,0]==cell_int.shape[0]//2,:]
plt.scatter(lms_show[:,2], lms_show[:,1],
c='red', edgecolor='', marker='s', s=5)
plt.title("Cell "+str(c+1))
plt.axis('off')
plt.show()
if c >= show_cells-1:
show_cells = None
# Center origin on segmentation object's centroid
# Note: `centroid==center_of_mass` for uniformly dense bodies.
centroid = np.array(ndi.center_of_mass(cell_seg==cell_idx))
landmarks[c,:,:] = landmarks[c,:,:] - centroid
# Keep the centroids (relative to image space) as metadata
if save_centroids:
centroids[c,:] = centroid + np.array( [bboxes[c][d].start
for d in range(3)] )
# Scale the axes to the stack's pixel resolution
landmarks = landmarks * np.array(res)
if save_centroids: centroids = centroids * np.array(res)
#--------------------------------------------------------------------------
### Save, report and return
if verbose: print "Saving result..."
# Save the landmarks
if fpath_out is None:
np.save(fpath_int[:-4]+"_LMs", landmarks)
else:
np.save(fpath_out, landmarks)
# Save the centroids to the metadata
if save_centroids:
dirpath, fname = os.path.split(fpath_int)
fpath_meta = os.path.join(dirpath, fname[:10]+"_stack_metadata.pkl")
with open(fpath_meta, 'rb') as metafile:
meta_dict = pickle.load(metafile)
meta_dict["centroids"] = centroids
with open(fpath_meta, 'wb') as metafile:
pickle.dump(meta_dict, metafile, pickle.HIGHEST_PROTOCOL)
# Report and return
if verbose: print "Processing complete!"
return
#------------------------------------------------------------------------------
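# Illustrative usage (added sketch, not from the original module). The file
# names and landmark count below are hypothetical; the segmentation and
# intensity stacks must share the same shape, and a matching
# *_stack_metadata.pkl file providing the pixel resolution must exist next to
# the intensity stack (see the docstring above).
#
#     assign_landmarks("sample_seg.tif", "sample_intensity.tif", num_LMs=50,
#                      save_centroids=True, verbose=True)
#
# With fpath_out=None this writes "sample_intensity_LMs.npy", an array of
# shape (num_cells, 50, 3) with landmarks centered on each cell's centroid.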
| 2.234375
| 2
|
livereload/cli.py
|
andreycizov/python-livereload
| 0
|
12780771
|
<filename>livereload/cli.py
import argparse
import tornado.log
from livereload.server import Server
parser = argparse.ArgumentParser(description='Start a `livereload` server')
parser.add_argument(
'--host',
help='Hostname to run `livereload` server on',
type=str,
default='127.0.0.1'
)
parser.add_argument(
'-p', '--port',
help='Port to run `livereload` server on',
type=int,
default=35729
)
parser.add_argument(
'--port-override',
dest='override_port',
help='Port at which `livereload` appears to the client',
type=int,
default=None
)
parser.add_argument(
'directory',
help='Directory to serve files from',
type=str,
default='.',
nargs='?'
)
parser.add_argument(
'-t', '--target',
help='File or directory to watch for changes',
type=str,
)
parser.add_argument(
'-w', '--wait',
help='Time delay in seconds before reloading',
type=float,
default=0.0
)
parser.add_argument(
'-o', '--open-url-delay',
help='If set, triggers browser opening <D> seconds after starting',
type=float
)
parser.add_argument(
'-d', '--debug',
help='Enable Tornado pretty logging',
action='store_true'
)
def main(argv=None):
    args = parser.parse_args(argv)
if args.debug:
tornado.log.enable_pretty_logging()
# Create a new application
server = Server()
server.watcher.watch(args.target or args.directory, delay=args.wait)
server.serve(host=args.host, port=args.port, root=args.directory,
open_url_delay=args.open_url_delay,
override_port=args.override_port)
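# Illustrative entry point (added sketch, not from the original module); the
# package may instead expose `main` through a console_scripts entry in setup.py.
if __name__ == '__main__':
    main()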
| 2.546875
| 3
|
panda/migrations/0001_initial.py
|
higs4281/panda
| 72
|
12780772
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('panda_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=256, db_index=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('panda', ['Category'])
# Adding model 'TaskStatus'
db.create_table('panda_taskstatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('status', self.gf('django.db.models.fields.CharField')(default='PENDING', max_length=50)),
('message', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('start', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('end', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('traceback', self.gf('django.db.models.fields.TextField')(default=None, null=True, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tasks', null=True, to=orm['auth.User'])),
))
db.send_create_signal('panda', ['TaskStatus'])
# Adding model 'Dataset'
db.create_table('panda_dataset', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=256, db_index=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('initial_upload', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='initial_upload_for', null=True, to=orm['panda.DataUpload'])),
('columns', self.gf('panda.fields.JSONField')(default=None, null=True)),
('sample_data', self.gf('panda.fields.JSONField')(default=None, null=True)),
('row_count', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('current_task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['panda.TaskStatus'], null=True, blank=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='datasets', to=orm['auth.User'])),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
('last_modification', self.gf('django.db.models.fields.TextField')(default=None, null=True, blank=True)),
('last_modified_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
))
db.send_create_signal('panda', ['Dataset'])
# Adding M2M table for field categories on 'Dataset'
db.create_table('panda_dataset_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dataset', models.ForeignKey(orm['panda.dataset'], null=False)),
('category', models.ForeignKey(orm['panda.category'], null=False))
))
db.create_unique('panda_dataset_categories', ['dataset_id', 'category_id'])
# Adding model 'DataUpload'
db.create_table('panda_dataupload', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('size', self.gf('django.db.models.fields.IntegerField')()),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')()),
('dataset', self.gf('django.db.models.fields.related.ForeignKey')(related_name='data_uploads', null=True, to=orm['panda.Dataset'])),
('data_type', self.gf('django.db.models.fields.CharField')(max_length=4, null=True, blank=True)),
('encoding', self.gf('django.db.models.fields.CharField')(default='utf-8', max_length=32)),
('dialect', self.gf('panda.fields.JSONField')(null=True)),
('columns', self.gf('panda.fields.JSONField')(null=True)),
('sample_data', self.gf('panda.fields.JSONField')(null=True)),
('imported', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('panda', ['DataUpload'])
# Adding model 'Export'
db.create_table('panda_export', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('size', self.gf('django.db.models.fields.IntegerField')()),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')()),
('dataset', self.gf('django.db.models.fields.related.ForeignKey')(related_name='exports', to=orm['panda.Dataset'])),
))
db.send_create_signal('panda', ['Export'])
# Adding model 'Notification'
db.create_table('panda_notification', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('recipient', self.gf('django.db.models.fields.related.ForeignKey')(related_name='notifications', to=orm['auth.User'])),
('message', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(default='Info', max_length=16)),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('read_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
('related_task', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['panda.TaskStatus'], null=True)),
('related_dataset', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['panda.Dataset'], null=True)),
))
db.send_create_signal('panda', ['Notification'])
# Adding model 'RelatedUpload'
db.create_table('panda_relatedupload', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256)),
('size', self.gf('django.db.models.fields.IntegerField')()),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')()),
('dataset', self.gf('django.db.models.fields.related.ForeignKey')(related_name='related_uploads', to=orm['panda.Dataset'])),
))
db.send_create_signal('panda', ['RelatedUpload'])
# Adding model 'UserProfile'
db.create_table('panda_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('activation_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal('panda', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('panda_category')
# Deleting model 'TaskStatus'
db.delete_table('panda_taskstatus')
# Deleting model 'Dataset'
db.delete_table('panda_dataset')
# Removing M2M table for field categories on 'Dataset'
db.delete_table('panda_dataset_categories')
# Deleting model 'DataUpload'
db.delete_table('panda_dataupload')
# Deleting model 'Export'
db.delete_table('panda_export')
# Deleting model 'Notification'
db.delete_table('panda_notification')
# Deleting model 'RelatedUpload'
db.delete_table('panda_relatedupload')
# Deleting model 'UserProfile'
db.delete_table('panda_userprofile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'panda.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256', 'db_index': 'True'})
},
'panda.dataset': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}),
'columns': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}),
'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}),
'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256', 'db_index': 'True'})
},
'panda.dataupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'},
'columns': ('panda.fields.JSONField', [], {'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'dialect': ('panda.fields.JSONField', [], {'null': 'True'}),
'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.export': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.notification': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}),
'related_dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.Dataset']", 'null': 'True'}),
'related_task': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.TaskStatus']", 'null': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'})
},
'panda.relatedupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': "orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'panda.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['panda']
| 2.203125
| 2
|
Hybrid-P2P_ChatPy/server_users.py
|
davidcawork/uah-lrss_
| 0
|
12780773
|
<reponame>davidcawork/uah-lrss_<filename>Hybrid-P2P_ChatPy/server_users.py
#!/usr/bin/env python3
import socket
import sys
import pickle
import select
import os
#PROTOCOL MSGs
P2P_CHAT_PY_PROTOCOL_HI = 'ChatPy_Hi'
P2P_CHAT_PY_PROTOCOL_HI_ACK = 'ChatPy_Hi_Ack'
P2P_CHAT_PY_PROTOCOL_BYE = 'ChatPy_Bye'
P2P_CHAT_PY_PROTOCOL_BYE_ACK = 'ChatPy_Bye_Ack'
P2P_CHAT_PY_PROTOCOL_UPDATE = 'ChatPy_Update'
P2P_CHAT_PY_PROTOCOL_UPDATE_ACK = 'ChatPy_Update_Ack'
P2P_CHAT_PY_PROTOCOL_CONN = 'ChatPy_Conn'
P2P_CHAT_PY_PROTOCOL_CONN_ACK = 'ChatPy_Conn_Ack'
P2P_CHAT_PY_PROTOCOL_DIS = 'ChatPy_Dis'
P2P_CHAT_PY_PROTOCOL_DIS_ACK = 'ChatPy_Dis_Ack'
P2P_CHAT_PY_PROTOCOL_MSG = 'ChatPy_Msg'
#To print all clients connected
def print_conn(sock_addr_port):
os.system('clear')
for clients in sock_addr_port:
print('Client: ' + str(clients[1]) + '| Port: '+str(clients[2]))
#To remove from list when some client goes out
def remove_client_from_list(sock_addr_port,sock_to_remove):
for clients in sock_addr_port:
if clients[0] is sock_to_remove:
sock_addr_port.remove(clients)
def getIpFromSocket(sock_addr_port,sock_to_rcv):
for clients in sock_addr_port:
if clients[0] is sock_to_rcv:
return clients[1]
if __name__ == "__main__":
#Check argv's
if len(sys.argv) < 2:
print('Error: usage: ./' + sys.argv[0] + ' <Port>')
exit()
else:
port = int(sys.argv[1])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(0)
s.bind(('',port))
s.listen(5)
#Sockets to read
sockets_rd = [s]
    #Just to keep track of all connections
sock_addr_port = []
#Peer list
peer_list = []
id_peer = 1
while True:
try:
events_rd,events_wr,events_excp = select.select( sockets_rd,[],[])
except KeyboardInterrupt:
print('\n\n\nShutdown....')
for sock in sockets_rd:
sock.close()
sys.exit(0)
for event in events_rd:
if event == s:
#Add user into peers list
conn, addr = s.accept()
sock_addr_port.append([conn,addr[0],addr[1]])
conn.setblocking(0)
sockets_rd.append(conn)
else:
#Handle other conn
for sock_to_rcv in sockets_rd:
if sock_to_rcv != s and sock_to_rcv is event:
data = pickle.loads(sock_to_rcv.recv(4096))
if data:
if data[0] == P2P_CHAT_PY_PROTOCOL_HI:
#First add him to peer list
data[1].append(getIpFromSocket(sock_addr_port,sock_to_rcv))
data[1].append(id_peer)
peer_list.append(data[1])
#Second send him peer list
sock_to_rcv.sendall(pickle.dumps([P2P_CHAT_PY_PROTOCOL_HI_ACK,peer_list,id_peer]))
id_peer += 1
elif data[0] == P2P_CHAT_PY_PROTOCOL_BYE:
#First remove the peer from the peer list
data[1].append(getIpFromSocket(sock_addr_port,sock_to_rcv))
data[1].append(data[2])
peer_list.remove(data[1])
#Second send to him bye_ack
sock_to_rcv.sendall(pickle.dumps([P2P_CHAT_PY_PROTOCOL_BYE_ACK, data[1]]))
sockets_rd.remove(sock_to_rcv)
remove_client_from_list(sock_addr_port,sock_to_rcv)
elif data[0] == P2P_CHAT_PY_PROTOCOL_UPDATE:
#Just update our peer with the peer list
sock_to_rcv.sendall(pickle.dumps([P2P_CHAT_PY_PROTOCOL_UPDATE_ACK,peer_list]))
else:
#Remove one Peer
sock_to_rcv.close()
sockets_rd.remove(sock_to_rcv)
remove_client_from_list(sock_addr_port,sock_to_rcv)
        #Print all active connections
print_conn(sock_addr_port)
| 2.421875
| 2
|
klotski/image_recognition.py
|
Strivingperson/store
| 0
|
12780774
|
import requests
import json
import base64
import numpy as np
import matplotlib.pyplot as plt
import pickle
import imageio
def get_jsonstr(url):
url = "http://172.16.58.3:8089/api/problem?stuid=031804104"
response = requests.get(url)
jsonstr = json.loads(response.text)
return jsonstr
def split_image(img): # input: image as a numpy matrix
    '''Split the image into nine blocks'''
imgs = []
for i in range(0,900,300):
for j in range(0,900,300):
imgs.append(img[i:i+300,j:j+300].tolist())
    return (imgs) # returns the list of nine image-block matrices
def encode_image(title_image,store_image):
    '''Encode the image blocks as digits'''
    current_table = [] # numeric board encoding corresponding to the image
    ans_type = list(range(1,10)) # answer type candidates
for ls_title in title_image:
try:
pos_code = store_image.index(ls_title)+1
current_table.append(pos_code)
ans_type.remove(pos_code)
except:
            current_table.append(0) # lookup failed: the blank square has no match
    return current_table,ans_type[0] # return the board encoding and the answer type
def main(json_image):
    # Load the list of borderless character images, each split into 9 blocks
save_name = 'ls_img.pkl'
pkl_file = open(save_name, 'rb')
store_images = pickle.load(pkl_file)
pkl_file.close()
    # Fetch the puzzle image
    bs64_img = base64.b64decode(json_image) # the image is base64 encoded
np_img = imageio.imread(bs64_img)
title_image = split_image(np_img)
    for ls_store in store_images: # iterate over all stored borderless characters
count = 0
        for ls_title in title_image: # iterate over the puzzle image blocks
            if (np.array(ls_title) == 255).all() == True: # the blank square that was cut out
                continue # skip it
            if ls_title in ls_store: # this block belongs to the borderless character
count += 1
else:
break
        if count == 8: # all blocks except the blank match, so this is the character; encode the puzzle blocks
current_table, ans_type = encode_image(title_image, ls_store)
return current_table,ans_type
if __name__ == "__main__":
    # Load the list of borderless character images, each split into 9 blocks
save_name = 'ls_img.pkl'
pkl_file = open(save_name,'rb')
store_images = pickle.load(pkl_file)
pkl_file.close()
    # Fetch the puzzle image
url = "http://47.102.118.1:8089/api/problem?stuid=031804104"
response = requests.get(url)
jsonstr = json.loads(response.text)
    bs64_img = base64.b64decode(jsonstr['img']) # the image is base64 encoded
np_img = imageio.imread(bs64_img)
title_image = split_image(np_img)
plt.imshow(np_img)
plt.show()
    for ls_store in store_images: # iterate over all stored borderless characters
count = 0
        for ls_title in title_image: # iterate over the puzzle image blocks
            if (np.array(ls_title) == 255).all() == True: # the blank square that was cut out
                continue # skip it
            if ls_title in ls_store: # this block belongs to the borderless character
count += 1
else:
break
        if count == 8: # all blocks except the blank match, so this is the character; encode the puzzle blocks
current_table,ans_type = encode_image(title_image,ls_store)
print(current_table, ans_type)
ls = [331,332,333,334,335,336,337,338,339]
for i in range(9):
plt.subplot(ls[i])
plt.imshow(np.array(ls_store[i]))
plt.show()
for i in range(9):
plt.subplot(ls[i])
plt.imshow(np.array(title_image[i]))
plt.show()
break
| 2.953125
| 3
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch03_recursion/ex09_pascal_test.py
|
Kreijeck/learning
| 0
|
12780775
|
<gh_stars>0
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by <NAME>
import pytest
from ch03_recursion.solutions.ex09_pascal_triangle import calc_pascal_with_action
@pytest.mark.parametrize("n, expected",
[(1, [1]),
(2, [1, 1]),
(3, [1, 2, 1]),
(4, [1, 3, 3, 1]),
(5, [1, 4, 6, 4, 1]),
(6, [1, 5, 10, 10, 5, 1]),
(7, [1, 6, 15, 20, 15, 6, 1])])
def test_calc_pascal_with_action(n, expected):
assert calc_pascal_with_action(n, None) == expected
| 3.0625
| 3
|
setup.py
|
IPASC/IPASC_DataConversionTool
| 3
|
12780776
|
import setuptools
with open('README.md', 'r') as readme_file:
long_description = readme_file.read()
with open('requirements.txt', 'r') as requirements_file:
requirements = requirements_file.read().splitlines()
setuptools.setup(
name="ipasc_tool",
version="0.1.3",
author="International Photoacoustic Standardisation Consortium (IPASC)",
description="Standardised Data Access Tool of IPASC",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
packages=setuptools.find_packages(include=["ipasc_tool", "ipasc_tool.*"]),
install_requires=requirements,
python_requires=">=3.7"
)
| 1.5625
| 2
|
pyxing/res.py
|
sharebook-kr/pyxing
| 12
|
12780777
|
<reponame>sharebook-kr/pyxing
# RES file parser
'''
{'trcode': "CSPAT00600",
'inblock': [
{'CSPAT00600InBlock1': [ ]}
],
'outblock': [
{'CSPAT00600OutBlock1': [ ]}
{'CSPAT00600OutBlock2': [ ]}
}
'''
# Function that parses the info line of a .res file
def parse_info(data):
tokens = data.split(',')
return tokens[2].strip()
# Function that parses an InBlock/OutBlock section
def parse_block(data):
    # block code and type
block_info = data[0]
tokens = block_info.split(",")
block_code, block_type = tokens[0], tokens[-1][:-1]
# block fields
field_codes = []
fields = data[2:]
for line in fields:
if len(line) > 0:
field_code = line.split(',')[1].strip()
field_codes.append(field_code)
ret_data = {}
ret_data[block_code] = field_codes
return block_type, ret_data
def parse_res(lines):
lines = [line.strip() for line in lines]
info_index = [i for i,x in enumerate(lines) if x.startswith((".Func", ".Feed"))][0]
begin_indices = [i-1 for i,x in enumerate(lines) if x == "begin"]
end_indices = [i for i,x in enumerate(lines) if x == "end"]
block_indices = zip(begin_indices, end_indices)
ret_data = {"trcode": None, "inblock": [], "outblock": []}
# TR Code
tr_code = parse_info(lines[info_index])
ret_data["trcode"] = tr_code
# Block
for start, end in block_indices:
block_type, block_data= parse_block(lines[start:end])
if block_type == "input":
ret_data["inblock"].append(block_data)
else:
ret_data["outblock"].append(block_data)
return ret_data
if __name__ == "__main__":
    # TR(t8430)
f = open("c:/eBEST/xingAPI/Res/t8430.res", encoding="euc-kr")
lines = f.readlines()
f.close()
# f = open("c:/eBEST/xingAPI/Res/CSPAT00600.res", encoding="euc-kr")
# lines = f.readlines()
# f.close()
# Real
#f = open("c:/eBEST/xingAPI/Res/NWS.res", encoding="euc-kr")
#lines = f.readlines()
#f.close()
import pprint
data = parse_res(lines)
pprint.pprint(data)
| 2.609375
| 3
|
Scripts/simulation/retail/retail_elements.py
|
velocist/TS4CheatsInfo
| 0
|
12780778
|
<reponame>velocist/TS4CheatsInfo
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\retail\retail_elements.py
# Compiled at: 2016-03-08 02:17:38
# Size of source mod 2**32: 6140 bytes
from interactions import ParticipantTypeSingleSim
from interactions.utils.interaction_elements import XevtTriggeredElement
from retail.retail_utils import RetailUtils
from sims4.tuning.tunable import TunableEnumEntry, AutoFactoryInit, TunableVariant, TunableRange, TunableList, HasTunableSingletonFactory
import sims4.log
logger = sims4.log.Logger('Retail', default_owner='trevor')
class RetailCustomerAdjustBrowseTime(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'time_multiplier': TunableRange(description='\n The remaining time the customer has to browse will be multiplied by\n this number. A value of 2.0 will double the remaining time, causing\n the customer to spend more time browsing. A value of 0.5 will cut\n the remaining browse time in half, causing the customer to move on\n to the next state sooner. A value of 0 will instantly push the\n customer to go to the next state. If the customer is not currently\n in the browse state, this element will do nothing.\n ',
tunable_type=float,
default=1,
minimum=0)}
def apply_action(self, sim, situation):
situation.adjust_browse_time(self.time_multiplier)
class RetailCustomerAdjustTotalShopTime(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'time_multiplier': TunableRange(description='\n The remaining time the customer has to shop will be multiplied by\n this number. A value of 2.0 will double the remaining time, causing\n the customer to shop more. A value of 0.5 will cut the remaining\n browse time in half, causing the customer to shop less. A value of\n 0 will cause the customer to leave.\n ',
tunable_type=float,
default=1,
minimum=0)}
def apply_action(self, sim, situation):
situation.adjust_total_shop_time(self.time_multiplier)
class RetailCustomerAdjustPriceRange(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'min_price_adjustment_multiplier':TunableRange(description='\n The amount to multiply the minimum price range for this customer.\n ',
tunable_type=float,
default=1,
minimum=0),
'max_price_adjustment_multiplier':TunableRange(description='\n The amount to multiply the maximum price range for this customer.\n ',
tunable_type=float,
default=1,
minimum=0)}
def apply_action(self, sim, situation):
situation.adjust_price_range(min_multiplier=(self.min_price_adjustment_multiplier), max_multiplier=(self.max_price_adjustment_multiplier))
class RetailCustomerAction(XevtTriggeredElement):
FACTORY_TUNABLES = {'customer':TunableEnumEntry(description='\n The customer participant to which the action is applied.\n ',
tunable_type=ParticipantTypeSingleSim,
default=ParticipantTypeSingleSim.TargetSim),
'actions':TunableList(description='\n The actions to apply to the customer.\n ',
tunable=TunableVariant(description='\n The action to apply to the customer.\n ',
adjust_browse_time=RetailCustomerAdjustBrowseTime.TunableFactory(description="\n Change the browse time of the customer by some multiple of the\n remaining browse time. This does nothing if the customer isn't\n already browsing. (i.e. loitering customers won't be affected)\n "),
adjust_total_shop_time=RetailCustomerAdjustTotalShopTime.TunableFactory(description='\n Change the total shop time of the customer by some multiple of\n the remaining shop time.\n '),
adjust_min_max_price_range=RetailCustomerAdjustPriceRange.TunableFactory(description='\n Change the min and/or max price range of this customer.\n ')))}
def _do_behavior(self):
customer = self.interaction.get_participant(self.customer)
if customer is None:
logger.error('Got a None customer trying to run a RetailCustomerAction element.')
return False
situation = RetailUtils.get_retail_customer_situation_from_sim(customer)
if situation is None:
logger.warn("Trying to run a customer action on a sim that isn't running a retail situation.")
return False
for action in self.actions:
action.apply_action(customer, situation)
| 1.945313
| 2
|
views/login.py
|
vug/personalwebapp
| 0
|
12780779
|
<reponame>vug/personalwebapp
"""
This Blueprint gives the ability of user logins, and login_required functionality using Flask-Login extension.
"""
from flask import Blueprint, render_template, request, flash, redirect, url_for
from flask_login import login_required, logout_user, login_user
from extensions import login_manager
from models import User
login = Blueprint('login', __name__)
login_manager.login_view = 'login.login_page' # redirect to this when arrived a login_required view without logged in
@login_manager.user_loader
def load_user(user_id):
"""Get the User object given the user_id stored in the session.
This is a callback function to reload the user object from the user ID stored in the session.
:rtype: User
:return: A User if user_id exists, else None."""
return User.query.get(int(user_id))
@login.route('/login', methods=['GET', 'POST'])
def login_page():
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
email = request.form['email']
password = request.form['password']
user = User.query.filter_by(email=email).first()
if user is not None and user.password == password:
login_user(user, remember=True)
fullname = user.fullname
redirect_url = request.args.get('next')
html = 'Logged in as email: {}, fullname: {}<br><a href="/">Home</a> '.format(email, fullname)
if redirect_url:
html += '<a href="{}">Redirect</a>'.format(redirect_url)
return html
else:
flash('Username or Password is invalid', 'error')
return redirect(url_for('login.login_page'))
@login.route("/logout")
def logout_page():
logout_user()
return render_template('logout.html')
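# Illustrative usage (added sketch, not part of the original blueprint): any
# view can be protected with Flask-Login's `login_required`; unauthenticated
# requests get redirected to `login.login_page` as configured above. The route
# name below is hypothetical.
@login.route('/protected-example')
@login_required
def protected_example_page():
    return 'Only visible to logged-in users. <a href="/logout">Logout</a>'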
| 2.703125
| 3
|
aggcat/tests/test_parser.py
|
djedi/python-aggcat
| 1
|
12780780
|
<reponame>djedi/python-aggcat<gh_stars>1-10
from __future__ import absolute_import
from ..parser import Objectify
class TestParser(object):
"""Test XML Objectification"""
@classmethod
def setup_class(self):
self.o = None
with open('aggcat/tests/data/sample_xml.xml', 'r') as f:
self.o = Objectify(f.read()).get_object()
def test_lists(self):
"""Parser Test: Lists object added are correct"""
assert hasattr(self.o, '_list')
assert isinstance(self.o._list, list) == True
assert len(self.o) == 2
assert len(self.o[0].ingredients) == 2
assert len(self.o[1].ingredients) == 3
def test_attributes(self):
"""Parser Test: object attributes created have correct value"""
assert hasattr(self.o[0], 'name')
assert hasattr(self.o[0], 'ingredients')
assert hasattr(self.o[0], 'cook_time')
assert self.o[0].name == 'Fried Pickles'
assert self.o[0].ingredients[0].name == 'Flour'
assert self.o[1].name == 'Smoked Bacon'
assert self.o[1].ingredients[0].name == 'Bacon'
| 2.78125
| 3
|
quex/engine/analyzer/door_id_address_label.py
|
smmckay/quex-mirror
| 0
|
12780781
|
import quex.engine.state_machine.index as sm_index
from quex.engine.misc.tools import print_callstack, \
TypedSet
from quex.blackboard import Lng
from quex.constants import E_IncidenceIDs, \
E_StateIndices, \
E_DoorIdIndex
from collections import namedtuple
import itertools
#______________________________________________________________________________
#
# Address:
#
# Numeric representation of a 'goto target'. Using an address a variable may
# contain the information of what target to go, and the goto is then executed
# by a code fragment as
#
# switch( address_variable ) {
# ...
# case 0x4711: goto _4711;
# ...
# }
#______________________________________________________________________________
#
# TransitionID:
#
# Identifies a transition from one source to target state. There may be
# multiple transitions for the same source-target pair. Each one identified by
# an additional 'trigger_id'. TransitionIDs are connected with OpList-s
# at entry into a state; But
#
# n 1
# TransitionID <--------------> OpList
#
# That is, there may be multiple TransitionID-s with the same OpList.
# TransitionID-s are useful during the construction of entries.
#______________________________________________________________________________
#______________________________________________________________________________
#
# DoorID:
#
# Marks an entrance into a 'Processor', an FSM_State for example. A
# Processor can have multiple entries, each entry has a different DoorID. A
# DoorID identifies distinctly a OpList to be executed upon entry.
# No two OpList-s
# are the same except that their DoorID is the same.
#
#______________________________________________________________________________
class DoorID(namedtuple("DoorID_tuple", ("state_index", "door_index", "related_address"))):
def __new__(self, StateIndex, DoorIndex, dial_db=None):
assert isinstance(StateIndex, (int, long)) or StateIndex in E_StateIndices or StateIndex == E_IncidenceIDs.MATCH_FAILURE
assert isinstance(dial_db, DialDB)
# 'DoorIndex is None' --> right after the entry commands (targetted after reload).
assert isinstance(DoorIndex, (int, long)) or DoorIndex is None or DoorIndex in E_DoorIdIndex, "%s" % DoorIndex
# If the DoorID object already exists, than do not generate a second one.
result = dial_db.find_door_id(StateIndex, DoorIndex)
if result is not None: return result
# Any created DoorID must be properly registered.
address = dial_db.new_address()
result = super(DoorID, self).__new__(self, StateIndex, DoorIndex, address)
dial_db.register_door_id(result)
return result
@staticmethod
def drop_out(StateIndex, dial_db): return DoorID(E_StateIndices.DROP_OUT, StateIndex, dial_db=dial_db)
@staticmethod
def transition_block(StateIndex, dial_db): return DoorID(StateIndex, E_DoorIdIndex.TRANSITION_BLOCK, dial_db=dial_db)
@staticmethod
def incidence(IncidenceId, dial_db): return DoorID(dial_db.map_incidence_id_to_state_index(IncidenceId),
E_DoorIdIndex.ACCEPTANCE, dial_db=dial_db)
@staticmethod
def bipd_return(IncidenceId, dial_db): return DoorID(dial_db.map_incidence_id_to_state_index(IncidenceId), E_DoorIdIndex.BIPD_RETURN, dial_db=dial_db)
@staticmethod
def state_machine_entry(SM_Id, dial_db): return DoorID(SM_Id, E_DoorIdIndex.STATE_MACHINE_ENTRY, dial_db=dial_db)
@staticmethod
def global_state_router(dial_db): return DoorID(0L, E_DoorIdIndex.GLOBAL_STATE_ROUTER, dial_db=dial_db)
@staticmethod
def global_end_of_pre_context_check(dial_db): return DoorID(0L, E_DoorIdIndex.GLOBAL_END_OF_PRE_CONTEXT_CHECK, dial_db=dial_db)
@staticmethod
def global_reentry(dial_db): return DoorID(0L, E_DoorIdIndex.GLOBAL_REENTRY, dial_db=dial_db)
@staticmethod
def return_with_on_after_match(dial_db): return DoorID(0L, E_DoorIdIndex.RETURN_WITH_ON_AFTER_MATCH, dial_db=dial_db)
@staticmethod
def continue_with_on_after_match(dial_db): return DoorID(0L, E_DoorIdIndex.CONTINUE_WITH_ON_AFTER_MATCH, dial_db=dial_db)
@staticmethod
def continue_without_on_after_match(dial_db): return DoorID(0L, E_DoorIdIndex.CONTINUE_WITHOUT_ON_AFTER_MATCH, dial_db=dial_db)
def drop_out_f(self): return self.state_index == E_StateIndices.DROP_OUT
def last_acceptance_f(self): return self.door_index == E_DoorIdIndex.ACCEPTANCE \
and self.state_index == E_IncidenceIDs.VOID
def __repr__(self):
return "DoorID(s=%s, d=%s)" % (self.state_index, self.door_index)
#______________________________________________________________________________
# DialDB: DoorID, Address - Database
#
# A DoorID of a state entry is distincly linked to an 'address', i.e something
# a 'goto' can go to. The language's dictionary later relates an 'address' to
# a 'label' (i.e. something that the language uses as target of 'goto').
#
# 1 n 1 1
# StateIndex <-------> DoorID <---------> Address
#
# '---------------------._.-----------------------'
# '
# DialDB
#
# The DialDB maps from DoorID to Address and vice versa. Additionally, it
# keeps track of 'goto-ed' addresses. Thus, addresses that are never goto-ed,
# may not have to be instantiated.
#______________________________________________________________________________
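#
# Illustrative example (added sketch, not from the original source): with a
# DialDB instance (defined below), a fresh door for state 5 and its unique
# address could be obtained roughly like this:
#
#     dial_db = DialDB()
#     door = dial_db.new_door_id(StateIndex=5)
#     # door.state_index == 5, door.door_index == 0,
#     # door.related_address is a unique address usable as a 'goto' target.
#______________________________________________________________________________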
# Globally Unique Incidence Id ________________________________________________
#
# (For ease: make it globally unique, not only mode-unique)
#
def new_incidence_id():
"""Incidence ids are used as DFA-ids => they MUST be aligned.
TODO: They are actually the same.
Replace 'state_machine_id' by 'incidence_id'.
"""
return sm_index.get_state_machine_id()
class DialDB(object):
__slots__ = ("__door_id_db", "__gotoed_address_set", "__routed_address_set", "__address_i", "__map_incidence_id_to_state_index" )
def __init__(self):
# Track all generated DoorID objects with 2d-dictionary that maps:
#
# StateIndex --> ( DoorSubIndex --> DoorID )
#
# Where the DoorID has the 'state_index' and 'door_index' equal to
# 'StateIndex' and 'DoorSubIndex'.
#
self.__door_id_db = {} # TypedDict(long, dict)
# Track addresses which are subject to 'goto' and those which need to
# be routed.
self.__gotoed_address_set = TypedSet(long)
self.__routed_address_set = TypedSet(long)
# Address counter to generate unique addresses
self.__address_i = itertools.count(start=0)
# Mapping from incidence_id to terminal state index
self.__map_incidence_id_to_state_index = {}
def __debug_address_generation(self, DoorId, Address, *SuspectAdrList):
"""Prints the callstack if an address of SuspectAdrList is generated.
"""
if Address not in SuspectAdrList:
return
print "#DoorID %s <-> Address %s" % (DoorId, Address)
print_callstack()
def __debug_incidence_generation(self, IncidenceId, StateIndex):
print "#Generated: %s -> state: %s" % (IncidenceId, StateIndex)
print_callstack()
def __debug_gotoed_address(self, Address, *SuspectAdrList):
if Address not in SuspectAdrList:
return
print "#Gotoed Address: %s" % Address
print_callstack()
def routed_address_set(self):
return self.__routed_address_set
def address_is_gotoed(self, Adr):
return Adr in self.__gotoed_address_set
def new_door_id(self, StateIndex=None):
"""Create a new entry in the database. First, a DoorID is generated.
Then a new address is linked to it. A list of existing
DoorID-s is maintained in '.__door_id_db'.
RETURNS: New DoorID
"""
state_index = StateIndex if StateIndex is not None \
else sm_index.get()
door_sub_index = self.max_door_sub_index(state_index) + 1
assert self.find_door_id(state_index, door_sub_index) is None
return DoorID(state_index, door_sub_index, dial_db=self)
def new_address(self):
return long(next(self.__address_i))
def max_door_sub_index(self, StateIndex):
"""RETURN: The greatest door sub index for a given StateIndex.
'-1' if not index has been used yet.
"""
result = - 1
sub_db = self.__door_id_db.get(StateIndex)
if sub_db is None: return result
for dsi in (x for x in sub_db.iterkeys() if isinstance(x, (int, long))):
if dsi > result: result = dsi
return result
def register_door_id(self, DoorId):
if False: # True/False activates debug messages
self.__debug_address_generation(DoorId, DoorId.related_address, 326)
sub_db = self.__door_id_db.get(DoorId.state_index)
if sub_db is None:
sub_db = {}
self.__door_id_db[DoorId.state_index] = sub_db
assert DoorId.door_index not in sub_db # Otherwise, it would not be new
sub_db[DoorId.door_index] = DoorId
def find_door_id(self, StateIndex, DoorSubIndex):
"""Try to get a DoorID from the set of existing DoorID-s. If a DoorID
with 'StateIndex' and 'DoorSubIndex' does not exist yet, then create it.
"""
sub_db = self.__door_id_db.get(StateIndex)
if sub_db is None: return None
door_id = sub_db.get(DoorSubIndex)
if door_id is None: return None
return door_id
def mark_address_as_gotoed(self, Address):
if False:
self.__debug_gotoed_address(Address, 39)
self.__gotoed_address_set.add(Address)
def mark_address_as_routed(self, Address):
self.__routed_address_set.add(Address)
# Any address which is subject to routing is 'gotoed', at least inside
# the router (e.g. "switch( ... ) ... case AdrX: goto LabelX; ...").
self.mark_address_as_gotoed(Address)
def map_incidence_id_to_state_index(self, IncidenceId):
assert isinstance(IncidenceId, (int, long)) \
or IncidenceId in E_IncidenceIDs, \
"Found <%s>" % IncidenceId
index = self.__map_incidence_id_to_state_index.get(IncidenceId)
if index is None:
index = sm_index.get()
self.__map_incidence_id_to_state_index[IncidenceId] = index
if False:
self.__debug_incidence_generation(IncidenceId, index)
return index
class DoorID_Scheme(tuple):
"""A TargetByStateKey maps from a index, i.e. a state_key to a particular
target (e.g. a DoorID). It is implemented as a tuple which can be
identified by the class 'TargetByStateKey'.
"""
def __new__(self, DoorID_List):
return tuple.__new__(self, DoorID_List)
@staticmethod
def concatinate(This, That):
door_id_list = list(This)
door_id_list.extend(list(That))
return DoorID_Scheme(door_id_list)
__routed_address_set = set([])
class IfDoorIdReferencedCode:
def __init__(self, DoorId, Code=None, dial_db=None):
"""LabelType, LabelTypeArg --> used to access __address_db.
Code = Code that is to be generated, supposed that the
address is actually referred (by goto).
                   (May be None, so that only the label of the address is printed.)
"""
assert isinstance(Code, list) or Code is None
self.address = DoorId.related_address
self.door_id = DoorId
if Code is None: self.code = [ Lng.LABEL(self.door_id) ]
else: self.code = Code
class IfDoorIdReferencedLabel(IfDoorIdReferencedCode):
def __init__(self, DoorId, dial_db):
IfDoorIdReferencedCode.__init__(self, DoorId, dial_db=dial_db)
def get_plain_strings(txt_list, dial_db):
"""-- Replaces unreferenced 'CodeIfLabelReferenced' objects by empty strings.
-- Replaces integers by indentation, i.e. '1' = 4 spaces.
"""
size = len(txt_list)
i = -1
while i < size - 1:
i += 1
elm = txt_list[i]
if type(elm) in [int, long]:
# Indentation: elm = number of indentations
txt_list[i] = " " * elm
elif not isinstance(elm, IfDoorIdReferencedCode):
# Text is left as it is
pass
elif dial_db.address_is_gotoed(elm.address):
# If an address is referenced, the correspondent code is inserted.
txt_list[i:i+1] = elm.code
# print "#elm.code:", elm.code
# txt_list = txt_list[:i] + elm.code + txt_list[i+1:]
size += len(elm.code) - 1
i -= 1
else:
            # If an address is not referenced, then it is replaced by an empty string
txt_list[i] = ""
return txt_list
def __nice(SM_ID):
assert isinstance(SM_ID, (long, int))
return repr(SM_ID).replace("L", "").replace("'", "")
| 1.65625
| 2
|
software/utils/drc_utils/python/atlas_pressure_monitor.py
|
liangfok/oh-distro
| 92
|
12780782
|
import lcm
import drc as lcmdrc
import atlas as lcmatlas
import numpy as np
import time
class AtlasPressureCommander(object):
def __init__(self,
desired_rpm=5000,
max_psi=2500,
min_psi=1500):
self.desired_rpm = desired_rpm
self.max_psi = max_psi
self.min_psi = min_psi
self.lc = lcm.LCM()
self.desired_psi = self.min_psi
self.last_published_psi = None
def publish_pump_command(self):
print "Publishing new desired pump pressure: {:d} PSI".format(self.desired_psi)
msg = lcmatlas.pump_command_t()
msg.desired_psi = self.desired_psi
msg.desired_rpm = self.desired_rpm
msg.cmd_max = 60.0
msg.utime = int(time.time() * 1e6)
self.lc.publish('ATLAS_PUMP_COMMAND', msg.encode())
self.last_published_psi = self.desired_psi
def run(self):
while True:
time.sleep(0.01)
self.lc.handle()
class AutoPressureCommander(AtlasPressureCommander):
def __init__(self,
default_offset_psi=300,
eta=0.001,
publish_threshold_psi=100,
**kwargs):
        super(AutoPressureCommander, self).__init__(**kwargs)
self.default_offset_psi = default_offset_psi
self.eta = eta
self.publish_threshold_psi = publish_threshold_psi
self._setup_subscriptions()
def _setup_subscriptions(self):
self.lc.subscribe('ATLAS_STATE_EXTRA', self.handle_atlas_state_extra)
def handle_atlas_state_extra(self, channel, msg):
if isinstance(msg, str):
msg = lcmatlas.state_extra_t.decode(msg)
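        # Track the highest joint pressure demand and keep the pump setpoint a
        # fixed offset above it, while letting the setpoint decay slowly (rate
        # 'eta') when demand drops; the result is clamped to [min_psi, max_psi].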
max_joint_psi = max(np.max(msg.psi_pos),
np.max(msg.psi_neg))
self.desired_psi = max(self.desired_psi * (1-self.eta),
max_joint_psi + self.default_offset_psi)
self.desired_psi = max(self.desired_psi, self.min_psi)
self.desired_psi = min(self.desired_psi, self.max_psi)
if self.desired_psi > self.last_published_psi + 5 or self.desired_psi - self.last_published_psi < -self.publish_threshold_psi:
self.publish_pump_command()
DEFAULT_PLAN_PRESSURE_MAP = {lcmdrc.controller_status_t.MANIPULATING: 2650,
lcmdrc.controller_status_t.WALKING: 2000}
DEFAULT_BEHAVIOR_PRESSURE_MAP = {'prep': 1500,
'stand': 2000,
'calibrate_null_bias': 1500,
'calibrate_electric_arms': 1500}
class PlanPressureCommander(AtlasPressureCommander):
def __init__(self,
plan_map=DEFAULT_PLAN_PRESSURE_MAP,
behavior_map=DEFAULT_BEHAVIOR_PRESSURE_MAP,
**kwargs):
super(PlanPressureCommander, self).__init__(**kwargs)
self.plan_map = plan_map
self.behavior_map = behavior_map
self._setup_subscriptions()
def _setup_subscriptions(self):
self.lc.subscribe('CONTROLLER_STATUS', self.handle_controller_status)
self.lc.subscribe('ATLAS_BEHAVIOR_COMMAND', self.handle_behavior)
def handle_controller_status(self, channel, msg):
if isinstance(msg, str):
msg = lcmdrc.controller_status_t.decode(msg)
if msg.state in self.plan_map:
self.desired_psi = self.plan_map[msg.state]
self.publish_pump_command()
def handle_behavior(self, channel, msg):
if isinstance(msg, str):
msg = lcmdrc.behavior_command_t.decode(msg)
s = msg.command.lower()
if s in self.behavior_map:
self.desired_psi = self.behavior_map[s]
self.publish_pump_command()
def run(self):
while True:
time.sleep(0.1)
self.lc.handle()
def main():
mon = PlanPressureCommander()
print "Pressure Command: ready"
mon.run()
if __name__ == '__main__':
main()
| 2.296875
| 2
|
git_sentry/handlers/git_team.py
|
git-sentry/git-sentry
| 0
|
12780783
|
from git_sentry.handlers.access_controlled_git_object import AccessControlledGitObject
from git_sentry.handlers.git_repo import GitRepo
from git_sentry.handlers.git_user import GitUser
class GitTeam(AccessControlledGitObject):
def __init__(self, git_object):
super().__init__(git_object)
def name(self):
return self._git_object.name
def login(self):
return self.name()
def as_dict(self):
return self._git_object.as_json()
def add_to_repo(self, repository_name, permission):
self._git_object.add_repository(repository_name, permission)
def repositories(self):
return [GitRepo(r) for r in self._git_object.repositories()]
def grant_access(self, user, role='member'):
if self.permission_for(user) != 'maintainer':
self._git_object.add_or_update_membership(user, role)
def revoke_access(self, username):
super().revoke_access(username)
def members(self, role=None):
return [GitUser(u) for u in self._git_object.members(role)]
def permission_for(self, username):
if any(m.login() == username for m in self.members('maintainer')):
return 'maintainer'
if any(m.login() == username for m in self.members('member')):
return 'member'
return None
def __eq__(self, other):
return self.name() == other.name()
def __repr__(self):
return f'GitTeam[{self.name()}]'
def __str__(self):
return f'GitTeam[{self.name()}]'
| 2.265625
| 2
|
framework/openCV/pyimagesearch_ppao/ch6_image_processing.py
|
friendlyantz/learning
| 1
|
12780784
|
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
ap.add_argument("-i2", "--image2", required = True, help = "Path to the image 2")
ap.add_argument("-i3", "--image3", required = True, help = "Path to the image 3")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
# NOTE: CHAPTER 6
cv2.imshow("Original", image)
# 6.1 translation Left(-ve)/right(+ve) followed by up(-ve)/down(+ve)
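# The 2x3 affine matrix [[1, 0, tx], [0, 1, ty]] shifts the image tx pixels
# to the right and ty pixels down (negative values shift left/up).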
M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and Right", shifted)
# 6.1 translation
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Up and Left", shifted)
# 6.2 in imutils.py
# 6.3 translate using imutils
shifted = imutils.translate(image, 0, 100)
cv2.imshow("Shifted Down", shifted)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.4 rotate counter-clockwise by default
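# cv2.getRotationMatrix2D(center, angle, scale) builds the 2x3 affine matrix;
# positive angles rotate counter-clockwise around 'center'.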
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("rotated by 45 degrees", rotated)
# 6.4 rotate -ve to rotate clockwise
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("rotated by -90 degrees", rotated)
# 6.5 move rotate to imutils.py
# 6.6 rotate using imutils.py
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.7 resize
r = 150.0 / image.shape[1] # ratio - width = 150px
dim = (150, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # could also use INTER_LINEAR
# INTER_CUBIC or INTER_NEAREST
cv2.imshow("Resized (Width)", resized)
# 6.8 resize
r = 50.0 / image.shape[0] # ratio - height = 50px
dim = (int(image.shape[1] * r), 50)
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imshow("Resized (Height)", resized)
# 6.11
# 6.9 resize in imutils.py
resized = imutils.resize(image, width = 66)
print("shape: {} pixels".format(resized.shape)) # NOTE: height width order not width height
cv2.imshow("Resized via Function", resized)
# 6.10 resize height via imutils.py
resized = imutils.resize(image, height = 110)
print("shape: {} pixels".format(resized.shape)) # NOTE: height width order not width height
cv2.imshow("Resized via Function height 50", resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.12 flipping
flipped = cv2.flip(image, 1)
cv2.imshow("Flipped Horizontally", flipped)
flipped = cv2.flip(image, 0)
cv2.imshow("Flipped Vertically", flipped)
flipped = cv2.flip(image, -1)
cv2.imshow("Flipped Horizontally & Vertically", flipped)
cv2.waitKey(0)
# 6.13 crop [y_start:y_end, x_start:x_end]
cropped = image[30:120, 240:335]
cv2.imshow("T-Rex Face", cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.14 arithmetic
# cv2 uses max and min
print(" max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print(" min of 0: {}".format(cv2.add(np.uint8([ 50]), np.uint8([100]))))
# np wraps around
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))
print("wrap around: {}".format(np.uint8([ 50]) + np.uint8([100])))
# 6.17 arithmetic on images
M = np.ones(image.shape, dtype = "uint8") * 100
added = cv2.add(image, M)
cv2.imshow("Added", added)
M = np.ones(image.shape, dtype = "uint8") *50
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted", subtracted)
cv2.waitKey(0)
# 6.18 bitwise operations
rectangle = np.zeros((300, 300), dtype = "uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
cv2.imshow("Rectangle", rectangle)
circle = np.zeros((300, 300), dtype = "uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
cv2.imshow("Circle", circle)
cv2.waitKey(0)
# 6.19 bitwise AND
bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow("AND", bitwiseAnd)
cv2.waitKey(0)
# 6.19 bitwise OR
bitwiseOr = cv2.bitwise_or(rectangle, circle)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)
# 6.19 bitwise XOR
bitwiseXor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)
# 6.19 bitwise NOT
bitwiseNot = cv2.bitwise_not(circle)
cv2.imshow("NOT", bitwiseNot)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.20 masking
image2 = cv2.imread(args["image2"])
cv2.imshow("Original2", image2)
mask = np.zeros(image2.shape[:2], dtype = "uint8")
(cX, cY) = (image2.shape[1] // 2, image2.shape[0] // 2)
cv2.rectangle(mask, (cX - 75, cY -75), (cX + 75, cY +75), 255, -1)
cv2.imshow("Mask", mask)
masked = cv2.bitwise_and(image2, image2, mask = mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
# 6.21 masking circle
mask = np.zeros(image2.shape[:2], dtype = "uint8")
cv2.circle(mask, (cX, cY), 100, 255, -1)
masked = cv2.bitwise_and(image2, image2, mask = mask)
cv2.imshow("Mask", mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
# 6.22 splitting and merging channels
image3 = cv2.imread(args["image3"])
(B, G, R) = cv2.split(image3)
cv2.imshow("Red", R)
cv2.imshow("Green", G)
cv2.imshow("Blue", B)
merged = cv2.merge([B, G, R])
cv2.imshow("Merged", merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.23 merge only colour channel
zeros = np.zeros(image3.shape[:2], dtype = "uint8")
cv2.imshow("Red", cv2.merge([zeros, zeros, R]))
cv2.imshow("Green", cv2.merge([zeros, G, zeros]))
cv2.imshow("Blue", cv2.merge([B, zeros, zeros]))
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.24 colorspaces
cv2.imshow("Original", image2)
gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", gray)
hsv = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
cv2.imshow("HSV", hsv)
lab = cv2.cvtColor(image2, cv2.COLOR_BGR2LAB)
cv2.imshow("L*a*b*", lab)
cv2.waitKey(0)
| 2.96875
| 3
|
users/factories.py
|
bllli/Django-China-API
| 187
|
12780785
|
# factories that automatically create user data
import factory
from users.models import User
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: 'user%s' % n)
email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username)
password = 'password'
mugshot = factory.django.ImageField()
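    # _create is overridden below so objects are built through the manager's
    # create_user(), which hashes the password instead of storing it raw.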
@classmethod
def _create(cls, model_class, *args, **kwargs):
manager = cls._get_manager(model_class)
return manager.create_user(*args, **kwargs)
| 2.59375
| 3
|
Array/Easy/busyStudent.py
|
pavi-ninjaac/leetcode
| 0
|
12780786
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:57:38 2020
@author: ninjaac
"""
"""
Given two integer arrays startTime and endTime and given an integer queryTime.
The ith student started doing their homework at the time startTime[i] and finished it at time endTime[i].
Return the number of students doing their homework at time queryTime. More formally, return the number of students where queryTime lays in the interval [startTime[i], endTime[i]] inclusive.
Example 1:
Input: startTime = [1,2,3], endTime = [3,2,7], queryTime = 4
Output: 1
Explanation: We have 3 students where:
The first student started doing homework at time 1 and finished at time 3 and wasn't doing anything at time 4.
The second student started doing homework at time 2 and finished at time 2 and also wasn't doing anything at time 4.
The third student started doing homework at time 3 and finished at time 7 and was the only student doing homework at time 4.
"""
class Solution:
@staticmethod
def busyStudent(startTime ,endTime ,queryTime):
res = 0
for i,j in zip(startTime,endTime):
if i<=queryTime and j>=queryTime:
res+=1
return res
print(Solution.busyStudent([5], [5], 5))
| 3.421875
| 3
|
TEP/lista7/D.py
|
GuilhermeBraz/unb-workflow
| 0
|
12780787
|
<filename>TEP/lista7/D.py<gh_stars>0
# You've got a rectangular parallelepiped with integer edge lengths. You know the areas of its three faces that have a common vertex.
# Your task is to find the sum of lengths of all 12 edges of this parallelepiped.
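# Derivation sketch (one labeling of the areas; the result is symmetric):
# if the edge lengths are a, b, c and the given face areas are x = a*b,
# y = b*c, z = a*c, then sqrt(x*y/z) = b, so
#   4*sqrt(x*y/z) * (1 + z/x + z/y) = 4*b*(1 + c/b + a/b) = 4*(a + b + c),
# i.e. the sum of all 12 edge lengths.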
from math import sqrt
x, y, z = map(int, input().split())
perimeter = (4*sqrt((x*y)/z)) * (1+z/x+z/y)
print(round(perimeter))
| 3.671875
| 4
|
api/algo.py
|
unithmallavaram/TeamFormationAssistant_V2
| 3
|
12780788
|
import sys
f = open("success.txt", "w")
| 1.40625
| 1
|
home/views.py
|
apoorvanand/tweet-search
| 51
|
12780789
|
import datetime
import random
import csv
import json
# TODO: Fix * imports
from django.shortcuts import *
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import logout as auth_logout
from social.apps.django_app.default.models import UserSocialAuth
from gnip_search.gnip_search_api import QueryError as GNIPQueryError
from chart import Chart
from timeframe import Timeframe
from frequency import Frequency
from tweets import Tweets
from home.utils import *
# import twitter
KEYWORD_RELEVANCE_THRESHOLD = .1 # Only show related terms if > 10%
TWEET_QUERY_COUNT = 10 # For real identification, > 100. Max of 500 via Search API.
DEFAULT_TIMEFRAME = 1 # When not specified or needed to constrain, this # of days lookback
TIMEDELTA_DEFAULT_TIMEFRAME = datetime.timedelta(days=DEFAULT_TIMEFRAME)
TIMEDELTA_DEFAULT_30 = datetime.timedelta(days=30)
DATE_FORMAT = "%Y-%m-%d %H:%M"
DATE_FORMAT_JSON = "%Y-%m-%dT%H:%M:%S"
def login(request):
"""
Returns login page for given request
"""
context = {"request": request}
return render_to_response('login.html', context, context_instance=RequestContext(request))
@login_required
# @user_passes_test(lambda u: u.is_staff or u.is_superuser, login_url='/')
def home(request):
"""
Returns home page for given request
"""
query = request.GET.get("query", "")
context = {"request": request, "query0": query}
tweets = []
return render_to_response('home.html', context, context_instance=RequestContext(request))
@login_required
def query_chart(request):
"""
Returns query chart for given request
"""
# TODO: Move this to one line e.g. queries to query
query = request.GET.get("query", None)
queries = request.GET.getlist("queries[]")
if query:
queries = [query]
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
response_chart = None
try:
response_chart = Chart(queries = queries,
start = request_timeframe.start,
end = request_timeframe.end,
interval = request_timeframe.interval)
except GNIPQueryError as e:
return handleQueryError(e)
response_data = {}
response_data['days'] = request_timeframe.days
response_data['start'] = request_timeframe.start.strftime(DATE_FORMAT_JSON)
response_data['end'] = request_timeframe.end.strftime(DATE_FORMAT_JSON)
response_data['columns'] = response_chart.columns
response_data['total'] = response_chart.total
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required
def query_frequency(request):
query = request.GET.get("query", None)
response_data = {}
sample = 500
if query is not None:
# Get Timeframe e.g. process time from request
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
data = None
try:
# Query GNIP and get frequency
data = Frequency(query = query,
sample = sample,
start = request_timeframe.start,
end = request_timeframe.end)
except GNIPQueryError as e:
return handleQueryError(e)
response_data["frequency"] = data.freq
response_data["sample"] = sample
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required
def query_tweets(request):
"""
Returns tweet query
"""
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
query_count = int(request.GET.get("embedCount", TWEET_QUERY_COUNT))
export = request.GET.get("export", None)
query = request.GET.get("query", "")
try:
tweets = Tweets(query=query, query_count=query_count, start=request_timeframe.start, end=request_timeframe.end, export=export)
except GNIPQueryError as e:
return handleQueryError(e)
response_data = {}
if export == "csv":
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="export.csv"'
writer = csv.writer(response, delimiter=',', quotechar="'", quoting=csv.QUOTE_ALL)
writer.writerow(['count','time','id','user_screen_name','user_id','status','retweet_count','favorite_count','is_retweet','in_reply_to_tweet_id','in_reply_to_screen_name'])
count = 0;
for t in tweets.get_data():
count = count + 1
body = t['body'].encode('ascii', 'replace')
status_id = t['id']
status_id = status_id[status_id.rfind(':')+1:]
user_id = t['actor']['id']
user_id = user_id[user_id.rfind(':')+1:]
writer.writerow([count, t['postedTime'], status_id, t['actor']['preferredUsername'], user_id, body, t['retweetCount'], t['favoritesCount'], 'X', 'X', 'X'])
return response
else:
response_data['tweets'] = tweets.get_data()
return HttpResponse(json.dumps(response_data), content_type="application/json")
def logout(request):
"""
Returns a redirect response and logs out user
"""
auth_logout(request)
return HttpResponseRedirect('/')
| 2.359375
| 2
|
tests/test_data_models.py
|
datarevenue-berlin/omigami
| 7
|
12780790
|
<reponame>datarevenue-berlin/omigami<filename>tests/test_data_models.py
from py_muvr.data_structures import SelectedFeatures
def test_n_features(dataset):
assert dataset.n_features == 12
def test_input_data_slice(dataset):
assert dataset[:5, 3:7].X.shape == (5, 4)
assert dataset[[1, 2, 5], [3, 4, 7]].X.shape == (3, 3)
assert dataset[1:3, :].X.shape == (2, 12)
assert dataset[:, :].X.shape == dataset.X.shape
def test_selected_features():
sf = SelectedFeatures(min="min", mid="mid", max="max")
assert sf["mid"]
| 2.15625
| 2
|
yapylib/helpers.py
|
twocucao/YaPyLib
| 2
|
12780791
|
<filename>yapylib/helpers.py<gh_stars>1-10
import os
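# Mirrors Flask's get_load_dotenv(): load .env files unless the
# YAPYLIB_SKIP_DOTENV environment variable is set to a truthy value
# (anything other than '0', 'false' or 'no').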
def get_load_dotenv(default=True):
val = os.environ.get('YAPYLIB_SKIP_DOTENV')
if not val:
return default
return val.lower() in ('0', 'false', 'no')
| 2.375
| 2
|
exp/data_processing/get_cropped_image.py
|
dangxuanvuong98/pineapples_harvester
| 0
|
12780792
|
<reponame>dangxuanvuong98/pineapples_harvester<gh_stars>0
import cv2
import os
import xml.etree.ElementTree as ET
from tqdm import tqdm
data_directory_path = '/media/trivu/data/DataScience/ComputerVision/dua/new_data/train'
result_path = '/media/trivu/data/DataScience/ComputerVision/dua/new_data/cropped_pineapple'
labels_path = os.listdir(data_directory_path)
labels_path = [label_path for label_path in labels_path if label_path.endswith('.xml')]
_id = 0
for label_path in tqdm(labels_path):
tree = ET.parse(os.path.join(data_directory_path, label_path))
root = tree.getroot()
filename = root.find('filename').text
img_path = os.path.join(data_directory_path, filename)
if not os.path.exists(img_path):
continue
img = cv2.imread(img_path)
for obj in root.findall('object'):
obj_name = obj.find('name').text
# if not obj_name.startswith('full'):
if not obj_name in ['body ripe pineapple', 'full ripe pineapple']:
continue
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
ymax = int(bndbox.find('ymax').text)
xmax = int(bndbox.find('xmax').text)
img_cropped = img[ymin:ymax, xmin:xmax]
_id += 1
cv2.imwrite(os.path.join(result_path, obj_name+str(_id)+'.jpg'), img_cropped)
# break
| 2.421875
| 2
|
logistic_regression.py
|
AbChatt/Final-Project-C11-Part-2-Python
| 0
|
12780793
|
<gh_stars>0
"""
CSCC11 - Introduction to Machine Learning, Winter 2020, Exam
<NAME>, <NAME>
===========================================================
COMPLETE THIS TEXT BOX:
Student Name: <NAME>
Student number: 1004820615
UtorID: chatt114
I hereby certify that the work contained here is my own
_<NAME>_
(sign with your name)
===========================================================
"""
import numpy as np
from utils import softmax
class LogisticRegression:
def __init__(self,
num_features,
num_classes,
rng=np.random):
""" This class represents a multi-class logistic regression model.
NOTE: We assume the labels are 0 to K-1, where K is number of classes.
self.parameters contains the vector of model weights.
        NOTE: the bias term is assumed to be the first element of the vector.
TODO: You will need to implement one method of this class:
- _compute_loss_and_gradient: ndarray, ndarray -> float, ndarray
Implementation description will be provided under each method.
For the following:
- N: Number of samples.
- D: Dimension of input features.
- K: Number of classes.
Args:
- num_features (int): The dimension of feature vectors for input data.
- num_classes (int): The number of classes in the task.
- rng (RandomState): The random number generator to initialize weights.
"""
self.num_features = num_features
self.num_classes = num_classes
self.rng = rng
# Initialize parameters
self.parameters = np.zeros(shape=(num_classes, self.num_features + 1))
def init_weights(self, factor=1, bias=0):
""" This initializes the model weights with random values.
Args:
- factor (float): A constant scale factor for the initial weights.
- bias (float): The bias value
"""
self.parameters[:, 1:] = factor * self.rng.rand(self.num_classes, self.num_features)
self.parameters[:, 0] = bias
def _compute_loss_and_gradient(self, X, y):
""" This computes the training loss and its gradient.
That is, the negative log likelihood (NLL) and the gradient of NLL.
Args:
- X (ndarray (shape: (N, D))): A NxD matrix containing N D-dimensional inputs.
- y (ndarray (shape: (N, 1))): A N-column vector containing N scalar outputs (labels).
Output:
- nll (float): The NLL of the training inputs and outputs.
- grad (ndarray (shape: (K, D + 1))): A Kx(D+1) weight matrix (including bias) containing the gradient of NLL
(i.e. partial derivatives of NLL w.r.t. self.parameters).
"""
(N, D) = X.shape
# ====================================================
# TODO: Implement your solution within the box
        # Pad 1's for the bias term so the inputs match self.parameters (shape (K, D + 1)).
        X_hat = np.hstack((np.ones(shape=(N, 1)), X))
        # One-hot encode the labels (labels are assumed to be 0 to K-1).
        y_one_hot = np.zeros(shape=(N, self.num_classes))
        y_one_hot[np.arange(N), y.flatten().astype(int)] = 1
        # Class probabilities via softmax over the linear scores.
        probs = softmax(X_hat @ self.parameters.T)
        # Negative log likelihood: -sum_i sum_k y_ik * log(p_ik)
        nll = -np.sum(y_one_hot * np.log(probs))
        # Gradient of the NLL w.r.t. the (K, D + 1) weight matrix (cf. equation 10 in the handout).
        grad = -(y_one_hot - probs).T @ X_hat
# ====================================================
return nll, grad
def learn(self,
train_X,
train_y,
num_epochs=1000,
step_size=1e-3,
check_grad=False,
verbose=False,
eps=np.finfo(np.float).eps):
""" This performs gradient descent to find the optimal model parameters given the training data.
NOTE: This method mutates self.parameters
Args:
- train_X (ndarray (shape: (N, D))): A NxD matrix containing N D-dimensional training inputs.
- train_y (ndarray (shape: (N, 1))): A N-column vector containing N scalar training outputs (labels).
- num_epochs (int): Number of gradient descent steps
NOTE: 1 <= num_epochs
- step_size (float): Gradient descent step size
- check_grad (bool): Whether or not to check gradient using finite difference.
- verbose (bool): Whether or not to print gradient information for every step.
- eps (float): Machine epsilon
ASIDE: The design for applying gradient descent to find local minimum is usually different from this.
You should think about a better way to do this! Scipy is a good reference for such design.
"""
assert len(train_X.shape) == len(train_y.shape) == 2, f"Input/output pairs must be 2D-arrays. train_X: {train_X.shape}, train_y: {train_y.shape}"
(N, D) = train_X.shape
assert N == train_y.shape[0], f"Number of samples must match for input/output pairs. train_X: {N}, train_y: {train_y.shape[0]}"
assert D == self.num_features, f"Expected {self.num_features} features. Got: {D}"
assert train_y.shape[1] == 1, f"train_Y must be a column vector. Got: {train_y.shape}"
assert 1 <= num_epochs, f"Must take at least 1 gradient step. Got: {num_epochs}"
nll, grad = self._compute_loss_and_gradient(train_X, train_y)
# Check gradient using finite difference
if check_grad:
original_parameters = np.copy(self.parameters)
grad_approx = np.zeros(shape=(self.num_classes, self.num_features + 1))
h = 1e-8
# Compute finite difference w.r.t. each weight vector component
for ii in range(self.num_classes):
for jj in range(self.num_features + 1):
self.parameters = np.copy(original_parameters)
self.parameters[ii][jj] += h
grad_approx[ii][jj] = (self._compute_loss_and_gradient(train_X, train_y)[0] - nll) / h
# Reset parameters back to original
self.parameters = np.copy(original_parameters)
print(f"Negative Log Likelihood: {nll}")
print(f"Analytic Gradient: {grad.T}")
print(f"Numerical Gradient: {grad_approx.T}")
print("The gradients should be nearly identical.")
# Perform gradient descent
for epoch_i in range(num_epochs):
original_parameters = np.copy(self.parameters)
# Check gradient flow
if np.linalg.norm(grad) < eps:
print(f"Gradient is close to 0: {eps}. Terminating gradient descent.")
break
# Determine the suitable step size.
step_size *= 2
self.parameters = original_parameters - step_size * grad
E_new, grad_new = self._compute_loss_and_gradient(train_X, train_y)
assert np.isfinite(E_new), f"Error is NaN/Inf"
while E_new >= nll and step_size > 0:
step_size /= 2
self.parameters = original_parameters - step_size * grad
E_new, grad_new = self._compute_loss_and_gradient(train_X, train_y)
assert np.isfinite(E_new), f"Error is NaN/Inf"
if step_size <= eps:
print(f"Infinitesimal step: {step_size}. Terminating gradient descent.")
break
if verbose:
print(f"Epoch: {epoch_i}, Step size: {step_size}, Gradient Norm: {np.linalg.norm(grad)}, NLL: {nll}")
# Update next loss and next gradient
grad = grad_new
nll = E_new
def predict(self, X):
""" This computes the probability of the K labels given the input X.
Args:
- X (ndarray (shape: (N, D))): A NxD matrix consisting N D-dimensional inputs.
Output:
- probs (ndarray (shape: (N, K))): A NxK matrix consisting N K-probabilities for each input.
"""
(N, D) = X.shape
assert D == self.num_features, f"Expected {self.num_features} features. Got: {D}"
# Pad 1's for bias term
X = np.hstack((np.ones(shape=(N, 1), dtype=np.float), X))
# This receives the probabilities of class 1 given inputs
probs = softmax(X @ self.parameters.T)
return probs
| 3.96875
| 4
|
testing/distributions/gamma_test.py
|
jnkm/MXFusion
| 0
|
12780794
|
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples
from mxfusion.components.distributions import Gamma, GammaMeanVariance
from mxfusion.util.testutils import numpy_array_reshape
from mxfusion.util.testutils import MockMXNetRandomGenerator
@pytest.mark.usefixtures("set_seed")
class TestGammaDistribution(object):
@pytest.mark.parametrize("dtype, mean, mean_isSamples, variance, variance_isSamples, rv, rv_isSamples, num_samples", [
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(2)), False, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float32, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
])
def test_log_pdf_mean_variance(self, dtype, mean, mean_isSamples, variance, variance_isSamples,
rv, rv_isSamples, num_samples):
        import scipy as sp
        import scipy.stats
isSamples_any = any([mean_isSamples, variance_isSamples, rv_isSamples])
rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
n_dim = 1 + len(rv.shape) if isSamples_any and not rv_isSamples else len(rv.shape)
mean_np = numpy_array_reshape(mean, mean_isSamples, n_dim)
variance_np = numpy_array_reshape(variance, variance_isSamples, n_dim)
rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
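        # Mean/variance parameterization of the Gamma: mean = alpha/beta and
        # variance = alpha/beta**2, hence beta = mean/variance and alpha = mean*beta.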
beta_np = mean_np / variance_np
alpha_np = mean_np * beta_np
log_pdf_np = sp.stats.gamma.logpdf(rv_np, a=alpha_np, loc=0, scale=1./beta_np)
mean_mx = mx.nd.array(mean, dtype=dtype)
if not mean_isSamples:
mean_mx = add_sample_dimension(mx.nd, mean_mx)
variance_mx = mx.nd.array(variance, dtype=dtype)
if not variance_isSamples:
variance_mx = add_sample_dimension(mx.nd, variance_mx)
rv_mx = mx.nd.array(rv, dtype=dtype)
if not rv_isSamples:
rv_mx = add_sample_dimension(mx.nd, rv_mx)
gamma = GammaMeanVariance.define_variable(mean=mean_mx, variance=variance_mx, shape=rv_shape, dtype=dtype).factor
variables = {gamma.mean.uuid: mean_mx, gamma.variance.uuid: variance_mx, gamma.random_variable.uuid: rv_mx}
log_pdf_rt = gamma.log_pdf(F=mx.nd, variables=variables)
assert np.issubdtype(log_pdf_rt.dtype, dtype)
assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any
if isSamples_any:
assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize(
"dtype, mean, mean_isSamples, variance, variance_isSamples, rv_shape, num_samples",[
(np.float64, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(2), False, np.random.rand(5,2)+0.1, True, (3,2), 5),
(np.float64, np.random.rand(2), False, np.random.rand(2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(5,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
(np.float32, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
])
def test_draw_samples_mean_variance(self, dtype, mean, mean_isSamples, variance,
variance_isSamples, rv_shape, num_samples):
n_dim = 1 + len(rv_shape)
out_shape = (num_samples,) + rv_shape
mean_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(mean, mean_isSamples, n_dim), shape=out_shape), dtype=dtype)
variance_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(variance, variance_isSamples, n_dim), shape=out_shape), dtype=dtype)
gamma = GammaMeanVariance.define_variable(shape=rv_shape, dtype=dtype).factor
mean_mx = mx.nd.array(mean, dtype=dtype)
if not mean_isSamples:
mean_mx = add_sample_dimension(mx.nd, mean_mx)
variance_mx = mx.nd.array(variance, dtype=dtype)
if not variance_isSamples:
variance_mx = add_sample_dimension(mx.nd, variance_mx)
variables = {gamma.mean.uuid: mean_mx, gamma.variance.uuid: variance_mx}
mx.random.seed(0)
rv_samples_rt = gamma.draw_samples(
F=mx.nd, variables=variables, num_samples=num_samples)
mx.random.seed(0)
beta_np = mean_np / variance_np
alpha_np = mean_np * beta_np
rv_samples_mx = mx.nd.random.gamma(alpha=alpha_np, beta=beta_np, dtype=dtype)
assert np.issubdtype(rv_samples_rt.dtype, dtype)
assert is_sampled_array(mx.nd, rv_samples_rt)
assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(rv_samples_mx.asnumpy(), rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize("dtype, alpha, alpha_isSamples, beta, beta_isSamples, rv, rv_isSamples, num_samples", [
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(2)), False, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float32, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
])
def test_log_pdf(self, dtype, alpha, alpha_isSamples, beta, beta_isSamples,
rv, rv_isSamples, num_samples):
        import scipy as sp
        import scipy.stats
isSamples_any = any([alpha_isSamples, beta_isSamples, rv_isSamples])
rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
n_dim = 1 + len(rv.shape) if isSamples_any and not rv_isSamples else len(rv.shape)
alpha_np = numpy_array_reshape(alpha, alpha_isSamples, n_dim)
beta_np = numpy_array_reshape(beta, beta_isSamples, n_dim)
rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
log_pdf_np = sp.stats.gamma.logpdf(rv_np, a=alpha_np, loc=0, scale=1./beta_np)
gamma = Gamma.define_variable(shape=rv_shape, dtype=dtype).factor
alpha_mx = mx.nd.array(alpha, dtype=dtype)
if not alpha_isSamples:
alpha_mx = add_sample_dimension(mx.nd, alpha_mx)
beta_mx = mx.nd.array(beta, dtype=dtype)
if not beta_isSamples:
beta_mx = add_sample_dimension(mx.nd, beta_mx)
rv_mx = mx.nd.array(rv, dtype=dtype)
if not rv_isSamples:
rv_mx = add_sample_dimension(mx.nd, rv_mx)
variables = {gamma.alpha.uuid: alpha_mx, gamma.beta.uuid: beta_mx, gamma.random_variable.uuid: rv_mx}
log_pdf_rt = gamma.log_pdf(F=mx.nd, variables=variables)
assert np.issubdtype(log_pdf_rt.dtype, dtype)
assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any
if isSamples_any:
assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize(
"dtype, alpha, alpha_isSamples, beta, beta_isSamples, rv_shape, num_samples",[
(np.float64, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(2), False, np.random.rand(5,2)+0.1, True, (3,2), 5),
(np.float64, np.random.rand(2), False, np.random.rand(2)+0.1, False, (3,2), 5),
(np.float64, np.random.rand(5,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
(np.float32, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
])
def test_draw_samples(self, dtype, alpha, alpha_isSamples, beta,
beta_isSamples, rv_shape, num_samples):
n_dim = 1 + len(rv_shape)
out_shape = (num_samples,) + rv_shape
alpha_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(alpha, alpha_isSamples, n_dim), shape=out_shape), dtype=dtype)
beta_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(beta, beta_isSamples, n_dim), shape=out_shape), dtype=dtype)
gamma = Gamma.define_variable(shape=rv_shape, dtype=dtype).factor
alpha_mx = mx.nd.array(alpha, dtype=dtype)
if not alpha_isSamples:
alpha_mx = add_sample_dimension(mx.nd, alpha_mx)
beta_mx = mx.nd.array(beta, dtype=dtype)
if not beta_isSamples:
beta_mx = add_sample_dimension(mx.nd, beta_mx)
variables = {gamma.alpha.uuid: alpha_mx, gamma.beta.uuid: beta_mx}
mx.random.seed(0)
rv_samples_rt = gamma.draw_samples(
F=mx.nd, variables=variables, num_samples=num_samples)
mx.random.seed(0)
rv_samples_mx = mx.nd.random.gamma(alpha=alpha_np, beta=beta_np, dtype=dtype)
assert np.issubdtype(rv_samples_rt.dtype, dtype)
assert is_sampled_array(mx.nd, rv_samples_rt)
assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(rv_samples_mx.asnumpy(), rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
| 2.046875
| 2
|
server/danesfield_server/loader.py
|
Kitware/Danesfield-App
| 25
|
12780795
|
<reponame>Kitware/Danesfield-App
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
from girder import events
from girder.utility.config import getServerMode
from .rest import dataset, workingSet, processing, filter
from .event_handlers import onFinalizeUpload, onJobUpdate
from .workflow import DanesfieldWorkflow
from .workflow_manager import DanesfieldWorkflowManager
from .client_webroot import ClientWebroot
from .workflow_steps import RunDanesfieldImageless
def createWorkflow():
"""
Configure Danesfield Workflow.
"""
workflow = DanesfieldWorkflow()
for step in [
RunDanesfieldImageless,
]:
workflow.addStep(step())
return workflow
def load(info):
# Install event handlers
events.bind(
"model.file.finalizeUpload.after", "danesfield-after-upload", onFinalizeUpload
)
events.bind("jobs.job.update", "danesfield-job-update", onJobUpdate)
# Set workflow on workflow manager
# TODO: On each request to /process, set this to either the normal or point-cloud starting workflow?
DanesfieldWorkflowManager.instance().workflow = createWorkflow()
if getServerMode() == "production":
# Serve client from /
# Relocate girder to serve from /girder
info["serverRoot"], info["serverRoot"].girder = (
ClientWebroot(),
info["serverRoot"],
)
info["serverRoot"].api = info["serverRoot"].girder.api
# Add API routes
info["apiRoot"].dataset = dataset.DatasetResource()
info["apiRoot"].workingSet = workingSet.WorkingSetResource()
info["apiRoot"].filter = filter.FilterResource()
info["apiRoot"].processing = processing.ProcessingResource()
| 1.78125
| 2
|
file_builder/test/hash_dirs_test.py
|
btrekkie/file-builder
| 1
|
12780796
|
import hashlib
import os
from .. import FileBuilder
from .file_builder_test import FileBuilderTest
class HashDirsTest(FileBuilderTest):
"""Tests a hash directory build operation.
The build operation computes SHA-256 hashes for all of the files and
directories in a given root directory. A directory's hash
incorporates the hashes and names of the files and directories in
the directory.
This tests nested subbuilds, as each directory and file hash
operation has its own subbuild.
"""
def setUp(self):
super().setUp()
self._build_number = 0
self._input_dir = os.path.join(self._temp_dir, 'Input')
os.mkdir(self._input_dir)
def _hash_file(self, builder, filename):
"""Build file function that computes a file's hash."""
digest = hashlib.sha256()
with builder.read_binary(filename) as file_:
bytes_ = file_.read(1024)
while len(bytes_) > 0:
digest.update(bytes_)
bytes_ = file_.read(1024)
hash_ = digest.hexdigest()
return {
'build': self._build_number,
'hash': hash_,
}
def _hash_dirs(self, builder, dir_):
"""Subbuild function that computes a directory's hash."""
digest = hashlib.sha256()
subfile_results = {}
for subfile in sorted(builder.list_dir(dir_)):
digest.update(subfile.encode())
absolute_subfile = os.path.join(dir_, subfile)
if builder.is_file(absolute_subfile):
subfile_result = builder.subbuild(
'hash_file', self._hash_file, absolute_subfile)
else:
subfile_result = builder.subbuild(
'hash_dirs', self._hash_dirs, absolute_subfile)
subfile_results[subfile] = subfile_result
digest.update(subfile_result['hash'].encode())
hash_ = digest.hexdigest()
return {
'build': self._build_number,
'hash': hash_,
'subfiles': subfile_results,
}
def _build(self):
"""Execute the "hash dirs" build operation."""
self._build_number += 1
return FileBuilder.build(
self._cache_filename, 'hash_dirs_test', self._hash_dirs,
self._input_dir)
def _file_hash(self, hashes, *components):
"""Return the item in ``hashes`` for the specified file.
Return the ``'build'`` and ``'hash'`` entries of the item in
``hashes`` for ``os.path.join(self._input_dir, *components)``,
if any.
Returns:
dict<str, object>: The result.
"""
subhashes = hashes
for component in components:
if ('subfiles' not in subhashes or
component not in subhashes['subfiles']):
return None
subhashes = subhashes['subfiles'][component]
return {
'build': subhashes['build'],
'hash': subhashes['hash'],
}
def test_hash_dirs(self):
"""Test ``FileBuilder`` with the hash directory build operation."""
os.makedirs(os.path.join(self._input_dir, 'Book', 'Bus', 'Apple'))
os.mkdir(os.path.join(self._input_dir, 'Yarn'))
os.mkdir(os.path.join(self._input_dir, 'Window'))
self._write(
os.path.join(self._input_dir, 'Book', 'Cartwheel.txt'), 'Circle')
self._write(os.path.join(self._input_dir, 'Book', 'Igloo.txt'), 'Wide')
self._write(
os.path.join(self._input_dir, 'Book', 'Bus', 'Apple', 'Leaf.txt'),
'Alphabet')
self._write(
os.path.join(self._input_dir, 'Window', 'Cabinet.txt'), 'Orange')
hashes1 = self._build()
root_hash1 = self._file_hash(hashes1)
book_hash1 = self._file_hash(hashes1, 'Book')
bus_hash1 = self._file_hash(hashes1, 'Book', 'Bus')
apple_hash1 = self._file_hash(hashes1, 'Book', 'Bus', 'Apple')
yarn_hash1 = self._file_hash(hashes1, 'Yarn')
window_hash1 = self._file_hash(hashes1, 'Window')
cartwheel_hash1 = self._file_hash(hashes1, 'Book', 'Cartwheel.txt')
igloo_hash1 = self._file_hash(hashes1, 'Book', 'Igloo.txt')
leaf_hash1 = self._file_hash(
hashes1, 'Book', 'Bus', 'Apple', 'Leaf.txt')
cabinet_hash1 = self._file_hash(hashes1, 'Window', 'Cabinet.txt')
self.assertIsNotNone(root_hash1)
self.assertIsNotNone(book_hash1)
self.assertIsNotNone(bus_hash1)
self.assertIsNotNone(apple_hash1)
self.assertIsNotNone(yarn_hash1)
self.assertIsNotNone(window_hash1)
self.assertIsNotNone(cartwheel_hash1)
self.assertIsNotNone(igloo_hash1)
self.assertIsNotNone(leaf_hash1)
self.assertIsNotNone(cabinet_hash1)
self._write(
os.path.join(self._input_dir, 'Window', 'Cabinet.txt'), 'Bicycle')
hashes2 = self._build()
root_hash2 = self._file_hash(hashes2)
book_hash2 = self._file_hash(hashes2, 'Book')
bus_hash2 = self._file_hash(hashes2, 'Book', 'Bus')
apple_hash2 = self._file_hash(hashes2, 'Book', 'Bus', 'Apple')
yarn_hash2 = self._file_hash(hashes2, 'Yarn')
window_hash2 = self._file_hash(hashes2, 'Window')
cartwheel_hash2 = self._file_hash(hashes2, 'Book', 'Cartwheel.txt')
igloo_hash2 = self._file_hash(hashes2, 'Book', 'Igloo.txt')
leaf_hash2 = self._file_hash(
hashes2, 'Book', 'Bus', 'Apple', 'Leaf.txt')
cabinet_hash2 = self._file_hash(hashes2, 'Window', 'Cabinet.txt')
self.assertNotEqual(root_hash1['hash'], root_hash2['hash'])
self.assertEqual(2, root_hash2['build'])
self.assertNotEqual(window_hash1['hash'], window_hash2['hash'])
self.assertEqual(2, window_hash2['build'])
self.assertNotEqual(cabinet_hash1['hash'], cabinet_hash2['hash'])
self.assertEqual(2, cabinet_hash2['build'])
self.assertEqual(book_hash1, book_hash2)
self.assertEqual(bus_hash1, bus_hash2)
self.assertEqual(apple_hash1, apple_hash2)
self.assertEqual(yarn_hash1, yarn_hash2)
self.assertEqual(cartwheel_hash1, cartwheel_hash2)
self.assertEqual(igloo_hash1, igloo_hash2)
self.assertEqual(leaf_hash1, leaf_hash2)
self._write(
os.path.join(self._input_dir, 'Book', 'Bus', 'Clock.txt'),
'Flower')
self._write(os.path.join(self._input_dir, 'Yarn', 'Road.txt'), 'Sky')
os.mkdir(os.path.join(self._input_dir, 'Fruit'))
os.remove(os.path.join(self._input_dir, 'Window', 'Cabinet.txt'))
hashes3 = self._build()
root_hash3 = self._file_hash(hashes3)
book_hash3 = self._file_hash(hashes3, 'Book')
bus_hash3 = self._file_hash(hashes3, 'Book', 'Bus')
apple_hash3 = self._file_hash(hashes3, 'Book', 'Bus', 'Apple')
yarn_hash3 = self._file_hash(hashes3, 'Yarn')
window_hash3 = self._file_hash(hashes3, 'Window')
fruit_hash3 = self._file_hash(hashes3, 'Fruit')
cartwheel_hash3 = self._file_hash(hashes3, 'Book', 'Cartwheel.txt')
igloo_hash3 = self._file_hash(hashes3, 'Book', 'Igloo.txt')
leaf_hash3 = self._file_hash(
hashes3, 'Book', 'Bus', 'Apple', 'Leaf.txt')
cabinet_hash3 = self._file_hash(hashes3, 'Window', 'Cabinet.txt')
clock_hash3 = self._file_hash(hashes3, 'Book', 'Bus', 'Clock.txt')
road_hash3 = self._file_hash(hashes3, 'Yarn', 'Road.txt')
self.assertNotEqual(root_hash2['hash'], root_hash3['hash'])
self.assertEqual(3, root_hash3['build'])
self.assertNotEqual(book_hash2['hash'], book_hash3['hash'])
self.assertEqual(3, book_hash3['build'])
self.assertNotEqual(bus_hash2['hash'], bus_hash3['hash'])
self.assertEqual(3, bus_hash3['build'])
self.assertNotEqual(yarn_hash2['hash'], yarn_hash3['hash'])
self.assertEqual(3, yarn_hash3['build'])
self.assertNotEqual(window_hash2['hash'], window_hash3['hash'])
self.assertEqual(3, window_hash3['build'])
self.assertIsNone(cabinet_hash3)
self.assertEqual(apple_hash2, apple_hash3)
self.assertEqual(cartwheel_hash2, cartwheel_hash3)
self.assertEqual(igloo_hash2, igloo_hash3)
self.assertEqual(leaf_hash2, leaf_hash3)
self.assertEqual(3, fruit_hash3['build'])
self.assertEqual(3, clock_hash3['build'])
self.assertEqual(3, road_hash3['build'])
hashes4 = self._build()
root_hash4 = self._file_hash(hashes4)
book_hash4 = self._file_hash(hashes4, 'Book')
bus_hash4 = self._file_hash(hashes4, 'Book', 'Bus')
apple_hash4 = self._file_hash(hashes4, 'Book', 'Bus', 'Apple')
yarn_hash4 = self._file_hash(hashes4, 'Yarn')
window_hash4 = self._file_hash(hashes4, 'Window')
fruit_hash4 = self._file_hash(hashes4, 'Fruit')
cartwheel_hash4 = self._file_hash(hashes4, 'Book', 'Cartwheel.txt')
igloo_hash4 = self._file_hash(hashes4, 'Book', 'Igloo.txt')
leaf_hash4 = self._file_hash(
hashes4, 'Book', 'Bus', 'Apple', 'Leaf.txt')
clock_hash4 = self._file_hash(hashes4, 'Book', 'Bus', 'Clock.txt')
road_hash4 = self._file_hash(hashes4, 'Yarn', 'Road.txt')
self.assertNotEqual(root_hash3, root_hash4)
self.assertEqual(book_hash3, book_hash4)
self.assertEqual(bus_hash3, bus_hash4)
self.assertEqual(apple_hash3, apple_hash4)
self.assertEqual(yarn_hash3, yarn_hash4)
self.assertEqual(window_hash3, window_hash4)
self.assertEqual(fruit_hash3, fruit_hash4)
self.assertEqual(cartwheel_hash3, cartwheel_hash4)
self.assertEqual(igloo_hash3, igloo_hash4)
self.assertEqual(leaf_hash3, leaf_hash4)
self.assertEqual(clock_hash3, clock_hash4)
self.assertEqual(road_hash3, road_hash4)
hashes5 = self._build()
self.assertEqual(5, hashes5['build'])
self.assertEqual(3, hashes5['subfiles']['Book']['build'])
hashes6 = self._build()
self.assertEqual(6, hashes6['build'])
self.assertEqual(3, hashes6['subfiles']['Book']['build'])
| 3.109375
| 3
|
2019/02_ProgramAlarm/aoc_pa.py
|
deanearlwright/AdventOfCode
| 1
|
12780797
|
# ======================================================================
# Program Alarm
# Advent of Code 2019 Day 02 -- <NAME> -- https://adventofcode.com
#
# Computer simulation by Dr. <NAME> III
# ======================================================================
# ======================================================================
# a o c _ p a . p y
# ======================================================================
"Solve the Program Alarm problem for Advent of Code 2019 day 03"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import argparse
import sys
import intcode
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# parse_commnd_line
# ----------------------------------------------------------------------
def parse_command_line():
"Parse the command line options"
# 1. Create the command line parser
desc = 'Program Alarm - day 02 of Advent of Code 2019'
sample = 'sample: python aoc_pa.py input.txt'
parser = argparse.ArgumentParser(description=desc,
epilog=sample)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
dest='verbose', help='Print status messages to stdout')
parser.add_argument('-p', '--part', action='store', default=1, type=int,
dest='part', help='Puzzle Part (1 or 2)')
parser.add_argument('-t', '--max-time', action='store', default=0, type=int,
dest='maxtime', help='Maximum timer ticks before quitting')
parser.add_argument('filepath', metavar='FILENAME', action='store', type=str,
help="Location of puzzle input")
# 2. Get the options and arguments
return parser.parse_args()
# ----------------------------------------------------------------------
# part_one
# ----------------------------------------------------------------------
def part_one(args, input_lines):
"Process part one of the puzzle"
    # 1. Optionally select fixes
noun = None
verb = None
if len(input_lines[0]) > 100:
print("Fixing up input at 1 and 2 to be 12 and 2")
noun = 12
verb = 2
    # 2. Create the computer with fixes
computer = intcode.IntCode(text=input_lines[0], noun=noun, verb=verb)
if args.verbose:
print("The computer has %d positions" % len(computer.positions))
print(computer.instructions())
# 3. Run the computer until it stops
solution = computer.run(max_steps=args.maxtime, watch=args.verbose)
# 4. Check it ran out of time
if solution is None:
print("No solution found after %d steps" % args.maxtime)
# 5. Check it stopped with an error
elif solution != intcode.STOP_HLT:
print("Computer alarm %d" % solution)
solution = None
# 6. The solution is at position 0
else:
solution = computer.fetch(intcode.ADDR_RSLT)
print("The solution is %d" % (solution))
# 7. Return result
return solution is not None
# ----------------------------------------------------------------------
# part_two
# ----------------------------------------------------------------------
def part_two(args, input_lines):
"Process part two of the puzzle"
# 1. Set target
target = 19690720
if args.verbose:
print("The target is %d" % target)
# 2. Loop over possible nouns
for noun in range(100):
# 3. Loop over possible verbs
if args.verbose:
print("Checking noun = %d" % noun)
for verb in range(100):
# 4. Create the computer
computer = intcode.IntCode(text=input_lines[0], noun=noun, verb=verb)
# 5. Run the computer until it stops
solution = computer.run(max_steps=args.maxtime)
# 6. Check it ran out of time
if solution is None:
print("No solution found after %d steps for noun = %d and verb = %d" %
(args.maxtime, noun, verb))
return False
# 7. Check it stopped with an error
if solution != intcode.STOP_HLT:
print("Computer alarm %d with noun = %d and verb = %d" %
(solution, noun, verb))
return False
# 8. The solution is at position 0
solution = computer.fetch(intcode.ADDR_RSLT)
if solution == target:
print("Target of %d found with noun = %d and verb = %d" %
(solution, noun, verb))
print("Solution = %d" % (100 * noun + verb))
return True
# 9. Unsuccessful
print("Target of %d not found" % target)
return False
# ----------------------------------------------------------------------
# from_file
# ----------------------------------------------------------------------
def from_file(filepath):
"Read the file"
return from_text(open(filepath).read())
# ----------------------------------------------------------------------
# from_text
# ----------------------------------------------------------------------
def from_text(text):
"Break the text into trimed, non-comment lines"
# 1. We start with no lines
lines = []
# 2. Loop for lines in the text
for line in text.split('\n'):
        # 3. But ignore blank and comment lines
line = line.rstrip(' \r')
if not line:
continue
if line.startswith('#'):
continue
# 4. Add the line
lines.append(line)
# 5. Return a list of clean lines
return lines
# ----------------------------------------------------------------------
# main
# ----------------------------------------------------------------------
def main():
"""Read Program Alarm and solve it"""
# 1. Get the command line options
args = parse_command_line()
# 2. Read the puzzle file
input_text = from_file(args.filepath)
    # 3. Process the appropriate part of the puzzle
if args.part == 1:
result = part_one(args, input_text)
else:
result = part_two(args, input_text)
    # 4. Set return code (0 if solution found, 2 if not)
if result:
sys.exit(0)
sys.exit(2)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
# ======================================================================
# end a o c _ p a . p y end
# ======================================================================
| 2.5
| 2
|
timm/version.py
|
chilung/pytorch-image-models
| 0
|
12780798
|
<reponame>chilung/pytorch-image-models
__version__ = '0.4.13.22'
| 0.941406
| 1
|
lib/solutions/checkout.py
|
DPNT-Sourcecode/CHK-hqvw01
| 0
|
12780799
|
# Basic price table, without offers
PRICES = {"A": 50,
"B": 30,
"C": 20,
"D": 15,
"E": 40,
"F": 10,
"G": 20,
"H": 10,
"I": 35,
"J": 60,
"K": 70,
"L": 90,
"M": 15,
"N": 40,
"O": 10,
"P": 50,
"Q": 30,
"R": 50,
"S": 20,
"T": 20,
"U": 40,
"V": 50,
"W": 20,
"X": 17,
"Y": 20,
"Z": 21}
#------------------------------------------------------------------------------
# Offer Functions
# - The names of the functions should make their purpose fairly clear.
#------------------------------------------------------------------------------
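# Example: x_for_y(counts, cost, "A", 3, 130) charges 130 for every complete
# group of 3 A's and leaves the remainder to be priced individually, while
# x_get_one_y_free(counts, "E", 2, "B") removes one B from the basket for
# every 2 E's bought (capped at the number of B's present).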
def x_for_y(sku_to_count, cost, product, number, item_cost):
if sku_to_count[product] >= number:
cost += item_cost * (sku_to_count[product] // number)
sku_to_count[product] = sku_to_count[product] % number
return (sku_to_count, cost)
def x_get_one_y_free(sku_to_count, buy_prod, number_of_buy, free_prod):
if sku_to_count[buy_prod] >= number_of_buy:
if sku_to_count[free_prod] > sku_to_count[buy_prod] // number_of_buy:
sku_to_count[free_prod] -= (sku_to_count[buy_prod] // number_of_buy)
else:
sku_to_count[free_prod] = 0
return sku_to_count
def x_get_x_free(sku_to_count, product, number):
    if sku_to_count[product] >= number:
sku_to_count[product] -= sku_to_count[product] // number
return sku_to_count
def any_3_stxyz_45(sku_to_count, cost):
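    # Intended to implement the "buy any 3 of (S, T, X, Y, Z) for 45" group
    # offer, filling groups of three with the most expensive eligible items
    # first (customer-favourable) and charging 45 per complete group.
    # Note: this helper is not yet wired into checkout() below.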
# note that the following is in order of increasing price.
products = ["X", "T", "S", "Y", "Z"]
list_of_items = [sku_to_count[item] for item in products \
if item in sku_to_count]
number_of_items = 0
for item in list_of_items:
number_of_items += item
to_remove = {item: 0 for item in products}
current_item = products.pop()
count_to_3 = 0
while (number_of_items + count_to_3) // 3 != 0:
while current_item not in sku_to_count and products:
current_item = products.pop()
if not products:
break
if sku_to_count[current_item] - to_remove[current_item] > 0:
to_remove[current_item] += 1
count_to_3 += 1
number_of_items -= 1
if count_to_3 % 3 == 0:
# We actually remove them now, and add the cost
for item in to_remove:
sku_to_count[item] -= to_remove[item]
cost += 45
print sku_to_count
print to_remove
return sku_to_count, cost
#------------------------------------------------------------------------------
# End offer functions
#------------------------------------------------------------------------------
# noinspection PyUnusedLocal
# skus = unicode string
def checkout(skus):
if not isinstance(skus, unicode):
return -1
for item in skus:
if item not in PRICES:
return -1
skus = str(skus)
cost = 0
sku_to_count = {}
for sku in PRICES:
sku_to_count[sku] = skus.count(sku)
# Apply special offers.
# We always start with E - it should be applied before B's as it's the
# cheaper way.
sku_to_count = x_get_one_y_free(sku_to_count, "E", 2, "B")
# As above for R.
sku_to_count = x_get_one_y_free(sku_to_count, "R", 3, "Q")
# Begin remaining offers in order.
sku_to_count, cost = x_for_y(sku_to_count, cost, "A", 5, 200)
sku_to_count, cost = x_for_y(sku_to_count, cost, "A", 3, 130)
sku_to_count, cost = x_for_y(sku_to_count, cost, "B", 2, 45)
sku_to_count = x_get_x_free(sku_to_count, "F", 3)
sku_to_count, cost = x_for_y(sku_to_count, cost, "H", 10, 80)
sku_to_count, cost = x_for_y(sku_to_count, cost, "H", 5, 45)
sku_to_count, cost = x_for_y(sku_to_count, cost, "K", 2, 120)
sku_to_count = x_get_one_y_free(sku_to_count, "N", 3, "M")
sku_to_count, cost = x_for_y(sku_to_count, cost, "P", 5, 200)
sku_to_count, cost = x_for_y(sku_to_count, cost, "Q", 3, 80)
sku_to_count = x_get_x_free(sku_to_count, "U", 4)
sku_to_count, cost = x_for_y(sku_to_count, cost, "V", 3, 130)
sku_to_count, cost = x_for_y(sku_to_count, cost, "V", 2, 90)
# Now iterate over and add the remaining prices
for item in PRICES:
cost += (sku_to_count[item] * PRICES[item])
return cost
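# Worked example of the offer ordering above (illustrative only, not part of
# the original solution): for u"EEBB" the "2E get one B free" offer removes
# one B, leaving one B at full price, so the total is 2*40 + 30 = 110 rather
# than the 80 + 45 = 125 you would get by applying the "2B for 45" offer first.
# Unrecognised SKUs still return -1.
#
#   assert checkout(u"EEBB") == 110
#   assert checkout(u"unknown") == -1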
| 3.90625
| 4
|
make.py
|
kagu/kunquat
| 13
|
12780800
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2014-2018
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from copy import deepcopy
from optparse import Option, SUPPRESS_HELP
import ast
import os
import os.path
import shutil
import subprocess
import sys
sys.dont_write_bytecode = True
import support.fabricate as fabricate
import scripts.command as command
from scripts.cc import get_cc
import scripts.configure as configure
from scripts.build_libs import build_libkunquat, build_libkunquatfile
from scripts.test_libkunquat import test_libkunquat
from scripts.build_examples import build_examples
from scripts.install_libs import install_libkunquat, install_libkunquatfile
from scripts.install_examples import install_examples
from scripts.install_share import install_share
import options
# Add definitions of options.py as command line switches
cmdline_opts = []
opt_vars = []
options_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'options.py')
with open(options_path) as f:
data = f.read()
raw_entries = [e.strip() for e in data.split('\n\n') if e.strip()]
type_names = { str: 'string', int: 'int' }
for raw_entry in raw_entries:
lines = raw_entry.split('\n')
desc_lines = lines[:-1]
def_line = lines[-1]
desc = '\n'.join(dl[1:].strip() for dl in desc_lines)
var_name, _, value_str = (s.strip() for s in def_line.partition('='))
name = '--' + var_name.replace('_', '-')
value = ast.literal_eval(value_str)
opt_vars.append(var_name)
if type(value) == bool:
first_word = var_name.split('_')[0]
if first_word == 'enable':
negated_name = name.replace('enable', 'disable', 1)
elif first_word == 'with':
negated_name = name.replace('with', 'without', 1)
else:
assert False
if value == True:
negated_desc = (desc.replace('enable', 'disable', 1)
if desc.startswith('enable') else ('do not ' + desc))
full_desc = '{} (default: enabled)'.format(negated_desc)
neg_opt = Option(
negated_name,
action='store_false',
dest=var_name,
help=full_desc)
pos_opt = Option(
name, action='store_true', dest=var_name, help=SUPPRESS_HELP)
else:
full_desc = '{} (default: disabled)'.format(desc)
pos_opt = Option(
name, action='store_true', dest=var_name, help=full_desc)
neg_opt = Option(
negated_name,
action='store_false',
dest=var_name,
help=SUPPRESS_HELP)
cmdline_opts.extend((neg_opt, pos_opt))
elif value == None:
if var_name == 'cc':
desc = ('select C compiler'
' (supported values: gcc (default), clang)')
option = Option(name, type='choice', choices=['gcc', 'clang'], help=desc)
cmdline_opts.append(option)
else:
assert False
else:
type_name = type_names[type(value)]
full_desc = '{} (default: {})'.format(desc, value)
option = Option(name, type=type_name, help=full_desc)
cmdline_opts.append(option)
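# For reference, each entry in options.py parsed above is expected to be a
# blank-line-separated block of one or more '# description' lines followed by
# a 'name = <Python literal>' line, for example (values illustrative only,
# not the project's actual defaults):
#
#   # enable debug mode
#   enable_debug = False
#
# Boolean option names must start with 'enable' or 'with' so that the negated
# '--disable-*' / '--without-*' switch can be generated.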
def process_cmd_line():
for var_name in opt_vars:
override = fabricate.main.options.__dict__[var_name]
if override != None:
options.__dict__[var_name] = override
# Make sure the installation prefix is absolute
options.prefix = os.path.abspath(os.path.expanduser(options.prefix))
class PrettyBuilder(fabricate.Builder):
def __init__(self, *args, **kwargs):
fabricate.Builder.__init__(self, *args, **kwargs)
def echo(self, message):
'''Suppress printing of an empty string.'''
if message:
fabricate.Builder.echo(self, message)
def build():
process_cmd_line()
if options.enable_python_tests and options.enable_long_tests:
python_modules = ['scripts', 'kunquat']
fabricate.run('pylint', *python_modules)
fabricate.run('flake8', *python_modules)
cc = get_cc(options.cc)
cc.set_debug(options.enable_debug)
if options.enable_debug_asserts:
cc.add_define('ENABLE_DEBUG_ASSERTS')
#if options.enable_profiling:
# compile_flags.append('-pg')
# link_flags.append('-pg')
if options.enable_native_arch:
cc.set_native_arch()
if options.optimise not in range(5):
print('Unsupported optimisation level: {}'.format(options.optimise),
file=sys.stderr)
sys.exit(1)
cc.set_optimisation(options.optimise)
builder = PrettyBuilder()
if options.enable_python_bindings:
try:
python_cmd = command.PythonCommand()
except RuntimeError:
print('Python bindings were requested but Python 2.7 was not found.',
file=sys.stderr)
sys.exit(1)
if options.enable_tests_mem_debug:
try:
output = subprocess.check_output(
['valgrind', '--version'], stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError):
output = b''
if not output.startswith(b'valgrind'):
print('Memory debugging of libkunquat tests was requested'
' but Valgrind was not found.',
file=sys.stderr)
sys.exit(1)
# Check dependencies
configure.test_add_common_external_deps(builder, options, cc)
# Build libkunquat
if options.enable_libkunquat:
libkunquat_cc = deepcopy(cc)
configure.test_add_libkunquat_external_deps(builder, options, libkunquat_cc)
build_libkunquat(builder, options, libkunquat_cc)
# Build libkunquatfile
if options.enable_libkunquatfile:
libkunquatfile_cc = deepcopy(cc)
configure.test_add_libkunquatfile_external_deps(
builder, options, libkunquatfile_cc)
build_libkunquatfile(builder, options, libkunquatfile_cc)
# Run tests
if options.enable_tests:
test_cc = deepcopy(cc)
configure.test_add_test_deps(builder, options, test_cc)
test_libkunquat(builder, options, test_cc)
if options.enable_python_tests:
fabricate.run(
'env',
'LD_LIBRARY_PATH=build/src/lib',
'python3',
'-m',
'unittest',
'discover',
'-v')
# Build examples
if options.enable_examples:
build_examples(builder)
def clean():
if os.path.exists('build'):
# Remove Python-specific build directories first
for name in os.listdir('build'):
expected_suffix = '-{}.{}'.format(sys.version_info[0], sys.version_info[1])
if name.endswith(expected_suffix) or name == 'lib':
path = os.path.join('build', name)
shutil.rmtree(path)
fabricate.autoclean()
def install():
build()
install_builder = None
if options.enable_libkunquat:
install_libkunquat(
install_builder, options.prefix, options.enable_libkunquat_dev)
if options.enable_libkunquatfile:
install_libkunquatfile(
install_builder, options.prefix, options.enable_libkunquatfile_dev)
if options.enable_examples:
install_examples(install_builder, options.prefix)
install_share(install_builder, options.prefix)
if options.enable_python_bindings:
python_cmd = command.PythonCommand()
args = ['py-setup.py', 'install', '--prefix={}'.format(options.prefix)]
if not options.enable_export:
args.append('--disable-export')
if not options.enable_player:
args.append('--disable-player')
if not options.enable_tracker:
args.append('--disable-tracker')
try:
python_cmd.run(install_builder, *args)
except subprocess.CalledProcessError:
sys.exit(1)
fabricate.main(extra_options=cmdline_opts)
| 2.109375
| 2
|
scraper/urls.py
|
pekasen/DBoeS-Automatization
| 0
|
12780801
|
"""
List of German Wikipedia pages to extract parliamentarian information from.
Index points to table indices (running id) where to find information on the page.
"""
BASE = "https://de.wikipedia.org/wiki/"
parliaments = {
"sachsen": {
"name": "Sachsen",
"url": BASE + "Liste_der_Mitglieder_des_S%C3%A4chsischen_Landtags_(6._Wahlperiode)",
"index": 2
},
"hamburg": {
"name": "Hamburg",
"url": BASE + "Liste_der_Mitglieder_der_Hamburgischen_B%C3%BCrgerschaft_(22._Wahlperiode)",
"index": 2
},
"bawue": {
"name": "Baden-Württemberg",
"url": BASE + "Liste_der_Mitglieder_des_Landtags_von_Baden-W%C3%BCrttemberg_(16._Wahlperiode)",
"index": 8
},
"mcpomm": {
"name": "Mecklenburg-Vorpommern",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_Mecklenburg-Vorpommern_(7._Wahlperiode)",
"index": 2
},
"brandenburg": {
"name": "Brandenburg",
"url": BASE + "Liste_der_Mitglieder_des_Landtags_Brandenburg_(7._Wahlperiode)",
"index": 1
},
"berlin": {
"name": "Berlin",
"url": BASE + "Liste_der_Mitglieder_des_Abgeordnetenhauses_von_Berlin_(18._Wahlperiode)",
"index": 0
},
"thueringen": {
"name": "Thüringen",
"url": BASE + "Liste_der_Mitglieder_des_Th%C3%BCringer_Landtags_(7._Wahlperiode)",
"index": 2
},
"bremen": {
"name": "Bremen",
"url": BASE + "Liste_der_Mitglieder_der_Bremischen_B%C3%BCrgerschaft_(20._Wahlperiode)",
"index": 0
},
"sachsen-anhalt": {
"name": "Sachsen-Anhalt",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_Sachsen-Anhalt_(7._Wahlperiode)",
"index": 2
},
"bayern": {
"name": "Bayern",
"url": BASE + "Liste_der_Mitglieder_des_Bayerischen_Landtags_(18._Wahlperiode)",
"index": 1
},
"rlp": {
"name": "Rheinland-Pfalz",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_Rheinland-Pfalz_(17._Wahlperiode)",
"index": 2
},
"hessen": {
"name": "Hessen",
"url": BASE + "Liste_der_Mitglieder_des_Hessischen_Landtags_(20._Wahlperiode)",
"index": 2
},
"niedersachsen": {
"name": "Niedersachsen",
"url": BASE + "Liste_der_Mitglieder_des_Nieders%C3%A4chsischen_Landtages_(18._Wahlperiode)",
"index": 2
},
"nrw": {
"name": "Nordrhein-Westfalen",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_Nordrhein-Westfalen_(17._Wahlperiode)",
"index": 4
},
"sh": {
"name": "Schleswig-Holstein",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_Schleswig-Holstein_(19._Wahlperiode)",
"index": 2
},
"saarland": {
"name": "Saarland",
"url": BASE + "Liste_der_Mitglieder_des_Landtages_des_Saarlandes_(16._Wahlperiode)",
"index": 3
},
"bundestag": {
"name": "Bundestag",
"url": BASE + "Liste_der_Mitglieder_des_Deutschen_Bundestages_(19._Wahlperiode)",
"index": 3
},
"eu": {
"name": "EU-Parlament",
"url": BASE + "Liste_der_deutschen_Abgeordneten_zum_EU-Parlament_(2019%E2%80%932024)",
"index": 1
}
}
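# A minimal illustrative use of the "index" field (a sketch, not part of this
# module; assumes pandas with an HTML parser is available): the index selects
# which HTML table on the page holds the member list.
#
#   import pandas as pd
#   cfg = parliaments["bundestag"]
#   members = pd.read_html(cfg["url"])[cfg["index"]]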
| 2.921875
| 3
|
examples/z_segmenting_accurately.py
|
thejasvibr/itsfm
| 1
|
12780802
|
"""
Segmenting real-world sounds correctly with synthetic sounds
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's easy to figure out if a sound is being correctly segmented if the
signal at hand is well defined, and repeatable, like in many technological/
engineering applications. However, in bioacoustics, or
a more open-ended field recording situation, it can be very hard
to know the kind of signal that'll be recorded, or what its
parameters are.
Just because an output is produced by the package, it doesn't
always lead to a meaningful result. Given a set of parameters,
any function will produce some output, whether or not it is sensible. This
means, with one set of parameters/methods the CF segment might
be 10ms long, while with another more lax parameter set it might
be 20ms long! Remember, as always, `GIGO <https://en.wikipedia.org/wiki/Garbage_in,_garbage_out>`_ (Garbage In, Garbage Out):P.
So how do we segment a sound into CF and FM segments
accurately?
Synthetic calls to the rescue
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Synthetic calls are sounds that we know to have specific properties
and can be used to test if a parameter set/ segmentation method
is capable of correctly segmenting our real-world sounds and
uncovering the true underlying properties.
The `simulate_calls` module has a bunch of helper functions
which allow the creation of FM sweeps, constant frequency
tones and silences. In combination, these can be used to
get a feeling for which segmentation methods and parameter sets
work well for your real-world sound (bat, bird, cat, <insert sound source of choice>)
Generating a 'classical' CF-FM bat call
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from itsfm.simulate_calls import make_cffm_call,make_tone, make_fm_chirp, silence
from itsfm.view_horseshoebat_call import visualise_call
from itsfm.segment_horseshoebat_call import segment_call_into_cf_fm
from itsfm.signal_processing import dB, rms
fs = 96000
call_props = {'cf':(40000, 0.01),
'upfm':(38000,0.002),
'downfm':(30000,0.003)}
cffm_call, freq_profile = make_cffm_call(call_props, fs)
cffm_call *= signal.tukey(cffm_call.size, 0.1)
w,s = visualise_call(cffm_call, fs, fft_size=128)
# %%
# Remember, the terminal frequencies and durations of the CF-FM calls can be adjusted to the
# calls of your species of interest!!
# %%
# A multi-component bird call
# >>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# Let's make a sound with two FMs and CFs, and gaps in between
fs = 44100
fm1 = make_fm_chirp(1000, 5000, 0.01, fs)
cf1 = make_tone(5000, 0.005, fs)
fm2 = make_fm_chirp(5500, 9000, 0.01, fs)
cf2 = make_tone(8000, 0.005, fs)
gap = silence(0.005, fs)
synth_birdcall = np.concatenate((gap,
fm1, gap,
cf1, gap,
fm2, gap,
cf2,
gap))
w, s = visualise_call(synth_birdcall, fs, fft_size=64)
# %%
# Let there be Noise
# >>>>>>>>>>>>>>>>>>
#
# Any kind of field recording *will* have some form of noise. Each of the
# segmentation methods is differently susceptible to noise, and it's
# a good idea to test how well they can tolerate it. For starters, let's
# just add white noise and simulate different signal-to-noise ratios (SNR).
noisy_bird_call = synth_birdcall.copy()
noisy_bird_call += np.random.normal(0,10**(-10/20), noisy_bird_call.size)
noisy_bird_call /= np.max(np.abs(noisy_bird_call)) # keep sample values between +/- 1
# %%
# Estimate an approximate SNR by looking at the rms of the gaps to that of
# a song component
level_background = dB(rms(noisy_bird_call[:gap.size]))
level_song = dB(rms(noisy_bird_call[gap.size:2*gap.size]))
snr_approx = level_song-level_background
print('The SNR is approximately: %f'%np.around(snr_approx))
w, s = visualise_call(noisy_bird_call, fs, fft_size=64)
# %%
# We could try to run the segmentation + measurement on a noisy sound straight away,
# but this might lead to poor measurements. Now, let's bandpass the audio
# to remove the ambient noise outside of the song's range.
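# %%
# A minimal band-pass sketch (an illustration added here, not part of the
# original example): the synthesised components above lie between roughly
# 1 and 9 kHz, so a 500 Hz - 10 kHz Butterworth band-pass is one reasonable,
# assumed choice.
nyquist = fs / 2.0
b, a = signal.butter(4, [500.0 / nyquist, 10000.0 / nyquist], btype='bandpass')
bandpassed_bird_call = signal.filtfilt(b, a, noisy_bird_call)
w, s = visualise_call(bandpassed_bird_call, fs, fft_size=64)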
| 2.609375
| 3
|
code/cloudmanager/install/hws/hws_util.py
|
Hybrid-Cloud/cloud_manager
| 0
|
12780803
|
<filename>code/cloudmanager/install/hws/hws_util.py
import os
from heat.engine.resources.hwcloud.hws_service.hws_client import HWSClient
from heat.engine.resources.cloudmanager.util.retry_decorator import RetryDecorator
from heat.openstack.common import log as logging
from heat.engine.resources.cloudmanager.util.cloud_manager_exception import *
from heat.engine.resources.cloudmanager.exception import *
import time
import heat.engine.resources.cloudmanager.constant as constant
from heat.engine.resources.cloudmanager.commonutils import *
RSP_STATUS = "status"
RSP_BODY = "body"
RSP_STATUS_OK = "2"
MAX_RETRY = 50
#unit=second
SLEEP_TIME = 3
MAX_CHECK_TIMES = 2000
LOG = logging.getLogger(__name__)
def start_hws_gateway(host_ip, user, passwd):
execute_cmd_without_stdout(
host=host_ip, user=user, password=<PASSWORD>,
cmd='cd %(dis)s; sh %(script)s start'
% {"dis": constant.PatchesConstant.REMOTE_HWS_SCRIPTS_DIR,
"script":
constant.PatchesConstant.START_HWS_GATEWAY_SCRIPT}
)
def stop_hws_gateway(host_ip, user, password):
LOG.info("start hws java gateway ...")
execute_cmd_without_stdout(
host=host_ip, user=user, password=password,
cmd='cd %(dis)s; sh %(script)s stop'
% {"dis": constant.PatchesConstant.REMOTE_HWS_SCRIPTS_DIR,
"script":
constant.PatchesConstant.START_HWS_GATEWAY_SCRIPT}
)
class HwsInstaller(object):
def __init__(self, ak, sk, region, protocol, host, port, project_id):
self.hws_client = HWSClient(ak, sk, region, protocol, host, port)
self.project_id = project_id
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create vm"))
def create_vm(self, image_ref, flavor_ref, name, vpcid, nics_subnet_list, root_volume_type,availability_zone,
personality_path=None, personality_contents=None, adminPass=None, public_ip_id=None, count=None,
data_volumes=None, security_groups=None, key_name=None):
result = self.hws_client.ecs.create_server(self.project_id, image_ref, flavor_ref, name, vpcid, nics_subnet_list, root_volume_type,
availability_zone, personality_path, personality_contents, adminPass, public_ip_id, count,
data_volumes, security_groups, key_name)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create cascaded vm")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete vm"))
def delete_vm(self, server_id_list, delete_public_ip, delete_volume):
result = self.hws_client.ecs.delete_server\
(self.project_id, server_id_list, delete_public_ip, delete_volume)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete cascaded vm")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create vm"))
def create_vpc(self, name, cidr):
result = self.hws_client.vpc.create_vpc(self.project_id, name, cidr)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create vpc")
return result[RSP_BODY]["vpc"]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete vpc"))
def delete_vpc(self, vpc_id):
result = self.hws_client.vpc.delete_vpc(self.project_id, vpc_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete vpc")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create subnet"))
def create_subnet(self, name, cidr, availability_zone, gateway_ip, vpc_id,
dhcp_enable=None, primary_dns=None, secondary_dns=None):
result = self.hws_client.vpc.create_subnet(self.project_id, name, cidr,
availability_zone, gateway_ip, vpc_id,
dhcp_enable, primary_dns, secondary_dns)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create subnet")
return result[RSP_BODY]["subnet"]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete subnet"))
def delete_subnet(self, vpc_id, subnet_id):
result = self.hws_client.vpc.delete_subnet(self.project_id, vpc_id, subnet_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete subnet")
return result[RSP_BODY]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get job detail"))
def get_job_detail(self, job_id):
result = self.hws_client.vpc.get_job_detail(self.project_id, job_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get job detail")
return result[RSP_BODY]
def block_until_delete_resource_success(self, job_id):
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
raise InstallCascadedFailed(current_step="delete resource")
elif status == "SUCCESS":
return
else:
time.sleep(3)
pass
def block_until_create_vm_success(self, job_id):
server_id = None
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
break
elif status == "SUCCESS":
server_id = result['entities']['sub_jobs'][0]["entities"]["server_id"]
break
else:
time.sleep(SLEEP_TIME)
if server_id is None:
raise InstallCascadedFailed(current_step="create vm")
return server_id
def block_until_create_nic_success(self, job_id):
nic_id = None
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
break
elif status == "SUCCESS":
nic_id = result['entities']['sub_jobs'][0]["entities"]["nic_id"]
break
else:
time.sleep(3)
if nic_id is None:
raise InstallCascadedFailed(current_step="create nic")
return nic_id
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get server nics info"))
def get_all_nics(self, server_id):
result = self.hws_client.ecs.get_all_nics(self.project_id, server_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="get server incs info")
return result[RSP_BODY]["interfaceAttachments"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get server ips"))
def get_server_ips(self, server_id):
result = self.hws_client.ecs.get_server_ips(self.project_id, server_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="get server ips")
return result[RSP_BODY]["interfaceAttachments"]["fixed_ips"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get free public ip"))
def alloc_public_ip(self, name):
result = self.hws_client.vpc.list_public_ips(self.project_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get free public ip")
free_ip = None
public_ips = result[RSP_BODY]["publicips"]
for ip in public_ips:
if ip["status"] == "DOWN":
free_ip = ip
return free_ip
if free_ip is None:
publicip = dict()
bandwidth = dict()
publicip["type"]="5_bgp"
bandwidth["name"]=name
bandwidth["size"]=100
bandwidth["share_type"]="PER"
bandwidth["charge_mode"]= "traffic"
result = self.hws_client.vpc.create_public_ip(self.project_id, publicip, bandwidth)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create public ip")
free_ip = result[RSP_BODY]["publicip"]
return free_ip
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="release public ip"))
def release_public_ip(self, public_ip_id):
result = self.hws_client.vpc.delete_public_ip(self.project_id, public_ip_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="release public ip")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get security group"))
def get_security_group(self, vpc_id):
opts = dict()
opts["vpc_id"] = vpc_id
result = self.hws_client.vpc.list_security_groups(self.project_id,opts = opts)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get security group")
security_groups = result[RSP_BODY]["security_groups"]
for security_group in security_groups:
if security_group["name"] == "default":
return security_group["id"]
return security_groups[0]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create security group rule"))
def create_security_group_rule(self, security_group_id, direction, ethertype):
result = self.hws_client.vpc.create_security_group_rule(
self.project_id, security_group_id, direction, ethertype)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create security group rule")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get_external_api_port_id"))
def get_external_api_port_id(self, server_id, external_api_nic_id):
result = self.hws_client.ecs.get_nic_info(self.project_id, server_id, external_api_nic_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get_external_api_port_id")
interfaceAttachment = result[RSP_BODY]["interfaceAttachment"]
return interfaceAttachment["port_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="bind public ip to cascaded"))
def bind_public_ip(self, public_ip_id, port_id):
result = self.hws_client.vpc.bind_public_ip(
self.project_id, public_ip_id, port_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="bind public ip to cascaded")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="add nics to vm"))
def add_nics(self, server_id, subnet_id, security_groups, ip_address = None):
nic = dict()
nic["subnet_id"] = subnet_id
nic["security_groups"] = security_groups
if ip_address:
nic["ip_address"] = ip_address
nics = [nic]
result = self.hws_client.ecs.add_nics(
self.project_id, server_id, nics)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="add nics to cascaded")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="reboot cascaded"))
def reboot(self, server_id, type):
result = self.hws_client.ecs.reboot(self.project_id, server_id, type)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="reboot cascaded")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="unbound vpn ip-mac"))
def unbound_ip_mac(self, port_id, mac_address):
allowed_address_pairs = []
#allow all ip_addresses to access
pair1={"ip_address":"0.0.0.1/1",
"mac_address":mac_address}
pair2={"ip_address":"172.16.17.32/1",
"mac_address":mac_address}
allowed_address_pairs.append(pair1)
allowed_address_pairs.append(pair2)
result = self.hws_client.vpc.update_port(port_id, allowed_address_pairs=allowed_address_pairs)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="unbound vpn ip-mac")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get image id"))
def get_image_id(self, name):
result = self.hws_client.ims.list(name=name)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get image id")
image_id = result[RSP_BODY]["images"][0]["id"]
return image_id
| 1.890625
| 2
|
tests/conftest.py
|
eleme/meepo
| 50
|
12780804
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
logging.basicConfig(level=logging.DEBUG)
import json
import os
import uuid
import pymysql
import pytest
import redis
from meepo._compat import urlparse
@pytest.fixture(scope="session")
def conf():
"""Try load local conf.json
"""
fname = os.path.join(os.path.dirname(__file__), "conf.json")
if os.path.exists(fname):
with open(fname) as f:
return json.load(f)
@pytest.fixture(scope="session")
def redis_dsn(request, conf):
"""Redis server dsn
"""
redis_dsn = conf["redis_dsn"] if conf else "redis://localhost:6379/1"
def fin():
r = redis.Redis.from_url(redis_dsn, socket_timeout=1)
r.flushdb()
request.addfinalizer(fin)
return redis_dsn
@pytest.fixture(scope="module")
def mysql_dsn(conf):
"""MySQL server dsn
This fixture will init a clean meepo_test database with a 'test' table
"""
logger = logging.getLogger("fixture_mysql_dsn")
dsn = conf["mysql_dsn"] if conf else \
"mysql+pymysql://root@localhost/meepo_test"
# init database
parsed = urlparse(dsn)
db_settings = {
"host": parsed.hostname,
"port": parsed.port or 3306,
"user": parsed.username,
"passwd": <PASSWORD>
}
conn = pymysql.connect(**db_settings)
cursor = conn.cursor()
conn.begin()
cursor.execute("DROP DATABASE IF EXISTS meepo_test")
cursor.execute("CREATE DATABASE meepo_test")
cursor.execute("DROP TABLE IF EXISTS meepo_test.test")
cursor.execute('''CREATE TABLE meepo_test.test (
id INT NOT NULL AUTO_INCREMENT,
data VARCHAR (256) NOT NULL,
PRIMARY KEY (id)
)''')
cursor.execute("RESET MASTER")
conn.commit()
logger.debug("executed")
# release conn
cursor.close()
conn.close()
return dsn
@pytest.fixture(scope="function")
def mock_session():
class MockSession(object):
def __init__(self):
self.meepo_unique_id = uuid.uuid4().hex
self.info = {"name": "mock"}
return MockSession()
| 1.9375
| 2
|
examples/e12.py
|
pepprseed/svgdatashapes
| 11
|
12780805
|
<reponame>pepprseed/svgdatashapes<gh_stars>10-100
import svgdatashapes as s
import svgdatashapes_dt as sdt # for date/time support
def example12(): # Secchi depth readings plot with reversed Y axis
depthdata = [
('09/21/2016', 6.60), ('09/19/2016', 6.20), ('09/08/2016', 4.85), ('09/01/2016', 6.00),
('08/18/2016', 7.00), ('08/09/2016', 7.60), ('08/03/2016', 7.10), ('07/28/2016', 7.25),
('07/22/2016', 8.10), ('07/14/2016', 8.65), ('07/08/2016', 9.95), ('06/29/2016', 9.60),
('06/22/2016', 9.40), ('06/16/2016', 8.60), ('06/9/2016', 8.40), ('06/02/2016', 8.30),
('05/26/2016', 8.40), ('05/19/2016', 7.85), ('05/11/2016', 7.95), ('05/05/2016', 7.70),
('04/28/2016', 7.85), ('04/19/2016', 7.15), ('03/30/2016', 7.20) ]
s.svgbegin( width=800, height=220 )
sdt.dateformat( '%m/%d/%Y' )
s.settext( color='#777', style='font-family: sans-serif; font-weight: bold;' )
s.setline( color='#777' )
# find our date range for X and build some stubs
xrange = sdt.daterange( column=0, datarows=depthdata, nearest='month', inc='month',
stubformat='%b', inc2='year', stub2format=' %Y' )
# set up X space...
s.xspace( svgrange=(100,750), datarange=xrange )
# find Y max and set up reversed Y space (0 at top)
for dp in depthdata:
s.findrange( testval=dp[1] )
yrange = s.findrange( finish=True, addlpad=1 )
s.yspace( svgrange=(60,180), datarange=(0,yrange.axmax), reverse=True )
# render axes...
s.xaxis( stublist=xrange.stublist, tics=8 )
s.setline( color='#cfc' )
s.yaxis( tics=8, grid=True )
s.setline( color='#777' )
s.plotdeco( title='Secchi depth readings indicating water clarity: Stormy Lake', ylabel='Depth (m)', outline=True )
# render the blue depth lines...
s.setline( color='#99f', width=2 )
for dp in depthdata:
xloc = sdt.toint(dp[0])
s.line( x1=xloc, y1=0.0, x2=xloc, y2=dp[1] )
s.datapoint( x=xloc, y=dp[1], diameter=5, color='#99f' )
# return the svg. The caller could then add it in to the rendered HTML.
return s.svgresult()
| 2.359375
| 2
|
ques3.py
|
shreya643/test_feb_19
| 0
|
12780806
|
<gh_stars>0
'''Q3. Create a Employee class and initialize it with first_name, last_name and salary. Also, it has a derived attribute called email, which is self generated when instance is created. Now, make methods to :
a. Display - It should display all information of the employee instance.'''
class Employee:
def __init__(self,first_name,last_name,salary):
self.first_name=first_name
self.last_name=last_name
self.salary=salary
def emailGen(self):
self.email=self.first_name+'.'+self.last_name+'<EMAIL>'
def display(self):
print ("First name: {}".format(self.first_name))
print ("Last Name:{}".format(self.last_name))
print("Salay:{}".format(self.salary))
print ("Email:{}".format(self.email))
emp1=Employee('Shreya','Sapkota',800)
emp1.emailGen()
emp1.display()
| 4.3125
| 4
|
rebase/api/predicter.py
|
rebaseenergy/rebase-sdk
| 0
|
12780807
|
import rebase as rb
import pickle
from datetime import datetime
import rebase.util.api_request as api_request
class Predicter():
@classmethod
def load_data(cls, pred, start_date, end_date):
site_config = rb.Site.get(pred.site_id)
return pred.load_data(site_config, start_date, end_date)
@classmethod
def load_latest_data(cls, predicter):
Predicter.load_data(predicter)
@classmethod
def train(cls, pred, params, start_date, end_date):
weather_df, observation_df = Predicter.load_data(pred, start_date, end_date)
dataset = pred.preprocess(weather_df, observation_df)
return pred.train(dataset, params)
@classmethod
def hyperparam_search(self, pred, params_list):
models = []
for p in params_list:
model, score = pred.train(dataset, p)
@classmethod
def deploy(cls, pred):
print("Deploying {}".format(pred.name))
path = 'platform/v1/site/train/{}'.format(pred.site_id)
response = api_request.post(path)
if response.status_code == 200:
print("Success!")
else:
print("Failed")
@classmethod
def predict(cls, pred):
Predicter.load_latest_data()
@classmethod
def status(cls, pred):
path = '/platform/v1/site/train/state/{}'.format(pred.site_id)
r = api_request.get(path)
status = {'status': None, 'history': []}
if r.status_code == 200:
data = r.json()
if len(data) > 0:
status['status'] = data[-1]['state']
status['history'] = data
return status
class Model():
def setup(self):
pass
def load_data(self, site_config, start_date, end_date):
"""This method should load the data for training
Args:
site_config (dict): config for the site
start_date (datetime): the start date for the period
end_date (datetime): the end date for the period
Returns:
- pd.DataFrame: one df
- pd.DataFrame: one df
"""
raise NotImplementedError(
'Your subclass must implement the load_data() method'
)
def load_latest_data(self, site_config):
"""This method should load the predict data for training
Args:
site_config (dict): config for the site
Returns:
"""
raise NotImplementedError(
'Your subclass must implement the load_data() method'
)
def preprocess(self, weather_data, observation_data=None):
raise NotImplementedError(
'Your subclass must implement the preprocess() method'
)
def train(self, train_set, params={}):
raise NotImplementedError(
'Your subclass must implement the train() method'
)
# weather_df - weather for a ref time
# target_observations - like recent production power, could be used for intraday
def predict(self, predict_set):
raise NotImplementedError(
'Your subclass must implement the predict() method'
)
# serialize() should be overriden with custom serialization
# method if @param model can't be pickled
def serialize(self, model):
return pickle.dumps(model)
# deserialize() should be overriden with custom deserialization method
# if @param serialized_model can't be loaded from pickle
def deserialize(self, serialized_model):
return pickle.loads(serialized_model)
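# A minimal illustrative subclass (a sketch, not part of the original module;
# the 'target' column name and the join-on-index step are assumptions, and the
# data-loading methods are omitted for brevity):
class MeanModel(Model):
    def preprocess(self, weather_data, observation_data=None):
        # Align weather features with observed targets on their shared index.
        return weather_data.join(observation_data, how='inner')

    def train(self, train_set, params={}):
        # The "model" is just the mean of the assumed 'target' column; keep a
        # reference so predict() can reuse it.
        self.model = train_set['target'].mean()
        return self.model

    def predict(self, predict_set):
        # Ignore the features and return the stored mean for every row.
        return [self.model] * len(predict_set)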
| 2.421875
| 2
|
setup.py
|
MrBurtyyy/yoti-python-sdk
| 0
|
12780808
|
<gh_stars>0
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from yoti_python_sdk import __version__
long_description = (
"This package contains the tools you need to quickly "
"integrate your Python back-end with Yoti, so that your "
"users can share their identity details with your "
"application in a secure and trusted way."
)
setup(
name="yoti",
version=__version__,
packages=find_packages(),
license="MIT",
description="The Yoti Python SDK, providing API support for Login, Verify (2FA) and Age Verification.",
long_description=long_description,
url="https://github.com/getyoti/yoti-python-sdk",
author="Yoti",
author_email="<EMAIL>",
install_requires=[
"cryptography>=2.2.1",
"protobuf>=3.1.0",
"requests>=2.11.1",
"future>=0.11.0",
"asn1==2.2.0",
"pyopenssl>=18.0.0",
],
extras_require={
"examples": [
"Django>1.11.16",
"Flask>=0.10",
"python-dotenv>=0.7.1",
"django-sslserver>=0.2",
"Werkzeug==0.11.15",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="yoti sdk 2FA multifactor authentication verification identity login register verify 2Factor",
)
| 1.304688
| 1
|
gupb/controller/shallow_mind/utils.py
|
syforcee/GUPB
| 0
|
12780809
|
from typing import Tuple, List
from gupb.model.characters import Action
def points_dist(cord1, cord2):
return int(((cord1.x - cord2.x) ** 2 +
(cord1.y - cord2.y) ** 2) ** 0.5)
def get_first_possible_move(moves: List[Tuple[Action, int]]):
return next((next_move for next_move in moves if next_move[0] != Action.DO_NOTHING), (Action.DO_NOTHING, -1))
| 2.640625
| 3
|
src/TrussModel.py
|
somu15/Small_Pf_code
| 0
|
12780810
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 15:24:18 2020
@author: dhulls
"""
from anastruct import SystemElements
import numpy as np
class TrussModel:
def HF(self, young1=None, young2=None, area1=None, area2=None, P1=None, P2=None, P3=None, P4=None, P5=None, P6=None):
ss = SystemElements()
# young1 = 2.1e11
# area1 = 2e-3
# young2 = 2.1e11
# area2 = 1e-3
ss.add_truss_element(location=[[0, 0], [4,0]], EA=(area1*young1))
ss.add_truss_element(location=[[4, 0], [8,0]], EA=(area1*young1))
ss.add_truss_element(location=[[8, 0], [12,0]], EA=(area1*young1))
ss.add_truss_element(location=[[12, 0], [16,0]], EA=(area1*young1))
ss.add_truss_element(location=[[16, 0], [20,0]], EA=(area1*young1))
ss.add_truss_element(location=[[20, 0], [24,0]], EA=(area1*young1))
ss.add_truss_element(location=[[2, 2], [6,2]], EA=(area1*young1))
ss.add_truss_element(location=[[6, 2], [10,2]], EA=(area1*young1))
ss.add_truss_element(location=[[10, 2], [14,2]], EA=(area1*young1))
ss.add_truss_element(location=[[14, 2], [18,2]], EA=(area1*young1))
ss.add_truss_element(location=[[18, 2], [22,2]], EA=(area1*young1))
ss.add_truss_element(location=[[0, 0], [2,2]], EA=(area2*young2))
ss.add_truss_element(location=[[2,2], [4,0]], EA=(area2*young2))
ss.add_truss_element(location=[[4,0], [6,2]], EA=(area2*young2))
ss.add_truss_element(location=[[6,2], [8,0]], EA=(area2*young2))
ss.add_truss_element(location=[[8,0], [10,2]], EA=(area2*young2))
ss.add_truss_element(location=[[10,2], [12,0]], EA=(area2*young2))
ss.add_truss_element(location=[[12,0], [14,2]], EA=(area2*young2))
ss.add_truss_element(location=[[14,2], [16,0]], EA=(area2*young2))
ss.add_truss_element(location=[[16,0], [18,2]], EA=(area2*young2))
ss.add_truss_element(location=[[18,2], [20,0]], EA=(area2*young2))
ss.add_truss_element(location=[[20,0], [22,2]], EA=(area2*young2))
ss.add_truss_element(location=[[22,2], [24,0]], EA=(area2*young2))
ss.add_support_hinged(node_id=1)
ss.add_support_roll(node_id=7, direction='x')
# P1 = -5e4
# P2 = -5e4
# P3 = -5e4
# P4 = -5e4
# P5 = -5e4
# P6 = -5e4
ss.point_load(node_id=8, Fy=P1)
ss.point_load(node_id=9, Fy=P2)
ss.point_load(node_id=10, Fy=P3)
ss.point_load(node_id=11, Fy=P4)
ss.point_load(node_id=12, Fy=P5)
ss.point_load(node_id=13, Fy=P6)
ss.solve()
# ss.show_structure()
# ss.show_displacement(factor=10)
K = ss.get_node_results_system(node_id=4)['uy']
return np.array(K)
def LF(self, young1=None, young2=None, area1=None, area2=None, P1=None, P2=None, P3=None, P4=None, P5=None, P6=None):
ss = SystemElements()
# young1 = 2.1e11
# area1 = 2e-3
# young2 = 2.1e11
# area2 = 1e-3
ss.add_truss_element(location=[[0, 0], [12,0]], EA=(area1*young1))
ss.add_truss_element(location=[[12, 0], [24,0]], EA=(area1*young1))
ss.add_truss_element(location=[[6, 2], [18,2]], EA=(area1*young1))
ss.add_truss_element(location=[[0, 0], [6,2]], EA=(area2*young2))
ss.add_truss_element(location=[[6,2], [12,0]], EA=(area2*young2))
ss.add_truss_element(location=[[12,0], [18,2]], EA=(area2*young2))
ss.add_truss_element(location=[[18,2], [24,0]], EA=(area2*young2))
ss.add_support_hinged(node_id=1)
ss.add_support_roll(node_id=3, direction='x')
# P1 = -5e4
# P2 = -5e4
# P3 = -5e4
# P4 = -5e4
# P5 = -5e4
# P6 = -5e4
ss.point_load(node_id=4, Fy=np.sum([P1,P2,P3]))
ss.point_load(node_id=5, Fy=np.sum([P4,P5,P6]))
ss.solve()
# ss.show_structure()
# ss.show_displacement(factor=10)
K = ss.get_node_results_system(node_id=4)['uy']
return np.array(K)
| 2.40625
| 2
|
research/realty_type_quantities.py
|
Ramilll/raifhack_ds
| 0
|
12780811
|
<reponame>Ramilll/raifhack_ds<gh_stars>0
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data_dir = '../data/'
train = pd.read_csv(data_dir + 'train.csv')
rt = train.realty_type
counts = rt.groupby(rt).count().rename('count').reset_index()
fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(data=counts, x='realty_type', y='count', ax=ax)
fig.savefig('plot/realty_type_distribution.jpg')
| 2.515625
| 3
|
file/models.py
|
CaptainMorch/TJU_Cats
| 0
|
12780812
|
from django.db import models
from django.urls import reverse
from datetime import date
# Create your models here.
class Photo(models.Model):
"""猫猫相片的数据库模型"""
image = models.ImageField(
'图像',
upload_to='image/'
)
title = models.CharField('标题', blank=True, max_length=8)
description = models.TextField('图片描述', blank=True)
author = models.ForeignKey(
'campus.User',
verbose_name='拍摄者',
on_delete=models.SET_NULL,
null = True,
blank = True,
related_name = 'photos',
related_query_name = 'photo'
)
author_name = models.CharField('拍摄者名称', max_length=16, blank=True)
date = models.DateField('拍摄日期', default=date.today, null=True, blank=True)
cats = models.ManyToManyField(
'cat.Cat',
verbose_name='出镜猫猫们',
related_name='photos',
related_query_name='photo'
)
class Meta:
verbose_name = '相片'
verbose_name_plural = '相片'
def __str__(self):
name = ''
if self.cats.count() < 3:
for cat in self.cats.all():
name = name + str(cat) + '-'
else:
cats = self.cats.all()
name = str(cats[0]) + '-...-' + str(cats[1]) + '-'
if self.title:
name = name + self.title + '-'
if self.date:
name = name + str(self.date.year) + '-'
return name[:-1]
def get_absolute_url(self):
        return reverse('file:photo', kwargs={'pk': self.pk})
def get_author(self):
"""拍摄者名称"""
if self.author:
return self.author.username
elif self.author_name:
return self.author_name
else:
return '佚名'
| 2.59375
| 3
|
datasets/NIH.py
|
tnaren3/pytorch-adda
| 0
|
12780813
|
<filename>datasets/NIH.py
import os
import csv
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
from torchvision import datasets, transforms
import params
class NIH(data.Dataset):
def __init__(self, root, train=True, val=False, transform=None):
"""Init NIH dataset."""
# init params
self.root = os.path.expanduser(root)
self.train = train
self.val = val
self.transform = transform
self.dataset_size = None
self.train_data, self.train_labels = self.load_samples()
if self.train:
total_num_samples = self.train_labels.shape[0]
indices = np.arange(total_num_samples)
np.random.shuffle(indices)
self.train_data = self.train_data[indices[0:self.dataset_size]]
self.train_labels = self.train_labels[indices[0:self.dataset_size]]
def __getitem__(self, index):
"""Get images and target for data loader.
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, label = self.train_data[index], self.train_labels[index]
if self.transform is not None:
img = self.transform(img)
label = torch.FloatTensor([np.int64(label[0]).item(), np.int64(label[1]).item(), np.int64(label[2]).item(),
np.int64(label[3]).item(), np.int64(label[4]).item()])
return img, label
def __len__(self):
"""Return size of dataset."""
return self.dataset_size
def load_samples(self):
"""Load sample images from dataset."""
numtr = 126
numts = 60
numvl = 14
data_root = os.path.join(self.root, 'NIH')
path = os.path.join(data_root, 'images')
images = []
labels = []
if self.val:
val_info = csv.reader(open(os.path.join(data_root, 'multi-val-split.csv'), 'r'))
for count, row in enumerate(val_info):
if count == numvl:
break
image = np.repeat(np.array(Image.open(os.path.join(path, row[0])).convert('L').resize((224, 224)))[..., np.newaxis], 3, -1)
images.append(image)
#labels.append(row[1])
labels.append(np.array([row[1], row[2], row[3], row[4], row[5]]))
elif self.train:
train_info = csv.reader(open(os.path.join(data_root, 'multi-train-split.csv'), 'r'))
for count, row in enumerate(train_info):
if count == numtr:
break
image = np.repeat(np.array(Image.open(os.path.join(path, row[0])).convert('L').resize((224, 224)))[..., np.newaxis], 3, -1)
images.append(image)
#labels.append(row[1])
labels.append(np.array([row[1], row[2], row[3], row[4], row[5]]))
elif not self.val and not self.train:
test_info = csv.reader(open(os.path.join(data_root, 'multi-test-split.csv'), 'r'))
for count, row in enumerate(test_info):
if count == numts:
break
image = np.repeat(np.array(Image.open(os.path.join(path, row[0])).convert('L').resize((224, 224)))[..., np.newaxis], 3, -1)
images.append(image)
#labels.append(row[1])
labels.append(np.array([row[1], row[2], row[3], row[4], row[5]]))
images = np.asarray(images)
labels = np.asarray(labels)
self.dataset_size = labels.shape[0]
return images, labels
def get_nih(train, val):
"""Get nih dataset loader."""
# image pre-processing
pre_process = transforms.Compose([transforms.ToTensor(),
#transforms.Normalize(
#mean=params.dataset_mean,
#std=params.dataset_std)])
])
# dataset and data loader
nih_dataset = NIH(root=params.data_root,
train=train,
val=val,
transform=pre_process)
nih_data_loader = torch.utils.data.DataLoader(
dataset=nih_dataset,
batch_size=params.batch_size,
shuffle=True,
drop_last = True)
return nih_data_loader
| 3
| 3
|
src/djai/util/cli/_server_files/asgi.py
|
Django-AI/DjAI
| 3
|
12780814
|
"""
ASGI config for DjAI project.
It exposes the ASGI callable as a module-level variable named ``application``
For more information on this file, see
docs.djangoproject.com/en/dev/howto/deployment/asgi
"""
# ref: django-configurations.readthedocs.io
import os
# from django.core.asgi import get_asgi_application
from configurations.asgi import get_asgi_application
os.environ.setdefault(key='DJANGO_SETTINGS_MODULE', value='settings')
os.environ.setdefault(key='DJANGO_CONFIGURATION', value='Default')
application = get_asgi_application()
| 1.546875
| 2
|
src/utils.py
|
carlps/bikeshare
| 2
|
12780815
|
<filename>src/utils.py
# Common utils used in different scripts
from os import environ
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def get_session(env='DEV', echo=False):
''' Create db connection and sqlalchemy engine
Return a session to interact with db
Echo defaults to False, but if you want for debugging,
just pass echo=True and sql statements will print to console'''
if env == 'DEV':
pg_user = environ['POSTGRES_USER']
pg_pw = environ['POSTGRES_PW']
db = 'bikeshare'
elif env == 'TST':
pg_user = environ['POSTGRES_USER_TST']
pg_pw = environ['POSTGRES_PW_TST']
db = 'bikeshare_tst'
host = 'localhost'
port = '5432'
engine = create_engine(f'postgres://{pg_user}:{pg_pw}@{host}:{port}/{db}',
echo=echo)
Session = sessionmaker(bind=engine)
return Session()
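# Illustrative usage (a sketch; assumes the POSTGRES_* environment variables
# are set and the target database exists):
#   session = get_session(env='TST', echo=True)
#   rows = session.execute('SELECT 1').fetchall()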
if __name__ == '__main__':
print('Why are you running this? It should only be imported.')
| 2.5625
| 3
|
shared/management/commands/settings.py
|
dArignac/shared
| 0
|
12780816
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from pprint import pformat
class Command(BaseCommand):
args = '<setting>'
help = 'Outputs the value of the given setting name'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('Please enter exactly one setting name!')
name = args[0]
if hasattr(settings, name):
self.stdout.write(pformat(getattr(settings, name), indent=4, width=160))
else:
self.stderr.write('no setting with name %s available!' % name)
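# Illustrative invocation from a project that installs this app (not part of
# the original command):
#   python manage.py settings INSTALLED_APPS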
| 2.609375
| 3
|
model.py
|
pedrocarvalhoaguiar/ProjetoP2-Braga-Alessandra
| 0
|
12780817
|
from estruturadedados.avltree import AVL
from estruturadedados.queue import Queue
from biometria.biometria import Biometria as Bio
from bancodedados.paths import *
import json
from os import listdir, remove
class GerenciadorPrincipal():
def __init__(self):
self.gerVacina = GerenciadorVacina()
self.gerPessoas = GerenciadorPessoas()
self.gerBiometria = GerenciadorBiometria()
def cadastrarVacina(self, vacina):
self.gerVacina.cadastrarVacina(vacina)
def cadastrarPessoa(self, pessoa):
self.gerPessoas.cadastrarPessoa(pessoa=pessoa)
def retornarPessoa(self, chave, tipo):
return self.gerPessoas.procurarPessoa(chave, tipo)
def retornarBioNova(self):
return self.gerBiometria.cadastrarBiometria()
def vacinarPessoa(self, pessoa, vacina):
self.gerPessoas.vacinarPessoa(pessoa, vacina)
self.gerVacina.diminuirEstoque(vacina.getLote())
def retornarVacinaValida(self, fab=None):
vacina = self.gerVacina.getVacina(fab=fab)
return vacina
def retornarQuantidadeEstoque(self):
return self.gerVacina.retornarEstoque()
def retornarPessoaBio(self, path):
nomeBio = self.gerBiometria.compararBiometria(path)
if nomeBio:
pessoaB = self.retornarPessoa(nomeBio, 'bio')
return pessoaB
return False
def excluirCadastro(self, pessoa):
self.gerPessoas.excluirPessoa(pessoa)
try:
self.gerBiometria.excluirBiometria(pessoa.getBiometria())
except:
pass
def retornarArvoreVacinas(self):
return self.gerVacina.arvoreVacinas
def retornarArvoreCPF(self):
return self.gerPessoas.arvorePessoasCPF
def retornarArvoreBio(self):
return self.gerPessoas.arvorePessoasBiometria
class GerenciadorPessoas():
def __init__(self):
self.arvorePessoasCPF = AVL()
self.arvorePessoasBiometria = AVL()
self._carregarArvore(VACBIO)
self._carregarArvore(VACCPF)
def _carregarArvore(self, caminho):
arvore, tipoPessoa, lastAtt = self._chooseType(caminho)
try:
with open(f'{caminho}', 'r') as nomeArquivo:
listaPessoas = json.load(nomeArquivo)
for k, v in listaPessoas.items():
chave = k
pessoa = tipoPessoa(v['nome'], v['idade'], v['dose'], v['vacina'], v[f'{lastAtt}'])
arvore.insert(chave, valor=pessoa)
except:
with open(f'{caminho}', 'w') as f:
data = {}
json.dump(data, f, indent=4, ensure_ascii=False)
def cadastrarPessoa(self, pessoa):
arvore, chave, caminho = self._chooseArvore(pessoa=pessoa)
arvore.insert(chave, valor=pessoa)
with open(f'{caminho}', 'r+', encoding='UTF-8') as nomeArquivo:
listaPessoa = json.load(nomeArquivo)
listaPessoa[chave] = pessoa.lineRegistry()
with open(f'{caminho}', 'w', encoding='UTF-8') as nomeArquivo:
json.dump(listaPessoa, nomeArquivo, indent=4, ensure_ascii=False)
def vacinarPessoa(self, pessoa, vacina):
arvore, chave, caminho = self._chooseArvore(pessoa=pessoa)
pArvore = arvore.search(chave)
pArvore.getValor().setDose(1)
pArvore.getValor().setVacina(vacina.fabricante)
with open(f'{caminho}', 'r+', encoding='UTF-8') as nomeArquivo:
listaPessoas = json.load(nomeArquivo)
p = listaPessoas[chave]
p['vacina'] = vacina.getFabricante()
p['dose'] += 1
with open(f'{caminho}', 'w', encoding='UTF-8') as nomeArquivo:
json.dump(listaPessoas, nomeArquivo, indent=4, ensure_ascii=False)
def excluirPessoa(self, pessoa):
arvore, chave, caminho = self._chooseArvore(pessoa=pessoa)
arvore.delete(chave)
with open(f'{caminho}', 'r+', encoding='UTF-8') as nomeArquivo:
listaPessoas = json.load(nomeArquivo)
listaPessoas.pop(chave)
with open(f'{caminho}', 'w', encoding='UTF-8') as nomeArquivo:
json.dump(listaPessoas, nomeArquivo, indent=4, ensure_ascii=False)
def procurarPessoa(self, chave, tipo):
arvore = self._chooseArvore(tipo=tipo)
pessoa = arvore.search(chave)
return pessoa.getValor()
def _chooseType(self, caminho):
arvore = self.arvorePessoasCPF if caminho == VACCPF else self.arvorePessoasBiometria
tipoPessoa = PessoaCPF if caminho == VACCPF else PessoaBiometria
lastAtt = 'cpf' if caminho == VACCPF else 'biometria'
return arvore, tipoPessoa, lastAtt
def _chooseArvore(self, tipo=None, pessoa=None):
if tipo:
arvore = self.arvorePessoasCPF if tipo == 'cpf' else self.arvorePessoasBiometria
return arvore
if pessoa:
arvore = self.arvorePessoasCPF if pessoa.__class__.__name__ == 'PessoaCPF' else self.arvorePessoasBiometria
chave = pessoa.getCpf() if arvore == self.arvorePessoasCPF else pessoa.getBiometria()
path = VACCPF if arvore == self.arvorePessoasCPF else VACBIO
return arvore, chave, path
class Pessoa:
def __init__(self, nome, idade, dose=0, vacina=None):
self.nome = nome
self.idade = idade
self.dose = dose
self.vacina = self.setVacina(vacina)
def isVac(self):
if self.dose > 1:
return True
return False
def getNomeVacina(self):
if self.vacina == 'N/A':
return self.vacina
return self.vacina
def setVacina(self, valor):
if valor == None:
return 'N/A'
else:
return valor
def getDose(self):
return self.dose
def setDose(self, valor):
self.dose += valor
def __repr__(self):
return f'| NOME:{self.nome} \n| IDADE: {self.idade}\n| DOSE VACINA: {self.dose}'
class PessoaCPF(Pessoa):
def __init__(self, nome, idade, dose=0, vacina=None, cpf=0):
super().__init__(nome, idade, dose, vacina)
self.cpf = cpf
def getCpf(self):
return self.cpf
def lineRegistry(self):
return {'nome': self.nome, 'idade': self.idade, 'vacina': self.getNomeVacina(), 'dose': self.dose, 'cpf': self.cpf}
class PessoaBiometria(Pessoa):
def __init__(self, nome, idade, dose=0, vacina=None, biom=0):
super().__init__(nome, idade, dose, vacina)
self.biometria = biom
def getBiometria(self):
return self.biometria
def associarBiometria(self, biometria):
self.biometria = biometria
def lineRegistry(self):
return {'nome': self.nome, 'idade': self.idade, 'vacina': self.getNomeVacina(), 'dose': self.dose, 'biometria': self.biometria}
class GerenciadorBiometria():
def __init__(self):
self.arvoreBiometrias = AVL()
self._carregarArvore()
def cadastrarBiometria(self):
biometria = Bio.criar('_')
self.arvoreBiometrias.insert(str(biometria))
return biometria
def compararBiometria(self, path):
nome = nameFromPath(path)
caminho = caminhoFromPath(path)
biometriaBD = self._procurarBiometria(nome)
if biometriaBD:
biometriaTeste = Bio.leArquivo(nome, path=caminho)
biometriaBD = Bio.leArquivo(biometriaBD.getChave())
arvoreTeste = self._carregarArvoreTeste(biometriaTeste)
arvoreBD = self._carregarArvoreTeste(biometriaBD)
if self._igual(arvoreBD.getRoot(), arvoreTeste.getRoot()):
return nome
return False
def _pegarNomes(self):
nomes = [".".join(f.split(".")[:-1]) for f in listdir(path=BIO) if f.endswith('.json')]
return nomes
def excluirBiometria(self, nome):
remove(f'{BIO}{nome}.json')
self.arvoreBiometrias.delete(nome)
def _carregarArvore(self):
nomes = self._pegarNomes()
self.arvoreBiometrias.inserirLista(nomes)
def _carregarArvoreTeste(self, lista):
arvore = AVL()
arvore.inserirLista(lista)
return arvore
def _procurarBiometria(self, chave):
try:
biometria = self.arvoreBiometrias.search(chave)
except:
return False
return biometria
def _igual(self, p1, p2):
if p1 == None and p2 == None:
return True
if p1 == None or p2 == None:
return False
fila1 = Queue()
fila2 = Queue()
fila1.push(p1)
fila2.push(p2)
count = 0
while not fila1.isEmpty() and not fila2.isEmpty():
pos1 = fila1.first.valor
pos2 = fila2.first.valor
if pos1.getChave() != pos2.getChave():
return False
fila1.pop()
fila2.pop()
count +=1
if count > 40:
return True
if pos1.getLeft() and pos2.getLeft():
fila1.push(pos1.getLeft())
fila2.push(pos2.getLeft())
elif pos1.getLeft() or pos2.getLeft():
return False
if pos1.getRight() and pos2.getRight():
fila1.push(pos1.getRight())
fila2.push(pos2.getRight())
elif pos1.getRight() or pos2.getRight():
return False
return True
class GerenciadorVacina():
def __init__(self):
self.arvoreVacinas = AVL()
self.estoque = 0
self._carregarArvore()
def _carregarArvore(self):
try:
with open(f'{VACI}', 'r', encoding='UTF-8') as nomeArquivo:
listaVacinas = json.load(nomeArquivo)
for k, v in listaVacinas.items():
if v['quantidade'] == 0:
continue
vacina = Vacina(v['fabricante'], v['lote'], v['quantidade'])
self.setEstoque(v['quantidade'])
self.arvoreVacinas.insert(k, valor=vacina)
except:
with open(f'{VACI}', 'w', encoding='UTF-8') as nomeArquivo:
data = {}
json.dump(data, nomeArquivo, indent=4, ensure_ascii=False)
def cadastrarVacina(self, vacina):
self.arvoreVacinas.insert(vacina.getLote(), valor=vacina)
self.setEstoque(vacina.quantidade)
with open(f'{VACI}', 'r+', encoding='UTF-8') as nomeArquivo:
listaVacinas = json.load(nomeArquivo)
listaVacinas[f'{vacina.getLote()}'] = vacina.lineRegistry()
with open(f'{VACI}', 'w', encoding='UTF-8') as nomeArquivo:
json.dump(listaVacinas, nomeArquivo, indent=4, ensure_ascii=False)
def diminuirEstoque(self, lote):
vacina = self.arvoreVacinas.search(lote)
vacina.getValor().setQuantidade(-1)
self.setEstoque(-1)
if not vacina.valor.temVacina():
self.arvoreVacinas.delete(lote)
with open(f'{VACI}', 'r+', encoding='UTF-8') as nomeArquivo:
listaVacinas = json.load(nomeArquivo)
vacina = listaVacinas[lote]
vacina['quantidade'] -= 1
with open(f'{VACI}', 'w', encoding='UTF-8') as nomeArquivo:
json.dump(listaVacinas, nomeArquivo, indent=4, ensure_ascii=False)
def getVacina(self, fab=None):
if self.arvoreVacinas.isEmpty():
return None
if fab == 'N/A':
return self.arvoreVacinas.getRoot().getValor()
for node in self.arvoreVacinas:
if node.getValor().getFabricante() == fab and node.getValor().temVacina():
return node.getValor()
def retornarEstoque(self):
return self.estoque
def setEstoque(self, qnt):
if qnt > 0:
self.estoque += qnt
elif qnt < 0:
self.estoque = self.estoque - 1
else:
self.estoque = 0
class Vacina:
def __init__(self, fab, lote, quantidade=0):
self.fabricante = fab
self.lote = lote
self.quantidade = quantidade
def setQuantidade(self, qnt):
if self.quantidade == 0:
self.quantidade = 0
elif qnt > 0:
self.quantidade += qnt
elif qnt < 0:
self.quantidade = self.quantidade - 1
else:
self.quantidade = 0
def temVacina(self):
if self.quantidade == 0:
return False
return True
def getLote(self):
return self.lote
def getFabricante(self):
return self.fabricante
def lineRegistry(self):
return {'fabricante': self.fabricante, 'lote': self.lote, 'quantidade': self.quantidade}
def __repr__(self):
return f'| Fabricante: {self.fabricante}\n| Quantidade: {self.quantidade}\n| Lote: {self.lote}'
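# --- Illustrative usage sketch (added example; not part of the original system) ---
# The values below are invented purely to show how the Vacina stock rules behave.
if __name__ == '__main__':
    lote_exemplo = Vacina('FabricanteX', 'LOTE-001', quantidade=2)
    print(lote_exemplo.temVacina())   # True: two doses in stock
    lote_exemplo.setQuantidade(-1)    # one dose administered
    lote_exemplo.setQuantidade(-1)    # stock reaches zero
    print(lote_exemplo.temVacina())   # False: the lot is exhausted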
| 2.484375
| 2
|
icevision/models/rcnn/mask_rcnn/fastai/__init__.py
|
lee00286/icevision
| 0
|
12780818
|
<filename>icevision/models/rcnn/mask_rcnn/fastai/__init__.py
from icevision.models.rcnn.mask_rcnn.fastai.callbacks import *
from icevision.models.rcnn.mask_rcnn.fastai.learner import *
| 1.148438
| 1
|
Unet/threshold_analysis.py
|
prediction2020/unet-vessel-segmentation
| 23
|
12780819
|
<reponame>prediction2020/unet-vessel-segmentation
# -*- coding: utf-8 -*-
"""
File name: threshold_analysis.py
Author: <NAME>
Date created: 06/12/2018
The goal of this script is to:
1) Display calibration plots of the pulled Unet and half-Unet output
2) Assuming the output is not calibrated, find the optimal threshold on the classifier output that yields the best F1-score (based on the validation set)
"""
# Import the relevant modules:
import os, glob, sys
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
import nibabel as nib
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
# Define helper functions:
def calc_opt_f1(labels,probs):
# The function gets the classifier's labels(=ground truth), probs (=scores, i.e. classifier output) and possible thresholds
# And returns the best possible F1-score and the threshold that yielded it
from sklearn.metrics import f1_score
thresholds = np.linspace(0.,1.,110) # downsample to avoid long calc. times
    opt_f1 = 0 # we assume the classifier can do better than this...
    opt_thresh = thresholds[0] # default so a threshold is always returned, even if F1 never improves
for thresh in (thresholds):
pred_class = probs>thresh
F1 = f1_score(labels,pred_class)
if F1>opt_f1:
opt_f1 = F1
opt_thresh = thresh
return opt_f1,opt_thresh
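# Quick illustrative check of calc_opt_f1 on tiny synthetic data (values made up
# for this example only): scores that separate the two classes should reach F1 = 1.0
# for some threshold lying between the two groups of scores.
_demo_labels = np.array([0, 0, 1, 1])
_demo_probs = np.array([0.1, 0.4, 0.8, 0.9])
_demo_f1, _demo_thresh = calc_opt_f1(_demo_labels, _demo_probs)
assert abs(_demo_f1 - 1.0) < 1e-9 and 0.4 <= _demo_thresh < 0.8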
# Config the tested architecture: Unet of half-Unet
full_architecture = False # True = Unet / False = half-Unet
# set path to the data, based on the used architecture:
os.chdir('C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration')
if full_architecture:
arctype = 'full'
path = 'C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration\\full_architecture\\results\\val'
else:
arctype = 'half'
path = 'C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration\\half_architecture\\results\\val'
full_path = 'C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration\\full_architecture\\results\\val'
# set path to the data, extract the relevant data-filenames
val_labels_path = 'C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration\\ground_truth_for _calibration\\val'
test_labels_path = 'C:\\Users\\livnem\\OneDrive\\Charite\\Projects\\Jana_segmentation\\for_calibration\\ground_truth_for _calibration\\test'
val_label_files = sorted(os.listdir(val_labels_path))
test_label_files = sorted(os.listdir(test_labels_path))
val_all_files = sorted(os.listdir(path))
val_prob_files = os.listdir(path)
# Get the model filenames based on the extracted data-filenames. The models-filenames consist of the used hyperparameters for that calculated model
models = val_all_files[0:24] # Take only the first 24 files, because after 24, the models repeat for different validation sets
models[:] = [model[15:] for model in models] # extract the models names. Ignore the first 15 characters, that contain the validation-set pt. numbering
# For each model: concatenate flatten validation ground-truth labels to one vector
os.chdir(path)
model = models[0] # start at the first model
val_prob_files = glob.glob('*'+model) # take all the validation-set probability files calculated for this model
labels_full_vec = np.ndarray(0) # initiate the flatten vector
for [i,file] in enumerate(val_prob_files): # for each model, pull (concatenate) all probability maps into one vector (=prob_full_vec) and pull (concatenate) all labels into one vector (=labels_full_vec)
# concatenate flatten validation ground-truth labels to one vector for the specific model (classifier)
os.chdir(val_labels_path)
label_file = val_label_files[i]
label_img = nib.load(label_file)
label_mat = label_img.get_data()
label_vec = label_mat.flatten()
labels_full_vec = np.append(labels_full_vec, label_vec, axis=0)
labels_full_vec = labels_full_vec.round() # Make sure the labels are binary
# Loop over the different models and make calibration plots for the pulled validation-sets per-model
os.chdir(path)
best_f1=0
prob_full_vec = np.ndarray(0) # initiate the flatten vector
for [i,model] in enumerate(models): # For each model, calculate and plot the calibration plots and calculate best thresh based on precision-recall curves
val_prob_files = glob.glob('*'+model)
prob_full_vec = np.ndarray(0)
    for file in val_prob_files:
prob = nib.load(file)
prob_mat = prob.get_data()
prob_vec = prob_mat.flatten()
prob_full_vec = np.append(prob_full_vec, prob_vec, axis=0)
# Sanity-check: make sure that the range of scores is [0,1]
if max(prob_full_vec)>1 or min(prob_full_vec)<0:
        sys.exit('The probability range of the validation set for the model was in the wrong range: [{}, {}]'.format(min(prob_full_vec), max(prob_full_vec)))
# Make precision-recall plot for the model:
average_precision = average_precision_score(labels_full_vec,prob_full_vec)
precision,recall,_ = precision_recall_curve(labels_full_vec,prob_full_vec)
opt_f1,opt_thresh = calc_opt_f1(labels_full_vec,prob_full_vec)
if opt_f1>best_f1:
best_f1 = opt_f1
best_thresh = opt_thresh
best_model = model
print('For the architecture:',arctype)
print('The best F1 score on the validation set was:',best_f1, 'with threshold:',best_thresh)
print('Model:',best_model)
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
average_precision))
# Make calibration plots:
plt.figure(i)
ax1 = plt.subplot2grid((3,1),(0,0),rowspan=2)
ax2 = plt.subplot2grid((3,1),(2,0))
ax1.plot([0,1],[0,1],"k:",label="Perfectly calibrated")
fraction_of_positives, mean_predicted_value = \
calibration_curve(labels_full_vec.round(), prob_full_vec, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-")
ax2.hist(prob_full_vec, range=(0, 1), bins=10,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
## Since the classifier is clearly not calibrated, the threshold is found empirically on each validation set (pulled analysis)
# plot precision-recall curves for each model, and extract optimal threshold for maximal F1 score:
# extract best threshold for maximal Hausdorff distance*** ??? --> classify using best thresh --> calc Hausdorff dist. --> take best
# Take the model with best F1 score
| 2.71875
| 3
|
simulations/simulation.py
|
Jackil1993/metainventory
| 3
|
12780820
|
<filename>simulations/simulation.py
import numpy as np
import matplotlib.pyplot as plt
# create a .txt file to write down all the discrete events for further tracing
file = open("protocol.txt","w")
# a static class containing all the utilized random number generators
class Generator:
def normal(self, mu, sigma):
return np.random.normal(mu, sigma)
def exponential(self, betta):
return np.random.exponential(betta)
# the "Warehouse" includes the parameters shared by all the operable products
class Warehouse:
def __init__(self, adjustments, parameters, costs, strategy, total_capacity=300):
self.total_capacity = total_capacity
self.free_capacity = total_capacity
self.time = 0.0
self.products = []
self.to_report = bool(adjustments[0][1])
# we generate the operable products
for product in range(len(parameters)):
self.products.append(Product(product, adjustments[product], parameters[product], costs[product], strategy[product],
total_capacity))
def check_free_capacity(self):
used = 0.0
for product in range(len(self.products)):
used += sum(self.products[product].lots)
self.free_capacity = self.total_capacity - used
if self.free_capacity < 0.0:
self.free_capacity = 0.0
if self.to_report is True:
file.write('\nFree capacity = {}'.format(round(self.free_capacity, 2)))
def advance_time(self):
demands = list(map(lambda x: x.next_demand, self.products))
replenishments = list(map(lambda x: x.next_rep, self.products))
self.check_free_capacity()
if min(demands) <= min(replenishments):
self.time = self.products[np.argmin(demands)].handle_demand(self.time)
else:
self.time = self.products[np.argmin(replenishments)].handle_replenishment(self.time, self.free_capacity)
class Product:
def __init__(self, id, adjustments, parameters, costs, strategy, total_capacity):
self.g = Generator()
self.id = id
self.interarrivals = parameters[0]
self.demand = parameters[1]
self.replenishment_lead = parameters[2]
self.expiry = parameters[3]
self.lots = [strategy[0]]
self.expiry_date = [self.g.normal(parameters[3][0], parameters[3][1])]
self.status = False
self.next_demand = self.g.exponential(self.interarrivals)
self.next_rep = float('inf')
self.next_event = self.next_demand
self.reorder_point = strategy[1]
self.reorder_size = strategy[0]
#counters
self.backorders = 0.0
self.overflows = 0.0
self.expired = 0.0
#costs
self.purchase_prise = costs[0] #includes delivery
self.sales_prise = costs[1]
self.total_capacity = total_capacity # needed to calculate return to scale
self.purchase_prise = self.purchase_prise if self.reorder_size < 0.5*self.total_capacity else 0.7*self.purchase_prise
self.handling_cost = costs[2]
self.backorder_fee = costs[3]
self.overflow_fee = costs[4]
self.recycle_fee = costs[5]
self.income = 0.0
self.costs = 0.0
self.to_plot = bool(adjustments[0])
self.to_report = bool(adjustments[1])
if self.to_plot == True:
self.stats = Statistics(self.reorder_point, self.reorder_size)
def check_expiry(self, time):
for lot in range(len(self.expiry_date)):
if self.expiry_date[lot] <= 0.0 and self.lots[lot] > 0:
if self.to_report is True:
file.write('\nThe lot № {} of product {} is expired. {} pieces will be recycled'.format(lot, self.id,
round(float(self.lots[lot]), 2)))
try:
self.stats.expires.append(time)
self.stats.expires_time.append(sum(self.lots))
except AttributeError:
pass
if len(self.lots) > 1:
self.expiry_date.pop(lot)
tmp = self.lots.pop(lot)
self.expired += tmp
self.costs += self.recycle_fee * tmp
else:
self.expiry_date[0] = 0
self.expired += self.lots[0]
self.costs += self.recycle_fee * self.lots[0]
self.lots[0] = 0
break
def handle_demand(self, time):
to_handle = abs(self.g.normal(self.demand[0], self.demand[1]))
self.expiry_date = list(map(lambda x: x - (self.next_demand - time), self.expiry_date))
self.check_expiry(time)
self.costs += (self.next_demand - time)*sum(self.lots)*self.handling_cost #handling costs
time = self.next_demand
self.next_demand = time + self.g.exponential(self.interarrivals)
if self.to_report is True:
file.write('\ntime {}: {} pieces of product {} have been demanded'.format(round(time, 2), round(to_handle, 2), self.id))
empty_lots = 0
for lot in range(len(self.lots)):
if self.lots[lot] >= to_handle > 0.0:
self.lots[lot] -= to_handle
self.income += to_handle * self.sales_prise
if self.to_report is True:
file.write('\nNo backorder arose')
break
else:
to_handle -= self.lots[lot]
self.income += self.lots[lot] * self.sales_prise
self.backorders += to_handle
empty_lots += 1
try:
self.stats.backorders.append(time)
except AttributeError:
pass
if self.to_report is True:
file.write('\nBackorder of {} arose'.format(round(to_handle, 2)))
if empty_lots != 0.0 and len(self.lots) > 1:
                    # remove the fully consumed lots from the front of the list
                    for _ in range(empty_lots):
                        self.lots.pop(0)
                        self.expiry_date.pop(0)
break
empty_lots = 0.0
if self.to_report is True:
            file.write('\nStorage {} is {}. Backorder is {}'.format(self.id, [round(element) for element in self.lots], round(self.backorders)))
if sum(self.lots) <= self.reorder_point and self.status == False:
self.replenish(time)
            self.costs += self.purchase_prise * self.reorder_size #product is ordered
try:
self.stats.update_storage(time, sum(self.lots),
(self.income - self.costs)) #gather stats
except AttributeError:
pass
return time
def replenish(self, time):
self.status = True
self.next_rep = time + self.g.normal(self.replenishment_lead[0], self.replenishment_lead[1])
if self.to_report is True:
file.write('\n{} pieces of product {} have been ordered'.format(round(self.reorder_size, 2), self.id))
def handle_replenishment(self, time, free_capacity):
self.expiry_date = list(map(lambda x: x - (self.next_rep - time), self.expiry_date))
self.check_expiry(time)
if free_capacity >= self.reorder_size:
self.lots.append(self.reorder_size)
self.expiry_date.append(self.g.normal(self.expiry[0], self.expiry[1]))
if self.to_report is True:
                file.write('\ntime {}: {} pieces of product {} have been replenished'.format(round(time, 2),
                                                                                              round(self.reorder_size, 2), self.id))
else:
self.lots.append(free_capacity)
self.expiry_date.append(self.g.normal(self.expiry[0], self.expiry[1]))
self.overflows += (self.reorder_size - free_capacity)
self.costs += (self.reorder_size - free_capacity) * self.overflow_fee #fee for overflow
if self.to_report is True:
file.write('\nStorage overflow {} pieces of product {} are sent back'.format((self.reorder_size - free_capacity), self.id))
self.costs += (self.next_rep - time) * sum(self.lots) * self.handling_cost #handling costs
time = self.next_rep
self.status = False
self.next_rep = float('inf')
if free_capacity < self.reorder_size or free_capacity < 0.0:
free_capacity = 0.0
return time
class Statistics:
def __init__(self, reorder_line, size_line):
self.time = []
self.storage = []
self.profit = []
self.reorder_line = reorder_line
self.size_line = size_line
self.backorders = []
self.expires = []
self.expires_time =[]
self.overflows = []
def update_storage(self, time, storage, profit):
self.time.append(time)
self.storage.append(storage)
self.profit.append(profit)
def plot_storage(self):
storage_dynamics = plt.figure()
plt.plot(self.time, self.storage, color='orange', label='storage')
plt.axhline(self.reorder_line, color='red', linestyle='--', label='reorder point')
plt.axhline(self.size_line, color='black', linestyle='--', label='reorder size')
plt.scatter(self.expires, self.expires_time, color='black', marker='x', label='goods are expired')
plt.scatter(self.backorders, [0 for i in range(len(self.backorders))], color='red', s=2, label='backorders')
plt.legend()
plt.xlabel('modeling time')
plt.ylabel('inventory')
plt.show()
def plot_profit(self):
money_dynamics = plt.figure()
plt.plot(self.time, self.profit, color='orange')
plt.axhline(self.reorder_line, color='red', linestyle='--', label='break-even point')
plt.legend()
plt.xlabel('modeling time')
plt.ylabel('net profit')
plt.show()
def plot_phase(self):
phase = plt.figure()
#plt.plot(self.storage, self.profit)
plt.plot(self.storage[0:-1], self.storage[1:], color='blue')
#plt.plot(self.profit[0:-1], self.profit[1:])
        plt.title('Pseudo phase portrait')
plt.xlabel('I(t)')
plt.ylabel('I(t+1)')
plt.show()
class Simulation:
def __init__(self, adjustments, parameters, costs, strategy, horizon=660.0):
#np.random.seed(seed)
self.w = Warehouse(adjustments, parameters, costs, strategy)
self.horizon = horizon
def simulate(self):
while self.w.time < self.horizon:
self.w.advance_time()
try:
self.w.products[0].stats.plot_storage()
self.w.products[0].stats.plot_profit()
self.w.products[0].stats.plot_phase()
except AttributeError:
pass
total_cost = 0.0
for i in range(len(self.w.products)):
total_cost += (self.w.products[i].income - self.w.products[i].costs - self.w.products[i].backorders*self.w.products[i].backorder_fee)
return total_cost
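# --- Illustrative usage sketch (added example; not part of the original model) ---
# The parameter layout is inferred from how Product.__init__ unpacks its arguments;
# every number below is made up solely to demonstrate the calling convention.
if __name__ == '__main__':
    adjustments = [[0, 0]]                       # per product: [to_plot, to_report]
    parameters = [[5.0,                          # mean inter-arrival time of demand events
                   (20.0, 5.0),                  # demand size (mu, sigma)
                   (10.0, 2.0),                  # replenishment lead time (mu, sigma)
                   (100.0, 10.0)]]               # shelf life (mu, sigma)
    costs = [[2.0, 5.0, 0.01, 1.0, 0.5, 0.2]]    # purchase, sales, handling, backorder, overflow, recycle
    strategy = [[100.0, 50.0]]                   # [reorder size, reorder point]
    sim = Simulation(adjustments, parameters, costs, strategy, horizon=100.0)
    print('net result of the toy run:', sim.simulate())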
| 3.53125
| 4
|
AML/HW3/utils/utils.py
|
ZRZ-Unknow/20fall-CourseNote
| 0
|
12780821
|
<filename>AML/HW3/utils/utils.py<gh_stars>0
import numpy as np
import os
from bidict import bidict
def load_data(shared_nums):
""" Return:
data: list, each of it is a numpy array of shape (k,321)
label: list, each of it is a numpy array of shape (k,)
"""
train_path, test_path = './Dataset/train/', './Dataset/test/'
train_data, train_data_label = [], []
test_data, test_data_label = [], []
label_dict = bidict()
label_num = 0
for file in os.listdir(train_path):
with open(train_path+file,'r') as f:
p = f.read().split('\n')
if '' in p:
p.remove('')
label_string = p[0]
tmp_x, tmp_y = [], []
for i in range(1,len(p)):
label = label_string[i-1]
if label not in label_dict.keys():
label_dict[label] = label_num
label_num += 1
tmp_y.append(label_dict[label])
nums = [int(j) for j in p[i].split()]
nums = nums[1:]
nums_ = []
for i in range(0, len(nums), shared_nums):
nums_.append( sum(nums[i:i+shared_nums]))
tmp_x.append(nums_)
train_data.append(np.array(tmp_x,dtype=int))
train_data_label.append(np.array(tmp_y,dtype=int))
for file in os.listdir(test_path):
with open(test_path+file,'r') as f:
p = f.read().split('\n')
if '' in p:
p.remove('')
assert len(p[0])==len(p)-1
label_string = p[0]
tmp_x, tmp_y = [], []
for i in range(1,len(p)):
label = label_string[i-1]
tmp_y.append(label_dict[label])
nums = [int(j) for j in p[i].split()]
nums = nums[1:]
nums_ = []
for i in range(0, len(nums), shared_nums):
nums_.append( sum(nums[i:i+shared_nums]))
tmp_x.append(nums_)
test_data.append(np.array(tmp_x,dtype=int))
test_data_label.append(np.array(tmp_y,dtype=int))
return train_data, train_data_label, test_data, test_data_label, label_dict
def load_data_c(shared_nums):
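    """Same as load_data, but each position's feature vector additionally gets a
    10-way one-hot encoding of that position's label appended to it before being
    stored (this description is inferred from the code below)."""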
train_path, test_path = './Dataset/train/', './Dataset/test/'
train_data, train_data_label = [], []
test_data, test_data_label = [], []
label_dict = bidict()
label_num = 0
for file in os.listdir(train_path):
with open(train_path+file,'r') as f:
p = f.read().split('\n')
if '' in p:
p.remove('')
label_string = p[0]
tmp_x, tmp_y = [], []
for i in range(1,len(p)):
label = label_string[i-1]
if label not in label_dict.keys():
label_dict[label] = label_num
label_num += 1
tmp_y.append(label_dict[label])
nums = [int(j) for j in p[i].split()]
nums = nums[1:]
nums_ = []
for i in range(0, len(nums), shared_nums):
nums_.append( sum(nums[i:i+shared_nums]))
nums = nums_
add = np.zeros(10)
add[label_dict[label]] = 1
nums = np.hstack([nums,add])
tmp_x.append(nums)
train_data.append(np.array(tmp_x,dtype=int))
train_data_label.append(np.array(tmp_y,dtype=int))
for file in os.listdir(test_path):
with open(test_path+file,'r') as f:
p = f.read().split('\n')
if '' in p:
p.remove('')
assert len(p[0])==len(p)-1
label_string = p[0]
tmp_x, tmp_y = [], []
for i in range(1,len(p)):
label = label_string[i-1]
tmp_y.append(label_dict[label])
nums = [int(j) for j in p[i].split()]
nums = nums[1:]
nums_ = []
for i in range(0, len(nums), shared_nums):
nums_.append( sum(nums[i:i+shared_nums]))
nums = nums_
add = np.zeros(10)
add[label_dict[label]] = 1
nums = np.hstack([nums,add])
tmp_x.append(nums)
test_data.append(np.array(tmp_x,dtype=int))
test_data_label.append(np.array(tmp_y,dtype=int))
return train_data, train_data_label, test_data, test_data_label, label_dict
| 2.25
| 2
|
utils/sampling.py
|
PranavPai/Dissertation
| 0
|
12780822
|
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
test_size = 0.25
def sampling(**kwargs):
if kwargs['dataset'] == 'moons':
X, y = datasets.make_moons(n_samples=kwargs['sample_size'],
noise=kwargs['noise'],
random_state=5)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
elif kwargs['dataset'] == 'circles':
X, y = datasets.make_circles(n_samples=kwargs['sample_size'],
noise=kwargs['noise'],
factor=0.5,
random_state=1)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
elif kwargs['dataset'] == 'LS':
X, y = datasets.make_classification(n_samples=kwargs['sample_size'],
n_features=2,
n_redundant=0,
n_informative=2,
random_state=2,
n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += kwargs['noise'] * rng.uniform(size=X.shape)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
    else:
        raise ValueError(f"unsupported dataset: {kwargs['dataset']}")
def df_split(**kwargs):
_df = kwargs['df']
return train_test_split(
_df[['x', 'y']].to_numpy(),
_df['c'].to_numpy().astype(str),
test_size=kwargs['test_size'],
random_state=5), _df[['x', 'y']].to_numpy(), _df['c'].to_numpy()
def data_split(**kwargs):
return train_test_split(kwargs['X'],
kwargs['y'].astype(str),
test_size=kwargs['test_size'],
random_state=5), kwargs['X'], kwargs['y']
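# --- Illustrative usage sketch (added example; not part of the original module) ---
# Demonstrates the keyword-only calling convention of sampling(); the argument
# values are chosen arbitrarily for the example.
if __name__ == '__main__':
    (X_train, X_test, y_train, y_test), X, y = sampling(
        dataset='moons', sample_size=200, noise=0.2, test_size=test_size)
    print(X_train.shape, X_test.shape)   # e.g. (150, 2) (50, 2)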
| 2.953125
| 3
|
app/grandchallenge/algorithms/migrations/0020_auto_20200214_0912.py
|
njmhendrix/grand-challenge.org
| 1
|
12780823
|
# Generated by Django 3.0.2 on 2020-02-14 09:12
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cases", "0019_auto_20200120_0604"),
("algorithms", "0019_auto_20200210_0523"),
]
operations = [
migrations.RenameField(
model_name="algorithm",
old_name="visible_to_public",
new_name="public",
),
migrations.AddField(
model_name="result",
name="comment",
field=models.TextField(blank=True, default=""),
),
migrations.AddField(
model_name="result",
name="public",
field=models.BooleanField(
default=False,
help_text="If True, allow anyone to view this result along with the input image. Otherwise, only the job creator and algorithm editor will have permission to view this result.",
),
),
migrations.AlterField(
model_name="result",
name="images",
field=models.ManyToManyField(
editable=False,
related_name="algorithm_results",
to="cases.Image",
),
),
migrations.AlterField(
model_name="result",
name="job",
field=models.OneToOneField(
editable=False,
on_delete=django.db.models.deletion.CASCADE,
to="algorithms.Job",
),
),
migrations.AlterField(
model_name="result",
name="output",
field=django.contrib.postgres.fields.jsonb.JSONField(
default=dict, editable=False
),
),
]
| 1.953125
| 2
|
Final Project/new_clrs/clrs/_src/algorithms/strings_graph_structures.py
|
mohammedElfatihSalah/string-experiments
| 0
|
12780824
|
import numpy as np
def get_predecessor(T,P):
# copy the inputs
T = np.copy(T)
P = np.copy(P)
P_size = P.shape[0]
T_size = T.shape[0]
adj = np.zeros((P_size + T_size,P_size + T_size))
# predecessor for Text
for i in range(1,T_size):
adj[i, i-1] = 1
# predecessor for Pattern
for i in range(1,P_size):
adj[T_size+i, T_size+i-1] = 1
return adj
def get_graph_struct(T, P, h_i, h_j, h_s):
# copy the inputs
T = np.copy(T)
P = np.copy(P)
P_size = P.shape[0]
T_size = T.shape[0]
adj = np.zeros((P_size + T_size,P_size + T_size))
for i in range(h_s+1, h_i):
adj[i, h_i] = 1
adj[T_size, T_size + h_j] = 1
for i in range(T_size):
adj[i, T_size+h_j] = 1
for i in range(P_size):
adj[i+T_size, h_i] = 1
return adj
def get_seq_mat(T,P):
n = T.shape[0]
m = P.shape[0]
mat = np.eye((n+m))
# connect each character to its previous
for i in range(1,n+m):
if i == n:
# don't do it for the start of the pattern
continue
mat[i, i-1] = 1
# connect each character in text to its equal charcter in the pattern
for i in range(n):
for j in range(m):
if T[i] == P[j]:
mat[i, j+n] = 1
mat[j+n, i] = 1
# connect the start of the pattern with all character upfront
mat[n, n+1:] = 1
return mat
def get_t(T, P, s):
i = s
j = 0
N = T.shape[0]
M = P.shape[0]
while i < N:
if T[i] != P[j]:
return i
j +=1
i +=1
if j >= M:
return i
return N - 1
def get_bipartite_mat(T, P, s, num_classes=3):
'''
args
-----------------------------
T: the text
P: the pattern
s: current hint s
returns
-----------------------------
mat: constructed mat as the following:
1- all irrelevant edges will have a value of 0
2- relevant edges will have a value of 1 if they are equal,
otherwise they will have a value of 2
'''
# length of the text
N = T.shape[0]
# length of the pattern
M = P.shape[0]
    mat = np.zeros((N+M, N+M), dtype=int)
t = get_t(T, P, s)
for i in range(M):
p_char = P[i]
for j in range(s,t):
t_char = T[j]
if t_char == p_char:
mat[j, i+N] = 1
mat[i+N, j] = 1
else:
mat[j, i+N] = 2
mat[i+N, j] = 2
    one_hot_mat = np.zeros((N+M, N+M, num_classes), dtype=int)
for i in range(len(mat)):
for j in range(len(mat[0])):
class_id = mat[i, j]
one_hot_mat[i, j, class_id] = 1
return one_hot_mat
#=== *** ===#
def get_everything_matched_to_this_point(T, P, s):
'''
return a binary mask for the pattern
'''
    result = np.zeros(T.shape[0] + P.shape[0], dtype=int)
i = s
j = 0
while j < P.shape[0]:
if T[i] == P[j]:
result[T.shape[0]+j] = 1
i+=1
j+=1
else:
break
return result
def get_bipartite_mat_from_pattern_to_text(T, P, s):
# length of the text
N = T.shape[0]
# length of the pattern
M = P.shape[0]
    mat = np.zeros((N+M, N+M), dtype=int)
for i in range(M):
p_char = P[i]
for j in range(s,N):
t_char = T[j]
if t_char == p_char:
mat[j, i+N] = 1
mat[i+N, j] = 1
else:
mat[j, i+N] = 2
                mat[i+N, j] = 2
    return mat
def get_seq_mat_i_j(T, P , i ,j, s):
n = T.shape[0]
m = P.shape[0]
mat = np.zeros((n+m, n+m))
# connect each character to its previous
# for i in range(1,n+m):
# if i == n:
# # don't do it for the start of the pattern
# continue
# mat[i, i-1] = 1
# connect node i with node j
mat[i, j+n] = 1
mat[j+n, i] = 1
# connect node s with i
mat[s, i] = 1
mat[i,s] = 1
# connect first node in P with node
mat[n,n+j] = 1
return mat
def get_edge_mat(T, P, start, end):
'''
edge between start and end
'''
    n = T.shape[0]
    m = P.shape[0]
    mat = np.zeros((n + m, n + m))
mat[start, end] = 1
return mat
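# --- Illustrative usage sketch (added example; not part of the original module) ---
# T and P are assumed to be integer-encoded strings, as in the functions above;
# the particular values are invented for the example.
if __name__ == '__main__':
    T = np.array([0, 1, 0, 1, 1])    # "text" of length 5
    P = np.array([0, 1, 1])          # "pattern" of length 3
    print(get_predecessor(T, P).shape)   # (8, 8): one node per text/pattern position
    print(get_t(T, P, s=0))              # 2: matching from s=0 first fails at text index 2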
| 3.171875
| 3
|
model/BFS_Agent.py
|
cogtoolslab/tools_block_construction
| 0
|
12780825
|
import os
import sys
proj_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0,proj_dir)
import random
from itertools import repeat
import utils.blockworld as blockworld
from model.utils.Search_Tree import *
class BFS_Agent:
"""An agent performing exhaustive BFS search. This can take a long time to finish."""
def __init__(self, world=None,shuffle=False,random_seed=None):
self.world = world
self.shuffle = shuffle
self.random_seed = random_seed
def __str__(self):
"""Yields a string representation of the agent"""
return self.__class__.__name__+' shuffle:'+str(self.shuffle)+' random seed: '+str(self.random_seed)
def set_world(self,world):
self.world = world
def get_parameters(self):
"""Returns dictionary of agent parameters."""
return {
'agent_type':self.__class__.__name__,
'random_seed':self.random_seed
}
def search(self,current_nodes):
"""Performs one expansion of the nodes in current nodes. Returns either list of expanded nodes, found solution node or empty list. To introduce randomness, the current nodes can be shuffled."""
cost = 0 #track number of states that are evaluated
if self.shuffle:
random.seed(self.random_seed) #fix random seed
random.shuffle(current_nodes)
next_nodes = [] #holds the nodes we get from the current expansion step
for node in current_nodes: #expand current nodes
possible_actions = node.state.possible_actions()
children = []
for action in possible_actions:
child = Node(node.state.transition(action),node.actions+[action]) #generate new node
#check if child node is winning
cost += 1
if child.state.is_win():
#we've found a winning state
return "Winning", child, cost
next_nodes.append(child)
return "Ongoing",next_nodes, cost
def act(self, steps = None, verbose = False):
"""Makes the agent act, including changing the world state."""
#Ensure that we have a random seed if none is set
states_evaluated = 0
if self.random_seed is None: self.random_seed = random.randint(0,99999)
#check if we even can act
if self.world.status()[0] != 'Ongoing':
print("Can't act with world in status",self.world.status())
return [],{'states_evaluated':states_evaluated}
# if steps is not None:
# print("Limited number of steps selected. This is not lookahead, are you sure?")
#perform BFS search
current_nodes = [Node(self.world.current_state,[])] #initialize root node
result = "Ongoing"
while current_nodes != [] and result == "Ongoing":
#keep expanding until solution is found or there are no further states to expand
result, out, cost = self.search(current_nodes) #run expansion step
states_evaluated += cost
if result != "Winning":
current_nodes = out #we have no solution, just the next states to expand
if verbose: print("Found",len(current_nodes),"to evaluate at cost",cost)
#if we've found a solution
if result == "Winning":
actions = out.actions
actions = actions[0:steps] #extract the steps to take. None gives complete list
if verbose: print("Found solution with ",len(actions),"actions")
#apply steps to world
for action in actions: self.world.apply_action(action)
if verbose: print("Done, reached world status: ",self.world.status())
#only returns however many steps we actually acted, not the entire sequence
else:
actions = []
if verbose: print("Found no solution")
return actions,{'states_evaluated':states_evaluated}
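# --- Illustrative calling convention (added note; not executable as-is) ---
# The concrete constructor of utils.blockworld is not shown in this file, so the
# lines below only sketch how the agent is typically driven:
#   agent = BFS_Agent(world=some_blockworld_instance, shuffle=True, random_seed=42)
#   actions, info = agent.act(verbose=True)
#   print(len(actions), 'actions;', info['states_evaluated'], 'states evaluated')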
| 2.96875
| 3
|
scalation_kernel/__init__.py
|
KevinBonanno/scalation_kernel
| 0
|
12780826
|
<reponame>KevinBonanno/scalation_kernel
"""A Scala+ScalaTionkernel for Jupyter"""
__version__ = '1.0'
from .kernel import ScalaTionKernel
| 1.015625
| 1
|
rf_optimizer.py
|
bGhorbani/linearized_neural_networks
| 0
|
12780827
|
<gh_stars>0
"""This file implements functionalities that allow fitting large random feature models with
conjugate gradient type algorithms. """
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(0, './linear_algebra/')
from tensor_utils import AssignmentHelper
class RF_Optimizer(object):
def __init__(self, model, sess, initializer, sess_dict, optim_reg, n, y_place_holder, penalize_const=False):
self._N = np.int(model._variables[0].shape[0])
self._n = n
self._assignment_obj = AssignmentHelper(model._variables)
self._sess = sess
self._init = initializer
self._dict = sess_dict
self._Y = np.copy(sess_dict[y_place_holder])
self._ph = y_place_holder
# Model Quantities
self._loss = model._loss
labels = model._labels
if len(labels.shape) < 2:
modified_labels = tf.expand_dims(labels, 1)
else:
modified_labels = labels
self._weighted_sum = tf.matmul(tf.transpose(modified_labels), model._phi)
self._ws = model._wsum
self._pten = model._pten
self._assign_op = model._assign_op
ShapeList = [np.prod(g.shape) for g in model._variables]
self._params = np.int(np.sum(ShapeList))
assert self._params == self._N + 1
self._pen_const = penalize_const
self._reg = optim_reg
self._ave = self.AtxGPU(np.ones((self._n, 1)) / np.sqrt(self._n))
self._ave = self._ave.T
temp = self.AtxGPU(self._Y)
self._aty = np.zeros((self._params,), dtype=np.float64)
self._aty[:-1] = temp[0, :]
self._aty[-1] = np.sum(self._Y) / np.sqrt(self._n)
self._aty = self._aty.astype(np.float32)
def fun(self, x):
self._sess.run(self._init, feed_dict=self._dict)
total_loss = 0.0
# initialize the data
end_of_data = False
count = 0.0
self._assignment_obj.assign(x, self._sess)
while not end_of_data:
try:
loss = self._sess.run(self._loss)
total_loss += loss
count += 1.0
except tf.errors.OutOfRangeError:
end_of_data = True
val = total_loss / (count + 0.0)
# Taking the intercept into account
if self._pen_const:
reg = self._reg * (np.linalg.norm(x) ** 2)
else:
reg = self._reg * (np.linalg.norm(x[:-1]) ** 2)
return val + reg
def Atx(self):
return self._aty
def AtxGPU(self, x):
x = x.reshape((self._n, 1))
x = x.astype(np.float32)
self._dict[self._ph] = x
end_of_data = False
result = np.zeros((1, self._N), dtype=np.float64)
# initialize the data
self._sess.run(self._init, feed_dict=self._dict)
while not end_of_data:
try:
temp = self._sess.run(self._weighted_sum)
temp = temp.astype(np.float64)
result += temp / np.sqrt(self._n + 0.0)
except tf.errors.OutOfRangeError:
end_of_data = True
self._dict[self._ph] = self._Y
return result
def Hv(self, x):
"""Warning: This function assume that the Hessian is constant."""
x = x.reshape((self._params, 1))
x0 = x[:-1]
c0 = x[-1]
self._sess.run(self._init, feed_dict=self._dict)
end_of_data = False
total = np.zeros((self._params, 1), dtype=np.float64)
count = 0.0
self._sess.run(self._assign_op, {self._pten: x0})
while not end_of_data:
try:
temp = self._sess.run(self._ws)
temp = temp.astype(np.float64)
total[:-1, 0] += temp * 2.0
count += 1
except tf.errors.OutOfRangeError:
end_of_data = True
total = total / (count + 0.0)
total[:-1] += self._ave * c0 * 2
total[-1] = 2 * (c0 + np.dot(self._ave.T, x0))
if self._pen_const:
total += 2 * self._reg * x
else:
total[:-1, :] += 2 * self._reg * x0
return total[:, 0].astype(np.float32)
def num_params(self):
return self._params
| 2.171875
| 2
|
mrsketch/__init__.py
|
Simonl07/mrsketch
| 0
|
12780828
|
<reponame>Simonl07/mrsketch<filename>mrsketch/__init__.py
import threading
import types
from copy import copy, deepcopy
from functools import reduce
import json
import pickle
supported_graph_serialization = {
'json': json,
'pickle': pickle
}
class StateDescriptor(object):
def __init__(self, constructor, updater, merger, serializer, deserializer, extractor=lambda s: s):
self.constructor = constructor
self.updater = updater
self.merger = merger
self.serializer = serializer
self.deserializer = deserializer
self.extractor = extractor
class Path(object):
'''
Represents an immutable unbounded path (sequence of values) along certain dimension
Keys (level-names) can be provided to each level for accessing by name
Without names, levels can be accessed by index
- Length of a path is the length of its sequence of values
- Path1 is extendable_by Path2 if Path2 is longer than Path1
- Path1.extend_by_one(Path2) returns a new Path where Path1
is extended by one level towards Path2
'''
def __init__(self, values=tuple(), keys=tuple()):
self.keys = keys
self.values = values
def __len__(self):
return len(self.values)
def __getitem__(self, key):
return self.values[self.keys.index(key) if key in self.keys else key]
def subpath(self, i, j):
return Path(self.values[i:j])
def extendable_by(self, other):
return len(other) > len(self)
def extend_by_one(self, target):
assert len(target) > len(self)
return Path(self.values + [target.values[len(self)]])
def pathstring(self):
return '/' + '/'.join([str(v) for v in self.values])
def __repr__(self):
return f'Path(values={self.values}, keys={self.keys})'
def __str__(self):
return self.pathstring()
def __hash__(self):
return hash(self.pathstring())
def __eq__(self, other):
return all([self[i] == None or self[i] == other[i] for i in range(len(other))])
class NDPath(object):
'''
N-dimensional path is a mapping of keys (dimension names) to paths
'''
def __init__(self, paths):
self.paths = paths
def __len__(self):
return sum([len(path) for path in self.paths.values()])
def __getitem__(self, key):
return self.paths[key]
def __contains__(self, key):
return key in self.paths
def __iter__(self):
return iter(self.paths)
def items(self):
return self.paths.items()
def with_path(self, key, path):
newpaths = self.paths.copy()
newpaths[key] = path
return NDPath(newpaths)
def dimensions(self):
return self.paths.keys()
def extendable_by(self, other):
return self.dimensions() == other.dimensions() and any([self[key].extendable_by(other[key]) for key in self.paths])
def pathextend(self, other):
if not self.extendable_by(other):
raise BaseException(f'{self} is not extendable by {other}')
return [self.with_path(key, path.extend_by_one(other[key])) for key, path in self.items() if path.extendable_by(other[key])]
def __str__(self):
output = ''
for key in self:
output += f'{key}={self[key]} '
return output
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.dimensions() == other.dimensions() and all([self[key] == other[key] for key in self])
def __hash__(self):
return hash(str(self))
def to_hex(self):
return pickle.dumps([(name, path) for name, path in self.paths.items()]).hex()
def from_hex(hex):
paths = {}
for t in pickle.loads(bytes.fromhex(hex)):
paths[t[0]] = t[1]
return NDPath(paths)
class Graph(object):
def __init__(self, state_descriptors, path_extractor):
self.adjlist = {}
self.path_extractor = lambda x: NDPath({
key: Path(value)
for key, value in path_extractor(x).items()
})
self.state_descriptors = state_descriptors
self.lock = threading.Lock()
self.size = 0
def __newnode(self):
node = types.SimpleNamespace()
node.states = {name: descriptor.constructor() for name, descriptor in self.state_descriptors.items()}
node.lock = threading.Lock()
node.children = set()
return node
def push(self, record):
target = self.path_extractor(record)
visited = set()
for dimension, path in target.items():
root = NDPath({
d: Path([]) if d != dimension else path.subpath(0,1)
for d, p in target.items()
})
# Lock adjlist
self.lock.acquire()
if root not in self.adjlist:
self.adjlist[root] = self.__newnode()
self.lock.release()
self.__insert(root, target, visited, record)
def __insert(self, currpath, target, visited, record):
# Put currpath in visited to prevent re-visits
visited.add(currpath)
node = self.adjlist[currpath]
# Lock node & Update all states with record
node.lock.acquire()
for name, state in node.states.items():
self.state_descriptors[name].updater(record, state)
node.lock.release()
if currpath.extendable_by(target):
ndpaths = currpath.pathextend(target)
for ndpath in ndpaths:
# Lock adjlist for new nodes
self.lock.acquire()
if ndpath not in self.adjlist:
self.adjlist[ndpath] = self.__newnode()
self.lock.release()
# Lock node for modifying children
node.lock.acquire()
if ndpath not in node.children:
node.children.add(ndpath)
node.lock.release()
if ndpath not in visited:
self.__insert(ndpath, target, visited, record)
def combinedstate(self, nodes):
states = {}
# group by states
for node in nodes:
# Lock nodes for state access
node.lock.acquire()
for name, state in node.states.items():
if name not in states:
states[name] = []
states[name].append(state)
node.lock.release()
# reduce by states
# Note: At this point states can still be mutated
for name, availiable_states in states.items():
states[name] = reduce(self.state_descriptors[name].merger, availiable_states)
return states
def get(self, ndpath):
ndpath = NDPath({
key: Path(value)
for key, value in ndpath.items()
})
if len(ndpath) == 0:
return self.combinedstate([node for ndpath, node in self.adjlist.items() if len(ndpath) == 1])
if ndpath in self.adjlist:
return {name: self.state_descriptors[name].extractor(state) for name, state in self.adjlist[ndpath].states.items()}
return {name: sd.constructor() for name, sd in self.state_descriptors.items()}
def edges(self):
output = []
for ndpath, node in self.adjlist.items():
if len(node.children) == 0:
output.append((str(ndpath), "None"))
for children in node.children:
output.append((str(ndpath), str(children)))
return output
def merge(self, other):
# Lock the whole graph for state_descriptors updated
self.lock.acquire()
# Merge state descriptors
for name, descriptor in other.state_descriptors.items():
if name not in self.state_descriptors:
self.state_descriptors[name] = descriptor
self.lock.release()
# Merge nodes
for ndpath, node in other.adjlist.items():
if ndpath not in self.adjlist:
self.adjlist[ndpath] = self.__newnode()
self.adjlist[ndpath].states = deepcopy(node.states)
continue
# If nodes with same ndpath exists, merge their states using merge function
mystates = self.adjlist[ndpath].states
otherstates = node.states
for name, otherstate in otherstates.items():
if name in mystates:
# --merge() must create a new state from the two instead of mutate
mystates[name] = self.state_descriptors[name].merger(mystates[name], otherstate)
else:
mystates[name] = deepcopy(otherstate)
def serialize(self, encoding='json'):
if encoding not in supported_graph_serialization:
raise BaseException(f'{encoding} is not a valid encoding option (options: json, pickle)')
output = {}
self.lock.acquire()
for ndpath, node in self.adjlist.items():
node.lock.acquire()
encoded_node_states = {}
for name, state in node.states.items():
encoded_node_states[name] = self.state_descriptors[name].serializer(state)
encoded_children = []
for children in node.children:
encoded_children.append(children.to_hex())
node.lock.release()
output[ndpath.to_hex()] = (encoded_node_states, encoded_children)
self.lock.release()
return supported_graph_serialization[encoding].dumps(output, indent=4)
def from_serialization(self, bytes, encoding='json'):
if encoding not in supported_graph_serialization:
raise BaseException(f'{encoding} is not a valid encoding option (options: json, pickle)')
tuple_dict = supported_graph_serialization[encoding].loads(bytes)
self.lock.acquire()
for ndpath_hex, encoded_node in tuple_dict.items():
ndpath = NDPath.from_hex(ndpath_hex)
self.adjlist[ndpath] = self.__newnode()
self.adjlist[ndpath].lock.acquire()
self.adjlist[ndpath].states = {
name: self.state_descriptors[name].deserializer(encoded_state)
for name, encoded_state in encoded_node[0].items()
}
self.adjlist[ndpath].children = [NDPath.from_hex(c) for c in encoded_node[1]]
self.adjlist[ndpath].lock.release()
self.lock.release()
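# --- Illustrative usage sketch (added example; not part of the original module) ---
# A single "count" state rolled up along one dimension; the record fields and
# dimension name below are invented for the example.
if __name__ == '__main__':
    count_sd = StateDescriptor(
        constructor=lambda: {'n': 0},
        updater=lambda record, state: state.update(n=state['n'] + 1),
        merger=lambda a, b: {'n': a['n'] + b['n']},
        serializer=json.dumps,
        deserializer=json.loads,
        extractor=lambda s: s['n'])
    g = Graph({'count': count_sd},
              path_extractor=lambda rec: {'time': [rec['year'], rec['month']]})
    g.push({'year': 2021, 'month': 1})
    g.push({'year': 2021, 'month': 2})
    print(g.get({'time': [2021]}))   # {'count': 2}: both records roll up to year 2021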
| 2.5
| 2
|
main.py
|
biogui/royal-flush-telegram-bot
| 0
|
12780829
|
from telegram.ext import Updater, CommandHandler, ConversationHandler, MessageHandler, Filters, CallbackQueryHandler
from env import TOKEN
from commands import show_challs, choose_chall_to_show, SHOWS_CHOSEN_CHALL
from commands import try_answer, choose_chall_to_answer, check_answer, CHOOSE_CHALL_TO_ANSWER
def start(update, context):
welcome_txt = ['Hello, welcome to RoyalFlushBot!']
welcome_txt.append(
'The bot of "Royal Flush: A Puzzle Story", a puzzle hunt game about \
playing cards, poker hands, kings, queens and brain challenges. \
[Early Access Version]'
)
update.message.reply_text('\n'.join(welcome_txt))
def main():
updater = Updater(token=TOKEN, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', start))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler('show', show_challs)],
states={
SHOWS_CHOSEN_CHALL: [CallbackQueryHandler(choose_chall_to_show)],
},
fallbacks=[]
))
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler('try', try_answer)],
states={
CHOOSE_CHALL_TO_ANSWER: [
CallbackQueryHandler(choose_chall_to_answer),
MessageHandler(Filters.text, check_answer)
]
},
fallbacks=[]
))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
print('=== BOT ATIVADO ===')
print('Digite Ctrl + C para desativar.')
main()
print('=== BOT DESATIVADO ===')
| 2.453125
| 2
|
main.py
|
KevHg/tictactoe-cli
| 0
|
12780830
|
import random
from copy import deepcopy
def print_board(board, max_width):
for row in range(len(board)):
for col in range(len(board)):
print("{:>{}}".format(board[row][col], max_width), end='')
print()
def win_check(board, player, n, row, col):
horizontal, vertical, diagonal_down, diagonal_up = True, True, True, True
# Check for horizontal win
for i in range(n):
if board[row][i] != player:
horizontal = False
# Check for vertical win
for i in range(n):
if board[i][col] != player:
vertical = False
# check for downwards diagonal (i.e. top left to bottom right)
for i in range(n):
if board[i][i] != player:
diagonal_down = False
# Check for upwards diagonal (i.e. bottom left to top right)
for i in range(n):
if board[i][n - 1 - i] != player:
diagonal_up = False
return horizontal or vertical or diagonal_down or diagonal_up
def vs_bot(board, n, possible_moves, difficulty):
max_width = len(str(n ** 2)) + 1
while True:
print_board(board, max_width)
num = int(input("Player - Input location: "))
if num < 0 or num >= (n ** 2):
print("Please choose a valid location!")
continue
row = num // n
col = num % n
if board[row][col] == 'O' or board[row][col] == 'X':
print("Cannot replace a player's piece!")
continue
board[row][col] = 'O'
possible_moves.remove(num)
if win_check(board, 'O', n, row, col):
print_board(board, max_width)
print("You win!")
break
if not possible_moves:
print_board(board, max_width)
print("Draw! Board is full.")
break
# Bot move begins here
print("Bot is thinking...")
bot_num = -1
check = random.randint(0, 100)
# Medium difficulty - 50% chance of bot being easy, 50% chance being abyssal
if difficulty == 2:
if check <= 50:
difficulty = 0
else:
difficulty = 4
# Hard difficulty - 20% chance of bot being easy, 80% chance being abyssal
elif difficulty == 3:
if check <= 20:
difficulty = 0
else:
difficulty = 4
print(possible_moves)
# Easy difficulty - Bot selects a random move
if difficulty == 1:
bot_num = random.choice(possible_moves)
# Abyssal difficulty - Bot utilizes minimax to find optimal move
elif difficulty == 4:
temp, bot_num = minimax(board, n, possible_moves, True)
if bot_num == -1:
print("Bot has forfeited! You won!")
break
row = bot_num // n
col = bot_num % n
board[row][col] = 'X'
possible_moves.remove(bot_num)
if win_check(board, 'X', n, row, col):
print_board(board, max_width)
print("You lost!")
break
if not possible_moves:
print_board(board, max_width)
print("Draw! Board is full.")
break
# Returns winning player (O or X), or D if draw
def find_winner(board, n):
for i in range(n):
horizontal = True
for j in range(0, n - 1):
if board[i][j] == '.':
break
if board[i][j] != board[i][j + 1]:
horizontal = False
if horizontal:
return board[i][0]
for i in range(n):
vertical = True
for j in range(0, n - 1):
if board[j][i] == '.':
break
if board[j][i] != board[j + 1][i]:
vertical = False
if vertical:
return board[0][i]
diagonal_down = True
for i in range(0, n - 1):
if board[i][i] == '.':
break
if board[i][i] != board[i + 1][i + 1]:
diagonal_down = False
if diagonal_down:
return board[0][0]
diagonal_up = True
for i in range(0, n - 1):
if board[i][n - 1 - i] == '.':
break
if board[i][n - 1 - i] != board[i + 1][n - 2 - i]:
diagonal_up = False
if diagonal_up:
return board[0][n - 1]
return 'D'
def minimax(board, n, possible_moves, maximizing_player):
    best_move = -1
    # Evaluate the board first so wins are scored as soon as they occur,
    # not only once the board is completely full.
    winner = find_winner(board, n)
    if winner == 'X':
        return 1, best_move
    elif winner == 'O':
        return -1, best_move
    if not possible_moves:
        return 0, best_move
if maximizing_player:
value = -10
for move in possible_moves:
new_board = deepcopy(board)
new_possible = deepcopy(possible_moves)
row = move // n
col = move % n
new_board[row][col] = 'X'
new_possible.remove(move)
new_value, new_move = minimax(new_board, n, new_possible, False)
if new_value > value:
value = new_value
best_move = move
return value, best_move
else:
value = 10
for move in possible_moves:
new_board = deepcopy(board)
new_possible = deepcopy(possible_moves)
row = move // n
col = move % n
new_board[row][col] = 'O'
new_possible.remove(move)
new_value, new_move = minimax(new_board, n, new_possible, True)
if new_value < value:
value = new_value
best_move = move
return value, best_move
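# Small sanity sketch for minimax (board values assumed for illustration): with
# board = [['X', 'X', 2], ['O', 'O', 5], [6, 7, 8]] and possible_moves = [2, 5, 6, 7, 8],
# the maximizing player ('X') should pick cell 2 to complete the top row, so
# minimax(board, 3, [2, 5, 6, 7, 8], True) returns (1, 2).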
def vs_player(board, n, possible_moves):
max_width = len(str(n ** 2)) + 1
player = 'O'
while True:
print_board(board, max_width)
num = int(input("Player " + player + " - Input location: "))
if num < 0 or num >= (n ** 2):
print("Please choose a valid location!")
continue
row = num // n
col = num % n
if board[row][col] == 'O' or board[row][col] == 'X':
print("Cannot replace a player's piece!")
continue
board[row][col] = player
possible_moves.remove(num)
if not possible_moves:
print_board(board, max_width)
print("Draw! Board is full.")
break
if win_check(board, player, n, row, col):
print_board(board, max_width)
print("Player " + player + " wins!")
break
if player == 'O':
player = 'X'
else:
player = 'O'
def main():
while True:
n = int(input("Input size of tic-tac-toe board: "))
if n > 1:
break
else:
print("Board cannot be smaller than size 2!")
board = []
possible_moves = []
for i in range(n):
new_row = []
for j in range(n):
new_row.append(i * n + j)
possible_moves.append(i * n + j)
board.append(new_row)
print("Select game mode:")
while True:
print("1 - Easy bot")
print("2 - Medium bot")
print("3 - Hard bot")
print("4 - Abyssal bot (You're not expected to win!)")
print("5 - Multiplayer")
play_type = int(input("Your choice: "))
if play_type == 1:
vs_bot(board, n, possible_moves, 1)
break
elif play_type == 2:
vs_bot(board, n, possible_moves, 2)
break
elif play_type == 3:
vs_bot(board, n, possible_moves, 3)
break
elif play_type == 4:
vs_bot(board, n, possible_moves, 4)
break
elif play_type == 5:
vs_player(board, n, possible_moves)
break
else:
print("Invalid option!")
print("Game over! Press return to close...")
input()
main()
| 4.03125
| 4
|
faker_GB.py
|
bogordon86/Python_Faker
| 0
|
12780831
|
<gh_stars>0
#Import Dependencies
from faker import Faker
import pandas as pd
#For generating fake ID
import random
#Great Britain fake data (why not?)
fake = Faker('en_GB')
# dictionary
dict_data = {
"name":[],
"address":[],
"dob":[],
"gender":[],
"phone_number":[],
"imei":[]
}
#Loop through fake data
for x in range(100):
dict_data["name"].append(fake.name())
dict_data["address"].append(fake.address())
dict_data["dob"].append(fake.date_of_birth())
dict_data["gender"].append(fake.random.choice(['male', 'female']))
dict_data["phone_number"].append(fake.phone_number())
dict_data["imei"].append(fake.random_number(15))
# using pandas to create a data frame makes it into a more presentable format
output_data = pd.DataFrame(dict_data)
output_data.to_csv("fake_data.csv", header=["name", "address", "dob", "gender", "phone_number", "imei"], index=False)
| 3.203125
| 3
|
2task1.py
|
inwk6312fall2018/programmingtask2-Adityapuni
| 0
|
12780832
|
file=open('Crime.csv')
lineA=[]
lineB=[]
dict={} #take crime id as key and crime name as value
def histogram(s):
"""to check s in string"""
d={}
for c in s:
d[c]=1+d.get(c,0)
return d
"""make list of all words"""
for line in file:
    line = line.strip()
for i in line.split(','):
lineA.append(i)
"""all crime ID"""
i=16
while i<len(lineA):
lineB.append(lineA[i])
i+=9
dic=histogram(lineB) # dict of crime ID , and repetation of it
"""make dict of Crime Id vs Crime name"""
lineD = list(dic.keys())
for i in range(len(lineA)):
if lineA[i] in lineD:
dict[lineA[i]]=lineA[i+1].strip()
"""print table"""
print("{0:25s} {1:25s} {2:1s}".format('Crime type','Crime ID','Crime Count'))
for key,value in dic.items():
if key in dict.keys():
print("{0:25} {1:25} {2:1}".format(dict[key],key,dic[key]))
| 3.25
| 3
|
pages/quitPage.py
|
amalthomas-exe/Windown-Installer-GUI
| 0
|
12780833
|
<filename>pages/quitPage.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'quitPage.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(691, 557)
Dialog.setStyleSheet("QDialog{\n"
" background-color: white\n"
"}")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(10, 20, 81, 61))
self.label.setStyleSheet("QLabel{\n"
" background-image: url(:/newPrefix/win11.png);\n"
"}")
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap(":/newPrefix/win11.png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(80, 30, 331, 41))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(32)
font.setItalic(False)
self.label_2.setFont(font)
self.label_2.setStyleSheet("QLabel{\n"
" color : rgb(47, 47, 212) \n"
"}")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(10, 510, 141, 31))
self.label_3.setStyleSheet("QLabel{\n"
" \n"
" background-image: url(:/newPrefix/microsoft.png);\n"
"}")
self.label_3.setText("")
self.label_3.setPixmap(QtGui.QPixmap(":/newPrefix/microsoft.png"))
self.label_3.setScaledContents(True)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(170, 510, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(250, 510, 71, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.pushButtonNext = QtWidgets.QPushButton(Dialog)
self.pushButtonNext.setGeometry(QtCore.QRect(590, 510, 91, 31))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(13)
self.pushButtonNext.setFont(font)
self.pushButtonNext.setStyleSheet("QPushButton{\n"
" background-color: rgb(255, 254, 226);\n"
" border-radius: 0px;\n"
" color: black;\n"
" border: 1px dotted rgb(60,96,255)\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color: rgb(60, 96, 255)\n"
"}")
self.pushButtonNext.setObjectName("pushButtonNext")
self.label_6 = QtWidgets.QLabel(Dialog)
self.label_6.setGeometry(QtCore.QRect(30, 100, 341, 41))
font = QtGui.QFont()
font.setPointSize(15)
self.label_6.setFont(font)
self.label_6.setStyleSheet("QLabel{\n"
" color: black\n"
"}")
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(Dialog)
self.label_7.setGeometry(QtCore.QRect(30, 160, 551, 161))
font = QtGui.QFont()
font.setPointSize(15)
self.label_7.setFont(font)
self.label_7.setStyleSheet("QLabel{\n"
" color: black\n"
"}")
self.label_7.setWordWrap(True)
self.label_7.setObjectName("label_7")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_2.setText(_translate("Dialog", "Windows 11"))
self.label_4.setText(_translate("Dialog", "<a style=\"color:black; text-decoration:none\" href=\"https://www.aka.ms/support\">Support</a>"))
self.label_5.setText(_translate("Dialog", "<a style=\"color:black; text-decoration:none\" href=\"https://www.microsoft/en-us/legal\">Legal</a>"))
self.pushButtonNext.setText(_translate("Dialog", "OK"))
self.label_6.setText(_translate("Dialog", "Exiting..."))
self.label_7.setText(_translate("Dialog", "Thank you for using Windows 11 installer.<br><br>In case you desire a refund, please visit <a href=\"https://aka.ms/refund\" style=\"text-decoration:none\">aka.ms/refund</a> to get more info regarding refund policy."))
| 2.15625
| 2
|
quotes/routes.py
|
vsgobbi/Quotes-Pyramid-REST-API
| 0
|
12780834
|
def includeme(config):
config.add_route('index', '/')
config.add_route('home', '/home')
config.add_route('quotes', '/quotes')
config.add_route('random_quote', '/quotes/random')
config.add_route('get_quote', '/quotes/{id}')
config.add_route('delete_quote', '/delete/{id}')
config.add_route('get_sessions', '/sessions')
config.add_route('get_session', '/sessions/{id}')
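# A sketch of how one of the routes above is typically wired to a view with
# Pyramid's @view_config decorator. This would normally live in a separate
# views module; the view function and the returned quote below are
# hypothetical and not part of this repository.
from pyramid.view import view_config

@view_config(route_name='random_quote', renderer='json')
def random_quote_view(request):
    # Illustrative only: a real view would pull a quote from the database.
    return {'quote': 'An illustrative quote'}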
| 1.703125
| 2
|
examples/root_std_map.py
|
jjacob/DailyPythonScripts
| 0
|
12780835
|
import rootpy.stl as stl
import ROOT
# Create a vector type
StrVector = stl.vector(stl.string)
# Instantiate
strvector = StrVector()
strvector.push_back("Hello")
# etc.
MapStrRoot = stl.map(stl.string, ROOT.TH1D)
MapStrRootPtr = stl.map(stl.string, "TH1D*")
StrHist = stl.pair(stl.string, "TH1*")
m = MapStrRootPtr()
a = ROOT.TH1D('t1', 't1', 10, 0, 1)
m.insert(StrHist("test", a))
print(m)
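# A brief usage sketch for the containers created above. It assumes ROOT's
# pythonized STL proxies support iteration and item access (which they
# normally do); the fill value 0.5 is made up for illustration.
for s in strvector:
    print(s)

hist = m["test"]          # look the histogram up by its string key
hist.Fill(0.5)
print(hist.GetEntries())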
| 3.203125
| 3
|
selseq/selseq_clustering.py
|
GrigoriiPechkovskii/selseq
| 0
|
12780836
|
print('start identity_percent')
import os
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import subprocess
from selseq_main import *  # TODO: replace the star import with explicit names
from selseq_constant import *
def clustering_kmeans_aln(aln_file,itself=True):
    '''Input: path to a file with aligned sequences.
    Output: files produced by k-means clustering of the alignment.
    '''
aln_file = calculate_identity_percent(aln_file,itself=True)
if any((aln_file.identity_matrix<60).any()):
kmeans = KMeans(n_clusters=2)
kmeans.fit(aln_file.identity_matrix)
y_kmeans = kmeans.predict(aln_file.identity_matrix)
for kmeans_index in range(len(y_kmeans)):
name_aln_file_0 = aln_file.file_dir[0:-4] + '_0'
name_aln_file_1 = aln_file.file_dir[0:-4] + '_1'
if y_kmeans[kmeans_index] == 0:
with open(name_aln_file_0,'a') as aln_clustered:
aln_clustered.write(aln_file.name_lst[kmeans_index] + aln_file.seq_lst[kmeans_index].replace('-','').replace('\n','') + '\n')
if y_kmeans[kmeans_index] == 1:
with open(name_aln_file_1,'a') as aln_clustered:
aln_clustered.write(aln_file.name_lst[kmeans_index] + aln_file.seq_lst[kmeans_index].replace('-','').replace('\n','') + '\n')
subprocess.call('muscle ' + '-in ' +name_aln_file_0 + ' -out ' + name_aln_file_0 + '.aln 2>' + HOME_DIRECTORY + '111',shell = True)
subprocess.call('muscle ' + '-in ' +name_aln_file_1 + ' -out ' + name_aln_file_1 + '.aln 2>' + HOME_DIRECTORY + '111',shell = True)
clustering_kmeans_aln(name_aln_file_0 + '.aln',itself=True)
clustering_kmeans_aln(name_aln_file_1 + '.aln',itself=True)
os.remove(name_aln_file_0)
os.remove(name_aln_file_1)
os.remove(aln_file.file_dir)
else:
return aln_file
def calculate_identity_percent(aln_file,itself=True):
    '''Input: path to a file with aligned sequences.
    Output: a SequenceFasta object with identity_percent and identity_matrix attributes.
    itself - parameter that allows computing the identity percent for a single sequence
    '''
aln_file = SequenceFasta(aln_file)
aln_file.seq_process(strip=False)
data_persent = pd.Series()
identity_matrix = pd.DataFrame()
if itself and len(aln_file.seq_lst) == 1:
data_persent[find_tag('seq_id',aln_file.name_lst[0])+'and'+find_tag('seq_id',aln_file.name_lst[0])] = 110
aln_file.data_persent = data_persent
identity_matrix = pd.DataFrame([])
aln_file.identity_matrix = identity_matrix
return aln_file
else:
name_lst_seq_id = []
for name_seq in aln_file.name_lst:
name_lst_seq_id.append(find_tag('seq_id',name_seq))
array_100 = np.zeros((len(aln_file.name_lst), len(aln_file.name_lst))) +100
identity_matrix = pd.DataFrame(array_100,columns=name_lst_seq_id,index=name_lst_seq_id)
n=0
identical = 0
for seq_id_1 in range(len(aln_file.seq_lst)):
n += 1
for seq_id_2 in range(n,len(aln_file.seq_lst)):
for character1, character2 in zip(aln_file.seq_lst[seq_id_1],aln_file.seq_lst[seq_id_2]):
if character1 == character2:
identical +=1
seq_1 = find_tag('seq_id',aln_file.name_lst[seq_id_1])
seq_2 = find_tag('seq_id',aln_file.name_lst[seq_id_2])
persent_identical = identical / len(aln_file.seq_lst[seq_id_1]) * 100
data_persent[seq_1+'and'+seq_2] = persent_identical
identity_matrix[seq_1][seq_2] = persent_identical
identity_matrix[seq_2][seq_1] = persent_identical
identical = 0
aln_file.data_persent = data_persent
aln_file.identity_matrix = identity_matrix
return aln_file
def clustering_aln(directory):
directory_files = os.listdir(directory)
for file in directory_files:
if file.endswith('.aln'):
clustering_kmeans_aln(ALNDATA_DIRECTORY + file,itself=True)
def enumeration_identity_percent(directory):
'''Just for plot'''
data_persent_for_plot = pd.Series()
directory_files = os.listdir(directory)
for file in directory_files:
if file.endswith('.aln'):
aln_file = calculate_identity_percent(ALNDATA_DIRECTORY + file,itself=True)
data_persent_for_plot = data_persent_for_plot.append(aln_file.data_persent)
return data_persent_for_plot
print('end identity_percent')
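# A minimal, self-contained sketch of the pairwise identity computation used
# by calculate_identity_percent() above; plain Python, no project
# dependencies, and the two aligned sequences are made up for illustration.
def identity_percent_example(seq_a, seq_b):
    '''Percentage of positions at which two aligned sequences are identical.'''
    identical = sum(1 for a, b in zip(seq_a, seq_b) if a == b)
    return identical / len(seq_a) * 100

print(identity_percent_example('ATG-CGT', 'ATGACGA'))  # 5 of 7 positions match -> ~71.4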
| 2.703125
| 3
|
zeee_bot/cogs/test.py
|
zeee2/ZEEE-Discord-Bot
| 1
|
12780837
|
from os import name
import pathlib
from discord.ext import commands
import discord
from dislash import InteractionClient, ActionRow, Button, ButtonStyle, SelectMenu, SelectOption
from colored import fore, back, style
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
from zeee_bot.common import glob
class Test(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="test")
async def ___test(self, ctx):
row = ActionRow(
Button(
style=ButtonStyle.green,
label="sexy bread",
custom_id="bread_btn"
)
)
msg = await ctx.send("마 눌러바라 게이야", components=[row])
on_click = msg.create_click_listener(timeout=5)
@on_click.matching_id("bread_btn")
async def on_bread_button(inter):
await inter.reply("헤으응 부끄러웟", delete_after=2.5)
@on_click.timeout
async def on_timeout():
await msg.delete()
await ctx.send("응애 타임아웃!")
def drawProgressBar(self, d, x, y, w, h, progress, bg="black", fg="red"):
# draw background
d.ellipse((x+w, y, x+h+w, y+h), fill=bg, outline=None)
d.ellipse((x, y, x+h, y+h), fill=bg, outline=None)
d.rectangle((x+(h/2), y, x+w+(h/2), y+h), fill=bg, outline=None)
# draw progress bar
w *= progress
d.ellipse((x+w, y, x+h+w, y+h),fill=fg, outline=None)
d.ellipse((x, y, x+h, y+h),fill=fg, outline=None)
d.rectangle((x+(h/2), y, x+w+(h/2), y+h),fill=fg, outline=None)
return d
@commands.command(name='ㅅ')
async def testtest(self, ctx):
a = 'get base img.'
msg = await ctx.send(a)
base_img = Image.open(f"{pathlib.Path(__file__).parent.parent}/images/now_base.png").convert("RGBA")
draw = ImageDraw.Draw(base_img)
color = (96, 197, 241)
draw = self.drawProgressBar(draw, 15, 11, 572.5, 29, 0.5, bg=color, fg=color)
# ImageDraw.floodfill(base_img, xy=(14,24), value=color, thresh=40)
a += "\nwriting image."
await msg.edit(content=a)
base_img.save('test2.png')
a += "\nDone."
await msg.delete()
await ctx.send(file=discord.File("test2.png"))
@commands.command(name="test2")
async def __test2(self, ctx):
msg = await ctx.send(
"마 한번 골라바라 게이야",
components=[
SelectMenu(
custom_id = "bread_sexy",
placeholder="골라바라 게이야 낄낄",
max_values=2,
options=[
SelectOption("빵", "빵"),
SelectOption("빵빵", "빵빵"),
SelectOption("빵빵빵", "빵빵빵")
]
)
]
)
inter = await msg.wait_for_dropdown()
labels = [option.value for option in inter.select_menu.selected_options]
await msg.edit(content="골라부럇구만!", components=[])
await inter.reply(f"{''.join(labels)}")
def setup(bot: commands.Bot):
bot.add_cog(Test(bot))
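# A standalone sketch of the rounded progress-bar drawing used by
# drawProgressBar above, runnable without Discord; the image size, colours and
# output filename are made up for illustration.
if __name__ == "__main__":
    from PIL import Image, ImageDraw

    def draw_progress_bar(draw, x, y, w, h, progress, bg="black", fg="red"):
        # background capsule: two end circles joined by a rectangle
        draw.ellipse((x + w, y, x + h + w, y + h), fill=bg)
        draw.ellipse((x, y, x + h, y + h), fill=bg)
        draw.rectangle((x + h / 2, y, x + w + h / 2, y + h), fill=bg)
        # foreground capsule scaled by progress (0.0 .. 1.0)
        w *= progress
        draw.ellipse((x + w, y, x + h + w, y + h), fill=fg)
        draw.ellipse((x, y, x + h, y + h), fill=fg)
        draw.rectangle((x + h / 2, y, x + w + h / 2, y + h), fill=fg)

    img = Image.new("RGB", (620, 50), "white")
    draw_progress_bar(ImageDraw.Draw(img), 15, 11, 572.5, 29, 0.5,
                      bg=(220, 220, 220), fg=(96, 197, 241))
    img.save("progress_example.png")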
| 2.5
| 2
|
setup.py
|
gabrielfern/automated-leda-tasks
| 9
|
12780838
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
try:
from setuptools import setup
except ImportError:
from os import system
system('pip install --user setuptools')
from setuptools import setup
setup(
name='automated',
version='1.3.2',
description='Automatizador de tarefas - LEDA',
license='MIT',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
url='https://github.com/gabrielfern/automated-leda-tasks',
author='<NAME>',
author_email='<EMAIL>',
packages=['automated'],
install_requires=['requests', 'python-crontab'],
)
| 1.5625
| 2
|
example.py
|
JustEdro/appstoreconnectapi
| 0
|
12780839
|
<reponame>JustEdro/appstoreconnectapi
#!/usr/bin/env python
import sys
from appstoreconnect import Api
if __name__ == "__main__":
key_id = sys.argv[1]
key_file = sys.argv[2]
issuer_id = sys.argv[3]
api = Api(key_id, key_file, issuer_id)
apps = api.apps()
for app in apps["data"]:
print(app["attributes"]["name"])
| 2.15625
| 2
|
computeFeatures/seqStep/seqToolManagers/conservationTools/Al2coManager.py
|
rsanchezgarc/BIPSPI
| 5
|
12780840
|
from __future__ import absolute_import, print_function
import os
import numpy as np
from subprocess import Popen, PIPE
from Bio.PDB.Polypeptide import aa1 as AA_STANDARD
from ....featuresComputer import FeatureComputerException
from ...seqToolManager import SeqToolManager
from .al2coWorkers.parsePsiBlast import parsePsiBlast
from utils import myMakeDir, tryToRemove
class Al2coManager(SeqToolManager):
'''
Computes al2co and processes their outputs. Extends class seqToolManager
'''
VAR_LIST= ["al2coScore", "al2coScoreNorm"]
BAD_SCORE_CONSERVATION = "-1048576" #Something went wrong tag
def __init__(self, computedFeatsRootDir, winSize=None, statusManager=None):
'''
:param computedFeatsRootDir: str. root path where results will be saved
:param winSize: int>=1 or None. The size of the windows for sliding window if desired
:param statusManager: class that implements .setStatus(msg) to communicate
'''
SeqToolManager.__init__(self, computedFeatsRootDir, winSize)
self.al2coOutPath= myMakeDir(self.computedFeatsRootDir,"al2co")
if winSize:
self.al2coPathWindowed= myMakeDir(self.computedFeatsRootDir,"al2co_wSize"+str(winSize))
else:
self.al2coPathWindowed= None
def getFinalPath(self):
'''
returns path where results are saved
:return al2coOutPath: str
'''
return self.al2coOutPath
def getFNames(self, prefixExtended):
'''
Returns a dict that contains the fnames that will be used by al2co
:param prefixExtended. prefix for output fnames.
:return list of fnames: [ fname1, fnam2, ...]
'''
al2coProc= os.path.join(self.al2coOutPath, prefixExtended+".al2co.gz")
fNames=[al2coProc]
if not self.winSize is None:
al2coWindowedOutName= os.path.join(self.al2coPathWindowed, prefixExtended+".wsize"+str(self.winSize)+".al2co.gz")
fNames+= [al2coWindowedOutName]
return fNames
def computeFromSeqStructMapper(self, seqStructMap, prefixExtended, psiblastOutName, pssmOutNameRaw):
'''
Computes al2co for the sequence seqStr, that is contained at fastaInFname. This sequence is
associated with prefixExtended as an unambiguous id
:param seqStructMap: computeFeatures.seqStep.seqToolManagers.seqExtraction.SeqStructMapper
:param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
:param psiblastOutName: str. Path to psiblast aligments results
:param pssmOutNameRaw: str. Path to psiblast pssms results
'''
msaFname= None
prefix, chainType, chainId= self.splitExtendedPrefix(prefixExtended)[:3]
seqStr, fastaFname= seqStructMap.getSeq(chainType, chainId) # repeat as psiBlastManager can modify seqs
seqStructMap.setCurrentSeq(seqStr, chainType, chainId)
if self.checkAlreayComputed(prefixExtended):
print("Al2co already computed for %s"%prefixExtended)
return 0
fNames= self.getFNames(prefixExtended)
print("launching al2co over %s"%prefixExtended)
al2coProcName= fNames[0]
al2coRawName= os.path.join(self.al2coOutPath, prefixExtended+".fasta.csv")
try:
if os.path.isfile(psiblastOutName):
alignedSeqsDict= parsePsiBlast( inputSeq=seqStr, psiBlastOut=psiblastOutName)
filteredSeqsFname= self.runCdHit(alignedSeqsDict, inputSeq=seqStr, psiBlastOut=psiblastOutName)
msaFname= self.runClustalW(filteredSeqsFname, psiBlastOut=psiblastOutName)
cmd= [self.al2coBin, "-i", msaFname,"-m", "0", "-f", "2", "-a", "F", "-b", "50",
"-g", "0.50", "-w", "1", "-c", "0", "-o", al2coRawName, "-t", al2coProcName]
print(" ".join(cmd))
process= Popen(cmd, stdout=PIPE, stderr=PIPE)
processOut= process.communicate()
if len(processOut[1])>0:
print("Error computing al2co. Caught stdin/stderr:\n",processOut[0],processOut[1])
else:
print("Error computing al2co. Psiout does not exists for %s"%(prefixExtended))
al2coRawName=None
dataList= self.processAl2co(seqStr, seqStructMap, prefixExtended, al2coRawName, al2coProcName)
if self.winSize:
self.makeWindowed( dataList, ["al2co", "al2coNorm"], [Al2coManager.BAD_SCORE_CONSERVATION]*2, [None]*2,
fNames[1])
except (Exception, KeyboardInterrupt):
self.tryToRemoveAllFnames(prefixExtended)
raise
finally:
if msaFname: tryToRemove(msaFname)
def processAl2co(self, seq, seqStructMap, prefixExtended, al2coRaw, al2coProc):
'''
Reads al2co output file and writes another one with tabulated format, headers and
some error checking.
:param: seq: str. Sequence of the chain
:param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
:param al2coRaw: str. Path to al2co results
:param al2coProc: str. Path where formatted results will be saved.
'''
if al2coRaw is None:
conserData = [(letter, Al2coManager.BAD_SCORE_CONSERVATION) for letter in seq]
else:
try:
conserData = self.loadRawAl2co(al2coRaw)
except IOError:
conserData= [ (letter, Al2coManager.BAD_SCORE_CONSERVATION) for letter in seq]
prefix, chainType, chainId= self.splitExtendedPrefix(prefixExtended)[:3]
# print(len(conserData)); raw_input("enter")
try:
alcoIx=0
seqIx=0
seqLen= len(seq)
letters, conserVals = zip(* conserData)
conserVals= [float(elem) for elem in conserVals]
alcoLen= len(conserData)
dataList=[]
listOfRowsToPrint=[]
mean_val= np.mean(conserVals)
std_val= np.std(conserVals)
while seqIx<seqLen and alcoIx<alcoLen:
letter= seq[seqIx]
letterAl2co, consVal= conserData[alcoIx]
if letterAl2co== letter or (letterAl2co=="-" and letter=="X"):
structIndex= seqStructMap.seqToStructIndex(chainType, chainId, seqIx, asString= True)
# print(seqIx, letter, alcoIx, structIndex)
if structIndex:
if self.filterOutLabels and structIndex[-1].isalpha():
continue
else:
structIndex=str(seqIx)+"?"
if std_val!=0:
consValNormalized= (float(consVal)- mean_val)/std_val
else:
consValNormalized=float(consVal)
dataList.append( ( (chainId, structIndex,letter), ( [consVal], [str(consValNormalized)],) ) )
listOfRowsToPrint.append( "%s %s %s %s %s"%( chainId, structIndex, letter, consVal, consValNormalized) )
alcoIx+=1
seqIx+=1
elif not letter in AA_STANDARD and letterAl2co=="-":
alcoIx+=1
seqIx+=1
elif letterAl2co=="-":
alcoIx+=1
else:
print(conserData)
print(alcoIx, seqIx)
raise ValueError("Al2co mismatch %s %s "%(letterAl2co, letter))
# print(len(listOfRowsToPrint)); raw_input("enter to continue")
self.writeResultsFromDataDictSingleChain( {chainId: listOfRowsToPrint }, outName= al2coProc)
return dataList
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%al2coProc)
tryToRemove(al2coProc)
raise
finally:
if al2coRaw is not None:
tryToRemove(al2coRaw)
pass
def loadRawAl2co(self, filename):
    '''
      Loads an al2co output file
      :param filename: str. Path to the al2co file.
      :return list of [residue, score] pairs, one per alignment position.
    '''
conserv= []
for line in open(filename):
lineArray=line.split()
if lineArray[0][0].isdigit():
conserv.append(lineArray[1:3])
else:
break
return conserv
def runCdHit(self, allHits, inputSeq, psiBlastOut, pairSeqIdThr=0.95):
tmpName= os.path.basename(psiBlastOut).split(".")[0]
tmpName= os.path.join(self.tmp, tmpName)
cdhitInName= tmpName+".in-cdhit"
cdhitOutName= tmpName+".out-cdhit"
try:
with open(cdhitInName, "w") as f:
for hit in allHits:
f.write("> %s\n"%(hit["target_full_id"]))
f.write("%s\n"%(hit["targetSeq"].replace("-","")) )
if(pairSeqIdThr > .70 and pairSeqIdThr <= 1.00): n=5
elif (pairSeqIdThr <= .70 and pairSeqIdThr >= .55): n=4
elif (pairSeqIdThr < .55 and pairSeqIdThr >= .50): n=3
elif (pairSeqIdThr < .50 and pairSeqIdThr >= .40): n=2
else: raise ValueError("Error, just .4<=pairSeqIdThr<=1.00 allowed")
cdhitCmd= [self.cdHitBin, "-i", cdhitInName, "-o", cdhitOutName, "-n", str(n),
"-c", str(pairSeqIdThr), "-T", str(self.psiBlastNThrs)]
print(" ".join(cdhitCmd))
proc = Popen(cdhitCmd, stdin= PIPE, stdout=PIPE, stderr=PIPE)
output= proc.communicate()
if output== None or output[1]!="" or "There was an error cd-hit psiblast" in output[0]:
print(output)
print ("Error when parsing %s for al2Co"%psiBlastOut)
raise FeatureComputerException("Error when cd-hit %s for al2Co"%psiBlastOut)
with open(cdhitOutName, "r+") as f:
fileData = f.read()
f.seek(0, 0)
f.write("> InputSeq\n")
f.write("%s\n"%(inputSeq.replace("-","")) )
f.write(fileData+"\n")
return cdhitOutName
except (Exception, KeyboardInterrupt):
tryToRemove(cdhitOutName)
raise
finally:
tryToRemove(cdhitInName)
def runClustalW(self, filteredSeqsFname, psiBlastOut, clustalWOutName=None):
tmpFnameCommon= ".".join(filteredSeqsFname.split(".")[:-1])
if clustalWOutName is None:
clustalWOutName= tmpFnameCommon+".clustalw"
clustalCommand=[self.clustalW, "-infile=%s"%filteredSeqsFname, "-outfile=%s"%clustalWOutName, "-outorder=INPUT"]
print(" ".join(clustalCommand))
try :
proc = Popen(clustalCommand, stdin= PIPE, stdout=PIPE, stderr=PIPE)
output= proc.communicate()
if output== None or output[1]!="" or "There was an error parsing psiblast, clustalw" in output[0]:
print(output)
print ("Error when clustalw %s for al2Co"%psiBlastOut)
raise FeatureComputerException("Error when clustalw %s for al2Co"%psiBlastOut)
return clustalWOutName
except (Exception, KeyboardInterrupt):
tryToRemove(clustalWOutName)
raise
finally:
tryToRemove(filteredSeqsFname)
tryToRemove(filteredSeqsFname+".clstr")
tryToRemove( tmpFnameCommon+".dnd")
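if __name__ == "__main__":
    # A minimal sketch of the per-chain z-score normalization that
    # processAl2co() applies to the al2co conservation values above; the
    # scores below are made up for illustration.
    cons_vals = [0.2, 0.8, 0.5, 0.9, 0.1]
    mean_val, std_val = np.mean(cons_vals), np.std(cons_vals)
    normalized = [(v - mean_val) / std_val if std_val != 0 else v for v in cons_vals]
    print(normalized)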
| 2.28125
| 2
|
data.py
|
nikhilroxtomar/Polyp-Segmentation-using-UNET-in-TensorFlow-2.0
| 45
|
12780841
|
import os
import numpy as np
import cv2
from glob import glob
import tensorflow as tf
from sklearn.model_selection import train_test_split
def load_data(path, split=0.1):
images = sorted(glob(os.path.join(path, "images/*")))
masks = sorted(glob(os.path.join(path, "masks/*")))
total_size = len(images)
valid_size = int(split * total_size)
test_size = int(split * total_size)
train_x, valid_x = train_test_split(images, test_size=valid_size, random_state=42)
train_y, valid_y = train_test_split(masks, test_size=valid_size, random_state=42)
train_x, test_x = train_test_split(train_x, test_size=test_size, random_state=42)
train_y, test_y = train_test_split(train_y, test_size=test_size, random_state=42)
return (train_x, train_y), (valid_x, valid_y), (test_x, test_y)
def read_image(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x/255.0
return x
def read_mask(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = x/255.0
x = np.expand_dims(x, axis=-1)
return x
def tf_parse(x, y):
def _parse(x, y):
x = read_image(x)
y = read_mask(y)
return x, y
x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])
x.set_shape([256, 256, 3])
y.set_shape([256, 256, 1])
return x, y
def tf_dataset(x, y, batch=8):
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(tf_parse)
dataset = dataset.batch(batch)
dataset = dataset.repeat()
return dataset
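# A minimal usage sketch of the helpers above; "CVC-612/" is a hypothetical
# dataset directory containing images/ and masks/ sub-folders.
if __name__ == "__main__":
    (train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_data("CVC-612/")
    print(len(train_x), len(valid_x), len(test_x))

    train_dataset = tf_dataset(train_x, train_y, batch=8)
    for images, masks in train_dataset.take(1):
        print(images.shape, masks.shape)   # (8, 256, 256, 3) (8, 256, 256, 1)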
| 2.609375
| 3
|
reddit/posts/urls.py
|
Arnman16/reddirt
| 0
|
12780842
|
<gh_stars>0
from django.urls import include, path
from .views import PostView, SubredditView, AllSubredditsView, FrontPageView
app_name = 'reddit'
urlpatterns = [
path("frontpage/", FrontPageView.as_view()),
path("all/", AllSubredditsView.as_view(), name='all' ),
path(route='<slug:slug>/', view=SubredditView.as_view(), name='subreddit'),
path(route='<slug:subreddit_slug>/<slug:slug>', view=PostView.as_view(), name='post'),
]
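# A sketch of resolving the named routes above with django.urls.reverse(),
# assuming this urlconf is included under the "reddit" application namespace;
# the slugs are hypothetical.
#
#   reverse("reddit:all")                                   -> "/all/"
#   reverse("reddit:subreddit", kwargs={"slug": "python"})  -> "/python/"
#   reverse("reddit:post", kwargs={"subreddit_slug": "python", "slug": "hi"})
#                                                           -> "/python/hi"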
| 2.046875
| 2
|
baseline/CLIP-zero-shot/detection/sliding_params.py
|
Wuziyi616/slot_attention
| 0
|
12780843
|
<filename>baseline/CLIP-zero-shot/detection/sliding_params.py
from typing import Tuple
import attr
@attr.s(auto_attribs=True)
class SlidingParams:
# model configs
    resolution: Tuple[int, int] = (256, 256)  # since we are not using ViT
num_slots: int = 7 # at most 6 obj per image/video
# architecture of CLIP pre-trained model
clip_arch: str = 'ViT-B/32'
# data
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video"
data_root: str = "/scratch/ssd004/scratch/jiaqixi/data/clevr_video/train/"
shuffle_obj: bool = False
# Normalization for natural img or original slot attention one
    simple_normalize: bool = False  # since we are not using ViT
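# A minimal usage sketch: attr.s(auto_attribs=True) turns the annotated class
# attributes above into __init__ parameters, so defaults can be overridden per
# run; the overridden values below are made up for illustration.
if __name__ == "__main__":
    params = SlidingParams(num_slots=5, shuffle_obj=True)
    print(params.resolution, params.num_slots, params.clip_arch)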
| 2.046875
| 2
|
day23/lib.py
|
heijp06/AoC-2021
| 0
|
12780844
|
from queue import PriorityQueue
from burrow import Burrow, parse
def part1(rows: list[str]) -> int | None:
return go(rows)
def part2(rows: list[str]) -> int | None:
new_rows = list(rows[:3]) + [
" #D#C#B#A#",
" #D#B#A#C#",
] + list(rows[3:])
return go(new_rows)
def go(rows: list[str]) -> int | None:
burrow = parse(rows)
burrows: PriorityQueue = PriorityQueue()
burrows.put((burrow.min_cost_to_solution(), burrow))
seen = {burrow: 0}
min_cost = 0 if burrow.final() else None
while burrows.qsize():
min_cost_to_solution, burrow = burrows.get()
if min_cost and min_cost <= min_cost_to_solution:
break
old_cost = seen[burrow]
for extra_cost, new_burrow in move(burrow):
new_cost = old_cost + extra_cost
if (
(not min_cost or new_cost < min_cost)
and (new_burrow not in seen or new_cost < seen[new_burrow])
):
seen[new_burrow] = new_cost
if new_burrow.final():
min_cost = new_cost
else:
burrows.put((new_cost + new_burrow.min_cost_to_solution(), new_burrow))
return min_cost
def move(burrow: Burrow) -> list[tuple[int, Burrow]]:
for amphipod in burrow.amphipods:
new_burrow = amphipod.move_home(burrow)
if new_burrow:
return [new_burrow]
return [
new_burrow
for amphipod in burrow.amphipods
for new_burrow in amphipod.move_hallway(burrow)
]
def dump(burrow: Burrow) -> None:
for row in range(burrow.height):
for column in range(13 if row < 3 else 11):
amphipod = burrow[row, column]
if amphipod:
print(amphipod.kind, end='')
continue
if (
row == 0 or
column in (0, 12) and row == 1 or
column in (0, 1, 2, 4, 6, 8, 10, 11, 12) and row == 2 or
column in (2, 4, 6, 8, 10) and 2 < row < burrow.height - 1 or
column in (2, 3, 4, 5, 6, 7, 8, 9,
10) and row == burrow.height - 1
):
print('#', end='')
continue
if row > 2 and (column < 2 or column > 10):
print(' ', end='')
continue
print('.', end='')
print()
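# A usage sketch; "input.txt" is a hypothetical file holding the puzzle's
# burrow diagram, one line per row, as expected by parse() in burrow.py.
if __name__ == "__main__":
    with open("input.txt") as f:
        rows = f.read().splitlines()
    print("part 1:", part1(rows))
    print("part 2:", part2(rows))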
| 3.625
| 4
|
InterviewBit/Trees/postorder_traversal.py
|
codervikash/online-courses
| 0
|
12780845
|
"""
Given a binary tree, return the postorder traversal of its nodes’ values.
Example :
Given binary tree
1
\
2
/
3
return [3,2,1].
Using recursion is not allowed.
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def peek(self, stack):
if len(stack) > 0:
return stack[-1]
return None
# @param A : root node of tree
# @return a list of integers
def postorderTraversal(self, root):
ans = []
if root is None:
            return ans
stack = []
while(True):
while (root):
if root.right is not None:
stack.append(root.right)
stack.append(root)
root = root.left
root = stack.pop()
if (root.right is not None and
self.peek(stack) == root.right):
stack.pop()
stack.append(root)
root = root.right
else:
ans.append(root.val)
root = None
if (len(stack) <= 0):
break
return ans
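# A usage sketch that builds the example tree from the docstring above
# (1 with right child 2, whose left child is 3); TreeNode mirrors the
# commented-out definition at the top of the file.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().postorderTraversal(root))  # [3, 2, 1]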
| 4.0625
| 4
|
hits_per_minute.py
|
nathan242/sysadmin-tools
| 0
|
12780846
|
<gh_stars>0
#!/usr/bin/python
import sys
import os
import time
count = {}
total = 0
highest = 0
average = 0
log = open(sys.argv[1], "r")
for line in log:
datestring = line.split()[3].replace("[", "")[:-3]
if datestring in count:
count[datestring] += 1
else:
count[datestring] = 1
for i in sorted(count.keys()):
print i+" : "+str(count[i])
total += count[i]
if count[i] > highest:
highest = count[i]
average = total/len(count)
print "TOTAL: "+str(total)
print "HIGHEST: "+str(highest)
print "AVERAGE: "+str(average)
| 3.515625
| 4
|
Easy/Codeland Username Validation.py
|
edaaydinea/Coderbyte
| 6
|
12780847
|
import re
def CodelandUsernameValidation(strParam):
# code goes here
valid = "false"
if strParam[0].isalpha():
        if 4 <= len(strParam) <= 25:
if strParam[-1] != '_':
if re.match('^[a-zA-Z0-9_]+$', strParam):
valid = "true"
# code goes here
return valid
# keep this function call here
print(CodelandUsernameValidation(input()))
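# A few illustrative calls (the inputs are made up):
#   CodelandUsernameValidation("aa")                 -> "false" (too short)
#   CodelandUsernameValidation("u__hello_world123_") -> "false" (ends with "_")
#   CodelandUsernameValidation("u__hello_world123")  -> "true"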
| 3.34375
| 3
|
inject_version.py
|
Open-Security-Tools/security_model_tw_plugin
| 1
|
12780848
|
#!/usr/bin/env python
import os
import subprocess
TID_FILE = "src/tiddlers/system/plugins/security_tools/twsm.tid"
VERSION_FILE = "VERSION"
def get_commit_count():
return int(subprocess.check_output(["git", "rev-list", "--count", "HEAD"]).decode('utf-8'))
def main():
with open(VERSION_FILE, "r") as f:
version = f.read().strip()
# Some sanity
mm = version.split(".")
assert len(mm) == 2, "Expected version format MAJOR.MINOR"
assert int(mm[0]) + int(mm[1]), "Expected version integers MAJOR.MINOR"
ls = list()
with open(TID_FILE, "r") as f:
version_string = "version: {}.{}".format(version, get_commit_count())
for l in f:
if l.startswith("version:"):
print("Injecting version: {}".format(version_string))
ls.append(version_string + "\n")
else:
ls.append(l)
with open(TID_FILE, "w") as f:
f.write("".join(ls))
print("Finished")
if __name__ == "__main__":
main()
| 2.625
| 3
|
image_viewer.py
|
juu7g/Python-Image-Viewer
| 0
|
12780849
|
"""
画像ビューワー
"""
import itertools, os, sys
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkFont
from tkinter import filedialog
from tkinterdnd2 import *
from typing import Tuple  # for function annotations
from PIL import Image, ImageTk # Pillow
from PIL.ExifTags import TAGS, GPSTAGS  # EXIF tag information
class ListView(ttk.Frame):
"""
画像をリストビューで表示する
"""
check_str = {"uncheck":"☐", "checked":"☑"} # ☐☑☒チェックボックス用文字
def __init__(self, master):
"""
画面の作成
上のFrame: 入力用
下のFrame: 出力用
"""
super().__init__(master)
self.image_op = ImageOp()
self.u_frame = tk.Frame(bg="white") # 背景色を付けて配置を見る
self.b_frame = tk.Frame(bg="green") # 背景色を付けて配置を見る
self.u_frame.pack(fill=tk.X)
self.b_frame.pack(fill=tk.BOTH, expand=True)
self.create_input_frame(self.u_frame)
self.treeview1 = self.create_tree_frame(self.b_frame)
# bind
self.treeview1.bind("<Button 1>", self.togle_checkbox) # マウスを左クリックしたときの動作
# self.treeview1.bind("<Double 1>", self.preview_image) # マウスをダブルクリックしたときの動作
self.treeview1.bind("<Double 3>", self.preview_image) # マウスを右ダブルクリックしたときの動作
# マウスのクリックとダブルクリックを併用する場合
# self.double_click_flag =False
# self.treeview1.bind("<Button 1>", self.mouse_click) # マウスを左クリックしたときの動作
# self.treeview1.bind("<Double 1>", self.double_click) # マウスをダブルクリックしたときの動作
def fixed_map(self, option):
# Fix for setting text colour for Tkinter 8.6.9
# From: https://core.tcl.tk/tk/info/509cafafae
#
# Returns the style map for 'option' with any styles starting with
# ('!disabled', '!selected', ...) filtered out.
# style.map() returns an empty list for missing options, so this
# should be future-safe.
return [elm for elm in self.style.map('Treeview', query_opt=option) if
elm[:2] != ('!disabled', '!selected')]
def create_input_frame(self, parent):
"""
入力項目の画面の作成
上段:ファイル選択ボタン、すべて選択、選択解除、プレビューボタン
下段:メッセージ
"""
self.btn_f_sel = tk.Button(parent, text="ファイル選択", command=self.select_files)
self.btn_select_all = tk.Button(parent, text="すべて選択", command=self.select_all)
self.btn_deselection = tk.Button(parent, text="選択解除", command=self.deselection)
self.btn_preview = tk.Button(parent, text="プレビュー", command=self.preview_images)
self.msg = tk.StringVar(value="msg")
self.lbl_msg = tk.Label(parent
, textvariable=self.msg
, justify=tk.LEFT
, font=("Fixedsys", 11)
, relief=tk.RIDGE
, anchor=tk.W)
# pack
self.lbl_msg.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True) # 先にpackしないと下に配置されない
self.btn_preview.pack(side=tk.RIGHT, padx=5)
self.btn_deselection.pack(side=tk.RIGHT, padx=5)
self.btn_select_all.pack(side=tk.RIGHT, padx=5)
self.btn_f_sel.pack(side=tk.RIGHT, padx=5)
# bind
def create_tree_frame(self, parent:tk.Frame) -> ttk.Treeview:
"""
Treeviewとスクロールバーを持つframeを作成する。
frameは、Treeviewとスクロールバーをセットする
Treeviewは、ツリーと表形式、ツリーに画像、行は縞模様
Args:
Frame: 親Frame
Returns:
Treeview: ツリービュー
"""
# tagを有効にするためstyleを更新 tkinter8.6?以降必要みたい
# 表の文字色、背景色の設定に必要
self.style = ttk.Style()
self.style.map('Treeview', foreground=self.fixed_map('foreground')
, background=self.fixed_map('background'))
# スタイルの設定
self.style.configure("Treeview", rowheight = 150) # 画像を150pxで表示するので初期設定する
# frameの作成。frameにTreeviewとScrollbarを配置する
frame4tree = tk.Frame(parent, bg="pink")
frame4tree.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=2, pady=2)
# Treeviewの作成
treeview1 = ttk.Treeview(frame4tree, style="Treeview")
# treeview1["show"] = "headings" # デフォルトは treeとheadingsなので設定しない
treeview1.tag_configure("odd", background="ivory2") # 奇数行の背景色を指定するtagを作成
# 水平スクロールバーの作成
h_scrollbar = tk.Scrollbar(frame4tree, orient=tk.HORIZONTAL, command=treeview1.xview)
treeview1.configure(xscrollcommand=h_scrollbar.set)
# 垂直スクロールバーの作成
v_scrollbar = tk.Scrollbar(frame4tree, orient=tk.VERTICAL, command=treeview1.yview)
treeview1.configure(yscrollcommand=v_scrollbar.set)
# pack expandがある方を後にpackしないと他が見えなくなる
h_scrollbar.pack(side=tk.BOTTOM, fill=tk.X) # 先にパックしないと表示されない
v_scrollbar.pack(side=tk.RIGHT, fill=tk.Y) # 先にパックしないと表示されない
treeview1.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=2, pady=2)
treeview1.column("#0", width=200, stretch=False) # ツリー列の幅の設定
return treeview1
def update_tree_column(self, tree1:ttk.Treeview, columns:list):
"""
TreeViewの列定義と見出しを設定
見出しの文字長で列幅を初期設定
Args:
Treeview: treeviewオブジェクト
list: 列名のリスト
"""
tree1["columns"] = columns # treeviewの列定義を設定
font1 = tkFont.Font()
for col_name in columns:
tree1.heading(col_name, text=col_name) # 見出しの設定
width1 = font1.measure(col_name) # 見出しの文字幅をピクセルで取得
tree1.column(col_name, width=width1) # 見出し幅の設定
def update_tree_by_result(self, tree1:ttk.Treeview, rows:list, images:list):
"""
rows(表データ)、images(画像のデータ)をTreeViewに設定
要素の文字幅が見出しの文字幅より長い場合は、列幅を変更する。
奇数列の背景色を変更
Args:
Treeview: Treeviewインスタンス
list: 行データ(行リストの列リスト)
list: 画像データ
"""
if not rows: # 要素が無ければ戻る
return
font1 = tkFont.Font()
# 要素の長さにより列幅を修正
for i, _ in enumerate(rows[0]): # 列数分回す(1行目の要素数分)
# 同じ列のデータをリストにし列の値の長さを求め、最大となる列のデータを求める。
# 値は数字もあるので文字に変換し長さを求める。また、Noneは'None'となるので' 'とする。
max_str = max([x[i] for x in rows], key=lambda x:len(str(x))) or " "
# 求めたものが文字列だったら、改行された状態での最大となるデータを求める。
# 厳密にはこの状態で最大となるデータを探さなければならないが割愛
if type(max_str) is str:
max_str = max(max_str.split("\n"), key=len)
width1 = font1.measure(max_str) # 文字幅をピクセルで取得
curent_width = tree1.column(tree1['columns'][i], width=None) # 現在の幅を取得
# 設定済みの列幅より列データの幅の方が大きいなら列幅を再設定
if width1 > curent_width:
tree1.column(tree1['columns'][i], width=width1) # 見出し幅の再設定
# print(f"幅の再設定 幅:{width1}、値:{max_str}") # debug用
tree1.delete(*tree1.get_children()) # Treeviewをクリア
# 要素の追加
for i, row in enumerate(rows):
tags1 = [] # tag設定値の初期化
if i & 1: # 奇数か? i % 2 == 1:
tags1.append("odd") # 奇数番目(treeviewは0始まりなので偶数行)だけ背景色を変える(oddタグを設定)
# 要素の追加(image=はツリー列の画像、text=はツリー列の文字(疑似チェックボックス))
iid = tree1.insert("", tk.END, values=row, tags=tags1,
image=images[i], text=self.check_str["uncheck"]) # Treeviewに1行分のデータを設定
def open_file_and_get_data(self, event=None):
"""
self.file_pathsのパスからファイル情報、画像サムネイルを作成
Treeviewに情報追加
データの幅でTreeviewの列の幅を設定する
データの行数でTreeviewの行の高さを設定する(行ごとにはできないので一番高い行に合わせる)
"""
self.image_op.msg = ""
# DnD対応
if event:
# DnDのファイル情報はevent.dataで取得
# "{空白を含むパス名1} 空白を含まないパス名1"が返る
# widget.tk.splitlistでパス名のタプルに変換
self.file_paths = self.u_frame.tk.splitlist(event.data)
# 取得したパスから拡張子がself.extentiosのkeyに含まれるものだけにする
file_paths2 = tuple(path for path in self.file_paths if os.path.splitext(path)[1].lower() in self.image_op.extensions)
if len(file_paths2) == 0:
self.image_op.msg = "対象のファイルがありません"
self.msg.set(self.image_op.msg)
return
if file_paths2 != self.file_paths:
self.image_op.msg = "対象外のファイルは除きました"
self.file_paths = file_paths2
# 取得したパスから表示データと画像を作成
columns1, rows1, images1, msg1 = self.image_op.get_images(self.file_paths)
self.d_images = [] # ダイアログ表示用画像初期化
self.msg.set(self.image_op.msg) # エラーメッセージの表示
# 見出しの文字長で列幅を初期設定、treeviewのカラム幅を文字長に合わせて調整
self.update_tree_column(self.treeview1, columns1)
# 列項目を右寄せ
# self.treeview1.column("#0", anchor=tk.E) # 列項目を右寄せ(ツリー)#0には働かないみたい
self.treeview1.column("#2", anchor=tk.E) # 列項目を右寄せ(幅)
self.treeview1.column("#3", anchor=tk.E) # 列項目を右寄せ(高さ)
self.treeview1.column("#4", anchor=tk.E) # 列項目を右寄せ(サイズ)
# rows、画像をTreeViewに設定
# 要素の文字幅が見出しの文字幅より長い場合は、列幅を変更する。偶数列の背景色を変更
self.update_tree_by_result(self.treeview1, rows1, images1)
# 一番行数の多い行に合わせて高さを設定する
# 2次元のデータを平坦化しstr型だけを抽出する
cells = [s for s in itertools.chain.from_iterable(rows1) if type(s) is str]
if cells:
# 抽出したリストの要素の中で改行の数の最も多い要素を取得
longest_cell = max(cells, key=lambda x:x.count("\n"))
max_row_lines = longest_cell.count("\n") + 1 # 改行の数を数える
# Treeviewの行の高さを変更 # タブごとのスタイルの設定
if max_row_lines * 18 > 150:
self.style.configure("Treeview", rowheight = 18 * max_row_lines)
def select_files(self, event=None):
"""
ファイル選択ダイアログを表示。選択したファイルパスを取得
ファイル情報や画像を取得して表示
"""
# 拡張子の辞書からfiletypes用のデータを作成
# 辞書{".csv":"CSV", ".tsv":"TSV"}、filetypes=[("CSV",".csv"), ("TSV",".tsv")]
self.file_paths = filedialog.askopenfilenames(
filetypes=[(value, key) for key, value in self.image_op.extensions.items()])
self.open_file_and_get_data() # ファイル情報や画像を取得して表示
    # For using single click and double click together.
    # Unused because the response felt sluggish; kept for reference.
def mouse_click(self, event=None):
"""
マウスのシングルクリック時の処理
シングルクリックとダブルクリックイベントは両方発生するので
シングルクリックイベントでダブルクリックイベントの発生を待ち、
ダブルクリックが発生してから共通の処理(中身は分ける)を実行する
"""
self.treeview1.after(200, self.mouse_action, event)
    # For using single click and double click together.
def double_click(self,event=None):
"""
マウスのダブルクリック時の処理
ダブルマリックの発生をフラグに設定
"""
self.double_click_flag = True
    # For using single click and double click together.
def mouse_action(self, event=None):
"""
マウスクリック時の処理
ダブルクリック発生フラグを確認して処理を実行
ダブルクリック用処理実行後はフラグをクリア
"""
if self.double_click_flag:
self.preview_image(event)
self.double_click_flag =False
else:
self.togle_checkbox(event)
def togle_checkbox(self, event=None):
"""
チェックボックスの状態を反転
"""
rowid = self.treeview1.identify_row(event.y) # マウスの座標から対象の行を取得する
if self.treeview1.item(rowid, text=None) == self.check_str["uncheck"]:
self.treeview1.item(rowid, text=self.check_str["checked"])
else:
self.treeview1.item(rowid, text=self.check_str["uncheck"])
def preview_image(self, event=None, path=""):
"""
画像のプレビュー
ダイアログ表示
Args:
string: ファイルパス(ない場合もある)
"""
        # For using single click and double click together:
        # the single-click handler has already run, so it is cancelled here.
        # Only needed when click and double click share the left button.
# self.togle_checkbox(event)
if event:
rowid = self.treeview1.identify_row(event.y) # マウスの座標から対象の行を取得する
path1 = self.treeview1.item(rowid)["values"][0].replace("\n", "") # ファイル名取得
else:
path1 = path
# ダイアログ表示
dialog = tk.Toplevel(self) # モードレスダイアログの作成
dialog.title("Preview") # タイトル
self.d_images.append(ImageTk.PhotoImage(file=path1)) # 複数表示する時のために画像を残す
label1 = tk.Label(dialog, image=self.d_images[-1]) # 最後のものを表示
label1.pack()
def preview_images(self, event=None):
"""
選択された画像のプレビュー
"""
self.msg.set("")
# Treeviewのチェックボックスがオンの行のファイル名列(1列)を取得。改行してあるので除く。
paths = [self.treeview1.item(x)["values"][0].replace("\n", "") for x in self.treeview1.get_children() if self.treeview1.item(x)["text"] == self.check_str["checked"]]
for path1 in paths:
self.preview_image(path=path1)
if not paths:
self.msg.set("選択された画像がありません")
def select_all(self, event=None):
"""
Treeviewの要素をすべて選択する
"""
self.set_all_checkbox("checked")
def deselection(self, event=None):
"""
Treeviewの要素をすべて選択解除する
"""
self.set_all_checkbox("uncheck")
def set_all_checkbox(self, check_stat:str):
"""
Treeviewのチェックボックスをすべて設定する
Args:
str: "checked" または "uncheck"
"""
for iid in self.treeview1.get_children():
self.treeview1.item(iid, text=self.check_str[check_stat])
class ImageOp():
"""
画像データの操作を行う
"""
def __init__(self):
self.msg = "" # メッセージ受渡し用
# 対象拡張子 辞書(key:拡張子、値:表示文字)
self.extensions = {".png .jpg .gif .webp":"画像", ".png":"PNG",
".jpg":"JPEG", ".gif":"GIF", ".webp":"WebP"}
def get_images(self, file_names:tuple) -> Tuple[list, str]:
"""
画像ファイルを読みデータを返す
Args:
str: ファイル名
Returns:
columns1(list): 列名
rows1(list): 行データ(行リストの列リスト)
self.images(list): 画像データ
msg1(str): エラーメッセージ(空文はエラーなし)
"""
msg1 = ""
columns1 = ["ファイル名", "幅(px)", "高さ(px)", "サイズ(kB)", "画像情報 EXIF", "位置情報 GPS"]
try:
self.images = [] # selfでないとうまくいかない。理由はローカル変数だと関数終了後gcされるため
rows1 = []
for file_name in file_names: # パス名で回す
# basename = os.path.basename(file_name)
f = os.path.normpath(file_name)
wrap_file_name = f.replace("\\", "\\\n")
# 画像のサイズ
file_size = os.path.getsize(file_name)
# 画像の取得
image1 = Image.open(file_name)
# ファイルサイズの取得
image_size = image1.size
# Exif情報の取得
exif_dict = image1.getexif()
exif = [TAGS.get(k, "Unknown")+ f": {str(v)}" for k, v in exif_dict.items()]
exif_str = "\n".join(exif)
# GPS情報の取得
gps_dict = exif_dict.get_ifd(34853)
gps = [GPSTAGS.get(k, "Unknown") + f": {str(v)}" for k, v in gps_dict.items()]
gps_str = "\n".join(gps)
# 縮小
image1.thumbnail((150, 150), Image.BICUBIC)
# サムネイルの大きさを統一(そうしないとチェックボックスの位置がまちまちになるため)
# ベース画像の作成と縮小画像の貼り付け(中央寄せ)
# base_image = Image.new(image1.mode, (160, 160), "#ffffff")
base_image = Image.new('RGBA', (160, 160), (255, 0, 0, 0)) # 透明なものにしないとgifの色が変わる
horizontal = int((base_image.size[0] - image1.size[0]) / 2)
vertical = int((base_image.size[1] - image1.size[1]) / 2)
# print(f"size:{image1.size} h,v:{horizontal},{vertical}, base:{base_image.size}") # debug
base_image.paste(image1, (horizontal, vertical))
image1 = base_image
# PhotoImageへ変換
image1 = ImageTk.PhotoImage(image1)
# 列データと画像データを追加
self.images.append(image1)
rows1.append([wrap_file_name, image_size[0], image_size[1],
"{:.1f}".format(file_size/1024), exif_str, gps_str])
except Exception as e:
msg1 = e
print(f"error:{e}")
finally:
return columns1, rows1, self.images, msg1
if __name__ == '__main__':
root = TkinterDnD.Tk() # トップレベルウィンドウの作成 tkinterdnd2の適用
root.title("画像 viewer") # タイトル
root.geometry("800x710") # サイズ
listview = ListView(root) # ListViewクラスのインスタンス作成
root.drop_target_register(DND_FILES) # ドロップ受け取りを登録
root.dnd_bind("<<Drop>>", listview.open_file_and_get_data) # ドロップ後に実行するメソッドを登録
# コマンドライン引数からドラッグ&ドロップされたファイル情報を取得
if len(sys.argv) > 1:
listview.file_paths = tuple(sys.argv[1:])
listview.open_file_and_get_data() # オープン処理の実行
root.mainloop()
| 3.296875
| 3
|
envs_agents/cartpole/dqn_cartpole_minimal_example.py
|
dertilo/reinforcement-learning
| 0
|
12780850
|
<reponame>dertilo/reinforcement-learning<filename>envs_agents/cartpole/dqn_cartpole_minimal_example.py
import os
import time
from typing import Iterator, Dict, NamedTuple, Generator
import gym
import torch
import torch.nn as nn
import numpy as np
from gym.envs.classic_control import CartPoleEnv
from gym.wrappers import Monitor
from torch import optim
from torch.optim.rmsprop import RMSprop
import torch.nn.functional as F
from tqdm import tqdm
def mix_in_some_random_actions(policy_actions, eps, num_actions):
if eps > 0.0:
random_actions = torch.randint_like(policy_actions, num_actions)
selector = torch.rand_like(random_actions, dtype=torch.float32)
actions = torch.where(selector > eps, policy_actions, random_actions)
else:
actions = policy_actions
return actions
class CartPoleAgent(nn.Module):
def __init__(self, obs_space, action_space):
super().__init__()
self.num_actions = action_space.n
self.nn = nn.Sequential(
*[
nn.Linear(obs_space.shape[0], 24),
nn.ReLU(),
nn.Linear(24, 24),
nn.ReLU(),
nn.Linear(24, self.num_actions),
]
)
def calc_q_values(self, obs_batch):
observation_tensor = torch.tensor(obs_batch, dtype=torch.float)
q_values = self.nn(observation_tensor)
return q_values
def step_batch(self, obs_batch, eps=0.001):
q_values = self.calc_q_values(obs_batch)
policy_actions = q_values.argmax(dim=1)
actions = mix_in_some_random_actions(policy_actions, eps, self.num_actions)
return actions
def step(self, obs, eps=0.001):
obs_batch = np.expand_dims(obs, 0)
actions = self.step_batch(obs_batch, eps)
return int(actions.numpy()[0])
def visualize_it(env: gym.Env, agent: CartPoleAgent, max_steps=1000):
while True:
obs = env.reset()
for steps in range(max_steps):
is_open = env.render()
if not is_open:
return
action = agent.step(obs)
obs, reward, done, info = env.step(action)
if done:
break
if steps < max_steps - 1:
print("only %d steps" % steps)
class Experience(NamedTuple):
obs: np.ndarray
next_obs: np.ndarray
action: int
next_reward: float
next_done: bool
class ExperienceArrays(NamedTuple):
obs: np.ndarray
next_obs: np.ndarray
action: np.ndarray
next_reward: np.ndarray
next_done: np.ndarray
def experience_generator(agent, env: gym.Env) -> Generator[Experience, None, None]:
while True:
obs = env.reset()
for it in range(1000):
action = agent.step(obs)
next_obs, _, next_done, info = env.step(action)
yield Experience(
**{
"obs": obs,
"next_obs": next_obs,
"action": action,
"next_reward": -10.0 if next_done else 1.0,
"next_done": next_done,
}
)
obs = next_obs
if next_done:
break
def gather_experience(
experience_iter: Iterator, batch_size: int = 32
) -> ExperienceArrays:
experience_batch = [next(experience_iter) for _ in range(batch_size)]
exp_arrays = {
key: np.array([getattr(exp, key) for exp in experience_batch])
for key in Experience._fields
}
return ExperienceArrays(**exp_arrays)
def calc_estimated_return(agent: CartPoleAgent, exp: ExperienceArrays, discount=0.99):
next_q_values = agent.calc_q_values(exp.next_obs)
max_next_value, _ = next_q_values.max(dim=1)
mask = torch.tensor((1 - exp.next_done), dtype=torch.float)
next_reward = torch.tensor(exp.next_reward, dtype=torch.float)
estimated_return = next_reward + discount * max_next_value * mask
return estimated_return
def calc_loss(agent, estimated_return, observation, action):
q_values = agent.calc_q_values(observation)
actions_tensor = torch.tensor(action).unsqueeze(1)
q_selected = q_values.gather(1, actions_tensor).squeeze(1)
loss_value = F.mse_loss(q_selected, estimated_return)
return loss_value
def train(agent: CartPoleAgent, env: gym.Env, num_batches=3_000, batch_size=32):
optimizer = optim.Adam(agent.parameters(), lr=1e-2)
exp_iter = iter(experience_generator(agent, env))
for it in tqdm(range(num_batches)):
with torch.no_grad():
agent.eval()
exp: ExperienceArrays = gather_experience(exp_iter, batch_size=batch_size)
estimated_return = calc_estimated_return(agent, exp)
agent.train()
loss_value = calc_loss(agent, estimated_return, exp.obs, exp.action)
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
def run_cartpole_dqn(num_batches=1000, batch_size=32, log_dir="./logs/dqn",seed=0):
os.makedirs(log_dir, exist_ok=True)
env = CartPoleEnv()
env.seed(seed)
torch.manual_seed(seed)
agent = CartPoleAgent(env.observation_space, env.action_space)
from baselines.bench import Monitor as BenchMonitor
env = BenchMonitor(env, log_dir, allow_early_resets=True)
train(agent, env, num_batches=num_batches, batch_size=batch_size)
return agent, env
if __name__ == "__main__":
agent, env = run_cartpole_dqn()
from baselines.common import plot_util as pu
from matplotlib import pyplot as plt
results = pu.load_results("logs")
f, ax = pu.plot_results(results)
f.savefig("logs/dqn_cartpole.png")
env = Monitor(env, "./vid", video_callable=lambda episode_id: True, force=True)
visualize_it(env, agent)
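# A small sketch of the epsilon-greedy mixing done by
# mix_in_some_random_actions(): with eps=0.5 roughly half of the greedy
# actions are replaced by uniformly random ones. Toy tensors, for
# illustration only; call _demo_eps_greedy() manually to try it.
def _demo_eps_greedy():
    torch.manual_seed(0)
    greedy = torch.zeros(10, dtype=torch.int64)  # pretend the policy always picks action 0
    mixed = mix_in_some_random_actions(greedy, eps=0.5, num_actions=2)
    print(mixed.tolist())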
| 2.28125
| 2
|