hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aadc6caacc7ccc064997d62040d5601470ca380f | 1,656 | py | Python | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | null | null | null | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | 2 | 2017-08-24T18:55:37.000Z | 2017-08-24T18:59:06.000Z | remove_svn_folder.py | sdenisen/test | 709bcdd16d0f83f03b04e0bc7918bb3785993c59 | [
"Unlicense"
] | null | null | null | __author__ = 'Sergey'
import shutil
import os
import stat
def read_all_directory_path(root_folder, final_directory_list=None, folder_for_remove='.svn'):
    """Recursively collect directories to delete under *root_folder*.

    A directory is collected when its basename equals *folder_for_remove*
    (its contents are not descended into) or when it is completely empty.

    :param root_folder: directory to scan.
    :param final_directory_list: accumulator list. A fresh list is created
        when omitted; the original mutable default (``[]``) was shared
        across calls, so repeated calls accumulated stale entries.
    :param folder_for_remove: basename of directories to collect.
    :return: the accumulator list.
    """
    if final_directory_list is None:
        final_directory_list = []
    under_files_and_folders = os.listdir(root_folder)
    if os.path.split(root_folder)[1] == folder_for_remove:
        final_directory_list.append(root_folder)
        return final_directory_list
    if not under_files_and_folders:
        # Empty directory: collect it as well.
        final_directory_list.append(root_folder)
        return final_directory_list
    for entry in under_files_and_folders:
        # os.path.join is portable; the original hard-coded "\\" (Windows only).
        sub_path = os.path.join(root_folder, entry)
        if os.path.isfile(sub_path):
            continue
        # Propagate folder_for_remove too (the original silently fell back
        # to the default on recursion).
        read_all_directory_path(sub_path, final_directory_list, folder_for_remove)
    return final_directory_list
def change_file_attributes_in_folder(folder):
    # Recursively walk *folder*; for every file, clear the read-only flag
    # (stat.S_IWRITE) and then DELETE it; sub-directories are descended into.
    # NOTE(review): despite the name, this removes files, not just their
    # attributes — it exists so a subsequent shutil.rmtree cannot fail with
    # "Access is denied" on read-only SVN metadata files.
    # Path separator "\\" is hard-coded, so this is Windows-only.
    list_files_and_directories = os.listdir(folder)
    for item in list_files_and_directories:
        sub_path = folder + "\\" + item
        if os.path.isfile(sub_path):
            os.chmod(sub_path, stat.S_IWRITE)
            os.unlink(sub_path)
        else:
            change_file_attributes_in_folder(sub_path)
# Delete every '.svn' directory below *root*, clearing read-only files first
# when Windows reports "Access is denied".
directory_list = []
root = "D:\\_svn_repo\\trunk"
remove_dir = ".svn"
# 1 - read all directories recursively and store to a variable list of directory.
read_all_directory_path(root, directory_list)
# 2 - remove directory from the variable.
for directory in directory_list:
    if os.path.split(directory)[1] == remove_dir:
        try:
            shutil.rmtree(directory)
        # "except WindowsError, e:" was Python 2 syntax (a SyntaxError on
        # Python 3). WindowsError is an alias of OSError on Windows.
        except OSError as e:
            # e.strerror can be None, so guard before the substring test.
            if e.strerror and "Access is denied" in e.strerror:
                change_file_attributes_in_folder(directory)
                shutil.rmtree(directory)
            else:
                raise
| 29.571429 | 92 | 0.679348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.114734 |
aadcc926ae2bae8f3f547c0e153e1ffc86f3b16e | 65 | py | Python | tackle/__init__.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 5 | 2021-01-05T04:21:37.000Z | 2022-01-01T22:12:32.000Z | tackle/__init__.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 51 | 2021-01-03T00:41:59.000Z | 2022-03-27T00:13:51.000Z | tackle/__init__.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T11:46:02.000Z | 2022-01-03T11:46:02.000Z | """Main package for tackle box."""
__version__ = "0.1.0-alpha.4"
| 21.666667 | 34 | 0.661538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.753846 |
aaddeb43199a4c0500f1f08b0da1b9779cd9a4f7 | 1,374 | py | Python | app/core/console/base.py | cPoolChia/ChiaAutoplotter-Worker | 7368396bda018a5adedef74173aba7b6f8f0eccc | [
"MIT"
] | 2 | 2021-06-02T08:19:27.000Z | 2021-06-02T09:53:11.000Z | app/core/console/base.py | cPoolChia/ChiaAutoplotter-Worker | 7368396bda018a5adedef74173aba7b6f8f0eccc | [
"MIT"
] | null | null | null | app/core/console/base.py | cPoolChia/ChiaAutoplotter-Worker | 7368396bda018a5adedef74173aba7b6f8f0eccc | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Any, AsyncIterable, Callable, Awaitable, Optional, Union
import uuid
class BaseCommandExecution(ABC):
@abstractmethod
async def execute(
self, command: str, *, stdin: Optional[bytes] = None, cwd: Optional[str] = None
) -> None:
...
@abstractmethod
async def output(self) -> AsyncIterable[str]:
...
@property
@abstractmethod
def return_code(self) -> Optional[int]:
...
AsyncStrCallback = Callable[[str], Awaitable[None]]
class FiltrationError(PermissionError):
...
class BaseCommandExecutor(ABC):
@abstractmethod
async def execute(
self,
command: Union[list[str], str],
*,
filter_id: Optional[uuid.UUID] = None,
stdin: Optional[bytes] = None,
cwd: Optional[str] = None,
on_starting: Optional[Callable[[], None]] = None,
) -> uuid.UUID:
...
@abstractmethod
def finished(self, execution_id: uuid.UUID) -> bool:
...
@abstractmethod
def result(self, execution_id: uuid.UUID) -> Optional[tuple[int, str]]:
...
@abstractmethod
def __contains__(self, execution_id: Union[uuid.UUID, Any]) -> bool:
...
@abstractmethod
async def listen(self, callback: AsyncStrCallback, execution_id: uuid.UUID) -> None:
... | 24.105263 | 88 | 0.617904 | 1,188 | 0.864629 | 0 | 0 | 1,032 | 0.751092 | 580 | 0.422125 | 0 | 0 |
aadfff06b29955168bb01b2fd687e7d85bcf363a | 563 | py | Python | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/flow/run.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | from apps.flow.settings import config
# Patch the stdlib for gevent cooperative I/O outside of dev; the import is
# kept local so dev environments never need gevent installed.
if config.SERVER_ENV != 'dev':
    from gevent import monkey
    monkey.patch_all()
from apps.flow.views.deploy import deploy
from apps.flow.views.flow import flow
from library.api.tFlask import tflask
def create_app():
    """Build and return the flow service's Flask application."""
    application = tflask(config)
    register_blueprints(application)
    return application
def register_blueprints(app):
    """Attach the flow and deploy blueprints under their API prefixes."""
    for blueprint, prefix in ((flow, "/v1/flow"), (deploy, "/v1/deploy")):
        app.register_blueprint(blueprint, url_prefix=prefix)
if __name__ == '__main__':
    # Run the development server on the configured port when executed directly.
    create_app().run(port=config.PORT)
| 20.107143 | 59 | 0.730018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.065719 |
aae039b933dcd56d996e1972cd2fbbaabcc08e08 | 493 | py | Python | villas/controller/components/gateways/villas_relay.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | villas/controller/components/gateways/villas_relay.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | villas/controller/components/gateways/villas_relay.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | from villas.controller.components.gateway import Gateway
class VILLASrelayGateway(Gateway):
    """Gateway component backed by a VILLASrelay websocket endpoint."""

    def __init__(self, manager, args):
        identifier = args['identifier']

        # Defaults first; any key present in *args* overrides them below.
        props = {
            'category': 'gateway',
            'type': 'villas-relay',
            'realm': manager.realm,
            'name': identifier,
            'ws_url': manager.api_url_external + '/' + identifier,
        }
        props.update(args)

        super().__init__(manager, **props)
| 24.65 | 77 | 0.578093 | 433 | 0.878296 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.227181 |
aae0ef6f919b713b8a03794a527288d50661e12d | 880 | py | Python | ticket/triggers/trg_ticket_prioridade.py | rafaelnoronha/tickets | 5497b8de4efed02e5d2cad78fe8e8811ad1e03c1 | [
"MIT"
] | null | null | null | ticket/triggers/trg_ticket_prioridade.py | rafaelnoronha/tickets | 5497b8de4efed02e5d2cad78fe8e8811ad1e03c1 | [
"MIT"
] | null | null | null | ticket/triggers/trg_ticket_prioridade.py | rafaelnoronha/tickets | 5497b8de4efed02e5d2cad78fe8e8811ad1e03c1 | [
"MIT"
] | null | null | null | def trigger():
return """
CREATE OR REPLACE FUNCTION trg_ticket_prioridade()
RETURNS TRIGGER AS $$
DECLARE
prioridade_grupo smallint;
prioridade_subgrupo smallint;
BEGIN
prioridade_grupo = COALESCE((SELECT prioridade FROM grupo WHERE id = NEW.grupo_id), 0);
prioridade_subgrupo = COALESCE((SELECT prioridade FROM subgrupo WHERE id = NEW.subgrupo_id), 0);
NEW.prioridade = prioridade_grupo + prioridade_subgrupo;
RETURN NEW;
END
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_ticket_prioridade ON ticket;
CREATE TRIGGER trg_ticket_prioridade
BEFORE INSERT OR UPDATE ON ticket
FOR EACH ROW EXECUTE PROCEDURE trg_ticket_prioridade();
"""
| 36.666667 | 112 | 0.586364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 853 | 0.969318 |
aae1ec807aa6c921790407473d22408db473c3dc | 4,315 | py | Python | itertable/gis/mixins.py | 1Q1-Open-Source/itertable | 73e59be2d1db8433ba954b0242d7964b3da2bf0e | [
"MIT"
] | 16 | 2019-11-11T12:50:32.000Z | 2022-03-25T08:52:07.000Z | itertable/gis/mixins.py | 1Q1-Open-Source/itertable | 73e59be2d1db8433ba954b0242d7964b3da2bf0e | [
"MIT"
] | 3 | 2019-09-24T14:14:04.000Z | 2021-11-22T02:17:54.000Z | itertable/gis/mixins.py | 1Q1-Open-Source/itertable | 73e59be2d1db8433ba954b0242d7964b3da2bf0e | [
"MIT"
] | 2 | 2021-11-08T07:38:31.000Z | 2021-12-01T00:01:41.000Z | import fiona
from shapely import wkt, geometry
from ..loaders import FileLoader
from ..parsers.base import BaseParser
from ..mappers import TupleMapper
class FionaLoaderParser(FileLoader, BaseParser):
    """
    Composite loader & parser mixin for GIS data, powered by Fiona
    """
    # When None, parse() exposes every layer; otherwise only that layer.
    layer_id = None
    # Fiona dataset metadata (driver, schema, crs) captured on parse/load.
    meta = {}
    key_field = 'id'

    def load(self):
        """Probe the file for layers; mark it empty (with a guessed driver)
        when Fiona cannot list layers."""
        try:
            self.layers = fiona.listlayers(self.filename)
        except (ValueError, IOError):
            driver = guess_driver(self.filename)
            self.meta = {'driver': driver}
            self.empty_file = True

    def parse(self):
        """Populate self.data: either one sub-instance per layer, or the
        flattened features of the single selected layer."""
        # If multiple layers, parse all of them (!)
        if len(self.layers) > 1 and self.layer_id is None:
            cls = type(self)
            # NOTE: `id` here shadows the builtin within the comprehension.
            self.data = [{
                'id': id,
                'name': name,
                'data': cls(filename=self.filename, layer_id=id)
            } for id, name in enumerate(self.layers)]
        else:
            # One layer, load & parse GIS data
            with fiona.open(self.filename, layer=self.layer_id) as f:
                self.meta = f.meta
                if 'id' in f.meta.get('schema', {}).get('properties', {}):
                    # TODO: Is this correct?
                    del f.meta['schema']['properties']['id']
                self.data = list(map(self.parse_feature, f))

    def parse_feature(self, f):
        """Flatten one Fiona feature (properties + geometry + id) into a
        single dict."""
        # Flatten Fiona's GeoJSON-style representation into something more
        # amenable to namedtuple-ing
        feat = {key: value for key, value in f['properties'].items()}
        if 'id' not in feat and 'ID' not in feat:
            feat['id'] = f['id']
        feat['geometry'] = f['geometry']
        return feat

    def dump_feature(self, feat, i):
        """Re-nest a flattened feature dict into Fiona's GeoJSON-style
        record; *i* is the fallback id."""
        # Undo aforementioned flattening
        return {
            'id': feat.get('id', feat.get('ID', i)),
            'geometry': feat['geometry'],
            'properties': {
                key: value for key, value in feat.items()
                if key not in ('geometry', 'id',)
            }
        }

    def dump(self):
        # Dump and save the dataset at the same time via Fiona
        # (intentionally a no-op here; see save()).
        pass

    def save(self):
        """Write self.data back to self.filename using the captured meta."""
        with fiona.open(self.filename, 'w', **self.meta) as f:
            for i, feat in enumerate(self.data):
                f.write(self.dump_feature(feat, i))
class GisMapper(TupleMapper):
    """
    GIS-aware tuple mapper
    """
    def as_dataframe(self):
        """Return the mapped rows as a geopandas GeoDataFrame indexed by
        the key field."""
        # Mimic BaseIter.as_dataframe() but with GeoDataFrame
        # (also, key_field is always set)
        from geopandas import GeoDataFrame
        key = self.get_key_field()
        data = [self.item_dict(row) for row in self.values()]
        df = GeoDataFrame(data)
        df.set_index(key, inplace=True)
        return df

    def item_dict(self, uitem):
        """Convert one usable item to a dict with a Shapely geometry."""
        # Turn usable item into GeoDataFrame-friendly dict
        data = uitem._asdict()
        data['geometry'] = geometry.shape(data['geometry'])
        return data
class ShapeMapper(GisMapper):
    """Translate between Fiona's GeoJSON-style geometries and Shapely shapes."""

    def map_value(self, field, value):
        mapped = super(ShapeMapper, self).map_value(field, value)
        if field != 'geometry':
            return mapped
        # Inbound: GeoJSON-style mapping -> Shapely shape.
        return geometry.shape(mapped)

    def unmap_value(self, field, value):
        # Outbound: Shapely shape -> GeoJSON-style mapping.
        unmapped = geometry.mapping(value) if field == 'geometry' else value
        return super(ShapeMapper, self).unmap_value(field, unmapped)

    def item_dict(self, uitem):
        # Geometry is already a shape here; just expose the fields.
        return uitem._asdict()
class WktMapper(ShapeMapper):
    """Serialize geometries to/from WKT strings (handy for Django fields)."""

    def map_value(self, field, value):
        mapped = super(WktMapper, self).map_value(field, value)
        if field != 'geometry':
            return mapped
        # Inbound: Shapely shape -> WKT string.
        return wkt.dumps(mapped)

    def unmap_value(self, field, value):
        # Outbound: WKT string -> Shapely shape, then delegate.
        unmapped = wkt.loads(value) if field == 'geometry' else value
        return super(WktMapper, self).unmap_value(field, unmapped)

    def item_dict(self, uitem):
        data = uitem._asdict()
        data['geometry'] = wkt.loads(data['geometry'])
        return data
def guess_driver(filename):
    """Pick a Fiona driver name from the file extension."""
    is_shapefile = filename.endswith(".shp")
    return "ESRI Shapefile" if is_shapefile else "GeoJSON"
| 31.268116 | 74 | 0.575435 | 4,020 | 0.931634 | 0 | 0 | 0 | 0 | 0 | 0 | 984 | 0.228042 |
aae3bc4aae7976bccac4f5b0de858e6206edb768 | 4,331 | py | Python | src/service_framework/connections/out/requester.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | 1 | 2020-03-20T21:33:56.000Z | 2020-03-20T21:33:56.000Z | src/service_framework/connections/out/requester.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | 1 | 2020-03-22T03:48:45.000Z | 2020-03-22T03:48:45.000Z | src/service_framework/connections/out/requester.py | ZacharyATanenbaum/service_framework | b5dde4407998350d1b7ad09284110b986fd4e12a | [
"MIT"
] | null | null | null | """ File to house a requester connection """
from logging import getLogger
import zmq
from service_framework.utils.connection_utils import BaseConnection
from service_framework.utils.msgpack_utils import msg_pack, msg_unpack
from service_framework.utils.socket_utils import get_requester_socket
LOG = getLogger(__name__)
class Requester(BaseConnection):
    """
    Needed to automatically generate all connection functions/sockets so external
    calls will be properly handled.
    """
    def __init__(self, model, addresses):
        super().__init__(model, addresses)
        self.addresses = addresses
        self.context = None
        self.socket = None

    def __del__(self):
        # hasattr guard: __del__ can run even if __init__ raised before
        # self.socket was assigned.
        if hasattr(self, 'socket') and self.socket:
            self.socket.close()

    @staticmethod
    def get_addresses_model():
        """
        This is needed so the BaseConnector can validate the
        provided addresses and throw an error if any are missing.
        As well as automatically generate documentation.

        NOTE: types must always be "str"

        return = {
            'required_addresses': {
                'req_address_name_1': str,
                'req_address_name_2': str,
            },
            'optional_addresses': {
                'opt_address_name_1': str,
                'opt_address_name_2': str,
            },
        }
        """
        return {
            'required_addresses': {'requester': str},
            'optional_addresses': {},
        }

    @staticmethod
    def get_connection_arguments_model():
        """
        This is needed so the BaseConnection can validate the provided
        model explicitly state the arguments to be passed on each
        send message.

        return = {
            'required_connection_arguments': {
                'required_connection_arg_1': type,
                'required_connection_arg_2': type,
            },
            'optional_connection_arguments': {
                'optional_connection_arg_1': type,
                'optional_connection_arg_2': type,
            },
        }
        """
        return {
            'required_connection_arguments': {},
            'optional_connection_arguments': {},
        }

    @staticmethod
    def get_creation_arguments_model():
        """
        This is needed so the BaseConnection can validate the provided
        creation arguments as well as for auto documentation.

        return = {
            'required_creation_arguments': {
                'required_creation_arg_1': type,
                'required_creation_arg_2': type,
            },
            'optional_creation_arguments': {
                'optional_creation_arg_1': type,
                'optional_creation_arg_2': type,
            },
        }
        """
        return {
            'required_creation_arguments': {},
            'optional_creation_arguments': {},
        }

    def _setup_requester_socket(self):
        """
        Create the zmq context and requester socket for the configured
        'requester' address. Shared by runtime_setup() and
        get_inbound_sockets_and_triggered_functions(), which previously
        duplicated this code verbatim.
        """
        self.context = zmq.Context()
        self.socket = get_requester_socket(
            self.addresses['requester'],
            self.context
        )

    def get_inbound_sockets_and_triggered_functions(self):
        """
        Method needed so the service framework knows which sockets to listen
        for new messages and what functions to call when a message appears.
        return [{
            'inbound_socket': zmq.Context.Socket,
            'decode_message': def(bytes) -> payload,
            'arg_validator': def(args),
            'connection_function': def(args) -> args or None,
            'model_function': def(args, to_send, conifg) -> return_args or None,
            'return_validator': def(return_args)
            'return_function': def(return_args),
        }]
        """
        self._setup_requester_socket()
        # A requester has no inbound sockets to poll; the reply is read
        # synchronously inside send().
        return []

    def runtime_setup(self):
        """
        Method called directly after instantiation to conduct all
        runtime required setup. I.E. Setting up a zmq.Context().
        """
        self._setup_requester_socket()

    def send(self, payload):
        """
        This is needed to wrap socket calls. So all calls to the connection
        will be properly formatted.
        """
        self.socket.send(msg_pack(payload))
        return msg_unpack(self.socket.recv())
| 31.845588 | 81 | 0.58647 | 4,004 | 0.924498 | 0 | 0 | 2,111 | 0.487416 | 0 | 0 | 2,795 | 0.645347 |
aae3dee92f7ddb11b47b7b8a14769a88c001f66c | 25,675 | py | Python | module/quantization/cluster_q.py | fightingnoble/Robust-ReRAM-NN | 22d14386a5298c7a9dd3dce894363c30896abd19 | [
"MIT"
] | null | null | null | module/quantization/cluster_q.py | fightingnoble/Robust-ReRAM-NN | 22d14386a5298c7a9dd3dce894363c30896abd19 | [
"MIT"
] | null | null | null | module/quantization/cluster_q.py | fightingnoble/Robust-ReRAM-NN | 22d14386a5298c7a9dd3dce894363c30896abd19 | [
"MIT"
] | null | null | null | # from sklearn.cluster._kmeans import *
import copy
from typing import Union
import torch
import torch.nn as nn
from sklearn.cluster._robustq import *
from .quantizer import Quantizer
__all__ = ['MiniBatchRobustqTorch', 'RobustqTorch']
class ClusterQuantizerBase(Quantizer):
    """Base class for cluster-based weight quantizers.

    The quantization levels (cluster centers) and the per-weight cluster
    labels are registered as buffers so they travel with the model's
    state dict.
    """

    def __init__(self, n_feature=1, n_clusters=8, name='',
                 quant_fun=lambda x: x):
        super(ClusterQuantizerBase, self).__init__()
        self.n_clusters = n_clusters
        self.name = name
        # Empty labels buffer signals "not fitted yet" when loading.
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        # Centers buffer pre-sized so a state dict can be loaded into it.
        self.register_buffer("cluster_centers_", torch.zeros(n_clusters, n_feature))
        self.quant_fun = quant_fun

    def reset(self):
        """Drop the fitted labels and spread the centers uniformly in [-1, 1]."""
        super().reset()
        self.register_buffer("labels_", torch.zeros((0, ), dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=self.n_clusters).view(-1, 1))

    def forward(self, inputs):
        # BUG FIX: __init__ stores the callable as `self.quant_fun`;
        # the original referenced `self.quant_func`, which raised
        # AttributeError the first time forward() ran.
        output = self.quant_fun(inputs)
        return output

    def extra_repr(self) -> str:
        return 'name={},cluster={}'.format(self.name, self.n_clusters)

    @staticmethod
    def quant_calib(net, wrapped_modules, calib_loader):
        """Run the multi-step calibration pass over *wrapped_modules*.

        Each wrapped module is switched to 'calibration_forward', the
        network is run over *calib_loader* once per calibration step
        (gradient-free), then modules are switched to 'qat_forward'.
        NOTE: inputs are moved with .cuda(), so this requires a GPU.
        """
        calib_layers = []
        n_calibration_steps = 1
        for name, module in wrapped_modules.items():
            module.mode = 'calibration_forward'
            calib_layers.append(name)
            n_calibration_steps = max(n_calibration_steps, module.quantizer.n_calibration_steps)
        print(f"prepare calibration for {calib_layers}\n n_calibration_steps={n_calibration_steps}")
        for step in range(n_calibration_steps):
            print(f"Start calibration step={step+1}")
            for name, module in wrapped_modules.items():
                module.quantizer.calibration_step = step + 1
            with torch.no_grad():
                for inp, target in calib_loader:
                    inp = inp.cuda()
                    net(inp)
        for name, module in wrapped_modules.items():
            print(f"{name}: {module.quantizer}")
            module.mode = 'qat_forward'
        print("calibration finished")
class RobustqTorch(ClusterQuantizerBase):
    """Variance-aware ("robust") clustering quantizer.

    Wraps the sklearn-style ``RobustQ`` estimator. ``alpha`` is forwarded
    to the estimator as ``var_std`` and ``gamma`` as ``var_weight`` in
    fit/predict, weighting the variance term of the clustering objective.
    """
    def __init__(self, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        super(RobustqTorch, self).__init__(n_feature, n_clusters=n_clusters, name=name)
        self.alpha = alpha
        self.gamma = gamma
        # Extra **kwargs (n_init, max_iter, random_state, ...) go straight
        # to the RobustQ estimator.
        self.kmeans = RobustQ(n_clusters=n_clusters, **kwargs)

        # if hasattr(data_or_size, '__array__'):
        #     data = data_or_size
        # else:
        #     data = None
        # # if isinstance(data, torch.Tensor):
        # #     data = data.detach().clone().cpu().view(-1, 1).numpy()
        # if isinstance(data, np.ndarray):
        #     data = self.label_.new_tensor(torch.from_numpy(data))
        # self.init_layer_cluster_center(data, n_clusters, q_level_init)
        self.init_layer_cluster_center(None, n_clusters, q_level_init)

    def init_layer_cluster_center(self, data, n_clusters, method="uniform"):
        """Initialize quantization levels: uniformly in [-1, 1], or by
        fitting the estimator on *data* when provided and method != 'uniform'."""
        if method == "uniform" or data is None:
            self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=n_clusters).view(-1, 1))
            self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
        else:
            self.fit(data, tol=1e-2)

    def reset(self):
        """Reset buffers and mirror the fresh centers into the estimator."""
        super().reset()
        self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()

    def fit(self, X: torch.Tensor, y=None, sample_weight=None, n_init=None, init=None, tol=None):
        """Fit the RobustQ estimator on the flattened tensor *X* and copy
        the resulting labels and centers into this module's buffers.

        n_init/init/tol temporarily override the estimator's settings and
        are restored afterwards.
        """
        # 210626 data copy optimization
        # data = X.detach().clone().view(-1, 1)
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        bak = copy.deepcopy([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = [new if new is not None else old
                                                                 for new, old in zip((n_init, init, tol), bak)]
        self.kmeans.fit(data, y=y, sample_weight=sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # self.labels_.data.copy_(torch.from_numpy(self.kmeans.labels_))
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_,dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
        self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = bak

    def predict(self, X, sample_weight=None):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        # 210626 data copy optimization
        # data = X.detach().clone().view(-1, 1)
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        return self.kmeans.predict(data, sample_weight, var_std=self.alpha, var_weight=self.gamma)

    def forward(self, inputs):
        """Quantize *inputs* to the nearest cluster center.

        Three modes:
        * calibration (not yet calibrated): fit on the inputs, then quantize
          with the freshly fitted labels;
        * training: re-predict labels and apply a straight-through estimator
          (``inputs - inputs.detach() + q``) so gradients pass to *inputs*;
        * eval: reuse the stored labels (asserts the module has been fitted).
        """
        # To avoid fault fitness in initial iterations
        # if (self.cluster_centers_.data == 0).all():
        #     # use uniform quantization to avoid further fitness with bad data
        #     self.init_layer_cluster_center(inputs, self.weight_qbit)
        if self.calibration and not self.calibrated:
            self.fit(inputs)
            labels = self.labels_
            weight_quan = self.cluster_centers_[:, 0][labels].view(inputs.shape)
        elif self.training:
            # label should change as weights are updated
            labels = self.predict(inputs)
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            weight_quan = inputs - inputs.detach() + weight_quan_temp
        else:
            # to avoid load the model without pre-fitness
            # if len(self.labels_.data) == 0:
            #     # self.labels_.data.copy_(torch.from_numpy(self.predict(inputs)).view(-1))
            #     self.register_buffer("labels_", torch.from_numpy(self.predict(inputs)).view(-1))
            assert len(self.labels_.data)
            labels = self.labels_
            weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
            weight_quan = weight_quan_temp

        return weight_quan

    def extra_repr(self) -> str:
        return super(RobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
class MiniBatchRobustqTorch(RobustqTorch):
    """Mini-batch variant of :class:`RobustqTorch`, backed by MiniBatchRobustQ."""

    def __init__(self, # batch_size, # data_or_size,
                 n_feature=1, n_clusters=8, name='',
                 alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        # `batch_size` is only meaningful for the mini-batch estimator, so it
        # must be kept out of the kwargs forwarded to RobustQ in
        # super().__init__(). BUG FIX: the original popped it and never used
        # it, silently discarding the caller-provided batch size.
        batch_size = kwargs.pop("batch_size", None)
        super().__init__(n_feature=n_feature, n_clusters=n_clusters, name=name,
                         alpha=alpha, gamma=gamma, q_level_init=q_level_init, **kwargs)
        mb_kwargs = dict(kwargs)
        if batch_size is not None:
            mb_kwargs["batch_size"] = batch_size
        # Replace the RobustQ created by the parent with the mini-batch
        # estimator, then re-sync the uniform initial centers into it.
        self.kmeans = MiniBatchRobustQ(n_clusters=n_clusters, **mb_kwargs)
        self.init_layer_cluster_center(None, n_clusters, q_level_init)

    def partial_fit(self, X, y=None, sample_weight=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Coordinates of the data points to cluster. It must be noted that
            X will be copied if it is not C-contiguous.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None).

        Returns
        -------
        self
        """
        # Flatten to (n, 1) without cloning; detach only when needed.
        data = X.view(-1, 1)
        if X.requires_grad:
            data = data.detach()
        data = data.cpu().numpy()
        self.kmeans.partial_fit(data, y, sample_weight, var_std=self.alpha, var_weight=self.gamma)
        # Mirror the estimator state back into this module's buffers.
        self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_, dtype=torch.long))
        self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))

    def extra_repr(self) -> str:
        return super(MiniBatchRobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
# TODO: Use close package
def insert_robust_quntizer(module: nn.Module, quantizer: Union[RobustqTorch, MiniBatchRobustqTorch], alpha, gamma):
    """Replace the weight quantizer of every Conv2d/Linear in *module*.

    The replacement type (mini-batch or full) follows the type of the
    *quantizer* prototype; *alpha*/*gamma* are forwarded to the new
    quantizer. The number of levels is ``2**w_bit - 1``.
    """
    batch_factor = 800  # hoisted: loop-invariant
    for k, m in module.named_modules():
        if not isinstance(m, (nn.Conv2d, nn.Linear)):
            continue
        n_samples = m.weight.numel()
        # BUG FIX: the original read `m.quanizer.w_bit` (typo); every other
        # access in this function goes through `m.quantizer`.
        n_clusters = 2 ** m.quantizer.w_bit - 1
        # Cap the mini-batch size at 20-30% of the layer's weight count.
        batch_size = (n_clusters * batch_factor
                      if n_clusters * batch_factor < int(0.3 * n_samples)
                      else int(0.2 * n_samples))
        # Check the subclass first: MiniBatchRobustqTorch is-a RobustqTorch.
        if isinstance(quantizer, MiniBatchRobustqTorch):
            m.quantizer.w_quantizer = MiniBatchRobustqTorch(
                n_feature=1,
                n_clusters=n_clusters,
                alpha=alpha, gamma=gamma,
                batch_size=batch_size,
                n_init=1, max_iter=30, random_state=0,
                q_level_init="uniform",
            )
        elif isinstance(quantizer, RobustqTorch):
            m.quantizer.w_quantizer = RobustqTorch(
                n_feature=1,
                n_clusters=n_clusters,
                alpha=alpha, gamma=gamma,
                n_init=1, max_iter=30, random_state=0,
                q_level_init="uniform",
            )
if __name__ == '__main__':
import numpy as np
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
torch.set_printoptions(3)
import sklearn
sklearn.show_versions()
a = {}
# vgg = models.vgg11(pretrained=True)
# if torch.cuda.is_available():
# vgg.cuda()
# a['state_dict'] = vgg.state_dict()
a = torch.load("plot/checkpoints/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
map_location=torch.device('cpu') if not torch.cuda.is_available() else torch.device('cuda'))
num_class = 7
batch_factor = 800
gamma = 0.
train_flg = False
robustq_torch_batch = []
robustq_sklean_batch = []
robustq_torch = []
robustq_sklean = []
kmeans_sklean = []
kmeans_sklean_batch = []
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
print(n_samples)
# from sklearn
kmeans_sklean.append(
KMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
kmeans_sklean_batch.append(
MiniBatchKMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor if num_class * 300 < int(
0.3 * n_samples) else int(0.2 * n_samples)))
# from Robustq
robustq_sklean.append(
RobustQ(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
robustq_sklean_batch.append(MiniBatchRobustQ(n_clusters=num_class,
n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples)))
# from clusterq
robustq_torch_batch_t = MiniBatchRobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples),
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_batch_t.eval()
robustq_torch_t = RobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_t.eval()
if torch.cuda.is_available():
robustq_torch_batch_t.cuda()
robustq_torch_t.cuda()
robustq_torch.append(robustq_torch_t)
robustq_torch_batch.append(robustq_torch_batch_t)
import sys
sys.path.append("../")
from utee.misc import time_measurement
@time_measurement(False, 0, 0)
def f1(quantizer_list, is_np=False):
print("start\n")
ix = 0
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
data_o = v.detach().view(-1, 1)
if is_np:
data = data_o.cpu().numpy()
else:
data = data_o.cuda()
quantizer_list[ix].fit(data)
data_o = v.detach().view(-1, 1)
if is_np:
datac = data_o.cpu().numpy()
t = (datac != data)
tt = t if not isinstance(t, np.ndarray) else t.any()
# print("data is modified:", tt)
else:
datac = data_o.cuda()
t = (datac != data)
tt = t.any().item()
# print("data is modified:", tt)
if tt:
print("max difference:", ((datac - data_o)[t]).max())
ix += 1
# import visdom
#
# vis = visdom.Visdom()
class Visdom():
def bar(self, *args, **kwargs):
pass
def line(self, *args, **kwargs):
pass
vis = Visdom()
def plot(quantizer, name="None", is_np=False):
print(quantizer.labels_)
print(quantizer.cluster_centers_)
# ------------- visdom draw --------------
# histogram of weight distribution
qw = quantizer.cluster_centers_[:, 0][quantizer.labels_] # .view(weight.shape)
qw_hist = []
if is_np:
qw_v = np.unique(qw)
for v in qw_v:
qw_hist.append((qw == v).sum())
else:
qw_v = qw.unique()
for v in qw_v:
qw_hist.append((qw == v).sum().item())
vis.bar(torch.tensor(qw_hist), qw_v, win=name + " hist",
opts=dict(title=name + " hist" + ' gamma={}'.format(gamma)))
# vis.histogram(qw, win=name+" hist",
# opts=dict(title=name+" hist"+' gamma={}'.format(gamma)))
# transform function
x = torch.arange(-1., 1., 0.01)
print(x.shape)
if is_np:
x = x.view(-1, 1).cpu().numpy()
elif torch.cuda.is_available():
x = x.view(-1, 1).cuda()
else:
x = x.view(-1, 1)
level1 = quantizer.cluster_centers_[:, 0][quantizer.predict(x)]
# print(level1.shape, x.shape)
vis.line(Y=level1, X=x.reshape(-1),
win=name,
opts=dict(title=name))
@time_measurement(False, 0, 0)
def get_q_loss(quantizer_list, is_np=False):
    """Print the total squared quantization error over all large weights.

    Walks the module-level checkpoint ``a`` and, for every weight tensor
    with more than 1024 elements, quantizes it with the corresponding
    fitted quantizer from ``quantizer_list`` and accumulates the squared
    reconstruction error.

    :param quantizer_list: fitted quantizers, one per qualifying layer.
    :param is_np: True for sklearn-style (numpy) quantizers, False for
        callable torch quantizers.
    """
    ix = 0
    loss = 0
    for n, v in a['state_dict'].items():
        if "weight" in n:
            n_samples = v.numel()
            if n_samples > 1024:
                if is_np:
                    data = v.detach().view(-1, 1)
                    data = data.cpu().numpy()
                    q_data = quantizer_list[ix].cluster_centers_[:, 0][quantizer_list[ix].predict(data)].reshape(
                        data.shape)
                else:
                    # Torch quantizers are callable and map weights directly.
                    data = v
                    q_data = quantizer_list[ix](data).reshape(data.shape)
                loss += ((q_data - data) ** 2).sum()
                # print(n)
                ix += 1
    print(loss)
# Fit + quantization-loss benchmark: plain scikit-learn KMeans quantizers.
print("=======test kmeans_sklean======\n")
f1(kmeans_sklean, True)
get_q_loss(kmeans_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean[ix].cluster_centers_[:, 0][kmeans_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Same benchmark for the MiniBatch KMeans variant.
print("=======test kmeans_sklean_batch======\n")
f1(kmeans_sklean_batch, True)
get_q_loss(kmeans_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean_batch[ix].cluster_centers_[:, 0][kmeans_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Benchmark for the sklearn-based robust quantizer.
print("=======test robustq_sklean======\n")
f1(robustq_sklean, True)
get_q_loss(robustq_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean[ix].cluster_centers_[:, 0][robustq_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Visualize the first fitted robustq_sklean quantizer, then benchmark
# the mini-batch sklearn robust quantizer.
plot(robustq_sklean[0], 'robustq_sklean', True)
print("=======test robustq_sklean_batch======\n")
f1(robustq_sklean_batch, True)
get_q_loss(robustq_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean_batch[ix].cluster_centers_[:, 0][robustq_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Visualize the batch sklearn quantizer, then benchmark the torch-based one.
plot(robustq_sklean_batch[0], 'robustq_sklean_batch', True)
print("=======test robustq_torch======\n")
f1(robustq_torch)
get_q_loss(robustq_torch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch[ix].cluster_centers_[:, 0][robustq_torch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Visualize the torch quantizer, then benchmark its mini-batch variant.
plot(robustq_torch[0], 'robustq_torch')
print("=======test robustq_torch_batch======\n")
f1(robustq_torch_batch)
get_q_loss(robustq_torch_batch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch_batch[ix].cluster_centers_[:, 0][robustq_torch_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
# Visualize the first fitted mini-batch torch quantizer.
plot(robustq_torch_batch[0], 'robustq_torch_batch')
# print("======= cudalib ======\n")
# from libKMCUDA import kmeans_cuda
# clq_temp = []
# import time
# t_s = time.monotonic()
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# samples = data.cpu().numpy()
# centroids, assignments = kmeans_cuda(samples, num_class, )
# clq_temp.append([centroids, assignments])
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
#
# t_s = time.monotonic()
# ix = 0
# loss=0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# centroids, assignments = clq_temp[ix]
# q_data = centroids[:, 0][assignments].reshape(data.shape)
# loss += ((q_data - data) ** 2).sum()
# ix +=1
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
# print(loss)
# Baseline: uniform linear quantization at `bits` bits, for comparison
# against the clustering-based quantizers above.
print("=======test uniform======\n")
from module.quantization.quant_functions import linear_quantize, compute_integral_part
bits = 3
print("start\n")
ix = 0
q2_loss = 0
q2_list = []
for n, v in a['state_dict'].items():
    if "weight" in n:
        n_samples = v.numel()
        if n_samples > 1024:
            w = v.detach()
            # Scale factor derived from the integral part of the weights.
            sf = bits - 1. - compute_integral_part(w, overflow_rate=0)
            q2 = linear_quantize(w, sf, bits=bits)
            q2_list.append(q2)
            q2_loss += ((q2 - w)**2).sum()
            ix += 1
print(q2_loss)
# vis.histogram(q2_list[0].view(-1), win='uniform'+" hist",
#               opts=dict(title='uniform'+" hist"))
# Histogram of the uniform quantization levels of the first layer.
qw = q2_list[0]
qw_v = qw.unique()
qw_hist = []
for v in qw_v:
    qw_hist.append((qw == v).sum().item())
vis.bar(torch.tensor(qw_hist), qw_v, win='uniform' + " hist",
        opts=dict(title='uniform' + " hist"))
# 2021/08/31: removed duplicated code of MiniBatchRobustqTorch and RobustqTorch;
# 2021/08/31: MiniBatchRobustqTorch now inherits its functions from RobustqTorch.
aae44756cbe9fb17074f9c08844383373bfa435e | 247 | py | Python | kattis/Shopping List.py | jaredliw/python-question-bank | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | 1 | 2021-04-08T07:49:15.000Z | 2021-04-08T07:49:15.000Z | kattis/Shopping List.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | null | null | null | kattis/Shopping List.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | 1 | 2022-01-23T02:12:24.000Z | 2022-01-23T02:12:24.000Z | # CPU: 0.18 s
# Read the grid size, then intersect every shopper's item set; the items
# common to all lists are printed sorted, one per line, after their count.
n_rows, _ = map(int, input().split())
common_items = set(input().split())
for _ in range(n_rows - 1):
    common_items &= set(input().split())
print(len(common_items))
print("\n".join(sorted(common_items)))
| 30.875 | 66 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.068826 |
2a9a4d87b28f9436adbf59fa348294bf68f453a8 | 155 | py | Python | revuo/forms/__init__.py | Lasanha/revuo | 3fb2a06515194416aff3b9a9efac321f1af3209a | [
"BSD-3-Clause"
] | 1 | 2020-03-24T23:33:57.000Z | 2020-03-24T23:33:57.000Z | revuo/forms/__init__.py | Lasanha/revuo | 3fb2a06515194416aff3b9a9efac321f1af3209a | [
"BSD-3-Clause"
] | 26 | 2015-01-20T13:31:57.000Z | 2020-06-05T17:26:18.000Z | revuo/forms/__init__.py | Lasanha/revuo | 3fb2a06515194416aff3b9a9efac321f1af3209a | [
"BSD-3-Clause"
] | null | null | null | from .blog_item import FormBlogItem
from .edit_profile import FormEditProfile
from .news_item import FormNewsItem
from .publication import FormPublication
| 31 | 41 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a9a824358bcc69196e77f5087a8a0a54404562e | 22,333 | py | Python | helperscripts/es2json.py | waehniger/efre-lod-elasticsearch-tools | e5025ac810c369135fab90be88d29ab073ccbebb | [
"Apache-2.0"
] | null | null | null | helperscripts/es2json.py | waehniger/efre-lod-elasticsearch-tools | e5025ac810c369135fab90be88d29ab073ccbebb | [
"Apache-2.0"
] | null | null | null | helperscripts/es2json.py | waehniger/efre-lod-elasticsearch-tools | e5025ac810c369135fab90be88d29ab073ccbebb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import elasticsearch
import argparse
import logging
import sys, os, time, atexit
from signal import SIGTERM #needed for Daemon
from httplib2 import Http #needed for put_dict
class Daemon:
    """
    A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        # Paths used to re-wire the standard streams after daemonizing,
        # plus the pidfile recording the daemon's process id.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        se = open(self.stderr, 'a+')
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # write pidfile (removed again on exit via the atexit hook)
        atexit.register(self.delpid)
        pid = str(os.getpid())
        with open(self.pidfile,'w+') as pf:
            pf.write("%s\n" % pid)
    def delpid(self):
        # atexit hook: drop the pidfile when the daemon process ends.
        os.remove(self.pidfile)
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            with open(self.pidfile,'r') as pf:
                pid = int(pf.read().strip())
            if pid:
                message = "pidfile %s already exist. Daemon already running?\n"
                sys.stderr.write(message % self.pidfile)
                sys.exit(1)
        except IOError:
            # No pidfile: nothing is running, we may start.
            pid = None
        # Start the daemon
        self.daemonize()
        self.run()
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            with open(self.pidfile,'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return # not an error in a restart
        # Try killing the daemon process: keep sending SIGTERM until the
        # process is gone, then clean up the stale pidfile.
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)
    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()
    def run(self):
        """
        You should override this method when you subclass Daemon. It will be called after the process has been
        daemonized by start() or restart().
        """
class simplebar():
    """Minimal in-place progress counter: rewrites a single stderr line
    (using the ANSI cursor-up escape) on every update."""

    count = 0  # class-level default; shadowed per instance in __init__

    def __init__(self):
        self.count = 0

    def reset(self):
        """Restart the counter at zero."""
        self.count = 0

    def update(self, num=None):
        """Advance by ``num`` (or by 1 when ``num`` is falsy) and redraw."""
        step = num if num else 1
        self.count += step
        sys.stderr.write("{}\n\033[F".format(self.count))
        sys.stderr.flush()
def put_dict(url, dictionary):
    '''
    Pass the whole dictionary as a json body to the url.
    Make sure to use a new Http object each time for thread safety.
    '''
    client = Http()  # fresh client per call: Http objects are not thread-safe
    payload = json.dumps(dictionary)
    resp, content = client.request(
        uri=url,
        method='PUT',
        headers={'Content-Type': 'application/json'},
        body=payload,
    )
def ArrayOrSingleValue(array):
    """Collapse a one-element container to its single element.

    Numbers pass through unchanged; dicts and containers with several
    items are returned as-is; empty or falsy input yields None.
    """
    if isinstance(array, (int, float)):
        return array
    if not array:
        return None
    if isinstance(array, dict) or len(array) > 1:
        return array
    # Exactly one element: hand it back directly.
    return next(iter(array))
def eprint(*args, **kwargs):
    """print() clone that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
def eprintjs(*args, **kwargs):
    """Pretty-print each argument as 4-space-indented JSON on stderr."""
    for obj in args:
        serialized = json.dumps(obj, indent=4)
        print(serialized, file=sys.stderr, **kwargs)
def esfatgenerator(host=None,port=9200,index=None,type=None,body=None,source=True,source_exclude=None,source_include=None,timeout=10):
    """Scroll over an Elasticsearch index and yield whole result pages.

    Unlike esgenerator() this yields the raw list of hits per scroll page
    ("fat" pages of up to 1000 docs) instead of single documents.

    :param host/port/index/type: Elasticsearch connection/location settings.
    :param body: optional query body for the search.
    :param source/source_exclude/source_include: _source filtering options.
    :param timeout: per-request timeout in seconds.
    """
    if not source:
        source=True  # _source must always be requested here
    es=elasticsearch.Elasticsearch([{'host':host}],port=port)
    try:
        # elasticsearch-py 7 dropped doc_type and renamed the
        # _source include/exclude kwargs, hence the version switch.
        if elasticsearch.VERSION<(7,0,0):
            page = es.search(
                index = index,
                doc_type = type,
                scroll = '12h',
                size = 1000,
                body = body,
                _source=source,
                _source_exclude=source_exclude,
                _source_include=source_include,
                request_timeout=timeout)
        elif elasticsearch.VERSION>=(7,0,0):
            page = es.search(
                index = index,
                scroll = '12h',
                size = 1000,
                body = body,
                _source=source,
                _source_excludes=source_exclude,
                _source_includes=source_include,
                request_timeout=timeout)
    except elasticsearch.exceptions.NotFoundError:
        sys.stderr.write("aborting.\n")
        exit(-1)
    sid = page['_scroll_id']
    scroll_size = page['hits']['total']
    yield page.get('hits').get('hits')
    # Keep scrolling until a page comes back empty.
    while (scroll_size > 0):
        pages = es.scroll(scroll_id = sid, scroll='12h')
        sid = pages['_scroll_id']
        scroll_size = len(pages['hits']['hits'])
        yield pages.get('hits').get('hits')
# returns records which have a certain ID from an ID file from an Elasticsearch index.
# IDs in the ID file shall be non-quoted and newline-separated.
#
def esgenerator(host=None,port=9200,index=None,type=None,id=None,body=None,source=True,source_exclude=None,source_include=None,headless=False,timeout=10,verbose=False):
    """Yield documents from an Elasticsearch index one at a time.

    With ``id`` set, yields just that single document; otherwise scrolls
    the whole index (optionally filtered by ``body``).

    :param headless: if True, yield only the ``_source`` part of each hit.
    :param verbose: if True, print scroll progress to stderr.
    """
    progress=1000  # progress counter for verbose output (one scroll page = 1000 docs)
    if not source:
        source=True
    es=elasticsearch.Elasticsearch([{'host':host}],port=port,timeout=timeout,max_retries=10,retry_on_timeout=True)
    try:
        if id:
            # Single-document shortcut: fetch, yield, and stop.
            if elasticsearch.VERSION<(7,0,0):
                record=es.get(index=index,doc_type=type,id=id)
            elif elasticsearch.VERSION>=(7,0,0):
                record=es.get(index=index,id=id)
            if headless:
                yield record["_source"]
            else:
                yield record
            return
        if elasticsearch.VERSION<(7,0,0):
            page = es.search(
                index = index,
                doc_type = type,
                scroll = '12h',
                size = 1000,
                body = body,
                _source=source,
                _source_exclude=source_exclude,
                _source_include=source_include)
        elif elasticsearch.VERSION>=(7,0,0): # no doc_type and slightly different _source parameters in elasticsearch7
            page = es.search(
                index = index,
                scroll = '12h',
                size = 1000,
                body = body,
                _source=source,
                _source_excludes=source_exclude,
                _source_includes=source_include)
    except elasticsearch.exceptions.NotFoundError:
        sys.stderr.write("not found: "+host+":"+str(port)+"/"+index+"/"+type+"/_search\n")
        exit(-1)
    sid = page['_scroll_id']
    scroll_size = page['hits']['total']
    for hits in page['hits']['hits']:
        if headless:
            yield hits['_source']
        else:
            yield hits
    # Continue scrolling until an empty page signals the end.
    while (scroll_size > 0):
        pages = es.scroll(scroll_id = sid, scroll='12h')
        sid = pages['_scroll_id']
        scroll_size = len(pages['hits']['hits'])
        if verbose:
            eprint("{}/{}".format(progress,pages['hits']['total']))
            progress+=1000
        for hits in pages['hits']['hits']:
            if headless:
                yield hits['_source']
            else:
                yield hits
def esidfilegenerator(host=None,port=9200,index=None,type=None,body=None,source=True,source_exclude=None,source_include=None,idfile=None,headless=False,chunksize=1000,timeout=10):
    """Yield the documents whose IDs are listed in ``idfile``.

    IDs are read (newline-delimited, unquoted) and fetched in batches of
    ``chunksize``. With a ``match`` query in ``body`` each ID is searched
    individually; otherwise a bulk mget request is used per chunk.
    The idfile itself is left untouched (contrast esidfileconsumegenerator).
    """
    if os.path.isfile(idfile):
        if not source:
            source=True
        tracer = logging.getLogger('elasticsearch')
        tracer.setLevel(logging.WARNING)
        tracer.addHandler(logging.FileHandler('errors.txt'))
        es=elasticsearch.Elasticsearch([{'host':host}],port=port,timeout=timeout, max_retries=10, retry_on_timeout=True)
        ids=set()
        with open(idfile,"r") as inp:
            for ppn in inp:
                _id=ppn.rstrip()
                ids.add(_id)
                if len(ids)>=chunksize:
                    # Full chunk collected: fetch it now, then keep reading.
                    if body and "query" in body and "match" in body["query"]:
                        # Combine the caller's match clause with a per-ID match.
                        searchbody={"query":{"bool":{"must":[{"match":body["query"]["match"]},{}]}}}
                        for _id in ids:
                            searchbody["query"]["bool"]["must"][1]={"match":{"_id":_id}}
                            #eprint(json.dumps(searchbody))
                            for doc in esgenerator(host=host,port=port,index=index,type=type,body=searchbody,source=source,source_exclude=source_exclude,source_include=source_include,headless=False,timeout=timeout,verbose=False):
                                if headless:
                                    yield doc.get("_source")
                                else:
                                    yield doc
                        ids.clear()
                    else:
                        searchbody={'ids':list(ids)}
                        try:
                            if elasticsearch.VERSION<(7,0,0):
                                for doc in es.mget(index=index,doc_type=type,body=searchbody,_source_include=source_include,_source_exclude=source_exclude,_source=source).get("docs"):
                                    if headless:
                                        yield doc.get("_source")
                                    else:
                                        yield doc
                            elif elasticsearch.VERSION>=(7,0,0): # no doc_type and slightly different _source parameters in elasticsearch7
                                for doc in es.mget(index=index,body=searchbody,_source_includes=source_include,_source_excludes=source_exclude,_source=source).get("docs"):
                                    if headless:
                                        yield doc.get("_source")
                                    else:
                                        yield doc
                            ids.clear()
                        except elasticsearch.exceptions.NotFoundError:
                            continue
        if len(ids)>0:
            # Remainder: fetch the last partial chunk the same way.
            if body and "query" in body and "match" in body["query"]:
                searchbody={"query":{"bool":{"must":[{"match":body["query"]["match"]},{}]}}}
                for _id in ids:
                    searchbody["query"]["bool"]["must"][1]={"match":{"_id":_id}}
                    #eprint(json.dumps(searchbody))
                    for doc in esgenerator(host=host,port=port,index=index,type=type,body=searchbody,source=source,source_exclude=source_exclude,source_include=source_include,headless=False,timeout=timeout,verbose=False):
                        if headless:
                            yield doc.get("_source")
                        else:
                            yield doc
                ids.clear()
            else:
                searchbody={'ids':list(ids)}
                try:
                    if elasticsearch.VERSION<(7,0,0):
                        for doc in es.mget(index=index,doc_type=type,body=searchbody,_source_include=source_include,_source_exclude=source_exclude,_source=source).get("docs"):
                            if headless:
                                yield doc.get("_source")
                            else:
                                yield doc
                    elif elasticsearch.VERSION>=(7,0,0): # no doc_type and slightly different _source parameters in elasticsearch7
                        for doc in es.mget(index=index,body=searchbody,_source_includes=source_include,_source_excludes=source_exclude,_source=source).get("docs"):
                            if headless:
                                yield doc.get("_source")
                            else:
                                yield doc
                    ids.clear()
                except elasticsearch.exceptions.NotFoundError:
                    pass
# returns records which have a certain ID from an ID file from an Elasticsearch index.
# IDs in the ID file shall be non-quoted and newline-separated.
# "consumes" the file: on a clean run the file is deleted; if errors occur, only the IDs that were not downloaded are preserved.
#
def esidfileconsumegenerator(host=None,port=9200,index=None,type=None,body=None,source=True,source_exclude=None,source_include=None,idfile=None,headless=False,chunksize=1000,timeout=10):
    """Yield documents for the IDs listed in ``idfile`` and consume the file.

    IDs are read (newline-delimited, unquoted) from ``idfile`` and fetched
    via mget in batches of ``chunksize``. On a clean run the idfile is
    deleted; if a fetch fails, the unfetched IDs are written back to the
    file so a later run can retry them.

    :param host/port/index/type: Elasticsearch connection/location settings.
    :param source/source_exclude/source_include: _source filtering options.
    :param headless: if True, yield only the ``_source`` part of each hit.
    :param chunksize: number of IDs per mget request.
    :param timeout: request timeout in seconds.
    """
    if not os.path.isfile(idfile):
        return
    with open(idfile,"r") as inp:
        ids = [ppn.rstrip() for ppn in inp]
    if not source:
        source=True  # _source must always be requested here
    tracer = logging.getLogger('elasticsearch')
    tracer.setLevel(logging.WARNING)
    tracer.addHandler(logging.FileHandler('errors.txt'))
    es=elasticsearch.Elasticsearch([{'host':host}],port=port,timeout=timeout, max_retries=10, retry_on_timeout=True)
    notfound_ids=set()
    _ids=set()

    def _mget_docs(id_chunk):
        # elasticsearch-py 7 dropped doc_type and renamed the
        # _source include/exclude kwargs, hence the version switch.
        if elasticsearch.VERSION<(7,0,0):
            return es.mget(index=index,doc_type=type,body={'ids':list(id_chunk)},
                           _source_include=source_include,_source_exclude=source_exclude,
                           _source=source).get("docs")
        return es.mget(index=index,body={'ids':list(id_chunk)},
                       _source_includes=source_include,_source_excludes=source_exclude,
                       _source=source).get("docs")

    try:
        # BUGFIX: the original iterated over ``ids`` while popping from it,
        # which skips roughly every other ID. Drain the list explicitly.
        while ids:
            _ids.add(ids.pop())
            if len(_ids)>=chunksize:
                for doc in _mget_docs(_ids):
                    yield doc.get("_source") if headless else doc
                _ids.clear()
        if _ids:
            for doc in _mget_docs(_ids):
                yield doc.get("_source") if headless else doc
            _ids.clear()
    except elasticsearch.exceptions.NotFoundError:
        # BUGFIX: was ``notfound_ids.add(_ids)``, which raises TypeError
        # (a set is unhashable); record the individual IDs instead.
        notfound_ids.update(_ids)
    else:
        os.remove(idfile)
    finally:
        # BUGFIX: the original rewrote the idfile unconditionally, recreating
        # an empty file right after deleting it on success. Only write back
        # when there are leftover IDs to retry.
        ids += list(notfound_ids)
        if ids:
            with open(idfile,"w") as outp:
                for _id in ids:
                    print(_id,file=outp)
### avoid duplicates and nested lists when adding elements into lists
def litter(lst, elm):
    """Merge ``elm`` into ``lst`` without duplicates or nested lists.

    :param lst: existing value — None/empty, a scalar (str/dict/number), or a list.
    :param elm: value to merge in — a scalar or a list of scalars.
    :returns: ``elm`` when ``lst`` is falsy, otherwise a flat, duplicate-free
        combination of both (a scalar stays a scalar when nothing new is added).
    """
    if not lst:
        return elm
    if isinstance(elm, list):
        # BUGFIX: a non-list ``lst`` (str OR dict) is wrapped so the list
        # elements are merged instead of being dropped.
        merged = lst if isinstance(lst, list) else [lst]
        for item in elm:
            if item not in merged:
                merged.append(item)
        return merged
    # Scalar element (str, dict, number, ...).
    if isinstance(lst, list):
        # BUGFIX: the original only handled str/dict elements here and
        # silently returned None for any other scalar type.
        if elm not in lst:
            lst.append(elm)
        return lst
    if lst == elm:
        # BUGFIX: deduplicate equal scalars instead of building [x, x].
        return lst
    return [lst, elm]
def isint(num):
    """Return True when ``int(num)`` succeeds, False otherwise."""
    try:
        int(num)
    except (ValueError, TypeError):
        return False
    return True
def isfloat(num):
    """Return True when ``float(num)`` succeeds, False otherwise."""
    try:
        float(num)
    except (ValueError, TypeError):
        return False
    return True
def isiter(obj):
    """Return True when ``obj`` is iterable, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
if __name__ == "__main__":
    # CLI entry point: dump documents from an Elasticsearch index as
    # newline-delimited JSON, selected by id, id-file, or full scroll.
    parser=argparse.ArgumentParser(description='simple ES.Getter!')
    parser.add_argument('-host',type=str,default="127.0.0.1",help='hostname or IP-Address of the ElasticSearch-node to use, default is localhost.')
    parser.add_argument('-port',type=int,default=9200,help='Port of the ElasticSearch-node to use, default is 9200.')
    parser.add_argument('-index',type=str,help='ElasticSearch Search Index to use')
    parser.add_argument('-type',type=str,help='ElasticSearch Search Index Type to use')
    parser.add_argument('-source',type=str,help='just return this field(s)')
    parser.add_argument("-include",type=str,help="include following _source field(s)")
    parser.add_argument("-exclude",type=str,help="exclude following _source field(s)")
    parser.add_argument("-id",type=str,help="retrieve single document (optional)")
    parser.add_argument("-headless",action="store_true",default=False,help="don't include Elasticsearch Metafields")
    parser.add_argument('-body',type=json.loads,help='Searchbody')
    parser.add_argument('-server',type=str,help="use http://host:port/index/type/id?pretty. overwrites host/port/index/id/pretty") #no, i don't steal the syntax from esbulk...
    parser.add_argument('-idfile',type=str,help="path to a file with newline-delimited IDs to process")
    parser.add_argument('-idfile_consume',type=str,help="path to a file with newline-delimited IDs to process")
    parser.add_argument('-pretty',action="store_true",default=False,help="prettyprint")
    args=parser.parse_args()
    if args.server:
        # Split a full server URL into host/port/index/type/id parts.
        slashsplit=args.server.split("/")
        args.host=slashsplit[2].rsplit(":")[0]
        if isint(args.server.split(":")[2].rsplit("/")[0]):
            args.port=args.server.split(":")[2].split("/")[0]
        args.index=args.server.split("/")[3]
        if len(slashsplit)>4:
            args.type=slashsplit[4]
        if len(slashsplit)>5:
            if "?pretty" in args.server:
                args.pretty=True
                args.id=slashsplit[5].rsplit("?")[0]
            else:
                args.id=slashsplit[5]
    if args.pretty:
        tabbing=4
    else:
        tabbing=None
    if args.idfile:
        for json_record in esidfilegenerator(host=args.host,port=args.port,index=args.index,type=args.type,body=args.body,source=args.source,headless=args.headless,source_exclude=args.exclude,source_include=args.include,idfile=args.idfile):
            sys.stdout.write(json.dumps(json_record,indent=tabbing)+"\n")
    elif args.idfile_consume:
        for json_record in esidfileconsumegenerator(host=args.host,port=args.port,index=args.index,type=args.type,body=args.body,source=args.source,headless=args.headless,source_exclude=args.exclude,source_include=args.include,idfile=args.idfile_consume):
            sys.stdout.write(json.dumps(json_record,indent=tabbing)+"\n")
    elif not args.id:
        for json_record in esgenerator(host=args.host,port=args.port,index=args.index,type=args.type,body=args.body,source=args.source,headless=args.headless,source_exclude=args.exclude,source_include=args.include,verbose=True):
            sys.stdout.write(json.dumps(json_record,indent=tabbing)+"\n")
    else:
        # Single-document fetch; es.get vs es.get_source depending on -headless.
        es=elasticsearch.Elasticsearch([{"host":args.host}],port=args.port)
        json_record=None
        if not args.headless and elasticsearch.VERSION<(7,0,0):
            json_record=es.get(index=args.index,doc_type=args.type,_source=True,_source_exclude=args.exclude,_source_include=args.include,id=args.id)
        elif not args.headless and elasticsearch.VERSION>(7,0,0):
            json_record=es.get(index=args.index,_source=True,_source_excludes=args.exclude,_source_includes=args.include,id=args.id)
        elif elasticsearch.VERSION<(7,0,0):
            json_record=es.get_source(index=args.index,doc_type=args.type,_source=True,_source_exclude=args.exclude,_source_include=args.include,id=args.id)
        elif elasticsearch.VERSION>(7,0,0):
            json_record=es.get_source(index=args.index,_source=True,_source_excludes=args.exclude,_source_includes=args.include,id=args.id)
        if json_record:
            sys.stdout.write(json.dumps(json_record,indent=tabbing)+"\n")
2a9ab14e54ff5f86c5d259ed762fb8e2b9216336 | 2,195 | py | Python | 4/1.py | ZYM-PKU/Raspberry-3B | bca939a71575d7298d09a8f56cd697ddf63f34d5 | [
"MIT"
] | null | null | null | 4/1.py | ZYM-PKU/Raspberry-3B | bca939a71575d7298d09a8f56cd697ddf63f34d5 | [
"MIT"
] | null | null | null | 4/1.py | ZYM-PKU/Raspberry-3B | bca939a71575d7298d09a8f56cd697ddf63f34d5 | [
"MIT"
] | null | null | null | import time
import os
import spidev as SPI
import SSD1306
from PIL import Image, ImageDraw, ImageFont # 调用相关库文件
from datetime import datetime
PATH = os.path.dirname(__file__)
RST = 19
DC = 16
bus = 0
device = 0 # 树莓派管脚配置
disp = SSD1306.SSD1306(rst=RST, dc=DC, spi=SPI.SpiDev(bus, device))
disp.begin()
disp.clear()
def gettime():
    """Return the current local time as a zero-padded "HH:MM:SS" string."""
    # strftime zero-pads each field, replacing the manual string surgery
    # on dt.hour/dt.minute/dt.second.
    return datetime.now().strftime("%H:%M:%S")
def disp1():
    '''Display "Hello World!" on the OLED.'''
    font = ImageFont.truetype("comicsansms.ttf", 20)
    image = Image.new('RGB', (disp.width, disp.height), 'black').convert('1')
    draw = ImageDraw.Draw(image)
    draw.bitmap((0, 0), image, fill=1)
    draw.text((10, 20), 'Hello World!', font=font, fill=255)
    disp.image(image)
    disp.display()  # push the frame to the display
def disp2():
    '''Endless loop: show a clock plus a countdown to the mid-term
    (2020-11-09) on the OLED, refreshed every 0.1 s.'''
    while True:
        nowtime = gettime()
        logo = Image.open(os.path.join(PATH, 'p128.png')).resize(
            (32, 32), Image.ANTIALIAS).convert('1')  # logo
        img = Image.new('1', (disp.width, disp.height), 'black')  # final_img
        img.paste(logo, (0, 0, logo.size[0], logo.size[1]))
        font = ImageFont.truetype("comicsansms.ttf", 13)
        draw = ImageDraw.Draw(img)
        draw.bitmap((0, 0), img, fill=1)
        draw.text((64, 0), nowtime, font=font, fill=255)
        draw.text((32, 15), "Count down of ", font=font, fill=255)
        draw.text((50, 30), "mid-term:", font=font, fill=255)
        # Remaining time until the mid-term date, split into d/h/m/s.
        tardate = datetime(2020, 11, 9)
        nowdate = datetime.now()
        delta = tardate-nowdate
        days = delta.days
        seconds = delta.seconds
        hours = seconds//3600
        seconds = seconds % 3600
        minutes = seconds//60
        seconds = seconds % 60
        draw.text((0, 45), f"{days}d {hours}hour {minutes}min {seconds}s",
                  font=font, fill=255)
        disp.clear()
        disp.image(img)
        disp.display()
        time.sleep(0.1)
if __name__ == "__main__":
    disp2()  # run the clock/countdown loop when executed directly
| 27.098765 | 79 | 0.591344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.134641 |
2a9af691df37378dbd6bf5930ef45f2b0d815dc1 | 81 | py | Python | python/943.find-the-shortest-superstring.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
] | null | null | null | python/943.find-the-shortest-superstring.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
] | null | null | null | python/943.find-the-shortest-superstring.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
] | null | null | null | class Solution:
def shortestSuperstring(self, A: List[str]) -> str:
| 20.25 | 55 | 0.617284 | 71 | 0.876543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a9b3f2fbc45cf570c14b8caf9673a079f617761 | 4,008 | py | Python | DjangoDemo/language/chart.py | gaivin/GWeb | f3d39e0dcb1640c3466bcf2aa4ebae65c64d4348 | [
"MIT"
] | 1 | 2019-11-04T01:18:02.000Z | 2019-11-04T01:18:02.000Z | DjangoDemo/language/chart.py | gaivin/GWeb | f3d39e0dcb1640c3466bcf2aa4ebae65c64d4348 | [
"MIT"
] | null | null | null | DjangoDemo/language/chart.py | gaivin/GWeb | f3d39e0dcb1640c3466bcf2aa4ebae65c64d4348 | [
"MIT"
] | 1 | 2019-11-04T01:18:08.000Z | 2019-11-04T01:18:08.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Gaivin Wang
@license: Apache Licence
@contact: gaivin@outlook.com
@site:
@software: PyCharm
@file: chart.py
@time: 10/10/2018 4:14 PM
"""
from pyecharts import Bar, Line, WordCloud
import pandas as pd
import random, os.path as path
DATA_PATH = path.join(path.dirname(__file__), "data")
def history_chart():
    """Build a Line chart of Python's TIOBE rating history.

    Reads ``pythonratehistory.csv`` from DATA_PATH and plots the ``Python``
    column over ``Date``, skipping the first row.

    :returns: a configured :class:`pyecharts.Line` chart.
    """
    # Cleanup: the original also set TOP = 200 (immediately overwritten
    # below) and computed an unused XAXIS_INTERVAL — both removed.
    BOTTOM = 1  # skip the first row of the CSV
    chart = Line(title="Python History Ratings", subtitle="Source: www.tiobe.com",
                 title_color="DarkSlateGray", background_color="Azure",
                 width=1000, height=500, page_title="Python Ratings History")
    chart.use_theme('walden')
    df = pd.read_csv(path.join(DATA_PATH, "pythonratehistory.csv"), sep=",")
    TOP = len(df.Python)  # plot through the newest row
    values = list(df.Python[BOTTOM:TOP])
    title = list(df.Date[BOTTOM:TOP])
    chart.add(name="Rating", x_axis=title, y_axis=values, yaxis_name="Rating (%)",
              xaxis_name="Date",
              is_legend_show=False,
              is_smooth=True,
              is_symbol_show=False,
              line_width=4,
              mark_point=['max'],
              mark_point_symbolsize=60,
              mark_line=["max", "min"],
              is_datazoom_show=True,
              is_visualmap=True,
              visual_range=[0, 8])
    return chart
def language_rank_chart():
    """Bar chart of the top-10 TIOBE language ratings for September 2018.

    Reads ``program_language_rank.csv`` from DATA_PATH and returns a
    configured :class:`pyecharts.Bar` chart.
    """
    top_n = 10
    label_color = "BLACK"
    chart = Bar(title="Program Language Ratings for September 2018", subtitle="Source: www.tiobe.com",
                title_color="DarkSlateGray", background_color="Azure", width=1000, height=500,
                page_title="Program Language Ratings"
                )
    frame = pd.read_csv(path.join(DATA_PATH, "program_language_rank.csv"), sep=",", usecols=[2, 3])
    # Strip the "%" suffix so ratings become plain floats.
    ratings = [float(entry.replace("%", "")) for entry in frame.Ratings[0:top_n]]
    languages = list(frame.ProgrammingLanguage[0:top_n])
    # Medal markers placed on the first three bars.
    medals = [{"coord": [2, 3], "name": "3rd"}, {"coord": [1, 2], "name": "2nd"},
              {"coord": [0, 1], "name": "1st"}]
    chart.add(name="Rating", x_axis=languages, y_axis=ratings, is_label_show=True,
              yaxis_name="Rating (%)", yaxis_label_textcolor=label_color,
              xaxis_name="Program Language", xaxis_interval=0, xaxis_label_textcolor=label_color,
              label_formatter="{c}%", is_legend_show=False,
              label_text_color=label_color,
              mark_point=medals,
              mark_point_symbolsize=80,
              mark_point_textcolor="SteelBlue",
              )
    return chart
def world_cloud_chart():
    """Word cloud of PythonAnywhere features; weights are randomly jittered
    around two base levels so the cloud layout varies between renders."""
    primary = 1000
    secondary = 800
    jitter = 20
    base_weights = {
        "Web Apps": primary,
        "Files": primary,
        "Consoles": primary,
        "Databases": primary,
        "Scheduled Tasks": primary,
        "Easy Deploy": secondary,
        "Develop Anywhere": secondary,
        "Amazing Support": secondary,
        "Teach & Learn": secondary,
    }
    # Apply the +/-jitter noise to every base weight.
    item_dict = {label: base + random.randrange(-jitter, jitter)
                 for label, base in base_weights.items()}
    name_list = item_dict.keys()
    value_list = item_dict.values()
    wordcloud = WordCloud(title="Python Anywhere Features and Advantages", width=1000, height=500,
                          page_title="Python anywhere Word Cloud")
    wordcloud.add("", name_list, value_list, word_size_range=[30, 60])
    return wordcloud
| 39.294118 | 106 | 0.600798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.268214 |
2a9b5085bd109f647ca53c8bccc9f5901b048ec5 | 2,773 | py | Python | code/evaluate.py | kajal-puri/point-normals-upsampling | 5248c0d1b2b26ec9e002eb9520a204cdd186ab1d | [
"MIT"
] | 2 | 2021-04-27T15:09:41.000Z | 2021-07-01T00:32:54.000Z | code/evaluate.py | kajal-puri/point-normals-upsampling | 5248c0d1b2b26ec9e002eb9520a204cdd186ab1d | [
"MIT"
] | null | null | null | code/evaluate.py | kajal-puri/point-normals-upsampling | 5248c0d1b2b26ec9e002eb9520a204cdd186ab1d | [
"MIT"
] | 1 | 2021-10-05T14:08:19.000Z | 2021-10-05T14:08:19.000Z | # -*- coding: utf-8 -*-
import torch
import argparse
import numpy as np
from model import PointCloudNet
from code.utils import fp_sampling, knn_patch, helper_function
import os
# Script: upsample a point cloud (xyz coordinates, with normals in the extra
# columns) 4x using a trained PointCloudNet.  Pipeline: split the cloud into
# overlapping patches, normalize each patch, run the network, denormalize and
# fuse the upsampled patches back into one cloud.
parser = argparse.ArgumentParser()
parser.add_argument('--num_points', default=1024, type=int,
                    help='Number of points per patch')
parser.add_argument('--patch_num_ratio', default=4, type=int,
                    help='Number of points per patch')
parser.add_argument('--trained_model', type=str,
                    help='Trained model directory')
parser.add_argument('--test_file', type=str,
                    help='XYZ file for testing')
FLAGS = parser.parse_args()
# Output directory, relative to the working directory of the run.
if not os.path.exists("../results"):
    os.mkdir("../results")
NUM_POINTS = FLAGS.num_points
PATCH_NUM_RATIO = FLAGS.patch_num_ratio
TRAINED_MODEL = FLAGS.trained_model
TEST_FILE = FLAGS.test_file
f_name = TEST_FILE.split("/")[-1]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#normaliaze data and extract patches
# Farthest-point sampling picks patch centres; each patch is the NUM_POINTS
# nearest neighbours of a centre.  num_patches = N/NUM_POINTS*PATCH_NUM_RATIO,
# so PATCH_NUM_RATIO controls how much the patches overlap.
pc = torch.tensor(np.loadtxt(TEST_FILE)).float().to(device)
num_patches = int(pc.shape[0] / NUM_POINTS * PATCH_NUM_RATIO)
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(pc[:, 0:3], dim=0).contiguous(), num_patches)
patches = torch.tensor(knn_patch.extract_knn_patch(pc[torch.squeeze(fps_idx, dim=0).cpu().numpy(), 0:3].cpu().numpy(), pc.cpu().numpy(), NUM_POINTS)).to(device)
print(patches.shape)
# Per-patch normalization: centre on the patch centroid, scale by the distance
# of the farthest point so coordinates fit in the unit sphere.
centroid = torch.mean(patches[:, :, 0:3], dim=1, keepdim=True)
patches[:, :, 0:3] = patches[:, :, 0:3] - centroid
furthest_distance = torch.max(torch.sqrt(torch.sum(patches[:, :, 0:3] ** 2, dim=-1)), dim=1,keepdim=True).values
patches[:, :, 0:3] = patches[:, :, 0:3] / torch.unsqueeze(furthest_distance, dim=-1)
# read best epoch from trained model
trained_model_state = open("{0}/state.txt".format(TRAINED_MODEL), "r")
best_epoch, read_min_loss = helper_function.get_best_epoch(trained_model_state)
print(best_epoch, read_min_loss)
print("Best epoch (i.e., minimum loss) for {0}".format(read_min_loss))
#initialize model
net = PointCloudNet(3, 6, True, NUM_POINTS).to(device)
model = torch.load("{0}/epoch_{1}.pt".format(TRAINED_MODEL, best_epoch))
net.load_state_dict(model["model_state_dict"])
net.eval()
up_patches = net(patches)
#denormalize and merge patches
# Undo the per-patch normalization, concatenate all upsampled patches, then
# farthest-point-sample down to exactly 4x the original point count.
up_patches[:, :, 0:3] = up_patches[:, :, 0:3] * torch.unsqueeze(furthest_distance, dim=-1) + centroid
up_points = torch.cat([p for p in up_patches], dim=0)
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(up_points[:, 0:3], dim=0).contiguous(), pc.shape[0] * 4)
up_points = up_points[torch.squeeze(fps_idx, dim=0).cpu().numpy(), :].detach().cpu().numpy()
np.savetxt("../results/{0}".format(f_name), up_points, fmt='%.6f', delimiter=" ", newline="\n")
| 39.614286 | 161 | 0.705373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.170573 |
2a9c29a9ca43c2e78dfd11fb0c0783a70a2f2e37 | 10,197 | py | Python | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | xyz2image.py | hentr/xyz2image | bee976e0062ee893594e56ead1e75cb5d7a4e8af | [
"MIT"
] | null | null | null | '''
Authors: Trond Henninen(trond.henninen@empa.ch) and Feng Wang
This script is for generating normalised Gaussian simulated annular dark-field scanning transmission electron microscopy (ADF-STEM) images from input atomic coordinates.
For rapidly generating a large dataset, it approximates a contrast similar to multislice simulated images by convolving a 2D Gaussian with the atomic coordinates.
This is a decent approximation for up to 10 overlapping atoms, as the contrast is linearly additive for such thin lattices.
Optimized for rapidly generating data with multiprocessing, so can generate millions of images per hour with a desktop processor.
Inputs .xyz files and outputs a .tif image and compressed arrays (.npz) for fast save/load data e.g. for machine learning.
The input coodinates gets blurred by a 3D gaussian and has the z-dimension flattened to make the 2d image.
xyz2image.xyz2image converts just one file, while xyz2image.folder convert all files in the folder.
Keep in mind, dimensions of the xyz coordinates, gauss_sigma and padxyz are all in ångström [å].
TODO: implement binary_2d and direct image output
TODO: convert binary_radius to [å] instead if pixels
TODO: set a parameter for voxsize (e.g. 9 pm), and calculate
Variables:
'folder' is the folder path where to look for .xyz files (default: '.' meaning current folder).
'gauss_sigma' is the gaussian gauss_sigma in ångström [å]. (default: 0.4).
'edgesize' is the size in pixels of the output square image and voxel cube (default: 128).
'padxyz' is the minimum padding (in [å]) added around the atomic coordinate array, adjusted to make the box cubic (default: 1.5)
'n_rotation' is the number of randomly rotated images (and 3D representations) that is generated for each xyz (1 means original orientation, while 8000 gives convenient files of 10s of MB)
'n_stacks' number of .npz stacks to be generated for each input file
'output_types' which type of outputs are generated (enable by setting the different options to True)
'bitrate' bitrate of output files (should be 8 or 16)
'binary_radius' radius of atoms in binary_3d/binary_2d [pixels]
'frameshift' and 'maxframes' are for .xyz files with many frames:
'frameshift' is which frame it start reading from (default 0)
'maxframes' is how many consecutive frames is read (default 50)
'''
import os, glob
import numpy as np
from ase.io import read
from scipy.ndimage.filters import gaussian_filter
from random import uniform
import tifffile as tif
import multiprocessing
from multiprocessing import Pool
######################## PARAMETERS ###################################
class Parameters:
    """Bare namespace class; run-time configuration values are attached as attributes."""
P = Parameters() # this object also becomes available to the subprocesses in the pool without being passed to xyz2image()
P.output_types = { #set True for which type of output you want
    'coordinates_3d':True, #3D coordinates of the rotated cluster
    'delta_3d':True, #binary 3D-array with 1 for atom center coordinates and 0 for rest
    'delta_2d':False, #binary 2D-array same as the 3D, but Z-coordinate has been collapsed
    'gaussian_3d':False, #delta_3d convolved with a 3D gaussian
    'gaussian_2d':True, #Simulated image made by delta_2d convolved with a 2D gaussian
    'binary_3d':False, #binary 3D-array with 1 for spherical atoms with radius P.binary_radius, and 0 for rest
    'binary_2d':False, #NOT YET IMPLEMENTED binary 2D-array same as the 3D, but Z-coordinate has been collapsed
    'delta_2d_image':False, #delta_2d is also stored as a .tif stack
    'gaussian_2d_image':False, #gaussian_2d is also stored as a .tif stack
    'binary_2d_image':False, #NOT YET IMPLEMENTED binary_2d is also stored as a .tif stack
    }
# Input folder (searched for *.xyz) and output folder for the .npz stacks.
P.folder = r'.'
P.output_folder = './npz_stacks'
P.bitrate = 8 #output bitrate, 8 or 16
# Geometry settings; lengths are in ångström [å] unless stated otherwise.
P.gauss_sigma = 0.4
P.edgesize = 80
P.padxyz = 0.3
P.n_rotation = (1024)*8 #number of rotations in one .npz stack, 8k is a good compromise of speed/filesize/memory consumption
P.n_stacks = 1 # number of .npz stacks will be generated for each input file
# Frame-window selection for multi-frame .xyz inputs.
P.frameshift = 0
P.maxframes = 50
P.binary_radius = 7 #[pixels] radius of atoms in binary_3d
P.xyzfiles = glob.glob('*.xyz')
def load(filename):
    """Open a numpy compressed ND-array archive (.npz) and return the lazily-read NpzFile."""
    return np.load(filename)
def xyz2image(file):
    """Convert one .xyz trajectory file into image/volume training data.

    For every selected frame the carbon atoms are removed, the cluster is
    rotated P.n_rotation times (the first rotation keeps a fixed reference
    view, the rest are random Euler rotations), the coordinates are fitted
    into a cubic box of P.edgesize voxels, and the representations enabled in
    P.output_types are accumulated and written as one compressed archive
    '<P.output_folder>/<name>/<counter>.npz' plus optional .tif stacks.
    """
    print(file)
    fname = os.path.splitext(os.path.basename(file))[0]  # input name without extension
    t = read(file, index=':')
    # P.frameshift / P.maxframes select a window of frames for multi-frame .xyz files
    t2 = t[min(P.frameshift, len(t) - 1):min(P.frameshift + P.maxframes, len(t))]
    print(file, len(t2))
    # make the output folders if they don't exist yet
    if not os.path.exists(P.output_folder):
        os.makedirs(P.output_folder)
    if not os.path.exists(f'{P.output_folder}/{fname}'):
        os.makedirs(f'{P.output_folder}/{fname}')
    coordinates_3d_stack, delta_3d_stack, delta_2d_stack, gaussian_3d_stack = [], [], [], []
    gaussian_2d_stack, binary_3d_stack, binary_2d_stack = [], [], []
    for at in t2:
        del at[at.numbers == 6]  # delete carbon atoms (atomic number 6)
        for rot in range(P.n_rotation):
            if rot == 0:
                # keep the first frame at the same viewpoint as the input xyz file
                at.rotate(90, 'z')
            else:
                # random rotation
                at.euler_rotate(uniform(0, 360), uniform(0, 360), uniform(0, 360))
            atoms = at.get_positions()
            # shift the cluster so every coordinate starts at the origin
            atoms[:, 0] -= min(atoms[:, 0])
            atoms[:, 1] -= min(atoms[:, 1])
            atoms[:, 2] -= min(atoms[:, 2])
            maxx, maxy, maxz = max(atoms[:, 0]), max(atoms[:, 1]), max(atoms[:, 2])
            maxxyz = max(maxx, maxy, maxz)
            # pad each axis so the box becomes cubic with a P.padxyz margin [å]
            padx = (maxxyz - maxx) / 2 + P.padxyz
            pady = (maxxyz - maxy) / 2 + P.padxyz
            padz = (maxxyz - maxz) / 2 + P.padxyz
            atoms[:, 0] += padx
            atoms[:, 1] += pady
            atoms[:, 2] += padz
            edgemax = maxxyz + 2 * P.padxyz
            voxsize = edgemax / (P.edgesize - 1)   # [å] per voxel
            sigpix = P.gauss_sigma / voxsize       # gaussian sigma expressed in voxels
            # normalize the coordinate box to voxel indices
            normatoms = np.round(atoms / edgemax * (P.edgesize - 1)).astype(int)
            delta_3d = np.zeros((P.edgesize, P.edgesize, P.edgesize))
            delta_3d[normatoms[:, 0], normatoms[:, 1], normatoms[:, 2]] = 1
            delta_2d = np.zeros((P.edgesize, P.edgesize))
            delta_2d[normatoms[:, 0], normatoms[:, 1]] = 1
            if P.output_types['coordinates_3d']:
                coordinates_3d_stack.append(normatoms)
            if P.output_types['delta_3d']:
                delta_3d_stack.append(delta_3d)
            if P.output_types['delta_2d'] or P.output_types['delta_2d_image']:
                delta_2d_stack.append(delta_2d)
            # BUGFIX: this condition used to test 'gaussian_2d' twice; the second
            # operand must be the image flag, mirroring the delta_2d case above.
            if P.output_types['gaussian_2d'] or P.output_types['gaussian_2d_image']:
                gaussian_2d = gaussian_filter(delta_2d, sigpix)
                gaussian_2d /= np.max(gaussian_2d) / (2**P.bitrate - 1)
                gaussian_2d = gaussian_2d.astype('uint' + str(P.bitrate))
                gaussian_2d_stack.append(gaussian_2d)
            if P.output_types['gaussian_3d']:
                gaussian_3d = gaussian_filter(delta_3d, sigpix)
                gaussian_3d /= np.max(gaussian_3d) / (2**P.bitrate - 1)
                gaussian_3d = gaussian_3d.astype('uint' + str(P.bitrate))
                gaussian_3d_stack.append(gaussian_3d)
            if P.output_types['binary_3d']:
                # draw a sphere of radius P.binary_radius [pixels] around each atom
                binary_3d = np.zeros((P.edgesize, P.edgesize, P.edgesize), dtype=bool)
                for atom in normatoms:
                    y, x, z = np.ogrid[-atom[0]:P.edgesize - atom[0],
                                       -atom[1]:P.edgesize - atom[1],
                                       -atom[2]:P.edgesize - atom[2]]
                    mask = x * x + y * y + z * z <= P.binary_radius**2
                    binary_3d[mask] = True
                binary_3d_stack.append(binary_3d)
    # collect the enabled representations into one compressed archive
    output_stacks = {}
    if P.output_types['coordinates_3d']:
        output_stacks['coordinates_3d'] = np.asarray(coordinates_3d_stack)
    if P.output_types['delta_3d']:
        output_stacks['delta_3d'] = np.asarray(delta_3d_stack).astype(bool)
    if P.output_types['delta_2d']:
        output_stacks['delta_2d'] = np.asarray(delta_2d_stack).astype(bool)
    if P.output_types['gaussian_2d']:
        output_stacks['gaussian_2d'] = np.asarray(gaussian_2d_stack)
    if P.output_types['gaussian_3d']:
        output_stacks['gaussian_3d'] = np.asarray(gaussian_3d_stack)
    if P.output_types['binary_3d']:
        output_stacks['binary_3d'] = np.asarray(binary_3d_stack)
    # name the archive after the running count of files already in the folder
    simulated_files = len(glob.glob(f'{P.output_folder}/{fname}/*'))
    file_name = f'{P.output_folder}/{fname}/{str(simulated_files + P.n_rotation * len(t2)).zfill(8)}'
    if any([P.output_types['delta_3d'], P.output_types['delta_2d'],
            P.output_types['gaussian_2d'], P.output_types['gaussian_3d'],
            P.output_types['binary_3d']]):
        np.savez_compressed(file_name + '.npz', **output_stacks)  # these files can be loaded with np.load
    if P.output_types['delta_2d_image']:
        tif.imsave(file_name + 'delta_2d.tif', np.invert(np.asarray(delta_2d_stack).astype(bool)))
    if P.output_types['gaussian_2d_image']:
        tif.imsave(file_name + 'gaussian_2d.tif', np.asarray(gaussian_2d_stack))
def folder_parallellized(P):
    """Convert every file in P.xyzfiles, running one worker process per CPU core."""
    os.chdir(P.folder)
    worker_count = multiprocessing.cpu_count()
    with Pool(worker_count) as pool:
        pool.map(xyz2image, P.xyzfiles)
if __name__ == '__main__':
    # generate P.n_stacks archives for every input file
    for _ in range(P.n_stacks):
        folder_parallellized(P)
# Example of reading the generated archives back:
#   npz = load(file_name + '.npz')
#   print(npz.files, np.shape(npz['delta_3d']), np.shape(npz['gaussian_2d']))
| 55.721311 | 188 | 0.674806 | 67 | 0.006565 | 0 | 0 | 0 | 0 | 0 | 0 | 5,108 | 0.500539 |
2a9cd0d36db26dfa7b130266260e262e2636a4e0 | 197 | py | Python | CollegeBuddy_backend/framework/api/API_Exception.py | tejan-singh/CollegeBuddy | b067345dff6dbf60a54bd48bedb7fab1f463d22d | [
"MIT"
] | 2 | 2020-12-06T15:49:16.000Z | 2020-12-10T15:20:50.000Z | CollegeBuddy_backend/framework/api/API_Exception.py | tejan-singh/CollegeBuddy | b067345dff6dbf60a54bd48bedb7fab1f463d22d | [
"MIT"
] | 5 | 2020-11-30T17:26:12.000Z | 2021-01-01T15:40:22.000Z | CollegeBuddy_backend/framework/api/API_Exception.py | tejan-singh/CollegeBuddy | b067345dff6dbf60a54bd48bedb7fab1f463d22d | [
"MIT"
] | 4 | 2020-12-05T18:30:16.000Z | 2021-01-02T13:17:54.000Z | class APIException(Exception):
def __init__(self, message, code=None):
self.context = {}
if code:
self.context['errorCode'] = code
super().__init__(message)
| 28.142857 | 44 | 0.593909 | 196 | 0.994924 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.055838 |
2a9d635771da63e49497a253910a29aded54decf | 906 | py | Python | ex084.py | BrianBeyer/pythonExercicios | 062e2c6a9e6e6f513185f1fb1d4269d8ca1d9e89 | [
"MIT"
] | null | null | null | ex084.py | BrianBeyer/pythonExercicios | 062e2c6a9e6e6f513185f1fb1d4269d8ca1d9e89 | [
"MIT"
] | null | null | null | ex084.py | BrianBeyer/pythonExercicios | 062e2c6a9e6e6f513185f1fb1d4269d8ca1d9e89 | [
"MIT"
] | null | null | null | lista = []
pess = []
men = 0
mai = 0
# Register people as [name, weight] records until the user stops; track the
# smallest and largest weight seen so far.
while True:
    pess.append(str(input('Nome:')))
    pess.append(float(input('Peso:')))
    if len(lista)==0:# nobody has been registered yet
        mai = men = pess[1]# the first weight is both the max and the min (pess[1] is the weight)
    else:
        if pess[1] > mai:
            mai = pess[1]
        if pess[1] < men:
            men = pess[1]
    lista.append(pess[:])
    pess.clear()
    resp = str(input('Quer continuar? [S/N]: '))
    if resp in 'Nn':
        break
print(f'Ao todo, você cadastrou {len(lista)} pessoas.')
print(f'O maior peso foi de {mai}Kg. Peso de ',end='')
for p in lista:# p is one [name, weight] record from the registry
    if p[1] == mai:
        print(f'[{p[0]}]',end='')
print()
print(f'O menor peso foi de {men}Kg. peso de ',end='')
for p in lista:
    if p[1] == men:
        print(f'[{p[0]}]',end='')
print()
| 25.885714 | 96 | 0.555188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.40484 |
2a9e3a0006c28ed83f7150106d1a796d5e3250bc | 399 | py | Python | src/solution/3_longest_substring_without_repeating_characters.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | 1 | 2021-03-01T07:33:45.000Z | 2021-03-01T07:33:45.000Z | src/solution/3_longest_substring_without_repeating_characters.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | null | null | null | src/solution/3_longest_substring_without_repeating_characters.py | rsj217/leetcode-in-python3 | f5d9fa50e55ce60a159f9a8ccf6080dc86f56852 | [
"MIT"
] | null | null | null |
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
max_len = 0
l, r = 0, 0
count = dict()
while r < len(s):
count[s[r]] = count.get(s[r], 0) + 1
while count[s[r]] > 1:
count[s[l]] = count[s[l]] - 1
l += 1
max_len = max(max_len, r-l+1)
r += 1
return max_len
| 24.9375 | 54 | 0.41604 | 396 | 0.992481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a9e6aba26f30258549a13ef7697fd4a41b47aac | 563 | py | Python | search_engine/worker.py | Dhruvacube/search-engine | 13ba35d99e1dcb7591975e352d70329d41f868e6 | [
"Apache-2.0"
] | null | null | null | search_engine/worker.py | Dhruvacube/search-engine | 13ba35d99e1dcb7591975e352d70329d41f868e6 | [
"Apache-2.0"
] | null | null | null | search_engine/worker.py | Dhruvacube/search-engine | 13ba35d99e1dcb7591975e352d70329d41f868e6 | [
"Apache-2.0"
] | null | null | null | import importlib
from uvicorn.workers import UvicornWorker
class DynamicUvicornWorker(UvicornWorker):
"""
This class is called `DynamicUvicornWorker` because it assigns values
according to the module available Union['asyncio', 'uvloop']
It also set `lifespan` to `off` :)
"""
spam_spec = importlib.util.find_spec("uvloop")
found = spam_spec is not None
if found:
CONFIG_KWARGS = {"loop": "uvloop", "http": "auto", "lifespan": "off"}
else:
CONFIG_KWARGS = {"loop": "auto", "http": "auto", "lifespan": "off"}
| 29.631579 | 77 | 0.65897 | 500 | 0.888099 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.492007 |
2a9f13a70d3783da3197770a209618a00e0a33a0 | 585 | py | Python | src/deviceinterface.py | nlitz88/metrics | a53cb0aa9bfbeea0dc9788f5461ff10905bc0352 | [
"MIT"
] | null | null | null | src/deviceinterface.py | nlitz88/metrics | a53cb0aa9bfbeea0dc9788f5461ff10905bc0352 | [
"MIT"
] | null | null | null | src/deviceinterface.py | nlitz88/metrics | a53cb0aa9bfbeea0dc9788f5461ff10905bc0352 | [
"MIT"
] | null | null | null | class DeviceInterface:
# This operation is used to initialize the device instance. Accepts a dictionary that is read in from the device's yaml definition.
# This device_config_yaml may contain any number of parameters/fields necessary to initialize/setup the instance for data collection.
    def initialize(self, device_config_dict):
        """Set up the device instance from its parsed YAML definition dict (stub; no-op)."""
        pass
# This is a standard operation that will return a dictionary to be converted to the appropriate format (json, plaintext, etc) for
# use by endpoints or other publishing methods.
def get_device_data(self):
pass | 65 | 137 | 0.760684 | 585 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.752137 |
2a9f331439bd99a892c7c6363b31983af66fb320 | 3,949 | py | Python | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 27 | 2020-02-29T04:13:22.000Z | 2022-02-07T21:54:50.000Z | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 5 | 2020-06-01T18:50:38.000Z | 2021-07-16T07:13:52.000Z | tensorflow/python/debug/lib/debug_events_reader.py | scentini/tensorflow | 204ed332c0886a0e0ab10b22ba8d67b97e1c83c4 | [
"Apache-2.0"
] | 10 | 2020-12-15T03:55:24.000Z | 2021-12-17T23:14:11.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reader class for tfdbg v2 debug events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class DebugEventsReader(object):
  """Reader class for a tfdbg v2 DebugEvents directory."""
  def __init__(self, dump_root):
    # dump_root must contain exactly one "*.metadata" file; its prefix names
    # the companion record files (.source_files, .stack_frames, .graphs,
    # .execution, .graph_execution_traces) read by the iterators below.
    if not os.path.isdir(dump_root):
      raise ValueError("Specified dump_root is not a directory: %s" % dump_root)
    metadata_paths = glob.glob(os.path.join(dump_root, "*.metadata"))
    if not metadata_paths:
      raise ValueError("Cannot find any metadata file in directory: %s" %
                       dump_root)
    elif len(metadata_paths) > 1:
      raise ValueError(
          "Unexpected: Found multiple (%d) metadata in directory: %s" %
          (len(metadata_paths), dump_root))
    self._metadata_path = compat.as_bytes(metadata_paths[0])
    self._metadata_reader = None
    prefix = metadata_paths[0][:-len(".metadata")]
    self._source_files_path = compat.as_bytes("%s.source_files" % prefix)
    self._stack_frames_path = compat.as_bytes("%s.stack_frames" % prefix)
    self._graphs_path = compat.as_bytes("%s.graphs" % prefix)
    self._execution_path = compat.as_bytes("%s.execution" % prefix)
    self._graph_execution_traces_path = compat.as_bytes(
        "%s.graph_execution_traces" % prefix)
    self._readers = dict() # A map from file path to reader.
    self._readers_lock = threading.Lock()
  def _generic_iterator(self, file_path):
    """A helper method that makes an iterator given a debug-events file path.

    Yields DebugEvent protos parsed from consecutive records of the file,
    stopping on a data-loss or end-of-file condition.
    """
    # The following code uses the double-checked locking pattern to optimize
    # the common case (where the reader is already initialized).
    if file_path not in self._readers: # 1st check, without lock.
      with self._readers_lock:
        if file_path not in self._readers: # 2nd check, with lock.
          with errors.raise_exception_on_not_ok_status() as status:
            self._readers[file_path] = pywrap_tensorflow.PyRecordReader_New(
                compat.as_bytes(file_path), 0, b"", status)
    reader = self._readers[file_path]
    while True:
      try:
        reader.GetNext()
      except (errors.DataLossError, errors.OutOfRangeError):
        # We ignore partial read exceptions, because a record may be truncated.
        # PyRecordReader holds the offset prior to the failed read, so retrying
        # will succeed.
        break
      yield debug_event_pb2.DebugEvent.FromString(reader.record())
  def metadata_iterator(self):
    return self._generic_iterator(self._metadata_path)
  def source_files_iterator(self):
    return self._generic_iterator(self._source_files_path)
  def stack_frames_iterator(self):
    return self._generic_iterator(self._stack_frames_path)
  def graphs_iterator(self):
    return self._generic_iterator(self._graphs_path)
  def execution_iterator(self):
    return self._generic_iterator(self._execution_path)
  def graph_execution_traces_iterator(self):
    return self._generic_iterator(self._graph_execution_traces_path)
| 41.135417 | 80 | 0.719423 | 2,870 | 0.726766 | 1,049 | 0.265637 | 0 | 0 | 0 | 0 | 1,486 | 0.376298 |
2aa12187c846911b063045b77f3dd27d4bcbb70f | 24,225 | py | Python | src/amuse/couple/bridge.py | aatrani/amuse | fd1abcfb1b118a9ab13031912abf6e65e9c60dde | [
"Apache-2.0"
] | null | null | null | src/amuse/couple/bridge.py | aatrani/amuse | fd1abcfb1b118a9ab13031912abf6e65e9c60dde | [
"Apache-2.0"
] | 2 | 2018-06-22T13:02:14.000Z | 2018-09-06T20:08:43.000Z | src/amuse/couple/bridge.py | aatrani/amuse | fd1abcfb1b118a9ab13031912abf6e65e9c60dde | [
"Apache-2.0"
] | null | null | null | """
bridge-like integrator for amuse
the bridge class provides a bridge like coupling between different
gravitational integrators. In this way a system composed of multiple
components can be evolved taking account of the self gravity of the whole
system self-consistently, while choosing the most appropriate integrator
for the self-gravity of the component systems. This is mainly useful for
systems consisting of two or more components that are either well separated
spatially or have different scales (otherwise using a single integrator is
more efficient)
The main idea is that systems experience each others gravity through
periodic velocty kicks with ordinary evolution in between - the evolution
is thus described by an alternation of drift (D) and kick (K) operators,
here chosen as:
K(1/2 dt) D(dt) K(1/2 dt)
K(dt) denotes a kick of the velocities over a timestep dt, while D(dt)
denotes a drift, meaning secular evolution using self gravity of the
system, over dt.
implementation notes:
In order to use bridge the component systems should be initialized as usual,
then a bridge systems is initialized, after which one or more systems are
added:
from amuse.ext.bridge import bridge
bridgesys=bridge(verbose=False)
bridgesys.add_system(galaxy, (cluster,), False)
bridgesys.add_system(cluster, (galaxy,), True )
bridge builds on the full gravity interface, so unit handling etc is
guaranteed. Bridge itself is a (somewhat incomplete) gravity interface,
so the usual evolve, get_potential methods work (and bridge can be a
component in a bridge systems). Note that a single coordinate system should
be used at the moment for all the components systems (different units are
allowed though). The call to add systems, for example:
bridgesys.add_system(galaxy, (cluster,), False)
has three arguments: the system, a set with *interaction* partners and
a flag to specify whether synchronization is needed . The
interaction partners indicate which systems will kick the system. In the
most simple case these would be the set of other systems that are added,
but usually this is not what you want to get good performance. In some
cases you want to ignore one direction of interaction (eg. in a combined
simulation of a galaxy and a comet orbits around a star you may want the
ignore the gravity of the comet), in other cases you want to use a
different force calculator (eg integrating a cluster in a galaxy where
the galaxy is evolved with a tree code and the cluster with a direct sum
code, one also would want to use a tree code to calculate the cluster
gravity for the galaxy. In such a case one can derive a skeleton gravity
interface from the cluster system. A module is provided with some
examples of such *derived* systems, derived_grav_systems.py
Hints for good use:
The bridgesys is flexible but care should be taken in order to obtain
valid results. For one thing, there is no restriction or check on the
validity of the assumption of well separated dynamics: for example any
system could be split up and put together in bridge, but if the timestep
is chosen to be larger than the timestep criterion of the code, the
integration will show errors.
For good performance one should use derived systems to reduce the
complexity where possible.
There is an issue with the synchronization: some codes do not end on the
exact time of an evolve, or need an explicit sync call. In these cases it
is up to the user to determine whether bridge can be used (an explicit
sync call may induce extra errors that degrade the order of the
integrator).
"""
# issues:
# - for now, units in si
# - a common coordinate system is used for all systems
# - sync of systems should be checked
# - timestepping: adaptive dt?
import threading
from amuse.units import quantities
from amuse.units import units, constants, generic_unit_system, nbody_system
from amuse import datamodel
from amuse.support.exceptions import AmuseException
class AbstractCalculateFieldForCodes(object):
    """
    Calculates gravity and potential fields at requested points, using the
    particles of other codes loaded into a field-solver code provided by a
    subclass (through the _setup_code/_cleanup_code hooks).
    """
    def __init__(self, input_codes, verbose=False, required_attributes=None):
        """
        'verbose' indicates whether to output some run info
        'required_attributes' specifies which particle attributes need to be
        transferred from the input_codes to the code that will calculate the
        field. For example, some codes don't need the velocity. Other codes
        may (wrongly) interpret the radius of the input code as gravitational
        softening. In the latter case
        required_attributes=['mass', 'x','y','z', 'vx','vy','vz']
        should prevent the radius of the input codes from being used.
        """
        self.codes_to_calculate_field_for = input_codes
        self.verbose=verbose
        # Stored as a predicate (particle_set, attribute_name) -> bool, passed
        # to Particles.copy(filter_attributes=...) in the get_* methods below.
        if required_attributes is None:
            self.required_attributes = lambda p, attribute_name: True
        else:
            self.required_attributes = lambda p, attribute_name: attribute_name in required_attributes
    def evolve_model(self,tend,timestep=None):
        """
        Does nothing: this field calculator has no state to evolve.
        """
    def get_potential_at_point(self,radius,x,y,z):
        # Copy the (filtered) particles of every input code into the field
        # solver, then sample the potential at the requested points.  The
        # finally-block guarantees the solver is cleaned up even on failure.
        code = self._setup_code()
        try:
            for input_code in self.codes_to_calculate_field_for:
                particles = input_code.particles.copy(filter_attributes = self.required_attributes)
                code.particles.add_particles(particles)
                code.commit_particles()
            return code.get_potential_at_point(radius,x,y,z)
        finally:
            self._cleanup_code(code)
    def get_gravity_at_point(self,radius,x,y,z):
        # Same pattern as get_potential_at_point, but samples accelerations.
        code = self._setup_code()
        try:
            for input_code in self.codes_to_calculate_field_for:
                particles = input_code.particles.copy(filter_attributes = self.required_attributes)
                code.particles.add_particles(particles)
                code.commit_particles()
            return code.get_gravity_at_point(radius,x,y,z)
        finally:
            self._cleanup_code(code)
    def _setup_code(self):
        # Subclass hook: return the field-solver code instance to use.
        pass
    def _cleanup_code(self, code):
        # Subclass hook: release or reset the field-solver code after use.
        pass
class CalculateFieldForCodes(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields using the particles of other
    codes.  A fresh field-solver instance is created by the supplied factory
    for every calculation and stopped again afterwards.
    """
    def __init__(self, code_factory_function, input_codes, *args, **kwargs):
        self.code_factory_function = code_factory_function
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
    def _setup_code(self):
        # one brand-new solver instance per field evaluation
        return self.code_factory_function()
    def _cleanup_code(self, code):
        # the instance is throw-away: terminate it completely
        code.stop()
class CalculateFieldForCodesUsingReinitialize(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields using the particles of other
    codes.  A single long-lived field-solver code is reused; it is reset
    after every calculation.
    """
    def __init__(self, code, input_codes, *args, **kwargs):
        self.code = code
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
    def _setup_code(self):
        return self.code
    def _cleanup_code(self, code):
        # wipe the solver state so the next evaluation starts clean
        code.reset()
class CalculateFieldForCodesUsingRemove(AbstractCalculateFieldForCodes):
    """
    Calculates gravity and potential fields using the particles of other
    codes.  A single long-lived field-solver code is reused; its particles
    are removed again after every calculation.
    """
    def __init__(self, code, input_codes, *args, **kwargs):
        self.code = code
        AbstractCalculateFieldForCodes.__init__(self, input_codes, *args, **kwargs)
    def _setup_code(self):
        return self.code
    def _cleanup_code(self, code):
        # empty the solver instead of resetting it
        code.particles.remove_particles(code.particles)
class CalculateFieldForParticles(object):
    """
    Calculates a gravity field for a set of particles; the set
    of particles can be from another code.
    """
    def __init__(self, particles = None, gravity_constant = None,
            softening_mode="shared", G = None):
        # 'gravity_constant' and 'G' are aliases; at most one may be supplied.
        if particles is None:
            self.particles=datamodel.Particles()
        else:
            self.particles = particles
        if gravity_constant is None:
            gravity_constant = G
        elif not G is None:
            raise Exception("both the parameter 'gravity_constant'({0}) and the parameter 'G'({1}) are given, please specify only one!".format(gravity_constant, G))
        if gravity_constant is None:
            # No constant given: default to constants.G only when the masses
            # carry SI units (value_in(units.kg) succeeds); generic units are
            # ambiguous, so a constant must then be supplied explicitly.
            if len(particles) and hasattr(particles, 'mass'):
                try:
                    particles[0].mass.value_in(units.kg)
                    self.gravity_constant = constants.G
                except:
                    raise AmuseException("For generic units the gravity_constant must be specified")
            else:
                raise AmuseException("Particle data not yet available, so the gravity_constant must be specified")
        else:
            self.gravity_constant = gravity_constant
        # Select how the softening length is obtained per particle:
        # per-particle radius, per-particle h_smooth, or one shared value.
        if softening_mode == "individual" or softening_mode == "radius":
            self._softening_lengths_squared = self._softening_lengths_squared_individual
        elif softening_mode == "h_smooth":
            self._softening_lengths_squared = self._softening_lengths_squared_h_smooth
        else:
            self._softening_lengths_squared = self._softening_lengths_squared_shared
        self.smoothing_length_squared = quantities.zero
    def _softening_lengths_squared_individual(self):
        return self.particles.radius**2
    def _softening_lengths_squared_h_smooth(self):
        return self.particles.h_smooth**2
    def _softening_lengths_squared_shared(self):
        return self.smoothing_length_squared#.as_vector_with_length(len(self.particles))
    def cleanup_code(self):
        # Drop the current particle set, mirroring a code interface's cleanup.
        self.particles = datamodel.Particles()
    def evolve_model(self,tend,timestep=None):
        """
        Does nothing: this field calculator has no state to evolve.
        """
    def get_potential_at_point(self,radius,x,y,z):
        # Direct summation of -G * sum(m / softened_distance) per query point.
        positions = self.particles.position
        result = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
            dr = (dr_squared + self._softening_lengths_squared()).sqrt()
            energy_of_this_particle = (self.particles.mass / dr).sum()
            result.append(-self.gravity_constant * energy_of_this_particle)
        return result
    def get_gravity_at_point(self,radius,x,y,z):
        # Direct summation of softened accelerations per query point; the
        # query point's own radius is added to the softening term.
        positions = self.particles.position
        m1 = self.particles.mass
        result_ax = quantities.AdaptingVectorQuantity()
        result_ay = quantities.AdaptingVectorQuantity()
        result_az = quantities.AdaptingVectorQuantity()
        for i in range(len(x)):
            dx = x[i] - positions.x
            dy = y[i] - positions.y
            dz = z[i] - positions.z
            dr_squared = ((dx * dx) + (dy * dy) + (dz * dz) +
                self._softening_lengths_squared() + radius[i]**2)
            ax = -self.gravity_constant * (m1*dx/dr_squared**1.5).sum()
            ay = -self.gravity_constant * (m1*dy/dr_squared**1.5).sum()
            az = -self.gravity_constant * (m1*dz/dr_squared**1.5).sum()
            result_ax.append(ax)
            result_ay.append(ay)
            result_az.append(az)
        return result_ax, result_ay, result_az
class GravityCodeInField(object):
    """Wraps a gravity code so it can be kicked by the fields of partner
    codes inside a Bridge (second-order kick-drift-kick coupling)."""
    def __init__(self, code, field_codes, do_sync=True, verbose=False, radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
        """
        verbose indicates whether to output some run info
        """
        self.code = code
        self.field_codes = field_codes
        # start this subsystem's clock from the wrapped code's time if available
        if hasattr(self.code, 'model_time'):
            self.time = self.code.model_time
        else:
            self.time = quantities.zero
        self.do_sync=do_sync
        self.verbose=verbose
        self.timestep=None
        self.radius_is_eps = radius_is_eps
        self.h_smooth_is_eps = h_smooth_is_eps
        # attributes copied when snapshotting particles for kicks/energies
        required_attributes = ['mass', 'x', 'y', 'z', 'vx', 'vy', 'vz']
        if self.radius_is_eps:
            required_attributes.append('radius')
        elif self.h_smooth_is_eps:
            required_attributes.append('h_smooth')
        self.required_attributes = lambda p, x : x in required_attributes
        # fall back to zero smoothing when the code exposes no epsilon_squared
        if not hasattr(self.code,"parameters"):
            self.zero_smoothing=True
        elif not hasattr(self.code.parameters,"epsilon_squared"):
            self.zero_smoothing=True
        else:
            self.zero_smoothing=zero_smoothing
    def evolve_model(self,tend,timestep=None):
        """
        evolve combined system to tend, timestep fixes timestep
        """
        if timestep is None:
            timestep = self.timestep
        first=True
        # leapfrog: half kick first, then (drift, full kick)*, closing half kick
        while self.time < (tend-timestep/2.):
            if first:
                self.kick(timestep/2.)
                first=False
            else:
                self.kick(timestep)
            self.drift(self.time+timestep)
            self.time+=timestep
        if not first:
            self.kick(timestep/2.)
    def synchronize_model(self):
        """
        explicitly synchronize all components
        """
        if hasattr(self.code,"synchronize_model"):
            if(self.verbose):
                print(self.code.__class__.__name__,"is synchronizing", end=' ')
            self.code.synchronize_model()
            if(self.verbose):
                print(".. done")
    def get_potential_at_point(self,radius,x,y,z):
        # delegate to the wrapped code
        return self.code.get_potential_at_point(radius,x,y,z)
    def get_gravity_at_point(self,radius,x,y,z):
        # delegate to the wrapped code
        return self.code.get_gravity_at_point(radius,x,y,z)
    @property
    def model_time(self):
        # bridge-local time of this subsystem
        return self.time
    @property
    def potential_energy(self):
        # own potential energy plus interaction energy with each field code
        if not hasattr(self.code, 'particles'):
            return quantities.zero
        result = self.code.potential_energy
        particles = self.code.particles.copy(filter_attributes = self.required_attributes)
        for y in self.field_codes:
            energy = self.get_potential_energy_in_field_code(particles, y)
            result += energy
        return result
    @property
    def kinetic_energy(self):
        return self.code.kinetic_energy
    @property
    def thermal_energy(self):
        # zero for codes without a thermal component
        if hasattr(self.code,'thermal_energy'):
            return self.code.thermal_energy
        else:
            return quantities.zero
    @property
    def particles(self):
        return self.code.particles
    @property
    def gas_particles(self):
        if hasattr(self.code, "gas_particles"):
            return self.code.gas_particles
        else:
            raise AttributeError
    @property
    def dm_particles(self):
        if hasattr(self.code, "dm_particles"):
            return self.code.dm_particles
        else:
            raise AttributeError
    def drift(self, tend):
        # advance the wrapped code's internal dynamics to tend
        if not hasattr(self.code,"evolve_model"):
            return
        if (self.verbose):
            print(self.code.__class__.__name__, "is evolving to", tend)
        self.code.evolve_model(tend)
        if(self.verbose):
            print(".. done")
    def cannot_kick(self):
        """
        check if the code is capable of kicking other particles,
        please do not try to optimize this, I know it is called every kick but
        only calculating it at the start causes an annoying bug in certain uses of the code.
        """
        return len(self.code.particles)==0 or not (hasattr(self, 'particles') and 'vx' in self.particles.get_attribute_names_defined_in_store())
    def kick(self, dt):
        # Apply velocity kicks from all field codes; returns the kinetic
        # energy change caused by the kick (zero when kicking is impossible).
        if self.cannot_kick():
            return quantities.zero
        particles = self.code.particles.copy(filter_attributes = self.required_attributes)
        kinetic_energy_before = particles.kinetic_energy()
        for field_code in self.field_codes:
            if(self.verbose):
                print(self.code.__class__.__name__,"receives kick from",field_code.__class__.__name__, end=' ')
            self.kick_with_field_code(
                particles,
                field_code,
                dt
            )
            if(self.verbose):
                print(".. done")
        # copy the updated velocities back into the wrapped code
        channel=particles.new_channel_to(self.code.particles)
        channel.copy_attributes(["vx","vy","vz"])
        kinetic_energy_after = particles.kinetic_energy()
        return kinetic_energy_after - kinetic_energy_before
    def _softening_lengths(self, particles):
        # choose the softening-length source configured at construction time
        if self.radius_is_eps:
            return particles.radius
        elif self.h_smooth_is_eps:
            return particles.h_smooth
        elif self.zero_smoothing:
            return 0.*particles.x
        else:
            return (self.code.parameters.epsilon_squared**0.5).as_vector_with_length(len(particles))
    def get_potential_energy_in_field_code(self, particles, field_code):
        pot=field_code.get_potential_at_point(
            self._softening_lengths(particles),
            particles.x,
            particles.y,
            particles.z
        )
        # note the factor 1/2 applied to the summed m*phi interaction term
        return (pot*particles.mass).sum() / 2
    def kick_with_field_code(self, particles, field_code, dt):
        # evaluate the partner's acceleration at our particle positions
        ax,ay,az=field_code.get_gravity_at_point(
            self._softening_lengths(particles),
            particles.x,
            particles.y,
            particles.z
        )
        self.update_velocities(particles, dt, ax, ay, az)
    def update_velocities(self,particles, dt, ax, ay, az):
        # first-order velocity update: v += a * dt
        particles.vx += dt * ax
        particles.vy += dt * ay
        particles.vz += dt * az
    def stop(self):
        self.code.stop()
class Bridge(object):
    """Operator-splitting ('bridge') integrator coupling several codes.

    Each added system is drifted by its own code and kicked by the gravity
    fields of its partner codes, using a second-order kick-drift-kick scheme
    (or a user supplied composition via ``method``).
    """
    def __init__(self, timestep = None, verbose=False, use_threading=True,method=None):
        """
        verbose indicates whether to output some run info
        """
        self.codes = []
        self.time = quantities.zero
        self.verbose = verbose
        self.timestep = timestep
        self.kick_energy = quantities.zero
        # drift the codes in parallel threads unless disabled
        self.use_threading = use_threading
        # per-code offset between bridge time and the code's own model_time
        self.time_offsets = dict()
        self.method = method
        self.channels = datamodel.Channels()
    def add_system(self, interface, partners=None, do_sync=True,
            radius_is_eps=False, h_smooth_is_eps=False, zero_smoothing=False):
        """
        add a system to bridge integrator
        """
        # BUGFIX: 'partners' used to default to a shared mutable set() (the
        # classic mutable-default pitfall); None is now used as the sentinel
        # and behaves identically for callers relying on the old default.
        if partners is None:
            partners = set()
        if hasattr(interface, "particles"):
            code = GravityCodeInField(interface, partners, do_sync, self.verbose,
                radius_is_eps, h_smooth_is_eps, zero_smoothing)
            self.add_code(code)
        else:
            if len(partners):
                raise Exception("You added a code without particles, but with partners, this is not supported!")
            self.add_code(interface)
    def add_code(self, code):
        # Register a code and remember its time offset relative to the bridge.
        self.codes.append(code)
        if hasattr(code, "model_time"):
            self.time_offsets[code] = (self.time - code.model_time)
        else:
            self.time_offsets[code] = quantities.zero
    def evolve_model(self, tend, timestep=None):
        """
        evolve combined system to tend, timestep fixes timestep
        """
        if timestep is None:
            if self.timestep is None:
                timestep = tend - self.time
            else:
                timestep = self.timestep
        if self.method is None:
            return self.evolve_joined_leapfrog(tend, timestep)
        else:
            return self.evolve_simple_steps(tend, timestep)
    def evolve_simple_steps(self, tend, timestep):
        # Alternative stepping: delegate the (kick, drift) composition to the
        # user supplied 'method'.
        while self.time < (tend - timestep / 2):
            self._drift_time = self.time
            self.method(self.kick_codes, self.drift_codes_dt, timestep)
            self.channels.copy()
            self.time = self.time + timestep
    def evolve_joined_leapfrog(self, tend, timestep):
        # Second order kick-drift-kick: half kick on the first step, full
        # kicks in between, closing half kick at the end.
        first = True
        while self.time < (tend - timestep / 2.):
            if first:
                self.kick_codes(timestep / 2.)
                first = False
            else:
                self.kick_codes(timestep)
            self.drift_codes(self.time + timestep)
            self.channels.copy()
            self.time += timestep
        if not first:
            self.kick_codes(timestep / 2.)
    def synchronize_model(self):
        """
        explicitly synchronize all components
        """
        for x in self.codes:
            if hasattr(x, "synchronize_model"):
                if(self.verbose): print(x.__class__.__name__, "is synchronizing", end=' ')
                x.synchronize_model()
                if(self.verbose): print(".. done")
    def stop(self):
        # stop every managed code that supports stopping
        for one_code in self.codes:
            if hasattr(one_code, "stop"):
                one_code.stop()
    def get_potential_at_point(self, radius, x, y, z):
        # sum of the potentials of all codes at the given points
        pot = quantities.zero
        for code in self.codes:
            _pot = code.get_potential_at_point(radius, x, y, z)
            pot = pot + _pot
        return pot
    def get_gravity_at_point(self, radius, x, y, z):
        # sum of the accelerations of all codes at the given points
        ax = quantities.zero
        ay = quantities.zero
        az = quantities.zero
        for code in self.codes:
            _ax, _ay, _az = code.get_gravity_at_point(radius, x, y, z)
            ax = ax + _ax
            ay = ay + _ay
            az = az + _az
        return ax, ay, az
    @property
    def model_time(self):
        # current bridge time
        return self.time
    @property
    def potential_energy(self):
        result = quantities.zero
        for x in self.codes:
            result += x.potential_energy
        return result
    @property
    def kinetic_energy(self):
        result = quantities.zero
        for x in self.codes:
            result += x.kinetic_energy
        return result  # - self.kick_energy
    @property
    def thermal_energy(self):
        result = quantities.zero
        for x in self.codes:
            if hasattr(x, 'thermal_energy'):
                result += x.thermal_energy
        return result
    @property
    def particles(self):
        # superset of the particle sets of all codes that expose particles
        array = []
        for x in self.codes:
            if hasattr(x, "particles"):
                array.append(x.particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    @property
    def gas_particles(self):
        array = []
        for x in self.codes:
            if hasattr(x, "gas_particles"):
                array.append(x.gas_particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    @property
    def dm_particles(self):
        # prefer each code's dm_particles, fall back to its plain particles
        array = []
        for x in self.codes:
            if hasattr(x, "dm_particles"):
                array.append(x.dm_particles)
            elif hasattr(x, "particles"):
                array.append(x.particles)
        if len(array) == 0:
            raise AttributeError
        elif len(array) == 1:
            return array[0]
        return datamodel.ParticlesSuperset(array)
    # 'private' functions
    def drift_codes_dt(self, dt):
        # drift helper used by evolve_simple_steps; advances the shadow clock
        self._drift_time += dt
        self.drift_codes(self._drift_time)
    def drift_codes(self, tend):
        # Drift every code to its own (offset corrected) end time,
        # in parallel threads when use_threading is enabled.
        threads = []
        for x in self.codes:
            offset = self.time_offsets[x]
            if hasattr(x, "drift"):
                threads.append(threading.Thread(target=x.drift, args=(tend - offset,)))
            elif hasattr(x, "evolve_model"):
                threads.append(threading.Thread(target=x.evolve_model, args=(tend - offset,)))
        if self.use_threading:
            for x in threads:
                x.start()
            for x in threads:
                x.join()
        else:
            # run sequentially in this thread
            for x in threads:
                x.run()
    def kick_codes(self, dt):
        # let every kick-capable code kick, bookkeeping the injected energy
        de = quantities.zero
        for x in self.codes:
            if hasattr(x, "kick"):
                de += x.kick(dt)
        self.kick_energy += de
| 33.881119 | 164 | 0.637647 | 20,135 | 0.831166 | 0 | 0 | 2,861 | 0.118101 | 0 | 0 | 6,822 | 0.28161 |
2aa127aad07445a64e9152911064c6bad0de9f2d | 8,808 | py | Python | active.py | cgretzem/leaguepkg | 8bb298417e948806b1bccd3ffe31ac23ccb62bf3 | [
"MIT"
] | 1 | 2021-09-17T22:42:26.000Z | 2021-09-17T22:42:26.000Z | active.py | cgretzem/leaguepkg | 8bb298417e948806b1bccd3ffe31ac23ccb62bf3 | [
"MIT"
] | null | null | null | active.py | cgretzem/leaguepkg | 8bb298417e948806b1bccd3ffe31ac23ccb62bf3 | [
"MIT"
] | null | null | null | """
Handles classes and methods for an active local Game.
Classes:
`ActiveGame` - Represents an active LOL game
`Player` - represents a player inside the game
`ActivePlayer` - subclass of player, represents host
`Item` - represents an item in game
Methods:
`check_status()` -> bool
Errors:
`RequestError(str)` - Cannot connect to LOL api
Misc Variables:
__version__
"""
import requests, json, random, time
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class Error(Exception):
    """Base class for custom exceptions raised by this module."""
    pass
class RequestError(Error):
    """Occurs when there is an issue retrieving data from the Riot live client API."""

    def __init__(self, msg):
        # Pass msg to Exception so str(exc) and exc.args carry the message too
        # (previously str(exc) was empty).
        super().__init__(msg)
        self.msg = msg
class PlayerNotFoundException(Error):
    """Occurs when a player is not found in the live client API."""

    def __init__(self, msg):
        # Pass msg to Exception so str(exc) is informative (was empty before).
        super().__init__(msg)
        self.msg = msg
def check_status():
    """Return True if the local live-client API reports an active game.

    Returns False when the client is unreachable (no game running) or when
    the playerlist endpoint returns an error payload instead of a list.
    """
    try:
        output = requests.get('https://127.0.0.1:2999/liveclientdata/playerlist', verify = False)
    except Exception:
        # narrowed from a bare 'except:'; connection refused means no game
        return False
    response = output.json()
    # a healthy endpoint returns a JSON array of players
    return isinstance(response, list)
class Item:
    """
    A class to represent an item
    Attributes:
    ----------
    `can_use` : bool
        True if item is an active item.
    `consumable` : bool
        True if item is a consumable.
    `count` : int
        Represents how many charges an item has left.
    `display_name` : str
        In-Game name of item.
    `item_ID` : int or None
        ID of the item (None when the source dict has no 'itemID' key).
    `price` : int
        Cost of the item in the shop.
    `slot` : int
        Represents the slot the player has the item in.
    """
    def __init__(self, item_dict : dict):
        """Constructor that initializes the item based on a dict of values"""
        self.can_use = item_dict['canUse']
        self.consumable = item_dict['consumable']
        self.count = item_dict['count']
        self.display_name = item_dict['displayName']
        # BUGFIX: item_ID was documented but never set; use .get() so dicts
        # without 'itemID' still work (attribute becomes None in that case).
        self.item_ID = item_dict.get('itemID')
        self.price = item_dict['price']
        self.slot = item_dict['slot']
class Player:
    """
    Represents one participant in an active game.

    Attributes:
    ----------
    `champion_name` : str
        name of the champion the player is piloting
    `is_bot` : bool
        true if the player is a bot
    `is_dead` : bool
        true if the player is dead
    `items` : list[Item]
        items currently in the player's inventory
    `level` : int
        current level of player
    `position` : str
        expected position of the player (may not always be accurate)
    `respawn_timer` : double
        remaining time on the respawn timer
    `runes` : tuple(str)
        basic rune paths as (keystone, primaryTree, secondaryTree)
    `scores` : dict
        assists, creepScore, deaths, kills and ward score
    `skin_ID` : int
        ID of skin player is using, default skin is 0
    `summoner_name` : str
        name of the player
    `summoner_spells` : tuple(str)
        the two summoner spells
    `team` : str
        either CHAOS or ORDER
    """
    def __init__(self, player_dict : dict):
        """Populate all fields from a live-client player entry."""
        self.champion_name = player_dict['championName']
        self.is_bot = player_dict['isBot']
        self.is_dead = player_dict['isDead']
        self.items = [Item(entry) for entry in player_dict['items']]
        self.level = player_dict['level']
        self.position = player_dict['position']
        self.respawn_timer = player_dict['respawnTimer']
        rune_data = player_dict['runes']
        self.runes = (rune_data['keystone']['displayName'],
                      rune_data['primaryRuneTree']['displayName'],
                      rune_data['secondaryRuneTree']['displayName'])
        self.scores = player_dict['scores']
        self.skin_ID = player_dict['skinID']
        self.summoner_name = player_dict['summonerName']
        spell_data = player_dict['summonerSpells']
        self.summoner_spells = (spell_data['summonerSpellOne']['displayName'],
                                spell_data['summonerSpellTwo']['displayName'])
        self.team = player_dict['team']
class ActivePlayer(Player):
    """
    A subclass of player that represents an active player, with more information
    Attributes:
    ----------
    `abilities` : dict of dicts
        holds ability level, name, and ID
    `champion_stats` : dict
        represents all champion stats such as AP and armor
    `gold` : double
        gold value in players inventory
    `full_runes` : list[dict]
        gets the full runes of player
    `stat_runes` : list[dict]
        contains stat runes such as armor, mr, attack speed, or cooldown reduction
    """
    def __init__(self, player_dict : dict, active_dict : dict):
        # Initialise the shared Player fields first, then add host-only data
        # taken from the /activeplayer endpoint payload.
        Player.__init__(self, player_dict)
        self.abilities = active_dict['abilities']
        self.champion_stats = active_dict['championStats']
        self.gold = active_dict['currentGold']
        self.full_runes = active_dict['fullRunes']['generalRunes']
        self.stat_runes = active_dict['fullRunes']['statRunes']
class ActiveGame:
    """
    A class to represent an active League of Legends Game.

    Attributes
    ----------
    `event_list` : list[dict]
        list of all events seen so far in the game
    `active_player` : ActivePlayer or None
        the player hosting the live client, set by loadPlayerList()
    `players` : list[Player]
        list of players in the current game

    Methods:
    ----------
    `updateEventList()` : list[dict]
        refreshes the game event list, returning only the new events
    `getLastEvent()` : dict
        returns the most recent event, or None if no events happened
    """
    def __init__(self):
        """Initializes a new instance of an active game and pulls initial data."""
        self.event_list = []
        self.active_player = None
        self.players = []
        self.updateEventList()
        self.loadPlayerList()
    def updateEventList(self):
        """Adds new Events to event_list, returns list of new events."""
        # gets json data from the League live-client API
        previous_events = self.event_list
        url = "https://127.0.0.1:2999/liveclientdata/eventdata"
        try:
            response = requests.get(url, verify = False)
            output = response.json()
            self.event_list = output['Events']
        except Exception:
            raise RequestError('Unable to retrieve Game Events')
        # set of already-seen IDs for O(1) membership tests
        # (also removes the previously unused 'count' variable)
        known_ids = {event['EventID'] for event in previous_events}
        return [event for event in self.event_list if event['EventID'] not in known_ids]
    def getLastEvent(self):
        """Gets the most recent event in the event_list, returns None if event_list is empty."""
        if not self.event_list:
            return None
        return self.event_list[-1]
    def loadPlayerList(self):
        """(Re)build self.players and self.active_player from the live client."""
        try:
            res = requests.get('https://127.0.0.1:2999/liveclientdata/playerlist', verify = False)
            active = requests.get('https://127.0.0.1:2999/liveclientdata/activeplayer', verify = False)
        except Exception:
            # narrowed from a bare 'except:' so e.g. KeyboardInterrupt escapes
            raise RequestError('Unable to retrieve playerlist')
        output = res.json()
        active_out = active.json()
        self.players.clear()
        for user in output:
            # compare raw names directly instead of building a throwaway Player
            if user['summonerName'] == active_out['summonerName']:
                act_player = ActivePlayer(user, active_out)
                self.players.append(act_player)
                self.active_player = act_player
            else:
                self.players.append(Player(user))
    def isPlayerPresent(self, player:str):
        """Checks if a player is in the current game"""
        return any(user.summoner_name == player for user in self.players)
    def getChampName(self, player:str):
        """Gets the champion name of specified player based on IGN."""
        for user in self.players:
            if user.summoner_name == player:
                return user.champion_name
        raise PlayerNotFoundException('Could not find player in playerlist')
| 33.112782 | 183 | 0.602293 | 7,770 | 0.882153 | 0 | 0 | 0 | 0 | 0 | 0 | 4,667 | 0.529859 |
2aa2c472094d956432f11fdf8f38e4862376bad8 | 6,406 | py | Python | UMLRT2Kiltera_MM/transformation_eq_outside/HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/transformation_eq_outside/HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/transformation_eq_outside/HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | from core.himesis import Himesis
import uuid
class HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans(Himesis):
    """Auto-generated Himesis graph for the DSLTrans rule
    ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans: a match pattern
    (State containing an ExitPoint) paired with an apply pattern
    (LocalDef -> ProcDef -> Name/Par -> Trigger) plus attribute equations."""
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans, self).__init__(name='HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'ExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        # match class State() node
        self.add_node()
        self.vs[3]["mm__"] = """State"""
        self.vs[3]["attr1"] = """+"""
        # match_contains node for class State()
        self.add_node()
        self.vs[4]["mm__"] = """match_contains"""
        # match class ExitPoint() node
        self.add_node()
        self.vs[5]["mm__"] = """ExitPoint"""
        self.vs[5]["attr1"] = """+"""
        # match_contains node for class ExitPoint()
        self.add_node()
        self.vs[6]["mm__"] = """match_contains"""
        # apply class LocalDef() node
        self.add_node()
        self.vs[7]["mm__"] = """LocalDef"""
        self.vs[7]["attr1"] = """1"""
        # apply_contains node for class LocalDef()
        self.add_node()
        self.vs[8]["mm__"] = """apply_contains"""
        # apply class ProcDef() node
        self.add_node()
        self.vs[9]["mm__"] = """ProcDef"""
        self.vs[9]["attr1"] = """1"""
        # apply_contains node for class ProcDef()
        self.add_node()
        self.vs[10]["mm__"] = """apply_contains"""
        # apply class Name() node
        self.add_node()
        self.vs[11]["mm__"] = """Name"""
        self.vs[11]["attr1"] = """1"""
        # apply_contains node for class Name()
        self.add_node()
        self.vs[12]["mm__"] = """apply_contains"""
        # apply class Par() node
        self.add_node()
        self.vs[13]["mm__"] = """Par"""
        self.vs[13]["attr1"] = """1"""
        # apply_contains node for class Par()
        self.add_node()
        self.vs[14]["mm__"] = """apply_contains"""
        # apply class Trigger() node
        self.add_node()
        self.vs[15]["mm__"] = """Trigger"""
        self.vs[15]["attr1"] = """1"""
        # apply_contains node for class Trigger()
        self.add_node()
        self.vs[16]["mm__"] = """apply_contains"""
        # match association State--exitPoints-->ExitPoint node
        self.add_node()
        self.vs[17]["attr1"] = """exitPoints"""
        self.vs[17]["mm__"] = """directLink_S"""
        # apply association LocalDef--def-->ProcDef node
        self.add_node()
        self.vs[18]["attr1"] = """def"""
        self.vs[18]["mm__"] = """directLink_T"""
        # apply association ProcDef--channelNames-->Name node
        self.add_node()
        self.vs[19]["attr1"] = """channelNames"""
        self.vs[19]["mm__"] = """directLink_T"""
        # apply association ProcDef--p-->Par node
        self.add_node()
        self.vs[20]["attr1"] = """p"""
        self.vs[20]["mm__"] = """directLink_T"""
        # apply association Par--p-->Trigger node
        self.add_node()
        self.vs[21]["attr1"] = """p"""
        self.vs[21]["mm__"] = """directLink_T"""
        # backward association State---->LocalDef node
        self.add_node()
        self.vs[22]["mm__"] = """backward_link"""
        # Add the edges (pairs of vertex indices defined above)
        self.add_edges([
            (0,4),  # matchmodel -> match_contains
            (4,3),  # match_contains -> match_class State()
            (0,6),  # matchmodel -> match_contains
            (6,5),  # match_contains -> match_class ExitPoint()
            (1,8),  # applymodel -> apply_contains
            (8,7),  # apply_contains -> apply_class LocalDef()
            (1,10),  # applymodel -> apply_contains
            (10,9),  # apply_contains -> apply_class ProcDef()
            (1,12),  # applymodel -> apply_contains
            (12,11),  # apply_contains -> apply_class Name()
            (1,14),  # applymodel -> apply_contains
            (14,13),  # apply_contains -> apply_class Par()
            (1,16),  # applymodel -> apply_contains
            (16,15),  # apply_contains -> apply_class Trigger()
            (3,17),  # match_class State() -> association exitPoints
            (17,5),  # association exitPoints -> match_class ExitPoint()
            (7,18),  # apply_class LocalDef() -> association def
            (18,9),  # association def -> apply_class ProcDef()
            (9,19),  # apply_class ProcDef() -> association channelNames
            (19,11),  # association channelNames -> apply_class Name()
            (9,20),  # apply_class ProcDef() -> association p
            (20,13),  # association p -> apply_class Par()
            (13,21),  # apply_class Par() -> association p
            (21,15),  # association p -> apply_class Trigger()
            (7,22),  # apply_class LocalDef() -> backward_association
            (22,3),  # backward_association -> apply_class State()
            (0,2),  # matchmodel -> pairedwith
            (2,1)  # pairedwith -> applyModel
        ])
        # Add the attribute equations ((node, attribute) -> value expression)
        self["equations"] = [((3,'isComposite'),('constant','true')), ((7,'__ApplyAttribute'),('constant','localdefcompstate')), ((9,'name'),('concat',(('constant','B'),(5,'name')))), ((11,'literal'),('constant','sh_in')), ((13,'__ApplyAttribute'),('constant','parexitpoint')), ((15,'channel'),('constant','sh_in')), ]
| 38.824242 | 318 | 0.530909 | 6,349 | 0.991102 | 0 | 0 | 0 | 0 | 0 | 0 | 3,314 | 0.517328 |
2aa5453199355e4c36254ebb1452a62267afff50 | 1,553 | py | Python | shellish/command/contrib/ini.py | personnelink/shellish | f130599aabe1ed44323b2374c9e6c3b789f06517 | [
"MIT"
] | 4 | 2015-10-06T23:50:20.000Z | 2021-06-11T19:20:43.000Z | shellish/command/contrib/ini.py | personnelink/shellish | f130599aabe1ed44323b2374c9e6c3b789f06517 | [
"MIT"
] | null | null | null | shellish/command/contrib/ini.py | personnelink/shellish | f130599aabe1ed44323b2374c9e6c3b789f06517 | [
"MIT"
] | null | null | null | """
Show the INI config(s) used by a command tree.
"""
from .. import command
class Show(command.Command):
""" Show current INI configuration.
Programs may make use of a configuration file which is usually located in
your $HOME directory as .<prog>_config. The file is a standard INI
style config file where each `[section]` is the full path of a command
including spaces. """
name = 'show'
def setup_args(self, parser):
self.add_argument('section', nargs='?', help='Only show config for '
'this section.')
self.add_argument('--all', '-a', action='store_true', help='Show '
'all sections')
super().setup_args(parser)
def run(self, args):
if args.section:
try:
config = {args.section: self.session.config[args.section]}
except KeyError:
raise SystemExit("Invalid section: %s" % args.section)
else:
config = self.session.config
for section, values in config.items():
if values or args.all:
print("[%s]" % section)
for k, v in values.items():
print(" %s = %s" % (k, v))
print()
class INI(command.Command):
    """ INI style configuration.
    Commands support user configuration in an INI style config file. """
    name = 'ini'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 'show' runs when 'ini' is invoked without an explicit subcommand
        self.add_subcommand(Show, default=True)
2aa5942f73cecef566d01cdaae9fb817121f496b | 7,862 | py | Python | Report folder/Total_image_PCA_analysis/PCA_image_all_samples_1_plot.py | xiaoyanLi629/Coffee-ring-effect-image-analysis | 0873f55ae78775835209371845c26cd6ba1a73e0 | [
"MIT"
] | null | null | null | Report folder/Total_image_PCA_analysis/PCA_image_all_samples_1_plot.py | xiaoyanLi629/Coffee-ring-effect-image-analysis | 0873f55ae78775835209371845c26cd6ba1a73e0 | [
"MIT"
] | null | null | null | Report folder/Total_image_PCA_analysis/PCA_image_all_samples_1_plot.py | xiaoyanLi629/Coffee-ring-effect-image-analysis | 0873f55ae78775835209371845c26cd6ba1a73e0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import pandas as pd
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.linalg import eigs
from numpy import linalg as LA
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import figure
numImages = 60
# fig = plt.figure(figsize = (8,8))
# One flattened grayscale image per row; assumes every image is 490x490 px.
X = np.zeros(shape = (numImages, 490*490))
# Load images 1.jpg .. 60.jpg from the working directory.
for i in range(1, numImages + 1):
    filename = str(i)+'.jpg'
    img = mpimg.imread(filename)
    # RGB -> grayscale using the 0.299/0.587/0.114 luma weights
    img = img[:, :, 0]*0.299 + img[:, :, 1]*0.587 + img[:, :, 2]*0.114
    X[i-1] = np.array(img.flatten()).reshape(1, img.shape[0]*img.shape[1])
numComponents = 60
pca = PCA(n_components=numComponents)
pca.fit(X)
# project every image onto the principal components
Z = pca.transform(X)
# Scatter of the first two components: 12 sample groups, 5 replicates each,
# grouped by color (salt family) and marker (concentration level).
fig1, ax = plt.subplots()
ax.scatter(Z[0:5, 0], Z[0:5, 1], s = 25, marker = 'x', c = 'r', label = '$NaCl\; 10mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[5:10, 0], Z[5:10, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='r',
           label = '$NaCl\; 5.0mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[10:15, 0], Z[10:15, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='r',
           label = '$NaCl\; 2.5mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[15:20, 0], Z[15:20, 1], s = 25, marker = 'x', c = 'g', label = '$NaHCO_3\; 10mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[20:25, 0], Z[20:25, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='g',
           label = '$NaHCO_3\; 5.0mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[25:30, 0], Z[25:30, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='g',
           label = '$NaHCO_3\; 2.5mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[30:35, 0], Z[30:35, 1], s = 25, marker = 'x', c = 'b', label = '$Na_2SO_4\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[35:40, 0], Z[35:40, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='b',
           label = '$Na_2SO_4\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[40:45, 0], Z[40:45, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='b',
           label = '$Na_2SO_4\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[45:50, 0], Z[45:50, 1], s = 25, marker = 'x', c = 'y', label = '$NaHCO_3\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[50:55, 0], Z[50:55, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='y',
           label = '$NaHCO_3\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[55:60, 0], Z[55:60, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='y',
           label = '$NaHCO_3\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
plt.xlabel('First component')
plt.ylabel('Second component')
# hide tick labels: only the relative clustering matters here
ax.set_yticklabels([])
ax.set_xticklabels([])
# plt.title('PCA Image analysis for all samples')
ax.legend(loc='upper right', prop={'size': 7}, handletextpad = 0, labelspacing = 0)
plt.show()
fig1.savefig('PCA_all_images_2_components_1_plot.jpg', dpi = 1000)
# # use component 3 and 4
# fig2, ax = plt.subplots()
# ax.scatter(Z[0:5, 2], Z[0:5, 3], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 2], Z[5:10, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='r',
# label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 2], Z[10:15, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='r',
# label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 2], Z[15:20, 3], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 2], Z[20:25, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 2], Z[25:30, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 2], Z[30:35, 3], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 2], Z[35:40, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='b',
# label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 2], Z[40:45, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='b',
# label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 2], Z[45:50, 3], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 2], Z[50:55, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 2], Z[55:60, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# plt.xlabel('Third component', fontsize = 20)
# plt.ylabel('Fourth component', fontsize = 20)
# plt.title('PCA Image analysis for all samples', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
#
# eigenvalues = pca.explained_variance_
# variance = []
# for i in range(len(eigenvalues)):
# if i == 0:
# variance.append(eigenvalues[0])
# else:
# variance.append(variance[i-1] + eigenvalues[i])
# variance = variance/variance[-1]
#
# fig3, ax = plt.subplots()
# plt.plot(variance, 'ro-', linewidth=1)
# plt.title('Scree Plot for all 60 images', fontsize=20)
# plt.xlabel('Principal Component', fontsize=20)
# plt.ylabel('Cumulative Eigenvalue', fontsize=20)
# fig3.savefig('Scree Plot for all 60 images.png')
# # 3d image
# # fig = plt.figure(num=None, figsize=(4, 3), dpi=80, facecolor='w', edgecolor='k')
# fig = plt.figure()
# # figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# # fig, axs = plt.subplots(nrows=1, ncols=1, constrained_layout=True)
# ax = Axes3D(fig)
# ax.scatter(Z[0:5, 0], Z[0:5, 1], Z[0:5, 2], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 0], Z[5:10, 1], Z[5:10, 2], s = 100, marker = 's', c = 'r', label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 0], Z[10:15, 1], Z[10:15, 2], s = 100, marker = 'o', c ='r', label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 0], Z[15:20, 1], Z[15:20, 2], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 0], Z[20:25, 1], Z[20:25, 2], s = 100, marker = 's', c = 'g', label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 0], Z[25:30, 1], Z[25:30, 2], s = 100, marker = 'o', c = 'g', label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 0], Z[30:35, 1], Z[30:35, 2], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 0], Z[35:40, 1], Z[35:40, 2], s = 100, marker = 's', c = 'b', label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 0], Z[40:45, 1], Z[40:45, 2], s = 100, marker = 'o', c = 'b', label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 0], Z[45:50, 1], Z[45:50, 2], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 0], Z[50:55, 1], Z[50:55, 2], s = 100, marker = 's', c = 'y', label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 0], Z[55:60, 1], Z[55:60, 2], s = 100, marker = 'o', c = 'y', label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.set_xlabel('First component', fontsize = 15)
# ax.set_ylabel('Second component', fontsize = 15)
# ax.set_zlabel('Third component', fontsize = 15)
# ax.set_title('PCA image analysis for all samples \n with three components', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
# plt.close(fig) | 55.366197 | 134 | 0.589672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,701 | 0.725134 |
2aa65b484384e454a6e81e84c558c2dde1338019 | 3,983 | py | Python | example/example_person.py | Shadow-linux/ex_dataclass | 1fe186ea990f6f2337fb7ee6b6ba54fa7791afa0 | [
"MIT"
] | 18 | 2021-06-29T11:12:42.000Z | 2022-03-31T20:31:10.000Z | example/example_person.py | Shadow-linux/ex_dataclass | 1fe186ea990f6f2337fb7ee6b6ba54fa7791afa0 | [
"MIT"
] | 1 | 2021-06-30T03:12:46.000Z | 2021-06-30T03:12:46.000Z | example/example_person.py | Shadow-linux/ex_dataclass | 1fe186ea990f6f2337fb7ee6b6ba54fa7791afa0 | [
"MIT"
] | 3 | 2021-06-29T11:12:43.000Z | 2021-07-08T08:16:12.000Z | import typing
from ex_dataclass import ex_dataclass, asdict, field, EXpack
@ex_dataclass
class User:
# default_factory: 需要给一个类(可callable的对象)
name: str = field(default_factory=str)
# default: 给定一个默认值
age: int = field(default=0)
@ex_dataclass
class Team:
    team_name: str = field(default_factory=str)
    # When no value is supplied, users defaults to an empty list
    users: typing.List[User] = field(default_factory=list)
@ex_dataclass
class AllTeam:
    # Trailing underscore avoids shadowing the builtin name `id`
    id_: int = field(default=0)
    teams: typing.List[Team] = field(default_factory=list)
# Demo: AllTeam accepts keyword arguments / an unpacked dict
all_team = AllTeam(**{
    # This key is not declared on AllTeam, so it is ignored
    "is_active": True,
    "teams": [
        {
            "team_name": "Team-A",
            "users"    : [
                {
                    "name": "zhangsan",
                    "age" : 18,
                },
                {
                    "name": "lisi",
                    "age" : 18,
                }
            ]
        },
        {
            "team_name": "Team-B",
            "users"    : [
                {
                    "name": "jack",
                    "age" : 18,
                },
                {
                    "name": "rose",
                    "age" : 18,
                }
            ]
        }
    ]
})
# As the output shows, every nested dict is converted into the matching
# object; objects are IDE-friendly (full autocompletion) and easier to maintain
print(all_team)
# AllTeam(teams=[Team(team_name='Team-A', users=[User(name='', age=18), User(name='', age=18)]), Team(team_name='Team-B', users=[User(name='', age=18), User(name='', age=18)])])
print(all_team.teams)
# [Team(team_name='Team-A', users=[User(name='', age=18), User(name='', age=18)]), Team(team_name='Team-B', users=[User(name='', age=18), User(name='', age=18)])]
print(all_team.teams[0].team_name)
print(all_team.teams[0].users)
# Team-A
# [User(name='', age=18), User(name='', age=18)]
print(all_team.teams[0].users[0].name)
# zhangsan
# Convert back to a plain dict
print(asdict(all_team))
# {'teams': [{'team_name': 'Team-A', 'users': [{'name': 'zhangsan', 'age': 18}, {'name': 'lisi', 'age': 18}]}, {'team_name': 'Team-B', 'users': [{'name': 'jack', 'age': 18}, {'name': 'rose', 'age': 18}]}]}
@ex_dataclass
class Person:
    # default_factory: takes a class (or any callable)
    name: str = field(default_factory=str)
    # default: a plain default value
    age: int = field(default=0)
    # NOTE(review): default=float passes the *type* float as the default value;
    # default_factory=float (or default=0.0) was probably intended — confirm
    # against ex_dataclass's handling of `default`.
    height: float = field(default=float)
    weight: float = field(default=float)
@ex_dataclass
class PersonDetails:
    # Extra contact/profile data nested inside a person
    address: str = field(default_factory=str)
    hobbies: typing.List[str] = field(default_factory=list)
    phone: str = field(default_factory=str)
# Inherit from Person so the subclass carries all of Person's fields
@ex_dataclass
class Male(Person):
    gender: str = field(default="male")
@ex_dataclass
class Female(Person):
    gender: str = field(default="female")
@ex_dataclass
class Jack(Male):
    # When the default should be a PersonDetails object, pass the class as
    # default_factory; otherwise dict or None also work
    details: PersonDetails = field(default_factory=PersonDetails)
@ex_dataclass
class Rose(Female):
    # Same field as Jack.details but defaulting to an empty dict
    details: PersonDetails = field(default_factory=dict)
# Finally build the two people from keyword arguments; gender is omitted
# because it already has a default
jack = Jack(
    name="jack",
    age=18,
    height=1.80,
    weight=125.0,
    details={
        "address": "xxxx",
        "hobbies": ["aa", "bb", "cc"],
        "phone"  : "123456789"
    }
)
# Second person, built the same way (the original note said "initialize from
# a dict", though keyword arguments are used here as well)
rose = Rose(
    name="rose",
    age=18,
    height=1.680,
    weight=98.0,
    details={
        "address": "xxxx",
        "hobbies": ["aa", "bb", "cc"],
        "phone"  : "987654321"
    }
)
print(jack)
print(jack.details.phone)
print(rose)
print(rose.details.phone)
# Jack(name='jack', age=18, height=1.8, weight=125.0, gender='male', details=PersonDetails(address='xxxx', hobbies=['aa', 'bb', 'cc'], phone='123456789'))
# 123456789
# Rose(name='rose', age=18, height=1.68, weight=98.0, gender='female', details=PersonDetails(address='xxxx', hobbies=['aa', 'bb', 'cc'], phone='987654321'))
# 987654321
# Edge case: a field-less ex_dataclass serializes to an empty dict
@ex_dataclass
class A:
    pass
print(asdict(A()))
| 25.863636 | 205 | 0.568165 | 1,324 | 0.301526 | 0 | 0 | 1,464 | 0.333409 | 0 | 0 | 2,007 | 0.457071 |
2aa980f2e7bb9369dafe24972122e61fe79c1759 | 766 | py | Python | class and objects/Mro.py | ZephyrAveryl777/Python-Programs | 26de85c31af28382d406d27d54186b966a7b1bfc | [
"MIT"
] | 6 | 2020-08-13T11:49:29.000Z | 2021-03-07T05:46:17.000Z | class and objects/Mro.py | ZephyrAveryl777/Python-Programs | 26de85c31af28382d406d27d54186b966a7b1bfc | [
"MIT"
] | null | null | null | class and objects/Mro.py | ZephyrAveryl777/Python-Programs | 26de85c31af28382d406d27d54186b966a7b1bfc | [
"MIT"
] | 1 | 2021-04-24T06:12:48.000Z | 2021-04-24T06:12:48.000Z | '''
mro stands for Method Resolution Order.
It returns a list of types the class is
derived from, in the order they are searched
for methods'''
print(__doc__)
print('\n'+'-'*35+ 'Method Resolution Order'+'-'*35)
class A(object):
    """Root of the demo hierarchy; subclasses override dothis()."""

    def dothis(self):
        message = 'From A class'
        print(message)
class B1(A):
    """First branch of the diamond; overrides dothis()."""

    def dothis(self):
        message = 'From B1 class'
        print(message)
class B2(object):
    """Stand-alone branch that does not derive from A."""

    def dothis(self):
        message = 'From B2 class'
        print(message)
class B3(A):
    """Second branch of the diamond; also overrides dothis()."""

    def dothis(self):
        message = 'From B3 class'
        print(message)
# Diamond inheritance
class D1(B1, B3):
    # C3 linearization: D1 -> B1 -> B3 -> A -> object,
    # so dothis() resolves to B1's implementation.
    pass
class D2(B1, B2):
    # C3 linearization: D2 -> B1 -> A -> B2 -> object;
    # dothis() again comes from B1.
    pass
d1_instance = D1()
d1_instance.dothis()  # prints 'From B1 class' (B1 precedes B3 in D1's MRO)
print(D1.__mro__)
d2_instance = D2()
d2_instance.dothis()  # B1 again wins over B2
print(D2.__mro__)
2aac09b051b13c891b16499f4e8579b8384792aa | 1,507 | py | Python | src/petmail/test/test_eventsource.py | warner/petmail | 2c43f6177e301275e235cc2a155aafba3e66b4c4 | [
"MIT"
] | 25 | 2015-04-14T15:55:46.000Z | 2021-01-23T16:00:55.000Z | src/petmail/test/test_eventsource.py | warner/petmail | 2c43f6177e301275e235cc2a155aafba3e66b4c4 | [
"MIT"
] | null | null | null | src/petmail/test/test_eventsource.py | warner/petmail | 2c43f6177e301275e235cc2a155aafba3e66b4c4 | [
"MIT"
] | 1 | 2018-05-17T08:41:47.000Z | 2018-05-17T08:41:47.000Z | from twisted.trial import unittest
from ..eventsource import EventSourceParser
class FakeTransport:
disconnecting = False
def parse_events(s):
fields = []
p = EventSourceParser(lambda name, data: fields.append((name,data)))
p.makeConnection(FakeTransport())
p.dataReceived(s)
return fields
class EventSource(unittest.TestCase):
def test_parse(self):
fields = []
p = EventSourceParser(lambda name, data: fields.append((name,data)))
p.makeConnection(FakeTransport())
self.failUnlessEqual(fields, [])
p.dataReceived(": comment")
self.failUnlessEqual(fields, [])
p.dataReceived("\n")
self.failUnlessEqual(fields, [])
p.dataReceived("\n")
self.failUnlessEqual(fields, [("", "comment")])
p.dataReceived("data: one line\n\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("data: two\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("lines\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line")])
p.dataReceived("\n")
self.failUnlessEqual(fields, [("", "comment"),
("data", "one line"),
("data", "two\nlines"),
])
| 35.880952 | 76 | 0.52223 | 1,235 | 0.819509 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.134041 |
2aacd69a9be78b0b125c9417b7a8c0c44c82bb18 | 116 | py | Python | coffea/nanoevents/methods/systematics/__init__.py | Junaid-muhamad/coffea | b644bcf2b48e272e6c78e4b1ddee3c233a67022d | [
"BSD-3-Clause"
] | null | null | null | coffea/nanoevents/methods/systematics/__init__.py | Junaid-muhamad/coffea | b644bcf2b48e272e6c78e4b1ddee3c233a67022d | [
"BSD-3-Clause"
] | null | null | null | coffea/nanoevents/methods/systematics/__init__.py | Junaid-muhamad/coffea | b644bcf2b48e272e6c78e4b1ddee3c233a67022d | [
"BSD-3-Clause"
] | null | null | null | from coffea.nanoevents.methods.systematics.UpDownSystematic import UpDownSystematic
__all__ = ["UpDownSystematic"]
| 29 | 83 | 0.853448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.155172 |
2aacece114f8bb2a0a9d49c428291bf6cbab0533 | 7,121 | py | Python | etl/parsers/etw/Microsoft_Windows_SecurityMitigationsBroker.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_SecurityMitigationsBroker.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_SecurityMitigationsBroker.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-SecurityMitigationsBroker
GUID : ea8cd8a5-78ff-4418-b292-aadc6a7181df
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW templates: each class binds one event id of provider
# Microsoft-Windows-SecurityMitigationsBroker to its payload layout.
# Event 1003 payload: DriverId (uint64), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1003, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1003_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ErrorCode" / Int32ul
    )
# Event 1004 payload: DriverId (uint64), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1004, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1004_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ErrorCode" / Int32ul
    )
# Event 1005 payload: DriverId (uint64), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1005, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1005_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ErrorCode" / Int32ul
    )
# Event 1006 payload: DriverId (uint64), ACGState (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1006, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1006_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ACGState" / Int32ul
    )
# Event 1007 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1007, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1007_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1008 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1008, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1008_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1009 payload: DriverId (uint64), ProcessId (uint32), ACGState (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1009, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1009_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ACGState" / Int32ul
    )
# Event 1010 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1010, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1010_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1011 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1011, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1011_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1012 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1012, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1012_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1013 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1013, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1013_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1014 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1014, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1014_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1015 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1015, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1015_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1016 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1016, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1016_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1017 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1017, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1017_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1018 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1018, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1018_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1019 payload: DriverId1 (uint64), DriverId2 (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1019, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1019_0(Etw):
    pattern = Struct(
        "DriverId1" / Int64ul,
        "DriverId2" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1020 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1020, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1020_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1021 payload: DriverId (uint64), ProcessId (uint32), ErrorCode (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1021, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1021_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul,
        "ErrorCode" / Int32ul
    )
# Event 1022 payload: Description (wide string), VendorId/DeviceId (uint32),
# DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1022, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1022_0(Etw):
    pattern = Struct(
        "Description" / WString,
        "VendorId" / Int32ul,
        "DeviceId" / Int32ul,
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1023: same payload layout as event 1022.
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1023, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1023_0(Etw):
    pattern = Struct(
        "Description" / WString,
        "VendorId" / Int32ul,
        "DeviceId" / Int32ul,
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1024 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1024, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1024_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1025 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1025, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1025_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1026: same payload layout as event 1022.
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1026, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1026_0(Etw):
    pattern = Struct(
        "Description" / WString,
        "VendorId" / Int32ul,
        "DeviceId" / Int32ul,
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1027 payload: DriverId (uint64), ProcessId (uint32).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1027, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1027_0(Etw):
    pattern = Struct(
        "DriverId" / Int64ul,
        "ProcessId" / Int32ul
    )
# Event 1030 payload: ModuleName (wide string).
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1030, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1030_0(Etw):
    pattern = Struct(
        "ModuleName" / WString
    )
2aad343bddb6ea115f42e2d6dd3a8e74891cc040 | 592 | py | Python | workbench/speech_text/_deepspeech/segment_audio.py | NAshwinKumar/deep-read | ee13bb0651e9b682c411db073fb52f3136d44ea1 | [
"MIT"
] | 6 | 2020-09-26T08:34:19.000Z | 2021-11-26T12:59:50.000Z | workbench/speech_text/_deepspeech/segment_audio.py | NAshwinKumar/deep-read | ee13bb0651e9b682c411db073fb52f3136d44ea1 | [
"MIT"
] | 26 | 2020-09-26T08:46:40.000Z | 2022-02-10T03:18:55.000Z | workbench/speech_text/_deepspeech/segment_audio.py | Acquil/deep-read | aae93c62a0c85ce31eb0e2d759d4d95d7d076c5d | [
"MIT"
] | 10 | 2020-10-08T09:23:41.000Z | 2020-12-03T14:59:09.000Z | from pydub import AudioSegment
from pydub.silence import split_on_silence
def segment(filename,foldername):
"""
filename : str
foldername: str folder to put all the chunks
"""
sound_file = AudioSegment.from_file(filename)
sound_file = sound_file.set_channels(1)
sound_file = sound_file.set_frame_rate(16000)
audio_chunks = split_on_silence(sound_file,min_silence_len=1000,silence_thresh=-60)
for i, chunk in enumerate(audio_chunks):
out_file = foldername+"/chunk{0}.wav".format(i)
print("exporting", out_file)
chunk.export(out_file, format="wav", bitrate="128")
| 32.888889 | 84 | 0.760135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.184122 |
2aad36ed8d9e01be2c017eacc17af6c7e6d3a8a0 | 126 | py | Python | 01_Language/01_Functions/python/fread.py | cliff363825/TwentyFour | 09df59bd5d275e66463e343647f46027397d1233 | [
"MIT"
] | 3 | 2020-06-28T07:42:51.000Z | 2021-01-15T10:32:11.000Z | 01_Language/01_Functions/python/fread.py | cliff363825/TwentyFour | 09df59bd5d275e66463e343647f46027397d1233 | [
"MIT"
] | 9 | 2021-03-10T22:45:40.000Z | 2022-02-27T06:53:20.000Z | 01_Language/01_Functions/python/fread.py | cliff363825/TwentyFour | 09df59bd5d275e66463e343647f46027397d1233 | [
"MIT"
] | 1 | 2021-01-15T10:51:24.000Z | 2021-01-15T10:51:24.000Z | # coding: utf-8
import os
size = os.path.getsize("test.txt")
with open("test.txt", mode="r") as f:
print(f.read(size))
| 14 | 37 | 0.626984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.301587 |
2aadc4ef847555f615099779462d93a8df39559d | 494 | py | Python | CalculateLodsTool.py | makeling/AGSSmartVectorTileTools | 009d925f883729f98c0d0744e1d466062dc260e6 | [
"Apache-2.0"
] | 3 | 2019-02-19T06:14:03.000Z | 2020-01-06T07:57:12.000Z | CalculateLodsTool.py | makeling/AGSSmartVectorTileTools | 009d925f883729f98c0d0744e1d466062dc260e6 | [
"Apache-2.0"
] | null | null | null | CalculateLodsTool.py | makeling/AGSSmartVectorTileTools | 009d925f883729f98c0d0744e1d466062dc260e6 | [
"Apache-2.0"
] | 2 | 2019-03-25T09:43:30.000Z | 2019-11-28T03:52:56.000Z | # -*- coding: utf-8 -*-
# !/usr/bin/python
__author__ = 'ma_keling'
# Version : 1.0.0
# Start Time : 2018-12-20
# Update Time :
# Change Log :
## 1.
## 2.
## 3.
import arcpy
import CalculateLods
def execute():
    """Read the geoprocessing-tool parameters and calculate LODs for the layers."""
    # Tool parameter 0: the input map, echoed to the tool's message window.
    in_map = arcpy.GetParameter(0)
    arcpy.AddMessage("Input map : {0}.".format(in_map))
    # Tool parameter 1: the layers whose features receive an LOD value.
    in_layers = arcpy.GetParameter(1)
    # Attribute field written by CalculateLods.
    field_name = "lod"
    CalculateLods.calculate_lods_for_feature(in_layers, field_name)
# Run immediately when the script tool is invoked.
execute()
| 18.296296 | 68 | 0.597166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.38664 |
2aadd1bf492d72d9413391d2877375cb66b76d8f | 1,099 | py | Python | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 5 | 2020-08-29T21:23:05.000Z | 2022-03-24T19:57:44.000Z | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 11 | 2020-03-04T23:03:34.000Z | 2022-02-18T04:04:04.000Z | neolearnit/src/main/python/get_vocabulary.py | BBN-E/LearnIt | 4f602f113cac9f4a7213b348a42c0fef23e2739c | [
"Apache-2.0"
] | 1 | 2021-05-19T11:51:51.000Z | 2021-05-19T11:51:51.000Z | import sys, os, re, codecs, json, glob
import random
from random import randint
from collections import defaultdict
from collections import Counter
from sets import Set
words=Set()
def read_json_data(strJsonFile):
with codecs.open(strJsonFile, 'r', encoding='utf-8') as f:
try:
json_data = json.load(f)
except ValueError as ve:
print "While loading: " + filename
print str(ve)
sys.exit(1)
return json_data
def main(input_json):
json_data = read_json_data(input_json)
for j in json_data:
for w in j['head']['word'].split(" "):
words.add(w)
for w in j['tail']['word'].split(" "):
words.add(w)
for w in j['sentence'].split(" "):
words.add(w)
for w in words:
print w
## sample instances
#print "writing json..."
#f.close()
#o.write(json.dumps(relation_mentions, sort_keys=True, indent=4, cls=json.JSONEncoder, ensure_ascii=False))
#o.close()
if __name__ == "__main__":
    # argv[1]: path to the JSON file of relation-mention records.
    input_json = sys.argv[1]
    main(input_json)
| 22.895833 | 111 | 0.604186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.22748 |
2aae87bca48325631f425f14e5ffd5fc9dbc7e0a | 19,727 | py | Python | openapi_client/api/service_api.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | [
"MIT"
] | null | null | null | openapi_client/api/service_api.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | [
"MIT"
] | null | null | null | openapi_client/api/service_api.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
MBTA
MBTA service API. https://www.mbta.com Source code: https://github.com/mbta/api # noqa: E501
The version of the OpenAPI document: 3.0
Contact: developer@mbta.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class ServiceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        # Reuse the caller-supplied client when given; otherwise fall back
        # to a default-constructed ApiClient so the object works stand-alone.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def api_web_service_controller_index(self, **kwargs):  # noqa: E501
        """List of services (GET /services).  # noqa: E501

        Service represents the days of the week, as well as extra days, that
        a trip is valid.

        Synchronous by default; pass ``async_req=True`` to get a thread whose
        ``get()`` yields the result.

        :param async_req bool: execute request asynchronously
        :param int page_offset: offset (0-based) of first element in the page
        :param int page_limit: max number of elements to return
        :param str sort: sort key — ``id`` or any ``/data/{index}/attributes``
            key; prefix with '-' for descending order
        :param str fields_service: comma-separated fields to include
        :param str filter_id: comma-separated list of service IDs to filter by
        :param str filter_route: comma-separated list of routes to filter by
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: Services (or the request thread when async_req=True)
        """
        # Delegate to the *_with_http_info variant, keeping only the
        # deserialized body (status code and headers are dropped).
        kwargs['_return_http_data_only'] = True
        return self.api_web_service_controller_index_with_http_info(**kwargs)  # noqa: E501
    def api_web_service_controller_index_with_http_info(self, **kwargs):  # noqa: E501
        """List of services (GET /services), returning full HTTP info.  # noqa: E501

        Same request as :meth:`api_web_service_controller_index`, but the
        return value also carries the HTTP status code and headers.

        Synchronous by default; pass ``async_req=True`` to get a thread whose
        ``get()`` yields the result.

        :param async_req bool: execute request asynchronously
        :param int page_offset: offset (0-based) of first element in the page
        :param int page_limit: max number of elements to return
        :param str sort: sort key — ``id`` or any ``/data/{index}/attributes``
            key; prefix with '-' for descending order
        :param str fields_service: comma-separated fields to include
        :param str filter_id: comma-separated list of service IDs to filter by
        :param str filter_route: comma-separated list of routes to filter by
        :param _return_http_data_only: response data without status/headers
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
        :param _request_timeout: total timeout, or a (connect, read) tuple
        :return: tuple(Services, status_code(int), headers(HTTPHeaderDict))
        """
        local_var_params = locals()
        # Reject keyword arguments that are not recognized request options.
        all_params = ['page_offset', 'page_limit', 'sort', 'fields_service', 'filter_id', 'filter_route']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method api_web_service_controller_index" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of pagination bounds.
        if self.api_client.client_side_validation and 'page_offset' in local_var_params and local_var_params['page_offset'] < 0:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `page_offset` when calling `api_web_service_controller_index`, must be a value greater than or equal to `0`")  # noqa: E501
        if self.api_client.client_side_validation and 'page_limit' in local_var_params and local_var_params['page_limit'] < 1:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `page_limit` when calling `api_web_service_controller_index`, must be a value greater than or equal to `1`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Build the JSON:API query string from the supplied options.
        query_params = []
        if 'page_offset' in local_var_params and local_var_params['page_offset'] is not None:  # noqa: E501
            query_params.append(('page[offset]', local_var_params['page_offset']))  # noqa: E501
        if 'page_limit' in local_var_params and local_var_params['page_limit'] is not None:  # noqa: E501
            query_params.append(('page[limit]', local_var_params['page_limit']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501
        if 'fields_service' in local_var_params and local_var_params['fields_service'] is not None:  # noqa: E501
            query_params.append(('fields[service]', local_var_params['fields_service']))  # noqa: E501
        if 'filter_id' in local_var_params and local_var_params['filter_id'] is not None:  # noqa: E501
            query_params.append(('filter[id]', local_var_params['filter_id']))  # noqa: E501
        if 'filter_route' in local_var_params and local_var_params['filter_route'] is not None:  # noqa: E501
            query_params.append(('filter[route]', local_var_params['filter_route']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/vnd.api+json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key_in_header', 'api_key_in_query']  # noqa: E501
        return self.api_client.call_api(
            '/services', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Services',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def api_web_service_controller_show(self, id, **kwargs): # noqa: E501
"""api_web_service_controller_show # noqa: E501
Single service, which represents the days of the week, as well as extra days, that a trip is valid. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_web_service_controller_show(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: Unique identifier for a service (required)
:param str fields_service: Fields to include with the response. Multiple fields **MUST** be a comma-separated (U+002C COMMA, \",\") list. Note that fields can also be selected for included data types: see the [V3 API Best Practices](https://www.mbta.com/developers/v3-api/best-practices) for an example.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Service
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.api_web_service_controller_show_with_http_info(id, **kwargs) # noqa: E501
    def api_web_service_controller_show_with_http_info(self, id, **kwargs):  # noqa: E501
        """api_web_service_controller_show  # noqa: E501

        Single service, which represents the days of the week, as well as extra days, that a trip is valid.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api_web_service_controller_show_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str id: Unique identifier for a service (required)
        :param str fields_service: Fields to include with the response. Multiple fields **MUST** be a comma-separated (U+002C COMMA, \",\") list. Note that fields can also be selected for included data types: see the [V3 API Best Practices](https://www.mbta.com/developers/v3-api/best-practices) for an example.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Service, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot of the call arguments; taken first so it contains exactly
        # `self`, `id` and `kwargs` before any locals are added below.
        local_var_params = locals()
        # Names this endpoint accepts, including the generic request knobs.
        all_params = ['id', 'fields_service']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten kwargs into local_var_params, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method api_web_service_controller_show" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                       local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `api_web_service_controller_show`")  # noqa: E501

        collection_formats = {}

        # `id` is interpolated into the '/services/{id}' path below.
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        # Optional sparse-fieldset selection for the JSON:API response.
        query_params = []
        if 'fields_service' in local_var_params and local_var_params['fields_service'] is not None:  # noqa: E501
            query_params.append(('fields[service]', local_var_params['fields_service']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/vnd.api+json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key_in_header', 'api_key_in_query']  # noqa: E501

        # Delegate the actual HTTP round-trip (and async handling) to the
        # shared API client.
        return self.api_client.call_api(
            '/services/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Service',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 70.706093 | 2,464 | 0.65641 | 19,213 | 0.973944 | 0 | 0 | 0 | 0 | 0 | 0 | 14,624 | 0.741319 |
2ab21d6af7562df91669716faae1d4fb2aeb12d2 | 499 | py | Python | interactive_widgets/backend/contexts/docker_context.py | h3ndrk/interactive-widgets-backend | 926dea44f00aca8b51893dab533c566728d5e65d | [
"MIT"
] | null | null | null | interactive_widgets/backend/contexts/docker_context.py | h3ndrk/interactive-widgets-backend | 926dea44f00aca8b51893dab533c566728d5e65d | [
"MIT"
] | null | null | null | interactive_widgets/backend/contexts/docker_context.py | h3ndrk/interactive-widgets-backend | 926dea44f00aca8b51893dab533c566728d5e65d | [
"MIT"
] | null | null | null | import aiodocker
import interactive_widgets.backend.contexts.context
class DockerContext(interactive_widgets.backend.contexts.context.Context):
    """Async context wrapper around an ``aiodocker`` client.

    Entering the context opens a Docker API connection (optionally against
    a custom daemon URL from the configuration) and logs the daemon
    version; exiting closes the underlying client.
    """

    async def __aenter__(self):
        # 'url' may be absent from the configuration; aiodocker then uses
        # its own default connection target.
        daemon_url = self.configuration.get('url', None)
        self.docker = aiodocker.Docker(url=daemon_url)
        await self.docker.__aenter__()
        version_info = await self.docker.version()
        self.logger.debug(version_info)
        return self

    async def __aexit__(self, *args, **kwargs):
        # Delegate teardown to the aiodocker client.
        await self.docker.__aexit__(*args, **kwargs)
| 27.722222 | 74 | 0.685371 | 426 | 0.853707 | 0 | 0 | 0 | 0 | 340 | 0.681363 | 5 | 0.01002 |
2ab257029d0dbdeb4891ca5f55bd97d468685a9c | 12,524 | py | Python | custom_components/xiaomi_cloud_map_extractor/vacuum_manager.py | elad-bar/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | b68351b693687033ab6c58a00ce6599c7c4e4e70 | [
"MIT"
] | null | null | null | custom_components/xiaomi_cloud_map_extractor/vacuum_manager.py | elad-bar/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | b68351b693687033ab6c58a00ce6599c7c4e4e70 | [
"MIT"
] | null | null | null | custom_components/xiaomi_cloud_map_extractor/vacuum_manager.py | elad-bar/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | b68351b693687033ab6c58a00ce6599c7c4e4e70 | [
"MIT"
] | 1 | 2021-12-25T12:01:27.000Z | 2021-12-25T12:01:27.000Z | import io
import logging
import time
from typing import List, Optional
from custom_components.xiaomi_cloud_map_extractor.common.map_data import MapData
from custom_components.xiaomi_cloud_map_extractor.types import Colors, Drawables, ImageConfig, Sizes, Texts
try:
from miio import RoborockVacuum, DeviceException
except ImportError:
from miio import Vacuum as RoborockVacuum, DeviceException
import PIL.Image as Image
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.common.xiaomi_cloud_connector import XiaomiCloudConnector
from custom_components.xiaomi_cloud_map_extractor.const import *
from custom_components.xiaomi_cloud_map_extractor.dreame.vacuum import DreameVacuum
from custom_components.xiaomi_cloud_map_extractor.enums import CameraStatus
from custom_components.xiaomi_cloud_map_extractor.roidmi.vacuum import RoidmiVacuum
from custom_components.xiaomi_cloud_map_extractor.unsupported.vacuum import UnsupportedVacuum
from custom_components.xiaomi_cloud_map_extractor.viomi.vacuum import ViomiVacuum
from custom_components.xiaomi_cloud_map_extractor.xiaomi.vacuum import XiaomiVacuum
_LOGGER = logging.getLogger(__name__)
# Maps a detected API flavour to the vacuum implementation that speaks it.
# Lookups fall back to UnsupportedVacuum (see VacuumManager._handle_device).
DEVICE_MAPPING = {
    CONF_AVAILABLE_API_XIAOMI: XiaomiVacuum,
    CONF_AVAILABLE_API_VIOMI: ViomiVacuum,
    CONF_AVAILABLE_API_ROIDMI: RoidmiVacuum,
    CONF_AVAILABLE_API_DREAME: DreameVacuum,
}

# Log severity used when a given camera status is recorded; statuses not
# listed here are logged at debug level (see VacuumManager._set_status).
STATUS_LOG_LEVEL = {
    CameraStatus.FAILED_TO_RETRIEVE_DEVICE: _LOGGER.error,
    CameraStatus.UNABLE_TO_PARSE_MAP: _LOGGER.warning,
    CameraStatus.UNABLE_TO_RETRIEVE_MAP: _LOGGER.warning
}
class VacuumManager:
    """Polls a Xiaomi/Roborock vacuum through the Xiaomi cloud and renders its map.

    On every ``update`` cycle the manager (re)logs in to the cloud, resolves
    the vacuum's model and API flavour, fetches the current map, renders it
    to PNG bytes (``image``) and publishes entity attributes (``attributes``).
    """

    def __init__(self, config):
        # Connection credentials for the local vacuum and the Xiaomi cloud.
        host: str = config[CONF_HOST]
        token: str = config[CONF_TOKEN]
        username: str = config[CONF_USERNAME]
        password: str = config[CONF_PASSWORD]
        drawables = config.get(CONF_DRAW, [])
        room_colors = config.get(CONF_ROOM_COLORS, {})
        colors: Colors = config.get(CONF_COLORS, {})
        # Per-room colors are merged into the general color map under a
        # prefixed key so the parser can look them up by room number.
        for room, color in room_colors.items():
            colors[f"{COLOR_ROOM_PREFIX}{room}"] = color
        self._vacuum = RoborockVacuum(host, token)
        self._connector = XiaomiCloudConnector(username, password)
        self._name: str = config.get(CONF_NAME, DEFAULT_NAME)
        self._should_poll: bool = config.get(CONF_AUTO_UPDATE, True)
        self._image_config: ImageConfig = config.get(CONF_MAP_TRANSFORM, DEFAULT_MAP_TRANSFORM)
        self._colors: Colors = colors
        # DRAWABLE_ALL expands to every available drawable except the
        # "all" marker itself (first element of CONF_AVAILABLE_DRAWABLES).
        self._drawables: Drawables = CONF_AVAILABLE_DRAWABLES[1:] if DRAWABLE_ALL in drawables else drawables
        self._sizes: Sizes = config.get(CONF_SIZES, DEFAULT_SIZES)
        self._texts: Texts = config.get(CONF_TEXTS, [])
        self._country: str = config.get(CONF_COUNTRY)
        self._allowed_attributes: List[str] = config.get(CONF_ATTRIBUTES, [])
        self._store_map_raw: bool = config.get(CONF_STORE_MAP_RAW, False)
        self._store_map_image: bool = config.get(CONF_STORE_MAP_IMAGE)
        self._store_map_path: str = config.get(CONF_STORE_MAP_PATH, DEFAULT_STORE_MAP_PATH)
        self._forced_api: str = config.get(CONF_FORCE_API)
        # Runtime state, filled in by update() and its helpers.
        self._device = None
        self._used_api = None
        self._map_saved = None
        self._image = None
        self._map_data = None
        self._logged_in = False
        # "previously" flags keep repeated failures from spamming the log:
        # the warning/error is emitted only on the first failing cycle.
        self._logged_in_previously = True
        self._received_map_name_previously = True
        self._attributes = {}
        self._status = CameraStatus.INITIALIZING

    @property
    def image(self) -> Optional[bytes]:
        # PNG bytes of the last rendered map (None before the first update).
        return self._image

    @property
    def name(self):
        # Configured entity name.
        return self._name

    @property
    def attributes(self):
        # Attribute dict built by _update_attributes() during the last cycle.
        return self._attributes

    @property
    def should_poll(self):
        # Whether automatic updates are currently enabled.
        return self._should_poll

    def turn_on(self):
        """Enable automatic polling."""
        self._should_poll = True

    def turn_off(self):
        """Disable automatic polling."""
        self._should_poll = False

    def _get_attributes_data(self):
        """Build the full (unfiltered) attribute dict from the current map data."""
        map_data = self._map_data
        rooms = []
        if self._map_data.rooms is not None:
            # room number -> room name, dropping entries without a number;
            # if nothing remains, fall back to the plain list of room keys.
            rooms = dict(
                filter(lambda x: x[0] is not None, map(lambda x: (x[0], x[1].name), self._map_data.rooms.items())))
            if len(rooms) == 0:
                rooms = list(self._map_data.rooms.keys())
        attributes = {
            ATTRIBUTE_CALIBRATION: map_data.calibration(),
            ATTRIBUTE_CHARGER: map_data.charger,
            ATTRIBUTE_CLEANED_ROOMS: map_data.cleaned_rooms,
            ATTRIBUTE_COUNTRY: self._country,
            ATTRIBUTE_GOTO: map_data.goto,
            ATTRIBUTE_GOTO_PATH: map_data.goto_path,
            ATTRIBUTE_GOTO_PREDICTED_PATH: map_data.predicted_path,
            ATTRIBUTE_IGNORED_OBSTACLES: map_data.ignored_obstacles,
            ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO: map_data.ignored_obstacles_with_photo,
            ATTRIBUTE_IMAGE: map_data.image,
            ATTRIBUTE_IS_EMPTY: map_data.image.is_empty,
            ATTRIBUTE_MAP_NAME: map_data.map_name,
            ATTRIBUTE_NO_GO_AREAS: map_data.no_go_areas,
            ATTRIBUTE_NO_MOPPING_AREAS: map_data.no_mopping_areas,
            ATTRIBUTE_OBSTACLES: map_data.obstacles,
            ATTRIBUTE_OBSTACLES_WITH_PHOTO: map_data.obstacles_with_photo,
            ATTRIBUTE_PATH: map_data.path,
            ATTRIBUTE_ROOM_NUMBERS: rooms,
            ATTRIBUTE_ROOMS: map_data.rooms,
            ATTRIBUTE_VACUUM_POSITION: map_data.vacuum_position,
            ATTRIBUTE_VACUUM_ROOM: map_data.vacuum_room,
            ATTRIBUTE_VACUUM_ROOM_NAME: map_data.vacuum_room_name,
            ATTRIBUTE_WALLS: map_data.walls,
            ATTRIBUTE_ZONES: map_data.zones
        }
        return attributes

    def _update_attributes(self):
        """Refresh ``self._attributes``, exposing only user-whitelisted keys."""
        attributes = {}
        if self._map_data is not None:
            data = self._get_attributes_data()
            for name, value in data.items():
                if name in self._allowed_attributes:
                    attributes[name] = value
        if self._store_map_raw:
            attributes[ATTRIBUTE_MAP_SAVED] = self._map_saved
        if self._device is not None:
            attributes[ATTR_MODEL] = self._device.model
            attributes[ATTR_USED_API] = self._used_api
        if self._connector.two_factor_auth_url is not None:
            attributes[ATTR_TWO_FACTOR_AUTH] = self._connector.two_factor_auth_url
        self._attributes = attributes

    def update(self, now):
        """Run one full poll cycle: login, device lookup, map fetch, render.

        On failure an "empty" placeholder map carrying the status message
        is rendered instead, so the camera always has an image.
        """
        counter = 10
        if self._status != CameraStatus.TWO_FACTOR_AUTH_REQUIRED and not self._logged_in:
            self._handle_login()
        if self._device is None and self._logged_in:
            self._handle_device()
        map_name = self._handle_map_name(counter)
        if map_name == "retry" and self._device is not None:
            self._set_status(CameraStatus.FAILED_TO_RETRIEVE_MAP_FROM_VACUUM)
        self._received_map_name_previously = map_name != "retry"
        if self._logged_in and map_name != "retry" and self._device is not None:
            self._handle_map_data(map_name)
        else:
            exists = self._device is not None
            _LOGGER.debug(
                f"Unable to retrieve map ({now}), "
                f"Logged in: {self._logged_in} | "
                f"Map name: {map_name} | "
                f"Device retrieved: {exists}"
            )
            message = str(self._status)
            map_data = MapDataParser.create_empty(self._colors, message)
            self._set_map_data(map_data)
        self._logged_in_previously = self._logged_in
        self._update_attributes()

    def _handle_login(self):
        """Log in to the Xiaomi cloud and record the resulting status."""
        _LOGGER.debug("Logging in...")
        self._logged_in = self._connector.login()
        # login() yields None when two-factor authentication is pending.
        if self._logged_in is None:
            self._set_status(CameraStatus.TWO_FACTOR_AUTH_REQUIRED)
        elif self._logged_in:
            self._set_status(CameraStatus.LOGGED_IN)
        else:
            self._set_status(CameraStatus.FAILED_LOGIN)
            if self._logged_in_previously:
                _LOGGER.error("Unable to log in, check credentials")

    def _handle_device(self):
        """Resolve the cloud device matching this vacuum's token and create it."""
        _LOGGER.debug(f"Retrieving device info, country: {self._country}")
        country, user_id, device_id, model = self._connector.get_device_details(self._vacuum.token, self._country)
        if model is not None:
            self._country = country
            _LOGGER.debug(f"Retrieved device model: {model}")
            self._used_api = self._detect_api(model)
            # Unknown API flavours fall back to the generic implementation.
            device_init = DEVICE_MAPPING.get(self._used_api, UnsupportedVacuum)
            self._device = device_init(self._connector, self._country, user_id, device_id, model)
            _LOGGER.debug(f"Created device, used api: {self._used_api}")
        else:
            self._set_status(CameraStatus.FAILED_TO_RETRIEVE_DEVICE)

    def _handle_map_name(self, counter):
        """Ask the vacuum for the current map name, retrying up to *counter* times.

        Returns "retry" when the name could not be obtained, or "0" for API
        flavours that do not fetch the map from the vacuum itself.
        """
        map_name = "retry"
        if self._device is not None and not self._device.should_get_map_from_vacuum():
            map_name = "0"
        while map_name == "retry" and counter > 0:
            _LOGGER.debug("Retrieving map name from device")
            time.sleep(0.1)
            try:
                map_name = self._vacuum.map()[0]
                _LOGGER.debug("Map name %s", map_name)
            except OSError as exc:
                _LOGGER.error(f"Got OSError while fetching the state: {str(exc)}")
            except DeviceException as exc:
                # Warn only on the first failure of a streak to keep the log quiet.
                if self._received_map_name_previously:
                    _LOGGER.warning(f"Got exception while fetching the state: {str(exc)}")
                self._received_map_name_previously = False
            finally:
                counter = counter - 1
        return map_name

    def _handle_map_data(self, map_name):
        """Download the map from the cloud and render it; update status accordingly."""
        _LOGGER.debug("Retrieving map from Xiaomi cloud")
        # Only pass a storage path when raw-map storing is enabled.
        store_map_path = self._store_map_path if self._store_map_raw else None
        map_data, map_stored = self._device.get_map(map_name, self._colors, self._drawables, self._texts,
                                                    self._sizes, self._image_config, store_map_path)
        if map_data is not None:
            # noinspection PyBroadException
            try:
                _LOGGER.debug("Map data retrieved")
                self._set_map_data(map_data)
                self._map_saved = map_stored
                if self._map_data.image.is_empty:
                    self._set_status(CameraStatus.EMPTY_MAP)
                    # NOTE(review): this inner condition is always true right
                    # after the is_empty check above — left as-is on purpose.
                    if self._map_data is None or self._map_data.image.is_empty:
                        self._set_map_data(map_data)
                else:
                    self._set_map_data(map_data)
                    self._set_status(CameraStatus.OK)
            except Exception as ex:
                self._set_status(CameraStatus.UNABLE_TO_PARSE_MAP, ex)
        else:
            # A failed download usually means the session expired; force a
            # fresh login on the next cycle.
            self._logged_in = False
            self._set_status(CameraStatus.UNABLE_TO_RETRIEVE_MAP)

    def _set_status(self, status, ex: Optional[Exception] = None):
        """Record *status* and log it at the severity mapped in STATUS_LOG_LEVEL."""
        log = STATUS_LOG_LEVEL.get(status, _LOGGER.debug)
        log_message = status
        if ex is not None:
            log_message = f"{status}, Error: {str(ex)}"
        self._status = status
        log(log_message)

    def _set_map_data(self, map_data: MapData):
        """Render *map_data* to PNG bytes and optionally store the image on disk."""
        img_byte_arr = io.BytesIO()
        map_data.image.data.save(img_byte_arr, format='PNG')
        self._image = img_byte_arr.getvalue()
        self._map_data = map_data
        self._store_image()

    def _detect_api(self, model: str):
        """Pick the API flavour for *model*: forced > exception table > prefix match."""
        if self._forced_api is not None:
            return self._forced_api
        if model in API_EXCEPTIONS:
            return API_EXCEPTIONS[model]

        def list_contains_model(prefixes):
            # True when the model name starts with any of the given prefixes.
            return len(list(filter(lambda x: model.startswith(x), prefixes))) > 0

        filtered = list(filter(lambda x: list_contains_model(x[1]), AVAILABLE_APIS.items()))
        if len(filtered) > 0:
            return filtered[0][0]
        return CONF_AVAILABLE_API_XIAOMI

    def _store_image(self):
        """Write the rendered PNG next to the raw map when configured to do so."""
        if self._store_map_image:
            try:
                if self._image is not None:
                    image = Image.open(io.BytesIO(self._image))
                    image.save(f"{self._store_map_path}/map_image_{self._device.model}.png")
            except Exception as ex:
                _LOGGER.warning(f"Error while saving image, Error: {str(ex)}")
| 36.943953 | 115 | 0.660093 | 10,788 | 0.861386 | 0 | 0 | 267 | 0.021319 | 0 | 0 | 746 | 0.059566 |
2ab6714654a4d72674f09fdaa3a13475da0d16e1 | 974 | py | Python | util.py | yasser888/Semantic-Segmentation-SDC-P12 | 973f6b8574dfa13413558b625f5675a422a5161e | [
"MIT"
] | null | null | null | util.py | yasser888/Semantic-Segmentation-SDC-P12 | 973f6b8574dfa13413558b625f5675a422a5161e | [
"MIT"
] | null | null | null | util.py | yasser888/Semantic-Segmentation-SDC-P12 | 973f6b8574dfa13413558b625f5675a422a5161e | [
"MIT"
] | null | null | null | import os;
import time;
# Control-flag file polled by can_continue(): first line 'Y' keeps going, 'N' stops.
cont_file_path = './cont.txt';
# Directory where create_log() places its timestamped log files.
log_file_path = './logs/';
def can_continue(path=None):
    """Check the control file and report whether processing may continue.

    The first line of the control file acts as a flag: ``Y`` means keep
    going, ``N`` means stop.  A missing file (or any other content) is
    treated as "continue" so the caller is never blocked by accident.

    :param path: optional control-file path; defaults to the module-level
                 ``cont_file_path``.
    :return: ``True`` to continue, ``False`` to stop.
    """
    if path is None:
        path = cont_file_path
    can_cont = True
    if os.path.exists(path):
        with open(path, 'r') as f:
            # strip() fixes the original bug: a trailing newline ('N\n')
            # made the comparison fail, so the stop flag was ignored.
            line = f.readline().strip()
            if line == 'Y':
                print("Cont...")
                can_cont = True
            elif line == 'N':
                print("| Stop |")
                can_cont = False
    return can_cont
# can_continue();
def create_log(log_dir=None):
    """Create a new, empty, timestamp-named log file and return its path.

    :param log_dir: optional directory for the log file; defaults to the
                    module-level ``log_file_path``.  The directory is
                    created if it does not exist yet.
    :return: path of the newly created (empty) ``.txt`` log file.
    """
    if log_dir is None:
        log_dir = log_file_path
    # makedirs(exist_ok=True) handles nested paths and avoids the race
    # between the exists() check and mkdir() in the original code.
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, str(time.time()) + '.txt')
    # Touch the file so later appends (see log_n_print) always succeed.
    with open(log_file, "w"):
        pass
    return log_file
def log_n_print(log_file, line):
    """Append *line* to *log_file* and echo it to stdout.

    :param log_file: path of the log file (opened in append mode).
    :param line: message to record; a newline is appended automatically.
    """
    with open(log_file, 'a') as f:
        # write(), not writelines(): a single message is one string, and
        # writelines() would merely iterate over its characters here.
        f.write(line + '\n')
    print(line)
# log_f = create_log();
# print(log_f)
# log_line(log_f,"1....a");
# log_line(log_f,"2....a");
# log_line(log_f,"3....a");
| 23.190476 | 74 | 0.541068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.205339 |
2ab6f2fd95e89231a622465bab0cf4bb48fade28 | 418 | py | Python | vtr_flow/scripts/benchtracker/flask_cors/__init__.py | HackerFoo/vtr-verilog-to-routing | 9bf3c1004e8a7c9f3756167905fd2e71218158d0 | [
"MIT"
] | 1 | 2020-05-07T17:38:58.000Z | 2020-05-07T17:38:58.000Z | vtr_flow/scripts/benchtracker/flask_cors/__init__.py | HackerFoo/vtr-verilog-to-routing | 9bf3c1004e8a7c9f3756167905fd2e71218158d0 | [
"MIT"
] | 12 | 2015-06-26T19:47:14.000Z | 2015-07-06T17:29:29.000Z | vtr_flow/scripts/benchtracker/flask_cors/__init__.py | HackerFoo/vtr-verilog-to-routing | 9bf3c1004e8a7c9f3756167905fd2e71218158d0 | [
"MIT"
] | 1 | 2020-08-15T03:04:34.000Z | 2020-08-15T03:04:34.000Z | # -*- coding: utf-8 -*-
"""
flask_cors
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from .decorator import cross_origin
from .extension import CORS
from .version import __version__
# Public API re-exported from the package root.
__all__ = ['CORS', 'cross_origin']
| 26.125 | 75 | 0.696172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.72488 |
2ab807f882174850903dfd3ada3ceadfda7a1b90 | 554 | py | Python | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | 3 | 2015-06-12T04:42:02.000Z | 2018-10-29T17:09:10.000Z | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | ownblock/ownblock/apps/parking/views.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from .models import Vehicle
from .serializers import VehicleSerializer
class VehicleViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for vehicles, scoped to the requesting building."""

    model = Vehicle
    serializer_class = VehicleSerializer

    def pre_save(self, obj):
        # Default the owner to the authenticated resident when not set.
        if obj.resident_id is None:
            obj.resident = self.request.user

    def get_queryset(self):
        # Only vehicles of residents living in the current building, with
        # the related rows joined in to avoid per-vehicle queries.
        queryset = super().get_queryset()
        queryset = queryset.filter(
            resident__apartment__building=self.request.building)
        return queryset.select_related('resident', 'resident__apartment')
| 25.181818 | 63 | 0.685921 | 442 | 0.797834 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.055957 |
2ab9fbffdfdf51006d95c7a23e622d34842779dc | 1,337 | py | Python | Pages/checkout_overview_page.py | gykudo/sauce_demo | 313b4bd956cad4807d225b0a7ed98ca0be9f3371 | [
"MIT"
] | null | null | null | Pages/checkout_overview_page.py | gykudo/sauce_demo | 313b4bd956cad4807d225b0a7ed98ca0be9f3371 | [
"MIT"
] | null | null | null | Pages/checkout_overview_page.py | gykudo/sauce_demo | 313b4bd956cad4807d225b0a7ed98ca0be9f3371 | [
"MIT"
] | null | null | null | from Locators.checkout_overview_locator import CheckoutOverviewLocator
from Objects.product import Product
from Pages.base_page_object import BasePage
from Utils.utility import Utils
class CheckoutOverViewPage(BasePage):
    """Page object for the checkout overview (order summary) screen."""

    def __init__(self, driver):
        super().__init__(driver)

    def get_product_overview_info(self, index):
        """Read the *index*-th line item and wrap it in a Product."""
        locator = CheckoutOverviewLocator
        return Product(
            self.get_text(locator.PRODUCT_NAME_LABEL(index)),
            self.get_text(locator.PRODUCT_DESC_LABEL(index)),
            self.get_text(locator.PRODUCT_PRICE_LABEL(index)),
            self.get_text(locator.PRODUCT_QTY_LABEL(index)),
        )

    def get_product_price(self):
        """Item subtotal shown on the page, as a float."""
        label = self.get_text(CheckoutOverviewLocator.TOTAL_ITEM_PRICE_LABEL)
        return float(Utils.convert_string_to_float(self, label))

    def get_tax(self):
        """Tax amount shown on the page, as a float."""
        label = self.get_text(CheckoutOverviewLocator.TAX_LABEL)
        return float(Utils.convert_string_to_float(self, label))

    def get_total_price(self):
        """Grand total shown on the page, as a float."""
        label = self.get_text(CheckoutOverviewLocator.TOTAL_PRICE_LABEL)
        return float(Utils.convert_string_to_float(self, label))

    def click_finish_button(self):
        """Complete checkout by pressing the FINISH button."""
        return self.click(CheckoutOverviewLocator.FINISH_BUTTON)
| 37.138889 | 78 | 0.799551 | 1,151 | 0.860883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2aba7a80b083c387ee2bbc9ad1d0586ac7d2b75c | 6,336 | py | Python | plugin.video.vstream/resources/lib/comaddon.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.vstream/resources/lib/comaddon.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.vstream/resources/lib/comaddon.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | # -*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
import xbmcaddon, xbmcgui, xbmc
"""System d'importation
from resources.lib.comaddon import addon, dialog, VSlog, xbmcgui, xbmc
"""
"""
from resources.lib.comaddon import addon
addons = addon() en haut de page.
utiliser une fonction comaddon ou xbmcaddon
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcaddon.html
addons.VSlang(30305)
addons.getLocalizedString(30305)
addons.openSettings()
utiliser la fonction avec un autre addon
addons2 = addon('plugin.video.youtube')
addons2.openSettings()
"""
class addon(xbmcaddon.Addon):
    """Thin wrapper over ``xbmcaddon.Addon`` with vStream helpers."""

    def VSlang(self, lang):
        """Return the localized string *lang*, passed through translatePath."""
        localized = self.getLocalizedString(lang)
        return xbmc.translatePath(localized)

    # Deprecated: call addon.getSetting / addon.setSetting directly instead.
    def VSsetting(self, name, value = False):
        """Get (no value given) or set (truthy value) the setting *name*."""
        if not value:
            return self.getSetting(name)
        return self.setSetting(name, value)
"""
from resources.lib.comaddon import dialog
ne peux pas utiliser les autres fonction que dialog
dialogs = dialog()
dialogs.VSinfo('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Dialog
"""
class dialog(xbmcgui.Dialog):
    """``xbmcgui.Dialog`` with vStream convenience wrappers."""

    def VSok(self, desc, title = 'vStream'):
        """Modal OK box; returns once the user dismisses it."""
        return self.ok(title, desc)

    def VSyesno(self, desc, title = 'vStream'):
        """Modal yes/no box; truthy when the user confirms."""
        return self.yesno(title, desc)

    def VSselect(self, desc, title = 'vStream'):
        """Selection list; returns the chosen index (-1 on cancel)."""
        return self.select(title, desc)

    def VSselectqual(self, list_qual, list_url):
        """Let the user pick a quality and return the matching URL.

        Returns '' when there is nothing to pick or the user cancels; the
        dialog is skipped entirely when only one URL is available.
        """
        if len(list_url) == 0:
            return ''
        if len(list_url) == 1:
            return list_url[0]
        choice = self.select(addon().VSlang(30448), list_qual)
        return list_url[choice] if choice > -1 else ''

    def VSinfo(self, desc, title = 'vStream', iseconds = 0, sound = False):
        """Toast notification; *iseconds* of 0 means a one-second display."""
        duration_ms = 1000 if iseconds == 0 else iseconds * 1000
        if addon().getSetting('Block_Noti_sound') == 'true':
            sound = True
        return self.notification(str(title), str(desc), xbmcgui.NOTIFICATION_INFO, duration_ms, sound)

    def VSerror(self, e):
        """Show an error toast and log it; returns the (notification, log) pair."""
        message = 'Erreur: ' + str(e)
        return self.notification('vStream', message, xbmcgui.NOTIFICATION_ERROR, 2000), VSlog(message)
"""
from resources.lib.comaddon import progress
progress_ = progress()
progress_.VScreate(SITE_NAME)
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
progress_.VSclose(progress_)
dialog = progress() non recommander
progress = progress() non recommander
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#DialogProgress
"""
# Shared progress counter incremented by progress.VSupdate across calls.
COUNT = 0
# Singleton progress-dialog instance created by progress.VScreate.
DIALOG2 = None
class empty():
    """Null-object stand-in for ``progress``: same interface, does nothing.

    Returned by ``progress.VScreate`` when no dialog may be shown (home
    window, id 10000), so callers can drive it without special-casing.
    """
    def VSupdate(self, dialog, total, text = '', search = False):
        pass
    def iscanceled(self):
        pass
    def VSclose(self, dialog):
        pass
class progress(xbmcgui.DialogProgress):
    """Singleton progress dialog shared through the module-level DIALOG2/COUNT."""

    def VScreate(self, title = 'vStream', desc = ''):
        """Create (or reuse) the shared progress dialog.

        Returns an ``empty()`` null object on the home window (id 10000),
        where showing a dialog is not wanted.
        """
        global DIALOG2
        currentWindow = xbmcgui.getCurrentWindowId()
        if currentWindow == 10000:
            return empty()
        if DIALOG2 == None:
            self.create(title, desc)
            VSlog('create dialog')
            DIALOG2 = self
            return self
        else:
            return DIALOG2

    def VSupdate(self, dialog, total, text = '', search = False):
        """Advance the shared counter by one and refresh the dialog.

        Skipped while a search is in progress (window property 'search'),
        unless *search* itself is True.
        """
        if not search and window(10101).getProperty('search') == 'true':
            return
        global COUNT
        COUNT += 1
        iPercent = int(float(COUNT * 100) / total)
        dialog.update(iPercent, 'Loading: ' + str(COUNT) + '/' + str(total), text)

    def VSclose(self, dialog = ''):
        """Close *dialog* (or the shared one) unless a search is running."""
        if not dialog and DIALOG2:
            dialog = DIALOG2
        if not dialog:
            return
        if window(10101).getProperty('search') == 'true':
            return
        dialog.close()
        VSlog('close dialog')
        del dialog
        return False
"""
from resources.lib.comaddon import window
window(10101).getProperty('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#Window
"""
class window(xbmcgui.Window):
    """``xbmcgui.Window`` opened by id, used here for getProperty lookups."""
    def __init__(self, id):
        # Deliberate no-op: instances are only used for property reads
        # (see progress.VSupdate / VSclose).
        pass
"""
from resources.lib.comaddon import listitem
listitem.setLabel('test')
http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem
"""
class listitem(xbmcgui.ListItem):
    """``xbmcgui.ListItem`` alias keeping the classic constructor signature."""
    # Historical signature: ListItem([label, label2, iconImage, thumbnailImage, path])
    def __init__(self, label = '', label2 = '', iconImage = '', thumbnailImage = '', path = ''):
        # No-op: only preserves the 5-argument call form for callers.
        pass
"""
from resources.lib.comaddon import VSlog
VSlog('testtttttttttttt')
ou
xbmc.log
"""
#xbmc des fonctions pas des class
def VSlog(e, level = xbmc.LOGDEBUG):
    """Write *e* to the Kodi log, promoting to NOTICE when debug is enabled."""
    # The addon id is passed explicitly so this also works outside the addon.
    debug_enabled = addon('plugin.video.vstream').getSetting('debug') == 'true'
    if debug_enabled:
        level = xbmc.LOGNOTICE
    return xbmc.log('\t[PLUGIN] vStream: ' + str(e), level)
def VSupdate():
    """Refresh the current Kodi container (reloads the listing)."""
    return xbmc.executebuiltin('Container.Refresh')
def VSshow_busy():
    """Display Kodi's busy (spinner) dialog."""
    xbmc.executebuiltin('ActivateWindow(busydialog)')
def VShide_busy():
    """Close the busy dialog and wait until it has actually disappeared."""
    xbmc.executebuiltin('Dialog.Close(busydialog)')
    # Closing is asynchronous: poll until the window is really gone.
    while xbmc.getCondVisibility('Window.IsActive(busydialog)'):
        xbmc.sleep(100)
def isKrypton():
    """Return True when Kodi is version 17 (Krypton) or newer.

    Any failure to read or parse the build version is treated as
    "not Krypton", so callers can safely use this as a feature gate.
    """
    try:
        version = xbmc.getInfoLabel('system.buildversion')
        # Compare numerically: the original two-character string compare
        # ('9.' >= '17' is lexically True) misclassified single-digit
        # majors and would also break for three-digit versions.
        return int(version.split('.')[0]) >= 17
    except Exception:
        return False
def VSread(sHtmlContent):
    """Dump *sHtmlContent* to a debug file in the addon's userdata folder."""
    import xbmcvfs
    dump_path = 'special://userdata/addon_data/plugin.video.vstream/html.txt'
    # Start from a clean slate: remove any previous dump first.
    if xbmcvfs.exists(dump_path):
        xbmcvfs.delete(dump_path)
    handle = xbmcvfs.File(dump_path, 'w')
    handle.write(sHtmlContent)
    handle.close()
# Use cGui.showKeyBoard instead of this stub.
def VSkeyboard(sDefaultText = ''):
    # Stub kept for compatibility: always reports "no input".
    return False
| 25.756098 | 126 | 0.640467 | 3,579 | 0.564689 | 0 | 0 | 0 | 0 | 0 | 0 | 2,622 | 0.413695 |
2abc2ed03517cdd21bec733eb97787935e4839c0 | 2,457 | py | Python | android/replace_apk_resource_pro/replace_source.py | roceys/tools_python | 9c8d5c1c7c1ae4a4c857a65f5b5f14da1c90e425 | [
"Apache-2.0"
] | 130 | 2019-05-19T16:17:26.000Z | 2022-03-30T11:48:38.000Z | android/replace_apk_resource_pro/replace_source.py | roceys/tools_python | 9c8d5c1c7c1ae4a4c857a65f5b5f14da1c90e425 | [
"Apache-2.0"
] | null | null | null | android/replace_apk_resource_pro/replace_source.py | roceys/tools_python | 9c8d5c1c7c1ae4a4c857a65f5b5f14da1c90e425 | [
"Apache-2.0"
] | 119 | 2019-05-27T09:45:14.000Z | 2022-03-09T03:44:53.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: replace_source.py
@time: 4/25/19 10:46
@description:替换apk的资源文件
"""
from file_utils import *
import os
from subprocess import Popen, PIPE, STDOUT
class ReplaceApkSource(object):
    """Unpacks an APK, swaps one resource image, repacks and re-signs it."""

    def __init__(self):
        # Resource file (inside the APK) that gets replaced.
        self.file_name = 'logo_white.png'

        # Name of the repacked output APK.
        self.target_apk_name = 'new.apk'

    def start(self):
        """Run the full pipeline: unzip, replace, repack, re-sign."""
        # 1. Unpack the APK with apktool.jar.
        file_name_pre = self.__unzip_apk()

        # 2. Replace the resource image.
        self.__replace_source(file_name_pre)

        # 3. Repack into a new APK.
        self.__rezip_apk(file_name_pre)

        # 4. Sign the new APK.
        self.__re_sign()

    def __unzip_apk(self):
        """Unpack the APK found in the current directory.

        :return: the APK's base name (without extension), which is also the
                 name of the folder apktool extracts into.
        """
        # File name including the extension.
        file_name = get_current_folder_file('apk')

        # File name without the extension.
        file_name_pre = file_name.split('.')[0]

        os.system('java -jar apktool.jar d %s' % file_name)

        print('第1步:解压成功~')

        return file_name_pre

    def __replace_source(self, file_name_pre):
        """Replace the logo resource inside the extracted folder.

        :param file_name_pre: name of the folder apktool extracted into.
        """
        print('生成文件夹的名字是:%s' % file_name_pre)

        # Rename the replacement image in the current directory.
        rename_current_file("png", self.file_name)

        # Full path of the resource to be replaced.
        logo_file_path = './%s/res/drawable-mdpi/logo_white.png' % file_name_pre

        # Perform the actual file replacement.
        replace_file('./%s' % self.file_name, logo_file_path)

        print('第2步:替换资源图片成功~')

    def __rezip_apk(self, folder_name):
        """Repack the modified folder into a new APK.

        :param folder_name: name of the extracted source folder.
        """
        # Rebuild the APK with apktool.
        os.system('java -jar apktool.jar b %s -o %s' % (folder_name, self.target_apk_name))
        # Remove the temporary extraction folder.
        shutil.rmtree('./%s/' % folder_name)
        print('第3步:重新打包成功~')

    def __re_sign(self):
        """Re-sign the repacked APK with jarsigner (keystore details redacted)."""
        # Sign the new APK; the password is fed to jarsigner via stdin below.
        cmd = 'jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -keystore **.keystore -storepass ** %s **' % self.target_apk_name
        p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT, shell=True)
        # Supply the interactive password prompt input.
        p.communicate(input=b'nantian')
        print('第4步:重新签名成功~')
if __name__ == '__main__':
    # Script entry point: run the unzip/replace/repack/resign pipeline.
    replace_apk_source = ReplaceApkSource()
    replace_apk_source.start()
    print('恭喜!完成操作~')
| 22.135135 | 135 | 0.582825 | 2,320 | 0.821821 | 0 | 0 | 0 | 0 | 0 | 0 | 1,390 | 0.492384 |
2abcd921b69ac065ccb1607e10b4019c98f27da0 | 226 | py | Python | catalog/bindings/gmd/circle_by_center_point_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/circle_by_center_point_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/circle_by_center_point_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.gmd.arc_by_center_point_type import ArcByCenterPointType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CircleByCenterPointType(ArcByCenterPointType):
    """GML CircleByCenterPointType; structurally identical to its ArcByCenterPointType base."""
    pass
| 22.6 | 70 | 0.836283 | 61 | 0.269912 | 0 | 0 | 72 | 0.318584 | 0 | 0 | 28 | 0.123894 |
2abdc47ec5c26a0913fa66840b8fe8ccfd50d515 | 192 | py | Python | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/multi_file_target_workspace/example_one/pipelines.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/multi_file_target_workspace/example_one/pipelines.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/multi_file_target_workspace/example_one/pipelines.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | # type: ignore[attr-defined]
from solids import example_one_solid # pylint: disable=import-error
from dagster import pipeline
@pipeline
def example_one_pipeline():
example_one_solid()
| 19.2 | 68 | 0.786458 | 0 | 0 | 0 | 0 | 61 | 0.317708 | 0 | 0 | 58 | 0.302083 |
2abe21e7674c39c2e36ceacf5a1d3ae67238854a | 1,637 | py | Python | src/rxn_network/firetasks/utils.py | bigboyabhisthi/reaction-network | b84f16b7261ecd62d7aa8e2681907f6ea0c35565 | [
"BSD-3-Clause-LBNL"
] | 1 | 2022-02-22T23:09:47.000Z | 2022-02-22T23:09:47.000Z | src/rxn_network/firetasks/utils.py | bigboyabhisthi/reaction-network | b84f16b7261ecd62d7aa8e2681907f6ea0c35565 | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/rxn_network/firetasks/utils.py | bigboyabhisthi/reaction-network | b84f16b7261ecd62d7aa8e2681907f6ea0c35565 | [
"BSD-3-Clause-LBNL"
] | null | null | null | "Utility Fireworks functions borrowed from the atomate package."
import logging
import os
import sys
from typing import Optional
def env_chk(
val: str,
fw_spec: dict,
strict: Optional[bool] = True,
default: Optional[str] = None,
):
"""
Code borrowed from the atomate package.
env_chk() is a way to set different values for a property depending
on the worker machine. For example, you might have slightly different
executable names or scratch directories on different machines.
Args:
val: any value, with ">><<" notation reserved for special env lookup values
fw_spec: fw_spec where one can find the _fw_env keys
strict: if True, errors if env format (>><<) specified but cannot be found in fw_spec
default: if val is None or env cannot be found in non-strict mode,
return default
"""
if val is None:
return default
if isinstance(val, str) and val.startswith(">>") and val.endswith("<<"):
if strict:
return fw_spec["_fw_env"][val[2:-2]]
return fw_spec.get("_fw_env", {}).get(val[2:-2], default)
return val
def get_logger(
name: str,
level=logging.DEBUG,
log_format="%(asctime)s %(levelname)s %(name)s %(message)s",
stream=sys.stdout,
):
"""
Code borrowed from the atomate package.
Helper method for acquiring logger.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
formatter = logging.Formatter(log_format)
sh = logging.StreamHandler(stream=stream)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
| 26.836066 | 93 | 0.660965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.525351 |
2abe330511d1c30bbc24a2fb6be6e9eb79bc3748 | 1,328 | py | Python | train.py | aryachiranjeev/TIC-TAC-TOE-using-RL- | 129efdac8246e9fd1416bda1ec7e134fb9cf90b1 | [
"MIT"
] | null | null | null | train.py | aryachiranjeev/TIC-TAC-TOE-using-RL- | 129efdac8246e9fd1416bda1ec7e134fb9cf90b1 | [
"MIT"
] | null | null | null | train.py | aryachiranjeev/TIC-TAC-TOE-using-RL- | 129efdac8246e9fd1416bda1ec7e134fb9cf90b1 | [
"MIT"
] | null | null | null | import random
import torch
from game import Game
from agent import RLAgent
from moves import Moves
game=Game()
agent=RLAgent()
moves=Moves()
num_win=0 #initialize no. of win by human
num_lose=0 #initialize no. of win by ai but loss by human
num_tie=0
random.seed(1000)
def check_board_and_may_update_state_values():
global num_win, num_lose, num_tie
win_or_tie=True
if game.who_wins()==-1: #human win
print("YOU WIN!!")
agent.update_state_values(0)
num_win+=1
elif game.who_wins()==1: #ai win
print("YOU LOSE!!")
agent.update_state_values(1)
num_lose+=1
elif game.who_wins()==2: #tie
print("TIE!!")
num_tie+=1
else:
win_or_tie=False
if win_or_tie:
game.clear()
agent.clear_history()
return win_or_tie
while True:
print("The number of wins are : {}\nThe number of loses : {}\nThe number of ties is : {}".format(num_win, num_lose, num_tie))
if (num_win+num_lose+num_tie) == 30000:
break
#moves
x,y=moves.random_move(game.board)
game.take_move(x,y,-1)
print(game)
#check
win_or_tie=check_board_and_may_update_state_values()
if win_or_tie:
continue
#RL AI move
x,y=agent.next_move(game.board)
game.take_move(x,y,1)
agent.cache_move(game.board)
print(game)
#check
check_board_and_may_update_state_values()
torch.save(agent.state_values,'tic_tac_toe.pth')
| 16.810127 | 126 | 0.72741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.194277 |
2abef5933d8688995ffca89d43bae2f0783a8e26 | 2,265 | py | Python | iota-clients/python-client/receive_msg.py | fprotopapa/iota-and-iot | b27690132452877822d95a9e379f275a5fd41edd | [
"MIT"
] | 1 | 2021-11-19T10:56:25.000Z | 2021-11-19T10:56:25.000Z | iota-clients/python-client/receive_msg.py | fprotopapa/iota-and-iot | b27690132452877822d95a9e379f275a5fd41edd | [
"MIT"
] | null | null | null | iota-clients/python-client/receive_msg.py | fprotopapa/iota-and-iot | b27690132452877822d95a9e379f275a5fd41edd | [
"MIT"
] | null | null | null | # receive_msg.py
#
# SPDX-FileCopyrightText: Copyright 2021 Fabbio Protopapa
#
# SPDX-License-Identifier: MIT
#
# Receive message from IOTA tangle
#
import iota_client
import os
import pprint
# Config
msg_meta = False
env_node_address = 'HORNET_NODE_ADDRESS'
# Print Message data
def show_message(message, meta=False):
if meta:
show = 'Message meta'
else:
show = 'Message'
print(
'''
{} data:
'''.format(show))
pprint.pprint(message)
# Connect to node and retrieve message
def main():
import argparse
parser = argparse.ArgumentParser(description='Receive message from IOTA tangle.')
parser.add_argument('--msg_id', dest='msg_id',
default='497c1b68e5480d07819bbd9c989c8d245fa748667a89fdf7dac884741f493326',
help='Id of message stored on tangle')
parser.add_argument('--node_info', dest='node_info',
default=False,
help='Print node information')
args = parser.parse_args()
message_id = args.msg_id
node_info = args.node_info
# Get node address out of environment
NODE_URL = os.getenv(env_node_address)
if not NODE_URL:
raise Exception("Please define environment variable with node URL.")
try:
# Initialize client
client = iota_client.Client(
nodes_name_password=[[NODE_URL]], node_sync_disabled=True)
except:
raise Exception('Node not found.')
# Check node status
if not client.get_health():
print('''
------------------
Node not healthy.
------------------''')
# Get node information
if node_info:
print('Node Information:')
pprint.pprint(client.get_info())
# Retrieve message from Tangle
message = client.get_message_data(message_id)
# Show results
show_message(message)
if msg_meta:
message_meta = client.get_message_metadata(message_id)
show_message(message_meta, True)
# Decode message
msg_str = bytes(message['payload']['indexation'][0]['data']).decode('utf-8')
print('''
Decoded message:
{}
'''.format(msg_str))
if __name__ == "__main__":
main() | 23.842105 | 99 | 0.61766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 911 | 0.402208 |
2abfb7b11aa2ded198b346e1492890affd3b19aa | 2,601 | py | Python | py3status/modules/systemd_suspend_inhibitor.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | [
"BSD-3-Clause"
] | 1 | 2020-04-07T19:11:36.000Z | 2020-04-07T19:11:36.000Z | py3status/modules/systemd_suspend_inhibitor.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | [
"BSD-3-Clause"
] | 2 | 2018-03-15T18:44:42.000Z | 2018-03-15T19:22:04.000Z | py3status/modules/systemd_suspend_inhibitor.py | boucman/py3status | 84b57304fbf71a466ccb1ed2f2dd039ece6eefb6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Turn on and off systemd suspend inhibitor.
Configuration parameters:
format: display format for this module
(default '[\?color=state SUSPEND [\?if=state OFF|ON]]')
lock_types: specify state to inhibit, comma separated list
https://www.freedesktop.org/wiki/Software/systemd/inhibit/
(default ['handle-lid-switch', 'idle', 'sleep'])
thresholds: specify color thresholds to use
(default [(True, 'bad'), (False, 'good')])
Format placeholders:
{state} systemd suspend inhibitor state, eg True, False
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
@author Cyrinux https://github.com/cyrinux
@license BSD
SAMPLE OUTPUT
[{'full_text': 'SUSPEND ON', 'color': '#00FF00'}]
off
[{'full_text': 'SUSPEND OFF', 'color': '#FF0000'}]
"""
from dbus import SystemBus
from os import close
STRING_DBUS_EXCEPTION = "DBUS error, systemd-logind not started?"
STRING_BAD_LOCK_TYPES = "DBUS error, bad lock types used"
class Py3status:
"""
"""
# available configuration parameters
format = "[\?color=state SUSPEND [\?if=state OFF|ON]]"
lock_types = ["handle-lid-switch", "idle", "sleep"]
thresholds = [(True, "bad"), (False, "good")]
def post_config_hook(self):
try:
self.login1 = SystemBus().get_object(
"org.freedesktop.login1", "/org/freedesktop/login1"
)
except Exception:
raise Exception(STRING_DBUS_EXCEPTION)
self.lock = None
self.lock_types = ":".join(self.lock_types)
self.thresholds_init = self.py3.get_color_names_list(self.format)
def systemd_suspend_inhibitor(self):
suspend_data = {"state": bool(self.lock)}
for x in self.thresholds_init:
if x in suspend_data:
self.py3.threshold_get_color(suspend_data[x], x)
return {
"cached_until": self.py3.CACHE_FOREVER,
"full_text": self.py3.safe_format(self.format, suspend_data),
}
def on_click(self, event):
if self.lock is None:
self.lock = self.login1.Inhibit(
self.lock_types,
"Py3Status",
"Systemd suspend inhibitor module",
"block",
dbus_interface="org.freedesktop.login1.Manager",
).take()
else:
close(self.lock)
self.lock = None
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| 28.582418 | 73 | 0.618608 | 1,434 | 0.551326 | 0 | 0 | 0 | 0 | 0 | 0 | 1,262 | 0.485198 |
2ac03cc53a106bb021d3795e2d6e15a15beade33 | 1,595 | py | Python | maze/maze.py | SteliosKliafas/shortest_path_algorithms | 0f28973bce7d53feeee202424b448b5007b5df68 | [
"MIT"
] | null | null | null | maze/maze.py | SteliosKliafas/shortest_path_algorithms | 0f28973bce7d53feeee202424b448b5007b5df68 | [
"MIT"
] | null | null | null | maze/maze.py | SteliosKliafas/shortest_path_algorithms | 0f28973bce7d53feeee202424b448b5007b5df68 | [
"MIT"
] | null | null | null | import numpy as np
import time
def create_maze():
height = input("\n Enter the number of rows: ")
width = input("\n Enter the number of columns: ")
if height.isdigit() and width.isdigit():
height, width = int(height), int(width)
grid = np.random.randint(0, 10, (height, width))
grid[height - 1][width - 1] = 0
grid[0][0] = 0
print("\n The Integer Maze: \n")
print(grid)
return grid
else:
raise ValueError("Please provide positive integer values for the number of rows and columns")
def create_row(pos, grid):
row_of_distance_table = np.empty((len(grid), len(grid[0])))
row_of_distance_table[:] = np.NaN
row = []
for x in range(len(grid)):
for y in range(len(grid[0])):
if pos[0]+1 == x and pos[1] == y or pos[0]-1 == x and pos[1] == y \
or pos[0] == x and pos[1] == y+1 or pos[0] == x and pos[1] == y-1:
row_of_distance_table[x][y] = grid[x][y]
for array in row_of_distance_table:
row = np.concatenate([row, array], axis=None)
# print(row)
return row
def adjacency_matrix():
grid = create_maze()
distance_matrix = []
for x in range(len(grid)):
for y in range(len(grid[0])):
distance_matrix.append(create_row([x, y], grid))
distance_matrix = np.vstack(distance_matrix)
print("\n The Adjacency Matrix denoting the distances of all paths: \n")
print(distance_matrix)
print("\n height: ", len(distance_matrix), "width: ", len(distance_matrix[0]))
return distance_matrix
| 35.444444 | 101 | 0.598746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.165517 |
2ac0c0b791774e16cf9e50fa05289f36f0254019 | 2,707 | py | Python | lexer.py | standy66/kaleidoscope-python | e0fa536871e63ab8b06a0b6fc04375eca788df06 | [
"Apache-2.0"
] | 1 | 2020-06-09T12:10:49.000Z | 2020-06-09T12:10:49.000Z | lexer.py | standy66/kaleidoscope-python | e0fa536871e63ab8b06a0b6fc04375eca788df06 | [
"Apache-2.0"
] | null | null | null | lexer.py | standy66/kaleidoscope-python | e0fa536871e63ab8b06a0b6fc04375eca788df06 | [
"Apache-2.0"
] | null | null | null | from typing import Callable
from enum import Enum
from io import TextIOBase
import sys
class TokenType(Enum):
EOF = -1
DEF = -2
EXTERN = -3
IDENTIFIER = -4
NUMBER = -5
OP = -6
class Token:
GENERIC_TOKEN_TYPES = [TokenType.NUMBER, TokenType.IDENTIFIER, TokenType.OP]
def __init__(self, token_type: TokenType, **kwargs):
self.type = token_type
if self.type in Token.GENERIC_TOKEN_TYPES:
self.value = kwargs["value"]
else:
self.value = self.type.name
def __str__(self):
return repr(self)
def __repr__(self):
if self.type not in Token.GENERIC_TOKEN_TYPES:
return f"Token(token_type={self.type})"
else:
return f"Token(token_type={self.type}, value={repr(self.value)})"
class Lexer:
def __init__(self, fp: TextIOBase):
self.fp = fp
self.current_token : Token = None
self.last_char = " "
def next_token(self) -> Token:
self._read_while(str.isspace)
if self.last_char.isalpha():
# identifier or def or extern
word = self._read_while(str.isalnum)
if word == "def":
self.current_token = Token(TokenType.DEF)
elif word == "extern":
self.current_token = Token(TokenType.EXTERN)
else:
self.current_token = Token(TokenType.IDENTIFIER, value=word)
elif self.last_char.isdigit() or self.last_char == ".":
# number
word = self._read_while(str.isdigit)
if self.last_char == ".":
word += "."
self._eat(1)
word += self._read_while(str.isdigit)
self.current_token = Token(TokenType.NUMBER, value=float(word))
elif self.last_char == "#":
# comment until eof or eol
self._read_while(lambda x: x not in ["\n", "\r", ""])
self._eat(1)
elif self.last_char == "":
# EOF
self.current_token = Token(TokenType.EOF)
else:
self.current_token = Token(TokenType.OP, value=self.last_char)
self._eat(1)
return self.current_token
def _read_while(self, predicate: Callable[[str], bool]):
word = ""
while predicate(self.last_char):
word += self.last_char
self.last_char = self.fp.read(1)
return word
def _eat(self, n: int):
self.last_char = self.fp.read(n)
def main():
l = Lexer(sys.stdin)
while True:
tok = l.next_token()
print(tok)
if tok.type == TokenType.EOF:
break
if __name__ == "__main__":
main()
| 27.907216 | 80 | 0.561138 | 2,411 | 0.890654 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.080163 |
2ac2ca8daf085ca9db44042efdfb98a16efe100b | 1,175 | py | Python | astrosql/deprecated/query.py | ketozhang/astroSQL | 390195dc737df7a91b23e22c626f64b20fa04feb | [
"Apache-2.0"
] | null | null | null | astrosql/deprecated/query.py | ketozhang/astroSQL | 390195dc737df7a91b23e22c626f64b20fa04feb | [
"Apache-2.0"
] | 1 | 2020-03-27T20:34:27.000Z | 2020-03-28T01:46:30.000Z | astrosql/deprecated/query.py | ketozhang/astroSQL | 390195dc737df7a91b23e22c626f64b20fa04feb | [
"Apache-2.0"
] | null | null | null | import peeweedb
import astropy.units as u
def get_by_basename(db, table, basename):
"""Get data from SQL database by basename. Returns a list of dict"""
if isinstance(table, str):
assert table in db.get_tables(), "Sanity Check Failed: Table queried does not exist"
table = peeweedb.tables[table]
else:
table = table
query = table.select().where(table.basename == basename)
print(query.sql())
data = list(query.dicts())
return data
def get_by_radec(db, table, ra, dec, radius):
"""
Get data from SQL database within a square area of the sky determined by ra, dec, radius.
Returns a list of dict
"""
radius = radius*u.arcmin.to(u.deg)
if isinstance(table, str):
assert table in db.get_tables(), "Sanity Check Failed: Table queried does not exist"
table = peeweedb.tables[table]
else:
table = table
query = table.select().where(
table.centerRa.between(ra - radius, ra + radius),
table.centerDec.between(dec - radius, dec + radius)
)
print(query.sql())
data = list(query.dicts())
return data
| 29.375 | 94 | 0.622979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.259574 |
2ac61fcea69ad7476d8abf7af785c80670e0b3ef | 5,878 | py | Python | venv/lib/python3.8/site-packages/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | 1 | 2022-02-24T18:15:56.000Z | 2022-02-24T18:15:56.000Z | venv/lib/python3.8/site-packages/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible_collections/community/general/plugins/modules/remote_management/redfish/ilo_redfish_info.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ilo_redfish_info
short_description: Gathers server information through iLO using Redfish APIs
version_added: 4.2.0
description:
- Builds Redfish URIs locally and sends them to iLO to
get information back.
- For use with HPE iLO operations that require Redfish OEM extensions.
options:
category:
required: true
description:
- List of categories to execute on iLO.
type: list
elements: str
command:
required: true
description:
- List of commands to execute on iLO.
type: list
elements: str
baseuri:
required: true
description:
- Base URI of iLO.
type: str
username:
description:
- User for authentication with iLO.
type: str
password:
description:
- Password for authentication with iLO.
type: str
auth_token:
description:
- Security token for authentication with iLO.
type: str
timeout:
description:
- Timeout in seconds for URL requests to iLO.
default: 10
type: int
author:
- "Bhavya B (@bhavya06)"
'''
EXAMPLES = '''
- name: Get iLO Sessions
community.general.ilo_redfish_info:
category: Sessions
command: GetiLOSessions
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result_sessions
'''
RETURN = '''
ilo_redfish_info:
description: Returns iLO sessions.
type: dict
contains:
GetiLOSessions:
description: Returns the iLO session msg and whether the function executed successfully.
type: dict
contains:
ret:
description: Check variable to see if the information was succesfully retrived.
type: bool
msg:
description: Information of all active iLO sessions.
type: list
elements: dict
contains:
Description:
description: Provides a description of the resource.
type: str
Id:
description: The sessionId.
type: str
Name:
description: The name of the resource.
type: str
UserName:
description: Name to use to log in to the management processor.
type: str
returned: always
'''
CATEGORY_COMMANDS_ALL = {
"Sessions": ["GetiLOSessions"]
}
CATEGORY_COMMANDS_DEFAULT = {
"Sessions": "GetiLOSessions"
}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
def main():
result = {}
category_list = []
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True, type='list', elements='str'),
command=dict(required=True, type='list', elements='str'),
baseuri=dict(required=True),
username=dict(),
password=dict(no_log=True),
auth_token=dict(no_log=True),
timeout=dict(type='int', default=10)
),
required_together=[
('username', 'password'),
],
required_one_of=[
('username', 'auth_token'),
],
mutually_exclusive=[
('username', 'auth_token'),
],
supports_check_mode=True
)
creds = {"user": module.params['username'],
"pswd": module.params['password'],
"token": module.params['auth_token']}
timeout = module.params['timeout']
root_uri = "https://" + module.params['baseuri']
rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)
# Build Category list
if "all" in module.params['category']:
for entry in CATEGORY_COMMANDS_ALL:
category_list.append(entry)
else:
# one or more categories specified
category_list = module.params['category']
for category in category_list:
command_list = []
# Build Command list for each Category
if category in CATEGORY_COMMANDS_ALL:
if not module.params['command']:
# True if we don't specify a command --> use default
command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
elif "all" in module.params['command']:
for entry in CATEGORY_COMMANDS_ALL[category]:
command_list.append(entry)
# one or more commands
else:
command_list = module.params['command']
# Verify that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg="Invalid Command: %s" % cmd)
else:
# Fail if even one category given is invalid
module.fail_json(msg="Invalid Category: %s" % category)
# Organize by Categories / Commands
if category == "Sessions":
for command in command_list:
if command == "GetiLOSessions":
result[command] = rf_utils.get_ilo_sessions()
module.exit_json(ilo_redfish_info=result)
if __name__ == '__main__':
main()
| 31.433155 | 104 | 0.586764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,420 | 0.581831 |
2ac69b5d4714157fc71eff7a6a23f99b61269132 | 8,407 | py | Python | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 315 | 2019-01-28T05:45:25.000Z | 2022-03-28T07:58:13.000Z | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 38 | 2019-02-21T06:17:00.000Z | 2021-07-05T12:40:00.000Z | train/solver.py | nhonth/DeLF-pytorch | 5577a447a0330b9e976cff56a10fc91669216b8c | [
"MIT"
] | 74 | 2019-01-28T08:08:10.000Z | 2021-12-20T09:27:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
PyTorch Implementation of training DeLF feature.
Solver for step 1 (finetune local descriptor)
nashory, 2018.04
'''
import os, sys, time
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utils import Bar, Logger, AverageMeter, compute_precision_top_k, mkdir_p
'''helper functions.
'''
def __cuda__(x):
if torch.cuda.is_available():
return x.cuda()
else:
return x
def __is_cuda__():
return torch.cuda.is_available()
def __to_var__(x, volatile=False):
return Variable(x, volatile=volatile)
def __to_tensor__(x):
return x.data
class Solver(object):
def __init__(self, config, model):
self.state = {k: v for k, v in config._get_kwargs()}
self.config = config
self.epoch = 0 # global epoch.
self.best_acc = 0 # global best accuracy.
self.prefix = os.path.join('repo', config.expr)
# ship model to cuda
self.model = __cuda__(model)
# define criterion and optimizer
self.criterion = nn.CrossEntropyLoss()
if config.optim.lower() in ['rmsprop']:
self.optimizer = optim.RMSprop(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['sgd']:
self.optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['adam']:
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
# decay learning rate by a factor of 0.5 every 10 epochs
self.lr_scheduler = optim.lr_scheduler.StepLR(
self.optimizer,
step_size=config.lr_stepsize,
gamma=config.lr_gamma)
# create directory to save result if not exist.
self.ckpt_path = os.path.join(self.prefix, config.stage, 'ckpt')
self.log_path = os.path.join(self.prefix, config.stage, 'log')
self.image_path = os.path.join(self.prefix, config.stage, 'image')
mkdir_p(self.ckpt_path)
mkdir_p(self.log_path)
mkdir_p(self.image_path)
# set logger.
self.logger = {}
self.title = 'DeLF-{}'.format(config.stage.upper())
self.logger['train'] = Logger(os.path.join(self.prefix, config.stage, 'log/train.log'))
self.logger['val'] = Logger(os.path.join(self.prefix, config.stage, 'log/val.log'))
self.logger['train'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
self.logger['val'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
def __exit__(self):
self.train_logger.close()
self.val_logger.close()
def __adjust_pixel_range__(self,
x,
range_from=[0,1],
range_to=[-1,1]):
'''
adjust pixel range from <range_from> to <range_to>.
'''
if not range_from == range_to:
scale = float(range_to[1]-range_to[0])/float(range_from[1]-range_from[0])
bias = range_to[0]-range_from[0]*scale
x = x.mul(scale).add(bias)
return x
def __save_checkpoint__(self, state, ckpt='ckpt', filename='checkpoint.pth.tar'):
filepath = os.path.join(ckpt, filename)
torch.save(state, filepath)
def __solve__(self, mode, epoch, dataloader):
'''solve
mode: train / val
'''
batch_timer = AverageMeter()
data_timer = AverageMeter()
prec_losses = AverageMeter()
prec_top1 = AverageMeter()
prec_top3 = AverageMeter()
prec_top5 = AverageMeter()
if mode in ['val']:
pass;
#confusion_matrix = ConusionMeter()
since = time.time()
bar = Bar('[{}]{}'.format(mode.upper(), self.title), max=len(dataloader))
for batch_idx, (inputs, labels) in enumerate(dataloader):
# measure data loading time
data_timer.update(time.time() - since)
# wrap inputs in variable
if mode in ['train']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs)
labels = __to_var__(labels)
elif mode in ['val']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs, volatile=True)
labels = __to_var__(labels, volatile=False)
# forward
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# backward + optimize
if mode in ['train']:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
prec_1, prec_3, prec_5 = compute_precision_top_k(
__to_tensor__(outputs),
__to_tensor__(labels),
top_k=(1,3,5))
batch_size = inputs.size(0)
prec_losses.update(__to_tensor__(loss)[0], batch_size)
prec_top1.update(prec_1[0], batch_size)
prec_top3.update(prec_3[0], batch_size)
prec_top5.update(prec_5[0], batch_size)
# measure elapsed time
batch_timer.update(time.time() - since)
since = time.time()
# progress
log_msg = ('\n[{mode}][epoch:{epoch}][iter:({batch}/{size})]'+
'[lr:{lr}] loss: {loss:.4f} | top1: {top1:.4f} | ' +
'top3: {top3:.4f} | top5: {top5:.4f} | eta: ' +
'(data:{dt:.3f}s),(batch:{bt:.3f}s),(total:{tt:})') \
.format(
mode=mode,
epoch=self.epoch+1,
batch=batch_idx+1,
size=len(dataloader),
lr=self.lr_scheduler.get_lr()[0],
loss=prec_losses.avg,
top1=prec_top1.avg,
top3=prec_top3.avg,
top5=prec_top5.avg,
dt=data_timer.val,
bt=batch_timer.val,
tt=bar.elapsed_td)
print(log_msg)
bar.next()
bar.finish()
# write to logger
self.logger[mode].append([self.epoch+1,
self.lr_scheduler.get_lr()[0],
prec_losses.avg,
prec_top1.avg,
prec_top3.avg,
prec_top5.avg])
# save model
if mode == 'val' and prec_top1.avg > self.best_acc:
print('best_acc={}, new_best_acc={}'.format(self.best_acc, prec_top1.avg))
self.best_acc = prec_top1.avg
state = {
'epoch': self.epoch,
'acc': self.best_acc,
'optimizer': self.optimizer.state_dict(),
}
self.model.write_to(state)
filename = 'bestshot.pth.tar'
self.__save_checkpoint__(state, ckpt=self.ckpt_path, filename=filename)
def train(self, mode, epoch, train_loader, val_loader):
self.epoch = epoch
if mode in ['train']:
self.model.train()
self.lr_scheduler.step()
dataloader = train_loader
else:
assert mode == 'val'
self.model.eval()
dataloader = val_loader
self.__solve__(mode, epoch, dataloader)
| 36.872807 | 102 | 0.512906 | 7,711 | 0.917212 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.148686 |
2ac6f82afb425514d16cfc4ba1244d63a2476877 | 13,248 | py | Python | models/p2pnet.py | mfatiho/CrowdCounting-P2PNet | b89ecf9b374bee8973c331bb44b99611152cd3ac | [
"BSD-3-Clause"
] | 89 | 2021-08-09T12:51:34.000Z | 2022-03-25T09:06:40.000Z | models/p2pnet.py | FeiGeChuanShu/CrowdCounting-P2PNet | a7c5a9546d0b5be16367db393fbbd81427c11b82 | [
"BSD-3-Clause"
] | 24 | 2021-08-16T09:17:38.000Z | 2022-03-30T08:29:02.000Z | models/p2pnet.py | FeiGeChuanShu/CrowdCounting-P2PNet | a7c5a9546d0b5be16367db393fbbd81427c11b82 | [
"BSD-3-Clause"
] | 25 | 2021-08-12T09:37:30.000Z | 2022-03-18T07:46:17.000Z | import torch
import torch.nn.functional as F
from torch import nn
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher_crowd
import numpy as np
import time
# the network framework of the regression branch
class RegressionModel(nn.Module):
    """Point-regression head: predicts a 2-D offset for every anchor point.

    Given one pyramid feature map of shape (B, C, H, W), returns a tensor of
    shape (B, H * W * num_anchor_points, 2) holding (x, y) offsets.

    NOTE(review): ``conv3``/``conv4`` (and ``act3``/``act4``) are created but
    never used in ``forward``; they are kept so state_dict keys and
    RNG-dependent initialisation stay identical to the reference weights.
    """

    def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
        super(RegressionModel, self).__init__()

        # Layer creation order is preserved deliberately (see class note).
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)

    def forward(self, x):
        """Run the regression sub-branch on one feature map."""
        features = self.act2(self.conv2(self.act1(self.conv1(x))))
        offsets = self.output(features)
        # (B, A*2, H, W) -> (B, H, W, A*2) -> (B, H*W*A, 2)
        offsets = offsets.permute(0, 2, 3, 1)
        return offsets.contiguous().view(offsets.shape[0], -1, 2)
# the network framework of the classification branch
class ClassificationModel(nn.Module):
    """Classification head: per-anchor-point confidence scores.

    Given one pyramid feature map of shape (B, C, H, W), returns a tensor of
    shape (B, H * W * num_anchor_points, num_classes) of raw scores.

    NOTE(review): ``conv3``/``conv4``/``act3``/``act4`` and ``output_act``
    are created but never used in ``forward``; they are kept so state_dict
    keys and RNG-dependent initialisation match the reference weights.
    ``prior`` is accepted but unused here — TODO confirm it was meant for a
    focal-loss style bias initialisation.
    """

    def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
        super(ClassificationModel, self).__init__()

        self.num_classes = num_classes
        self.num_anchor_points = num_anchor_points

        # Layer creation order is preserved deliberately (see class note).
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, x):
        """Run the classification sub-branch on one feature map."""
        logits = self.output(self.act2(self.conv2(self.act1(self.conv1(x)))))
        # (B, A*C, H, W) -> (B, H, W, A*C)
        logits = logits.permute(0, 2, 3, 1)
        batch, dim1, dim2 = logits.shape[0], logits.shape[1], logits.shape[2]
        # split the channel dimension into (anchors, classes), then flatten
        # the spatial and anchor dimensions together
        logits = logits.view(batch, dim1, dim2, self.num_anchor_points, self.num_classes)
        return logits.contiguous().view(x.shape[0], -1, self.num_classes)
# generate the reference points in grid layout
def generate_anchor_points(stride=16, row=3, line=3):
    """Build a (row * line, 2) grid of cell-local reference points.

    The points form an evenly spaced ``row`` x ``line`` grid inside a single
    ``stride`` x ``stride`` cell, centred on the cell origin, so every
    coordinate lies in (-stride / 2, stride / 2).
    """
    step_y = stride / row
    step_x = stride / line

    # centre coordinate of each grid position, relative to the cell centre
    xs = (np.arange(1, line + 1) - 0.5) * step_x - stride / 2
    ys = (np.arange(1, row + 1) - 0.5) * step_y - stride / 2

    grid_x, grid_y = np.meshgrid(xs, ys)
    return np.stack([grid_x.ravel(), grid_y.ravel()], axis=1)
# shift the meta-anchors to obtain anchor points over the whole feature map
def shift(shape, stride, anchor_points):
    """Replicate *anchor_points* at the centre of every cell of a grid.

    :param shape: (rows, cols) of the feature map
    :param stride: pixel stride between cells
    :param anchor_points: (A, 2) array of per-cell point offsets
    :return: (K*A, 2) array, K = rows*cols grid cells
    """
    centres_x = (np.arange(shape[1]) + 0.5) * stride
    centres_y = (np.arange(shape[0]) + 0.5) * stride
    grid_x, grid_y = np.meshgrid(centres_x, centres_y)
    cell_centres = np.stack((grid_x.ravel(), grid_y.ravel()), axis=1)
    num_anchors = anchor_points.shape[0]
    num_cells = cell_centres.shape[0]
    # broadcast: one copy of the anchor pattern per cell centre
    combined = anchor_points[np.newaxis, :, :] + cell_centres[:, np.newaxis, :]
    return combined.reshape((num_cells * num_anchors, 2))
# this class generate all reference points on all pyramid levels
class AnchorPoints(nn.Module):
    """Generate all reference (anchor) points for every pyramid level.

    :param pyramid_levels: FPN levels to cover, defaults to [3, 4, 5, 6, 7]
    :param strides: stride per level, defaults to 2**level
    :param row, line: grid layout of points inside one stride cell
    """
    def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
        super(AnchorPoints, self).__init__()
        if pyramid_levels is None:
            self.pyramid_levels = [3, 4, 5, 6, 7]
        else:
            self.pyramid_levels = pyramid_levels
        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            # BUG FIX: a caller-supplied ``strides`` was previously discarded,
            # leaving ``self.strides`` undefined and crashing forward().
            self.strides = strides
        self.row = row
        self.line = line
    def forward(self, image):
        """Return all anchor points for *image* as a (1, N, 2) float32 tensor."""
        image_shape = np.array(image.shape[2:])
        # per-level feature-map sizes (ceil division by 2**level)
        # NOTE(review): shapes use 2**level while shifting uses self.strides --
        # only consistent when strides keep their default; confirm for custom strides.
        image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]
        all_anchor_points = np.zeros((0, 2)).astype(np.float32)
        # get reference points for each level
        for idx, p in enumerate(self.pyramid_levels):
            anchor_points = generate_anchor_points(2 ** p, row=self.row, line=self.line)
            shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points)
            all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0)
        all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
        # send reference points to device
        if torch.cuda.is_available():
            return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda()
        else:
            return torch.from_numpy(all_anchor_points.astype(np.float32))
class Decoder(nn.Module):
    """Three-level FPN decoder: backbone stages (C3, C4, C5) -> pyramid (P3, P4, P5)."""
    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super(Decoder, self).__init__()
        # Module creation order is preserved so seeded initialisation matches.
        # P5: 1x1 lateral on C5, nearest upsample for the P4 merge, 3x3 smooth.
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        # P4: 1x1 lateral on C4 plus upsampled P5, then 3x3 smooth.
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
        # P3: 1x1 lateral on C3 plus upsampled P4, then 3x3 smooth.
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
    def forward(self, inputs):
        """``inputs`` is (C3, C4, C5); returns [P3, P4, P5]."""
        c3, c4, c5 = inputs
        lateral5 = self.P5_1(c5)
        up5 = self.P5_upsampled(lateral5)
        p5 = self.P5_2(lateral5)
        merged4 = self.P4_1(c4) + up5
        up4 = self.P4_upsampled(merged4)
        p4 = self.P4_2(merged4)
        merged3 = self.P3_1(c3) + up4
        p3 = self.P3_2(merged3)
        return [p3, p4, p5]
# the definition of the P2PNet model
class P2PNet(nn.Module):
    """P2PNet head: backbone features -> FPN -> point regression + classification."""
    def __init__(self, backbone, row=2, line=2):
        super().__init__()
        self.backbone = backbone
        self.num_classes = 2
        # total reference points generated per feature-map location
        num_anchor_points = row * line
        self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
        self.classification = ClassificationModel(num_features_in=256,
                                                  num_classes=self.num_classes,
                                                  num_anchor_points=num_anchor_points)
        self.anchor_points = AnchorPoints(pyramid_levels=[3, ], row=row, line=line)
        self.fpn = Decoder(256, 512, 512)
    def forward(self, samples: NestedTensor):
        """Return {'pred_logits': class scores, 'pred_points': decoded coordinates}."""
        backbone_feats = self.backbone(samples)
        # build the feature pyramid from three backbone stages
        pyramid = self.fpn([backbone_feats[1], backbone_feats[2], backbone_feats[3]])
        batch_size = backbone_feats[0].shape[0]
        # both branches read the same pyramid level; offsets are scaled up
        offsets = self.regression(pyramid[1]) * 100  # 8x
        logits = self.classification(pyramid[1])
        anchors = self.anchor_points(samples).repeat(batch_size, 1, 1)
        # decode: predicted point = reference anchor + regressed offset
        return {'pred_logits': logits, 'pred_points': offsets + anchors}
class SetCriterion_Crowd(nn.Module):
    # DETR-style criterion for crowd counting: a matcher pairs predictions with
    # ground-truth points, then classification and point-regression losses are
    # computed on the matched pairs.
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
                NOTE(review): loss_points() emits the key 'loss_point' -- make
                sure weight_dict uses the same key (see build()).
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # class 0 is the no-object/background class, down-weighted by eos_coef
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[0] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)
    def loss_labels(self, outputs, targets, indices, num_points):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # every prediction defaults to background (class 0); matched ones get their label
        target_classes = torch.full(src_logits.shape[:2], 0,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o
        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}
        return losses
    def loss_points(self, outputs, targets, indices, num_points):
        # MSE between matched predicted points and ground-truth points,
        # normalised by the (world-size-averaged) number of target points.
        assert 'pred_points' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_points = outputs['pred_points'][idx]
        target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = F.mse_loss(src_points, target_points, reduction='none')
        losses = {}
        losses['loss_point'] = loss_bbox.sum() / num_points
        return losses
    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices -> (batch indices, prediction indices)
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx
    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices -> (batch indices, target indices)
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx
    def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
        # dispatch a loss by name; raises AssertionError on unknown names
        loss_map = {
            'labels': self.loss_labels,
            'points': self.loss_points,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_points, **kwargs)
    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}
        indices1 = self.matcher(output1, targets)
        # average the target-point count over all distributed workers, clamped to >= 1
        num_points = sum(len(t["labels"]) for t in targets)
        num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_points)
        num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))
        return losses
# create the P2PNet model
def build(args, training):
    """Construct the P2PNet model and, when ``training`` is True, its criterion.

    :param args: namespace providing row, line, point_loss_coef, eos_coef and
        whatever build_backbone/build_matcher_crowd need
    :param training: when False only the model is returned
    :return: model, or (model, criterion) when training
    """
    # treats persons as a single class (plus the implicit background class 0)
    num_classes = 1
    backbone = build_backbone(args)
    model = P2PNet(backbone, args.row, args.line)
    if not training:
        return model
    # BUG FIX: the criterion emits its point loss under the key 'loss_point'
    # (see SetCriterion_Crowd.loss_points); the weight table previously used
    # 'loss_points', so the coefficient never matched the emitted loss key.
    weight_dict = {'loss_ce': 1, 'loss_point': args.point_loss_coef}
    losses = ['labels', 'points']
    matcher = build_matcher_crowd(args)
    criterion = SetCriterion_Crowd(num_classes,
                                   matcher=matcher, weight_dict=weight_dict,
                                   eos_coef=args.eos_coef, losses=losses)
    return model, criterion
2ac6fe1fc438d094fff91a6578a469fe05245d15 | 7,110 | py | Python | visualize_fullscene.py | ronhlee/retinanet_ATR | 32b043850abdd19bb142a88e2f2e16179aea4ff7 | [
"Apache-2.0"
] | 1 | 2020-09-20T03:07:02.000Z | 2020-09-20T03:07:02.000Z | visualize_fullscene.py | ronhlee/retinanet_ATR | 32b043850abdd19bb142a88e2f2e16179aea4ff7 | [
"Apache-2.0"
] | null | null | null | visualize_fullscene.py | ronhlee/retinanet_ATR | 32b043850abdd19bb142a88e2f2e16179aea4ff7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torchvision
import os
import argparse
import warnings
warnings.filterwarnings('ignore')
from PIL import Image, ImageDraw
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
import torchvision.ops
from retinanet.dataloader import CSVDataset, Resizer
from retinanet import Calc_AP
print('CUDA available: {}'.format(torch.cuda.is_available()))
def main(args=None):
    """Tile full-scene images into overlapping 800x800 patches, run a
    RetinaNet model on every patch, merge detections with NMS, then save
    per-image ``.npy`` detections and ``.png`` visualisations.

    :param args: optional list of CLI arguments; defaults to ``sys.argv``
        (previously this parameter was accepted but ignored).
    """
    def str2bool(v):
        # argparse's ``type=bool`` calls bool() on the raw string, so any
        # non-empty value -- including "False" -- parsed as True. Parse the
        # usual spellings explicitly instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % v)

    parser = argparse.ArgumentParser(description='Simple inference/visualization script for a '
                                                 'RetinaNet network.')
    parser.add_argument('csv_classes', help='Path to file containing class list')
    parser.add_argument('csv_val', help='Path to file containing validation annotations')
    parser.add_argument('model', help='Path to model (.pt) file.')
    parser.add_argument('outputdir', help='Output directory')
    # BUG FIX: both boolean flags previously used type=bool.
    parser.add_argument('--fullscene', type=str2bool, default=True,
                        help='If True, don\'t rescale the image.')
    parser.add_argument('--detthresh', type=float, default=0.0,
                        help='detection threshold for visualizing')
    parser.add_argument('--score_thresh', type=float, default=0.05,
                        help='score threshold to discard background/reduce runs processing time')
    parser.add_argument('--iou_nms1', type=float, default=0.3,
                        help='iou for nms used during validation and inference')
    parser.add_argument('--iou_nms2', type=float, default=0.5,
                        help='iou for nms used during reconstruction of the image patches')
    parser.add_argument('--pix_overlap', type=int, default=200,
                        help='number of pixel overlapping between patches')
    parser.add_argument('--evaluate', type=str2bool, default=False,
                        help='If csv_val contains truth info, trigger this flag to calculate'
                             'mAP score at the end.')
    # BUG FIX: honour the ``args`` parameter (previously always parse_args()).
    args = parser.parse_args(args)

    if not os.path.isdir(args.outputdir):
        os.makedirs(args.outputdir)

    if args.fullscene:
        dataset_val = CSVDataset(train_file=args.csv_val, class_list=args.csv_classes)
    else:
        dataset_val = CSVDataset(train_file=args.csv_val, class_list=args.csv_classes,
                                 transform=transforms.Compose([Resizer()]))
    dataloader_val = DataLoader(dataset_val, num_workers=1, shuffle=False)

    # Load the trained model once; it was previously re-loaded from disk for
    # every image in the loop, which is pure overhead.
    retinanet = torch.load(args.model)
    retinanet = retinanet.cuda()
    retinanet.visualize = True
    retinanet.score_thresh = args.score_thresh
    retinanet.iou_nms1 = args.iou_nms1
    retinanet.eval()

    AllImgsAllDets = []
    for idx, data in enumerate(dataloader_val):
        iid = os.path.basename(dataset_val.image_names[idx])[:16]
        data['img'] = data['img'].permute(0, 3, 1, 2)
        _, ens, totalrows, totalcols = data['img'].shape
        # Tile the scene into 800x800 patches with the requested overlap.
        nonoverlap = 800 - args.pix_overlap
        ufpatches = data['img'].unfold(2, 800, nonoverlap).unfold(3, 800, nonoverlap)
        _, _, numrow_p, numcol_p, _, _ = ufpatches.shape
        patches = ufpatches.contiguous().view(-1, 1, 800, 800)

        Allbbox = torch.tensor([])
        Allscore = torch.tensor([])
        Allclassification = torch.tensor([])
        Allclassscore = torch.tensor([])
        for patchidx in range(patches.shape[0]):
            patch = patches[patchidx:patchidx + 1, ...]
            with torch.no_grad():
                class_scores, transformed_anchors = retinanet(patch.cuda().float())
            if list(class_scores.shape) == [0]:
                print('No detections')
                continue
            scores, classification = class_scores.max(dim=1)
            # Compile all detections
            Allscore = torch.cat((Allscore, scores.cpu().float()))
            Allclassscore = torch.cat((Allclassscore, class_scores.cpu().float()))
            Allclassification = torch.cat((Allclassification, classification.cpu().float()))
            # Shift patch-local boxes back into full-scene coordinates.
            shifted_anchors = torch.empty(transformed_anchors.shape)
            shifted_anchors[:, [0, 2]] = transformed_anchors[:, [0, 2]].cpu() + \
                                         patchidx % numcol_p * nonoverlap
            shifted_anchors[:, [1, 3]] = transformed_anchors[:, [1, 3]].cpu() + \
                                         patchidx // numcol_p * nonoverlap
            Allbbox = torch.cat((Allbbox, shifted_anchors))

        if list(Allbbox.shape) == [0]:
            # No detections in the whole scene: save an empty result and keep
            # the top* tensors empty-but-correctly-shaped so the code below
            # degrades gracefully (previously this path raised NameError on
            # ``anchors_nms_idx``).
            np.save(os.path.join(args.outputdir, '%s.npy' % iid),
                    np.zeros((0, 7), dtype=np.float32))
            topbbox = torch.zeros((0, 4))
            topclassification = torch.zeros(0)
            topscore = torch.zeros(0)
        else:
            # Merge overlapping patch detections with a second NMS pass.
            anchors_nms_idx = torchvision.ops.nms(Allbbox, Allscore, args.iou_nms2)
            # Alldetections columns: [detection score, bbox(4), per-class scores]
            Alldetections = np.hstack((Allscore[anchors_nms_idx, None].numpy(),
                                       Allbbox[anchors_nms_idx, :].numpy(),
                                       Allclassscore[anchors_nms_idx, :].numpy()))
            np.save(os.path.join(args.outputdir, '%s.npy' % iid), Alldetections)
            # reformat results for mAP score
            Allbbox = Allbbox[anchors_nms_idx]
            Allclassification = Allclassification[anchors_nms_idx]
            Allscore = Allscore[anchors_nms_idx]
            topbbox = Allbbox[Allscore >= args.detthresh]
            topclassification = Allclassification[Allscore >= args.detthresh]
            topscore = Allscore[Allscore >= args.detthresh]

        # Split per-class detections (class 0 and class 1) for the AP metric.
        thisImgAllDets = [torch.cat((topbbox[topclassification == 0],
                                     topscore[topclassification == 0, None]), dim=1),
                          torch.cat((topbbox[topclassification == 1],
                                     topscore[topclassification == 1, None]), dim=1)]
        AllImgsAllDets.append(thisImgAllDets)

        # Visualize the whole scene
        img = np.array(255 * data['img'][0, 0, ...])
        img[img < 0] = 0
        img[img > 255] = 255
        fullscene = Image.fromarray(img).convert(mode='RGB')
        im_draw = ImageDraw.Draw(fullscene)
        for i in range(topscore.numpy().shape[0]):
            bbox1 = topbbox[i, :]
            im_draw.rectangle(list(bbox1), outline='red')
            x0y0 = list(bbox1[:2])
            x0y0[1] -= 10
            label_name1 = dataset_val.labels[int(topclassification[i])] + ', ' + str(topscore[i].cpu().numpy())[:4]
            im_draw.text(x0y0, label_name1, fill='yellow')
        fullscene.save(os.path.join(args.outputdir, '%s.png' % iid))

    # BUG FIX: mAP evaluation now only runs when requested; it previously ran
    # unconditionally, which fails when csv_val carries no ground truth.
    if args.evaluate:
        _ = Calc_AP(dataloader_val, AllImgsAllDets, iou_threshold=0.5)
| 47.4 | 115 | 0.598312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,110 | 0.156118 |
2ac870289e4a135beaad406a435f626a1f8fb78e | 518 | py | Python | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | data_sqlalchemy/word.py | natter1/estonian_learner | da7837f0d64f4c1f6a212a9c473252c4b834699a | [
"MIT"
] | null | null | null | import datetime
from data_sqlalchemy.modelbase import SqlAlchemyBase
import sqlalchemy as sa
class Word(SqlAlchemyBase):
    """A vocabulary word; the word string itself is the primary key."""
    __tablename__ = "words"
    # id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    id = sa.Column(sa.String, primary_key=True)
    created_date = sa.Column(sa.DateTime, default=datetime.datetime.now, index=True)
    # BUG FIX: ``hint = sa.String()`` assigned a bare type object that the ORM
    # ignores, so no column was ever created. Declare a real nullable column.
    # (SQLite supports nullable columns; a migration is needed for existing DBs.)
    hint = sa.Column(sa.String, nullable=True)
    def __repr__(self):  # for more useful debug messages
        # BUG FIX: repr previously said "<Package ...>" (copy/paste leftover).
        return f"<Word {self.id}>"
| 32.375 | 84 | 0.727799 | 421 | 0.812741 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.322394 |
2acb0f83d07a4c5d2b9efe4b3bc5c3cf86acc90c | 9,023 | py | Python | nodes/BASE/socket_type.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | null | null | null | nodes/BASE/socket_type.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | 1 | 2021-12-27T06:39:08.000Z | 2021-12-27T06:39:08.000Z | nodes/BASE/socket_type.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | null | null | null | import bpy
from bpy.props import *
from ...preferences import get_pref
def update_node(self, context):
    """Property-update callback shared by all RenderNode sockets.

    Pushes this socket's current value into its owning node's dict and asks
    the node to re-evaluate the tree. Any failure (e.g. the socket is not yet
    attached to a node) is printed and otherwise ignored -- a best-effort
    update, matching the original behaviour.
    """
    try:
        owner = self.node
        owner.node_dict[self.name] = self.value
        owner.update_parms()  # re-evaluate the node tree
    except Exception as exc:
        print(exc)
class RenderNodeSocketInterface(bpy.types.NodeSocketInterface):
    # Group-node sidebar counterpart for RenderNodeSocket; no extra UI needed.
    bl_socket_idname = 'RenderNodeSocket'
    def draw(self, context, layout):
        pass
    def draw_color(self, context):
        # cyan in the group interface list
        return (0, 1, 1, 1)
class RenderNodeSocket(bpy.types.NodeSocket):
    # Base socket: shows an editable value when unlinked, a plain label when
    # linked. Subclasses override ``value``'s property type and draw_color().
    bl_idname = 'RenderNodeSocket'
    bl_label = 'RenderNodeSocket'
    text: StringProperty(default='custom text')
    value: IntProperty(default=0, update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop(self, 'value', text=self.text)
    def draw_color(self, context, node):
        # neutral grey
        return 0.5, 0.5, 0.5, 1
class RenderNodeSocketBool(RenderNodeSocket):
    # Boolean-valued socket; differs from the base only in value type and colour.
    bl_idname = 'RenderNodeSocketBool'
    bl_label = 'RenderNodeSocketBool'
    value: BoolProperty(default=False, update=update_node)
    def draw_color(self, context, node):
        return 0.9, 0.7, 1.0, 1
class RenderNodeSocketInt(RenderNodeSocket):
    # Integer-valued socket; differs from the base only in colour.
    bl_idname = 'RenderNodeSocketInt'
    bl_label = 'RenderNodeSocketInt'
    value: IntProperty(default=0, update=update_node)
    def draw_color(self, context, node):
        return 0, 0.9, 0.1, 1
class RenderNodeSocketFloat(RenderNodeSocket):
    # Float-valued socket; differs from the base only in value type.
    bl_idname = 'RenderNodeSocketFloat'
    bl_label = 'RenderNodeSocketFloat'
    value: FloatProperty(default=0, update=update_node)
    def draw_color(self, context, node):
        return 0.5, 0.5, 0.5, 1
class RenderNodeSocketString(RenderNodeSocket):
    # String-valued socket; differs from the base only in value type and colour.
    bl_idname = 'RenderNodeSocketString'
    bl_label = 'RenderNodeSocketString'
    value: StringProperty(default='', update=update_node)
    def draw_color(self, context, node):
        return 0.2, 0.7, 1.0, 1
# Vector and Subtype
####################
class RenderNodeSocketVector(RenderNodeSocket):
    # 3-component vector socket; drawn in a column so each component gets a row.
    bl_idname = 'RenderNodeSocketVector'
    bl_label = 'RenderNodeSocketVector'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='NONE',
                               update=update_node)
    def draw_color(self, context, node):
        return 0.5, 0.3, 1.0, 1
    def draw(self, context, layout, node, text):
        # column layout (unlike the base's row) to stack the vector components
        col = layout.column(align=1)
        if self.is_linked:
            col.label(text=self.text)
        else:
            col.prop(self, 'value', text=self.text)
class RenderNodeSocketXYZ(RenderNodeSocketVector):
    # Vector socket with XYZ subtype (axis-labelled UI), default (1, 1, 1).
    bl_idname = 'RenderNodeSocketXYZ'
    bl_label = 'RenderNodeSocketXYZ'
    value: FloatVectorProperty(name='Vector', default=(1.0, 1.0, 1.0), subtype='XYZ',
                               update=update_node)
class RenderNodeSocketTranslation(RenderNodeSocketVector):
    # Vector socket with TRANSLATION subtype (distance-unit UI).
    bl_idname = 'RenderNodeSocketTranslation'
    bl_label = 'RenderNodeSocketTranslation'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='TRANSLATION',
                               update=update_node)
class RenderNodeSocketEuler(RenderNodeSocketVector):
    # Vector socket with EULER subtype (rotation-angle UI).
    bl_idname = 'RenderNodeSocketEuler'
    bl_label = 'RenderNodeSocketEuler'
    value: FloatVectorProperty(name='Vector', default=(0, 0, 0), subtype='EULER',
                               update=update_node)
class RenderNodeSocketColor(RenderNodeSocketVector):
    # RGB colour socket (COLOR subtype draws a colour picker), clamped to [0, 1].
    bl_idname = 'RenderNodeSocketColor'
    bl_label = 'RenderNodeSocketColor'
    value: FloatVectorProperty(update=update_node, subtype='COLOR',
                               default=(1.0, 1.0, 1.0),
                               min=0.0, max=1.0)
    def draw_color(self, context, node):
        return 0.9, 0.9, 0.3, 1
# Object and subtype
##################
class RenderNodeSocketObject(RenderNodeSocket):
    # Object-pointer socket: shows an object picker plus a "select in viewport"
    # operator button once an object has been assigned.
    bl_idname = 'RenderNodeSocketObject'
    bl_label = 'RenderNodeSocketObject'
    value: PointerProperty(type=bpy.types.Object, update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop(self, 'value', text=self.text)
            if self.value:
                # shortcut operator to select the referenced object in the scene
                row.operator('rsn.select_object', icon='RESTRICT_SELECT_OFF', text='').name = self.value.name
    def draw_color(self, context, node):
        return 1, 0.6, 0.3, 1
def poll_camera(self, object):
    # PointerProperty poll callback: only camera objects may be assigned.
    return object.type == 'CAMERA'
class RenderNodeSocketCamera(RenderNodeSocket):
    # Camera-pointer socket: like RenderNodeSocketObject but restricted to
    # cameras via poll_camera.
    bl_idname = 'RenderNodeSocketCamera'
    bl_label = 'RenderNodeSocketCamera'
    value: PointerProperty(type=bpy.types.Object, update=update_node, poll=poll_camera)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            # NOTE(review): uses text='' unlike RenderNodeSocketObject, which
            # shows self.text -- confirm whether the hidden label is intended.
            row.prop(self, 'value', text='')
            if self.value:
                row.operator('rsn.select_object', icon='RESTRICT_SELECT_OFF', text='').name = self.value.name
    def draw_color(self, context, node):
        return 1, 0.6, 0.3, 1
# other pointer property
###############
class RenderNodeSocketMaterial(RenderNodeSocket):
    # Material-pointer socket.
    bl_idname = 'RenderNodeSocketMaterial'
    bl_label = 'RenderNodeSocketMaterial'
    value: PointerProperty(type=bpy.types.Material, update=update_node)
    def draw_color(self, context, node):
        return 1, 0.4, 0.4, 1
class RenderNodeSocketWorld(RenderNodeSocket):
    # World-pointer socket.
    bl_idname = 'RenderNodeSocketWorld'
    bl_label = 'RenderNodeSocketWorld'
    value: PointerProperty(type=bpy.types.World, update=update_node)
    def draw_color(self, context, node):
        return 1, 0.4, 0.4, 1
class RenderNodeSocketViewLayer(RenderNodeSocket):
    # View-layer socket: stores the layer *name* and offers a searchable
    # dropdown over the current scene's view layers.
    bl_idname = 'RenderNodeSocketViewLayer'
    bl_label = 'RenderNodeSocketViewLayer'
    value: StringProperty(update=update_node)
    def draw(self, context, layout, node, text):
        row = layout.row(align=1)
        if self.is_linked:
            row.label(text=self.text)
        else:
            row.prop_search(self, "value", context.scene, "view_layers", text='')
    def draw_color(self, context, node):
        return 0.2, 0.7, 1.0, 1
### old types ###
#################
class RSNodeSocketTaskSettings(bpy.types.NodeSocket):
    # Legacy RSN socket: when unlinked it shows a "search and link" operator
    # button instead of a label, pre-filled with this socket's node and port.
    bl_idname = 'RSNodeSocketTaskSettings'
    bl_label = 'RSNodeSocketTaskSettings'
    def draw(self, context, layout, node, text):
        if not self.is_linked:
            io = layout.operator('rsn.search_and_link', text=text, icon='ADD')
            io.node_name = node.name
            # 666 marks the unused direction; the other id is parsed from the
            # socket's RNA path, e.g. 'inputs[3]'.
            # NOTE(review): [-2:-1] extracts a single character, so this
            # appears to assume port indices < 10 -- confirm.
            if self.is_output:
                io.output_id = int(self.path_from_id()[-2:-1])
                io.input_id = 666
            else:
                io.input_id = int(self.path_from_id()[-2:-1])
                io.output_id = 666
        else:
            layout.label(text=text)
    def draw_color(self, context, node):
        return 0.6, 0.6, 0.6, 1.0
class RSNodeSocketCamera(bpy.types.NodeSocket):
    # Legacy RSN camera socket: plain label, grey.
    bl_idname = 'RSNodeSocketCamera'
    bl_label = 'RSNodeSocketCamera'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0.6, 0.6, 0.6, 1.0
class RSNodeSocketRenderSettings(bpy.types.NodeSocket):
    # Legacy RSN render-settings socket: plain label, green.
    bl_idname = 'RSNodeSocketRenderSettings'
    bl_label = 'RSNodeSocketRenderSettings'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0, 1, 0.5, 1.0
class RSNodeSocketOutputSettings(bpy.types.NodeSocket):
    """Legacy RSN output-settings socket: plain label, orange."""
    bl_idname = 'RSNodeSocketOutputSettings'
    # BUG FIX: the label previously contained a stray space
    # ('RSNod eSocketOutputSettings').
    bl_label = 'RSNodeSocketOutputSettings'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 1, 0.8, 0.2, 1.0
class RSNodeSocketRenderList(bpy.types.NodeSocket):
    # Legacy RSN render-list socket: plain label, near-white.
    bl_idname = 'RSNodeSocketRenderList'
    bl_label = 'RSNodeSocketRenderList'
    def draw(self, context, layout, node, text):
        layout.label(text=text)
    def draw_color(self, context, node):
        return 0.95, 0.95, 0.95, 1.0
# All socket classes registered with Blender: legacy RSN sockets first, then
# the newer RenderNode sockets. Order defines registration order.
classes = (
    RSNodeSocketCamera,
    RSNodeSocketRenderSettings,
    RSNodeSocketOutputSettings,
    RSNodeSocketTaskSettings,
    RSNodeSocketRenderList,
    # new
    RenderNodeSocketInterface,
    RenderNodeSocket,
    RenderNodeSocketObject,
    RenderNodeSocketCamera,
    RenderNodeSocketMaterial,
    RenderNodeSocketWorld,
    RenderNodeSocketViewLayer,
    RenderNodeSocketBool,
    RenderNodeSocketInt,
    RenderNodeSocketFloat,
    RenderNodeSocketString,
    RenderNodeSocketVector,
    RenderNodeSocketXYZ,
    RenderNodeSocketTranslation,
    RenderNodeSocketEuler,
    RenderNodeSocketColor,
)
def register():
    """Register all RSN socket classes with Blender."""
    for cls in classes:
        bpy.utils.register_class(cls)
def unregister():
    """Unregister all RSN socket classes from Blender."""
    for cls in classes:
        bpy.utils.unregister_class(cls)
| 27.259819 | 109 | 0.657653 | 7,677 | 0.850826 | 0 | 0 | 0 | 0 | 0 | 0 | 1,406 | 0.155824 |
2acb3aee2e0f5e506188c20bf399d1690f3f87a1 | 35 | py | Python | webapp/journal_plugins/example_plugin/models.py | TheCDC/journal_app | 18c84acfc7b996329d34c3bdd54348cdfdd55252 | [
"MIT"
] | 2 | 2018-03-08T16:21:45.000Z | 2018-10-22T02:13:22.000Z | webapp/journal_plugins/example_plugin/models.py | TheCDC/journal_app | 18c84acfc7b996329d34c3bdd54348cdfdd55252 | [
"MIT"
] | 3 | 2018-05-25T04:21:09.000Z | 2020-02-10T00:46:37.000Z | webapp/journal_plugins/example_plugin/models.py | TheCDC/journal_app | 18c84acfc7b996329d34c3bdd54348cdfdd55252 | [
"MIT"
] | null | null | null | from webapp.extensions import db
| 8.75 | 32 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2acb92b4e54de96f237951644e26af0eb47db325 | 5,823 | py | Python | Unsupervised/autoencoder/evaluate_model.py | darshil0193/CSC-522---Object-Recognition | f357e6fd3cd69e7335bc25d55eb57b9b5b4e5d88 | [
"MIT"
] | null | null | null | Unsupervised/autoencoder/evaluate_model.py | darshil0193/CSC-522---Object-Recognition | f357e6fd3cd69e7335bc25d55eb57b9b5b4e5d88 | [
"MIT"
] | null | null | null | Unsupervised/autoencoder/evaluate_model.py | darshil0193/CSC-522---Object-Recognition | f357e6fd3cd69e7335bc25d55eb57b9b5b4e5d88 | [
"MIT"
] | 1 | 2018-12-27T21:56:13.000Z | 2018-12-27T21:56:13.000Z | from keras.models import load_model
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from sklearn.cluster import KMeans
from time import time
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the purity of the cluster results
def clustering_purity(cluster_results, index_to_name):
    """Compute overall clustering purity (higher is better).

    :param cluster_results: DataFrame with columns 'cluster' (assignment)
        and 'class' (ground-truth label).
    :param index_to_name: dict mapping a class label (as stored in the
        'class' column) to a human-readable name.
    :return: (total purity, per-cluster summary DataFrame)
    """
    clusters = cluster_results['cluster'].unique()
    m = cluster_results.shape[0]
    # Purity for each cluster
    cluster_purities = []
    cluster_sizes = []
    most_common_classes = []
    for j in clusters:
        cluster_j = cluster_results[cluster_results['cluster'] == j]
        m_j = cluster_j.shape[0]
        cluster_sizes.append(m_j)
        classes = cluster_j['class'].unique()
        # Class probability distribution for this cluster (parallel to `classes`)
        class_probabilities = []
        for i in classes:
            m_ij = cluster_j[cluster_j['class'] == i].shape[0]
            class_probabilities.append(m_ij / m_j)
        best = int(np.argmax(class_probabilities))
        cluster_purities.append(class_probabilities[best])
        # BUG FIX: look up the class *label* (classes[best]). Previously the
        # positional index within `classes` was used as the key, which names
        # the wrong class whenever labels don't coincide with positions.
        most_common_classes.append(index_to_name[classes[best]])
    total_purity = 0
    # size-weighted average of the per-cluster purities
    for size, purity in zip(cluster_sizes, cluster_purities):
        total_purity += (size / m) * purity
    # Pandas dataframe containing per cluster results
    results_table = pd.DataFrame({'cluster': clusters,
                                  'cluster_size': cluster_sizes,
                                  'most_common_class': most_common_classes,
                                  'purity': cluster_purities,
                                  'total_purity': total_purity})
    return total_purity, results_table
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the entropy of the cluster results
def clustering_entropy(cluster_results, index_to_name):
    """Compute overall clustering entropy (lower is better).

    :param cluster_results: DataFrame with columns 'cluster' (assignment)
        and 'class' (ground-truth label).
    :param index_to_name: dict mapping a class label (as stored in the
        'class' column) to a human-readable name.
    :return: (total entropy, per-cluster summary DataFrame)
    """
    clusters = cluster_results['cluster'].unique()
    m = cluster_results.shape[0]
    # Entropy for each cluster
    cluster_entropies = []
    cluster_sizes = []
    most_common_classes = []
    for j in clusters:
        cluster_j = cluster_results[cluster_results['cluster'] == j]
        m_j = cluster_j.shape[0]
        cluster_sizes.append(m_j)
        classes = cluster_j['class'].unique()
        # Class probability distribution for this cluster (parallel to `classes`)
        class_probabilities = []
        for i in classes:
            m_ij = cluster_j[cluster_j['class'] == i].shape[0]
            class_probabilities.append(m_ij / m_j)
        # Shannon entropy of the class distribution within this cluster
        cluster_entropy = -sum(p * np.log2(p) for p in class_probabilities)
        cluster_entropies.append(cluster_entropy)
        # BUG FIX: key index_to_name with the class label itself; previously
        # the positional index of the most probable class was used as the key,
        # which names the wrong class for non-contiguous labels.
        best = int(np.argmax(class_probabilities))
        most_common_classes.append(index_to_name[classes[best]])
    total_entropy = 0
    # size-weighted average of the per-cluster entropies
    for size, entropy in zip(cluster_sizes, cluster_entropies):
        total_entropy += (size / m) * entropy
    # Pandas dataframe containing per cluster results
    results_table = pd.DataFrame({'cluster': clusters,
                                  'cluster_size': cluster_sizes,
                                  'most_common_class': most_common_classes,
                                  'entropy': cluster_entropies,
                                  'total_entropy': total_entropy})
    return total_entropy, results_table
def main():
    """Encode all Caltech-256 images with a trained autoencoder, K-means
    cluster the latent vectors, then report clustering entropy and purity.
    Results are written to CSV files next to the model name."""
    # NOTE(review): model_name[:-3] strips '.h5', so the output files become
    # e.g. 'encoder_caltech256cluster_results.csv' with no separator --
    # presumably intended, but confirm.
    model_name = 'encoder_caltech256.h5'
    encoder = load_model(model_name)
    encode_datagen = ImageDataGenerator(rescale=1. / 255)
    predict_generator = encode_datagen.flow_from_directory(
        'data/256_ObjectCategories',
        target_size=(128, 128),
        batch_size=1,
        class_mode='input', shuffle=False)
    n_images = 29780
    # Encode all images
    encoded_imgs = encoder.predict_generator(predict_generator, n_images, verbose=1)
    # Flatten encoded images to create feature vector for clustering
    encoded_imgs_feature_vecs = encoded_imgs.reshape(n_images, 8 * 8 * 600)
    # Perform K-means clustering on flattened feature vector
    print('Starting K-means..')
    t0 = time()
    kmeans = KMeans(n_clusters=256, n_init=2, n_jobs=-1)
    clusters = kmeans.fit_predict(encoded_imgs_feature_vecs)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    # Prepare data for evaluation functions
    cluster_results = pd.DataFrame({'cluster': clusters, 'class': predict_generator.classes})
    # Save cluster results
    cluster_results.to_csv(model_name[:-3] + 'cluster_results.csv', index=False)
    class_index_to_name = {v: k for k, v in predict_generator.class_indices.items()}
    print('Evaluating entropy..')
    t0 = time()
    total_entropy, entropy_per_cluster = clustering_entropy(cluster_results, index_to_name=class_index_to_name)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    print('Evaluating purity..')
    # BUG FIX: reset the timer here; previously the reported purity duration
    # also included the entire entropy evaluation.
    t0 = time()
    total_purity, purity_per_cluster = clustering_purity(cluster_results, index_to_name=class_index_to_name)
    duration = time() - t0
    print("done in %fs" % (duration))
    print()
    print('Entropy:')
    print(str(total_entropy))
    print(entropy_per_cluster.to_string())
    print('\n\n\nPurity: ')
    print(str(total_purity))
    print(purity_per_cluster.to_string())
    entropy_per_cluster.to_csv(model_name[:-3] + 'entropy_details.csv', index=False)
    purity_per_cluster.to_csv(model_name[:-3] + 'purity_details.csv', index=False)
if __name__ == '__main__':
    main()
2acbee4216495a69c5721247c8f787b4e47f69f7 | 26,954 | py | Python | pkgs/core/bdsim/core/components.py | CallumJHays/bdsim.micropython | da2e61496d3ef20c7ac4a65dba0ab18255990bec | [
"MIT"
] | null | null | null | pkgs/core/bdsim/core/components.py | CallumJHays/bdsim.micropython | da2e61496d3ef20c7ac4a65dba0ab18255990bec | [
"MIT"
] | null | null | null | pkgs/core/bdsim/core/components.py | CallumJHays/bdsim.micropython | da2e61496d3ef20c7ac4a65dba0ab18255990bec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Components of the simulation system, namely blocks, wires and plugs.
"""
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Optional as Opt, Tuple, Union, TYPE_CHECKING
from typing_extensions import Literal
from bdsim.core import np
if TYPE_CHECKING: # this lets us use type-hints without circular dependency
from bdsim.core import BlockDiagram
from collections import UserDict
# type alias
SourceType = Union['Block', 'Plug']
class Struct(UserDict):
    """
    A dict like object that allows items to be added by attribute or by key.

    For example::

        >>> d = Struct('thing')
        >>> d.a = 1
        >>> d['b'] = 2
        >>> d.a
        1
        >>> d['a']
        1
        >>> d.b
        2
        >>> str(d)
        "thing {'a': 1, 'b': 2}"
    """
    def __init__(self, name='Struct'):
        super().__init__()
        self.name = name
    def __setattr__(self, name, value):
        # 'data' (UserDict storage) and 'name' are real attributes;
        # everything else is stored as a dict item.
        if name in ['data', 'name']:
            super().__setattr__(name, value)
        else:
            self.data[name] = value
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails.
        if name == 'data':
            # guard against recursion before UserDict.__init__ has run
            raise AttributeError(name)
        try:
            return self.data[name]
        except KeyError:
            # BUG FIX: raise AttributeError (not KeyError) for missing names
            # so hasattr(), copy and pickle protocols behave correctly.
            raise AttributeError(name) from None
    def __str__(self):
        # short form: name followed by the public items
        return self.name + ' ' + str(
            {k: v
             for k, v in self.data.items() if not k.startswith('_')})
    def __repr__(self):
        # long form: one "key | type" line per public item;
        # ndarrays additionally show their shape
        def fmt(k, v):
            if isinstance(v, np.ndarray):
                return '{:12s}| {:12s}'.format(
                    k,
                    type(v).__name__ + ' ' + str(v.shape))
            else:
                return '{:12s}| {:12s}'.format(k, type(v).__name__)
        return self.name + ':\n' + '\n'.join(
            [fmt(k, v) for k, v in self.data.items() if not k.startswith('_')])
# ------------------------------------------------------------------------- #
class Plug:
    """
    Create a plug.
    :param block: The block being plugged into
    :type block: Block
    :param port: The port on the block, defaults to 0
    :type port: int, optional
    :param type: 'start' or 'end', defaults to None
    :type type: str, optional
    :return: Plug object
    :rtype: Plug
    Plugs are the interface between a wire and block and have information
    about port number and wire end. Plugs are on the end of each wire, and connect a
    Wire to a specific port on a Block.
    The ``type`` argument indicates if the ``Plug`` is at:
    - the start of a wire, ie. the port is an output port
    - the end of a wire, ie. the port is an input port
    A plug can specify a set of ports on a block.
    """
    def __init__(self, block: 'Block', port: Union[int, slice] = 0, type: Literal["start", "end"] = None):
        self.block = block
        self.port = port
        self.type = type # 'start' (output side) or 'end' (input side) of a wire
    @property
    def isslice(self):
        """
        Test if port number is a slice.
        :return: Whether the port is a slice
        :rtype: bool
        Returns ``True`` if the port is a slice, eg. ``[0:3]``, and ``False``
        for a simple index, eg. ``[2]``.
        """
        return isinstance(self.port, slice)
    @property
    def portlist(self):
        """
        Return port numbers.
        :return: Port numbers
        :rtype: int or list of int
        If the port is a simple index, eg. ``[2]`` returns 2.
        If the port is a slice, eg. ``[0:3]``, returns [0, 1, 2].
        """
        if isinstance(self.port, slice):
            # slice.step may be None; treat that as a step of 1
            return list(range(self.port.start, self.port.stop, self.port.step or 1))
        else:
            return [self.port]
    @property
    def width(self):
        """
        Return number of ports connected.
        :return: Number of ports
        :rtype: int
        If the port is a simple index, eg. ``[2]`` returns 1.
        If the port is a slice, eg. ``[0:3]``, returns 3.
        """
        return len(self.portlist)
    def __mul__(self, right: SourceType):
        """
        Operator for implicit wiring.
        :param right: A block or plug to be wired to
        :type right: Block or Plug
        :return: ``right``
        :rtype: Block or Plug
        Implements implicit wiring, where the left-hand operator is a Plug, for example::
            a = bike[2] * bd.GAIN(3)
        will connect port 2 of ``bike`` to the input of the GAIN block.
        Note that::
            a = bike[2] * func[1]
        will connect port 2 of ``bike`` to port 1 of ``func``, and port 1 of ``func``
        will be assigned to ``a``. To specify a different outport port on ``func``
        we need to use parentheses::
            a = (bike[2] * func[1])[0]
        which will connect port 2 of ``bike`` to port 1 of ``func``, and port 0 of ``func``
        will be assigned to ``a``.
        :seealso: Block.__mul__
        """
        # called for the cases:
        #  block * block
        #  block * plug
        s = self.block.bd
        #assert isinstance(right, Block), 'arguments to * must be blocks not ports (for now)'
        s.connect(self, right) # add a wire
        #print('plug * ' + str(w))
        return right
    def __setitem__(self, port: Union[int, slice], src: SourceType):
        """
        Convert a LHS block slice reference to a wire.
        :param port: Port number
        :type port: int | slice
        :param src: the RHS
        :type src: Block or Plug
        Used to create a wired connection by assignment, for example::
            c = bd.CONSTANT(1)
            c[0] = x
        This method is invoked to create a wire from ``x`` to input port 0 of
        the constant block ``c``.
        """
        # b[port] = src
        # src --> b[port]
        # debug print removed: wiring by assignment should not write to stdout
        # (Block.__setitem__ keeps the equivalent print commented out)
        #print('Plug connecting', src, self, port)
        self.block.bd.connect(src, self.block[port])
    def __repr__(self):
        """
        Display plug details.
        :return: Plug description
        :rtype: str
        String format::
            bicycle.0[1]
        """
        return str(self.block) + "[" + str(self.port) + "]"
# ------------------------------------------------------------------------- #
class Wire:
    """
    Create a wire.
    :param start: Plug at the start of a wire, defaults to None
    :type start: Plug, optional
    :param end: Plug at the end of a wire, defaults to None
    :type end: Plug, optional
    :param name: Name of wire, defaults to None
    :type name: str, optional
    :return: A wire object
    :rtype: Wire
    A Wire object connects two block ports. A Wire has a reference to the
    start and end ports.
    A wire records all the connections defined by the user. At compile time
    wires are used to build inter-block references.
    Between two blocks, a wire can connect one or more ports, ie. it can connect
    a set of output ports on one block to a same sized set of input ports on
    another block.
    """
    def __init__(self, start: Plug = None, end: Plug = None, name: str = None):
        # keep the caller-supplied name; a redundant `self.name = None` used
        # to follow the other assignments here and clobbered it, so named
        # wires always displayed as 'wire.<id>'
        self.name = name
        self.id = None # assigned by the block diagram
        self.start = start
        self.end = end
        self.value = None
        self.type = None
    @property
    def info(self):
        """
        Interactive display of wire properties.
        Displays all attributes of the wire for debugging purposes.
        """
        print("wire:")
        for k, v in self.__dict__.items():
            print(" {:8s}{:s}".format(k + ":", str(v)))
    def send(self, value):
        """
        Send a value to the port at end of this wire.
        :param value: A port value
        :type value: float, numpy.ndarray, etc.
        The value is sent to the input port connected to the end of this wire.
        """
        # dest is a Wire
        return self.end.block.setinput(self.end.port, value)
    def __repr__(self):
        """
        Display wire with name and connection details.
        :return: Long-form wire description
        :rtype: str
        String format::
            wire.5: d2goal[0] --> Kv[0]
        """
        return str(self) + ": " + self.fullname
    @property
    def fullname(self):
        """
        Display wire connection details.
        :return: Wire name
        :rtype: str
        String format::
            d2goal[0] --> Kv[0]
        """
        return "{:s}[{:d}] --> {:s}[{:d}]".format(str(self.start.block),
                                                  self.start.port,
                                                  str(self.end.block),
                                                  self.end.port)
    def __str__(self):
        """
        Display wire name.
        :return: Wire name
        :rtype: str
        String format::
            wire.5
        """
        # prefer the user-given name, then the compile-time id, else '??'
        s = "wire."
        if self.name is not None:
            s += self.name
        elif self.id is not None:
            s += str(self.id)
        else:
            s += '??'
        return s
# ------------------------------------------------------------------------- #
blocklist = []  # global registry of every class decorated with @block


def block(cls):
    """
    Decorator that registers a block class with the simulator.
    :param cls: A block to be registered for the simulator
    :type cls: subclass of Block
    :return: the class, unchanged
    :rtype: subclass of Block
    Usage::
        @block
        class MyBlock:
            ...
    The modules in ``./blocks`` use this decorator to declare that they are
    blocks which will be made available as methods of the ``BlockDiagram``
    instance; the method name is a capitalized version of the class name.
    """
    # reject anything that is not a Block subclass up-front
    if not issubclass(cls, Block):
        raise ValueError('@block used on non Block subclass')
    blocklist.append(cls)  # append class to the global registry
    return cls
# ------------------------------------------------------------------------- #
class Block(ABC):
    """
    Construct a new block object.
    :param name: Name of the block, defaults to None
    :type name: str, optional
    :param inames: Names of input ports, defaults to None
    :type inames: list of str, optional
    :param onames: Names of output ports, defaults to None
    :type onames: list of str, optional
    :param snames: Names of states, defaults to None
    :type snames: list of str, optional
    :param pos: Position of block on the canvas, defaults to None
    :type pos: 2-element tuple or list, optional
    :param bd: Parent block diagram, defaults to None
    :type bd: BlockDiagram, optional
    :param nin: Number of inputs, defaults to None
    :type nin: int, optional
    :param nout: Number of outputs, defaults to None
    :type nout: int, optional
    :param ``*inputs``: Optional incoming connections
    :type ``*inputs``: Block or Plug
    :param ``**kwargs``: Unknown arguments
    :return: A Block superclass
    :rtype: Block
    A block object is the superclass of all blocks in the simulation environment.
    This is the top-level initializer, and handles most options passed to
    the superclass initializer for each block in the library.
    """
    type: str # a string holding the name of a concrete block. Usually the lowercase
    # name of the block's class definition
    blockclass: Literal['source', 'sink', 'function', 'transfer', 'subsystem']
    def __new__(cls, *args, bd=None, **kwargs):
        """
        Construct a new Block object.
        :param cls: The class to construct
        :type cls: class type
        :param *args: positional args passed to constructor
        :type *args: list
        :param **kwargs: keyword args passed to constructor
        :type **kwargs: dict
        :return: new Block instance
        :rtype: Block instance
        """
        # print('Block __new__', args,bd, kwargs)
        block = super(Block, cls).__new__(cls) # create a new instance
        # we overload setattr, so need to know whether it is being passed a port
        # name. Add this attribute now to allow proper operation.
        block.__dict__['portnames'] = [] # must be first, see __setattr__
        block.bd = bd
        block.nin = 0
        block.nout = 0
        block.nstates = 0
        return block
    # translation table used by _fixname() to strip LaTeX markup from names
    _latex_remove = str.maketrans({
        '$': '',
        '\\': '',
        '{': '',
        '}': '',
        '^': '',
        '_': ''
    })
    def __init__(self,
                 name: str = None,
                 inames: List[str] = None,
                 onames: List[str] = None,
                 snames: List[str] = None,
                 pos: Tuple[int, int] = None,
                 nin: int = None,
                 nout: int = None,
                 bd: 'BlockDiagram' = None,
                 *inputs: Union['Block', Plug],
                 **kwargs):
        # print('Block constructor, bd = ', bd)
        if name is not None:
            # keep the original (possibly LaTeX) name plus a sanitised copy
            self.name_tex = name
            self.name = self._fixname(name)
        else:
            self.name = None
        self.pos = pos
        self.id = None
        self.out = []
        self.updated = False
        self.shape = 'block' # for box
        self._inport_names = None
        self._outport_names = None
        self._state_names = None
        # flag checked by check() to prove this initializer actually ran
        self.initd = True
        # prefer the bd passed to __new__(); fall back to the __init__ argument
        self.bd = self.bd or bd
        self.nstates = self.nstates
        # appease pylint
        self.portnames = self.portnames # this gets set in Block.__new__()
        # these get set in BlockDiagram.compile() They are None until wired..?
        self.inports: List[Opt[Wire]] = []
        self.outports: List[List[Wire]] = []
        if nin is not None:
            self.nin = nin
        if nout is not None:
            self.nout = nout
        if inames is not None:
            self.inport_names(inames)
        if onames is not None:
            self.outport_names(onames)
        if snames is not None:
            self.state_names(snames)
        # wire up any blocks/plugs passed positionally, one per input port
        for i, input in enumerate(inputs):
            self.bd.connect(input, Plug(self, port=i))
        if len(kwargs) > 0:
            print('WARNING: unused arguments', kwargs.keys())
    @property
    def info(self):
        """
        Interactive display of block properties.
        Displays all attributes of the block for debugging purposes.
        """
        print("block: " + type(self).__name__)
        for k, v in self.__dict__.items():
            if k != 'sim':
                print(" {:11s}{:s}".format(k + ":", str(v)))
        # NOTE(review): bare attribute access -- presumably forces an
        # AttributeError if reset() has never been called; confirm intent
        self.inputs
    # for use in unit testing
    def _eval(self, *inputs: Any, t=None):
        """
        Evaluate a block for unit testing.
        :param *inputs: List of input port values
        :type *inputs: list
        :param t: Simulation time, defaults to None
        :type t: float, optional
        :return: Block output port values
        :rtype: list
        The output ports of the block are evaluated for a given set of input
        port values and simulation time. Input and output port values are treated
        as lists.
        Mostly used for making concise unit tests.
        """
        assert len(inputs) == self.nin, 'wrong number of inputs provided'
        self.inputs = list(inputs)
        out = self.output(t=t)
        assert isinstance(out, list), 'result must be a list'
        assert len(out) == self.nout, 'result list is wrong length'
        return out
    def __getitem__(self, port: int):
        """
        Convert a block slice reference to a plug.
        :param port: Port number
        :type port: int
        :return: A port plug
        :rtype: Plug
        Invoked whenever a block is referenced as a slice, for example::
            c = bd.CONSTANT(1)
            bd.connect(x, c[0])
            bd.connect(c[0], x)
        In both cases ``c[0]`` is converted to a ``Plug`` by this method.
        """
        # block[i] is a plug object
        #print('getitem called', self, port)
        return Plug(self, port)
    def __setitem__(self, port: int, src: SourceType):
        """
        Convert a LHS block slice reference to a wire.
        :param port: Port number
        :type port: int
        :param src: the RHS
        :type src: Block or Plug
        Used to create a wired connection by assignment, for example::
            c = bd.CONSTANT(1)
            c[0] = x
        This method is invoked to create a wire from ``x`` to port 0 of
        the constant block ``c``.
        """
        # b[port] = src
        # src --> b[port]
        #print('connecting', src, self, port)
        self.bd.connect(src, self[port])
    def __setattr__(self, name: str, value: SourceType):
        """
        Convert a LHS block name reference to a wire.
        :param name: Port name
        :type port: str
        :param value: the RHS
        :type value: Block or Plug
        Used to create a wired connection by assignment, for example::
            c = bd.CONSTANT(1, inames=['u'])
            c.u = x
        This method is invoked to create a wire from ``x`` to port 'u' of
        the constant block ``c``.
        Notes:
        - this overloaded method handles all instances of ``setattr`` and
          implements normal functionality as well, only creating a wire
          if ``name`` is a known port name.
        """
        # b[port] = src
        # src --> b[port]
        # gets called for regular attribute settings, as well as for wiring
        if name in self.portnames:
            # we're doing wiring
            #print('in __setattr___', self, name, value)
            self.bd.connect(value, getattr(self, name))
        else:
            #print('in __setattr___', self, name, value)
            # regular case, add attribute to the instance's dictionary
            self.__dict__[name] = value
    def __mul__(self, right: SourceType):
        """
        Operator for implicit wiring.
        :param right: A block or plugto be wired to
        :type right: Block or Plug
        :return: ``right``
        :rtype: Block or Plug
        Implements implicit wiring, for example::
            a = bd.CONSTANT(1) * bd.GAIN(2)
        will connect the output of the CONSTANT block to the input of the
        GAIN block. The result will be GAIN block, whose output in this case
        will be assigned to ``a``.
        Note that::
            a = bd.CONSTANT(1) * func[1]
        will connect port 0 of CONSTANT to port 1 of ``func``, and port 1 of ``func``
        will be assigned to ``a``. To specify a different outport port on ``func``
        we need to use parentheses::
            a = (bd.CONSTANT(1) * func[1])[0]
        which will connect port 0 of CONSTANT ` to port 1 of ``func``, and port 0 of ``func``
        will be assigned to ``a``.
        :seealso: Plug.__mul__
        """
        # called for the cases:
        #  block * block
        #  block * plug
        s = self.bd
        #assert isinstance(right, Block), 'arguments to * must be blocks not ports (for now)'
        w = s.connect(self, right) # add a wire
        #print('block * ' + str(w))
        return right
    # make connection, return a plug
    def __str__(self):
        # fall back to 'blockclass.??' if the block was never named
        if hasattr(self, 'name') and self.name is not None:
            return self.name
        else:
            return self.blockclass + '.??'
    def __repr__(self):
        return self.__str__()
    def _fixname(self, s):
        # strip LaTeX markup characters so the name is a plain identifier
        return s.translate(self._latex_remove)
    def inport_names(self, names: Iterable[str]):
        """
        Set the names of block input ports.
        :param names: List of port names
        :type names: list of str
        Invoked by the ``inames`` argument to the Block constructor.
        The names can include LaTeX math markup. The LaTeX version is used
        where appropriate, but the port names are a de-LaTeXd version of the
        given string with backslash, underscore, caret, braces and dollar signs
        removed.
        """
        self._inport_names = names
        for port, name in enumerate(names):
            fn = self._fixname(name)
            # expose each port as an attribute holding its Plug, and record
            # the name so __setattr__ knows it refers to a port
            setattr(self, fn, self[port])
            self.portnames.append(fn)
    def outport_names(self, names: Iterable[str]):
        """
        Set the names of block output ports.
        :param names: List of port names
        :type names: list of str
        Invoked by the ``onames`` argument to the Block constructor.
        The names can include LaTeX math markup. The LaTeX version is used
        where appropriate, but the port names are a de-LaTeXd version of the
        given string with backslash, underscore, caret, braces and dollar signs
        removed.
        """
        self._outport_names = names
        for port, name in enumerate(names):
            fn = self._fixname(name)
            # expose each port as an attribute holding its Plug, and record
            # the name so __setattr__ knows it refers to a port
            setattr(self, fn, self[port])
            self.portnames.append(fn)
    def state_names(self, names: Iterable[str]):
        """
        Set the names of the block state variables.
        :param names: List of state names
        :type names: list of str
        """
        self._state_names = names
    def sourcename(self, port: int):
        """
        Get the name of output port driving this input port.
        :param port: Input port
        :type port: int
        :return: Port name
        :rtype: str
        Return the name of the output port that drives the specified input
        port. The name can be:
        - a LaTeX string if provided
        - block name with port number given in square brackets. The block
          name will the one optionally assigned by the user using the ``name``
          keyword, otherwise a systematic default name.
        :seealso: outport_names
        """
        w = self.inports[port]
        if w.name is not None:
            return w.name
        src = w.start.block
        srcp = w.start.port
        if src._outport_names is not None:
            return src._outport_names[srcp]
        return str(w.start)
    # @property
    # def fullname(self):
    #     return self.blockclass + "." + str(self)
    def reset(self):
        # clear all input-port values and the 'all inputs received' flag
        if self.nin > 0:
            self.inputs: List[Any] = [None] * self.nin
        self.updated = False
    def add_outport(self, w: Wire):
        # record wire w as one of the (possibly many) wires driven by its
        # start port; outports must already be sized by the block diagram
        port = w.start.port
        assert port < len(self.outports), 'port number too big'
        self.outports[port].append(w)
    def add_inport(self, w: Wire):
        # record wire w as the single wire driving its end port; an input
        # port may only ever be driven by one wire
        port = w.end.port
        assert self.inports[
            port] is None, 'attempting to connect second wire to an input'
        self.inports[port] = w
    def setinput(self, port: int, value: Any):
        """
        Receive input from a wire
        :param self: Block to be updated
        :type wire: Block
        :param port: Input port to be updated
        :type port: int
        :param value: Input value
        :type val: any
        :return: If all inputs have been received
        :rtype: bool
        """
        # stash it away
        self.inputs[port] = value
        # check if all inputs have been assigned
        if all([x is not None for x in self.inputs]):
            self.updated = True
            # self.update()
        return self.updated
    def setinputs(self, *pos):
        # assign every input-port value at once, positionally
        assert len(pos) == self.nin, 'mismatch in number of inputs'
        self.reset()
        for i, val in enumerate(pos):
            self.inputs[i] = val
    def check(self): # check validity of block parameters at start
        assert self.nin > 0 or self.nout > 0, 'no inputs or outputs specified'
        assert hasattr(
            self, 'initd'
        ) and self.initd, 'Block superclass not initalized. was super().__init__ called?'
    # simulation lifecycle hooks -- optionally overridden by subclasses
    def start(self, **kwargs): ... # begin of a simulation
    def output(self, t: float): ...
    def step(self): ... # to be deprecated; replaced solely by output()
    def done(self, **kwargs): ... # end of simulation
class SinkBlock(Block):
    """
    Base class for blocks that only consume signals.

    A sink has input ports but no output ports and carries no state;
    concrete sinks typically save data to a variable, a file or
    graphics.
    """
    blockclass = 'sink'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # a sink drives nothing and integrates nothing
        self.nout = self.nstates = 0
class SourceBlock(Block):
    """
    Base class for blocks that only produce signals.

    A source has output ports but no input ports; its output is a
    function of its parameters and the simulation time.
    """
    blockclass = 'source'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # a source consumes nothing and integrates nothing
        self.nin = self.nstates = 0
class TransferBlock(Block):
    """
    Base class for blocks with inputs, outputs and state variables.

    Typically used to describe a continuous-time dynamic system, either
    linear or nonlinear; subclasses must implement :meth:`deriv` to
    return the state derivative.
    """
    blockclass = 'transfer'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._x0 = ()  # initial state vector

    def reset(self):
        # clear input bookkeeping and restore the state to its initial value
        super().reset()
        self._x = self._x0
        return self._x

    def setstate(self, x):
        # consume this block's slice of the composite state vector ...
        n = self.nstates
        self._x = x[:n]
        # ... and hand the remainder on to the next block
        return x[n:]

    def getstate(self):
        return self._x0

    def check(self):
        assert len(self._x0) == self.nstates, 'incorrect length for initial state'
        assert self.nin > 0 or self.nout > 0, 'no inputs or outputs specified'

    @abstractmethod
    def deriv(self) -> np.ndarray: ...
class FunctionBlock(Block):
    """
    Base class for stateless input/output blocks.

    Typically used to describe operations such as gain, summation or
    various mappings from inputs to outputs.
    """
    blockclass = 'function'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # purely combinational: no integrator state
        self.nstates = 0
class SubsystemBlock(Block):
    """
    A Subsystem is a subclass of block that contains a group of blocks within it
    and a predefined set of inputs and outputs. It is synonymous to Simulink's groups.
    When Subclassing SubsystemBlock, all connections between blocks within should be
    performed in the constructor.
    """
    blockclass = 'subsystem'
    def __init__(self, ssvar: str = None, **kwargs):
        # ssvar is stored verbatim; presumably it names the subsystem
        # variable -- confirm against callers
        # print('Subsystem constructor')
        super().__init__(**kwargs)
        self.ssvar = ssvar
    # def _run(self, sub_block, inputs, t=None):
    #     "helper function to make processing internal blocks less cumbersome"
    #     sub_block.inputs = inputs
    #     return sub_block.output(t)
    # def _sequential(self, sub_blocks, inputs, t=None):
    #     "helper function to run blocks sequentially"
    #     for block in sub_blocks:
    #         inputs = self._run(block, inputs, t)
    #     return inputs
# module exports -- the names re-exported via `from ... import *`
__all__ = ('block', 'Block', 'Wire', 'Plug', 'FunctionBlock', 'SinkBlock',
           'SourceBlock', 'SubsystemBlock', 'Struct', 'TransferBlock', 'blocklist',
           'SourceType')
| 29.076591 | 106 | 0.563441 | 25,211 | 0.935334 | 0 | 0 | 2,288 | 0.084885 | 0 | 0 | 16,590 | 0.615493 |
2accc4316592f369a2446f7d46a9301b4a27b128 | 4,509 | py | Python | code/alg/placement/random.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | 7 | 2021-11-04T06:31:38.000Z | 2022-02-08T11:46:42.000Z | code/alg/placement/random.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | null | null | null | code/alg/placement/random.py | S-Lab-System-Group/ChronusArtifact | bc7e15fefd53b80d1897170ab3c1aa7d353c3b79 | [
"MIT"
] | 1 | 2022-01-10T01:42:42.000Z | 2022-01-10T01:42:42.000Z | import os, sys
import math
import random
sys.path.insert(0, os.path.basename(__file__) + os.sep + '..' + os.sep + '..')
from server.switch import _Switch
from server.node import _Node
from utils import util
from alg.utils.topology import Topology
from .base import BasePlaceMent
class RandomPlaceMent(BasePlaceMent):
    """Placement policy that scatters a job's workers over randomly chosen nodes."""
    __alias__ = 'random'
    def __init__(self, cluster, name, model_info):
        super(RandomPlaceMent, self).__init__(cluster=cluster, name=name, model_info=model_info)
    def place_jobs(self, job):
        '''
        randomly pick up enough resource for both PS and worker in a job
        allocate one by one
        '''
        # early return false
        if self.cluster.check_free_gpus() < job.required_gpu_num: return False
        num_ps, num_w = len(job['ps_network']), job['num_gpu']
        # either one PS per worker, or a single worker with no PS at all
        assert num_ps == num_w or (num_ps == 0 and num_w == 1)
        # go through workers, each worker needs single gpu and two workers
        w_node_list, w_switch_list = list(), list()
        for w in range(num_w):
            # probe nodes starting from a random index, wrapping around once
            start_ngid = random.randint(0, self.num_node - 1)
            allocated = False
            for i in range(self.num_node):
                n_gid = (start_ngid + i) % self.num_node
                resource = self.get_node_with_gid(n_gid)
                node, free_gpu = resource['node'], resource['node'].check_free_gpus()
                if free_gpu > 0:
                    # a solo worker (no PS) gets 6 CPUs, otherwise 2 per worker
                    cpu_num = 6 if (num_w == 1 and num_ps == 0) else 2
                    allocated = allocated or self.allocate_resource(job=job, resource=resource, node_list=w_node_list, \
                        switch_list=w_switch_list, gpu_num=1, cpu_num=cpu_num, job_num=1)
                # print('switch free_resource', resource['switch'].id, resource['node'].id, resource['node'].check_free_cpus(), resource['node'].check_free_gpus())
                if allocated == True: break
            if allocated == False:
                # NOTE(review): this assert fires before the roll-back below,
                # so the release loop and `return False` only execute when
                # Python runs with -O (asserts stripped) -- confirm intent
                assert False, 'should not run here'
                for node in w_node_list:
                    assert node.release_job_gpu_cpu(num_gpu=1, num_cpu=2, job=job) == True
                return False # short-cut return
        # go through PS, worker requires 4 cpu
        ps_node_list, ps_switch_list = list(), list()
        for i in range(num_ps):
            # co-locate each PS with the worker of the same index
            resource = {
                'node':w_node_list[i],
                'switch':w_node_list[i].belong_switch,
            }
            # print('free_resource', resource['switch'].id, resource['node'].id, resource['node'].check_free_cpus(), resource['node'].check_free_gpus())
            allocated = self.allocate_resource(job=job, resource=resource, node_list=ps_node_list, \
                switch_list=ps_switch_list, gpu_num=0, cpu_num=4, job_num=1)
            assert allocated == True, 'should have enough resource to run'
        # end of worker node & ps node
        # node_list = list() # useful for network load, but now no use
        # for i in range(len(w_node_list)):
        #     self.update_node_list_info(w_node_list[i], node_list, worker=1, ps=0)
        # for i in range(len(ps_node_list)):
        #     self.update_node_list_info(ps_node_list[i], node_list, worker=0, ps=i)
        # process job placement information
        for i, (s_id, node) in enumerate(zip(w_switch_list, w_node_list)):
            node_dict = {
                'id' : node.id,
                'node_instance' : node,
                'num_gpu' : 1,
                'num_cpu' : 6 if (num_w == 1 and num_ps == 0) else 2,
                'mem' : job['model']['mem_util'],
                'tasks': list(),
            }
            job['placements'].append({
                'switch' : s_id,
                'nodes' : [node_dict],
            })
        for i, (s_id, node) in enumerate(zip(ps_switch_list, ps_node_list)):
            node_dict = {
                'id' : node.id,
                'node_instance' : node,
                'num_gpu' : 0,
                'num_cpu' : 4,
                'mem' : 0, # job['model']['mem_util'], fix a bug
                'tasks' : list(),
            }
            job['placements'].append({
                'switch': s_id,
                'nodes' : [node_dict]
            })
        job['topology'] = Topology(job=job, placements=job['placements'])
        return True
| 40.621622 | 167 | 0.541583 | 4,218 | 0.935462 | 0 | 0 | 0 | 0 | 0 | 0 | 1,242 | 0.275449 |
2acdbe48c1a63c18df33e8e3cfb8731347ace259 | 466 | py | Python | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | fbl_handler/test_message_id_extractor.py | wheekey/fbl-handler | 918e2c7ac50033630eebd5741a7ad3e9c56c3930 | [
"MIT"
] | null | null | null | from unittest import TestCase
from fbl_handler.message_id_extractor import MessageIdExtractor
class TestMessageIdExtractor(TestCase):
    """Unit tests for MessageIdExtractor."""

    def setUp(self):
        # a fresh extractor for every test case
        self.message_id_extractor = MessageIdExtractor()

    def test_extract_from_yandex_fbl(self):
        # load a captured Yandex FBL report and check the extracted Message-ID
        with open('files/yandex_fbl.txt', 'r') as fbl_report:
            payload = fbl_report.read()
        extracted = self.message_id_extractor.extract_from_yandex_fbl(payload)
        self.assertEqual(extracted, '1jb6B3-0004MN-Dq')
| 31.066667 | 109 | 0.751073 | 367 | 0.787554 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.092275 |
2ace6c0babd8d4be95c012041013aaf8129f838b | 30,169 | py | Python | evohome_rf/opentherm.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | evohome_rf/opentherm.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | evohome_rf/opentherm.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""Evohome RF - Opentherm processor."""
import logging
import struct
from typing import Any
from .const import __dev_mode__
DEV_MODE = __dev_mode__
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
    _LOGGER.setLevel(logging.DEBUG)
# Data structure shamelessly copied, with thanks to @nlrb, from:
# github.com/nlrb/com.tclcode.otgw (node_modules/otg-api/lib/ot_msg.js),
# Other code shamelessly copied, with thanks to @mvn23, from:
# github.com/mvn23/pyotgw (pyotgw/protocol.py),
# message-direction codes used in the DIR field of OPENTHERM_MESSAGES
READ_WRITE = "RW"
READ_ONLY = "R-"
WRITE_ONLY = "-W"
# keys used within the OPENTHERM_MESSAGES data structure
EN = "en"
FLAGS = "flags"
DIR = "dir"
NL = "nl"
SENSOR = "sensor"
VAL = "val"
VAR = "var"
# OpenTherm payload data types (flag byte, unsigned/signed 8/16-bit, 8.8 fixed-point)
FLAG8 = "flag8"
FLAG = "flag"
U8 = "u8"
S8 = "s8"
F8_8 = "f8.8"
U16 = "u16"
S16 = "s16"
# high-byte / low-byte selectors
HB = "hb"
LB = "lb"
# sensor/measurement categories
VALUE = "value"
COUNTER = "counter"
HUMIDITY = "humidity"
PERCENTAGE = "percentage"
PRESSURE = "pressure"
TEMPERATURE = "temperature"
# names for the eight 3-bit message-type codes (0b000-0b111)
OPENTHERM_MSG_TYPE = {
    0b000: "Read-Data",
    0b001: "Write-Data",
    0b010: "Invalid-Data",
    0b011: "-reserved-",
    0b100: "Read-Ack",
    0b101: "Write-Ack",
    0b110: "Data-Invalid",
    0b111: "Unknown-DataId",
}
# These must have either a FLAGS (preferred) or a VAR for their message name
OPENTHERM_MESSAGES = {
# OpenTherm status flags [ID 0: Master status (HB) & Slave status (LB)]
"status_flags": {
"0x0100": {
EN: "Central heating enable",
NL: "Centrale verwarming aan",
VAR: "StatusCHEnabled",
},
"0x0200": {
EN: "DHW enable",
NL: "Tapwater aan",
VAR: "StatusDHWEnabled",
},
"0x0400": {
EN: "Cooling enable",
NL: "Koeling aan",
VAR: "StatusCoolEnabled",
},
"0x0800": {
EN: "Outside temp. comp. active",
NL: "Compenseren buitentemp.",
VAR: "StatusOTCActive",
},
"0x1000": {
EN: "Central heating 2 enable",
NL: "Centrale verwarming 2 aan",
VAR: "StatusCH2Enabled",
},
"0x2000": {
EN: "Summer/winter mode",
NL: "Zomer/winter mode",
VAR: "StatusSummerWinter",
},
"0x4000": {
EN: "DHW blocking",
NL: "Tapwater blokkade",
VAR: "StatusDHWBlocked",
},
"0x0001": {
EN: "Fault indication",
NL: "Fout indicatie",
VAR: "StatusFault",
}, # no fault/fault
"0x0002": {
EN: "Central heating mode",
NL: "Centrale verwarming mode",
VAR: "StatusCHMode",
}, # not active/active
"0x0004": {
EN: "DHW mode",
NL: "Tapwater mode",
VAR: "StatusDHWMode",
}, # not active/active
"0x0008": {
EN: "Flame status",
NL: "Vlam status",
VAR: "StatusFlame",
}, # flame off/on
"0x0010": {
EN: "Cooling status",
NL: "Status koelen",
VAR: "StatusCooling",
}, # not active/active
"0x0020": {
EN: "Central heating 2 mode",
NL: "Centrale verwarming 2 mode",
VAR: "StatusCH2Mode",
}, # not active/active
"0x0040": {
EN: "Diagnostic indication",
NL: "Diagnose indicatie",
VAR: "StatusDiagnostic",
}, # no diagnostics/diagnostics event
},
# OpenTherm Master configuration flags [ID 2: master config flags (HB)]
"Master_config_flags": {
"0x0100": {
EN: "Smart Power",
VAR: "ConfigSmartPower",
},
},
# OpenTherm Slave configuration flags [ID 3: slave config flags (HB)]
"Slave_Config_flags": {
"0x0100": {
EN: "DHW present",
VAR: "ConfigDHWpresent",
},
"0x0200": {
EN: "Control type (modulating on/off)",
VAR: "ConfigControlType",
},
"0x0400": {
EN: "Cooling supported",
VAR: "ConfigCooling",
},
"0x0800": {
EN: "DHW storage tank",
VAR: "ConfigDHW",
},
"0x1000": {
EN: "Master low-off & pump control allowed",
VAR: "ConfigMasterPump",
},
"0x2000": {
EN: "Central heating 2 present",
VAR: "ConfigCH2",
},
},
# OpenTherm fault flags [ID 5: Application-specific fault flags (HB)]
"fault_flags": {
"0x0100": {
EN: "Service request",
NL: "Onderhoudsvraag",
VAR: "FaultServiceRequest",
},
"0x0200": {
EN: "Lockout-reset",
NL: "Geen reset op afstand",
VAR: "FaultLockoutReset",
},
"0x0400": {
EN: "Low water pressure",
NL: "Waterdruk te laag",
VAR: "FaultLowWaterPressure",
},
"0x0800": {
EN: "Gas/flame fault",
NL: "Gas/vlam fout",
VAR: "FaultGasFlame",
},
"0x1000": {
EN: "Air pressure fault",
NL: "Luchtdruk fout",
VAR: "FaultAirPressure",
},
"0x2000": {
EN: "Water over-temperature",
NL: "Water te heet",
VAR: "FaultOverTemperature",
},
},
# OpenTherm remote flags [ID 6: Remote parameter flags (HB)]
"Remote_flags": {
"0x0100": {
EN: "DHW setpoint enable",
VAR: "RemoteDHWEnabled",
},
"0x0200": {
EN: "Max. CH setpoint enable",
VAR: "RemoteMaxCHEnabled",
},
"0x0001": {
EN: "DHW setpoint read/write",
VAR: "RemoteDHWReadWrite",
},
"0x0002": {
EN: "Max. CH setpoint read/write",
VAR: "RemoteMaxCHReadWrite",
},
},
# OpenTherm messages
"messages": {
0x00: { # 0, Status
EN: "Status",
DIR: READ_ONLY,
VAL: FLAG8,
FLAGS: "StatusFlags",
},
0x01: { # 1, Control Setpoint
EN: "Control setpoint",
NL: "Ketel doeltemperatuur",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "ControlSetpoint",
SENSOR: TEMPERATURE,
},
0x02: { # 2, Master Member ID
EN: "Master configuration",
DIR: WRITE_ONLY,
VAL: {HB: FLAG8, LB: U8},
FLAGS: "MasterConfigFlags",
VAR: {LB: "MasterMemberId"},
},
0x03: { # 3, Slave Member ID
EN: "Slave configuration",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
FLAGS: "SlaveConfigFlags",
VAR: {LB: "SlaveMemberId"},
},
0x04: { # 4, Remote Command
EN: "Remote command",
DIR: WRITE_ONLY,
VAL: U8,
VAR: "RemoteCommand",
},
0x05: { # 5, OEM Fault Code
EN: "Fault flags & OEM fault code",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
VAR: {LB: "OEMFaultCode"},
FLAGS: "FaultFlags",
},
0x06: { # 6, Remote Flags
EN: "Remote parameter flags",
DIR: READ_ONLY,
VAL: FLAG8,
FLAGS: "RemoteFlags",
},
0x07: { # 7, Cooling Control Signal
EN: "Cooling control signal",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CoolingControlSignal",
SENSOR: PERCENTAGE,
},
0x08: { # 8, CH2 Control Setpoint
EN: "Control setpoint for 2nd CH circuit",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CH2ControlSetpoint",
SENSOR: TEMPERATURE,
},
0x09: { # 9, Remote Override Room Setpoint
EN: "Remote override room setpoint",
NL: "Overschreven kamer doeltemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "RemoteOverrideRoomSetpoint",
SENSOR: TEMPERATURE,
},
0x0A: { # 10, TSP Number
EN: "Number of transparent slave parameters supported by slave",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "TSPNumber"},
},
0x0B: { # 11, TSP Entry
EN: "Index number/value of referred-to transparent slave parameter",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "TSPIndex", LB: "TSPValue"},
},
0x0C: { # 12, FHB Size
EN: "Size of fault history buffer supported by slave",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "FHBSize"},
},
0x0D: { # 13, FHB Entry
EN: "Index number/value of referred-to fault history buffer entry",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "FHBIndex", LB: "FHBValue"},
},
0x0E: { # 14, Max Relative Modulation Level
EN: "Max. relative modulation level",
NL: "Max. relatief modulatie-niveau",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "MaxRelativeModulationLevel",
SENSOR: PERCENTAGE,
},
0x0F: { # 15, Max Boiler Capacity & Min Modulation Level
EN: "Max. boiler capacity (kW) and modulation level setting (%)",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "MaxBoilerCapacity", LB: "MinModulationLevel"},
},
0x10: { # 16, Current Setpoint
EN: "Room setpoint",
NL: "Kamer doeltemperatuur",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CurrentSetpoint",
SENSOR: TEMPERATURE,
},
0x11: { # 17, Relative Modulation Level
EN: "Relative modulation level",
NL: "Relatief modulatie-niveau",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "RelativeModulationLevel",
SENSOR: PERCENTAGE,
},
0x12: { # 18, CH Water Pressure
EN: "Central heating water pressure",
NL: "Keteldruk",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CHWaterPressure",
SENSOR: PRESSURE,
},
0x13: { # 19, DHW Flow Rate
EN: "DHW flow rate (litres/minute)",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHWFlowRate",
SENSOR: "flow",
},
0x14: { # 20, Day/Time
EN: "Day of week & time of day",
DIR: READ_WRITE,
VAR: "DayTime",
},
0x15: { # 21, Date
EN: "Date",
DIR: READ_WRITE,
VAL: U8,
VAR: "Date",
},
0x16: { # 22, Year
EN: "Year",
DIR: READ_WRITE,
VAL: U16,
VAR: "Year",
},
0x17: { # 23, CH2 Current Setpoint
EN: "Room setpoint for 2nd CH circuit",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "CH2CurrentSetpoint",
SENSOR: TEMPERATURE,
},
0x18: { # 24, Current Room Temperature
EN: "Room temperature",
NL: "Kamertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CurrentTemperature",
SENSOR: TEMPERATURE,
},
0x19: { # 25, Boiler Water Temperature
EN: "Boiler water temperature",
NL: "Ketelwatertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "BoilerWaterTemperature",
SENSOR: TEMPERATURE,
},
0x1A: { # 26, DHW Temperature
EN: "DHW temperature",
NL: "Tapwatertemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHWTemperature",
SENSOR: TEMPERATURE,
},
0x1B: { # 27, Outside Temperature
EN: "Outside temperature",
NL: "Buitentemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "OutsideTemperature",
SENSOR: TEMPERATURE,
},
0x1C: { # 28, Return Water Temperature
EN: "Return water temperature",
NL: "Retourtemperatuur",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ReturnWaterTemperature",
SENSOR: TEMPERATURE,
},
0x1D: { # 29, Solar Storage Temperature
EN: "Solar storage temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SolarStorageTemperature",
SENSOR: TEMPERATURE,
},
0x1E: { # 30, Solar Collector Temperature
EN: "Solar collector temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SolarCollectorTemperature",
SENSOR: TEMPERATURE,
},
0x1F: { # 31, CH2 Flow Temperature
EN: "Flow temperature for 2nd CH circuit",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CH2FlowTemperature",
SENSOR: TEMPERATURE,
},
0x20: { # 32, DHW2 Temperature
EN: "DHW 2 temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "DHW2Temperature",
SENSOR: TEMPERATURE,
},
0x21: { # 33, Boiler Exhaust Temperature
EN: "Boiler exhaust temperature",
DIR: READ_ONLY,
VAL: S16,
VAR: "BoilerExhaustTemperature",
SENSOR: TEMPERATURE,
},
0x30: { # 48, DHW Boundaries
EN: "DHW setpoint boundaries",
DIR: READ_ONLY,
VAL: S8,
VAR: "DHWBoundaries",
SENSOR: TEMPERATURE,
},
0x31: { # 49, CH Boundaries
EN: "Max. central heating setpoint boundaries",
DIR: READ_ONLY,
VAL: S8,
VAR: "CHBoundaries",
SENSOR: TEMPERATURE,
},
0x32: { # 50, OTC Boundaries
EN: "OTC heat curve ratio upper & lower bounds",
DIR: READ_ONLY,
VAL: S8,
VAR: "OTCBoundaries",
},
0x38: { # 56, DHW Setpoint
EN: "DHW setpoint",
NL: "Tapwater doeltemperatuur",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "DHWSetpoint",
SENSOR: TEMPERATURE,
},
0x39: { # 57, Max CH Water Setpoint
EN: "Max. central heating water setpoint",
NL: "Max. ketel doeltemperatuur",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "MaxCHWaterSetpoint",
SENSOR: TEMPERATURE,
},
0x3A: { # 58, OTC Heat Curve Ratio
EN: "OTC heat curve ratio",
DIR: READ_WRITE,
VAL: F8_8,
VAR: "OTCHeatCurveRatio",
SENSOR: TEMPERATURE,
},
# OpenTherm 2.3 IDs (70-91) for ventilation/heat-recovery applications
0x46: { # 70, VH Status
EN: "Status ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: FLAG8,
VAR: "VHStatus",
},
0x47: { # 71, VH Control Setpoint
EN: "Control setpoint ventilation/heat-recovery",
DIR: WRITE_ONLY,
VAL: U8,
VAR: {HB: "VHControlSetpoint"},
},
0x48: { # 72, VH Fault Code
EN: "Fault flags/code ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: {HB: FLAG, LB: U8},
VAR: {LB: "VHFaultCode"},
},
0x49: { # 73, VH Diagnostic Code
EN: "Diagnostic code ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U16,
VAR: "VHDiagnosticCode",
},
0x4A: { # 74, VH Member ID
EN: "Config/memberID ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: {HB: FLAG, LB: U8},
VAR: {LB: "VHMemberId"},
},
0x4B: { # 75, VH OpenTherm Version
EN: "OpenTherm version ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "VHOpenThermVersion",
},
0x4C: { # 76, VH Product Type/Version
EN: "Version & type ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHProductType", LB: "VHProductVersion"},
},
0x4D: { # 77, Relative Ventilation
EN: "Relative ventilation",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "RelativeVentilation"},
},
0x4E: { # 78, Relative Humidity
EN: "Relative humidity",
NL: "Luchtvochtigheid",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "RelativeHumidity"},
SENSOR: HUMIDITY,
},
0x4F: { # 79, CO2 Level
EN: "CO2 level",
NL: "CO2 niveau",
DIR: READ_WRITE,
VAL: U16,
VAR: "CO2Level",
SENSOR: "co2",
},
0x50: { # 80, Supply Inlet Temperature
EN: "Supply inlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SupplyInletTemperature",
SENSOR: TEMPERATURE,
},
0x51: { # 81, Supply Outlet Temperature
EN: "Supply outlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SupplyOutletTemperature",
SENSOR: TEMPERATURE,
},
0x52: { # 82, Exhaust Inlet Temperature
EN: "Exhaust inlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ExhaustInletTemperature",
SENSOR: TEMPERATURE,
},
0x53: { # 83, Exhaust Outlet Temperature
EN: "Exhaust outlet temperature",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "ExhaustOutletTemperature",
SENSOR: TEMPERATURE,
},
0x54: { # 84, Exhaust Fan Speed
EN: "Actual exhaust fan speed",
DIR: READ_ONLY,
VAL: U16,
VAR: "ExhaustFanSpeed",
},
0x55: { # 85, Inlet Fan Speed
EN: "Actual inlet fan speed",
DIR: READ_ONLY,
VAL: U16,
VAR: "InletFanSpeed",
},
0x56: { # 86, VH Remote Parameter
EN: "Remote parameter settings ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: FLAG8,
VAR: "VHRemoteParameter",
},
0x57: { # 87, Nominal Ventilation
EN: "Nominal ventilation value",
DIR: READ_WRITE,
VAL: U8,
VAR: "NominalVentilation",
},
0x58: { # 88, VH TSP Size
EN: "TSP number ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHTSPSize"},
},
0x59: { # 89, VH TSP Entry
EN: "TSP entry ventilation/heat-recovery",
DIR: READ_WRITE,
VAL: U8,
VAR: {HB: "VHTSPIndex", LB: "VHTSPValue"},
},
0x5A: { # 90, VH FHB Size
EN: "Fault buffer size ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHFHBSize"},
},
0x5B: { # 91, VH FHB Entry
EN: "Fault buffer entry ventilation/heat-recovery",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "VHFHBIndex", LB: "VHFHBValue"},
},
# OpenTherm 2.2 IDs
0x64: { # 100, Remote Override Function
EN: "Remote override function",
DIR: READ_ONLY,
VAL: {HB: FLAG8, LB: U8},
VAR: {HB: "RemoteOverrideFunction"},
},
0x73: { # 115, OEM Diagnostic Code
EN: "OEM diagnostic code",
DIR: READ_ONLY,
VAL: U16,
VAR: "OEMDiagnosticCode",
},
0x74: { # 116, Starts Burner
EN: "Number of starts burner",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsBurner",
SENSOR: COUNTER,
},
0x75: { # 117, Starts CH Pump
EN: "Number of starts central heating pump",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsCHPump",
SENSOR: COUNTER,
},
0x76: { # 118, Starts DHW Pump
EN: "Number of starts DHW pump/valve",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsDHWPump",
SENSOR: COUNTER,
},
0x77: { # 119, Starts Burner DHW
EN: "Number of starts burner during DHW mode",
DIR: READ_WRITE,
VAL: U16,
VAR: "StartsBurnerDHW",
SENSOR: COUNTER,
},
0x78: { # 120, Hours Burner
EN: "Number of hours burner is in operation (i.e. flame on)",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursBurner",
SENSOR: COUNTER,
},
0x79: { # 121, Hours CH Pump
EN: "Number of hours central heating pump has been running",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursCHPump",
SENSOR: COUNTER,
},
0x7A: { # 122, Hours DHW Pump
EN: "Number of hours DHW pump has been running/valve has been opened",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursDHWPump",
SENSOR: COUNTER,
},
0x7B: { # 123, Hours DHW Burner
EN: "Number of hours DHW burner is in operation during DHW mode",
DIR: READ_WRITE,
VAL: U16,
VAR: "HoursDHWBurner",
SENSOR: COUNTER,
},
0x7C: { # 124, Master OpenTherm Version
EN: "Opentherm version Master",
DIR: WRITE_ONLY,
VAL: F8_8,
VAR: "MasterOpenThermVersion",
},
0x7D: { # 125, Slave OpenTherm Version
EN: "Opentherm version Slave",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "SlaveOpenThermVersion",
},
0x7E: { # 126, Master Product Type/Version
EN: "Master product version and type",
DIR: WRITE_ONLY,
VAL: U8,
VAR: {HB: "MasterProductType", LB: "MasterProductVersion"},
},
0x7F: { # 127, Slave Product Type/Version
EN: "Slave product version and type",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "SlaveProductType", LB: "SlaveProductVersion"},
},
# ZX-DAVB extras
0x71: { # 113, Bad Starts Burner
EN: "Number of un-successful burner starts",
DIR: READ_WRITE,
VAL: U16,
VAR: "BadStartsBurner?",
SENSOR: COUNTER,
},
0x72: { # 114, Low Signals Flame
EN: "Number of times flame signal was too low",
DIR: READ_WRITE,
VAL: U16,
VAR: "LowSignalsFlame?",
SENSOR: COUNTER,
},
# https://www.domoticaforum.eu/viewtopic.php?f=70&t=10893
# 0x23: { # 35, Boiler Fan Speed (rpm/60?)?
# },
0x24: { # 36, Electrical current through burner flame (µA)
EN: "Electrical current through burner flame (µA)",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "BurnerCurrent",
},
0x25: { # 37, CH2 Room Temperature
EN: "Room temperature for 2nd CH circuit",
DIR: READ_ONLY,
VAL: F8_8,
VAR: "CH2CurrentTemperature",
SENSOR: TEMPERATURE,
},
0x26: { # 38, Relative Humidity, c.f. 0x4E
EN: "Relative humidity",
DIR: READ_ONLY,
VAL: U8,
VAR: {HB: "RelativeHumidity"}, # TODO: or LB?
SENSOR: HUMIDITY,
},
},
}
def parity(x: int) -> int:
    """Return the bit parity of *x*: 1 if an odd number of bits are set, else 0.

    Uses a logarithmic XOR fold: repeatedly XOR the upper half of the
    remaining bits into the lower half (shift doubling each pass) until
    bit 0 holds the cumulative parity of every original bit.
    """
    shiftamount = 1
    while x >> shiftamount:
        x ^= x >> shiftamount
        shiftamount <<= 1
    return x & 1
def ot_msg_value(val_seqx, val_type) -> Any:
    """Decode an OpenTherm data value from its hex-string representation.

    ``val_seqx`` is a 4-character hex string (MSB byte followed by LSB
    byte); ``val_type`` selects one of the OpenTherm encodings handled
    by the dispatch table below.  Unrecognized types return ``val_seqx``
    unchanged.
    """
    def _get_flag8(byte, *args) -> list:
        """Split a byte (as a str) into a list of 8 bits (1/0)."""
        ret = [0] * 8
        byte = bytes.fromhex(byte)[0]
        for i in range(0, 8):
            ret[i] = byte & 1
            byte = byte >> 1
        return ret
    def _get_u8(byte, *args) -> int:
        """Convert a byte (as a str) into an unsigned int."""
        return struct.unpack(">B", bytes.fromhex(byte))[0]
    def _get_s8(byte, *args) -> int:
        """Convert a byte (as a str) into a signed int."""
        return struct.unpack(">b", bytes.fromhex(byte))[0]
    def _get_f8_8(msb, lsb) -> float:
        """Convert 2 bytes (as strs) into an OpenTherm f8_8 (float) value."""
        return float(_get_s16(msb, lsb) / 256)
    def _get_u16(msb, lsb) -> int:
        """Convert 2 bytes (as strs) into an unsigned int."""
        buf = struct.pack(">BB", _get_u8(msb), _get_u8(lsb))
        return int(struct.unpack(">H", buf)[0])
    def _get_s16(msb, lsb) -> int:
        """Convert 2 bytes (as strs) into a signed int."""
        buf = struct.pack(">bB", _get_s8(msb), _get_u8(lsb))
        return int(struct.unpack(">h", buf)[0])
    # Dispatch table: OpenTherm value-type constant -> decoder function.
    DATA_TYPES = {
        FLAG8: _get_flag8,
        U8: _get_u8,
        S8: _get_s8,
        F8_8: _get_f8_8,
        U16: _get_u16,
        S16: _get_s16,
    }
    if val_type in DATA_TYPES:
        # Split the 4-char hex string into MSB ([:2]) and LSB ([2:]) bytes.
        return DATA_TYPES[val_type](val_seqx[:2], val_seqx[2:])
    return val_seqx
# See: https://www.opentherm.eu/request-details/?post_ids=2944
#
# ID0:HB0: Master status: CH enable
# ID0:HB1: Master status: DHW enable
# ID0:HB2: Master status: Cooling enable
# ID0:HB3: Master status: OTC active
# ID0:HB5: Master status: Summer/winter mode
# ID0:HB6: Master status: DHW blocking
# ID0:LB0: Slave Status: Fault indication
# ID0:LB1: Slave Status: CH mode
# ID0:LB2: Slave Status: DHW mode
# ID0:LB3: Slave Status: Flame status
# ID1: Control Setpoint i.e. CH water temperature Setpoint (°C)
# ID2:HB0: Master configuration: Smart power
# ID2:LB: Master MemberID Code
# ID3:HB0: Slave configuration: DHW present
# ID3:HB1: Slave configuration: Control type
# ID3:HB4: Slave configuration: Master low-off&pump control
# ID5:HB0: Service request
# ID5:HB1: Lockout-reset
# ID5:HB2: Low water pressure
# ID5:HB3: Gas/flame fault
# ID5:HB4: Air pressure fault
# ID5:HB5: Water over-temperature
# ID5:LB: OEM fault code
# ID6:HB0: Remote boiler parameter transfer-enable: DHW setpoint
# ID6:HB1: Remote boiler parameter transfer-enable: max. CH setpoint
# ID6:LB0: Remote boiler parameter read/write: DHW setpoint
# ID6:LB1: Remote boiler parameter read/write: max. CH setpoint
# ID9: Remote override room Setpoint
# ID10: Number of Transparent-Slave-Parameters supported by slave
# ID12: Size of Fault-History-Buffer supported by slave
# ID14: Maximum relative modulation level setting (%)
# ID16: Room Setpoint (°C)
# ID17: Relative Modulation Level (%)
# ID18: Water pressure in CH circuit (bar)
# ID19: Water flow rate in DHW circuit. (litres/minute)
# ID24: Room temperature (°C)
# ID25: Boiler flow water temperature (°C)
# ID26: DHW temperature (°C)
# ID27: Outside temperature (°C)
# ID28: Return water temperature (°C)
# ID48: DHW Setpoint upper & lower bounds for adjustment (°C)
# ID49: Max CH water Setpoint upper & lower bounds for adjustment (°C)
# ID56: DHW Setpoint (°C) (Remote parameter 1)
# ID57: Max CH water Setpoint (°C) (Remote parameters 2)
# ID126: Master product version number and type
# ID127: Slave product version number and type
# https://github.com/rvdbreemen/OTGW-firmware/blob/main/Specification/New%20OT%20data-ids.txt # noqa
"""
New OT Data-ID's - Found two new ID's at this device description:
http://www.opentherm.eu/product/view/18/feeling-d201-ot
ID 98: For a specific RF sensor the RF strength and battery level is written
ID 99: Operating Mode HC1, HC2/ Operating Mode DHW
Found new data-id's at this page:
https://www.opentherm.eu/request-details/?post_ids=1833
ID 109: Electricity producer starts
ID 110: Electricity producer hours
ID 111: Electricity production
ID 112: Cumulative Electricity production
Found new Data-ID's at this page:
https://www.opentherm.eu/request-details/?post_ids=1833
ID 36: {f8.8} "Electrical current through burner flame" (µA)
ID 37: {f8.8} "Room temperature for 2nd CH circuit"
ID 38: {u8 u8} "Relative Humidity"
For Data-ID's 37 and 38 I assumed their data types, for Data ID 36 I determined it by
matching qSense value with the correct data-type.
I also analysed OT Remeha qSense <-> Remeha Tzerra communication.
ID 131: {u8 u8} "Remeha dF-/dU-codes"
ID 132: {u8 u8} "Remeha Service message"
ID 133: {u8 u8} "Remeha detection connected SCU’s"
"Remeha dF-/dU-codes": Should match the dF-/dU-codes written on boiler nameplate.
Read-Data Request (0 0) returns the data. Also accepts Write-Data Requests (dF dU),
this returns the boiler to its factory defaults.
"Remeha Service message" Read-Data Request (0 0), boiler returns (0 2) in case of no
boiler service. Write-Data Request (1 255) clears the boiler service message.
boiler returns (1 1) = next service type is "A"
boiler returns (1 2) = next service type is "B"
boiler returns (1 3) = next service type is "C"
"Remeha detection connected SCU’s": Write-Data Request (255 1) enables detection of
connected SCU prints, correct response is (Write-Ack 255 1).
Other Remeha info:
ID 5: corresponds with the Remeha E:xx fault codes
ID 11: corresponds with the Remeha Pxx parameter codes
ID 35: reported value is fan speed in rpm/60
ID 115: corresponds with the Remeha Status and Sub-status numbers, {u8 u8} data-type
"""
| 31.857445 | 101 | 0.515861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,438 | 0.478285 |
2acecaa7265345057c582cd5659c528d1e06424d | 3,030 | py | Python | mut/viz.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 3 | 2020-11-11T21:33:26.000Z | 2021-07-14T21:22:43.000Z | mut/viz.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | null | null | null | mut/viz.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 1 | 2021-07-14T21:22:45.000Z | 2021-07-14T21:22:45.000Z | import bokeh.io
import bokeh.plotting
import bokeh.layouts
import bokeh.palettes
import seaborn as sns
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
def plotting_style(grid=True):
    """Activate the publication matplotlib/seaborn style.

    Parameters
    ----------
    grid : bool
        Whether to draw axis grids (forwarded to ``axes.grid``).
    """
    style_params = {
        'axes.facecolor': '#E3DCD0',
        'font.family': 'Lucida Sans Unicode',
        'grid.linestyle': '-',
        'grid.linewidth': 0.5,
        'grid.alpha': 0.75,
        'grid.color': '#ffffff',
        'axes.grid': grid,
        'ytick.direction': 'in',
        'xtick.direction': 'in',
        'xtick.gridOn': True,
        'ytick.gridOn': True,
        'ytick.major.width': 5,
        'xtick.major.width': 5,
        'ytick.major.size': 5,
        'xtick.major.size': 5,
        'mathtext.fontset': 'stixsans',
        'mathtext.sf': 'sans',
        'legend.frameon': True,
        'legend.facecolor': '#FFEDCE',
        'figure.dpi': 150,
        'xtick.color': 'k',
        'ytick.color': 'k',
    }
    plt.rc('text.latex', preamble=r'\usepackage{sfmath}')
    plt.rc('mathtext', fontset='stixsans', sf='sans')
    sns.set_style('darkgrid', rc=style_params)
def color_selector(style):
    """Return a named color palette as a dict of hex strings.

    Parameters
    ----------
    style : str, "mut" or "pboc" (case-insensitive)
        ``"mut"`` gives per-mutant colors (keys are mutant names in upper
        case; double mutants are keyed as DNA-IND).  ``"pboc"`` gives the
        PBoC2e palette keyed by color descriptor.

    Returns
    -------
    dict
        Mapping of key -> hex color string.

    Raises
    ------
    ValueError
        If *style* is neither "mut" nor "pboc".
    """
    choice = style.lower()
    if choice == 'mut':
        return {'Y20I': '#738FC1', 'Q21A': '#7AA974', 'Q21M': '#AB85AC',
                'F164T': '#A97C50', 'Q294K': '#5D737E', 'Q294V': '#D56C55',
                'Q294R': '#B2AF58', 'Y20I-F164T': '#2d98da', 'Y20I-Q294K': '#34495e',
                'Y20I-Q294V': '#8854d0', 'Q21A-F164T': '#4b6584', 'Q21A-Q294K': '#EE5A24',
                'Q21A-Q294V': '#009432', 'Q21M-F164T': '#1289A7', 'Q21M-Q294K': '#6F1E51',
                'Q21M-Q294V': '#006266', 'WT': '#3C3C3C'}
    if choice == 'pboc':
        return {'green': '#7AA974', 'light_green': '#BFD598',
                'pale_green': '#DCECCB', 'yellow': '#EAC264',
                'light_yellow': '#F3DAA9', 'pale_yellow': '#FFEDCE',
                'blue': '#738FC1', 'light_blue': '#A9BFE3',
                'pale_blue': '#C9D7EE', 'red': '#D56C55', 'light_red': '#E8B19D',
                'pale_red': '#F1D4C9', 'purple': '#AB85AC',
                'light_purple': '#D4C2D9', 'dark_green': '#7E9D90', 'dark_brown': '#905426'}
    raise ValueError("Provided style must be 'pboc' or 'mut'. {} provided.".format(style))
| 37.407407 | 94 | 0.550825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,859 | 0.613531 |
2ad0ac9327d191fb59e48707001f44685e276770 | 992 | py | Python | pipeline/decorator.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | null | null | null | pipeline/decorator.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-20T22:02:21.000Z | 2021-09-21T13:55:41.000Z | pipeline/decorator.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-18T01:39:48.000Z | 2021-09-18T01:39:48.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
This code is a part of django.utils.six on https://github.com/django/django/blob/stable/2.2.x/django/utils/six.py removed form Django 3.0
To keep the backward compatibility between python 2 and 3 the decorator need to be used as well, during the time we find a proper way to
handle MetaClass overwright working on both versions (or dropping python 2 support).
"""
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Re-creates the decorated class via ``metaclass(name, bases, body)``,
    dropping slot descriptors plus ``__dict__``/``__weakref__`` from the
    body so the metaclass can rebuild them cleanly.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        slot_names = body.get('__slots__')
        if slot_names is not None:
            # A bare string means a single slot; normalize to a list.
            if isinstance(slot_names, str):
                slot_names = [slot_names]
            for name in slot_names:
                body.pop(name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
2ad3092df187e4cb74cb88c0ec1606105db4f6cc | 4,206 | py | Python | nowplaying/recognition/theaudiodb.py | whatsnowplaying/whats-now-playing | 45f5eaab5292430f86501e8c4b7cb0b9881436f4 | [
"MIT"
] | 16 | 2021-07-22T21:32:26.000Z | 2022-01-30T05:32:10.000Z | nowplaying/recognition/theaudiodb.py | whatsnowplaying/whats-now-playing | 45f5eaab5292430f86501e8c4b7cb0b9881436f4 | [
"MIT"
] | 175 | 2021-05-31T04:18:31.000Z | 2022-03-29T17:29:26.000Z | nowplaying/recognition/theaudiodb.py | whatsnowplaying/whats-now-playing | 45f5eaab5292430f86501e8c4b7cb0b9881436f4 | [
"MIT"
] | 6 | 2021-07-22T22:02:37.000Z | 2021-12-31T19:36:33.000Z | #!/usr/bin/env python3
''' start of support of theaudiodb '''
from html.parser import HTMLParser
import logging
import logging.config
import logging.handlers
import os
import requests
import requests.utils
import nowplaying.bootstrap
import nowplaying.config
from nowplaying.recognition import RecognitionPlugin
class HTMLFilter(HTMLParser):
    ''' simple class to strip HTML '''
    # Accumulates the text content of everything fed to the parser.
    # NOTE(review): this buffer is never reset, so reusing one instance
    # across multiple documents concatenates their text.
    text = ""
    def handle_data(self, data):
        # HTMLParser callback for raw text between tags.
        self.text += data
    def error(self, message):
        # Parser error hook: log instead of raising.
        logging.debug('HTMLFilter: %s', message)
class Plugin(RecognitionPlugin):
    ''' handler for TheAudioDB '''
    def __init__(self, config=None, qsettings=None):
        super().__init__(config=config, qsettings=qsettings)
        # Kept for backward compatibility; no longer used by _filter.
        self.htmlfilter = HTMLFilter()
    def _filter(self, text):
        ''' strip HTML tags from text, returning only the plain content '''
        # Fixed: use a fresh parser per call.  Reusing the single
        # self.htmlfilter instance meant its ``text`` buffer accumulated,
        # so each call also returned the output of all previous calls
        # (e.g. an artist bio prefixed with every earlier bio).
        htmlfilter = HTMLFilter()
        htmlfilter.feed(text)
        return htmlfilter.text
    def _fetch(self, api):
        ''' GET a TheAudioDB JSON endpoint; None on failure or missing key '''
        apikey = self.config.cparser.value('theaudiodb/apikey')
        if not apikey:
            return None
        try:
            logging.debug('Fetching %s', api)
            page = requests.get(
                f'https://theaudiodb.com/api/v1/json/{apikey}/{api}',
                timeout=5)
        except Exception as error:  # pylint: disable=broad-except
            logging.error('TheAudioDB hit %s', error)
            return None
        return page.json()
    def _artistdata(self, artdata):
        ''' map a TheAudioDB artist record to our metadata keys '''
        metadata = {}
        if 'strBiographyEN' in artdata:
            metadata['artistbio'] = self._filter(artdata['strBiographyEN'])
        if 'strArtistThumb' in artdata:
            metadata['artistthumb'] = artdata['strArtistThumb']
        if 'strArtistLogo' in artdata:
            metadata['artistlogo'] = artdata['strArtistLogo']
        return metadata
    def recognize(self, metadata):
        ''' do data lookup; prefer the MusicBrainz id over the name '''
        if not self.config.cparser.value('theaudiodb/enabled', type=bool):
            return None
        if 'musicbrainzartistid' in metadata:
            return self.artistdatafrommbid(metadata['musicbrainzartistid'])
        if 'artist' in metadata:
            return self.artistdatafromname(metadata['artist'])
        return None
    def artistdatafrommbid(self, mbartistid):
        ''' get artist data from a MusicBrainz artist id '''
        data = self._fetch(f'artist-mb.php?i={mbartistid}')
        if not data or 'artists' not in data or not data['artists']:
            return None
        return self._artistdata(data['artists'][0])
    def artistdatafromname(self, artist):
        ''' get artist data from an artist name '''
        if not artist:
            return None
        urlart = requests.utils.requote_uri(artist)
        data = self._fetch(f'search.php?s={urlart}')
        if not data or 'artists' not in data or not data['artists']:
            return None
        return self._artistdata(data['artists'][0])
    def providerinfo(self):  # pylint: disable=no-self-use
        ''' return list of what is provided by this recognition system '''
        return ['artistbio', 'artistlogo', 'artistthumb']
    def connect_settingsui(self, qwidget):
        ''' pass '''
    def load_settingsui(self, qwidget):
        ''' pass '''
    def verify_settingsui(self, qwidget):
        ''' pass '''
    def save_settingsui(self, qwidget):
        ''' pass '''
    def defaults(self, qsettings):
        ''' pass '''
def main():
    ''' entry point as a standalone app'''
    # Resolve the directory this module lives in so ConfigFile can locate
    # bundled resources.
    bundledir = os.path.abspath(os.path.dirname(__file__))
    logging.basicConfig(level=logging.DEBUG)
    nowplaying.bootstrap.set_qt_names()
    # need to make sure config is initialized with something
    config = nowplaying.config.ConfigFile(bundledir=bundledir)
    theaudiodb = Plugin(config=config)
    # Smoke-test both lookup paths: by MusicBrainz artist id and by name.
    print(
        theaudiodb.artistdatafrommbid('45074d7c-5307-44a8-854f-ae072e1622ae'))
    print(theaudiodb.artistdatafromname('Cee Farrow'))
if __name__ == "__main__":
    main()
| 30.70073 | 78 | 0.628626 | 3,335 | 0.792915 | 0 | 0 | 0 | 0 | 0 | 0 | 1,155 | 0.274608 |
2ad374aeec5403a30d049d72b93822e349758d54 | 2,237 | py | Python | reno/tests/test_create.py | mail2nsrajesh/reno | 7ab07f1d2677b5c40a29be4010c0729d3a908aab | [
"Apache-2.0"
] | null | null | null | reno/tests/test_create.py | mail2nsrajesh/reno | 7ab07f1d2677b5c40a29be4010c0729d3a908aab | [
"Apache-2.0"
] | null | null | null | reno/tests/test_create.py | mail2nsrajesh/reno | 7ab07f1d2677b5c40a29be4010c0729d3a908aab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from reno import create
from reno.tests import base
class TestPickFileName(base.TestCase):
@mock.patch('os.path.exists')
def test_not_random_enough(self, exists):
exists.return_value = True
self.assertRaises(
ValueError,
create._pick_note_file_name,
'somepath',
'someslug',
)
@mock.patch('os.path.exists')
def test_random_enough(self, exists):
exists.return_value = False
result = create._pick_note_file_name('somepath', 'someslug')
self.assertIn('somepath', result)
self.assertIn('someslug', result)
class TestCreate(base.TestCase):
def setUp(self):
super(TestCreate, self).setUp()
self.tmpdir = self.useFixture(fixtures.TempDir()).path
def test_create_from_template(self):
filename = create._pick_note_file_name(self.tmpdir, 'theslug')
create._make_note_file(filename, 'i-am-a-template')
with open(filename, 'r') as f:
body = f.read()
self.assertEqual('i-am-a-template', body)
def test_edit(self):
self.useFixture(fixtures.EnvironmentVariable('EDITOR', 'myeditor'))
with mock.patch('subprocess.call') as call_mock:
self.assertTrue(create._edit_file('somepath'))
call_mock.assert_called_once_with(['myeditor', 'somepath'])
def test_edit_without_editor_env_var(self):
self.useFixture(fixtures.EnvironmentVariable('EDITOR'))
with mock.patch('subprocess.call') as call_mock:
self.assertFalse(create._edit_file('somepath'))
call_mock.assert_not_called()
| 33.893939 | 75 | 0.679928 | 1,579 | 0.705856 | 0 | 0 | 520 | 0.232454 | 0 | 0 | 795 | 0.355387 |
2ad4ce045c7a9b380a36fccf92dd30a4448a06dc | 8,328 | py | Python | maindb/query.py | DesmondYuan/ArtSearch | de0331b04c5b2b6187eae0bf610eb4e44c64eb08 | [
"MIT"
] | null | null | null | maindb/query.py | DesmondYuan/ArtSearch | de0331b04c5b2b6187eae0bf610eb4e44c64eb08 | [
"MIT"
] | null | null | null | maindb/query.py | DesmondYuan/ArtSearch | de0331b04c5b2b6187eae0bf610eb4e44c64eb08 | [
"MIT"
] | null | null | null | import os
import io
import os
import pandas as pd
import numpy as np
from google.cloud import vision
from PIL import Image
from google.cloud import vision
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import mean_squared_error as MSE
import pickle as pkl
import time
import tqdm
# Precomputed feature tables and metadata served alongside the app
# (built offline and shipped under /static).
# PCA-reduced Google Vision annotation features, one row per image file.
features_g_df = pd.read_csv("/static/FeatureTable_GoogleAnnot.PCA.csv", index_col=0)
# Pooled raw-pixel features, one row per image file.
pooling_df = pd.read_csv("/static/FeatureTable_Pooling.csv", index_col=0)
# Per-image dominant colors (pickled mapping keyed by file name).
color_pkl = pkl.load(open("/static/FeatureTable_DominantColors.pkl", "rb"))
# Artwork metadata table; missing fields are shown as "Unknown".
meta = pd.read_csv("/static/metadata.csv", index_col=0).fillna("Unknown")
# Google Vision API client (credentials come from the environment).
client = vision.ImageAnnotatorClient()
# Directory holding the candidate images to search over.
path = "/static/img"
def get_nearest(fn):
    """Return the best match for image *fn* under each of the four metrics.

    Parameters
    ----------
    fn : str
        Image file name (a key into the precomputed feature tables).

    Returns
    -------
    dict
        Maps "distance_1".."distance_4" to each metric's result dict
        of the form {"best_match", "score", "time"}.
    """
    # Removed the dead ``if True:  # TODO pass a bool for filename/file``
    # placeholder that wrapped the original body.
    print("Getting the nearest...")
    fns = os.listdir(path)
    return {
        "distance_1": get_nearest_use_distance_1_fn(fn, fns),
        "distance_2": get_nearest_use_distance_2_fn(fn, fns),
        "distance_3": get_nearest_use_distance_3_fn(fn, fns),
        "distance_4": get_nearest_use_distance_4_fn(fn, fns),
    }
def get_metadata(fn):
    """Return the metadata row for image *fn* as a two-column table
    (field name, value), suitable for template rendering."""
    return meta.loc[[fn]].transpose().reset_index()
"""
Distance 1: Cosine distance on GVision features
"""
def get_nearest_use_distance_1_fn(fn, fns):
    """Nearest neighbour of *fn* by cosine distance on GVision PCA features."""
    start = time.time()
    target = features_g_df.loc[fn].values.reshape(1, -1)
    winner = "No match found"
    winner_score = 1e10
    for candidate in tqdm.tqdm(fns):
        if candidate == fn:
            continue
        other = features_g_df.loc[candidate].values.reshape(1, -1)
        dist = 1 - cosine_similarity(target, other)
        # Keep the first candidate with a strictly smaller distance.
        if dist < winner_score:
            winner = candidate
            winner_score = dist
    return {"best_match": winner, "score": np.mean(winner_score), "time": time.time() - start}
def get_google_feature(fn):
    """Return the Google-Vision PCA feature row for image *fn*."""
    return features_g_df.loc[fn]
def cosine_distance_GVision_PCA(fn1, fn2):
    """Cosine distance between two images' Google-Vision PCA features."""
    vec_a = features_g_df.loc[fn1].values.reshape(1, -1)
    vec_b = features_g_df.loc[fn2].values.reshape(1, -1)
    dist = 1 - cosine_similarity(vec_a, vec_b)
    print("Cosine distance on GVision features for {}, {} = {}".format(fn1, fn2, dist))
    return dist
"""
Distance 2: Color distance in RGB space
"""
def get_nearest_use_distance_2_fn(fn, fns):
    """Nearest neighbour of *fn* by dominant-color distance in RGB space."""
    start = time.time()
    target = get_dominant_color(fn)
    winner = "No match found"
    winner_score = 1e10
    for candidate in tqdm.tqdm(fns):
        if candidate == fn:
            continue
        dist = color_distance(target, get_dominant_color(candidate))
        # Keep the first candidate with a strictly smaller distance.
        if dist < winner_score:
            winner = candidate
            winner_score = dist
    return {"best_match": winner, "score": winner_score, "time": time.time() - start}
def get_dominant_color(fn):
    """Return the precomputed dominant colors for image *fn*."""
    return color_pkl[fn]
# def get_dominant_color_deprecated(fn):
# if path not in fn:
# fn = os.path.join(path, fn)
# with io.open(fn, "rb") as image_file:
# content = image_file.read()
# image = vision.Image(content=content)
# response = client.image_properties(image=image)
# colors = response.image_properties_annotation.dominant_colors.colors
# colors = np.array(
# [
# (color.pixel_fraction, color.color.red, color.color.green, color.color.blue)
# for color in colors
# ]
# )
# return np.sort(colors, axis=0)[::-1]
def color_distance(colors1, colors2):
    """Greedy weighted palette distance between two dominant-color sets.

    Each color is a sequence ``(pixel_fraction, R, G, B)``.  For every
    significant color of *colors1* (fraction >= 0.05), the "redmean"
    weighted RGB distance to the closest remaining significant color of
    *colors2* is accumulated, weighted by the product of the two pixel
    fractions.  Each matched color of *colors2* is consumed so it is
    used at most once.

    Returns
    -------
    float
        Accumulated distance (0.0 for identical significant palettes).
    """
    remaining = list(colors2)
    dist = 0.0
    for c1 in colors1:
        if c1[0] < 0.05:
            continue
        candidates = []
        for idx, c2 in enumerate(remaining):
            # Fixed: the original re-tested c1's fraction here (always
            # false at this point); filter the *candidate* color instead.
            if c2[0] < 0.05:
                continue
            dR, dG, dB = c1[1] - c2[1], c1[2] - c2[2], c1[3] - c2[3]
            fraction_weight = c1[0] * c2[0]
            r_mean = (c1[1] + c2[1]) / 2
            # "Redmean" approximation of perceptual RGB distance.
            dcolor = np.sqrt(
                (2 + r_mean / 256) * dR ** 2
                + 4 * dG ** 2
                + (3 - r_mean / 256) * dB ** 2
            )
            candidates.append((dcolor, fraction_weight, idx))
        if not candidates:
            continue  # no significant colors of colors2 left to match
        # Fixed: np.sort(tmp_dist, axis=0)[0] sorted each column
        # independently, pairing a distance with an unrelated weight and
        # color; select the whole closest (distance, weight, index) row.
        best = min(candidates, key=lambda item: item[0])
        dist += best[0] * best[1]
        # Fixed: remove by index instead of set arithmetic -- colors may
        # be unhashable (e.g. numpy rows) and sets silently deduplicate.
        del remaining[best[2]]
    return dist
"""
Distance 3: Cosine distance on rawdata (center cropping)
"""
def get_nearest_use_distance_3_fn(fn, fns):
    """Nearest neighbour of *fn* by cosine distance on pooled raw pixels."""
    start = time.time()
    others = list(set(fns) - set([fn]))
    target = get_pooled_img(fn)
    dists = [
        cosine_distance_raw_center_crop(target, get_pooled_img(other))
        for other in tqdm.tqdm(others)
    ]
    best = np.argsort(dists)[0]
    return {
        "best_match": others[best],
        "score": dists[best],
        "time": time.time() - start,
    }
def get_pooled_img(fn):
    """Return the pooled-pixel feature row for image *fn* as a 1xN array."""
    return pooling_df.loc[fn].values.reshape(1, -1)
# def get_nearest_use_distance_3_fn_deprecated(fn, fns):
# cc = time.time()
# fns = list(set(fns) - set([fn]))
# fn = os.path.join(path, fn)
# fns = [os.path.join(path, f) for f in fns]
# y = get_pic_array(fn)
# scores = []
# for fn_iter in tqdm.tqdm(fns):
# x = get_pic_array(fn_iter)
# score = cosine_distance_raw_center_crop(y, x)
# scores.append(score)
# min_pos = np.argsort(scores)[0]
# return {
# "best_match": fns[min_pos],
# "score": scores[min_pos],
# "time": time.time() - cc,
# }
def crop_to_square(pic_array1, pic_array2):
    """Center-crop both arrays to squares of the same side length.

    The side is the smallest of the four height/width dimensions, so both
    crops are directly comparable.

    :param pic_array1: first image array (at least 2-D).
    :param pic_array2: second image array (at least 2-D).
    :return: tuple of the two center-cropped arrays.
    """
    half = min(pic_array1.shape[0], pic_array1.shape[1],
               pic_array2.shape[0], pic_array2.shape[1]) / 2

    def _center_crop(arr):
        # Crop a square of side 2*half around the array center.
        rows, cols = arr.shape[0], arr.shape[1]
        return arr[int(rows / 2 - half):int(rows / 2 + half),
                   int(cols / 2 - half):int(cols / 2 + half)]

    return _center_crop(pic_array1), _center_crop(pic_array2)
def get_pic_array(img_filename, res=32, use_dask=True):
    """Load an image as a pixel array.

    With ``use_dask`` the image is averaged to grayscale and strided down to
    a ``res`` x ``res`` array; otherwise the full-resolution RGB array is
    returned via PIL.

    :param img_filename: path of the image to load.
    :param res: target side length for the dask downsampling path.
    :param use_dask: choose the dask (grayscale, downsampled) loader.
    :return: 2-D (dask path) or 3-D RGB (PIL path) array.
    """
    import dask_image.imread
    if use_dask:
        im = dask_image.imread.imread(img_filename)[0]
        # Average the channels, then subsample rows/cols down to res x res.
        pic = im.mean(axis=2)[:: im.shape[0] // res, :: im.shape[1] // res][:res, :res]
    else:
        with Image.open(img_filename) as im:
            # BUG FIX: PIL's Image.size is (width, height) while getdata()
            # yields pixels row by row, so the target shape must be
            # (height, width, 3).  The original swapped the two axes,
            # corrupting non-square images.
            pic = np.array(im.getdata()).reshape(im.size[1], im.size[0], 3)
    return pic
def cosine_distance_raw_center_crop(pic1, pic2, use_dask=True):
    """Cosine distance between two images (1 - mean cosine similarity).

    With ``use_dask`` the (already pooled, equally shaped) arrays are
    compared directly; otherwise both images are center-cropped first.
    """
    if use_dask:
        return 1 - np.mean(cosine_similarity(pic1, pic2))
    cropped1, cropped2 = crop_to_square(pic1, pic2)
    # NOTE(review): only channel 0 is compared (range(1)) -- presumably
    # intentional; confirm before extending to all three channels.
    similarities = [
        cosine_similarity(cropped1[:, :, layer], cropped2[:, :, layer])
        for layer in range(1)
    ]
    return 1 - np.mean(similarities)
"""
Distance 4: Euclidean distance (MSE) on rawdata (center cropping)
"""
def get_nearest_use_distance_4_fn(fn, fns):
    """Find the file in ``fns`` nearest to ``fn`` under distance 4
    (euclidean distance on pooled raw data).

    :param fn: reference filename (excluded from the candidates).
    :param fns: candidate filenames.
    :return: dict with ``best_match``, its ``score`` and elapsed ``time``.
    """
    start = time.time()
    candidates = list(set(fns) - set([fn]))
    reference = get_pooled_img(fn)
    scores = [
        euclidean_distance_raw_center_crop(reference, get_pooled_img(candidate))
        for candidate in tqdm.tqdm(candidates)
    ]
    best = np.argsort(scores)[0]
    return {
        "best_match": candidates[best],
        "score": scores[best],
        "time": time.time() - start,
    }
# def get_nearest_use_distance_4_fn_deprecated(fn, fns):
# cc = time.time()
# fns = list(set(fns) - set(fn))
# fn = os.path.join(path, fn)
# fns = [os.path.join(path, f) for f in fns]
# y = get_pic_array(fn)
# best_match = "Not found"
# best_score = 1e10
# for fn_iter in tqdm.tqdm(fns):
# x = get_pic_array(fn_iter)
# score = euclidean_distance_raw_center_crop(y, x)
# if best_score > score:
# best_score = score
# best_match = fn_iter
# return {"best_match": best_match, "score": score, "time": time.time() - cc}
def euclidean_distance_raw_center_crop(pic1, pic2, use_dask=True):
    """Euclidean (mean squared error) distance between two images.

    With ``use_dask`` the (already pooled, equally shaped) arrays are
    compared directly; otherwise both images are center-cropped first.

    :param pic1: first image array.
    :param pic2: second image array.
    :param use_dask: compare directly instead of center-cropping first.
    :return: mean squared error between the two arrays.
    """
    if use_dask:
        dist = np.mean(MSE(pic1, pic2))
    else:
        # BUG FIX: this branch was a copy-paste of the cosine distance; the
        # euclidean variant must use MSE on the center-cropped arrays,
        # mirroring the dask branch above.
        x1, x2 = crop_to_square(pic1, pic2)
        dist = np.mean(MSE(x1, x2))
    return dist
| 30.173913 | 98 | 0.598223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,714 | 0.325889 |
2ad5e8acfb1924c4fca0cbad2cf3c61e5ea92167 | 203 | py | Python | tests/common/scripts/cleaning/test_strip.py | arkhn/fhir-river | a12179c34fad131d16dedc20c61297ed83d805e6 | [
"Apache-2.0"
] | 42 | 2020-03-25T16:47:30.000Z | 2022-01-31T21:26:38.000Z | tests/common/scripts/cleaning/test_strip.py | arkhn/fhir-river | a12179c34fad131d16dedc20c61297ed83d805e6 | [
"Apache-2.0"
] | 367 | 2020-04-08T12:46:34.000Z | 2022-02-16T01:15:32.000Z | tests/common/scripts/cleaning/test_strip.py | arkhn/fhir-river | a12179c34fad131d16dedc20c61297ed83d805e6 | [
"Apache-2.0"
] | 3 | 2020-05-14T08:24:46.000Z | 2021-08-04T05:00:16.000Z | from common.scripts.cleaning import strip
def test_clean_identity():
assert strip(None) == ""
assert strip("NaN") == ""
row_input = "Holà chicanos"
assert strip(row_input) == row_input
| 22.555556 | 41 | 0.674877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.122549 |
2ad6982b913eaaeaa3f0f9e019314d56df2662a1 | 2,186 | py | Python | app.py | Munawwar/image-background-remove-tool | 90b348d1a99599992b5ee074aa314d399c9172ed | [
"Apache-2.0"
] | null | null | null | app.py | Munawwar/image-background-remove-tool | 90b348d1a99599992b5ee074aa314d399c9172ed | [
"Apache-2.0"
] | null | null | null | app.py | Munawwar/image-background-remove-tool | 90b348d1a99599992b5ee074aa314d399c9172ed | [
"Apache-2.0"
] | null | null | null | from main import process
import os
import platform
import subprocess
import sys
import threading
import webview
def worker_thread(window, inputfiles, model):
    """Background worker: strip the background from each selected image.

    Outputs are written as PNGs to a ``bg-removed`` subfolder next to the
    inputs; progress is shown on the UI button via JS, and the output folder
    is opened when processing finishes.

    :param window: pywebview window used for JS progress updates.
    :param inputfiles: iterable of input image paths.
    :param model: model identifier forwarded to ``process``.
    """
    print('processing started ...')
    count = 0
    for inputfile in inputfiles:
        count += 1
        (inputfilepath, inputfilename) = os.path.split(inputfile)
        outputfile = os.path.join(
            inputfilepath,
            'bg-removed',
            # only support PNG files as of now
            os.path.splitext(inputfilename)[0] + '.png'
        )
        # Show "Processing i of n ..." on the file-picker button.
        window.evaluate_js(
            "window.app.fileUploadButton.textContent = 'Processing "
            + str(count) + ' of ' + str(len(inputfiles)) + " ...'"
        )
        process(inputfile, outputfile, model)
    window.evaluate_js("window.app.fileUploadButton.textContent = 'Select photos'")
    print('processing complete')
    # NOTE(review): if inputfiles is empty, inputfilepath is unbound here.
    open_folder(os.path.join(inputfilepath, 'bg-removed'))
def onWindowStart(window):
    """Window-start hook: expose the file-dialog handler to the JS frontend."""
    def openFileDialog():
        # Let the user pick one or more images, then run background removal.
        file_types = ('Image Files (*.png;*.jpg;*.jpeg)', 'All files (*.*)')
        inputfiles = window.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=True, file_types=file_types)
        print(inputfiles)
        # Model choice is read back from the page's JS state.
        model = window.evaluate_js("window.app.getModel()")
        print('use model =', model)
        if inputfiles != None:
            window.evaluate_js("window.app.fileUploadButton.disabled = true")
            workerThread = threading.Thread(target=worker_thread, args=(window, inputfiles, model,))
            workerThread.start()
            # NOTE(review): join() blocks this handler until the worker
            # finishes, so the thread gives no real concurrency here.
            workerThread.join()
            window.evaluate_js("window.app.fileUploadButton.disabled = false")
    # expose a function during the runtime
    window.expose(openFileDialog)
def open_folder(path):
    """Reveal *path* in the platform's file manager (Windows/macOS/Linux)."""
    system = platform.system()
    if system == "Windows":
        os.startfile(path)
        return
    # macOS ships "open"; assume an xdg-compliant desktop elsewhere.
    opener = "open" if system == "Darwin" else "xdg-open"
    subprocess.Popen([opener, path])
def getfile(filename):
    """Return the text content of *filename* resolved relative to this script.

    Uses a context manager so the handle is closed even if reading raises
    (the original closed it manually and leaked the handle on error).

    :param filename: file name relative to this module's directory.
    :return: the file's full text content.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path, 'r') as f:
        return f.read()
html = getfile('index.html')
window = webview.create_window('Automated BG Removal Tool', html=html)
webview.start(onWindowStart, window) | 32.147059 | 109 | 0.667887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.241537 |
2ad7054044b890689ad7e39e0a17d04847e28d9a | 347 | py | Python | grpc_django/models.py | soheltarir/grpc-django | 890038de2d7b7c36e4fd37ef1727d3568b3f68f5 | [
"MIT"
] | 12 | 2018-08-03T08:02:32.000Z | 2020-05-06T03:49:43.000Z | grpc_django/models.py | localhost-sys/grpc-django | 890038de2d7b7c36e4fd37ef1727d3568b3f68f5 | [
"MIT"
] | 7 | 2018-07-18T07:18:44.000Z | 2022-02-10T10:28:42.000Z | grpc_django/models.py | localhost-sys/grpc-django | 890038de2d7b7c36e4fd37ef1727d3568b3f68f5 | [
"MIT"
] | 5 | 2019-10-13T08:22:04.000Z | 2021-06-11T02:25:04.000Z | from django.contrib.auth.models import AbstractUser
# Create your models here.
class ContextUser(AbstractUser):
    """
    Abstract model class representing the auth user arriving as context in
    GRPC requests.
    """
    class Meta:
        abstract = True

    def __str__(self):
        # BUG FIX: the original used " ".format([...]), which ignores its
        # arguments and always returned a single space; join the name parts.
        return " ".join([self.first_name, self.last_name])
| 23.133333 | 90 | 0.688761 | 265 | 0.763689 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.377522 |
2ad83d1e90396e90bab93b0cb7b84859c83b1c0f | 2,258 | py | Python | models/vgg_cifar_lp.py | mengjian0502/BNFusionTrain | 2cfb4a3dceb4dc468bbee88a1f65a66b29ee4de4 | [
"MIT"
] | null | null | null | models/vgg_cifar_lp.py | mengjian0502/BNFusionTrain | 2cfb4a3dceb4dc468bbee88a1f65a66b29ee4de4 | [
"MIT"
] | null | null | null | models/vgg_cifar_lp.py | mengjian0502/BNFusionTrain | 2cfb4a3dceb4dc468bbee88a1f65a66b29ee4de4 | [
"MIT"
] | null | null | null |
import math
import torch.nn as nn
from .modules import QConv2d, QLinear
def make_layers(cfg, batch_norm=False, wbit=4, abit=4):
    """Build the VGG feature extractor described by a config list.

    'M'/'A' tokens insert 2x2 max/avg pooling; integer tokens append a
    quantized 3x3 convolution with that many output channels (optionally
    followed by BatchNorm) plus a ReLU.

    :param cfg: list of layer tokens ('M', 'A', or channel counts).
    :param batch_norm: insert BatchNorm2d after every convolution.
    :param wbit: weight quantization bit-width for QConv2d.
    :param abit: activation quantization bit-width for QConv2d.
    :return: nn.Sequential holding the assembled layers.
    """
    layers = []
    channels = 3
    for token in cfg:
        if token == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif token == 'A':
            layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
        else:
            layers.append(QConv2d(channels, token, kernel_size=3, padding=1,
                                  wbit=wbit, abit=abit))
            if batch_norm:
                layers.append(nn.BatchNorm2d(token))
            layers.append(nn.ReLU(inplace=True))
            channels = token
    return nn.Sequential(*layers)
cfg = {
7: [128, 128, 'M', 256, 256, 'M', 512, 512, 'M'],
16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """Quantized VGG for CIFAR: QConv2d features plus a depth-dependent head.

    Depth 7 uses a quantized two-layer classifier; deeper variants use the
    standard full-precision dropout/linear stack.
    """
    def __init__(self, num_classes=10, depth=16, batch_norm=False, wbit=4, abit=4, channel_wise=0):
        # NOTE(review): channel_wise is accepted but never used here --
        # possibly consumed by callers or a leftover; confirm.
        super(VGG, self).__init__()
        self.features = make_layers(cfg[depth], batch_norm, wbit=wbit, abit=abit)
        if depth == 7:
            # Quantized classifier head for the small VGG-7 variant.
            self.classifier = nn.Sequential(
                QLinear(8192, 1024, wbit=wbit, abit=abit),
                nn.ReLU(True),
                QLinear(1024, num_classes, wbit=wbit, abit=abit),
            )
        else:
            # Full-precision classifier head for VGG-16/19.
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(512, 512),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(512, 512),
                nn.ReLU(True),
                nn.Linear(512, num_classes),
            )
        # He-style initialization for every (plain) Conv2d weight.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                # m.bias.data.zero_()

    def forward(self, x):
        """Run features, flatten, then classify."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class vgg7_Q:
base = VGG
args = list()
kwargs={'depth':7, 'batch_norm':True} | 33.205882 | 99 | 0.513286 | 1,263 | 0.559345 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.037644 |
2ad8e8c5d5e6c6097e19695b80c61293811bdb56 | 12,732 | py | Python | library/central_templates.py | ageoffron/aruba-central-ansible-role | 1216b2208689b2eddfb8f93fec3abf46e155198e | [
"MIT"
] | 1 | 2020-09-28T11:18:10.000Z | 2020-09-28T11:18:10.000Z | library/central_templates.py | ageoffron/aruba-central-ansible-role | 1216b2208689b2eddfb8f93fec3abf46e155198e | [
"MIT"
] | null | null | null | library/central_templates.py | ageoffron/aruba-central-ansible-role | 1216b2208689b2eddfb8f93fec3abf46e155198e | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Central Templates Ansible Module
'''
# MIT License
#
# Copyright (c) 2020 Aruba, a Hewlett Packard Enterprise company
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: central_templates
version_added: 2.9.0
short_descriptions: REST API module for templates on Aruba Central
description: This module provides a mechanism to interact with or upload
configuration templates that are used for group-level and
device-level configuration on Aruba Central
options:
action:
description:
- Action to be performed on the template(s)
- "get_template_text" gets the contents of a template
- "get_all" gets info on all templates in a group
- "update" updates attributes of an existing template
- "create" creates a new template in a group
- "delete" deletes an existing template from a group
required: true
type: str
choices:
- get_template_text
- get_all
- update
- create
- delete
group_name:
description:
- Name of the group
required: true
type: str
template_name:
description:
- Name of the template on Aruba Central
- Used with actions "get_template_text", "create", "update", and
"delete"
required: false
type: str
device_type:
description:
- Type of device for which the template file is applicable
- Used with action "create"
- Used optionally with actions "get_all" and "update"
required: false
type: str
choices:
- IAP
- ArubaSwitch
- CX
- MobilityController
version:
description:
- Firmware version property of template
- Used with action "create"
- Used optionally with actions "get_all" and "update"
required: false
type: str
default: ALL
model:
description:
- Model property of template
- Used with action "create"
- Used optionally with actions "get_all" and "update"
- For the "ArubaSwitch" device_type (i.e. AOS-S switches),
the part number (J number) can be used
- e.g. 2920, J9727A, etc.
required: false
type: str
default: ALL
local_file_path:
description:
- Full local path of template file to be uploaded
- For HP Switches, the template text should include the following
commands to maintain connection with Central:
- aruba-central enable
- aruba-central url https://< URL | IP >/ws
- Used with actions "create" and "update"
required: false
type: str
limit:
description:
- Maximum number of records to be returned
- Used optionally as a filter parameter for "get_all"
required: false
type: int
default: 20
offset:
description:
- Number of items to be skipped before returning the data, which is
useful for pagination
- Used optionally as a filter parameter for get_all
required: false
type: int
default: 0
"""
EXAMPLES = """
#Usage Examples
- name: Get all templates in a given group
central_templates:
action: get_all
group_name: new-group
limit: 20
offset: 0
- name: Get templates in a given group for a particular device type
central_templates:
action: get_all
group_name: new-group
device_type: IAP
limit: 20
offset: 0
version: ALL
model: ALL
- name: Get template text
central_templates:
action: get_template_text
group_name: new-group
template_name: iap-temp
- name: Upload a new template file and create a new template for a given device type # NOQA
central_templates:
action: create
group_name: new-group
template_name: iap-temp
device_type: IAP
version: ALL
model: ALL
local_file_path: /home/iap_template.txt
- name: Update an existing template
central_templates:
action: update
group_name: new-group
template_name: iap-temp
device_type: IAP
version: ALL
model: ALL
local_file_path: /home/modified_iap_template.txt
- name: Delete an existing template
central_templates:
action: delete
group_name: new-group
template_name: iap-temp
"""
import json # NOQA
from ansible.module_utils.basic import AnsibleModule # NOQA
from ansible.module_utils.central_http import CentralApi # NOQA
def error_msg(action):
    '''
    Build the error payload returned when required playbook parameters for
    the given template action are missing.

    :param action: template action being attempted.
    :return: dict with keys ``resp`` (message string) and ``code`` (400).
    '''
    if action == "get_template_text" or action == "delete":
        resp = "Template name is not present in the playbook"
    elif action == "create" or action == "update":
        resp = "Template name, device type, or local file path is not" \
               " present in the playbook"
    else:
        # Robustness fix: the original left ``resp`` unbound (raising
        # UnboundLocalError) for any unexpected action value.
        resp = "Missing required parameters in the playbook"
    return {"resp": resp, "code": 400}
def get_all_templates(central_api, group_name, **kwargs):
    '''
    Fetch info on all templates in a group, forwarding only the filter
    parameters that are actually set.

    :param central_api: CentralApi client used to issue the request.
    :param group_name: name of the group to query.
    :param kwargs: optional filters (limit, offset, device_type, ...).
    :return: API response from the GET request.
    '''
    endpoint = "/configuration/v1/groups/" + str(group_name) + "/templates"
    # Drop unset (None) filters before building the query string.
    query_params = {key: val for key, val in kwargs.items() if val is not None}
    headers = central_api.get_headers(False, "get")
    path = central_api.get_url(endpoint, query_params)
    return central_api.get(path=path, headers=headers)
def get_template_text(central_api, group_name, template_name):
    '''
    Fetch the template text (the group configuration for applicable
    devices), or an error payload when no template name was supplied.

    :param central_api: CentralApi client used to issue the request.
    :param group_name: name of the group owning the template.
    :param template_name: name of the template to read.
    :return: API response, or the error_msg payload when name is missing.
    '''
    if template_name is None:
        return error_msg("get_template_text")
    path = ("/configuration/v1/groups/" + str(group_name) + "/templates/"
            + str(template_name))
    headers = central_api.get_headers(True, "get")
    return central_api.get(path=path, headers=headers)
def create_update_template(central_api, group_name, template_name, **kwargs):
    '''
    Upload a new group template (action "create") or modify an existing one
    (action "update") for the given device attributes.

    :param central_api: CentralApi client used to issue the request.
    :param group_name: name of the group owning the template.
    :param template_name: name of the template on Central.
    :param kwargs: device_type, version, model, action and file (path).
    :return: API response, or the error_msg payload when params are missing.
    '''
    if template_name is None or None in kwargs.values():
        return error_msg("create")
    endpoint = "/configuration/v1/groups/" + str(group_name) + "/templates"
    query_params = {
        "name": template_name,
        "device_type": kwargs['device_type'],
        "version": kwargs['version'],
        "model": kwargs['model'],
    }
    headers = central_api.get_headers(True, "post")
    path = central_api.get_url(endpoint, query_params)
    filepath = kwargs['file']
    if kwargs['action'] == "create":
        result = central_api.post(path=path, headers=headers,
                                  filename=filepath)
    elif kwargs['action'] == "update":
        result = central_api.patch(path=path, headers=headers,
                                   filename=filepath)
    return result
def delete_template(central_api, group_name, template_name):
    '''
    Delete an existing template from an existing group.

    :param central_api: CentralApi client used to issue the request.
    :param group_name: name of the group owning the template.
    :param template_name: name of the template to delete.
    :return: API response, or the error_msg payload when name is missing.
    '''
    if template_name is None:
        return error_msg("delete")
    headers = central_api.get_headers(False, "delete")
    path = ("/configuration/v1/groups/" + str(group_name) + "/templates/"
            + str(template_name))
    return central_api.delete(path=path, headers=headers)
def api_call(module):
    '''
    Dispatch the playbook parameters to the matching template API helper.

    :param module: AnsibleModule carrying the playbook parameters.
    :return: dict with ``resp`` and ``code`` from the chosen helper.
    '''
    central_api = CentralApi(module)
    params = module.params
    action = params.get('action').lower()
    group_name = params.get('group_name')
    template_name = params.get('template_name')
    if action == "get_template_text":
        result = get_template_text(central_api, group_name, template_name)
    elif action == "get_all":
        result = get_all_templates(central_api=central_api,
                                   group_name=group_name,
                                   limit=params.get('limit'),
                                   offset=params.get('offset'),
                                   template=template_name,
                                   device_type=params.get('device_type'),
                                   version=params.get('version'),
                                   model=params.get('model'))
    elif action in ("create", "update"):
        result = create_update_template(central_api=central_api,
                                        group_name=group_name,
                                        template_name=template_name,
                                        device_type=params.get('device_type'),
                                        version=params.get('version'),
                                        model=params.get('model'),
                                        action=action,
                                        file=params.get('local_file_path'))
    elif action == "delete":
        result = delete_template(central_api, group_name, template_name)
    else:
        module.fail_json(changed=False, msg="Unsupported or no action provided"
                         " in playbook")
    return result
def main():
    '''
    Central-template-related parameter definitions and response handling for
    the module.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, type='str',
                        choices=["get_template_text", "get_all", "update",
                                 "create", "delete"]),
            group_name=dict(required=True, type='str'),
            limit=dict(required=False, type='int', default=20),
            offset=dict(required=False, type='int', default=0),
            template_name=dict(required=False, type='str'),
            device_type=dict(required=False, type='str',
                             choices=["IAP", "ArubaSwitch", "CX",
                                      "MobilityController"]),
            version=dict(required=False, type='str', default="ALL"),
            # BUG FIX: the key was misspelled "defaul", so the documented
            # default of "ALL" was never applied to the model parameter.
            model=dict(required=False, type='str', default="ALL"),
            local_file_path=dict(required=False, type='path', default=None)
        ))
    success_codes = [200, 201]
    exit_codes = [304, 400, 404]
    # Only non-"get" actions modify state on Central.
    changed = False
    if "get" not in module.params.get('action'):
        changed = True
    result = api_call(module)
    # API helpers may return JSON text; decode it when possible.
    try:
        result['resp'] = json.loads(result['resp'])
    except (TypeError, ValueError):
        pass
    if result['code'] and result['code'] in success_codes:
        module.exit_json(changed=changed, msg=result['resp'],
                         response_code=result['code'])
    elif result['code'] and result['code'] in exit_codes:
        module.exit_json(changed=False, msg=result['resp'],
                         response_code=result['code'])
    else:
        module.fail_json(changed=False, msg=result['resp'],
                         response_code=result['code'])
if __name__ == '__main__':
main()
| 35.564246 | 92 | 0.613415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,251 | 0.56951 |
2ad91fd74a0335e007baaadc3de54f8cc151b31f | 897 | py | Python | habitat/_common/models/date.py | matrach/habitatOS | 1ae2a3caf6f279cf6d6d20bcd81f24d50f61d7d3 | [
"MIT"
] | 1 | 2021-02-01T19:04:39.000Z | 2021-02-01T19:04:39.000Z | habitat/_common/models/date.py | matrach/habitatOS | 1ae2a3caf6f279cf6d6d20bcd81f24d50f61d7d3 | [
"MIT"
] | null | null | null | habitat/_common/models/date.py | matrach/habitatOS | 1ae2a3caf6f279cf6d6d20bcd81f24d50f61d7d3 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from habitat.timezone import get_timezone
timezone = get_timezone()
class MissionDate(models.Model):
    """Abstract model contributing a mission-timezone date field."""
    # Stored as a CharField (not DateField) -- presumably because mission
    # time uses a custom format from the habitat timezone helper; confirm.
    date = models.CharField(
        verbose_name=_(timezone.DATE_VERBOSE_NAME),
        help_text=_(timezone.DATE_HELP_TEXT),
        max_length=15,
        default=timezone.date)

    class Meta:
        abstract = True
class MissionTime(models.Model):
    """Abstract model contributing a mission-timezone time field."""
    # Defaults to the current mission time supplied by the timezone helper.
    time = models.TimeField(
        verbose_name=_(timezone.TIME_VERBOSE_NAME),
        help_text=_(timezone.TIME_HELP_TEXT),
        default=timezone.time)

    class Meta:
        abstract = True
class MissionDateTime(MissionDate, MissionTime):
    """Abstract model combining the mission date and time fields."""
    def datetime(self):
        """Return the current mission datetime from the timezone helper."""
        return timezone.datetime
    # Django-admin display options for the datetime() accessor.
    datetime.allow_tags = False
    datetime.short_description = _(timezone.DATETIME_VERBOSE_NAME)

    class Meta:
        abstract = True
| 21.878049 | 66 | 0.703456 | 733 | 0.817168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2adc0ea6b9bb37fa43a06659d0619937d02a6131 | 3,097 | py | Python | tools/view_control_loop.py | gbalke/bldc-controller | 99e4e71d5bdc0c7c7901d886aa7709c66db8b718 | [
"MIT"
] | null | null | null | tools/view_control_loop.py | gbalke/bldc-controller | 99e4e71d5bdc0c7c7901d886aa7709c66db8b718 | [
"MIT"
] | null | null | null | tools/view_control_loop.py | gbalke/bldc-controller | 99e4e71d5bdc0c7c7901d886aa7709c66db8b718 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import sys
import serial
import time
from math import sin, cos, pi
import argparse
import ast
from comms import *
from boards import *
from livegraph import livegraph
if __name__ == '__main__':
    # CLI: drive one or more motor boards in a chosen control mode and
    # live-plot the measured vs. commanded q-axis current.
    parser = argparse.ArgumentParser(description='Drive motor module(s) with a given control mode and plot current measurements.')
    parser.add_argument('serial', type=str, help='Serial port')
    parser.add_argument('--baud_rate', type=int, help='Serial baud rate')
    parser.add_argument('board_ids', type=str, help='Board ID (separate with comma)')
    parser.add_argument('mode', type=str, help='Control mode: \
            current (Id[A], Iq[A]), \
            phase (dc,dc,dc), \
            torque (N*m), \
            velocity (rad/s), \
            position (rad), \
            pos_vel (rad,rad/s), \
            pos_ff (rad,ff[A]), \
            pwm (dc)')
    parser.add_argument('actuations', type=str, help='Actuation amount in the units of the selected mode (if requires multiple args, separate by comma)')
    parser.set_defaults(baud_rate=COMM_DEFAULT_BAUD_RATE, offset=COMM_BOOTLOADER_OFFSET)
    args = parser.parse_args()
    # Normalize comma-separated CLI literals into lists of ints/values.
    make_list = lambda x: list(x) if (type(x) == list or type(x) == tuple) else [x]
    make_int = lambda x: [int(y) for y in x]
    board_ids = make_int(make_list(ast.literal_eval(args.board_ids)))
    actuations = make_list(ast.literal_eval(args.actuations))
    mode = args.mode
    ser = serial.Serial(port=args.serial, baudrate=args.baud_rate, timeout=0.05)
    # Bring the boards out of the bootloader and initialize the motors.
    client = BLDCControllerClient(ser)
    initialized = initBoards(client, board_ids)
    client.leaveBootloader(board_ids)
    client.resetInputBuffer()
    initMotor(client, board_ids)
    def updateCurrent(i):
        # Sampler for livegraph: per board, (iq measured, iq commanded);
        # zeros are appended on a communication error so labels stay aligned.
        data = []
        for board_id in board_ids:
            try:
                driveMotor(client, board_ids, actuations, mode)
                # Read the iq calculated
                read = struct.unpack('<f', client.readRegisters([board_id], [0x3003], [1])[0])
                data.append(read)
                # Read the iq command
                read = struct.unpack('<f', client.readRegisters([board_id], [0x3020], [1])[0])
                data.append(read)
            except (ProtocolError, struct.error):
                #print("Failed to communicate with board: ", board_id)
                data.append([0.0])
                data.append([0.0])
        return time.time(), data
    # Two plot labels per board, flattened into one list.
    flatten = lambda l: [item for sublist in l for item in sublist]
    labels = []
    labels.extend([[str(bid) + '\'s iq Reading', str(bid) + '\'s iq PID output'] for bid in board_ids])
    labels = flatten(labels)
    graph = livegraph(updateCurrent, labels, sample_interval=1, window_size = 2000)
    graph.start()
| 40.75 | 153 | 0.572167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,019 | 0.329028 |
2add0268352da063cc8ff52c49d8f25e54f7941d | 1,889 | py | Python | pylot/planning/messages.py | fangedward/pylot | a742b3789ee8e7fa2d692ae22bda1e2960ed9345 | [
"Apache-2.0"
] | null | null | null | pylot/planning/messages.py | fangedward/pylot | a742b3789ee8e7fa2d692ae22bda1e2960ed9345 | [
"Apache-2.0"
] | null | null | null | pylot/planning/messages.py | fangedward/pylot | a742b3789ee8e7fa2d692ae22bda1e2960ed9345 | [
"Apache-2.0"
] | null | null | null | import erdos
class WaypointsMessage(erdos.Message):
    """Message class to be used to send waypoints.

    Optionally can also send a target speed for each waypoint.

    Args:
        timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
            the message.
        waypoints (list(:py:class:`~pylot.utils.Transform`), optional): List of
            waypoint transforms.
        target_speeds (list(float), optional): List of target speeds.
    """
    def __init__(self, timestamp, waypoints, target_speeds=None):
        super(WaypointsMessage, self).__init__(timestamp, None)
        self.waypoints = waypoints
        if target_speeds is not None:
            assert len(target_speeds) == len(waypoints), \
                "Length of target speeds must match length of waypoints"
        # BUG FIX: always assign the attribute; previously it was only set
        # inside the if-branch, so __str__ raised AttributeError whenever
        # target_speeds was None.
        self.target_speeds = target_speeds

    def __str__(self):
        # BUG FIX: corrected the class name and the missing closing
        # parenthesis in the formatted representation.
        return \
            'WaypointsMessage(timestamp: {}, waypoints: {}, target speeds: {})'\
            .format(self.timestamp, self.waypoints, self.target_speeds)
class BehaviorMessage(erdos.Message):
    """Message carrying a behavior target: lane, speed, deadline and an
    optional leading vehicle to follow."""
    def __init__(self,
                 timestamp,
                 target_lane_id,
                 target_speed,
                 target_deadline,
                 target_leading_vehicle_id=None):
        super(BehaviorMessage, self).__init__(timestamp, None)
        self.target_lane_id = target_lane_id
        self.target_speed = target_speed
        self.target_deadline = target_deadline
        self.target_leading_vehicle_id = target_leading_vehicle_id

    def __str__(self):
        return (f'BehaviorMessage(timestamp: {self.timestamp}, '
                f'target_lane_id: {self.target_lane_id}, '
                f'target_speed: {self.target_speed}, '
                f'target_deadline: {self.target_deadline}, '
                f'target_leading_vehicle_id: {self.target_leading_vehicle_id})')
| 38.55102 | 79 | 0.640021 | 1,870 | 0.989942 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.349921 |
2add26f359559b67a49ef36a3a8133c20b0cdcba | 1,959 | py | Python | Intro/3_Smooth_Sailing/10_commonCharacterCount.py | RylandGomez/CS-Arcade | 086afa37fdba5bb2ca21fd93ed616bb5aca09d5d | [
"MIT"
] | null | null | null | Intro/3_Smooth_Sailing/10_commonCharacterCount.py | RylandGomez/CS-Arcade | 086afa37fdba5bb2ca21fd93ed616bb5aca09d5d | [
"MIT"
] | null | null | null | Intro/3_Smooth_Sailing/10_commonCharacterCount.py | RylandGomez/CS-Arcade | 086afa37fdba5bb2ca21fd93ed616bb5aca09d5d | [
"MIT"
] | null | null | null | def solution(s1, s2):
'''
EXPLANATION
-------------------------------------------------------------------
I approached this problem by creating two dictionaries, one for
each string. These dictionaries are formatted with characters as
keys, and counts as values. I then iterate over each string,
counting the instances of each character.
Finally, I iterate over the first dictionary, and if that character
exists in the second dictionary, I add the lesser of the two values
to create a total count of shared characters.
-------------------------------------------------------------------
'''
s1_dict = {}
s2_dict = {}
count = 0
for letter in s1:
s1_dict[letter] = s1.count(letter)
s1.replace(letter, "")
for letter in s2:
s2_dict[letter] = s2.count(letter)
s2.replace(letter, "")
for letter in s1_dict:
if letter in s2_dict:
if s1_dict[letter] > s2_dict[letter]:
count += s2_dict[letter]
else:
count += s1_dict[letter]
return count
def oneline(s1, s2):
    '''
    Same computation as ``solution``: for every distinct character of
    ``s1``, add the smaller of its occurrence counts in the two strings.
    Characters absent from ``s2`` contribute a minimum of zero, which
    implicitly restricts the sum to shared characters.
    '''
    total = 0
    for ch in set(s1):
        total += min(s1.count(ch), s2.count(ch))
    return total
| 37.673077 | 71 | 0.552833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.68147 |
2add4f7159e9dd3d9ad91008943b222b20c46420 | 295 | py | Python | semlabs/iteralg/sem1.py | Revengerzz/Labs | 07eb514962e203ce5af8e8604ea92cf91fc8f3c4 | [
"MIT"
] | null | null | null | semlabs/iteralg/sem1.py | Revengerzz/Labs | 07eb514962e203ce5af8e8604ea92cf91fc8f3c4 | [
"MIT"
] | null | null | null | semlabs/iteralg/sem1.py | Revengerzz/Labs | 07eb514962e203ce5af8e8604ea92cf91fc8f3c4 | [
"MIT"
] | null | null | null | a = float(input('Введите число: '))
def koren (a):
x = 3
k = 0
if (a<0):
raise ValueError('Ошибка, введите число, меньшее нуля')
while k != (a**0.5):
k = 0.5 * (x + a/x)
x = k
return(k)
print('Корень из числа ' + str(a) + ' равен ' + str(koren(a)))
| 21.071429 | 63 | 0.488136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.39548 |
2addab6e6db1d5d6df636a69706b6de36199b161 | 2,350 | py | Python | dataset_tools/dataset_case/base_case.py | jprashant21/ElNeuKGQA | ebafa0a3d3e779e487a665fabc4f9ed50f21f134 | [
"MIT"
] | null | null | null | dataset_tools/dataset_case/base_case.py | jprashant21/ElNeuKGQA | ebafa0a3d3e779e487a665fabc4f9ed50f21f134 | [
"MIT"
] | null | null | null | dataset_tools/dataset_case/base_case.py | jprashant21/ElNeuKGQA | ebafa0a3d3e779e487a665fabc4f9ed50f21f134 | [
"MIT"
] | null | null | null | from typing import Optional
class BaseCaseMethodNotImplemented(Exception):
"""
Exception when a BaseCase method has not been implemented.
"""
pass
class BaseCase:
"""
Base class for representing a dataset case with a mandatory question text
and an optional question id and sparql query answer.
"""
@property
def question_id(self) -> Optional[int]:
"""
Get the question id, if exists.
:return: question integer identifier or None if there is no identifier.
"""
return None
@property
def question_text(self) -> str:
"""
Get the question text.
:exception: BaseCaseMethodNotImplemented if method has not been implemented.
:return: question string.
"""
raise BaseCaseMethodNotImplemented
@property
def sparql_query(self) -> Optional[str]:
"""
Get the SPARQL query answer.
:return: SPARQL query string or None if there is no SPARQL query answer.
"""
return None
class QuestionCase(BaseCase):
"""
Question case class for representing a dataset question case with
a mandatory question text and an optional question id. No SPARQL query answer required.
"""
def __init__(self, question_text: str, question_id: Optional[int] = None):
"""
Question case constructor.
:param question_text: question case string.
:param question_id: question case identifier.
"""
self.__id = question_id
self.__text = question_text
@property
def question_id(self) -> Optional[int]:
"""
Get the question id, if exists.
:return: question integer identifier or None if there is no identifier.
"""
return self.__id
@property
def question_text(self) -> str:
"""
Get the question text.
:return: question string.
"""
return self.__text
class Question(QuestionCase):
"""
Question class for representing a question case that only requires a question text.
No question id nor SPARQL query answer required.
"""
def __init__(self, question_text: str):
"""
Question constructor.
:param question_text: question case string.
"""
super().__init__(question_text)
| 25.543478 | 91 | 0.628085 | 2,310 | 0.982979 | 0 | 0 | 1,083 | 0.460851 | 0 | 0 | 1,453 | 0.618298 |
2ae05deb85d181924e6db8964898b466526392a4 | 2,151 | py | Python | 06.8_bst_spiral.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | 06.8_bst_spiral.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | 06.8_bst_spiral.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, data=None, left=None, right=None):
self._left = left
self._data = data
self._right = right
@property
def left(self):
return self._left
@left.setter
def left(self, left):
self._left = left
@property
def right(self):
return self._right
@right.setter
def right(self, right):
self._right = right
@property
def data(self):
return self._data
@data.setter
def data(self, left):
self._data = data
def spiral(root):
    """Print the tree's node values in spiral (zigzag) level order.

    Two stacks alternate roles: popping from one while pushing children onto
    the other — left-then-right on one pass, right-then-left on the next —
    which reverses the traversal direction at every level.
    """
    current_level = [root]
    next_level = []
    while current_level or next_level:
        while current_level:
            node = current_level.pop()
            print(node.data)
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        while next_level:
            node = next_level.pop()
            print(node.data)
            if node.right:
                current_level.append(node.right)
            if node.left:
                current_level.append(node.left)
def spiral_reverse(root):
    """Print the tree's node values in zigzag level order, mirror of spiral().

    The level after the root is emitted left-to-right, the next one
    right-to-left, and so on, again using two alternating stacks.
    """
    current_level = [root]
    next_level = []
    while current_level or next_level:
        while current_level:
            node = current_level.pop()
            print(node.data)
            if node.right:
                next_level.append(node.right)
            if node.left:
                next_level.append(node.left)
        while next_level:
            node = next_level.pop()
            print(node.data)
            if node.left:
                current_level.append(node.left)
            if node.right:
                current_level.append(node.right)
if __name__ == '__main__':
    # Build the same sample tree bottom-up with nested constructor calls:
    #             1
    #           /   \
    #          2     3
    #         / \   / \
    #        4   5 18  7
    #           / \     \
    #          17  6     8
    #         /
    #        19
    tree = Node(
        1,
        left=Node(2,
                  left=Node(4),
                  right=Node(5, left=Node(17, left=Node(19)), right=Node(6))),
        right=Node(3,
                   left=Node(18),
                   right=Node(7, right=Node(8))),
    )
    spiral_reverse(tree)
| 20.682692 | 54 | 0.572292 | 456 | 0.211994 | 0 | 0 | 310 | 0.144119 | 0 | 0 | 10 | 0.004649 |
2ae0a2bc2853c1126fffc979b1b96432d2186477 | 4,531 | py | Python | app/run.py | Jaghwani/Disaster-Response-Pipeline | 869ff0d150d9b247a6098cf384eff60ccb0f0344 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | app/run.py | Jaghwani/Disaster-Response-Pipeline | 869ff0d150d9b247a6098cf384eff60ccb0f0344 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | app/run.py | Jaghwani/Disaster-Response-Pipeline | 869ff0d150d9b247a6098cf384eff60ccb0f0344 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
# Flask application instance; the route handlers below register on it.
app = Flask(__name__)
def tokenize(text):
    """Split the text into word tokens, lemmatize each, lowercase and strip."""
    lemmatizer = WordNetLemmatizer()
    return [
        lemmatizer.lemmatize(token).lower().strip()
        for token in word_tokenize(text)
    ]
# Load the cleaned disaster-message table from the SQLite database.
engine = create_engine('sqlite:///../data/DisasterDB.db')
df = pd.read_sql_table('disaster_messages', engine)
# Load the trained classifier pickled by the training pipeline.
model = joblib.load("../models/classifier.pkl")
# Index page: overview plots of the training data, plus the query input box.
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with three bar charts describing the data."""
    # Message counts per genre.
    messages_per_genre = df.groupby('genre').count()['message']
    genre_labels = list(messages_per_genre.index)

    # Totals per category: drop the non-category columns, sum, sort descending.
    category_totals = (
        df.drop(['original', 'genre', 'message', 'id'], axis=1)
        .sum()
        .sort_values(ascending=False)
    )
    category_labels = list(category_totals.index)

    # Same category totals, restricted to messages of the "social" genre.
    social_totals = (
        df[df['genre'] == 'social']
        .drop(['original', 'genre', 'message', 'id'], axis=1)
        .sum()
        .sort_values(ascending=False)
    )
    social_labels = list(social_totals.index)

    # One bar chart per (labels, values, title, x-axis label) specification.
    chart_specs = [
        (genre_labels, messages_per_genre,
         'Distribution of Message Genres', 'Genre'),
        (category_labels, category_totals,
         'Distribution of Messages Categories', 'Categories'),
        (social_labels, social_totals,
         'Distribution of Social Messages Categories', 'Categories'),
    ]
    graphs = [
        {
            'data': [Bar(x=labels, y=values)],
            'layout': {
                'title': title,
                'yaxis': {'title': "Count"},
                'xaxis': {'title': x_title},
            },
        }
        for labels, values, title, x_title in chart_specs
    ]

    # One DOM id per chart plus the JSON payload consumed by master.html.
    ids = ['graph-{}'.format(i) for i in range(len(graphs))]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# Results page: classifies the user's message and shows the predicted labels.
@app.route('/go')
def go():
    """Classify the submitted message and render the classification page."""
    query = request.args.get('query', '')
    # The model emits one binary label per category; pair them with the
    # category column names (columns 4 onward of the message table).
    predicted_labels = model.predict([query])[0]
    results = dict(zip(df.columns[4:], predicted_labels))
    return render_template(
        'go.html',
        query=query,
        classification_result=results
    )
def main():
    """Start the Flask development server on all interfaces, port 3001."""
    server_options = {'host': '0.0.0.0', 'port': 3001, 'debug': True}
    app.run(**server_options)
# Run the development server only when executed as a script, not on import.
if __name__ == '__main__':
    main()
2ae152f052fcede641510f72585c3855762a8eb7 | 119 | py | Python | workflow/scripts/data/__init__.py | percyfal/wg-genealogy-smk | f9ed6e5876ca20c6693625237e57b22acd3f4cee | [
"MIT"
] | 1 | 2022-03-21T17:43:08.000Z | 2022-03-21T17:43:08.000Z | workflow/scripts/data/__init__.py | percyfal/wg-genealogy-smk | f9ed6e5876ca20c6693625237e57b22acd3f4cee | [
"MIT"
] | null | null | null | workflow/scripts/data/__init__.py | percyfal/wg-genealogy-smk | f9ed6e5876ca20c6693625237e57b22acd3f4cee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__all__ = ("natural_earth",)
from .natural_earth import natural_earth
| 17 | 40 | 0.689076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.504202 |
2ae1ab750c7d91fe0b34736e84b79b4270ed4fbb | 355 | py | Python | 1_mundo_exercicios/ex012.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | 1_mundo_exercicios/ex012.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | 1_mundo_exercicios/ex012.py | GuilhermeLima182/CursoDePython | 7e72b117142794c38cbb14284d0fa6e1dbee5bf6 | [
"MIT"
] | null | null | null | #Faça um programa que pergunte ao usuario o valor do produto,
# Ask the user for a product's price and print both the original price and
# the price after a 5% discount (prompts/output are in Portuguese).
price = float(input('Qual o valor do produto? R$ '))
discount = (price * 5) / 100
final_price = price - discount
print('O valor do produto é R${}'.format(price))
print('O novo valor com o desconto é de R${}'.format(final_price))
| 39.444444 | 62 | 0.721127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.569832 |
2ae22ebc1ff5af7ea983e4b1cf1879b8fdb9382c | 9,880 | py | Python | python/plot_meridional_avg_multiple_fields_climo.py | E3SM-Project/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 1 | 2017-06-07T13:13:32.000Z | 2017-06-07T13:13:32.000Z | python/plot_meridional_avg_multiple_fields_climo.py | ACME-Climate/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 31 | 2017-06-07T00:26:58.000Z | 2018-04-09T17:03:15.000Z | python/plot_meridional_avg_multiple_fields_climo.py | ACME-Climate/a-prime | a8c084ab6f727904a2b38d8a93b9c83e2f978e3f | [
"BSD-3-Clause"
] | 1 | 2018-08-05T23:43:59.000Z | 2018-08-05T23:43:59.000Z | #
# Copyright (c) 2017, UT-BATTELLE, LLC
# All rights reserved.
#
# This software is released under the BSD license detailed
# in the LICENSE file in the top level a-prime directory
#
###Work in Progress: Plot meridional averages for different fields in the same plot.
###07/03/2017
import matplotlib as mpl
#changing the default backend to agg to resolve contouring issue on rhea
mpl.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy
from netCDF4 import Dataset
from read_monthly_data_ts import read_monthly_data_ts
from get_season_months_index import get_season_months_index
from get_days_in_season_months import get_days_in_season_months
from get_reg_area_avg import get_reg_area_avg
from aggregate_ts_weighted import aggregate_ts_weighted
from get_reg_seasonal_avg import get_reg_seasonal_avg
from get_season_name import get_season_name
from get_reg_avg_climo import get_reg_avg_climo
from optparse import OptionParser
import argparse
def plot_meridional_avg_multiple_fields_climo (indir,
                                               casename,
                                               field_name,
                                               interp_grid,
                                               interp_method,
                                               ref_case,
                                               ref_interp_grid,
                                               ref_interp_method,
                                               begin_yr,
                                               end_yr,
                                               begin_month,
                                               end_month,
                                               aggregate,
                                               debug = False):
    # Plot meridional averages of several fields for a test case against a
    # reference case, one stacked panel per field. Python 2 syntax (print
    # statements) -- this module is not Python 3 compatible as written.
    #
    # NOTE(review): the body reads `field_names`, `reg`, `reg_name`,
    # `ref_case_dir`, `plots_dir` and `colors`, none of which are parameters;
    # they only resolve through the __main__ block's globals. The call site
    # below also passes field_names=/reg=/reg_name= keywords that this
    # signature does not accept -- confirm the intended signature.
    # NOTE(review): `get_reg_meridional_avg_climo` is never imported in this
    # file (only `get_reg_avg_climo` is) -- confirm the import.
    n_fields = len(field_names)
    for i,field_name in enumerate(field_names):
        print __name__, 'casename: ', casename
        # Meridional average of the test case for the current field.
        meridional_avg, lon_reg, units = get_reg_meridional_avg_climo (
            indir = indir,
            casename = casename,
            field_name = field_names[i],
            interp_grid = interp_grid,
            interp_method = interp_method,
            begin_yr = begin_yr,
            end_yr = end_yr,
            begin_month = begin_month,
            end_month = end_month,
            reg = reg,
            debug = debug)
        if i == 0:
            # Allocate outputs once the averaged profile length is known.
            plot_field = numpy.zeros((n_fields, meridional_avg.shape[0]))
            units_list = []
        plot_field[i, :] = meridional_avg
        units_list.append(units)
        # Translate model variable names into the reference dataset's naming.
        if ref_case == 'CERES-EBAF':
            if field_name == 'FLNT': field_name_ref = 'FLUT'
            if field_name == 'RESTOM': field_name_ref = 'RESTOA'
            if field_name == 'FSNT': field_name_ref = 'FSNTOA'
        elif ref_case == 'HadISST':
            if field_name == 'TS': field_name_ref = 'SST'
        else:
            field_name_ref = field_name
        # NOTE(review): field_name_ref stays unbound when ref_case is
        # CERES-EBAF/HadISST and the field has no mapping above.
        ref_meridional_avg, lon_reg, ref_units = get_reg_meridional_avg_climo (
            indir = ref_case_dir,
            casename = ref_case,
            field_name = field_name_ref,
            interp_grid = ref_interp_grid,
            interp_method = ref_interp_method,
            begin_yr = begin_yr,
            end_yr = end_yr,
            begin_month = begin_month,
            end_month = end_month,
            reg = reg,
            debug = debug)
        if i == 0: ref_plot_field = numpy.zeros((n_fields, meridional_avg.shape[0]))
        ref_plot_field[i, :] = ref_meridional_avg
        if debug: print __name__, 'ref_plot_field.shape ', ref_plot_field.shape
    if debug: print __name__, 'plot_field: ', plot_field
    # Field-wise means (computed but not used below).
    plot_field_mean = numpy.mean(plot_field, axis = 1)
    ref_plot_field_mean = numpy.mean(ref_plot_field, axis = 1)
    # One panel per field, sharing the longitude axis.
    f, ax = plt.subplots(n_fields, sharex = True, figsize=(8.5,11))
    nlon = lon_reg.shape[0]
    f.text(0.5, 0.04, 'Longitude', ha='center', fontsize = 24)
    season = get_season_name(begin_month, end_month)
    plt.suptitle(reg_name + '\n Meridional Avg. ' + season, fontsize = 24)
    ref_case_text = ref_case + ' ' + field_name_ref + ' climo'
    for i,field_name in enumerate(field_names):
        # Pad the y-limits by half a standard deviation around the extrema.
        min_plot = min(numpy.amin(plot_field[i, :]), ref_plot_field[i, 0])
        max_plot = max(numpy.amax(plot_field[i, :]), ref_plot_field[i, 0])
        y_axis_ll = min_plot - 0.5*numpy.std(plot_field[i, :])
        y_axis_ul = max_plot + 0.5 * numpy.std(plot_field[i,:])
        ax[i].axis([lon_reg[0],lon_reg[-1], y_axis_ll, y_axis_ul])
        print 'lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot: ', \
              lon_reg[0],lon_reg[-1], 1.1*min_plot, 1.1*max_plot
        test_line, = ax[i].plot(lon_reg, plot_field[i, :], color = colors[i], linewidth = 1.0, label = casename)
        ref_line, = ax[i].plot(lon_reg, ref_plot_field[i, :], color = 'black', linewidth = 1.0, label = ref_case)
        if i == 0:
            ax[i].legend(bbox_to_anchor = (1.0,1.5), handles=[ref_line, test_line], fontsize = 10)
        ax[i].set_title(field_name, fontsize = 12)
        ax[i].text(0.04, 0.5, field_name + ' (' + units_list[i] + ')', va='center', rotation='vertical', fontsize = 16)
        ax[i].get_yaxis().get_major_formatter().set_useOffset(False)
        ax[i].yaxis.set_major_locator(MaxNLocator(6))
        for tick in ax[i].yaxis.get_major_ticks():
            tick.label.set_fontsize(10)
        for tick in ax[i].xaxis.get_major_ticks():
            tick.label.set_fontsize(10)
    plt.subplots_adjust(hspace=0.3)
    mpl.rcParams['savefig.dpi']=300
    # NOTE(review): `meridional_avg` is a numpy array here and `reg` a list,
    # so this string concatenation raises TypeError; presumably the literal
    # 'meridional_avg' and a region name string were intended -- confirm.
    outfile = plots_dir + '/' + casename + '_' \
              + meridional_avg + '_' + reg + '_' + season + '.png'
    plt.savefig(outfile)
    #plt.show()
if __name__ == "__main__":
    # Command-line driver: parse the options, unpack them into module-level
    # names (the plotting function also reads several of these as globals),
    # then produce the comparison plot.
    parser = argparse.ArgumentParser(usage = "python %prog [options]")
    parser.add_argument("-d", "--debug", dest = "debug", default = False,
                        help = "debug option to print some data")
    parser.add_argument("--indir", dest = "indir",
                        help = "filepath to directory model data")
    parser.add_argument("-c", "--casename", dest = "casename",
                        help = "casename of the run")
    parser.add_argument("-f", "--field_name", dest = "field_names", nargs = '+',
                        help = "variable name")
    parser.add_argument("--interp_grid", dest = "interp_grid",
                        help = "variable name")
    parser.add_argument("--interp_method", dest = "interp_method",
                        help = "method used for interpolating the test case e.g. conservative_mapping")
    parser.add_argument("--ref_case_dir", dest = "ref_case_dir",
                        help = "filepath to ref_case directory")
    parser.add_argument("--ref_case", dest = "ref_case",
                        help = "reference casename")
    parser.add_argument("--ref_interp_grid", dest = "ref_interp_grid",
                        help = "name of the interpolated grid of reference case")
    parser.add_argument("--ref_interp_method", dest = "ref_interp_method",
                        help = "method used for interpolating the reference case e.g. conservative_mapping")
    parser.add_argument("--begin_yr", dest = "begin_yr", type = int,
                        help = "begin year")
    parser.add_argument("--end_yr", dest = "end_yr", type = int,
                        help = "end year")
    parser.add_argument("--begin_month", dest = "begin_month", type = int,
                        help = "begin_month", default = 0)
    parser.add_argument("--end_month", dest = "end_month", type = int,
                        help = "end_month", default = 11)
    # NOTE(review): help text below says "end_month" -- copy-paste from the
    # previous option; should describe aggregation.
    parser.add_argument("--aggregate", dest = "aggregate", type = int,
                        help = "end_month", default = 1)
    parser.add_argument("--reg", dest = "reg", nargs = '+',
                        help = "regions to be analyzed/plotted")
    parser.add_argument("--reg_name", dest = "reg_name", nargs = '+',
                        help = "names of regions to be placed in plots")
    parser.add_argument("--plots_dir", dest = "plots_dir",
                        help = "filepath to GPCP directory")
    args = parser.parse_args()
    debug = args.debug
    indir = args.indir
    casename = args.casename
    field_names = args.field_names
    interp_grid = args.interp_grid
    interp_method = args.interp_method
    ref_case_dir = args.ref_case_dir
    ref_case = args.ref_case
    ref_interp_grid = args.ref_interp_grid
    ref_interp_method = args.ref_interp_method
    begin_yr = args.begin_yr
    end_yr = args.end_yr
    begin_month = args.begin_month
    end_month = args.end_month
    aggregate = args.aggregate
    reg = args.reg
    reg_name = args.reg_name
    plots_dir = args.plots_dir
    # Line colors, one per field panel (at most six fields supported).
    colors = ['b', 'g', 'r', 'c', 'm', 'y']
    x = mpl.get_backend()
    print 'backend: ', x
    # NOTE(review): the function definition above does not accept
    # field_names=/reg=/reg_name= keywords; this call raises TypeError as
    # written -- confirm intended signature.
    plot_meridional_avg_multiple_fields_climo(
        indir = indir,
        casename = casename,
        field_names = field_names,
        interp_grid = interp_grid,
        interp_method = interp_method,
        ref_case = ref_case,
        ref_interp_grid = ref_interp_grid,
        ref_interp_method = ref_interp_method,
        begin_yr = begin_yr,
        end_yr = end_yr,
        begin_month = begin_month,
        end_month = end_month,
        reg = reg,
        reg_name = reg_name,
        aggregate = aggregate,
        debug = debug)
| 37.003745 | 119 | 0.566093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,701 | 0.172166 |
2ae2318f1e53b32601c5c8db2c16277213b2f472 | 1,670 | py | Python | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 11 | 2020-10-02T04:27:50.000Z | 2022-01-05T03:41:03.000Z | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 1 | 2020-12-15T03:52:45.000Z | 2020-12-15T23:28:04.000Z | sample/sample.py | dogwood008/python-kabusapi | 7e7a5ac232e037c651b5447b408d8b0b6727c9b0 | [
"MIT"
] | 5 | 2021-02-21T16:25:50.000Z | 2022-02-15T08:11:43.000Z | import kabusapi
url = "localhost"
port = "18081" # test endpoint; production uses 18080
password = "hogehoge"

# Initial setup: acquire an API token with the password.
api = kabusapi.Context(url, port, password)
# Display the acquired token.
print(api.token)

# Alternative setup with an explicit token; no password required.
api = kabusapi.Context(url, port, token='fugafuga')

# Place an order (cash buy).
data = {
    "Password": "hoge",
    "Symbol": 8306, # MUFG
    "Exchange": 1,
    "SecurityType": 1,
    "Side": 2,
    "CashMargin": 1,
    "MarginTradeType": None,
    "DelivType": 1,
    "FundType": "02",
    "AccountType": 4,
    "Qty": 100,
    "ClosePositionOrder": None,
    "Price": 0,
    "ExpireDay": 0,
    "FrontOrderType": 10,
}
response = api.sendorder(**data)

# Cancel an order.
data = {
    "OrderId": "hoge",
    "Password": "fuga",
}
response = api.cancelorder(**data)

# Trading capacity (cash).
response = api.wallet.cash()
# Trading capacity (cash, for a specific symbol).
data = {
    "symbol": 8306,
    "exchange": 1,
}
response = api.wallet.cash(**data)

# Trading capacity (margin).
response = api.wallet.margin()
# Trading capacity (margin, for a specific symbol).
data = {
    "symbol": 8306,
    "exchange": 1,
}
response = api.wallet.margin(**data)

# Market price and order-book information.
data = {
    "symbol": 8306,
    "exchange": 1,
}
response = api.board(**data)

# Symbol information.
data = {
    "symbol": 8306,
    "exchange": 1,
}
response = api.symbol(**data)

# Query orders and executions.
response = api.orders()

# Query open positions.
response = api.positions()

# Register symbols for monitoring.
data = {
    "Symbols": [
        {"Symbol": 8306, "Exchange": 1, },
        {"Symbol": 9433, "Exchange": 1, },
    ]
}
response = api.register(**data)

# Unregister specific symbols.
data = {
    "Symbols": [
        {"Symbol": 8306, "Exchange": 1, },
        {"Symbol": 9433, "Exchange": 1, },
    ]
}
response = api.unregister(**data)

# Unregister all symbols.
response = api.unregister.all()
| 14.396552 | 51 | 0.575449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 891 | 0.455521 |
2ae48ee7262087d6a799cdd263fa2699c0692509 | 2,766 | py | Python | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 7 | 2021-08-28T18:20:45.000Z | 2022-02-01T07:35:59.000Z | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 1 | 2020-05-30T17:57:11.000Z | 2020-05-30T20:44:53.000Z | tests/runtime/doubling_vector.py | yangdanny97/chocopy-python-frontend | d0fb63fc744771640fa4d06076743f42089899c1 | [
"MIT"
] | 2 | 2022-02-05T06:16:16.000Z | 2022-02-24T11:07:09.000Z | # A resizable list of integers
# ChocoPy runtime-test fixture: a growable integer list. Keep within the
# ChocoPy language subset (typed attributes, explicit self annotations).
class Vector(object):
    # Attributes
    # items: backing storage; cells at index >= size are unused padding.
    items: [int] = None
    # size: number of elements actually stored.
    size: int = 0
    # Constructor: start with a single padding slot.
    def __init__(self:"Vector"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector, growing the storage if it is full.
    def append(self:"Vector", item: int):
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Above this capacity, growth reverts to one slot at a time.
    doubling_limit:int = 16
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            # Below half the limit: double the backing list in one step.
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Builds a DoublingVector holding the integers i, i+1, ..., j-1
# (empty when i >= j).
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Runtime checks for the classes above. __assert__ is a builtin of the
# ChocoPy test harness, not standard Python.
vec:Vector = None
num:int = 0
# Create a vector and populate it with The Numbers
vec = DoublingVector()
for num in [4, 8, 15, 16, 23, 42]:
    vec.append(num)
__assert__(vec.capacity() == 8)
__assert__(vec.size == 6)
__assert__(vec.items[0] == 4)
__assert__(vec.items[1] == 8)
__assert__(vec.items[2] == 15)
__assert__(vec.items[3] == 16)
__assert__(vec.items[4] == 23)
__assert__(vec.items[5] == 42)
# extras from doubling
__assert__(vec.items[6] == 15)
__assert__(vec.items[7] == 16)
# Same contents with the base Vector: capacity grows one slot at a time.
vec = Vector()
for num in [4, 8, 15, 16, 23, 42]:
    vec.append(num)
__assert__(vec.capacity() == 6)
__assert__(vec.size == 6)
__assert__(vec.items[0] == 4)
__assert__(vec.items[1] == 8)
__assert__(vec.items[2] == 15)
__assert__(vec.items[3] == 16)
__assert__(vec.items[4] == 23)
__assert__(vec.items[5] == 42)
# vrange edge cases: single element, two elements, shifted start,
# empty range, reversed bounds, and a longer run.
vec = vrange(0, 1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 1)
__assert__(vec.items[0] == 0)
vec = vrange(0, 2)
__assert__(vec.capacity() == 2)
__assert__(vec.size == 2)
__assert__(vec.items[0] == 0)
__assert__(vec.items[1] == 1)
vec = vrange(1, 3)
__assert__(vec.capacity() == 2)
__assert__(vec.size == 2)
__assert__(vec.items[0] == 1)
__assert__(vec.items[1] == 2)
vec = vrange(1, 1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 0)
vec = vrange(0, -1)
__assert__(vec.capacity() == 1)
__assert__(vec.size == 0)
vec = vrange(1, 100)
__assert__(vec.size == 99)
| 24.696429 | 63 | 0.629067 | 1,113 | 0.402386 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.164497 |
2ae50d70a3a83bb9efe62ed2d9ccae0243224d10 | 2,330 | py | Python | refinery/units/blockwise/map.py | bronxc/refinery | 9448facf48a0008f27861dd1a5ee8f5218e6bb86 | [
"BSD-3-Clause"
] | null | null | null | refinery/units/blockwise/map.py | bronxc/refinery | 9448facf48a0008f27861dd1a5ee8f5218e6bb86 | [
"BSD-3-Clause"
] | null | null | null | refinery/units/blockwise/map.py | bronxc/refinery | 9448facf48a0008f27861dd1a5ee8f5218e6bb86 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Dict, Optional, Sequence
from refinery.units.blockwise import Arg, BlockTransformation
from refinery.lib.tools import isbuffer
class map(BlockTransformation):
    """
    Each block of the input data which occurs as a block of the index argument is replaced by the
    corresponding block of the image argument. If a block size is specified, and if the index or
    image argument are byte sequences, they are unpacked into chunks of that size. To prevent any
    automatic chunking, the `refinery.lib.argformats.DelayedArgument.btoi` handler can be used.
    """
    # Translation table consulted by process_block; populated only for the
    # duration of a process() call.
    _map: Optional[Dict[int, int]]

    def __init__(
        self,
        index: Arg.NumSeq(help='index characters'),
        image: Arg.NumSeq(help='image characters'),
        blocksize=1
    ):
        super().__init__(blocksize=blocksize, index=index, image=image)
        self._map = None

    def process(self, data):
        src: Sequence[int] = self.args.index
        dst: Sequence[int] = self.args.image
        # Unless operating on single bytes, split raw byte-sequence arguments
        # into blocks of the configured size.
        if not self.bytestream:
            if isbuffer(src):
                self.log_info(F'chunking index sequence into blocks of size {self.args.blocksize}')
                src = list(self.chunk(src))
                self.log_debug(F'index sequence: {src}')
            if isbuffer(dst):
                self.log_info(F'chunking image sequence into blocks of size {self.args.blocksize}')
                dst = list(self.chunk(dst))
                self.log_debug(F'image sequence: {dst}')
        if len(set(src)) != len(src):
            raise ValueError('The index sequence contains duplicates.')
        if len(src) > len(dst):
            raise ValueError('The index sequence is longer than the image sequence.')
        if self.bytestream:
            # Single-byte blocks: build a full 256-entry translation table
            # and rewrite the buffer in place.
            pairs = dict(zip(src, dst))
            table = bytes(pairs.get(c, c) for c in range(0x100))
            buffer = data if isinstance(data, bytearray) else bytearray(data)
            buffer[:] = (table[b] for b in buffer)
            return buffer
        # Multi-byte blocks: expose the mapping to process_block for the
        # duration of the base-class driven block traversal.
        try:
            self._map = dict(zip(src, dst))
            return super().process(data)
        finally:
            self._map = None

    def process_block(self, token):
        # Unmapped blocks pass through unchanged.
        return self._map.get(token, token)
| 40.172414 | 99 | 0.621459 | 2,133 | 0.915451 | 0 | 0 | 0 | 0 | 0 | 0 | 765 | 0.328326 |
2ae5dfb215b19942bd9a30e35a3057db94d2a78f | 299 | py | Python | Python-codes-CeV/51-Arithmetic_P.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
] | 1 | 2021-02-22T03:53:23.000Z | 2021-02-22T03:53:23.000Z | Python-codes-CeV/51-Arithmetic_P.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
] | null | null | null | Python-codes-CeV/51-Arithmetic_P.py | engcristian/Python | 726a53e9499fd5d0594572298e59e318f98e2d36 | [
"MIT"
] | null | null | null | '''
Arithmetic progression with 50 elements.
'''
# Read the first term and the common difference ("reason"), then print the
# 50-term arithmetic progression.
first_term = int(input('Type the first term of this A.P: '))
step = int(input('Type the reason of this A.P: '))
# nth-term formula a_n = a_1 + (n - 1) * d, with n = 50.
final_term = first_term + (50 - 1) * step
for term in range(first_term, final_term + step, step):
    print(term, end=' ► ')
| 29.9 | 60 | 0.668896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.445183 |
2ae6050a70fc0ee48b50e9bc45933afac8b8ea4a | 11,360 | py | Python | src/cascade_at/executor/dismod_db.py | ihmeuw/cascade-at | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 1 | 2019-10-14T23:18:04.000Z | 2019-10-14T23:18:04.000Z | src/cascade_at/executor/dismod_db.py | ihmeuw/cascade | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 35 | 2018-07-17T18:37:33.000Z | 2020-03-06T13:31:35.000Z | src/cascade_at/executor/dismod_db.py | ihmeuw/cascade | a5b1b5da1698163fd3bbafc6288968dd9c398096 | [
"MIT"
] | 4 | 2018-07-13T00:01:35.000Z | 2019-09-02T23:56:11.000Z | #!/usr/bin/env python
import logging
import sys
from pathlib import Path
from typing import Union, List, Dict, Any, Optional, Tuple
import os
import numpy as np
import pandas as pd
from cascade_at.core import CascadeATError
from cascade_at.context.model_context import Context
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.dismod.api.dismod_extractor import DismodExtractor
from cascade_at.dismod.api.dismod_filler import DismodFiller
from cascade_at.dismod.api.run_dismod import run_dismod_commands
from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.executor.args.args import DmCommands, DmOptions, ParentLocationID, SexID
from cascade_at.executor.args.args import ModelVersionID, BoolArg, LogLevel, StrArg, IntArg
from cascade_at.inputs.measurement_inputs import MeasurementInputs
from cascade_at.model.grid_alchemy import Alchemy
from cascade_at.saver.results_handler import ResultsHandler
from cascade_at.settings.settings_config import SettingsConfig
from cascade_at.model.priors import Gaussian, _Prior
from cascade_at.dismod.api.fill_extract_helpers.posterior_to_prior import format_rate_grid_for_ihme
# Module-level logger for this executor script.
LOG = get_loggers(__name__)

# Command-line interface for the dismod_db executor. Each entry is a
# cascade_at argument type; the flag order here determines help output order.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    ParentLocationID(),
    SexID(),
    DmCommands(),
    DmOptions(),
    BoolArg('--fill', help='whether or not to fill the dismod database with data'),
    BoolArg('--prior-samples', help='whether or not the prior came from samples or just a mean fit'),
    IntArg('--prior-parent', help='the location ID of the parent database to grab the prior for'),
    IntArg('--prior-sex', help='the sex ID of the parent database to grab prior for'),
    IntArg('--prior-mulcov', help='the model version id where mulcov stats is passed in', required=False),
    BoolArg('--save-fit', help='whether or not to save the fit'),
    BoolArg('--save-prior', help='whether or not to save the prior'),
    LogLevel(),
    StrArg('--test-dir', help='if set, will save files to the directory specified')
])
class DismodDBError(CascadeATError):
    """Raised when there is an error with running the dismod_db script."""
    # Idiom fix: dropped the redundant `pass` — the docstring alone is a
    # sufficient class body.
def get_prior(path: Union[str, Path], location_id: int, sex_id: int,
              rates: List[str], samples: bool = True) -> Dict[str, Dict[str, np.ndarray]]:
    """
    Gets priors from a path to a database for a given location ID and sex ID.
    """
    extractor = DismodExtractor(path=path)
    # Draws become the prior grid for the child of this location/sex pair.
    return extractor.gather_draws_for_prior_grid(
        location_id=location_id,
        sex_id=sex_id,
        rates=rates,
        samples=samples,
    )
def get_mulcov_priors(model_version_id: int) -> Dict[Tuple[str, str, str], _Prior]:
    """
    Read in covariate multiplier statistics from a specific model version ID
    and returns a dictionary with a prior object for that covariate
    multiplier type, covariate name, and rate or integrand.

    Returns an empty dictionary when no mulcov_stats.csv file exists for the
    model version or when the file holds no rows.

    Parameters
    ----------
    model_version_id
        The model version ID to pull covariate multiplier statistics from
    """
    # Dismod mulcov types mapped onto the cascade's alpha/beta/gamma naming.
    type_alias = {'rate_value': 'alpha', 'meas_value': 'beta', 'meas_noise': 'gamma'}
    context = Context(model_version_id=model_version_id)
    stats_file = os.path.join(context.outputs_dir, 'mulcov_stats.csv')
    if not os.path.exists(stats_file):
        return {}
    stats = pd.read_csv(stats_file)
    if stats.empty:
        return {}
    priors: Dict[Tuple[str, str, str], _Prior] = {}
    for _, row in stats.iterrows():
        kind = type_alias[row['mulcov_type']]
        covariate = row['c_covariate_name']
        # A multiplier may apply to a rate, an integrand, or both; 'none'
        # marks the unused slot.
        if row['rate_name'] != 'none':
            priors[(kind, covariate, row['rate_name'])] = Gaussian(
                mean=row['mean'], standard_deviation=row['std'])
        if row['integrand_name'] != 'none':
            priors[(kind, covariate, row['integrand_name'])] = Gaussian(
                mean=row['mean'], standard_deviation=row['std'])
    return priors
def fill_database(path: Union[str, Path], settings: SettingsConfig,
                  inputs: MeasurementInputs, alchemy: Alchemy,
                  parent_location_id: int, sex_id: int, child_prior: Dict[str, Dict[str, np.ndarray]],
                  mulcov_prior: Dict[Tuple[str, str, str], _Prior],
                  options: Dict[str, Any]) -> DismodFiller:
    """
    Fill a DisMod database at the specified path with the inputs, model, and settings
    specified, for a specific parent and sex ID, with options to override the priors.
    """
    filler = DismodFiller(
        path=path,
        settings_configuration=settings,
        measurement_inputs=inputs,
        grid_alchemy=alchemy,
        parent_location_id=parent_location_id,
        sex_id=sex_id,
        child_prior=child_prior,
        mulcov_prior=mulcov_prior,
    )
    # Write all tables for this parent/child pair; `options` are forwarded
    # into the dismod option table.
    filler.fill_for_parent_child(**options)
    return filler
def save_predictions(db_file: Union[str, Path],
                     model_version_id: int, gbd_round_id: int,
                     out_dir: Path,
                     locations: Optional[List[int]] = None,
                     sexes: Optional[List[int]] = None,
                     sample: bool = False,
                     predictions: Optional[pd.DataFrame] = None) -> None:
    """
    Save the fit from this dismod database for a specific location and sex to be
    uploaded later on.
    """
    LOG.info("Extracting results from DisMod SQLite Database.")
    extractor = DismodExtractor(path=db_file)
    # Reshape the raw dismod predictions into the IHME upload format.
    formatted = extractor.format_predictions_for_ihme(
        locations=locations,
        sexes=sexes,
        gbd_round_id=gbd_round_id,
        samples=sample,
        predictions=predictions,
    )
    LOG.info(f"Saving the results to {out_dir}.")
    handler = ResultsHandler()
    handler.save_draw_files(
        df=formatted,
        directory=out_dir,
        add_summaries=True,
        model_version_id=model_version_id,
    )
def dismod_db(model_version_id: int, parent_location_id: int, sex_id: Optional[int] = None,
              dm_commands: Optional[List[str]] = None,
              dm_options: Optional[Dict[str, Union[int, str, float]]] = None,
              prior_samples: bool = False,
              prior_parent: Optional[int] = None, prior_sex: Optional[int] = None,
              prior_mulcov_model_version_id: Optional[int] = None,
              test_dir: Optional[str] = None, fill: bool = False,
              save_fit: bool = True, save_prior: bool = True) -> None:
    """
    Creates a dismod database using the saved inputs and the file
    structure specified in the context. Alternatively it will
    skip the filling stage and move straight to the command
    stage if you don't pass --fill.

    Then runs an optional set of commands on the database passed
    in the --commands argument.

    Also passes an optional argument --options as a dictionary to
    the dismod database to fill/modify the options table.

    Parameters
    ----------
    model_version_id
        The model version ID
    parent_location_id
        The parent location for the database
    sex_id
        The parent sex for the database; defaults to the drill sex from
        the model settings when omitted
    dm_commands
        A list of commands to pass to the run_dismod_commands function, executed
        directly on the dismod database
    dm_options
        A dictionary of options to pass to the dismod option table
    prior_samples
        Whether the prior was derived from samples or not
    prior_mulcov_model_version_id
        The model version ID to use for pulling covariate multiplier
        statistics as priors for this fit
    prior_parent
        An optional parent location ID that specifies where to pull the prior
        information from.
    prior_sex
        An optional parent sex ID that specifies where to pull the prior information from.
    test_dir
        A test directory to create the database in rather than the database
        specified by the IHME file system context.
    fill
        Whether or not to fill the database with new inputs based on the model_version_id,
        parent_location_id, and sex_id. If not filling, this script can be used
        to just execute commands on the database instead.
    save_fit
        Whether or not to save the fit from this database as the parent fit.
    save_prior
        Whether or not to save the prior for the parent as the parent's prior.
    """
    # Bug fix: the previous signature used mutable defaults ([] and {}),
    # which are shared across calls in Python. Normalize the None sentinels
    # to fresh containers instead; callers that passed nothing see identical
    # behavior.
    if dm_commands is None:
        dm_commands = []
    if dm_options is None:
        dm_options = {}

    if test_dir is not None:
        context = Context(model_version_id=model_version_id,
                          configure_application=False,
                          root_directory=test_dir)
    else:
        context = Context(model_version_id=model_version_id)

    inputs, alchemy, settings = context.read_inputs()
    if sex_id is None:
        # Fall back to the drill sex configured in the model settings.
        sex_id = settings.model.drill_sex
    db_path = context.db_file(location_id=parent_location_id, sex_id=sex_id)

    # If we want to override the rate priors with posteriors from a previous
    # database, pass them in here.
    if prior_parent or prior_sex:
        if not (prior_parent and prior_sex):
            raise DismodDBError("Need to pass both prior parent and sex or neither.")
        prior_db = context.db_file(location_id=prior_parent, sex_id=prior_sex)
        child_prior = get_prior(
            path=prior_db,
            location_id=parent_location_id, sex_id=sex_id,
            rates=[r.rate for r in settings.rate],
            samples=prior_samples
        )
    else:
        child_prior = None

    if prior_mulcov_model_version_id is not None:
        LOG.info(f'Passing mulcov prior from model version id = {prior_mulcov_model_version_id}')
        mulcov_priors = get_mulcov_priors(prior_mulcov_model_version_id)
    else:
        mulcov_priors = None

    if fill:
        filler = fill_database(
            path=db_path, inputs=inputs, alchemy=alchemy, settings=settings,
            parent_location_id=parent_location_id, sex_id=sex_id,
            child_prior=child_prior, options=dm_options,
            mulcov_prior=mulcov_priors,
        )
        if save_prior:
            # Persist the parent's rate-grid priors in IHME format.
            priors_to_save = format_rate_grid_for_ihme(
                rates=filler.parent_child_model['rate'],
                gbd_round_id=settings.gbd_round_id,
                location_id=parent_location_id,
                sex_id=sex_id
            )
            rh = ResultsHandler()
            rh.save_summary_files(
                df=priors_to_save, directory=context.prior_dir,
                model_version_id=model_version_id
            )

    if dm_commands:
        run_dismod_commands(dm_file=str(db_path), commands=dm_commands)

    if save_fit:
        save_predictions(
            db_file=context.db_file(location_id=parent_location_id, sex_id=sex_id),
            model_version_id=model_version_id,
            gbd_round_id=settings.gbd_round_id,
            out_dir=context.fit_dir
        )
def main():
    """CLI entry point: parse arguments, configure logging, run dismod_db."""
    args = ARG_LIST.parse_args(sys.argv[1:])
    logging.basicConfig(level=LEVELS[args.log_level])
    dismod_db(
        model_version_id=args.model_version_id,
        parent_location_id=args.parent_location_id,
        sex_id=args.sex_id,
        dm_commands=args.dm_commands,
        dm_options=args.dm_options,
        prior_samples=args.prior_samples,
        prior_parent=args.prior_parent,
        prior_sex=args.prior_sex,
        prior_mulcov_model_version_id=args.prior_mulcov,
        test_dir=args.test_dir,
        fill=args.fill,
        save_fit=args.save_fit,
        save_prior=args.save_prior,
    )


if __name__ == '__main__':
    main()
| 39.72028 | 106 | 0.678609 | 120 | 0.010563 | 0 | 0 | 0 | 0 | 0 | 0 | 3,779 | 0.332658 |
2ae6eff3bd8be08beceb2b936760471cfdb63209 | 414 | py | Python | astropy/config/__init__.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/config/__init__.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/config/__init__.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains configuration and setup utilities for the
`astropy` project. This includes all functionality related to the
affiliated package index.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .paths import *
from .configuration import *
from .affiliated import *
| 29.571429 | 66 | 0.748792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.543478 |
2aea2e195f4a15fce6635cf3266266d6afd53722 | 1,047 | py | Python | ABC169/F.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC169/F.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC169/F.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | import sys
from collections import defaultdict
# Rebind the builtin input() to sys.stdin.readline for faster bulk reads
# (common competitive-programming idiom; the returned line keeps its '\n').
input = sys.stdin.readline
# Modulus for all arithmetic in this solution (the NTT-friendly prime 998244353).
mod = 998244353
def extgcd(a, b):
    """Extended Euclidean algorithm.

    Returns [x, y] with x*a + y*b == 1. Requires gcd(a, b) == 1;
    the loop does not terminate otherwise.
    """
    prev = (1, 0, a)
    cur = (0, 1, b)
    while cur[2] != 1:
        q = prev[2] // cur[2]
        prev, cur = cur, (prev[0] - q * cur[0],
                          prev[1] - q * cur[1],
                          prev[2] - q * cur[2])
    return [cur[0], cur[1]]
def mod_inv(a, m):
    """Return the modular inverse of ``a`` modulo ``m``.

    Uses the three-argument built-in ``pow`` with a negative exponent
    (Python 3.8+) instead of a hand-rolled extended-Euclid loop.
    Raises ValueError when ``a`` is not invertible mod ``m`` (the old
    loop would hang forever in that case).
    """
    return pow(a, -1, m)
def main():
    """Read n, s and n integers from stdin; print the answer mod 998244353.

    Appears to be AtCoder ABC169 F ("Knapsack for All Subsets") — TODO
    confirm: it accumulates a weighted count of subsets summing to s,
    with weights that start at 2**n and are halved (mod p) per chosen
    element.
    """
    n,s = map(int,input().split())
    a = list(map(int, input().split()))
    # dp[i][j]: weighted count of ways to reach sum j after considering the
    # first i elements; the initial weight is 2**n mod p.
    dp = [[0 for _ in range(s+1)] for _ in range(n+1)]
    dp[0][0] = pow(2,n,mod)
    MODINV2 = mod_inv(2,mod)  # multiplying by this halves a weight mod p
    answer = 0
    for i in range(n):
        # "Take a[i]": move weight from sum j-a[i] up to j, halved.
        # Iterating j downward keeps each element used at most once.
        for j in range(s,-1,-1):
            if j-a[i] < 0:
                continue
            dp[i+1][j] += dp[i][j-a[i]]*MODINV2
            dp[i+1][j] %= mod
        # "Skip a[i]": carry dp[i][j] forward unchanged — except at j == s,
        # where newly completed sums are folded into the answer once and
        # deliberately not propagated to later rows.
        for j in range(s,-1,-1):
            if j == s:
                answer += dp[i+1][j]
                answer %= mod
            else:
                dp[i+1][j] += dp[i][j]
                dp[i+1][j] %= mod
    print(answer)


if __name__ == "__main__":
    main()
2aecb72c40fb43c9fc2cbc47d687dce7b40d1d6a | 1,363 | py | Python | ferry/nlp/fetch/fetch_evals.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 4 | 2020-11-12T19:37:06.000Z | 2021-12-14T01:38:39.000Z | ferry/nlp/fetch/fetch_evals.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 96 | 2020-09-08T05:17:17.000Z | 2022-03-31T23:12:51.000Z | ferry/nlp/fetch/fetch_evals.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
] | 2 | 2021-03-03T23:02:40.000Z | 2021-06-17T23:33:05.000Z | # pylint: skip-file
import json
import requests
# Local GraphQL endpoint queried by fetch_evals() (the /v1/graphql path
# suggests a Hasura instance — confirm).
url = "http://localhost:8085/v1/graphql"
def fetch_evals(term, dump=True):
    """Fetch course-evaluation rows for one term from the local GraphQL API.

    Parameters
    ----------
    term : str or int
        Season code (e.g. "202001"); interpolated directly into the query.
    dump : bool
        When True, also write the rows to ./../data/evals/<term>.txt as JSON.

    Returns
    -------
    list
        The ``courses`` rows from the GraphQL response.
    """
    # NOTE(review): the term value is spliced into the query by string
    # concatenation — fine for trusted local input, but it is not sanitized.
    query = (
        """
        query MyQuery {
          courses(where: {
            season: {season_code: {_eq: \""""
        + str(term)
        + """\"}},
            school: {_eq: "YC"},
            average_rating: {_is_null: false},
            average_workload: {_is_null: false},
            extra_info: {_neq: "CANCELLED"}
          }) {
            course_id
            title
            season_code
            course_professors {professor {name}}
            listings {subject number section}
            average_rating
            average_workload
            description
            evaluation_narratives {question_code comment}
            evaluation_statistics {enrollment}
            skills
            areas
          }
        }
        """
    )
    # verify=False would disable TLS checks; it is a no-op for this plain
    # http localhost endpoint.
    r = requests.post(url, json={"query": query}, verify=False)
    # Raises KeyError if the server returns a GraphQL error payload instead
    # of data — TODO confirm that's acceptable here.
    data = json.loads(r.text)["data"]["courses"]
    if dump:
        with open("./../data/evals/" + str(term) + ".txt", "w+") as outfile:
            json.dump(data, outfile)
    return data
# Pull and dump evaluations for terms 1-3 of every year from 2009 to 2020.
for yr in range(2009, 2021):
    print(yr)
    for t in range(1, 4):
        fetch_evals(f"{yr}0{t}", True)
| 26.211538 | 76 | 0.487894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 891 | 0.653705 |
2aece40023ab19be0d9af7b42aedee10e53c55c8 | 1,375 | py | Python | spec/person.py | ace6807/spec-data-validation-pattern | 57c7a757035f16ffa999244175814db0ec3964d9 | [
"MIT"
] | 1 | 2021-07-27T00:55:08.000Z | 2021-07-27T00:55:08.000Z | spec/person.py | ace6807/spec-data-validation-pattern | 57c7a757035f16ffa999244175814db0ec3964d9 | [
"MIT"
] | null | null | null | spec/person.py | ace6807/spec-data-validation-pattern | 57c7a757035f16ffa999244175814db0ec3964d9 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from . import Spec
from dto.person import PersonDTO
@dataclass
class HasNameSpec(Spec):
    """Satisfied when the candidate's name is present and non-empty."""

    def passes(self, candidate: PersonDTO) -> bool:
        name = candidate.name
        return name is not None and name != ""
@dataclass
class HasAgeSpec(Spec):
    """Satisfied when the candidate has an age recorded (not None)."""

    def passes(self, candidate: PersonDTO) -> bool:
        return candidate.age is not None
@dataclass
class AgeIsLessThan(Spec):
    """Satisfied when the candidate's age is strictly below ``age``."""

    # Exclusive upper bound; a dataclass field set per instance.
    age: int

    def passes(self, candidate: PersonDTO) -> bool:
        return candidate.age < self.age
@dataclass
class AgeIsGreaterThan(Spec):
    """Satisfied when the candidate's age is strictly above ``age``."""

    # Exclusive lower bound; a dataclass field set per instance.
    age: int

    def passes(self, candidate: PersonDTO) -> bool:
        return candidate.age > self.age
@dataclass
class IsFromCountry(Spec):
    """Satisfied when the candidate's address country equals ``country``."""

    # Country code to match against candidate.address.country, e.g. "US".
    country: str

    def passes(self, candidate: PersonDTO) -> bool:
        return candidate.address.country == self.country
@dataclass
class ContestEligibilitySpec(Spec):
    """Composite eligibility rule built from the atomic specs.

    Combines specs via Spec's operator overloads — ``&`` presumably forms
    a conjunction and unary ``-`` a negation (both defined on Spec
    elsewhere; confirm). A candidate passes when they have an age and a
    name, the age is in the open interval (18, 55), and the ``is_domestic``
    (US) check fails — note the deliberate negation.
    """

    # Class-level shared spec instances. They carry no annotations, so
    # @dataclass does not treat them as fields.
    has_name_spec = HasNameSpec()
    has_age_spec = HasAgeSpec()
    age_less_than_spec = AgeIsLessThan(55)
    age_greater_than_spec = AgeIsGreaterThan(18)
    is_domestic = IsFromCountry("US")

    def passes(self, candidate: PersonDTO) -> bool:
        # Compose the combined rule, then evaluate it once per candidate.
        tests = (
            self.has_age_spec
            & self.has_name_spec
            & self.age_less_than_spec
            & self.age_greater_than_spec
            & -self.is_domestic
        )
        return tests.passes(candidate)
| 22.916667 | 66 | 0.674909 | 1,205 | 0.876364 | 0 | 0 | 1,271 | 0.924364 | 0 | 0 | 6 | 0.004364 |
2aed7fe6b7fb1f101fb3fe39a4efc26c3e607f45 | 629 | py | Python | src/wattbike_importer.py | sdresearch/what-watt1 | d8765007bf36a1770d8ca390f8bdcf790dc0f9d5 | [
"MIT"
] | null | null | null | src/wattbike_importer.py | sdresearch/what-watt1 | d8765007bf36a1770d8ca390f8bdcf790dc0f9d5 | [
"MIT"
] | null | null | null | src/wattbike_importer.py | sdresearch/what-watt1 | d8765007bf36a1770d8ca390f8bdcf790dc0f9d5 | [
"MIT"
] | null | null | null | import requests
# Base URL of the Wattbike Hub session-rows endpoint; a session id is
# appended to form the full request URL.
HUB_LOCATION = 'http://hub.wattbike.com/ranking/getSessionRows?sessionId='
class HubClient:
    """Minimal client for fetching session rows from the Wattbike Hub."""

    def __init__(self, location=None):
        # Fall back to the module-level default endpoint when no override
        # (or a falsy one) is supplied.
        self.location = location if location else HUB_LOCATION

    def _validate_session_id(self, session_id):
        # TODO: add regex validation of session_id
        # Bug fix: build the URL from the configured endpoint instead of
        # always using the module-level HUB_LOCATION, so a custom
        # ``location`` passed to __init__ is actually honoured.
        return self.location + session_id

    def get_session(self, session_id):
        """Fetch and return the parsed JSON for ``session_id``.

        Raises requests.HTTPError on any non-200 response.
        """
        session_url = self._validate_session_id(session_id)
        resp = requests.get(session_url)
        if resp.status_code != 200:
            raise requests.HTTPError('Response status code != 200')
        return resp.json()
| 27.347826 | 74 | 0.693164 | 533 | 0.847377 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.206677 |
2aef266ecab631d6845cc494ea93892db8e19c6c | 1,436 | py | Python | main/views.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | main/views.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | main/views.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.http import Http404
from .models import MainMetadata
from cal.models import Event
from classes.models import ClassInstance
from cal.models import Event
from datetime import date, timedelta
# Create your views here.
def about(request):
    """Render the About page with the most recent MainMetadata record."""
    latest_meta = MainMetadata.objects.latest('created_at')
    context = {'about': latest_meta}
    return render(request, 'main/about.html', context)
def colophon(request):
    """Render the colophon page with the latest site metadata."""
    latest_meta = MainMetadata.objects.latest('created_at')
    return render(request, 'main/colophon.html', {'meta': latest_meta})
def contact(request):
    """Render the contact page.

    NOTE(review): this view renders 'main/colophon.html' (the same
    template as colophon()) while passing a 'contact' context key — looks
    like a copy-paste slip; confirm whether 'main/contact.html' was
    intended before changing it.
    """
    latest_meta = MainMetadata.objects.latest('created_at')
    return render(request, 'main/colophon.html', {'contact': latest_meta})
def landing(request):
    """Render the landing page.

    Shows class instances whose semester ends within the next 20 weeks
    and the next five upcoming events.
    """
    today = date.today()
    horizon = today + timedelta(weeks=20)
    current_upcoming = (
        ClassInstance.objects
        .select_related('cl__institution')
        .select_related('semester')
        .filter(semester__end__range=[today, horizon])
        .prefetch_related('sessions')
        .order_by('semester__start')
    )
    upcoming_events = Event.objects.filter(day__gte=today).order_by('day')[0:5]
    return render(request, 'main/landing.html', {
        'current_upcoming': current_upcoming,
        'upcoming_events': upcoming_events,
    })
2af0b1d1c701ad3d57e0a7212ef9b12e29d346a9 | 1,761 | py | Python | backend/generate-events.py | daveygit2050/kow-london.co.uk | 1a4321fec1adb42e08b138ee2324e9ce47e89fa5 | [
"MIT"
] | null | null | null | backend/generate-events.py | daveygit2050/kow-london.co.uk | 1a4321fec1adb42e08b138ee2324e9ce47e89fa5 | [
"MIT"
] | null | null | null | backend/generate-events.py | daveygit2050/kow-london.co.uk | 1a4321fec1adb42e08b138ee2324e9ce47e89fa5 | [
"MIT"
] | null | null | null | import json
import requests
import tenacity
from bs4 import BeautifulSoup
from geopy.geocoders import Nominatim
from urllib.parse import parse_qs
from urllib.parse import urlparse
@tenacity.retry(retry=tenacity.retry_if_exception_type(IOError), wait=tenacity.wait_fixed(2))
def get_grid_ref_for_postcode(nominatim, postcode):
    # Geocode the UK postcode and return "lon, lat" as a comma-separated
    # string (longitude first, despite the "grid ref" name).
    # NOTE(review): the retry has no stop condition, so a persistent
    # IOError retries forever — consider stop=tenacity.stop_after_attempt(...).
    # NOTE(review): Nominatim.geocode returns None for unknown queries,
    # which would raise AttributeError below — confirm inputs are valid.
    location = nominatim.geocode(f"{postcode}, United Kingdom")
    return f"{location.longitude}, {location.latitude}"
def main():
    """Scrape event pages from kowmasters.com, geocode each venue's
    postcode, and write venues.json grouping events by coordinates."""
    base_url = "https://www.kowmasters.com"
    listing = requests.get(f"{base_url}/index.php?p=events")
    listing_soup = BeautifulSoup(listing.text, 'html.parser')
    # Keep only anchors that point at individual event pages.
    event_links = [
        anchor.get('href')
        for anchor in listing_soup.find_all('a')
        if "p=event&i=" in anchor.get('href')
    ]
    venues = {}
    geocoder = Nominatim(user_agent="kow-london-backend")
    for event_link in event_links:
        page = requests.get(f"{base_url}/{event_link}")
        page_soup = BeautifulSoup(page.text, 'html.parser')
        event_name = page_soup.find("h1").text.strip()
        # The venue postcode lives in the embedded map iframe's query string.
        map_frame = page_soup.find(id="mapcanvas")
        frame_url = urlparse(map_frame.attrs["src"])
        postcode = parse_qs(frame_url.query)["q"][0].strip()
        grid_ref = get_grid_ref_for_postcode(geocoder, postcode)
        # Group events by geocoded position, creating the bucket on first use.
        bucket = venues.setdefault(grid_ref, {"events": []})
        bucket["events"].append({"name": event_name})
    with open("venues.json", "w") as outfile:
        outfile.write(json.dumps(venues, indent=2))


if __name__ == "__main__":
    main()
| 45.153846 | 133 | 0.724588 | 0 | 0 | 0 | 0 | 265 | 0.150483 | 0 | 0 | 334 | 0.189665 |