content
stringlengths
5
1.05M
# -*- coding: utf-8 -*- """ (C) Guangcai Ren <rgc@bvrft.com> All rights reserved create time '2019/8/28 16:18' Module usage: 测试用例 """ import os from app import create_app # 设置配置的文件名 config_path = os.environ.get('CONFIG_NAME') or 'config_test.yml' os.environ['FLASK_CONFIG'] = os.path.dirname(os.path.dirname(__file__)) + '/config/' + config_path # 测试全局共用一个app app = create_app() app.config["TESTING"] = True class TestBase(object): """ 测试用例基类 此基类主要功能包括 建立数据库连接 """ app = app client = app.test_client() @classmethod def setup_class(cls): """创建 flask 上下文环境, 必须执行, 否则无法使用 flask 相关拓展""" cls.app_context = cls.app.app_context() cls.app_context.push() @classmethod def teardown_class(cls): if hasattr(cls, 'app_context'): cls.app_context.pop() @classmethod def send_request(cls, url, params=None, code=200, method='get', headers=None, is_json=True, test_id=None): """ 发送具体的数据请求到服务器 :param url: 请求地址 :param params: 请求参数 :param code: 响应码 :param test_id: 测试的唯一ID :param method: post请求 :param headers: :param is_json: """ if is_json: rsp = cls.client.open(path=url, method=method, json=params, headers=headers) else: rsp = cls.client.open(path=url, method=method, data=params, headers=headers) assert rsp.status_code == 200, f'test_id: {test_id}\n{rsp}' result = rsp.get_json(True) assert result["respCode"] == code, f'test_id: {test_id}\n{result}' @staticmethod def del_map(): """ 删除 HASH_RING_MAP数据 :return: """ # redis.delete(HASH_RING_MAP) pass
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import socket
from urllib import parse

from kazoo.client import KazooClient
from kazoo.security import make_digest_acl
from kazoo.exceptions import NodeExistsError

from fate_arch.common import conf_utils
from fate_arch.common.conf_utils import get_base_config
from fate_flow.settings import FATE_FLOW_MODEL_TRANSFER_ENDPOINT, IP, HTTP_PORT, FATEFLOW_SERVICE_NAME
from fate_flow.settings import stat_logger, SERVICES_SUPPORT_REGISTRY, FATE_SERVICES_REGISTERED_PATH


class ServiceUtils(object):
    """Look up and register service endpoints, either from the local
    configuration file or from a zookeeper registry (used when
    ``use_registry`` is enabled in the base config)."""

    ZOOKEEPER_CLIENT = None
    # services that may be registered through ``register_service`` and the
    # exact configuration keys each of them must provide
    registry_service_info = {
        "fatemanager": ["host", "port", "federatedId"],
        "studio": ["host", "port"]
    }

    @classmethod
    def get(cls, service_name, default=None):
        """Return the configuration of ``service_name``, preferring the
        zookeeper registry when enabled and supported for this service."""
        if get_base_config("use_registry", False) and service_name in SERVICES_SUPPORT_REGISTRY:
            return ServiceUtils.get_from_registry(service_name)
        return ServiceUtils.get_from_file(service_name, default)

    @classmethod
    def get_item(cls, service_name, key, default=None):
        """Return a single configuration item of ``service_name``."""
        return ServiceUtils.get(service_name, {}).get(key, default)

    @classmethod
    def get_from_file(cls, service_name, default=None):
        """Return the service configuration from the local config file."""
        return conf_utils.get_base_config(service_name, default)

    @classmethod
    def get_zk(cls, ):
        """Build a zookeeper client from the base config, or ``None`` when
        the registry is disabled.  Digest ACL auth is used when configured."""
        if not get_base_config('use_registry', False):
            return
        zk_config = get_base_config("zookeeper", {})
        if zk_config.get("use_acl", False):
            default_acl = make_digest_acl(zk_config.get("user", ""), zk_config.get("password", ""), all=True)
            zk = KazooClient(hosts=zk_config.get("hosts", []), default_acl=[default_acl],
                             auth_data=[("digest", "{}:{}".format(
                                 zk_config.get("user", ""), zk_config.get("password", "")))])
        else:
            zk = KazooClient(hosts=zk_config.get("hosts", []))
        return zk

    @classmethod
    def get_from_registry(cls, service_name):
        """Return the service addresses registered in zookeeper.

        :raises Exception: when the registry cannot be read.
        """
        if not get_base_config('use_registry', False):
            return
        zk = None
        try:
            zk = ServiceUtils.get_zk()
            zk.start()
            nodes = zk.get_children(FATE_SERVICES_REGISTERED_PATH.get(service_name, ""))
            return nodes_unquote(nodes)
        except Exception as e:
            raise Exception('loading servings node failed from zookeeper: {}'.format(e))
        finally:
            # BUGFIX: stop() was previously skipped when get_children()
            # raised, leaking the zookeeper connection.
            if zk is not None:
                zk.stop()

    @classmethod
    def register(cls, zk, party_model_id=None, model_version=None):
        """Register the model-transfer url of this fate-flow server as an
        ephemeral node under the fate-flow service path."""
        if not get_base_config('use_registry', False):
            return
        model_transfer_url = 'http://{}:{}{}'.format(IP, HTTP_PORT, FATE_FLOW_MODEL_TRANSFER_ENDPOINT)
        if party_model_id is not None and model_version is not None:
            model_transfer_url += '/{}/{}'.format(party_model_id.replace('#', '~'), model_version)
        fate_flow_model_transfer_service = '{}/{}'.format(FATE_SERVICES_REGISTERED_PATH.get(FATEFLOW_SERVICE_NAME, ""),
                                                          parse.quote(model_transfer_url, safe=' '))
        try:
            # ephemeral node: disappears automatically when the session ends
            zk.create(fate_flow_model_transfer_service, makepath=True, ephemeral=True)
            stat_logger.info("register path {} to {}".format(fate_flow_model_transfer_service,
                                                             ";".join(get_base_config("zookeeper", {}).get("hosts"))))
        except NodeExistsError:
            pass
        except Exception as e:
            stat_logger.exception(e)

    @classmethod
    def register_models(cls, zk, models):
        """Register the transfer url of every model in ``models``."""
        if not get_base_config('use_registry', False):
            return
        for model in models:
            cls.register(zk, model.f_party_model_id, model.f_model_version)

    @classmethod
    def register_service(cls, service_config):
        """Verify and merge ``service_config`` into the local configuration.

        :return: dict of the service configurations that were updated.
        """
        update_server = {}
        for service_name, service_info in service_config.items():
            if service_name not in cls.registry_service_info.keys():
                continue
            cls.parameter_verification(service_name, service_info)
            manager_conf = conf_utils.get_base_config(service_name, {})
            if not manager_conf:
                manager_conf = service_info
            else:
                manager_conf.update(service_info)
            conf_utils.update_config(service_name, manager_conf)
            update_server[service_name] = manager_conf
        return update_server

    @classmethod
    def parameter_verification(cls, service_name, service_info):
        """Check that ``service_info`` provides exactly the keys required by
        ``registry_service_info`` and that its host/port are reachable.

        :raises Exception: when the configuration keys do not match.
        """
        if set(service_info.keys()) != set(cls.registry_service_info.get(service_name)):
            raise Exception(f'the registration service {service_name} configuration item is'
                            f' {cls.registry_service_info.get(service_name)}')
        if "host" in service_info and "port" in service_info:
            cls.connection_test(service_info.get("host"), service_info.get("port"))

    @classmethod
    def connection_test(cls, ip, port):
        """Probe a TCP connection to ``(ip, port)``.

        :raises ConnectionRefusedError: when the endpoint is unreachable.
        """
        # BUGFIX: the socket was never closed; use it as a context manager.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            result = s.connect_ex((ip, port))
        if result != 0:
            raise ConnectionRefusedError(f"connection refused: host {ip}, port {port}")


def nodes_unquote(nodes):
    """Unquote registered zookeeper node names and return the host:port part
    of each url; nodes that cannot be parsed are logged and skipped."""
    urls = [parse.unquote(node) for node in nodes]
    servers = []
    for url in urls:
        try:
            servers.append(url.split('/')[2])
        except Exception as e:
            stat_logger.exception(e)
    return servers
"""Base classes describing both classification and clustering models that can be used for training and prediction.""" from typing import Optional, Sequence import numpy as np from slub_docsa.common.document import Document class ClassificationModel: """Represents a classification model similar to a scikit-learn estimator and predictor interface. However, the input of both fit and predict_proba methods are a collection of `Document` instances, instead of raw vectorized feaures. """ def fit( self, train_documents: Sequence[Document], train_targets: np.ndarray, validation_documents: Optional[Sequence[Document]] = None, validation_targets: Optional[np.ndarray] = None, ): """Train a model to fit the training data. Parameters ---------- train_documents: Sequence[Document] The sequence of documents that is used for training a model. train_targets: numpy.ndarray The incidence matrix describing which document of `train_documents` belongs to which subjects. The matrix has to have a shape of (n_docs, n_subjects). validation_documents: Optional[Sequence[Document]] A sequence of documents that can be used to validate the trained model during training, e.g., for each epoch when training an artificial neural network validation_targets: Optional[numpy.ndarray] The incidence matrix for `validation_documents` Returns ------- Model self """ raise NotImplementedError() def predict_proba(self, test_documents: Sequence[Document]) -> np.ndarray: """Return predicted subject probabilities as a matrix. Parameters ---------- test_documents: Sequence[Document] The test sequence of documents that are supposed to be evaluated. Returns ------- numpy.ndarray The matrix of subject probabilities with a shape of (n_docs, n_subjects). The column order has to match the order that was provided as `train_targets` to the `fit` method. 
""" raise NotImplementedError() class PersistableClassificationModel(ClassificationModel): """Extends a classification model for save/load methods such that model can be persisted.""" def save(self, persist_dir: str): """Save the fitted model state to disk at some directory. Parameters ---------- persist_dir: str the path to a directory that can be used to save the model state """ raise NotImplementedError() def load(self, persist_dir: str): """Load a persisted model state from some directory. Parameters ---------- persist_dir: str the path to the directory from which the persisted model is loaded """ raise NotImplementedError() class ClusteringModel: """Represents a clustering model similar to the scikit-learn fit and predict clustering model. However, the input of both fit and predict methods are a collection of `Document` instances, instead of raw vectorized feaures. """ def fit(self, documents: Sequence[Document]): """Train a clustering model in case the clustering algorithm is based on some kind of model.""" raise NotImplementedError() def predict(self, documents: Sequence[Document]) -> np.ndarray: """Predict cluster assignments as a membership matrix of shape (len(documents), len(clusters)). The membership matrix may resemble an exact clustering, meaning each document is assigned to exactly one cluster. Alternatively, it may contain membership degrees, meaning each document can be part of multiple clusters to a certain degree, such that their degrees sum up to 1. Otherwise, the membership matrix may have arbitrary membership degrees, e.g., to represent a hierarchical clustering. In all cases, the maximum value of membership matrix elements is 1. """ raise NotImplementedError()
# -*- coding: utf-8 -*-
"""
orm types base module.
"""

from abc import abstractmethod

from sqlalchemy import TypeDecorator

from pyrin.core.exceptions import CoreNotImplementedError


class CoreCustomType(TypeDecorator):
    """
    core custom type class.

    all application custom types must be subclassed from this type.

    subclasses implement `_to_database` and `_from_database` to convert
    values on their way to and from the database, and `_coerce_to_string`
    when literal (inline) parameter rendering is required.
    """

    # the underlying type implementation of this custom type.
    # it must be a subclass of sqlalchemy types.
    impl = None

    @abstractmethod
    def _to_database(self, value, dialect):
        """
        converts given value to be emitted to database.

        this method must be overridden in subclasses.

        :param object value: value to be processed.
        :param Dialect dialect: the dialect in use.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: object
        """

        raise CoreNotImplementedError()

    @abstractmethod
    def _from_database(self, value, dialect):
        """
        converts given value to python type after fetching it from database.

        this method must be overridden in subclasses.

        :param object value: value to be processed.
        :param Dialect dialect: the dialect in use.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: object
        """

        raise CoreNotImplementedError()

    @abstractmethod
    def _coerce_to_string(self, value, dialect):
        """
        coerces the given value to string before sending to database.

        subclasses must override this method if they want to use literal params.

        :param object value: value to be processed.
        :param Dialect dialect: the dialect in use.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: str
        """

        raise CoreNotImplementedError()

    # NOTE: the former `load_dialect_impl` and `compare_against_backend`
    # overrides only delegated to `super()` without adding any behavior
    # (useless-super-delegation), so they were removed; the inherited
    # `TypeDecorator` implementations apply unchanged.

    def process_literal_param(self, value, dialect):
        """
        receives a literal parameter value to be rendered inline within a statement.

        this method is used when the compiler renders a literal value without
        using binds, typically within DDL such as in the `server default` of
        a column or an expression within a CHECK constraint. the returned
        string will be rendered into the output string.

        :param TypeEngine value: data to operate upon. it could be `None`.
        :param Dialect dialect: the dialect in use.

        :rtype: str
        """

        if value is None:
            return value

        result = self._to_database(value, dialect)
        # strings (and None) can be emitted as-is; anything else must be
        # coerced to a string representation by the subclass.
        if result is None or isinstance(result, str):
            return result

        return self._coerce_to_string(result, dialect)

    def process_bind_param(self, value, dialect):
        """
        receives a bound parameter value to be converted.

        :param TypeEngine value: data to operate upon. it could be `None`.
        :param Dialect dialect: the dialect in use.

        :rtype: object
        """

        if value is None:
            return value

        return self._to_database(value, dialect)

    def process_result_value(self, value, dialect):
        """
        receives a result-row column value to be converted.

        :param str value: data to operate upon. it could be `None`.
        :param Dialect dialect: the dialect in use.

        :rtype: object
        """

        if value is None:
            return value

        return self._from_database(value, dialect)

    @property
    @abstractmethod
    def python_type(self):
        """
        gets the python type object expected to be returned by instances of this type.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: type
        """

        raise CoreNotImplementedError()
def bubbleSort(l, n):
    """Sort the first ``n`` items of ``l`` in place by card value using a
    bubble sort (stable: equal keys are never swapped) and return a copy.

    :param l: list of ``(value, suit)`` tuples
    :param n: number of items to sort
    """
    for i in range(n):
        # after pass i, the largest unsorted value has bubbled to the right
        for j in range(n - 1 - i):
            if l[j][0] > l[j + 1][0]:
                l[j], l[j + 1] = l[j + 1], l[j]
    return l[:]


def selectionSort(l, n):
    """Sort the first ``n`` items of ``l`` in place by card value using a
    selection sort (NOT stable: long-distance swaps reorder equal keys) and
    return a copy.

    :param l: list of ``(value, suit)`` tuples
    :param n: number of items to sort
    """
    for i in range(n):
        minj = i
        for j in range(i + 1, n):
            if l[j][0] < l[minj][0]:
                minj = j
        l[i], l[minj] = l[minj], l[i]
    return l[:]


def main():
    """Read cards from stdin, sort them with both algorithms and report the
    stability of each result (AOJ ALDS1_2 style output)."""
    n = int(input())
    cards = []
    for c in input().split():
        # a card like "H4" becomes (4, 'H'); c[1:] also accepts
        # multi-digit values such as "H10" (previously only c[1] was read)
        cards.append((int(c[1:]), c[0]))
    bl = ' '.join([suit + str(value) for value, suit in bubbleSort(cards[:], n)])
    sl = ' '.join([suit + str(value) for value, suit in selectionSort(cards[:], n)])
    print(bl)
    # bubble sort never swaps equal keys, so it is always stable
    print('Stable')
    print(sl)
    # selection sort is stable only if it happens to match the stable order
    print('Stable' if sl == bl else 'Not stable')


# BUGFIX: guard the script body so importing this module does not block on input()
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-

################################################################################
## Form generated from reading UI file 'TabWater.ui'
##
## Created by: Qt User Interface Compiler version 5.14.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################

from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
    QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
    QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
    QRadialGradient)
from PySide2.QtWidgets import *


class Ui_TabWater(object):
    # NOTE: auto-generated by pyside2-uic — edit TabWater.ui instead of this file.

    def setupUi(self, TabWater):
        """Build the widget tree and layouts of the water tab onto TabWater."""
        if TabWater.objectName():
            TabWater.setObjectName(u"TabWater")
        TabWater.resize(793, 518)
        self.gridLayout_3 = QGridLayout(TabWater)
        self.gridLayout_3.setObjectName(u"gridLayout_3")
        self.splitter_2 = QSplitter(TabWater)
        self.splitter_2.setObjectName(u"splitter_2")
        self.splitter_2.setOrientation(Qt.Horizontal)
        # --- "Source Water Profile" group: editable mineral/pH inputs ---
        self.sourceWaterGroup = QGroupBox(self.splitter_2)
        self.sourceWaterGroup.setObjectName(u"sourceWaterGroup")
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(4)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sourceWaterGroup.sizePolicy().hasHeightForWidth())
        self.sourceWaterGroup.setSizePolicy(sizePolicy)
        self.formLayout = QFormLayout(self.sourceWaterGroup)
        self.formLayout.setObjectName(u"formLayout")
        self.formLayout.setLabelAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.lbl_name = QLabel(self.sourceWaterGroup)
        self.lbl_name.setObjectName(u"lbl_name")
        self.formLayout.setWidget(0, QFormLayout.LabelRole, self.lbl_name)
        self.sourceName = QLineEdit(self.sourceWaterGroup)
        self.sourceName.setObjectName(u"sourceName")
        self.formLayout.setWidget(0, QFormLayout.FieldRole, self.sourceName)
        self.lbl_calcium = QLabel(self.sourceWaterGroup)
        self.lbl_calcium.setObjectName(u"lbl_calcium")
        self.formLayout.setWidget(1, QFormLayout.LabelRole, self.lbl_calcium)
        self.sourceCalcium = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceCalcium.setObjectName(u"sourceCalcium")
        self.sourceCalcium.setDecimals(1)
        self.sourceCalcium.setMaximum(1000.000000000000000)
        self.sourceCalcium.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(1, QFormLayout.FieldRole, self.sourceCalcium)
        self.lbl_magnesium = QLabel(self.sourceWaterGroup)
        self.lbl_magnesium.setObjectName(u"lbl_magnesium")
        self.formLayout.setWidget(2, QFormLayout.LabelRole, self.lbl_magnesium)
        self.sourceMagnesium = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceMagnesium.setObjectName(u"sourceMagnesium")
        self.sourceMagnesium.setDecimals(1)
        self.sourceMagnesium.setMaximum(1000.000000000000000)
        self.sourceMagnesium.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(2, QFormLayout.FieldRole, self.sourceMagnesium)
        self.lbl_sodium = QLabel(self.sourceWaterGroup)
        self.lbl_sodium.setObjectName(u"lbl_sodium")
        self.formLayout.setWidget(3, QFormLayout.LabelRole, self.lbl_sodium)
        self.sourceSodium = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceSodium.setObjectName(u"sourceSodium")
        self.sourceSodium.setDecimals(1)
        self.sourceSodium.setMaximum(1000.000000000000000)
        self.sourceSodium.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(3, QFormLayout.FieldRole, self.sourceSodium)
        self.lbl_chloride = QLabel(self.sourceWaterGroup)
        self.lbl_chloride.setObjectName(u"lbl_chloride")
        self.formLayout.setWidget(4, QFormLayout.LabelRole, self.lbl_chloride)
        self.sourceChloride = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceChloride.setObjectName(u"sourceChloride")
        self.sourceChloride.setDecimals(1)
        self.sourceChloride.setMaximum(1000.000000000000000)
        self.sourceChloride.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(4, QFormLayout.FieldRole, self.sourceChloride)
        self.lbl_sulfate = QLabel(self.sourceWaterGroup)
        self.lbl_sulfate.setObjectName(u"lbl_sulfate")
        self.formLayout.setWidget(5, QFormLayout.LabelRole, self.lbl_sulfate)
        self.sourceSulfate = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceSulfate.setObjectName(u"sourceSulfate")
        self.sourceSulfate.setDecimals(1)
        self.sourceSulfate.setMaximum(1000.000000000000000)
        self.sourceSulfate.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(5, QFormLayout.FieldRole, self.sourceSulfate)
        self.lbl_bicarbonate = QLabel(self.sourceWaterGroup)
        self.lbl_bicarbonate.setObjectName(u"lbl_bicarbonate")
        self.formLayout.setWidget(6, QFormLayout.LabelRole, self.lbl_bicarbonate)
        self.sourceBicarbonate = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourceBicarbonate.setObjectName(u"sourceBicarbonate")
        self.sourceBicarbonate.setDecimals(1)
        self.sourceBicarbonate.setMaximum(1000.000000000000000)
        self.sourceBicarbonate.setSingleStep(0.100000000000000)
        self.formLayout.setWidget(6, QFormLayout.FieldRole, self.sourceBicarbonate)
        self.lbl_ph = QLabel(self.sourceWaterGroup)
        self.lbl_ph.setObjectName(u"lbl_ph")
        self.formLayout.setWidget(7, QFormLayout.LabelRole, self.lbl_ph)
        self.sourcePh = QDoubleSpinBox(self.sourceWaterGroup)
        self.sourcePh.setObjectName(u"sourcePh")
        self.sourcePh.setDecimals(1)
        self.sourcePh.setMaximum(14.000000000000000)
        self.sourcePh.setSingleStep(0.100000000000000)
        self.sourcePh.setValue(7.000000000000000)
        self.formLayout.setWidget(7, QFormLayout.FieldRole, self.sourcePh)
        self.splitter_2.addWidget(self.sourceWaterGroup)
        # --- "Source/Distilled Mix Ratio" group: slider plus read-only outputs ---
        self.layoutWidget = QWidget(self.splitter_2)
        self.layoutWidget.setObjectName(u"layoutWidget")
        self.verticalLayout = QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.ratioGroup = QGroupBox(self.layoutWidget)
        self.ratioGroup.setObjectName(u"ratioGroup")
        sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy1.setHorizontalStretch(2)
        sizePolicy1.setVerticalStretch(0)
        sizePolicy1.setHeightForWidth(self.ratioGroup.sizePolicy().hasHeightForWidth())
        self.ratioGroup.setSizePolicy(sizePolicy1)
        self.gridLayout = QGridLayout(self.ratioGroup)
        self.gridLayout.setObjectName(u"gridLayout")
        self.lbl_source = QLabel(self.ratioGroup)
        self.lbl_source.setObjectName(u"lbl_source")
        self.gridLayout.addWidget(self.lbl_source, 1, 0, 1, 1)
        self.distilledPercent = QLineEdit(self.ratioGroup)
        self.distilledPercent.setObjectName(u"distilledPercent")
        self.distilledPercent.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.distilledPercent.setReadOnly(True)
        self.gridLayout.addWidget(self.distilledPercent, 2, 1, 1, 1)
        self.sourcePercent = QLineEdit(self.ratioGroup)
        self.sourcePercent.setObjectName(u"sourcePercent")
        self.sourcePercent.setReadOnly(True)
        self.gridLayout.addWidget(self.sourcePercent, 2, 0, 1, 1)
        self.ratio = QSlider(self.ratioGroup)
        self.ratio.setObjectName(u"ratio")
        self.ratio.setMaximum(100)
        self.ratio.setValue(100)
        self.ratio.setOrientation(Qt.Horizontal)
        self.ratio.setInvertedAppearance(True)
        self.gridLayout.addWidget(self.ratio, 0, 0, 1, 2)
        self.lbl_distilled = QLabel(self.ratioGroup)
        self.lbl_distilled.setObjectName(u"lbl_distilled")
        self.lbl_distilled.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.gridLayout.addWidget(self.lbl_distilled, 1, 1, 1, 1)
        self.sourceVolume = QLineEdit(self.ratioGroup)
        self.sourceVolume.setObjectName(u"sourceVolume")
        self.sourceVolume.setReadOnly(True)
        self.gridLayout.addWidget(self.sourceVolume, 3, 0, 1, 1)
        self.distilledVolume = QLineEdit(self.ratioGroup)
        self.distilledVolume.setObjectName(u"distilledVolume")
        self.distilledVolume.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.distilledVolume.setReadOnly(True)
        self.gridLayout.addWidget(self.distilledVolume, 3, 1, 1, 1)
        self.verticalLayout.addWidget(self.ratioGroup)
        self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.verticalLayout.addItem(self.verticalSpacer)
        self.splitter_2.addWidget(self.layoutWidget)
        # --- "Brewing Water Profile" group: read-only mixed-water results ---
        self.sourceWaterGroup_2 = QGroupBox(self.splitter_2)
        self.sourceWaterGroup_2.setObjectName(u"sourceWaterGroup_2")
        sizePolicy1.setHeightForWidth(self.sourceWaterGroup_2.sizePolicy().hasHeightForWidth())
        self.sourceWaterGroup_2.setSizePolicy(sizePolicy1)
        self.formLayout_2 = QFormLayout(self.sourceWaterGroup_2)
        self.formLayout_2.setObjectName(u"formLayout_2")
        self.formLayout_2.setLabelAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.lbl_calcium_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_calcium_2.setObjectName(u"lbl_calcium_2")
        self.formLayout_2.setWidget(0, QFormLayout.LabelRole, self.lbl_calcium_2)
        self.lbl_magnesium_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_magnesium_2.setObjectName(u"lbl_magnesium_2")
        self.formLayout_2.setWidget(1, QFormLayout.LabelRole, self.lbl_magnesium_2)
        self.lbl_sodium_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_sodium_2.setObjectName(u"lbl_sodium_2")
        self.formLayout_2.setWidget(2, QFormLayout.LabelRole, self.lbl_sodium_2)
        self.lbl_chloride_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_chloride_2.setObjectName(u"lbl_chloride_2")
        self.formLayout_2.setWidget(3, QFormLayout.LabelRole, self.lbl_chloride_2)
        self.lbl_sulfate_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_sulfate_2.setObjectName(u"lbl_sulfate_2")
        self.formLayout_2.setWidget(4, QFormLayout.LabelRole, self.lbl_sulfate_2)
        self.lbl_bicarbonate_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_bicarbonate_2.setObjectName(u"lbl_bicarbonate_2")
        self.formLayout_2.setWidget(5, QFormLayout.LabelRole, self.lbl_bicarbonate_2)
        self.lbl_ph_2 = QLabel(self.sourceWaterGroup_2)
        self.lbl_ph_2.setObjectName(u"lbl_ph_2")
        self.formLayout_2.setWidget(6, QFormLayout.LabelRole, self.lbl_ph_2)
        self.mixedCalcium = QLineEdit(self.sourceWaterGroup_2)
        self.mixedCalcium.setObjectName(u"mixedCalcium")
        self.mixedCalcium.setReadOnly(True)
        self.formLayout_2.setWidget(0, QFormLayout.FieldRole, self.mixedCalcium)
        self.mixedMagnesium = QLineEdit(self.sourceWaterGroup_2)
        self.mixedMagnesium.setObjectName(u"mixedMagnesium")
        self.mixedMagnesium.setReadOnly(True)
        self.formLayout_2.setWidget(1, QFormLayout.FieldRole, self.mixedMagnesium)
        self.mixedSodium = QLineEdit(self.sourceWaterGroup_2)
        self.mixedSodium.setObjectName(u"mixedSodium")
        self.mixedSodium.setReadOnly(True)
        self.formLayout_2.setWidget(2, QFormLayout.FieldRole, self.mixedSodium)
        self.mixedChloride = QLineEdit(self.sourceWaterGroup_2)
        self.mixedChloride.setObjectName(u"mixedChloride")
        self.mixedChloride.setReadOnly(True)
        self.formLayout_2.setWidget(3, QFormLayout.FieldRole, self.mixedChloride)
        self.mixedSulfate = QLineEdit(self.sourceWaterGroup_2)
        self.mixedSulfate.setObjectName(u"mixedSulfate")
        self.mixedSulfate.setReadOnly(True)
        self.formLayout_2.setWidget(4, QFormLayout.FieldRole, self.mixedSulfate)
        self.mixedBicarbonate = QLineEdit(self.sourceWaterGroup_2)
        self.mixedBicarbonate.setObjectName(u"mixedBicarbonate")
        self.mixedBicarbonate.setReadOnly(True)
        self.formLayout_2.setWidget(5, QFormLayout.FieldRole, self.mixedBicarbonate)
        self.mixedPh = QLineEdit(self.sourceWaterGroup_2)
        self.mixedPh.setObjectName(u"mixedPh")
        self.mixedPh.setReadOnly(True)
        self.formLayout_2.setWidget(6, QFormLayout.FieldRole, self.mixedPh)
        self.splitter_2.addWidget(self.sourceWaterGroup_2)
        self.gridLayout_3.addWidget(self.splitter_2, 0, 0, 1, 1)
        # --- "Load Selected Profile" button row ---
        self.splitter = QSplitter(TabWater)
        self.splitter.setObjectName(u"splitter")
        self.splitter.setOrientation(Qt.Horizontal)
        self.load = QPushButton(self.splitter)
        self.load.setObjectName(u"load")
        self.load.setEnabled(False)
        sizePolicy2 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        sizePolicy2.setHorizontalStretch(1)
        sizePolicy2.setVerticalStretch(0)
        sizePolicy2.setHeightForWidth(self.load.sizePolicy().hasHeightForWidth())
        self.load.setSizePolicy(sizePolicy2)
        self.splitter.addWidget(self.load)
        self.gridLayout_3.addWidget(self.splitter, 1, 0, 1, 1)
        # --- "Ingredient Library" group: filter box plus sortable table ---
        self.group_fermentables = QGroupBox(TabWater)
        self.group_fermentables.setObjectName(u"group_fermentables")
        self.gridLayout_2 = QGridLayout(self.group_fermentables)
        self.gridLayout_2.setObjectName(u"gridLayout_2")
        self.filter = QLineEdit(self.group_fermentables)
        self.filter.setObjectName(u"filter")
        self.gridLayout_2.addWidget(self.filter, 0, 0, 1, 1)
        self.library = QTableView(self.group_fermentables)
        self.library.setObjectName(u"library")
        sizePolicy3 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sizePolicy3.setHorizontalStretch(0)
        sizePolicy3.setVerticalStretch(0)
        sizePolicy3.setHeightForWidth(self.library.sizePolicy().hasHeightForWidth())
        self.library.setSizePolicy(sizePolicy3)
        self.library.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.library.setAlternatingRowColors(True)
        self.library.setSelectionMode(QAbstractItemView.SingleSelection)
        self.library.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.library.setSortingEnabled(True)
        self.gridLayout_2.addWidget(self.library, 1, 0, 1, 1)
        self.gridLayout_3.addWidget(self.group_fermentables, 2, 0, 1, 1)
        # keyboard tab-order chain across the whole tab
        QWidget.setTabOrder(self.sourceName, self.sourceCalcium)
        QWidget.setTabOrder(self.sourceCalcium, self.sourceMagnesium)
        QWidget.setTabOrder(self.sourceMagnesium, self.sourceSodium)
        QWidget.setTabOrder(self.sourceSodium, self.sourceChloride)
        QWidget.setTabOrder(self.sourceChloride, self.sourceSulfate)
        QWidget.setTabOrder(self.sourceSulfate, self.sourceBicarbonate)
        QWidget.setTabOrder(self.sourceBicarbonate, self.sourcePh)
        QWidget.setTabOrder(self.sourcePh, self.ratio)
        QWidget.setTabOrder(self.ratio, self.load)
        QWidget.setTabOrder(self.load, self.filter)
        QWidget.setTabOrder(self.filter, self.library)
        QWidget.setTabOrder(self.library, self.sourcePercent)
        QWidget.setTabOrder(self.sourcePercent, self.distilledPercent)
        QWidget.setTabOrder(self.distilledPercent, self.mixedCalcium)
        QWidget.setTabOrder(self.mixedCalcium, self.mixedMagnesium)
        QWidget.setTabOrder(self.mixedMagnesium, self.mixedSodium)
        QWidget.setTabOrder(self.mixedSodium, self.mixedChloride)
        QWidget.setTabOrder(self.mixedChloride, self.mixedSulfate)
        QWidget.setTabOrder(self.mixedSulfate, self.mixedBicarbonate)
        QWidget.setTabOrder(self.mixedBicarbonate, self.mixedPh)

        self.retranslateUi(TabWater)

        QMetaObject.connectSlotsByName(TabWater)
    # setupUi

    def retranslateUi(self, TabWater):
        """Apply all translatable texts (titles, labels, suffixes, defaults)."""
        TabWater.setWindowTitle(QCoreApplication.translate("TabWater", u"Form", None))
        self.sourceWaterGroup.setTitle(QCoreApplication.translate("TabWater", u"Source Water Profile", None))
        self.lbl_name.setText(QCoreApplication.translate("TabWater", u"Name:", None))
        self.sourceName.setText(QCoreApplication.translate("TabWater", u"Distilled Water", None))
        self.lbl_calcium.setText(QCoreApplication.translate("TabWater", u"Calcium:", None))
        self.sourceCalcium.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_magnesium.setText(QCoreApplication.translate("TabWater", u"Magnesium:", None))
        self.sourceMagnesium.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_sodium.setText(QCoreApplication.translate("TabWater", u"Sodium:", None))
        self.sourceSodium.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_chloride.setText(QCoreApplication.translate("TabWater", u"Chloride:", None))
        self.sourceChloride.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_sulfate.setText(QCoreApplication.translate("TabWater", u"Sulfate:", None))
        self.sourceSulfate.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_bicarbonate.setText(QCoreApplication.translate("TabWater", u"Bicarbonate:", None))
        self.sourceBicarbonate.setSuffix(QCoreApplication.translate("TabWater", u" ppm", None))
        self.lbl_ph.setText(QCoreApplication.translate("TabWater", u"pH:", None))
        self.ratioGroup.setTitle(QCoreApplication.translate("TabWater", u"Source/Distilled Mix Ratio", None))
        self.lbl_source.setText(QCoreApplication.translate("TabWater", u"Source", None))
        self.distilledPercent.setText(QCoreApplication.translate("TabWater", u"0%", None))
        self.sourcePercent.setText(QCoreApplication.translate("TabWater", u"100%", None))
        self.lbl_distilled.setText(QCoreApplication.translate("TabWater", u"Distilled", None))
        self.sourceVolume.setText(QCoreApplication.translate("TabWater", u"0.0 gal", None))
        self.distilledVolume.setText(QCoreApplication.translate("TabWater", u"0.0 gal", None))
        self.sourceWaterGroup_2.setTitle(QCoreApplication.translate("TabWater", u"Brewing Water Profile", None))
        self.lbl_calcium_2.setText(QCoreApplication.translate("TabWater", u"Calcium:", None))
        self.lbl_magnesium_2.setText(QCoreApplication.translate("TabWater", u"Magnesium:", None))
        self.lbl_sodium_2.setText(QCoreApplication.translate("TabWater", u"Sodium:", None))
        self.lbl_chloride_2.setText(QCoreApplication.translate("TabWater", u"Chloride:", None))
        self.lbl_sulfate_2.setText(QCoreApplication.translate("TabWater", u"Sulfate:", None))
        self.lbl_bicarbonate_2.setText(QCoreApplication.translate("TabWater", u"Bicarbonate:", None))
        self.lbl_ph_2.setText(QCoreApplication.translate("TabWater", u"pH:", None))
        self.mixedCalcium.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedMagnesium.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedSodium.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedChloride.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedSulfate.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedBicarbonate.setText(QCoreApplication.translate("TabWater", u"0.0 ppm", None))
        self.mixedPh.setText(QCoreApplication.translate("TabWater", u"7.0", None))
        self.load.setText(QCoreApplication.translate("TabWater", u"Load Selected Profile", None))
        self.group_fermentables.setTitle(QCoreApplication.translate("TabWater", u"Ingredient Library", None))
        self.filter.setPlaceholderText(QCoreApplication.translate("TabWater", u"Filter...", None))
    # retranslateUi
#!/usr/bin/env python
from setuptools import setup, find_packages

# BUGFIX: read the long description with an explicit encoding and close the
# file handle (the previous bare open(...).read() leaked the handle and
# depended on the locale's default encoding).
with open('README.md', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='cryptos',
    version='1.36',
    description='Python Crypto Coin Tools',
    long_description=long_description,
    author='Paul Martin',
    author_email='paulmartinforwork@gmail.com',
    url='http://github.com/primal100/pybitcointools',
    packages=find_packages(),
    scripts=['cryptotool'],
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Security :: Cryptography',
    ],
)
class TokenException(Exception):
    """Base class for all exceptions generated by tokenization.

    Positional arguments are forwarded to ``Exception`` so that the standard
    ``args`` tuple, ``str()`` and ``repr()`` behave as expected.  The previous
    implementation discarded them in ``__init__`` and only worked because
    ``BaseException.__new__`` happens to store ``args`` as a side effect.
    """

    def __init__(self, *args):
        # Delegate to Exception explicitly instead of relying on
        # BaseException.__new__ implementation details.
        super().__init__(*args)
import json

import pandas as pd

from cognite import _constants as constants
from cognite import config
from cognite._utils import InputError
from cognite.v05 import timeseries


class DataTransferService:
    """Create a Data Transfer Service object.

    Fetch timeseries from the api.
    """

    # TODO: Support files_data_spec and events_data_spec
    def __init__(self, data_spec, project=None, api_key=None, cookies=None, num_of_processes=None):
        """
        Args:
            data_spec (data_transfer_service.DataSpec):  Data Spec.
            project (str):          Project name.
            api_key (str):          Api key.
            cookies (dict):         Cookies.
            num_of_processes (int): Number of processes used when fetching datapoints.
        """
        # Fall back to globally configured credentials when none are given.
        config_api_key, config_project = config.get_config_variables(api_key, project)

        if not isinstance(data_spec, DataSpec):
            raise InputError("DataTransferService accepts a DataSpec instance.")

        self.data_spec = data_spec
        self.ts_data_specs = data_spec.time_series_data_specs
        self.files_data_spec = data_spec.files_data_spec
        self.api_key = api_key or config_api_key
        self.project = project or config_project
        self.cookies = cookies
        self.num_of_processes = num_of_processes

    def get_dataframes(self):
        """Return a dictionary of dataframes indexed by label - one per data spec."""
        # Time series specs may be plain dicts or TimeSeriesDataSpec objects;
        # dispatch on the type of the first element.
        if isinstance(self.ts_data_specs[0], dict):
            return self.__get_dataframes_by_dict()
        elif isinstance(self.ts_data_specs[0], TimeSeriesDataSpec):
            return self.__get_dataframes_by_dto()
        raise InputError("DataSpec must be a dict or TimeSeriesDataSpec object.")

    def __get_dataframes_by_dto(self):
        """Fetch one dataframe per TimeSeriesDataSpec object."""
        dataframes = {}
        for tsds in self.ts_data_specs:
            # Normalise every time series entry to the dict form the API expects.
            ts_list = []
            for ts in tsds.time_series:
                if isinstance(ts, dict):
                    ts_list.append(ts)
                elif isinstance(ts, TimeSeries):
                    ts_dict = dict(name=ts.name, aggregates=ts.aggregates, missingDataStrategy=ts.missing_data_strategy)
                    ts_list.append(ts_dict)
                else:
                    raise InputError("time_series parameter must be a dict or TimeSeries object")

            df = timeseries.get_datapoints_frame(
                ts_list,
                tsds.aggregates,
                tsds.granularity,
                tsds.start,
                tsds.end,
                api_key=self.api_key,
                project=self.project,
                cookies=self.cookies,
                processes=self.num_of_processes,
            )
            df = self.__apply_missing_data_strategies(df, ts_list, tsds.missing_data_strategy)
            # Labels key the result dict, so they must be unique across specs.
            if dataframes.get(tsds.label) is not None:
                raise InputError("Unique labels for each dataspec must be used")
            dataframes[tsds.label] = df
        return dataframes

    def __get_dataframes_by_dict(self):
        """Fetch one dataframe per dict-style data spec.

        NOTE(review): unlike the DTO path, duplicate labels are not rejected
        here -- a later spec silently overwrites an earlier one; verify
        whether that is intended.
        """
        dataframes = {}
        for data_spec in self.ts_data_specs:
            ts = data_spec[constants.TIMESERIES]
            aggregates = data_spec[constants.AGGREGATES]
            granularity = data_spec[constants.GRANULARITY]
            start = data_spec.get(constants.START)
            end = data_spec.get(constants.END)
            missing_data_strategy = data_spec.get(constants.MISSING_DATA_STRATEGY)
            label = data_spec.get(constants.LABEL, "default")

            df = timeseries.get_datapoints_frame(
                ts,
                aggregates,
                granularity,
                start,
                end,
                api_key=self.api_key,
                project=self.project,
                cookies=self.cookies,
                processes=self.num_of_processes,
            )
            df = self.__apply_missing_data_strategies(df, ts, missing_data_strategy)
            dataframes[label] = df
        return dataframes

    def __apply_missing_data_strategies(self, df, ts_list, global_missing_data_strategy):
        """Applies missing data strategies to dataframe.

        Local strategies have precedence over global strategy.
        """
        new_df = df["timestamp"]
        for ts in ts_list:
            name = ts["name"]
            # Aggregate columns are named "<series name>|<aggregate>", so a
            # prefix match collects all columns belonging to this series.
            colnames = [colname for colname in df.columns.values if colname.startswith(name)]

            missing_data_strategy = ts.get("missingDataStrategy", global_missing_data_strategy)
            partial_df = df[colnames]
            if missing_data_strategy == "ffill":
                partial_df = df[colnames].fillna(method="pad")
            elif missing_data_strategy and missing_data_strategy.endswith("Interpolation"):
                # Strip the 13-character "Interpolation" suffix to obtain the
                # pandas interpolation method name (e.g. "linearInterpolation").
                method = missing_data_strategy[:-13].lower()
                partial_df = df[colnames].interpolate(method=method, axis=0)
            new_df = pd.concat([new_df, partial_df], axis=1)
        return new_df


class TimeSeries:
    """Value object describing one time series request entry."""

    def __init__(self, name, aggregates=None, missing_data_strategy=None):
        self.name = name
        self.aggregates = aggregates
        self.missing_data_strategy = missing_data_strategy


class TimeSeriesDataSpec:
    """Value object describing one batch of time series to fetch."""

    def __init__(
        self, time_series, aggregates, granularity, missing_data_strategy=None, start=None, end=None, label=None
    ):
        self.time_series = time_series
        self.aggregates = aggregates
        self.granularity = granularity
        self.missing_data_strategy = missing_data_strategy
        self.start = start
        self.end = end
        self.label = label or "default"


class DataSpec:
    """Top-level container combining time series and files data specs."""

    def __init__(self, time_series_data_specs=None, files_data_spec=None):
        self.time_series_data_specs = time_series_data_specs
        self.files_data_spec = files_data_spec

    def to_JSON(self):
        """Serialize this spec (and nested DTOs) to a JSON string."""
        return json.dumps(self.__dict__, cls=DataSpecEncoder)

    @classmethod
    def from_JSON(cls, json_repr):
        """Rebuild a DataSpec, re-hydrating nested dicts into DTO objects."""
        ds = cls(**json.loads(json_repr, cls=DataSpecDecoder))
        for i, tsds in enumerate(ds.time_series_data_specs):
            ds.time_series_data_specs[i] = TimeSeriesDataSpec(**tsds)
            for j, ts in enumerate(ds.time_series_data_specs[i].time_series):
                ds.time_series_data_specs[i].time_series[j] = TimeSeries(**ts)
        return ds


class DataSpecEncoder(json.JSONEncoder):
    """JSON encoder that serializes the DTO classes via their __dict__."""

    def default(self, obj):
        if isinstance(obj, (TimeSeries, TimeSeriesDataSpec, DataSpec)):
            return obj.__dict__
        return super(DataSpecEncoder, self).default(obj)


class DataSpecDecoder(json.JSONDecoder):
    """JSON decoder that re-parses string values which contain nested JSON.

    NOTE(review): json.JSONDecoder.__init__ assigns the ``object_hook``
    constructor argument (default None) to ``self.object_hook``, which may
    shadow this method; verify the hook actually fires when used via
    ``json.loads(..., cls=DataSpecDecoder)``.
    """

    def object_hook(self, obj):
        for key, value in obj.items():
            if isinstance(value, str):
                try:
                    obj[key] = json.loads(value)
                except ValueError:
                    # Plain string, not nested JSON -- keep as-is.
                    pass
        return obj
""" Recipes available to data with tags ['GSAOI', IMAGE']. Default is "reduce_nostack". """ recipe_tags = {'GSAOI', 'IMAGE'} from geminidr.gsaoi.recipes.sq.recipes_IMAGE import reduce_nostack _default = reduce_nostack
#!/usr/bin/env python # The MIT License (MIT) # # Copyright (c) 2015 Caian Benedicto <caian@ggaunicamp.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import ctypes, os, struct, sys, logging

# TODO try-except around C calls


class JobBinary(object):
    """Class binding for the external job binary loaded by spitz"""

    # Constructor
    def __init__(self, filename):
        """Load the shared object at ``filename`` and declare C signatures."""
        filename = os.path.realpath(filename)
        self.filename = filename
        self.module = ctypes.CDLL(filename)

        # Create the return type for the *new functions, otherwise
        # it will assume int as return instead of void*
        rettype_new = ctypes.c_void_p

        self._spits_job_manager_new = self.module.spits_job_manager_new
        self._spits_job_manager_new.restype = rettype_new;
        self.module.spits_job_manager_new.restype = rettype_new;
        self.module.spits_worker_new.restype = rettype_new;
        self.module.spits_committer_new.restype = rettype_new;

        # Create the c function for the runner callback
        # Signature: int run(int argc, char** argv, void* jobinfo,
        #                    longlong jobinfosz, void** data, longlong* size)
        self.crunner = ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.c_int, ctypes.POINTER(ctypes.c_char_p),
            ctypes.c_void_p, ctypes.c_longlong,
            ctypes.POINTER(ctypes.c_void_p),
            ctypes.POINTER(ctypes.c_longlong))

        # Create the c function for the pusher callback
        # Signature: void push(void* data, longlong size, void* ctx)
        self.cpusher = ctypes.CFUNCTYPE(None,
            ctypes.c_void_p, ctypes.c_longlong, ctypes.c_void_p)

    def c_argv(self, argv):
        """Convert a Python string list to C (argc, char*[]) arguments."""
        # Encode the string to byte array
        argv = [x.encode('utf8') for x in argv]
        # Cast the C arguments
        cargc = ctypes.c_int(len(argv))
        cargv = (ctypes.c_char_p * len(argv))()
        cargv[:] = argv
        return cargc, cargv

    def bytes(self, it):
        """Convert an iterable of ints to a bytes object (py2/py3 safe)."""
        try:
            if bytes != str:
                # Python 3: builtin bytes() accepts an int iterable directly.
                return bytes(it)
        except:
            pass
        # Python 2 fallback: pack the values manually.
        return struct.pack('%db' % len(it), *it)

    def unbyte(self, s):
        """Inverse of bytes(): yield an int sequence from bytes (py2/py3 safe)."""
        try:
            if bytes != str:
                # Python 3: bytes already iterates as ints.
                return s
        except:
            pass
        return struct.unpack('%db' % len(s), s)

    def to_c_array(self, it):
        """Copy a bytes-like object into a C byte array; return (ptr, size)."""
        # Cover the case where an empty array or list is passed
        if it == None or len(it) == 0:
            return ctypes.c_void_p(None), 0
        # Normal C allocation
        cit = (ctypes.c_byte * len(it))()
        cit[:] = self.unbyte(it)
        citsz = ctypes.c_longlong(len(it))
        return cit, citsz

    def to_py_array(self, v, sz):
        """Copy ``sz`` bytes pointed to by C pointer ``v`` into Python bytes."""
        v = ctypes.cast(v, ctypes.POINTER(ctypes.c_byte))
        return self.bytes(v[0:sz])

    def spits_main(self, argv, runner):
        """Run the binary's optional ``spits_main`` with ``runner`` as callback.

        Falls back to calling ``runner(argv, None)`` directly when the binary
        does not export ``spits_main``.
        """
        # Call the runner if the job does not have an initializer
        if not hasattr(self.module, 'spits_main'):
            return runner(argv, None)

        # Create an inner converter for the callback
        def run(argc, argv, jobinfo, jobinfosize, data, size):
            # Convert argc/argv back to a string list
            pargv = [argv[i].decode('utf8') for i in range(0, argc)]
            pjobinfo = self.to_py_array(jobinfo, jobinfosize)
            # Run the runner code
            r, pdata = runner(pargv, pjobinfo)
            # Convert the data result to a C pointer/size
            if pdata == None:
                data[0] = None
                size[0] = 0
            else:
                # NOTE(review): cdata is a Python-owned buffer handed to C by
                # address; nothing visible here keeps it alive after run()
                # returns -- verify the C side copies it before the GC runs.
                cdata = (ctypes.c_byte * len(pdata))()
                cdata[:] = self.unbyte(pdata)
                data[0] = ctypes.cast(cdata, ctypes.c_void_p)
                size[0] = len(pdata)
            return r

        # Cast the C arguments
        cargc, cargv = self.c_argv(argv)
        crun = self.crunner(run)

        # Call the C function
        return self.module.spits_main(cargc, cargv, crun)

    def spits_job_manager_new(self, argv, jobinfo):
        """Instantiate the job manager in the binary; returns an opaque handle."""
        # Cast the C arguments
        cargc, cargv = self.c_argv(argv)
        cjobinfo, cjobinfosz = self.to_c_array(jobinfo)
        return ctypes.c_void_p(self._spits_job_manager_new(cargc, cargv,
            cjobinfo, cjobinfosz))

    def spits_job_manager_next_task(self, user_data, jmctx):
        """Ask the job manager for the next task.

        Returns a 3-item list: [status, (task_bytes,) or None, context].
        """
        res = [None, None, None]

        # Create an inner converter for the callback
        def push(ctask, ctasksz, ctx):
            # Thanks to python closures, the context is not
            # necessary, in any case, check for correctness
            res[1] = (self.to_py_array(ctask, ctasksz),)
            res[2] = ctx

        # Get the next task
        res[0] = self.module.spits_job_manager_next_task(user_data,
            self.cpusher(push), ctypes.c_void_p(jmctx))

        return res

    def spits_job_manager_finalize(self, user_data):
        """Tear down the job manager (no-op if the binary lacks the symbol)."""
        # Optional function
        if not hasattr(self.module, 'spits_job_manager_finalize'):
            return
        # Is expected that the framework will not mess with the
        # value inside user_data do its ctype will remain unchanged
        return self.module.spits_job_manager_finalize(user_data)

    def spits_worker_new(self, argv):
        """Instantiate a worker in the binary; returns an opaque handle."""
        # Cast the C arguments
        cargc, cargv = self.c_argv(argv)
        return ctypes.c_void_p(self.module.spits_worker_new(cargc, cargv))

    def spits_worker_run(self, user_data, task, taskctx):
        """Execute one task; returns [status, (result_bytes,) or None, context]."""
        res = [None, None, None]

        # Create the pointer to task and task size
        ctask, ctasksz = self.to_c_array(task)

        # Create an inner converter for the callback
        def push(cres, cressz, ctx):
            # Thanks to python closures, the context is not
            # necessary, in any case, check for correctness
            res[1] = (self.to_py_array(cres, cressz),)
            res[2] = ctx

        # Run the task
        res[0] = self.module.spits_worker_run(user_data, ctask, ctasksz,
            self.cpusher(push), ctypes.c_void_p(taskctx))

        return res

    def spits_worker_finalize(self, user_data):
        """Tear down a worker (no-op if the binary lacks the symbol)."""
        # Optional function
        if not hasattr(self.module, 'spits_worker_finalize'):
            return
        # Is expected that the framework will not mess with the
        # value inside user_data do its ctype will remain unchanged
        return self.module.spits_worker_finalize(user_data)

    def spits_committer_new(self, argv, jobinfo):
        """Instantiate the committer in the binary; returns an opaque handle."""
        # Cast the C arguments
        cargc, cargv = self.c_argv(argv)
        cjobinfo, cjobinfosz = self.to_c_array(jobinfo)
        return ctypes.c_void_p(self.module.spits_committer_new(cargc, cargv,
            cjobinfo, cjobinfosz))

    def spits_committer_commit_pit(self, user_data, result):
        """Commit one partial task result to the committer."""
        # Create the pointer to result and result size
        cres, cressz = self.to_c_array(result)
        return self.module.spits_committer_commit_pit(user_data, cres, cressz)

    def spits_committer_commit_job(self, user_data, jobctx):
        """Finalize the job; returns [status, (final_bytes,) or None, context]."""
        fres = [None, None, None]

        # Create an inner converter for the callback
        def push(cfres, cfressz, ctx):
            # Thanks to python closures, the context is not
            # necessary, in any case, check for correctness
            fres[1] = (self.to_py_array(cfres, cfressz),)
            fres[2] = ctx

        # Commit job and get the final result
        fres[0] = self.module.spits_committer_commit_job(user_data,
            self.cpusher(push), ctypes.c_void_p(jobctx))

        return fres

    def spits_committer_finalize(self, user_data):
        """Tear down the committer (no-op if the binary lacks the symbol)."""
        # Optional function
        if not hasattr(self.module, 'spits_committer_finalize'):
            return
        # Is expected that the framework will not mess with the
        # value inside user_data do its ctype will remain unchanged
        return self.module.spits_committer_finalize(user_data)
import csv
import logging

logger = logging.getLogger(__name__)

# Mapping from the two-digit GCVS constellation code (leading characters of a
# star designation) to the standard IAU three-letter abbreviation.
CONSTELLATIONS = {
    '01': 'AND', '02': 'ANT', '03': 'APS', '04': 'AQR', '05': 'AQL',
    '06': 'ARA', '07': 'ARI', '08': 'AUR', '09': 'BOO', '10': 'CAE',
    '11': 'CAM', '12': 'CNC', '13': 'CVN', '14': 'CMA', '15': 'CMI',
    '16': 'CAP', '17': 'CAR', '18': 'CAS', '19': 'CEN', '20': 'CEP',
    '21': 'CET', '22': 'CHA', '23': 'CIR', '24': 'COL', '25': 'COM',
    '26': 'CRA', '27': 'CRB', '28': 'CRV', '29': 'CRT', '30': 'CRU',
    '31': 'CYG', '32': 'DEL', '33': 'DOR', '34': 'DRA', '35': 'EQU',
    '36': 'ERI', '37': 'FOR', '38': 'GEM', '39': 'GRU', '40': 'HER',
    '41': 'HOR', '42': 'HYA', '43': 'HYI', '44': 'IND', '45': 'LAC',
    '46': 'LEO', '47': 'LMI', '48': 'LEP', '49': 'LIB', '50': 'LUP',
    '51': 'LYN', '52': 'LYR', '53': 'MEN', '54': 'MIC', '55': 'MON',
    '56': 'MUS', '57': 'NOR', '58': 'OCT', '59': 'OPH', '60': 'ORI',
    '61': 'PAV', '62': 'PEG', '63': 'PER', '64': 'PHE', '65': 'PIC',
    '66': 'PSC', '67': 'PSA', '68': 'PUP', '69': 'PYX', '70': 'RET',
    '71': 'SGE', '72': 'SGR', '73': 'SCO', '74': 'SCL', '75': 'SCT',
    '76': 'SER', '77': 'SEX', '78': 'TAU', '79': 'TEL', '80': 'TRI',
    '81': 'TRA', '82': 'TUC', '83': 'UMA', '84': 'UMI', '85': 'VEL',
    '86': 'VIR', '87': 'VOL', '88': 'VUL',
}

# str.translate table deleting the markup characters GCVS mixes into the
# numeric epoch and period fields.
TRANSLATION_MAP = {ord(ch): None for ch in '():/'}


class GcvsParser(object):
    """
    A parser for GCVS data format.

    Example usage:

    >>> with open('iii.dat', 'rb') as fp:
    ...     parser = GcvsParser(fp)
    ...     for star in parser:
    ...         print(star['name'])
    R AND
    S AND
    #...
    V0515 VUL
    V0516 VUL
    """

    def __init__(self, fp):
        """
        Creates the parser and feeds it a file-like object.

        :param fp: a file-like object or a generator yielding strings
        """
        # GCVS uses '|' as the field delimiter.
        self.reader = csv.reader(fp, delimiter=str('|'))
        # skip two initial lines
        next(self.reader)
        next(self.reader)

    def __iter__(self):
        """Yield one dict per valid data row, skipping malformed rows."""
        for row in self.reader:
            if len(row) != 15:
                # Not a data row (headers, separators, truncated lines).
                continue
            try:
                yield self.row_to_dict(row)
            except Exception:
                # Log and keep going: one bad record must not abort the run.
                logger.exception("Error in row: %s", row)
                continue

    def row_to_dict(self, row):
        """
        Converts a raw GCVS record to a dictionary of star data.
        """
        constellation = self.parse_constellation(row[0])
        name = self.parse_name(row[1])
        ra, dec = self.parse_coordinates(row[2])
        variable_type = row[3].strip()
        max_magnitude, symbol = self.parse_magnitude(row[4])
        min_magnitude, symbol = self.parse_magnitude(row[5])
        if symbol == '(' and max_magnitude is not None:
            # this is actually amplitude: the field stores (max + amplitude),
            # so reconstruct the true minimum magnitude.
            min_magnitude = max_magnitude + min_magnitude
        epoch = self.parse_epoch(row[8])
        period = self.parse_period(row[10])
        return {
            'constellation': constellation,
            'name': name,
            'ra': ra,
            'dec': dec,
            'variable_type': variable_type,
            'max_magnitude': max_magnitude,
            'min_magnitude': min_magnitude,
            'epoch': epoch,
            'period': period,
        }

    def parse_constellation(self, constellation_str):
        """Map the leading two-digit code to an IAU constellation name."""
        constellation_num = constellation_str[:2]
        return CONSTELLATIONS[constellation_num]

    def parse_name(self, name_str):
        """
        Normalizes variable star designation (name).
        """
        # Designation occupies the first 9 characters; collapse inner spaces.
        name = name_str[:9]
        return ' '.join(name.split()).upper()

    def parse_coordinates(self, coords_str):
        """
        Returns a pair of PyEphem-compatible coordinate strings (Ra, Dec).

        If the star has no coordinates in GCVS (there are such cases), a pair
        of None values is returned.
        """
        if coords_str.strip() == '':
            return (None, None)
        # Fixed-column format: HHMMSS.s for RA, sDDMMSS for Dec.
        ra = '%s:%s:%s' % (coords_str[0:2], coords_str[2:4], coords_str[4:8])
        dec = '%s:%s:%s' % (coords_str[8:11], coords_str[11:13], coords_str[13:15])
        return (ra, dec)

    def parse_magnitude(self, magnitude_str):
        """
        Converts magnitude field to a float value, or ``None`` if GCVS does
        not list the magnitude.

        Returns a tuple (magnitude, symbol), where symbol can be either an
        empty string or a single character - one of '<', '>', '('.
        """
        symbol = magnitude_str[0].strip()
        magnitude = magnitude_str[1:6].strip()
        return float(magnitude) if magnitude else None, symbol

    def parse_epoch(self, epoch_str):
        """
        Converts epoch field to a float value (adding 24... prefix), or
        ``None`` if there is no epoch in GCVS record.
        """
        # Strip markup characters, then keep the leading numeric part.
        epoch = epoch_str.translate(TRANSLATION_MAP)[:10].strip()
        return 2400000.0 + float(epoch) if epoch else None

    def parse_period(self, period_str):
        """
        Converts period field to a float value or ``None`` if there is
        no period in GCVS record.
        """
        # Columns 3..13 (after markup removal) carry the period in days.
        period = period_str.translate(TRANSLATION_MAP)[3:14].strip()
        return float(period) if period else None
# Re-export TestPolicy so callers can import it directly from this package.
from test.TestPolicy import TestPolicy

# Explicit public API of the package.
__all__ = ["TestPolicy"]
# -*- coding: utf-8 -*-
"""
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company.  All rights reserved.
"""

from libs.AgentAPI.AgentAPIMgr import *

from .AbstractDebug import *


# Debug helper that communicates with the GameReg process.
class GameRegDebug(AbstractDebug):
    """Debugger for the GameReg recognition process.

    Establishes a tbus channel, sends image frames and receives the
    recognition result images back.
    """

    def __init__(self, canvas=None, ui=None):
        AbstractDebug.__init__(self, canvas, ui)
        self.testPrograme += "/GameReg SDKTool"

    def initialize(self):
        """Override of the base initialize: set up tbus and send the task config.

        Returns:
            bool: True on success, False on any setup failure.
        """
        self.gameRegAPI = AgentAPIMgr()
        env_dist = os.environ
        # Bug fix: env_dist['AI_SDK_PATH'] raised KeyError when the variable
        # was absent, so the `is None` guard below was unreachable; .get()
        # makes the error path actually work.
        sdkPath = env_dist.get('AI_SDK_PATH')
        if sdkPath is None:
            self.logger.error('there is no AI_SDK_PATH')
            return False

        # Initialize the AgentAPI: create the tbus channel and load the task
        # configuration files.
        res = self.gameRegAPI.Initialize(sdkPath + "/cfg/task/gameReg/task_SDKTool.json",
                                         sdkPath + "/cfg/task/gameReg/refer_SDKTool.json",
                                         selfAddr="SDKToolAddr", cfgPath=TBUS_PATH)
        if res is False:
            self.logger.error("Agent API init failed")
            return False

        # Send the task message to the GameReg process.
        res = self.gameRegAPI.SendCmd(MSG_SEND_GROUP_ID, 1)
        if res is False:
            self.logger.error("send task failed")
            return False
        return True

    def send_frame(self, frame=None):
        """Override of the base send_frame: send one image frame to GameReg.

        Args:
            frame: image as a numpy-style array (uses .shape in the helper).

        Returns:
            bool: True on success, False when sending failed.
        """
        srcImgDict = self._generate_img_dict(frame)
        ret = self.gameRegAPI.SendSrcImage(srcImgDict)
        if ret is False:
            self.logger.error('send frame failed')
            return False
        return True

    def recv_result(self):
        """Override of the base recv_result: fetch the recognition result.

        Returns:
            The result image from GameReg, or None when nothing is available.
        """
        GameResult = self.gameRegAPI.GetInfo(GAME_RESULT_INFO)
        if GameResult is None:
            return
        return GameResult['image']

    def _generate_img_dict(self, srcImg):
        """Build the frame message dict expected by SendSrcImage."""
        srcImgDict = dict()
        srcImgDict['frameSeq'] = self.frameSeq
        self.frameSeq += 1
        srcImgDict['image'] = srcImg
        srcImgDict['width'] = srcImg.shape[1]
        srcImgDict['height'] = srcImg.shape[0]
        srcImgDict['deviceIndex'] = 1
        return srcImgDict
from flask import Flask
from flask_restful import Api
from decouple import config as env

from app.main.config import config_by_name, cache
from app.main.controller.stocks import StocksController


def create_app():
    """Build and configure the Flask application.

    Selects the configuration class named by the YAHOO_STOCKS_API_ENV
    environment variable, initialises the cache extension, registers the
    REST resources and returns the ready-to-run app instance.
    """
    flask_app = Flask(__name__)

    # Pick the configuration matching the current environment name.
    env_name = env('YAHOO_STOCKS_API_ENV')
    flask_app.config.from_object(config_by_name[env_name])

    # Wire extensions to this app instance.
    cache.init_app(flask_app)

    # Register the REST endpoints.
    rest_api = Api(flask_app)
    rest_api.add_resource(StocksController, "/stocks")

    return flask_app
# -*- coding: utf-8 -*- # from odoo import http # class OverwriteIrSequence(http.Controller): # @http.route('/overwrite_ir_sequence/overwrite_ir_sequence/', auth='public') # def index(self, **kw): # return "Hello, world" # @http.route('/overwrite_ir_sequence/overwrite_ir_sequence/objects/', auth='public') # def list(self, **kw): # return http.request.render('overwrite_ir_sequence.listing', { # 'root': '/overwrite_ir_sequence/overwrite_ir_sequence', # 'objects': http.request.env['overwrite_ir_sequence.overwrite_ir_sequence'].search([]), # }) # @http.route('/overwrite_ir_sequence/overwrite_ir_sequence/objects/<model("overwrite_ir_sequence.overwrite_ir_sequence"):obj>/', auth='public') # def object(self, obj, **kw): # return http.request.render('overwrite_ir_sequence.object', { # 'object': obj # })
import testserver

if __name__ == '__main__':
    # Build the application from the local configuration file.
    app = testserver.get_from_cfg('config.py')
    # Listen on all interfaces on port 8080; reloader disabled so the
    # process is not forked twice, threaded so requests are served
    # concurrently.
    app.run('0.0.0.0', 8080, use_reloader=False, threaded=True)
""" seed_location_table.py Seeds the location table using the geocoder function to identify the latitude and longitude for the given location. """ import os parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) os.sys.path.insert(0,parentdir) from model import Profile, Location, db, connect_to_db import geocoder def geo_code(): """ Adds the location name, latitude, longitude to locations table """ locations = db.session.query(Profile.location).filter(Profile.location!='Pacoima, CA', Profile.location!='Irvington, NJ').group_by(Profile.location).all() for location in locations: try: g = geocoder.google(location[0]) lat, lng = g.latlng new_location = Location(location=location[0], latitude=lat, longitude=lng) db.session.add(new_location) except: continue db.session.commit() if __name__ == "__main__": #connect db to app from flask_app import app connect_to_db(app) print "Connected to DB." #create the database db.create_all() geo_code()
from functools import reduce

# Read the answer lines, keeping blank lines as group separators.
lines = []
with open('input.txt', 'r') as f:
    for line in f:
        lines.append(line.strip())


def everyone_yes(c, ss):
    """Return True if character ``c`` appears in every answer set in ``ss``."""
    return all(c in s for s in ss)


def count_unanimous(answer_lines):
    """Count questions answered "yes" by every member of each group.

    Groups are separated by blank lines; each non-blank line holds one
    person's answers.  The result is the sum over all groups of the number
    of characters common to every member of that group.
    """
    total = 0
    group = []  # per-person answer sets for the current group
    # Append a sentinel blank line so the final group is flushed by the same
    # code path (the original special-cased the last input line).
    for line in answer_lines + ['']:
        if line:
            group.append(set(line))
        elif group:
            # Intersect all member sets: the unanimously-answered questions.
            total += len(reduce(set.intersection, group))
            group = []
    return total


counts = count_unanimous(lines)
print(counts)
# Generated by Django 2.2.10 on 2021-01-13 13:59

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the ``radical`` boolean flag to the ``Agent`` model."""

    dependencies = [
        ('agents', '0014_person_meta'),
    ]

    operations = [
        migrations.AddField(
            model_name='agent',
            name='radical',
            # Typo fix in the user-facing help text: "Wether" -> "Whether".
            field=models.BooleanField(default=False, help_text='Whether this person was considered a radical or not.'),
        ),
    ]
# -------------------- # FILE NAME # -------------------- # Purpose or description # *** SOME SETUP STUFF *** # MODULES FROM PYTHON'S STANDARD LIBRARY # MODULES FROM PYPI (the Python community) # MY MODULES # SOME FUNCTIONS AND VARIABLES FOR EASY REFERENCE # *** MAIN PROGRAM STARTS HERE ***
from datetime import datetime
import logging


class TestStep:
    """Representation of a single test step.

    This class provides the basic block for a test. Each test can consist of
    multiple steps with each step being represented by instances of this
    class. The test step logic should be contained in the __call__ method
    (without parameters) - see method documentation for more details.
    """

    def __init__(self, name):
        # Human-readable step name.
        self.name = name
        # Start timestamp (datetime.datetime), set by set_started().
        self.start_timestamp = None
        # Step duration (datetime.timedelta); the implementation fills it in.
        self.duration = None
        # Execution status flag: stays False until explicitly marked passed.
        self.successful = False
        # Module-level logger for step implementations to use.
        self.logger = logging.getLogger(__name__)

    def __call__(self):
        """Execute the test step.

        Derived classes should override this method and all test step logic
        should be placed inside it. A client programmer must manually set
        the test step duration time and mark it is successful.
        """
        msg = 'Class {class_name} does not implement __call__() method'.format(
            class_name=type(self).__name__)
        raise NotImplementedError(msg)

    def set_passed(self):
        """Mark the step as passed.

        Convenience method.
        """
        self.successful = True

    def set_failed(self):
        """Mark the step as failed.

        Convenience method.
        """
        self.successful = False

    def set_started(self):
        """Mark the start of the test.

        Convenience method.
        """
        self.start_timestamp = datetime.now()
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html import os import sys # -- Project information ----------------------------------------------------- project = "unidist" copyright = "2021-2022, Modin Authors" author = "unidist contributors" sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) import unidist # noqa: E402 # The short project version like 1.1 version = f"{unidist.__version__}" # The full project version like 1.1.0rc1 release = version # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- # Maps git branches to Sphinx themes default_html_theme = "sphinx_rtd_theme" current_branch = "nature" # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { "sidebarwidth": 270, "collapse_navigation": False, "navigation_depth": 4, } # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # html_sidebars = { "**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"] } html_favicon = "img/unidist-icon-simple-32x32.ico" html_logo = "img/unidist-logo-simple-628x128.png"
# Module authorship tag; informational only, no runtime behavior.
__author__ = 'juan'
import torch
import torch.nn as nn

from lib.normalize import Normalize

__all__ = ['Generator', 'Discriminator']


class Generator(nn.Module):
    """Generator that synthesises hard positive samples.

    A three-layer fully connected network: the concatenated triplet
    (a, p, n) goes in, a generated positive p' comes out, L2-normalised
    onto the unit sphere.
    """

    def __init__(self, embedding_size=128):
        super(Generator, self).__init__()
        # 3*d -> 2*d -> d -> d, with LeakyReLU between layers.
        self.fc1 = nn.Linear(3 * embedding_size, 2 * embedding_size)
        self.fc2 = nn.Linear(2 * embedding_size, embedding_size)
        self.fc3 = nn.Linear(embedding_size, embedding_size)
        self.lrelu = nn.LeakyReLU(inplace=True)
        self.l2norm = Normalize(2)

    def forward(self, x):
        hidden = self.lrelu(self.fc1(x))
        hidden = self.lrelu(self.fc2(hidden))
        # Project the synthesised embedding onto the unit L2 sphere.
        return self.l2norm(self.fc3(hidden))


class Discriminator(nn.Module):
    """Discriminator judging a triplet (a, p, n) against (a, p', n).

    A three-layer fully connected network producing a single score in
    (0, 1) per input row.
    """

    def __init__(self, embedding_size=128):
        super(Discriminator, self).__init__()
        # 3*d -> d -> d/2 -> 1, with LeakyReLU between layers.
        self.fc1 = nn.Linear(3 * embedding_size, embedding_size)
        self.fc2 = nn.Linear(embedding_size, embedding_size // 2)
        self.fc3 = nn.Linear(embedding_size // 2, 1)
        self.lrelu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        score = self.lrelu(self.fc1(x))
        score = self.lrelu(self.fc2(score))
        score = self.fc3(score)
        # Drop the trailing singleton dim, then squash to (0, 1).
        return torch.sigmoid(score.squeeze(-1))
import logging
import os
import subprocess
import sys
import tempfile
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Lock

from pymongo import MongoClient

# Configuration for log output: mirror INFO records to stdout and a log file.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler('logs_one.log')
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)


class BaseSolutionChecker:
    """Write a solution plus its test cases into a temp file for checking.

    Subclasses implement is_solution_ok() for a concrete language.
    """

    def __init__(self, solution, test_cases):
        self.solution = solution
        self.test_cases = test_cases
        # delete=False: the file must outlive this context so the child
        # process can read it; it is removed via remove_temp_file().
        with tempfile.NamedTemporaryFile(delete=False) as fp:
            self.fp = fp
            fp.write(solution.encode())
            fp.write('\n'.encode())
            fp.write(test_cases.encode())

    def is_solution_ok(self):
        """Return True if the solution passes its test cases."""
        raise NotImplementedError

    def remove_temp_file(self):
        """Delete the temporary file created in __init__."""
        os.unlink(self.fp.name)


class PythonSolutionChecker(BaseSolutionChecker):
    """Check a Python solution by executing the combined file with python3."""

    def is_solution_ok(self):
        # A zero return code means the appended test cases did not raise.
        process = subprocess.run(['python3', self.fp.name])
        self.remove_temp_file()
        return not process.returncode


# Maps the stored language label to the checker implementation.
language_mapping = {'Python': PythonSolutionChecker}


class TestRunnerDaemon:
    """Poll MongoDB for edited solutions and verify them in worker threads."""

    def __init__(self, db, thread_workers=10):
        self.db = db
        self.thread_workers = thread_workers
        # Serialises the claim-and-mark step in run().
        self.threadlock = Lock()

    def check_solution(self, solution):
        """
        Work on the solution in a separate thread
        Change solution status to 'correct' or 'failed'
        """
        checker = language_mapping[solution['language']](
            solution['solution'], solution['test_cases'])
        status = 'correct' if checker.is_solution_ok() else 'failed'
        # Bug fix: use the injected self.db handle instead of the module-level
        # `db` that only exists when this file is run as a script.
        self.db.solution.update_one(
            {'_id': solution['_id']},
            {'$set': {'status': status}},
        )

    def run(self):
        """Main loop: claim 'edited' solutions and submit them for checking."""
        with ThreadPoolExecutor(max_workers=self.thread_workers) as executor:
            while True:
                solution = self.db.solution.find_one({'status': 'edited'})
                if solution:
                    with self.threadlock:
                        # Change solution status to 'testing' before solving
                        # (bug fix: was the module-global `db`).
                        self.db.solution.update_one(
                            {'_id': solution['_id']},
                            {'$set': {'status': 'testing'}},
                        )
                        logging.info(f'Solution {solution["_id"]} in testing')
                    executor.submit(self.check_solution, solution)
                else:
                    # Avoid a hot busy-wait loop while the queue is empty.
                    time.sleep(1)


if __name__ == '__main__':
    MONGODB_USER = 'mongo-ad'
    MONGODB_USER_PASS = 'mongo-ad'
    MONGODB_HOST = 'localhost'
    url = f'mongodb://{MONGODB_USER}:{MONGODB_USER_PASS}@{MONGODB_HOST}/admin?retryWrites=true&w=majority'
    db = MongoClient(url).codearena_mdb
    TestRunnerDaemon(db).run()
import argparse
import base64
import mimetypes
import os
import threading
import time
import webbrowser
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import quote

# Module-level state shared between the HTTP handler and serve():
# RESPONSE holds the generated page; RESPONDED flips once it was fetched.
RESPONSE = ""
RESPONDED = False

# Iodide notebook (iomd) template embedded into the generated page.
# NOTE(review): .format(filename=...) is applied to this template below but
# no "{filename}" placeholder is visible in it -- verify the fetch line
# should read "fileContent={filename}".
IOMD = """
%% fetch text: fileContent=(unknown)

%% js
// You can access the file content in js-space via the
// `fileContent` variable
fileContent

%% py
# similarly, in a python cell you can use the standard idiom
# for importing the data
from js import fileContent as file_content
file_content
"""


class RequestHandler(BaseHTTPRequestHandler):
    """Serve the prepared RESPONSE once and flag completion."""

    def do_GET(self):
        global RESPONDED
        self.send_response(200)
        self.end_headers()
        # str.encode already returns bytes; the redundant bytes() wrapper
        # from the original was removed.
        self.wfile.write(RESPONSE.encode("utf-8"))
        RESPONDED = True


def serve(filename):
    """Serve a one-shot page that imports *filename* into a new notebook.

    Builds the intermediary HTML page, serves it from an ephemeral local
    port, opens it in the browser, and blocks until it has been fetched.
    """
    global RESPONSE
    base_filename = os.path.basename(filename)
    # Context managers close both file handles (the original leaked them).
    with open(filename, 'rb') as content_file:
        file_content = content_file.read()
    with open('create-file-intermediary.html') as template_file:
        template = template_file.read()
    RESPONSE = template.format(
        server="localhost:8000",
        iomd=quote(IOMD.format(filename=base_filename)),
        filename=base_filename,
        mimetype=mimetypes.guess_type(base_filename)[0],
        content=base64.b64encode(file_content).decode("utf-8")
    )
    # Port 0 lets the OS pick a free port.
    httpd = HTTPServer(('localhost', 0), RequestHandler)
    server = threading.Thread(target=httpd.serve_forever)
    server.daemon = True  # don't hang on exit (setDaemon() is deprecated)
    server.start()
    webbrowser.open_new_tab(f'http://localhost:{httpd.server_port}')
    # Block until the browser has fetched the page once.
    while not RESPONDED:
        time.sleep(0.1)


parser = argparse.ArgumentParser()
parser.add_argument("file", nargs=1, help="file to upload")
args = parser.parse_args()
serve(args.file[0])
from django.contrib import admin

from jobs.models import Job, WorkHistory, EducationHistory, Reference

# Register every job-board model with the default admin site.
for model in (Job, WorkHistory, EducationHistory, Reference):
    admin.site.register(model)
""" Module for random Recommender """ import numpy from overrides import overrides from lib.abstract_recommender import AbstractRecommender class RandomRecommender(AbstractRecommender): """ A class that takes in the rating matrix and oupits random predictions """ def __init__(self, initializer, evaluator, hyperparameters, options, verbose=False, load_matrices=True, dump_matrices=True, train_more=True, is_hybrid=False, update_with_items=False, init_with_content=True): """ Constructor of the random recommender. :param ModelInitializer initializer: A model initializer. :param Evaluator evaluator: Evaluator of the recommender and holder of the input data. :param dict hyperparameters: hyperparameters of the recommender, contains _lambda and n_factors :param dict options: Dictionary of the run options, contains n_iterations and k_folds :param boolean verbose: A flag if True, tracing will be printed :param boolean load_matrices: A flag for reinitializing the matrices. :param boolean dump_matrices: A flag for saving the matrices. :param boolean train_more: train_more the collaborative filtering after loading matrices. :param boolean is_hybrid: A flag indicating whether the recommender is hybrid or not. :param boolean update_with_items: A flag the decides if we will use the items matrix in the update rule. 
""" # setting input self.initializer = initializer self.evaluator = evaluator self.ratings = evaluator.get_ratings() self.n_users, self.n_items = self.ratings.shape self.k_folds = None self.prediction_fold = -1 # setting flags self._verbose = verbose self._load_matrices = load_matrices self._dump_matrices = dump_matrices self._train_more = train_more self._is_hybrid = is_hybrid self._update_with_items = update_with_items self._split_type = 'user' self._init_with_content = init_with_content self.set_hyperparameters(hyperparameters) self.set_options(options) @overrides def set_hyperparameters(self, hyperparameters): """ Set hyperparameters :param dict hyperparameters: hyperparameters of the recommender, contains _lambda and n_factors """ self.n_factors = hyperparameters['n_factors'] self._lambda = hyperparameters['_lambda'] self.predictions = None self.hyperparameters = hyperparameters.copy() self.hyperparameters['fold'] = 0 @overrides def train(self): """ Setting the data and printing the evaluation report """ if self.splitting_method == 'naive': self.set_data(*self.evaluator.naive_split(self._split_type)) else: self.set_data(*self.evaluator.naive_split(self._split_type)) self.fold_test_indices = self.evaluator.get_kfold_indices() return self.get_evaluation_report() @overrides def get_predictions(self): """ Predict random ratings for every user and item. :returns: A (user, document) matrix of predictions :rtype: ndarray """ if self.predictions is None: self.predictions = numpy.random.random_sample((self.n_users, self.n_items)) return self.predictions
import json
from typing import List

from podm.podm import Box


class PCOCODataset:
    """In-memory representation of a COCO-format dataset (info, licenses,
    images, annotations, categories)."""

    def __init__(self):
        self.annotations = []  # type: List[PCOCOAnnotation]
        self.images = []  # type: List[PCOCOImage]
        self.categories = []  # type: List[PCOCOCategory]
        self.licenses = []  # type: List[PCOCOLicense]
        self.contributor = ''
        self.description = ''
        self.url = ''
        self.date_created = ''
        self.version = ''
        self.year = 0

    def to_dict(self):
        """Serialize the dataset to a COCO-format dict."""
        return {
            "licenses": [i.to_dict() for i in self.licenses],
            "info": {
                "contributor": self.contributor,
                # Fixed: previously emitted self.contributor here, so the
                # description field was never serialized.
                "description": self.description,
                "url": self.url,
                "date_created": self.date_created,
                "version": self.version,
                "year": self.year
            },
            'annotations': [i.to_dict() for i in self.annotations],
            'images': [i.to_dict() for i in self.images],
            'categories': [i.to_dict() for i in self.categories],
        }

    @property
    def cat_name_to_id(self):
        """Map category name -> category id."""
        return {v.name: v.id for v in self.categories}

    @property
    def img_name_to_id(self):
        """Map image file name -> image id."""
        return {v.file_name: v.id for v in self.images}


class PCOCOImage:
    """One entry of the COCO 'images' list."""

    def __init__(self):
        self.width = 0  # type:int
        self.height = 0  # type:int
        self.flickr_url = ''  # type:str
        self.coco_url = ''  # type:str
        self.file_name = ''  # type:str
        self.license = 0  # type:int
        self.id = 0  # type:int
        self.date_captured = 0

    def to_dict(self):
        return {
            "width": self.width,
            "height": self.height,
            "flickr_url": self.flickr_url,
            "coco_url": self.coco_url,
            "file_name": self.file_name,
            "date_captured": self.date_captured,
            "license": self.license,
            "id": self.id,
        }


class PCOCOLicense:
    """One entry of the COCO 'licenses' list."""

    def __init__(self):
        self.id = 0  # type:int
        self.name = ''  # type:str
        self.url = ''  # type:str

    def to_dict(self):
        return {
            "id": self.id,
            "name": self.name,
            "url": self.url,
        }


class PCOCOCategory:
    """One entry of the COCO 'categories' list."""

    def __init__(self):
        self.id = 0  # type:int
        self.name = ''  # type:str
        self.supercategory = ''  # type:str

    def to_dict(self):
        return {
            "id": self.id,
            "name": self.name,
            "supercategory": self.supercategory,
        }


class PCOCOAnnotation:
    """One entry of the COCO 'annotations' list; box stored as corner
    coordinates (xtl, ytl, xbr, ybr)."""

    def __init__(self):
        self.id = 0  # type:int
        self.image_id = 0  # type:int
        self.category_id = 0  # type:int
        self.iscrowd = 0  # type:int
        self.score = 0.  # type:float
        self.xtl = 0
        self.ytl = 0
        self.xbr = 0
        self.ybr = 0

    @property
    def area(self) -> float:
        """Axis-aligned box area."""
        return (self.xbr - self.xtl) * (self.ybr - self.ytl)

    def to_dict(self):
        # COCO bbox format is [x, y, width, height].
        return {
            "id": self.id,
            "image_id": self.image_id,
            "area": self.area,
            "category_id": self.category_id,
            "bbox": [self.xtl, self.ytl, self.xbr - self.xtl, self.ybr - self.ytl],
            "segmentation": [[self.xtl, self.ytl, self.xbr, self.ytl,
                              self.xbr, self.ybr, self.xtl, self.ybr]],
            "iscrowd": self.iscrowd,
            "score": self.score
        }


# def dump(dataset: PCOCODataset, fp, **kwargs):
#     json.dump(dataset.to_dict(), fp, **kwargs)


def load(fp, **kwargs) -> PCOCODataset:
    """Deserialize a COCO-format JSON file object into a PCOCODataset."""
    coco_obj = json.load(fp, **kwargs)
    dataset = PCOCODataset()
    dataset.contributor = coco_obj['info']['contributor']
    dataset.description = coco_obj['info']['description']
    dataset.url = coco_obj['info']['url']
    dataset.date_created = coco_obj['info']['date_created']
    dataset.version = coco_obj['info']['version']
    dataset.year = coco_obj['info']['year']

    for ann_obj in coco_obj['annotations']:
        ann = PCOCOAnnotation()
        ann.id = ann_obj['id']
        ann.category_id = ann_obj['category_id']
        ann.image_id = ann_obj['image_id']
        ann.iscrowd = ann_obj['iscrowd']
        # Convert COCO [x, y, w, h] into corner coordinates.
        ann.xtl = ann_obj['bbox'][0]
        ann.ytl = ann_obj['bbox'][1]
        ann.xbr = ann_obj['bbox'][0] + ann_obj['bbox'][2]
        ann.ybr = ann_obj['bbox'][1] + ann_obj['bbox'][3]
        # 'score' only appears in prediction files, not ground truth.
        if 'score' in ann_obj:
            ann.score = ann_obj['score']
        dataset.annotations.append(ann)

    for cat_obj in coco_obj['categories']:
        cat = PCOCOCategory()
        cat.id = cat_obj['id']
        cat.name = cat_obj['name']
        cat.supercategory = cat_obj['supercategory']
        dataset.categories.append(cat)

    for img_obj in coco_obj['images']:
        img = PCOCOImage()
        img.id = img_obj['id']
        img.height = img_obj['height']
        img.width = img_obj['width']
        img.file_name = img_obj['file_name']
        img.flickr_url = img_obj['flickr_url']
        img.coco_url = img_obj['coco_url']
        img.date_captured = img_obj['date_captured']
        img.license = img_obj['license']
        dataset.images.append(img)

    for lic_obj in coco_obj['licenses']:
        lic = PCOCOLicense()
        lic.id = lic_obj['id']
        lic.name = lic_obj['name']
        lic.url = lic_obj['url']
        dataset.licenses.append(lic)

    return dataset
class Solution:
    def maxProfit(self, prices):
        """Best profit from a single buy-then-sell; 0 if no profit possible.

        Single pass: track the cheapest price seen so far and the best
        spread against it.
        """
        best = 0
        lowest = float('inf')
        for current in prices:
            if current < lowest:
                lowest = current
            elif current - lowest > best:
                best = current - lowest
        return best
# =============================================================================
# HEPHAESTUS VALIDATION 7 - LINEAR ALGEBRA SOLUTION DIFFERENCES
# =============================================================================
# Compares the wall-clock time of three different linear solvers used inside
# the cross-section analysis (LU factorization, dense solve, sparse solve)
# on the same composite box-beam model.

# IMPORTS:
from Structures import MaterialLib, Laminate, XSect
from AircraftParts import Airfoil
import numpy as np

# HODGES XSECTION VALIDATION
# Add the material property
# Material 1: transversely isotropic AS4/3501-6 carbon/epoxy
matLib = MaterialLib()
matLib.addMat(1,'AS43501-6','trans_iso',[20.6e6,1.42e6,.34,.34,.87e6,0.],0.004826)
matLib.addMat(2,'AS43501-6*','trans_iso',[20.6e6,1.42e6,.34,.42,.87e6,0.],.005)

# Box Configuration 2: chord length and normalized x-bounds of the box cell
c2 = 0.53
xdim2 = [-0.8990566037735849,0.8990566037735849]
af2 = Airfoil(c2,name='box')

# B1 Box beam (0.5 x 0.923 in^2 box with laminate schedule [15]_6)
# All four walls share the same [-15]_6 laminate.
n_i_B1 = [6]
m_i_B1 = [2]
th_B1 = [-15]
lam1_B1 = Laminate(n_i_B1, m_i_B1, matLib, th=th_B1)
lam2_B1 = Laminate(n_i_B1, m_i_B1, matLib, th=th_B1)
lam3_B1 = Laminate(n_i_B1, m_i_B1, matLib, th=th_B1)
lam4_B1 = Laminate(n_i_B1, m_i_B1, matLib, th=th_B1)
lam1_B1.printPlies()
laminates_B1 = [lam1_B1,lam2_B1,lam3_B1,lam4_B1]
xsect_B1 = XSect(af2,xdim2,laminates_B1,matLib,typeXsect='box',meshSize=2)

# Time the three solver paths on the same cross-section.
# NOTE(review): time.time() is used for interval timing; time.perf_counter()
# would be more precise — confirm before changing reported numbers.
import time
t1 = time.time()
# With lu factorization
xsect_B1.xSectionAnalysis()
xsect_B1.printStiffMat()
t2 = time.time()
# With linalg.solve
xsect_B1.xSectionAnalysis(linalgSolve=True)
xsect_B1.printStiffMat()
t3 = time.time()
# With cholesky
xsect_B1.xSectionAnalysis(sparse=True)
xsect_B1.printStiffMat()
t4 = time.time()
print('LU Factor Time: %5.5f seconds' %(t2-t1))
print('Solve Time: %5.5f seconds' %(t3-t2))
print('Sparse Time: %5.5f seconds' %(t4-t3))

#xsect_B1.printStiffMat()
#strn = np.array([0.,0.,0.,0.,0.,1.0])
#xsect_B1.strn2dspl(strn,figName='Validation Case B1',contour_Total_T=True)

# The remaining validation cases are disabled (kept for reference).
'''
# Layup 1 Box beam (0.5 x 0.923 in^2 box with laminate schedule [0]_6)
n_i_Lay1 = [6]
m_i_Lay1 = [2]
th_Lay1 = [0]
lam1_Lay1 = Laminate(n_i_Lay1, m_i_Lay1, matLib, th=th_Lay1)
lam2_Lay1 = Laminate(n_i_Lay1, m_i_Lay1, matLib, th=th_Lay1)
lam3_Lay1 = Laminate(n_i_Lay1, m_i_Lay1, matLib, th=th_Lay1)
lam4_Lay1 = Laminate(n_i_Lay1, m_i_Lay1, matLib, th=th_Lay1)
lam1_Lay1.printPlies()
laminates_Lay1 = [lam1_Lay1,lam2_Lay1,lam3_Lay1,lam4_Lay1]
xsect_Lay1 = XSect(af2,xdim2,laminates_Lay1,matLib,typeXsect='box',meshSize=2)
xsect_Lay1.xSectionAnalysis()
xsect_Lay1.printStiffMat()
xsect_Lay1.strn2dspl(strn,figName='Validation Case Layup 1',contour_Total_T=True)

# Layup 2 Box beam (0.5 x 0.923 in^2 box with laminate schedule [30,0]_3)
n_i_Lay2 = [1,1,1,1,1,1]
m_i_Lay2 = [2,2,2,2,2,2]
th_Lay2 = [-30,0,-30,0,-30,0]
lam1_Lay2 = Laminate(n_i_Lay2, m_i_Lay2, matLib, th=th_Lay2)
lam2_Lay2 = Laminate(n_i_Lay2, m_i_Lay2, matLib, th=th_Lay2)
lam3_Lay2 = Laminate(n_i_Lay2, m_i_Lay2, matLib, th=th_Lay2)
lam4_Lay2 = Laminate(n_i_Lay2, m_i_Lay2, matLib, th=th_Lay2)
lam1_Lay2.printPlies()
laminates_Lay2 = [lam1_Lay2,lam2_Lay2,lam3_Lay2,lam4_Lay2]
xsect_Lay2 = XSect(af2,xdim2,laminates_Lay2,matLib,typeXsect='box',meshSize=2)
xsect_Lay2.xSectionAnalysis()
xsect_Lay2.printStiffMat()
xsect_Lay2.strn2dspl(strn,figName='Validation Case Layup 2',contour_Total_T=True)

# Layup 2 Box beam (0.5 x 0.923 in^2 box with laminate schedule [30,0]_3)
n_i_1 = [1,1,1,1,1,1]
m_i_1 = [1,1,1,1,1,1]
th_1 = [-15,-15,-15,-15,-15,-15]
lam1 = Laminate(n_i_1, m_i_1, matLib, th=th_1)
n_i_2 = [1,1,1,1,1,1]
m_i_2 = [1,1,1,1,1,1]
th_2 = [-15,15,-15,15,-15,15]
lam2 = Laminate(n_i_2, m_i_2, matLib, th=th_2)
n_i_3 = [1,1,1,1,1,1]
m_i_3 = [1,1,1,1,1,1]
th_3 = [15,15,15,15,15,15]
lam3 = Laminate(n_i_3, m_i_3, matLib, th=th_3)
n_i_4 = [1,1,1,1,1,1]
m_i_4 = [1,1,1,1,1,1]
th_4 = [-15,15,-15,15,-15,15]
lam4 = Laminate(n_i_4, m_i_4, matLib, th=th_4)
lam1.printPlies()
lam2.printPlies()
lam3.printPlies()
lam4.printPlies()
laminates_Lay3 = [lam1,lam2,lam3,lam4]
xsect_Lay3 = XSect(af2,xdim2,laminates_Lay3,matLib,typeXsect='box',meshSize=2)
xsect_Lay3.xSectionAnalysis()
xsect_Lay3.printStiffMat()
xsect_Lay3.strn2dspl(strn,figName='Validation Case Layup 3',contour_Total_T=True)
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class ErrorServiceTopLevelException(Exception):
    """Root of the SMS service exception hierarchy."""


class ErrorValidationSmsContent(ErrorServiceTopLevelException):
    """Base error for SMS content validation failures."""


class SmsContentTypeError(ErrorValidationSmsContent):
    """Raised when the SMS content has an invalid type."""


class SmsContentLengthError(ErrorValidationSmsContent):
    """Raised when the SMS content exceeds the allowed length."""


class ErrorSmsSender(ErrorServiceTopLevelException):
    """Raised on failures in the SMS sending layer."""


class ErrorTypeError(ErrorServiceTopLevelException):
    """Raised when a service argument has an invalid type."""


class ErrorWebServiceError(ErrorServiceTopLevelException):
    """Raised when the upstream web service reports an error."""


class ErrorTokenExpired(ErrorServiceTopLevelException):
    """Raised when an authentication token has expired."""
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Replace the integer height/width columns of ImagestoreAlbumCarousel
    with string 'WxH' columns: thumbnail `size` and lightbox `full_size`."""

    def forwards(self, orm):
        # Deleting field 'ImagestoreAlbumCarousel.height'
        db.delete_column('cmsplugin_imagestorealbumcarousel', 'height')

        # Deleting field 'ImagestoreAlbumCarousel.width'
        db.delete_column('cmsplugin_imagestorealbumcarousel', 'width')

        # Adding field 'ImagestoreAlbumCarousel.size'
        db.add_column('cmsplugin_imagestorealbumcarousel', 'size', self.gf('django.db.models.fields.CharField')(default='72x72', max_length=20), keep_default=False)

        # Adding field 'ImagestoreAlbumCarousel.full_size'
        db.add_column('cmsplugin_imagestorealbumcarousel', 'full_size', self.gf('django.db.models.fields.CharField')(default='600x600', max_length=20), keep_default=False)

    def backwards(self, orm):
        # Adding field 'ImagestoreAlbumCarousel.height'
        db.add_column('cmsplugin_imagestorealbumcarousel', 'height', self.gf('django.db.models.fields.IntegerField')(default=200), keep_default=False)

        # Adding field 'ImagestoreAlbumCarousel.width'
        db.add_column('cmsplugin_imagestorealbumcarousel', 'width', self.gf('django.db.models.fields.IntegerField')(default=200), keep_default=False)

        # Deleting field 'ImagestoreAlbumCarousel.size'
        db.delete_column('cmsplugin_imagestorealbumcarousel', 'size')

        # Deleting field 'ImagestoreAlbumCarousel.full_size'
        db.delete_column('cmsplugin_imagestorealbumcarousel', 'full_size')

    # Frozen ORM snapshot used by South at migration time; generated code,
    # do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'imagestore.album': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'Album'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'head': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'head_of'", 'null': 'True', 'to': "orm['imagestore.Image']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'albums'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'imagestore.image': {
            'Meta': {'ordering': "('order', 'id')", 'object_name': 'Image'},
            'album': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': "orm['imagestore.Album']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'imagestore_cms.imagestorealbumcarousel': {
            'Meta': {'object_name': 'ImagestoreAlbumCarousel', 'db_table': "'cmsplugin_imagestorealbumcarousel'", '_ormbases': ['cms.CMSPlugin']},
            'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['imagestore.Album']"}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'full_size': ('django.db.models.fields.CharField', [], {'default': "'600x600'", 'max_length': '20'}),
            'limit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'size': ('django.db.models.fields.CharField', [], {'default': "'72x72'", 'max_length': '20'}),
            'skin': ('django.db.models.fields.CharField', [], {'default': "'jcarousel-skin-tango'", 'max_length': '100'})
        },
        'imagestore_cms.imagestorealbumptr': {
            'Meta': {'object_name': 'ImagestoreAlbumPtr', 'db_table': "'cmsplugin_imagestorealbumptr'", '_ormbases': ['cms.CMSPlugin']},
            'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['imagestore.Album']"}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['imagestore_cms']
from copy import deepcopy
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models

from .attention import Self_Attn2D


def normalize_imagenet(x):
    ''' Normalize input images according to ImageNet standards.

    Args:
        x (tensor): input images
    '''
    # Clone so the caller's tensor is not mutated in place.
    x = x.clone()
    x[:, 0] = (x[:, 0] - 0.485) / 0.229
    x[:, 1] = (x[:, 1] - 0.456) / 0.224
    x[:, 2] = (x[:, 2] - 0.406) / 0.225
    return x


def create_resnet(type):
    ''' Return the encoder class for the requested attention configuration.

    Args:
        type (str): one of 'resnet', 'attention-first', 'attention-last',
            'attention-all'

    Raises:
        ValueError: for an unknown type (previously returned None silently).
    '''
    if type == 'resnet':
        return Resnet
    elif type == 'attention-first':
        return ResnetFirst
    elif type == 'attention-last':
        return ResnetLast
    elif type == 'attention-all':
        return ResnetAll
    raise ValueError('unknown resnet type: {!r}'.format(type))


class Resnet(nn.Module):
    r''' ResNet encoder network for image input.

    Args:
        normalize (bool): whether the input images should be normalized
    '''

    def __init__(self, normalize=False):
        super().__init__()
        self.normalize = normalize
        self.features = models.resnet18(pretrained=True)

    def forward(self, x):
        # Removed unused `img = deepcopy(x)` — it deep-copied every input
        # batch on each forward pass for no effect.
        if self.normalize:
            x = normalize_imagenet(x)
        x = self.features.conv1(x)
        x = self.features.bn1(x)
        x = self.features.relu(x)
        x = self.features.maxpool(x)
        x = self.features.layer1(x)  # 64
        x = self.features.layer2(x)  # 128
        x = self.features.layer3(x)  # 256
        x = self.features.layer4(x)  # 512
        x = self.features.avgpool(x)
        x = torch.flatten(x, 1)  # batch, 512
        return x


class ResnetFirst(nn.Module):
    r''' ResNet encoder with self-attention after the first two stages.

    Args:
        normalize (bool): whether the input images should be normalized
    '''

    def __init__(self, normalize=False):
        super().__init__()
        self.normalize = normalize
        self.features = models.resnet18(pretrained=True)
        self.att = Self_Attn2D(64)
        self.att2 = Self_Attn2D(128)

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        x = self.features.conv1(x)
        x = self.features.bn1(x)
        x = self.features.relu(x)
        x = self.features.maxpool(x)
        x = self.features.layer1(x)  # 64
        x, _ = self.att(x)
        x = self.features.layer2(x)  # 128
        x, _ = self.att2(x)
        x = self.features.layer3(x)  # 256
        x = self.features.layer4(x)  # 512
        x = self.features.avgpool(x)
        x = torch.flatten(x, 1)  # batch, 512
        return x


class ResnetLast(nn.Module):
    r''' ResNet encoder with self-attention after the last two stages.

    Args:
        normalize (bool): whether the input images should be normalized
    '''

    def __init__(self, normalize=False):
        super().__init__()
        self.normalize = normalize
        self.features = models.resnet18(pretrained=True)
        self.att3 = Self_Attn2D(256)
        self.att4 = Self_Attn2D(512)

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        x = self.features.conv1(x)
        x = self.features.bn1(x)
        x = self.features.relu(x)
        x = self.features.maxpool(x)
        x = self.features.layer1(x)  # 64
        x = self.features.layer2(x)  # 128
        x = self.features.layer3(x)  # 256
        x, _ = self.att3(x)
        x = self.features.layer4(x)  # 512
        x, _ = self.att4(x)
        x = self.features.avgpool(x)
        x = torch.flatten(x, 1)  # batch, 512
        return x


class ResnetAll(nn.Module):
    r''' ResNet encoder with self-attention after every stage.

    Args:
        normalize (bool): whether the input images should be normalized
    '''

    def __init__(self, normalize=False):
        super().__init__()
        self.normalize = normalize
        self.features = models.resnet18(pretrained=True)
        self.att = Self_Attn2D(64)
        self.att2 = Self_Attn2D(128)
        self.att3 = Self_Attn2D(256)
        self.att4 = Self_Attn2D(512)

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        x = self.features.conv1(x)
        x = self.features.bn1(x)
        x = self.features.relu(x)
        x = self.features.maxpool(x)
        x = self.features.layer1(x)  # 64
        x, _ = self.att(x)
        x = self.features.layer2(x)  # 128
        x, _ = self.att2(x)
        x = self.features.layer3(x)  # 256
        x, _ = self.att3(x)
        x = self.features.layer4(x)  # 512
        x, _ = self.att4(x)
        x = self.features.avgpool(x)
        x = torch.flatten(x, 1)  # batch, 512
        return x
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import functions, Q, Subquery
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _

# Single-character unit codes stored in the database.
UNIT_SEEDS = 's'
UNIT_ROWS = 'r'
UNIT_G = 'g'
UNIT_COUNT = 'c'
PLANTING_UNITS = [(UNIT_SEEDS, 'seeds'), (UNIT_ROWS, 'rows')]
HARVEST_UNITS = [(UNIT_G, 'grams'), (UNIT_COUNT, 'count')]

short_name_validator = RegexValidator(r'^[a-z\-0-9]+\Z', _('Only use lowercase a-z, numbers, and hyphens.'))


class Note(models.Model):
    """Free-form text attached to observations/treatments."""
    text = models.TextField(blank=True)

    def __repr__(self):
        # Fixed: was `pass`, which returns None and makes repr() raise
        # TypeError (__repr__ must return a str).
        return "Note(char[{}])".format(len(self.text))

    def __str__(self):
        return "char[{}]".format(len(self.text))


class Environment(models.Model):
    """A growing environment (greenhouse, outdoor plot, ...)."""
    name = models.CharField(max_length=64, unique=True)
    abbrev = models.CharField(max_length=16, unique=True, validators=[short_name_validator], verbose_name=_('abbreviation'))
    active = models.BooleanField(default=True)

    def __repr__(self):
        return "Environment({}, {}, {})".format(self.abbrev, self.name, self.active)

    def __str__(self):
        return self.__format__('')

    def __format__(self, format):
        active = '' if self.active else 'inactive '
        if format == '':
            return "{}{} ({})".format(active, self.name, self.abbrev)
        elif format == 'only-name':
            return "{}{}".format(active, self.name)
        else:
            raise TypeError(_('Invalid format string.'))

    def get_absolute_url(self):
        return reverse('env', kwargs={'env_abbrev': self.abbrev})


class Bed(models.Model):
    """A bed inside an environment; abbrev/name unique per environment."""
    name = models.CharField(max_length=64)
    abbrev = models.CharField(max_length=16, validators=[short_name_validator], verbose_name=_('abbreviation'))
    env = models.ForeignKey(Environment, on_delete=models.PROTECT, related_name='beds', verbose_name=_('environment'))
    active = models.BooleanField(default=True)

    @property
    def cur_plants(self):
        """Plants currently (active transplant) in this bed."""
        return Plant.objects.filter(transplants__bed=self, transplants__active=True).all()

    def __repr__(self):
        return "Bed({}, {}, {}, active={})".format(self.abbrev, self.name, self.env, self.active)

    def __str__(self):
        return self.__format__('')

    def __format__(self, format):
        active = '' if self.active else 'inactive '
        if format == '':
            return "{}{} ({}) in {}".format(active, self.name, self.abbrev, self.env.name)
        elif format == 'no-env':
            return "{}{} ({})".format(active, self.name, self.abbrev)
        elif format == 'only-name':
            return "{}{}".format(active, self.name)
        elif format == 'env-and-name':
            return "{}{} {}".format(active, self.env.name, self.name)
        else:
            raise TypeError(_('Invalid format string.'))

    def get_absolute_url(self):
        return reverse('bed', kwargs={'env_abbrev': self.env.abbrev, 'bed_abbrev': self.abbrev})

    class Meta:
        unique_together = [('abbrev', 'env'), ('name', 'env')]


class PlantType(models.Model):
    """Species/variety pair, e.g. ('Tomato', 'Roma')."""
    common_name = models.CharField(max_length=64)
    variety = models.CharField(max_length=64)

    @property
    def full_name(self):
        return "{} {}".format(self.common_name, self.variety)

    def __repr__(self):
        return "PlantType({}, {})".format(self.common_name, self.variety)

    def __str__(self):
        return "{} {}".format(self.common_name, self.variety)

    def get_absolute_url(self):
        return reverse('plant-type', kwargs={'plant_type_id': self.id})

    class Meta:
        unique_together = [('common_name', 'variety')]


def validate_transplant_current(transplant):
    return transplant.active


class PlantManager(models.Manager):
    def without_bed(self):
        """Plants that have no active transplant (i.e. not in any bed)."""
        transplants = Transplanting.objects.filter(active=True).all()
        return self.exclude(id__in=Subquery(transplants.values('plant')))


class Plant(models.Model):
    """A planting of a PlantType; located in beds via Transplanting."""
    type = models.ForeignKey(PlantType, on_delete=models.PROTECT, related_name='plants')
    amount = models.PositiveIntegerField()
    unit = models.CharField(max_length=1, choices=PLANTING_UNITS, blank=False, default=UNIT_SEEDS)
    active = models.BooleanField(default=True)
    cur_transplant = models.ForeignKey('Transplanting', on_delete=models.PROTECT, related_name='+', blank=True, null=True)
    beds = models.ManyToManyField(Bed, through='Transplanting')
    objects = PlantManager()

    @property
    def cur_bed(self):
        return self.cur_transplant.bed

    def __repr__(self):
        return "Plant({}, {}, {}, active={})".format(self.type, self.amount, self.unit, self.active)

    def __str__(self):
        return self.__format__('')

    def __format__(self, format):
        active = '' if self.active else 'dead '
        if format == '':
            return "{}plant {}, {} {}".format(active, self.type, self.amount, self.get_unit_display())
        elif format == 'name':
            return "{} {}, {} {}".format(active, self.type, self.amount, self.get_unit_display())
        else:
            raise TypeError(_('Invalid format string.'))

    def get_absolute_url(self):
        # Fixed: removed unused `bed = self.cur_bed`, which raised
        # AttributeError whenever cur_transplant was None.
        return reverse('plant', kwargs={'plant_id': self.id})

    def get_edit_url(self):
        return reverse('edit-plant', kwargs={'plant_id': self.id})

    def clean(self):
        # current transplant is active and refers to this plant
        if self.cur_transplant:
            if not self.cur_transplant.active:
                raise ValidationError(_('Plants current transplant cannot be inactive.'))
            if self.cur_transplant.plant != self:
                raise ValidationError(_('Plants current transplant must self-refer.'))


class Transplanting(models.Model):
    """Places a Plant in a Bed at a point in time; one active per plant."""
    plant = models.ForeignKey(Plant, on_delete=models.PROTECT, related_name='transplants')
    date = models.DateTimeField(default=timezone.now)
    bed = models.ForeignKey(Bed, on_delete=models.PROTECT, related_name='transplants')
    active = models.BooleanField(default=True)

    def __repr__(self):
        return "Transplanting({}, {}, {})".format(self.plant, self.date, self.bed)

    def __str__(self):
        return self.__format__('')

    def __format__(self, format):
        active = 'current' if self.active else 'past'
        if format == '':
            # Fixed: used `{:noname}` for the plant, a spec Plant.__format__
            # does not support, so formatting raised TypeError.
            return "{} transplanting {} {:%Y-%m-%d %H:%M} in {:env-and-name}".format(
                active, self.plant, self.date, self.bed)
        elif format == 'time_and_bed':
            return "{} at {:%Y-%m-%d %H:%M} in {:env-and-name}".format(active, self.date, self.bed)
        else:
            raise TypeError(_('Invalid format string.'))

    class Meta:
        constraints = [
            # one active transplant per plant
            models.UniqueConstraint(fields=['plant'], condition=Q(active=True), name='unique_active')
        ]
        ordering = ['-date']


class Harvest(models.Model):
    """Produce taken from a plant."""
    plant = models.ForeignKey(Plant, on_delete=models.PROTECT)
    amount = models.PositiveIntegerField(blank=True)
    unit = models.CharField(max_length=1, choices=HARVEST_UNITS)
    in_stock = models.BooleanField(default=True)

    def __repr__(self):
        # Fixed: was "Harvest()".format() — a no-op format with no fields.
        return "Harvest({}, {} {})".format(self.plant, self.amount, self.unit)

    def __str__(self):
        return "harvest"


class ObservationKind(models.Model):
    """Abstract base: something noticed/done, optionally scoped to a plant
    type, plant, bed and/or environment, at a time, with a note."""
    plant_type = models.ForeignKey(PlantType, on_delete=models.PROTECT, blank=True, null=True)
    plant = models.ForeignKey(Plant, on_delete=models.PROTECT, blank=True, null=True)
    bed = models.ForeignKey(Bed, on_delete=models.PROTECT, blank=True, null=True)
    env = models.ForeignKey(Environment, on_delete=models.PROTECT, blank=True, null=True, verbose_name=_('environment'))
    when = models.DateTimeField(default=timezone.now)
    note = models.ForeignKey(Note, on_delete=models.PROTECT, blank=True)

    class Meta:
        abstract = True


class Observation(ObservationKind):
    def __repr__(self):
        return "Observation()"

    def __str__(self):
        return "observation"


class TreatmentType(models.Model):
    name = models.CharField(max_length=64)
    type = models.CharField(max_length=64, blank=True)

    def __repr__(self):
        return "TreatmentType({}, {})".format(self.name, self.type)

    def __str__(self):
        return "treatment type {} {}".format(self.type, self.name)

    def get_absolute_url(self):
        return reverse('trt-type', kwargs={'trt_type_id': self.id})


class Treatment(ObservationKind):
    type = models.ForeignKey(TreatmentType, on_delete=models.PROTECT)

    def __repr__(self):
        # Fixed: referenced nonexistent attributes (self.treatment,
        # self.date, self.details); the actual fields are type/when/note.
        return "Treatment({}, {}, {}, data=char[{}])".format(self.type, self.when, self.bed, len(self.note.text))

    def __str__(self):
        return "treatment {} on {} in {} (char[{}])".format(self.type.name, self.when, self.bed, len(self.note.text))


class MaladyType(models.Model):
    name = models.CharField(max_length=64)
    type = models.CharField(max_length=64, blank=True)

    def __repr__(self):
        return "MaladyType({}, {})".format(self.name, self.type)

    def __str__(self):
        # Fixed: self.type is a CharField (a str); `.name` raised
        # AttributeError.
        return "malady type {} {}".format(self.type, self.name)

    def get_absolute_url(self):
        return reverse('mal-type', kwargs={'mal_type_id': self.id})


class Malady(ObservationKind):
    type = models.ForeignKey(MaladyType, on_delete=models.PROTECT)

    def __repr__(self):
        # Fixed: referenced nonexistent self.malady/self.date; fields are
        # type/when (bed comes from ObservationKind).
        return "Malady({}, {}, {})".format(self.type, self.when, self.bed)

    def __str__(self):
        return "malady {} on {} in {}".format(self.type, self.when, self.bed)

    class Meta:
        verbose_name_plural = 'maladies'
from flask import Flask, render_template, request, url_for, redirect, flash, session, Response from flask_cors import * app = Flask(__name__) fuck = 'what are you fucking doing' # -------------------------------------------------- # -------------------------------------------------- # -------------------------------------------------- # 音乐搜索 song_name = '' song_id = '' @app.route('/search', methods=['GET', 'POST']) @cross_origin() def search(): global song_name if request.method == 'POST': song = request.form.get('song') song_name = song return fuck @app.route('/search_result', methods=['GET', 'POST']) @cross_origin() def search_result(): from utils.MySong import get_song return get_song(song_name) @app.route('/comments', methods=['GET', 'POST']) @cross_origin() def comments(): global song_id from utils.MySong import get_comments if request.method == 'POST': print(request.form) id = request.form.get('id') song_id = id get_comments(id) return fuck @app.route('/comments_result', methods=['GET', 'POST']) @cross_origin() def comments_result(): from utils.MySong import get_comments return get_comments(song_id) @app.route('/wordcloud', methods=['GET', 'POST']) @cross_origin() def wordcloud(): from utils.MyWordcloud import get_cloud get_cloud() imgPath = 'utils/wordcloud.png' mdict = { 'jpeg': 'image/jpeg', 'jpg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif' } mime = 'image/png' with open(imgPath, 'rb') as f: image = f.read() return Response(image, mimetype=mime) if __name__ == '__main__': app.run(host='0.0.0.0')
"""PDF conversion and assembly helpers built on wand (ImageMagick),
pdftotext, PyPDF2 and the external ``mutool`` / ``gs`` binaries."""
import logging
import os
import subprocess
import glob
import sys
from shutil import copyfile

import pdftotext
import PyPDF2
from wand.image import Image as WandImage

log = logging.getLogger(__name__)


def convert_pdf_to_tif(source_file, output_dir):
    """
    Create a TIF for every Page in the PDF and saves them to the output_dir.

    :param str source_file: path to the PDF
    :param str output_dir: path to the output Directory
    """
    log.debug(f"Creating tif files from {source_file} to {output_dir}")
    # Rasterize at 200 dpi; each page of the PDF becomes one sequence frame.
    with WandImage(filename=source_file, resolution=200) as img:
        pages = len(img.sequence)
        for i in range(pages):
            with WandImage(img.sequence[i]) as page_img:
                page_img.type = 'truecolor'
                name = os.path.splitext(os.path.basename(source_file))[0]
                # Output files are named <pdfname>_0000.tif, <pdfname>_0001.tif, ...
                page_img.save(filename=os.path.join(output_dir, f"{name}_{'%04i'% i}.tif"))


def convert_pdf_to_txt(source_file, output_dir):
    """
    Create text file for every page of source-PDF file.

    :param str source_file: PDF to generate text files from
    :param str output_dir: target directory for generated files
    """
    log.debug(f"Creating txt files from {source_file} to {output_dir}")
    with open(source_file, "rb") as input_stream:
        pdf = pdftotext.PDF(input_stream)
        index = 0  # Needed as pdftotext is not a Python list with .index() capability.
        for page in pdf:
            # NOTE(review): the directory check could be hoisted out of the
            # loop; left in place to keep behavior byte-identical.
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            name = os.path.splitext(os.path.basename(source_file))[0]
            with open(os.path.join(output_dir, f'{name}_{"%04i"% index}.txt'), 'wb') as output:
                output.write(page.encode('utf-8'))
            index += 1


def set_pdf_metadata(obj, metadata):
    """
    Create a new PDF file with additional metadata.

    The PDF at ``<obj.path>/data/pdf/<obj.id>.pdf`` is cloned, ``metadata``
    is attached, and the result overwrites the original file in place.
    """
    path = obj.path + "/data/pdf/" + obj.id + ".pdf"
    old_pdf = PyPDF2.PdfFileReader(path)
    new_pdf = PyPDF2.PdfFileWriter()
    new_pdf.cloneReaderDocumentRoot(old_pdf)
    new_pdf.addMetadata(metadata)
    with open(path, 'wb+') as stream:
        new_pdf.write(stream)


def merge_pdf(files, path: str, filename='merged.pdf', remove_old=True, downscale_threshold_in_mb=250):
    """
    Create a PDF file by combining a list of PDF files.
    File paths are relative to the path given in the parameters.

    :param list files: list the source pdf file paths
    :param string path: The path to the dir where the created file go
    :param string filename: name of the generated pdf file
    :param bool remove_old: if True, remove the files used for the split/merge
    :param int downscale_threshold_in_mb: Threshold in mb. If exceeded,
        the DPI of the final PDF will be reduced to 150.
    """
    os.makedirs(path, exist_ok=True)
    outfile_name = os.path.join(path, filename)
    log.info(f"Combining {len(files)} PDF files at 300 dpi.")
    try:
        # mutool concatenates the inputs; optimize() then recompresses at
        # "/printer" (300 dpi) quality.
        output = subprocess.check_output([
            "mutool", "merge", "-o", outfile_name,
            *[os.path.join(path, name) for name in files]
        ])
        optimize(path, outfile_name, outfile_name, "/printer")
    except subprocess.CalledProcessError as e:
        # NOTE(review): failure is only printed, not logged or re-raised --
        # the size check below will then raise on a missing outfile.
        print(e)
    if os.path.getsize(outfile_name) > downscale_threshold_in_mb * 1000000:
        log.info(f"Combined PDF is larger than {downscale_threshold_in_mb}mb, reducing quality to 150 dpi.")
        optimize(path, outfile_name, outfile_name, "/ebook")
    if remove_old:
        for file_name in files:
            file_path = os.path.join(path, os.path.basename(file_name))
            if os.path.isfile(file_path):
                os.remove(file_path)


def split_merge_pdf(files, path: str, filename='merged.pdf', remove_old=True):
    """
    Create a PDF file by combining sections of other PDFs.
    File paths are relative to the path given in the parameters.

    :param list files: list of the source pdf files as dict in the format:
        {'file': relative_path, 'range': [start, end]}
    :param string path: The path to the dir where the created file go
    :param string filename: name of the generated pdf file
    :param bool remove_old: if True, remove the files used for the split/merge
    """
    # TODO: This function is currently unused (except old tests) and produces suboptimal PDF files
    os.makedirs(path, exist_ok=True)
    new_pdf = PyPDF2.PdfFileWriter()
    # Source streams must stay open until new_pdf.write() has run, because
    # PyPDF2 reads page data lazily; they are collected and closed below.
    input_streams = []
    for file in files:
        input_str = os.path.join(path, file['file'])
        input_stream = open(input_str, 'rb')
        pdf = PyPDF2.PdfFileReader(input_stream)
        if pdf.flattenedPages is None:
            pdf.getNumPages()  # make the file page based
        if 'range' in file:
            # 'range' is 1-based and inclusive; stop early if it overshoots.
            start_end = file['range']
            for index in range(start_end[0] - 1, start_end[-1]):
                try:
                    new_pdf.addPage(pdf.getPage(index))
                except IndexError:
                    break
        else:
            for index in range(pdf.getNumPages()):
                new_pdf.addPage(pdf.getPage(index))
        input_streams.append(input_stream)
    outfile_name = os.path.join(path, filename)
    with open(outfile_name, 'wb+') as stream:
        new_pdf.write(stream)
    for input_stream in input_streams:
        input_stream.close()
    if remove_old:
        for file in files:
            file_path = os.path.join(path, os.path.basename(file['file']))
            if os.path.isfile(file_path):
                os.remove(file_path)


def optimize(base_path, input_file_path, output_file_path, quality_setting):
    """Recompress a PDF with Ghostscript, then stitch the per-page output
    back into a single file with mutool.

    :param base_path: directory used for the intermediate tmp_NNN.pdf pages
    :param input_file_path: PDF to recompress
    :param output_file_path: destination (may equal input_file_path)
    :param quality_setting: Ghostscript -dPDFSETTINGS value, e.g. "/printer"
    """
    # Ghostscript writes one tmp_NNN.pdf per page into base_path.
    output = subprocess.check_output([
        "gs", "-dNOPAUSE", "-sDEVICE=pdfwrite",
        "-o", os.path.join(base_path, "tmp_%03d.pdf"),
        f"-dPDFSETTINGS={quality_setting}",
        "-dBATCH", input_file_path
    ])
    # sorted() restores page order, since glob's ordering is unspecified.
    output = subprocess.check_output([
        "mutool", "merge", "-o", output_file_path,
        *sorted(glob.glob(os.path.join(base_path, "tmp_*.pdf"))),
    ])
    for f in glob.glob(os.path.join(base_path, "tmp_*.pdf")):
        os.remove(f)
![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true) <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/NewtonsThirdLaw/newtons-third-law.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a> # Newton's Third Law %%html <script> function code_toggle() { if (code_shown){ $('div.input').hide('500'); $('#toggleButton').val('Show Code') } else { $('div.input').show('500'); $('#toggleButton').val('Hide Code') } code_shown = !code_shown } $( document ).ready(function(){ code_shown=false; $('div.input').hide() }); </script> <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form> ## The Third Law Let's remind ourselves of the Three Laws of motion from Newton: 1. A body at **rest** stays at **rest**. A body in **motion** stays in **motion**, with uniform speed in a straight line. A change happens only if a **force** acts on the body. 2. The **force** on the body equals the rate of change of its **momentum** (mass times velocity). 3. For every **action**, there is an equal and opposite **reaction**. In this notebook, we examine the Third Law: #3. For every **action**, there is an equal and opposite **reaction**. ### Example 1 A familiar example of the Third Law we see often on TV or the Internet - a rocket blasting off to space. The rocket demonstrates Newton's Third Law. In a NASA rocket, as in the video below, some chemicals inside the the rocket body are burned, creating hot gases that shoot out the bottom of the rocket. That motion of hot gases is the **action**. The rocket responds by moving in the opposite direction. That rocket motion is the **reaction**. 
<img src="https://media.giphy.com/media/6yWV529bDk3WU/giphy.gif" width="480" height="350" /> (Image via Giphy.com)

### Example 2

A similar example is with a water bottle rocket, as in the video below. The child pumps air into the water bottle creating pressure inside the container. When the lid on the bottom opens, the air pressure forces the water out the bottom of the rocket. That motion of water is the **action.** The resulting **reaction** is the water bottle shooting up into the air.

<img src="https://media.giphy.com/media/qGQ7uiYh4II2k/giphy.gif" width="480" height="270" /> (Image via Giphy.com)

### Example 3

### Equal and opposite

Equal reaction might not be obvious. In the video below, two people collide while holding big, bouncy balls. At the collision, one of them flies backwards (and lands comfortably on cushions). The other only slows down a bit. The reaction seems opposite (goes in the reverse direction), but not really equal.

<img src="https://media.giphy.com/media/ri4ux6gkNifIs/giphy.gif" width="480" height="280" /> (Image via Giphy.com)

### Some details

So, what's happening here? A more precise statement of Newton's Third Law is the following:

> #3. When two objects interact, there are two forces acting, one on each object, that are equal strength and opposite in direction.

In the video above with the two people colliding, each has a **force** on them (the force through the balls). These are equal and opposite **forces** acting on them. The person on the left is big, heavy, and moving quickly with his feet planted firmly on the ground. He will firmly absorb any force, and only slow down a bit. The person on the right is small, light-weight, not moving initially, and lifts her feet off the floor. When the equal-sized force hits her, she flies back quickly because her momentum and her velocity must make a big change in reaction to the force of the collision.
By the Second Law, a force causes a change in momentum (velocity times mass), so it is the change in **momentum** that must be equal. Here, the smaller person on the right, with lower mass, has to see a bigger change in velocity in the collision to have an equivalent change of momentum. So this person bounces back much faster. ### Example 4 ### A train derailment Tragic accidents can happen when we forget Newton's Third Law. Here is an example of a train derailment, captured in the video below. The train is following Newton's laws of motion, but something went terribly wrong. <img src="https://media.giphy.com/media/oQLfkLMkDdfnG/giphy.gif" width="480" height="354" /> (Image via Giphy.com) So, what went wrong here? First, notice the train is turning in a curve - by Newton's First Law, there must be a force on the train causing it to deviate from a straight line. It's not hard to imagine that this force is the action of the rails on the ground pushing against the wheels of the train. By the Third Law, the wheels/train push back on the rails. This is the equal, opposite force. Because it is a fast, heavy train, this results in a very strong force pushing on the rails. Normally, the rails do not move (much) because they are cemented into the ground. In this derailment video, though, the force is so strong that the rails break! Once they are broken, there is nothing to push against the train. With no force, Newton's First Law says the train will have to move in a straight line. So it does, which means it leaves the curved track and crashes into the wall. ### A simulation Following is a bouncing ball simulation, based on code by Ziggy Jonsson [Click here for the original](http://bl.ocks.org/ZJONSSON/1706849). Details are in this [paper](http://www.vobarian.com/collisions/2dcollisions2.pdf). What we see in the animation is a collection of balls that bounce off each other in elastic collisions. Each ball has a different mass, proportional to the area of the ball. 
So when a small ball hits a big ball, there is a force acting on each ball, in equal and opposite directions. The difference in masses means the velocity of the small ball changes a lot, while the big ball only changes a little.

%%html
<iframe src="C3.html" width=500 height=500></iframe>

## Second and Third Law together

When two balls hit, the Third Law tells us that the force each feels is equal and opposite. The Second Law tells us the rate of change in **momentum** for each ball is equal to this force. This means the total change in **momentum** for the two balls is equal and opposite. That is to say that the total change in total momentum between two objects is
$$\Delta \vec{p}_1 = - \Delta \vec{p}_2, $$
where delta $\Delta$ means the "change in" the momentum $\vec{p}$. So $\Delta \vec{p} = \vec{p}(\mbox{after}) - \vec{p}(\mbox{before}).$ The letter $\vec{p}$ stands for momentum (*petere* in Latin), and is just the product of mass times velocity.

Rewriting momentum as mass times velocity, for the two balls (1 and 2) we have
$$ \Delta \left( m_1 \cdot \vec{v}_1 \right) = - \Delta \left( m_2 \cdot \vec{v}_2\right).$$
The minus sign on the right is to remind us that the change is in opposite directions. For the balls in this simulation, the masses stay constant, and it is only the velocities that change, so we can write
$$ m_1 \cdot (\Delta \vec{v}_1) = - m_2 \cdot (\Delta \vec{v}_2).$$
In other words, the relative change in velocities is related to the ratio of their masses,
$$ \Delta \vec{v}_1 = - \frac{m_2}{m_1} \cdot \Delta \vec{v}_2.$$
So, if ball two has twice the mass of ball one, the change in speed of ball one is twice the change in ball two.

## Example 3 - colliding people.

Recall in the third video above, we had a boy colliding into a girl. Suppose
- the boy weighs 80 kg
- the girl weighs 40 kg
- the boy is running at 5 meters per second, and after the collision, has stopped dead.

How fast is the girl going?
Well, if we ignore the forces of their feet on the floor, we see the boy's change in velocity is 5 m/s. Since the ratio of masses $m_2/m_1$ is 2, the change in the girl's velocity must be twice as great. So she goes from 0 m/s to 10 m/s (which is very fast). If you like, we can compute further: - the boy's initial momentum is $80\times 5$ = $400$ kg m/s. - Going to a dead stop, his change in momentum is $- 400$ kg m/s. - The girl goes from zero to $40\times 10$ = $+400$ kg m/s. So the change in momentum of the boy is equal and opposite the change in momentum of the girl. ## Where's the force? Notice in the above example that we did not need to compute the force of the collision. This is one of the powerful aspects of Newton's Third Law. That is, without knowing any details of the forces involved, we can still figure out the change in velocities of the interacting bodies. In fact, the forces can be quite complicated. With the boy and girl colliding, the force is transmitted through the bouncy ball. So it is a small force, extended over a rather long time (a few tenths of a second). If they were holding hard steel balls, the force of collision would have been very large, over a short time (a hundredth of a second, say). Fortunately, we don't need to know these details to use the Third Law. ## Conservation law Newton's Third Law leads to the conservation of momentum law. This is a deeper physical result that holds true even in very complicated situations, with many bodies (not just two), and even for things like fluids, gases, and elastic solids. It is easy to see this conservation of total momentum for the bouncing balls. We write $$\mbox{Total Momentum}, \; \vec{p} = m_1\cdot \vec{v}_1 + m_2\cdot \vec{v}_2 + m_3\cdot \vec{v}_3 + \ldots$$ as the sum of the momenta of all the balls. 
Now, if two balls collide, say one and two, then the momentum of the first ball goes up by $\mbox{ change in } (m_1 \cdot v_1)$ while the momentum of the second ball goes down by the same amount. This is the **equal and opposite** part of the Third Law. The Total Momentum stays the same, since this increase and decrease cancels out in the total sum of momenta.

## Summary of Newton's Third Law

The Third Law really is a physical law. It says that when two objects interact, it is through forces. Each object experiences a force, that is equal to, and opposite to the force on the other object.

From the First and Second Laws, we know there are consequences to these forces. The objects will change their motion, and the size of the change of their velocities will depend on their relative masses. So when big things hit small things, the small thing has a bigger change in velocity.

One important consequence of Newton's Third Law is the conservation of total momentum for a system of particles.

## Exercises

1. Suppose you are in your car, stopped at an intersection. A car hits you from behind (that is an action). What is the reaction? (What does your car do?)

2. If the car hitting you from behind has the same mass as yours, is the force your car experiences the same as the force the other car feels?

3. If the car hitting you from behind has 10 times the mass, is the force your car experiences the same as the force the other car feels? Explain.

4. If the car hitting you from behind has 10 times the mass as yours, is the change in velocity your car experiences the same as the change in velocity the other car feels? Explain.

5. From the passenger's point of view, any big increase in velocity is a problem because at some point in an accident, you get stopped (by an airbag, a windshield, or a road surface). Explain why getting hit by a heavy truck is worse than getting hit by a light car.

6.
You and a friend are on a skating rink that is perfectly smooth, and your skates move frictionlessly on the ice. Standing face to face, if you push against each other, do you move? In which direction do each of you move?

7. As in 6, if you and your friend weigh the same, how fast do you move, compared to your friend?

8. As in 6, if you weigh twice as much as your friend, do you move twice as fast? Four times as fast? How fast?

9. A bird can lift itself into the sky by pushing the air with its wings. A rocket in outer space doesn't have air to push against -- so how does it move? (Hint -- think of the skaters. One skater could be like the rocket, the other skater is like the hot gases being pushed away from the rocket.)

[![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
import torch.nn as nn from abc import ABC, abstractmethod class LossEvaluator(ABC): @abstractmethod def compute_batch(self, model, image_array, label_array): pass def compute_individual(self, model, image, label): return self.compute_batch( model, image.unsqueeze(0), label.unsqueeze(0) ).squeeze(0) class CrossEntropyLossEvaluator(LossEvaluator): def __init__(self): self._criterion = nn.CrossEntropyLoss(reduction='none') def compute_batch(self, model, image_array, label_array): return self._criterion(model(image_array), label_array) class LossEvaluatorFactory: def create_loss_evaluator(self, name): if name == 'cross_entropy': return CrossEntropyLossEvaluator() else: raise Exception('unsupported loss evaluator')
# import json import os from dotenv import load_dotenv from ibm_watson import LanguageTranslatorV3 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator # """ # translator.py connects to ibm_watson # language translator and converts English # to French and vice versa using functions # frenchToEnglish and englishToFrench # """ load_dotenv('.env') apikey = os.environ['apikey'] url = os.environ['url'] authenticator = IAMAuthenticator(apikey) language_translator = LanguageTranslatorV3( version='2018-05-01', authenticator=authenticator ) language_translator.set_service_url(url) def englishToFrench(english_text): """ translate English to French """ if english_text is None: raise ValueError("Please enter a valid text in English") else: #write the code here french_text = language_translator.translate( text=english_text, model_id='en-fr').get_result()['translations'][0]['translation'] # print(json.dumps(translation, indent=2, ensure_ascii=False)) return french_text def frenchToEnglish(french_text): """ translate French to English """ if french_text is None: raise ValueError("Please enter a valid text in French") else: #write the code here english_text = language_translator.translate( text=french_text, model_id='fr-en').get_result()['translations'][0]['translation'] # print(json.dumps(translation, indent=2, ensure_ascii=False)) return english_text
"""Timestamp conversion helpers and a pandas time-axis DataFrame builder."""
import datetime
import time

import pytz
from dateutil import relativedelta


def datetime_str_to_timestamp(datetime_str, tz=None):
    """Parse a ``%Y-%m-%d %H:%M:%S`` string and return its Unix timestamp.

    :param datetime_str: naive datetime string
    :param tz: optional timezone name; when omitted the local zone is used
    """
    dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
    return datetime_to_timestamp(dt, tz)


def datetime_to_timestamp(dt, tz=None):
    """Convert a naive datetime to a Unix timestamp (int seconds).

    With ``tz`` the datetime is interpreted in that zone; otherwise it is
    interpreted in the machine's local timezone via ``time.mktime``.
    """
    if tz:
        dt = pytz.timezone(tz).localize(dt)
        return int((dt - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds())
    return int(time.mktime(dt.timetuple()))


def timestamp_to_datetime(ts, tz=None):
    """Inverse of ``datetime_to_timestamp``: timestamp -> datetime.

    Returns an aware datetime when ``tz`` is given, a naive local-time
    datetime otherwise.
    """
    if tz:
        return datetime.datetime.fromtimestamp(ts, pytz.timezone(tz))
    return datetime.datetime.fromtimestamp(ts)


def gen_time_df(periods, unit='D', start=None, complete=False):
    """Build a DataFrame holding a time axis of ``periods`` rows.

    :param periods: number of rows (days / weeks / months)
    :param unit: 'D' daily, 'W' weekly (Mondays), or 'M' monthly axis
    :param start: first period start; defaults to an axis ending today
    :param complete: if True, add derived columns (week/month boundaries
        and a YYYYMM integer ``month`` column)
    :raises RuntimeError: for an unknown ``unit``
    """
    import pandas as pd
    if not start:
        today = datetime.date.today()
        # Choose a start so the axis' last period contains today.
        if unit == 'D':
            start = today - datetime.timedelta(days=periods - 1)
        elif unit == 'W':
            # Back up to this week's Monday, then (periods - 1) weeks.
            start = today - datetime.timedelta(days=today.weekday()) - datetime.timedelta(weeks=periods - 1)
        elif unit == 'M':
            start = today.replace(day=1) - relativedelta.relativedelta(months=periods - 1)
    if unit == 'D':
        df = pd.DataFrame([[x] for x in pd.date_range(start, periods=periods, freq='D')], columns=['day'])
        if complete:
            df['weekday'] = df['day'].dt.dayofweek
            df['week_start'] = df.apply(lambda x: x['day'] - datetime.timedelta(days=x['weekday']), axis=1)
            df['week_end'] = df.apply(lambda x: x['week_start'] + datetime.timedelta(days=6), axis=1)
            df['month_start'] = df.apply(lambda x: x['day'].replace(day=1), axis=1)
            # Month end = first day of next month minus one day.
            df['month_end'] = df.apply(
                lambda x: x['day'].replace(day=1) + relativedelta.relativedelta(months=1) - datetime.timedelta(days=1),
                axis=1)
            # 'month' is first the calendar month, then rewritten as YYYYMM.
            df['month'] = df['day'].dt.month
            df['year'] = df['day'].dt.year
            df['month'] = df.apply(lambda r: int(str(r['year']) + str(r['month']).zfill(2)), axis=1)
            df.drop('year', axis=1, inplace=True)
    elif unit == 'W':
        # 'W-MON' produces one row per week, anchored on Mondays.
        df = pd.DataFrame([[x] for x in pd.date_range(start, periods=periods, freq='W-MON')], columns=['week_start'])
        df['week_end'] = df.apply(lambda x: x['week_start'] + datetime.timedelta(days=6), axis=1)
        if complete:
            df['month_start'] = df.apply(lambda x: x['week_start'].replace(day=1), axis=1)
            df['month_end'] = df.apply(
                lambda x: x['week_start'].replace(day=1) + relativedelta.relativedelta(months=1) - datetime.timedelta(
                    days=1), axis=1)
            df['month'] = df['week_start'].dt.month
            df['year'] = df['week_start'].dt.year
            df['month'] = df.apply(lambda r: int(str(r['year']) + str(r['month']).zfill(2)), axis=1)
            df.drop('year', axis=1, inplace=True)
    elif unit == 'M':
        # 'MS' = month start frequency; derived columns are always added here.
        df = pd.DataFrame([[x] for x in pd.date_range(start, periods=periods, freq='MS')], columns=['month_start'])
        df['month_end'] = df.apply(
            lambda x: x['month_start'].replace(day=1) + relativedelta.relativedelta(months=1) - datetime.timedelta(
                days=1), axis=1)
        df['month'] = df['month_start'].dt.month
        df['year'] = df['month_start'].dt.year
        df['month'] = df.apply(lambda r: int(str(r['year']) + str(r['month']).zfill(2)), axis=1)
        df.drop('year', axis=1, inplace=True)
    else:
        # NOTE(review): raised without a message; consider ValueError(unit).
        raise RuntimeError
    return df
""" Write the INTDER main and auxiliary input file """ import automol.geom import intder_io._util as intder_util def input_file(geo, zma=None): """ Write the main INTDER input file. Currently just supports a basic harmonic frequency and total energy distribution calculation. :param geo: geometry to build input for :type geo: automol geometry data structure :param zma: Z-Matrix corresponding to geometry :type zma: automol Z-Matrix data structure :rtype: str """ if zma is None: zma = automol.geom.zmatrix(geo) inp_str = ( intder_util.header_format(geo) + '\n' + intder_util.internals_format(zma) + '\n' + intder_util.geometry_format(geo) + '\n' + intder_util.symbols_format(geo) ) return inp_str def cart_hess_file(hess): """ Write a file with the Cartesian Hessian auxiliary input that corresponds to the FCMINT file for CFOUR. :param hess: mass-weighted Hessian (in a.u.) :type hess: numpy.ndarray :rtype: str """ natom = len(hess) hess_str = '{0:>6d}{1:>6d}\n'.format(natom, natom*3) hess_str += intder_util.hessian_format(hess) return hess_str
a = 5 b = 5 print(a + b) for number in [1, 2, 3, 4, 5, 6]: print(number, end=' ')
"""Estimate how many card packs must be opened to complete a set.

Two approaches are compared:

* ``pieCalc``   -- exact inclusion/exclusion probability that ``t`` packs of
  ``x`` distinct items each complete a set of ``n`` collectibles.
* ``simMethod`` -- Monte-Carlo simulation of the same process.

Fixes over the previous version: ``fact``/``chooseFunc``/``pieCalc`` were
defined twice verbatim (deduplicated); the recursive factorial that required
raising the recursion limit is replaced by ``math.factorial``/``math.comb``
(exact integer arithmetic); the very slow demo loop now runs only under
``__main__`` so the module can be imported.
"""
import decimal  # retained: used only by the commented-out Decimal experiments
import sys      # retained: no longer needed for recursion-limit tweaks
import math
import random

# Parameters for the demo run at the bottom of the file.
completeSet = 12
packs = 28
itemsPerPack = 1


def fact(n):
    """Return n! (delegates to math.factorial; exact and non-recursive)."""
    return math.factorial(n)


def chooseFunc(n, x):
    """Return the binomial coefficient C(n, x)."""
    return math.comb(n, x)


def pieCalc(n, x, t):
    """Probability that ``t`` packs of ``x`` distinct items each complete a
    set of ``n`` collectibles, by inclusion/exclusion over missed items.

    :param n: size of the complete set
    :param x: distinct items per pack
    :param t: number of packs opened
    :return: probability in [0, 1] as a float
    """
    total = chooseFunc(n, x) ** t  # equally-likely pack sequences
    sumPIE = total
    for i in range(n):
        if i + 1 == n:
            # Missing all n items is impossible once a pack is opened.
            continue
        # Sequences avoiding a fixed choice of (i + 1) items, with the
        # alternating inclusion/exclusion sign.
        term = chooseFunc(n, i + 1) * (chooseFunc(n - (i + 1), x) ** t)
        if i % 2 == 0:
            sumPIE -= term
        else:
            sumPIE += term
    return sumPIE / total


def packArrayClean(packArray, completeSet):
    """Return a fresh collection state: one ``1`` per still-missing item."""
    return [1] * completeSet


def average(lst):
    """Arithmetic mean of a non-empty list."""
    return sum(lst) / len(lst)


def simMethod(completeSet, packs, itemsPerPack):
    """Monte-Carlo estimate of collection progress; prints summary stats.

    :param completeSet: size of the complete set
    :param packs: number of packs opened per simulated collector
    :param itemsPerPack: items drawn per pack
    """
    confidence = 100000  # number of simulated collectors
    averageHold = []
    ratioHold = []
    dupesHold = []
    for _ in range(confidence):
        packArray = packArrayClean([], completeSet)
        amountGotten = 0
        for _ in range(packs):
            for _ in range(itemsPerPack):
                temp = random.choice(packArray)
                if temp == 1:
                    amountGotten += 1
                    # All remaining 1-entries are interchangeable, so zeroing
                    # the first of them is equivalent to zeroing the drawn one.
                    packArray[amountGotten - 1] = 0
        averageHold.append(amountGotten)
        ratioHold.append((packs - amountGotten) / packs)
        dupesHold.append(packs - amountGotten)
    # Fraction of simulated collectors who completed the whole set.
    compCount = 0
    for got in averageHold:
        if got == completeSet:
            compCount += 1
    finalCount = compCount / len(averageHold)
    print("Sim Calc Average Completion: " + str(average(averageHold)))
    # NOTE: the two "Ratio" lines report different quantities --
    # duplicate ratio first, completion ratio second.
    print("Sim Calc Ratio: " + str(average(ratioHold)))
    print("Sim Calc Ratio: " + str(average(averageHold) / completeSet))
    print("Sim Calc Dupes Gotten: " + str(average(dupesHold)))
    # print(finalCount)


if __name__ == '__main__':
    print("Maths Calc % Complete Set: " + str(pieCalc(completeSet, itemsPerPack, packs)))
    startPack = 0
    for i in range(30):
        print("\n===TEST with " + str(i + startPack + 1) + " packs===")
        simMethod(completeSet, i + startPack + 1, itemsPerPack)
from sklearn import preprocessing from sklearn.cluster import KMeans from sklearn.decomposition import PCA import seaborn as sns import numpy as np import pandas as pd def loudness_scaled(songs): loudness = songs[['loudness']].values min_max_scaler = preprocessing.MinMaxScaler() loudness_scaled = min_max_scaler.fit_transform(loudness) songs['loudness'] = pd.DataFrame(loudness_scaled) return songs def songs_features(songs): songs_ = loudness_scaled(songs) songs_feature = songs_.copy() songs_feature = songs_feature.drop(['name','artist','id'],axis=1) return songs_feature def labelling_dataset(songs): kmeans = KMeans(n_clusters=4) songs_feat = songs_features(songs) kmeans.fit(songs_feat) y_kmeans = kmeans.predict(songs_feat) return y_kmeans """ def visualisation_clusters(): songs_feat = songs_features() y_kmeans = labelling_dataset() pca = PCA(n_components=2) principal_components = pca.fit_transform(songs_feat) pc = pd.DataFrame(principal_components) pc['label'] = y_kmeans pc.columns = ['x', 'y','label'] cluster = sns.lmplot(data=pc, x='x', y='y', hue='label', fit_reg=False, legend=True, legend_out=True) """
import json

from .GiteaUserCurrentEmails import GiteaUserCurrentEmails
from .GiteaUser import GiteaUser


class GiteaUserCurrent(GiteaUser):
    """The currently-authenticated Gitea user (the account the API client is
    logged in as), extending GiteaUser with follow/unfollow operations,
    follower/following listings and e-mail management.
    """

    def __init__(self, client):
        super(GiteaUserCurrent, self).__init__(client)
        # Flags this user object as the authenticated ("current") user.
        self.is_current = True

    def follow(self, username):
        """Follow *username*.

        Returns True on success, False on any API error (a 404 — unknown
        username — is additionally logged at debug level).

        NOTE(review): exceptions without a ``response`` attribute (e.g. pure
        connection errors) would escape as AttributeError here — confirm the
        API client only raises response-carrying errors.
        """
        try:
            self.client.api.user.userCurrentPutFollow(username=username, data=None)
            return True
        except Exception as e:
            if e.response.status_code == 404:
                self._log_debug("username does not exist")
            return False

    def unfollow(self, username):
        """Unfollow *username*. Returns True on success, False on any API
        error (404 = unknown username, logged at debug level)."""
        try:
            self.client.api.user.userCurrentDeleteFollow(username=username)
            return True
        except Exception as e:
            if e.response.status_code == 404:
                self._log_debug("username does not exist")
            return False

    @property
    def followers(self):
        """List of user objects that follow the current user, with every
        attribute from the API payload copied onto each object."""
        result = []
        for follower in self.client.api.user.userCurrentListFollowers().json():
            user = self.client.users.new(username=follower["username"])
            for k, v in follower.items():
                setattr(user, k, v)
            result.append(user)
        return result

    @property
    def following(self):
        """List of user objects the current user follows, built the same way
        as :attr:`followers`."""
        result = []
        for following in self.client.api.user.userCurrentListFollowing().json():
            user = self.client.users.new(username=following["username"])
            for k, v in following.items():
                setattr(user, k, v)
            result.append(user)
        return result

    def is_following(self, username):
        """Return True if the current user follows *username*, else False.

        Bug fix: the debug message was previously emitted when the status
        code was *not* 404 — the opposite of follow()/unfollow(), which log
        "username does not exist" precisely on 404. The condition is now
        consistent with those siblings.
        """
        try:
            self.client.api.user.userCurrentCheckFollowing(followee=username)
            return True
        except Exception as e:
            if e.response.status_code == 404:
                self._log_debug("username does not exist")
            return False

    @property
    def emails(self):
        """E-mail management helper bound to this client and user."""
        return GiteaUserCurrentEmails(self.client, self)

    def __str__(self):
        return "\n<Current User>\n%s" % json.dumps(self.data, indent=4)

    __repr__ = __str__
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.contrib.gis.db.models.fields


class Migration(migrations.Migration):
    # Initial schema migration: creates the governmental/administrative
    # boundary tables. Every model shares a common provenance block
    # (permanent_identifier, source_*, data_security, distribution_policy,
    # loaddate, ftype, gnis_id, globalid, objectid) plus a polygon geometry
    # with srid=4326 (WGS84 longitude/latitude).
    # NOTE(review): this file appears auto-generated by `makemigrations`;
    # keep edits to comments only.

    dependencies = [
    ]

    operations = [
        # Congressional district boundaries.
        # NOTE(review): the fcode choice reads (62200, 'Fire District')
        # despite the model name — looks like a copy/paste slip in the
        # source model definition; confirm against the model.
        migrations.CreateModel(
            name='CongressionalDistrict',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Audit timestamps maintained automatically by Django.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                # Shared source-provenance columns.
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                # NOTE(review): objectid is not unique here, unlike the other
                # models below — confirm this asymmetry is intended.
                ('objectid', models.IntegerField(null=True, blank=True)),
                ('name', models.CharField(max_length=120, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(62200, 'Fire District')])),
                ('designation', models.CharField(max_length=60, null=True, blank=True)),
                ('state_fipscode', models.CharField(max_length=2, null=True, blank=True)),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('admintype', models.IntegerField(blank=True, null=True, choices=[(0, 'Unknown'), (1, 'Federal'), (2, 'Tribal'), (3, 'State'), (4, 'Regional'), (5, 'County'), (6, 'Municipal'), (7, 'Private')])),
                ('ownerormanagingagency', models.IntegerField(blank=True, null=True, choices=[(1, 'Army Corps of Engineers'), (15, 'Bureau of Census'), (2, 'Bureau of Indian Affairs'), (3, 'Bureau of Land Management'), (4, 'Bureau of Reclamation'), (5, 'Department of Defense'), (6, 'Department of Energy'), (7, 'Department of Homeland Security'), (8, 'Department of Transportation'), (9, 'Department of Veteran Affairs'), (10, 'Fish and Wildlife Service'), (11, 'Forest Service'), (12, 'National Oceanic and Atmospheric Administration'), (13, 'National Park Service'), (14, 'Tennessee Valley Authority'), (99, 'Not Applicable')])),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # County (or county-equivalent) boundaries, keyed by FIPS codes.
        migrations.CreateModel(
            name='CountyorEquivalent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61200, 'County'), (61201, 'Borough'), (61210, 'City and Borough'), (61202, 'District'), (61203, 'Independent City'), (61204, 'Island'), (61205, 'Judicial Division'), (61206, 'Municipality'), (61207, 'Municipio'), (61208, 'Parish'), (61299, 'Other County Equivalent Area')])),
                ('state_fipscode', models.CharField(max_length=2, null=True, blank=True)),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('county_fipscode', models.CharField(max_length=3, null=True, blank=True)),
                ('county_name', models.CharField(max_length=120, null=True, blank=True)),
                # Combined state+county FIPS code (2 + 3 digits).
                ('stco_fipscode', models.CharField(max_length=5, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'verbose_name': 'County (or Equivalent)',
            },
        ),
        # Government units — county-level records again, but with byte-string
        # fcode labels and an extra `fips` column; ordered for admin listings.
        migrations.CreateModel(
            name='GovUnits',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61200, b'County'), (61201, b'Borough'), (61210, b'City and Borough'), (61202, b'District'), (61203, b'Independent City'), (61204, b'Island'), (61205, b'Judicial Division'), (61206, b'Municipality'), (61207, b'Municipio'), (61208, b'Parish'), (61299, b'Other County Equivalent Area')])),
                ('state_fipscode', models.CharField(max_length=2, null=True, blank=True)),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('county_fipscode', models.CharField(max_length=3, null=True, blank=True)),
                ('county_name', models.CharField(max_length=120, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('fips', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'ordering': ('state_name', 'county_name'),
                'verbose_name': 'Government Unit',
                'verbose_name_plural': 'Government Units',
            },
        ),
        # Incorporated places (cities, towns, villages, …) with capital-city
        # and county-seat flags.
        migrations.CreateModel(
            name='IncorporatedPlace',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61400, 'Incorporated Place'), (61401, 'Borough'), (61403, 'City'), (61404, 'City and Borough'), (61405, 'Communidad'), (61407, 'Consolidated City'), (61410, 'Independent City'), (61412, 'Municipality'), (61414, 'Town'), (61415, 'Village'), (61416, 'Zona Urbana')])),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('place_fipscode', models.CharField(max_length=5, null=True, verbose_name=b'FIPS Code', blank=True)),
                ('place_name', models.CharField(max_length=120, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('iscapitalcity', models.IntegerField(blank=True, null=True, choices=[(1, 'Yes'), (2, 'No'), (0, 'Unknown')])),
                ('iscountyseat', models.IntegerField(blank=True, null=True, choices=[(1, 'Yes'), (2, 'No'), (0, 'Unknown')])),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'ordering': ('state_name', 'place_name'),
            },
        ),
        # Minor civil divisions (townships, precincts, parishes, …).
        migrations.CreateModel(
            name='MinorCivilDivision',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61300, 'Minor Civil Division'), (61302, 'Barrio'), (61304, 'Barrio - Pueblo'), (61306, 'Borough'), (61308, 'Census County Division'), (61310, 'Census Sub Area'), (61312, 'Census Sub District'), (61314, 'Charter Township'), (61316, 'City'), (61318, 'County'), (61320, 'District'), (61322, 'Gore'), (61324, 'Grant'), (61326, 'Incorporated Town'), (61328, 'Independent City'), (61330, 'Island'), (61332, 'Location'), (61334, 'Municipality'), (61336, 'Plantation'), (61338, 'Precinct'), (61340, 'Purchase'), (61342, 'Reservation'), (61344, 'Subbarrio'), (61346, 'Town'), (61348, 'Township'), (61350, 'Unorganized Territory'), (61352, 'Village')])),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('minorcivildivision_fipscode', models.CharField(max_length=10, null=True, blank=True)),
                ('minorcivildivision_name', models.CharField(max_length=120, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Native American areas (reservations, trust lands, statistical areas).
        migrations.CreateModel(
            name='NativeAmericanArea',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('name', models.CharField(max_length=120, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(64000, 'Native American Reservation'), (64080, 'Tribal Designated Statistic Area'), (64081, 'Colony'), (64082, 'Community'), (64083, 'Joint Use Area'), (64084, 'Pueblo'), (64085, 'Rancheria'), (64086, 'Reservation'), (64087, 'Reserve'), (64088, 'Oklahoma Tribal Statistical Area'), (64089, 'American Indian Trust Land'), (64090, 'Joint Use Oklahoma Tribal Statistical Area'), (64091, 'Ranch'), (64092, 'State Designated American Indian Statistical Area'), (64093, 'Indian Village'), (64095, 'Indian Community'), (64096, 'American Indian Off-Reservation Trust Land')])),
                ('nativeamericanarea_fipscode', models.CharField(max_length=5, null=True, blank=True)),
                ('admintype', models.IntegerField(blank=True, null=True, choices=[(0, 'Unknown'), (1, 'Federal'), (2, 'Tribal'), (3, 'State'), (4, 'Regional'), (5, 'County'), (6, 'Municipal'), (7, 'Private')])),
                ('population', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Reserves.
        # NOTE(review): the single fcode choice is (67500, 'Wilderness'),
        # which does not obviously match the model name — confirm.
        migrations.CreateModel(
            name='Reserve',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('name', models.CharField(max_length=120, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(67500, 'Wilderness')])),
                ('admintype', models.IntegerField(blank=True, null=True, choices=[(0, 'Unknown'), (1, 'Federal'), (2, 'Tribal'), (3, 'State'), (4, 'Regional'), (5, 'County'), (6, 'Municipal'), (7, 'Private')])),
                ('ownerormanagingagency', models.IntegerField(blank=True, null=True, choices=[(1, 'Army Corps of Engineers'), (15, 'Bureau of Census'), (2, 'Bureau of Indian Affairs'), (3, 'Bureau of Land Management'), (4, 'Bureau of Reclamation'), (5, 'Department of Defense'), (6, 'Department of Energy'), (7, 'Department of Homeland Security'), (8, 'Department of Transportation'), (9, 'Department of Veteran Affairs'), (10, 'Fish and Wildlife Service'), (11, 'Forest Service'), (12, 'National Oceanic and Atmospheric Administration'), (13, 'National Park Service'), (14, 'Tennessee Valley Authority'), (99, 'Not Applicable')])),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # State / territory boundaries ("High" presumably = high-resolution
        # geometry; confirm against the dataset documentation).
        migrations.CreateModel(
            name='StateorTerritoryHigh',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61100, 'State'), (61101, 'Territory'), (61102, 'Province')])),
                ('state_fipscode', models.CharField(max_length=2, null=True, blank=True)),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Unincorporated places (census-designated places, neighborhoods, …).
        migrations.CreateModel(
            name='UnincorporatedPlace',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('permanent_identifier', models.CharField(max_length=40, null=True, blank=True)),
                ('source_featureid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datasetid', models.CharField(max_length=40, null=True, blank=True)),
                ('source_datadesc', models.CharField(max_length=100, null=True, blank=True)),
                ('source_originator', models.CharField(max_length=130, null=True, blank=True)),
                ('data_security', models.IntegerField(blank=True, null=True, choices=[(0, b'Unknown'), (1, b'Top Secret'), (2, b'Secret'), (3, b'Confidential'), (4, b'Restricted'), (5, b'Unclassified'), (6, b'Sensitive')])),
                ('distribution_policy', models.CharField(blank=True, max_length=4, null=True, choices=[(b'A1', b'Emergency Service Provider - Internal Use Only'), (b'A2', b'Emergency Service Provider - Bitmap Display Via Web'), (b'A3', b'Emergency Service Provider - Free Distribution to Third Parties'), (b'A4', b'Emergency Service Provider - Free Distribution to Third Parties Via Internet'), (b'B1', b'Government Agencies or Their Delegated Agents - Internal Use Only'), (b'B2', b'Government Agencies or Their Delegated Agents - Bitmap Display Via Web'), (b'B3', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties'), (b'B4', b'Government Agencies or Their Delegated Agents - Free Distribution to Third Parties Via Internet'), (b'C1', b'Other Public or Educational Institutions - Internal Use Only'), (b'C2', b'Other Public or Educational Institutions - Bitmap Display Via Web'), (b'C3', b'Other Public or Educational Institutions - Free Distribution to Third Parties'), (b'C4', b'Other Public or Educational Institutions - Free Distribution to Third Parties Via Internet'), (b'D1', b'Data Contributors - Internal Use Only'), (b'D2', b'Data Contributors - Bitmap Display Via Web'), (b'D3', b'Data Contributors - Free Distribution to Third Parties'), (b'D4', b'Data Contributors - Free Distribution to Third Parties Via Internet'), (b'E1', b'Public Domain - Internal Use Only'), (b'E2', b'Public Domain - Bitmap Display Via Web'), (b'E3', b'Public Domain - Free Distribution to Third Parties'), (b'E4', b'Public Domain - Free Distribution to Third Parties Via Internet')])),
                ('loaddate', models.DateTimeField(null=True, blank=True)),
                ('ftype', models.CharField(max_length=50, null=True, blank=True)),
                ('gnis_id', models.CharField(max_length=10, null=True, blank=True)),
                ('globalid', models.CharField(max_length=38, null=True, blank=True)),
                ('objectid', models.IntegerField(unique=True, null=True, blank=True)),
                ('fcode', models.IntegerField(blank=True, null=True, choices=[(61500, 'Unincorporated Place'), (61501, 'Census Designated Place'), (61502, 'Community / Town / Village'), (61503, 'Neighborhood'), (61504, 'Subdivision'), (61505, 'Communidad'), (61506, 'Zona Urbana')])),
                ('state_name', models.CharField(max_length=120, null=True, blank=True)),
                ('place_fipscode', models.CharField(max_length=5, null=True, blank=True)),
                ('place_name', models.CharField(max_length=120, null=True, blank=True)),
                ('population', models.IntegerField(null=True, blank=True)),
                ('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
#!/usr/bin/env python
# Author: veelion
"""Capture browser login cookies with Selenium and reuse them later in
another Selenium browser or a ``requests`` session."""

import time
import pickle

import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


def save_cookies(cookies, file_to_save):
    """Pickle the cookie list (as returned by ``browser.get_cookies()``)."""
    with open(file_to_save, 'wb') as f:
        pickle.dump(cookies, f)


def login_auto(login_url, username, password,
               username_xpath, password_xpath, submit_xpath,
               cookies_file, browser=None):
    """Fill the login form automatically and save the resulting cookies.

    Parameters
    ----------
    login_url : str
        URL of the login page.
    username, password : str
        Credentials typed into the form.
    username_xpath, password_xpath, submit_xpath : str
        XPath locators for the form fields and the submit control.
    cookies_file : str
        Path where the pickled cookie list is written.
    browser : selenium webdriver, optional
        Reuse an existing browser; a headless Chrome is created otherwise.
    """
    if browser is None:
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        options.add_argument('window-size=1200x600')
        # NOTE(review): `chrome_options=` is deprecated in Selenium 4 in
        # favour of `options=` — confirm the Selenium version in use.
        browser = webdriver.Chrome(chrome_options=options)
    browser.maximize_window()
    browser.get(login_url)
    time.sleep(9)  # wait for the login page to finish loading
    browser.find_element_by_xpath(username_xpath).send_keys(username)
    browser.find_element_by_xpath(password_xpath).send_keys(password)
    browser.find_element_by_xpath(submit_xpath).send_keys(Keys.ENTER)
    time.sleep(9)  # wait for the post-login redirect to finish
    cookies = browser.get_cookies()
    print(cookies)
    save_cookies(cookies, cookies_file)


def login_manually(login_url, cookies_file, browser=None):
    """Open the login page, let a human log in, then save the cookies."""
    # Manual mode: username/password are typed by the user, not filled here.
    if browser is None:
        browser = webdriver.Chrome()
    browser.get(login_url)
    time.sleep(30)  # leave time to type username, password and captcha
    cookies = browser.get_cookies()
    print(cookies)
    save_cookies(cookies, cookies_file)


def load_to_browser(cookies_file, browser=None):
    """Load pickled cookies into a Selenium browser and return it."""
    with open(cookies_file, 'rb') as f:
        cookies = pickle.load(f)
    if browser is None:
        browser = webdriver.Chrome()
    for cookie in cookies:
        browser.add_cookie(cookie)
    return browser


def load_to_requests(cookies_file, session=None):
    """Load pickled cookies into a ``requests`` session and return it.

    Bug fix: the session was previously built and populated but never
    returned, so callers always received ``None``.
    """
    with open(cookies_file, 'rb') as f:
        cookies = pickle.load(f)
    if session is None:
        session = requests.Session()
    for cookie in cookies:
        session.cookies.set(cookie['name'], cookie['value'])
    return session


if __name__ == '__main__':
    from sys import argv
    if argv[1] == 'manually':
        # login_url = 'https://passport.bilibili.com/login'
        login_url = 'https://www.zhihu.com/signin'
        login_manually(login_url, 'z-.cookies')
    elif argv[1] == 'auto':
        login_url = 'https://weibo.com/'
        username_xpath = '//input[@id="loginname"]'
        password_xpath = '//input[@name="password"]'
        submit_xpath = '//a[@action-type="btn_submit"]'
        username = 'your-username'
        password = 'your-password'
        login_auto(login_url, username, password,
                   username_xpath, password_xpath, submit_xpath,
                   'z-weibo.cookies')
    else:
        print('invalid option')
"""DGL Distributed Training Infrastructure.""" from __future__ import absolute_import from ._ffi.function import _init_api from .nodeflow import NodeFlow from .utils import unwrap_to_ptr_list from . import utils _init_api("dgl.network") def _create_sender(): """Create a Sender communicator via C api """ return _CAPI_DGLSenderCreate() def _finalize_sender(sender): """Finalize Sender communicator Parameters ---------- sender : ctypes.c_void_p C Sender handle """ _CAPI_DGLFinalizeSender(sender) def _add_receiver_addr(sender, ip_addr, port, recv_id): """Add Receiver IP address to namebook Parameters ---------- sender : ctypes.c_void_p C Sender handle ip_addr : str IP address of Receiver port : int listen of Receiver recv_id : int Receiver ID """ _CAPI_DGLSenderAddReceiver(sender, ip_addr, port, recv_id) def _sender_connect(sender): """Connect to all the Receiver Parameters ---------- sender : ctypes.c_void_p C Sender handle """ _CAPI_DGLSenderConnect(sender) def _send_nodeflow(sender, nodeflow, recv_id): """Send sampled subgraph (Nodeflow) to remote Receiver. Parameters ---------- sender : ctypes.c_void_p C Sender handle nodeflow : NodeFlow NodeFlow object recv_id : int Receiver ID """ graph_handle = nodeflow._graph._handle node_mapping = nodeflow._node_mapping.todgltensor() edge_mapping = nodeflow._edge_mapping.todgltensor() layers_offsets = utils.toindex(nodeflow._layer_offsets).todgltensor() flows_offsets = utils.toindex(nodeflow._block_offsets).todgltensor() _CAPI_SenderSendSubgraph(sender, recv_id, graph_handle, node_mapping, edge_mapping, layers_offsets, flows_offsets) def _create_receiver(): """Create a Receiver communicator via C api """ return _CAPI_DGLReceiverCreate() def _finalize_receiver(receiver): """Finalize Receiver Communicator """ _CAPI_DGLFinalizeReceiver(receiver) def _receiver_wait(receiver, ip_addr, port, num_sender): """Wait all Sender to connect.. 
Parameters ---------- receiver : ctypes.c_void_p C Receiver handle ip_addr : str IP address of Receiver port : int port of Receiver num_sender : int total number of Sender """ _CAPI_DGLReceiverWait(receiver, ip_addr, port, num_sender) def _recv_nodeflow(receiver, graph): """Receive sampled subgraph (NodeFlow) from remote sampler. Parameters ---------- receiver : ctypes.c_void_p C Receiver handle graph : DGLGraph The parent graph Returns ------- NodeFlow Sampled NodeFlow object """ # hdl is a list of ptr hdl = unwrap_to_ptr_list(_CAPI_ReceiverRecvSubgraph(receiver)) return NodeFlow(graph, hdl[0])
"""OCP = open for extension, closed for modification.

Demonstrates the Specification pattern as an OCP-compliant replacement
for an ever-growing filter class: new criteria are added by writing new
Specification subclasses, not by editing existing filter code.
"""
from enum import Enum


class Color(Enum):
    RED = 1
    GREEN = 2
    BLUE = 3


class Size(Enum):
    SMALL = 1
    MEDIUM = 2
    LARGE = 3


class Product:
    """A product with a name, a Color and a Size."""

    def __init__(self, name, color, size) -> None:
        self.name = name
        self.color = color
        self.size = size


class ProductFilter:
    """OCP violation: every new criterion forces a new method here
    (combinatorial explosion for combined criteria)."""

    def filter_by_color(self, products, color):
        for p in products:
            if p.color == color:
                yield p

    def filter_by_size(self, products, size):
        for p in products:
            if p.size == size:
                yield p

    def filter_by_size_and_color(self, products, size, color):
        for p in products:
            if p.color == color and p.size == size:
                yield p


# specification pattern
class Specification:
    """Base class: a single yes/no criterion on an item.

    NOTE: the method name ``is_statisfied`` (sic) is kept as-is to
    preserve the interface used throughout this module.
    """

    def is_statisfied(self, item):
        """Return True if *item* meets this criterion. Must be overridden."""
        # Previously a bare `pass`, which silently returned a falsy None;
        # raising makes a missing override an obvious bug instead.
        raise NotImplementedError('subclasses must implement is_statisfied')

    def __and__(self, other):
        # Enables `spec_a & spec_b` to build a combined specification.
        return AndSpecification(self, other)


class Filter:
    """Base class for anything that filters items by a Specification."""

    def filter(self, items, spec):
        """Yield the items satisfying *spec*. Must be overridden."""
        raise NotImplementedError('subclasses must implement filter')


class ColorSpecification(Specification):
    """Satisfied by items of a given Color."""

    def __init__(self, color) -> None:
        self.color = color

    def is_statisfied(self, item):
        return item.color == self.color


class SizeSpecification(Specification):
    """Satisfied by items of a given Size."""

    def __init__(self, size) -> None:
        self.size = size

    def is_statisfied(self, item):
        return item.size == self.size


class AndSpecification(Specification):
    """Satisfied only when every child specification is satisfied."""

    def __init__(self, *args) -> None:
        self.args = args

    def is_statisfied(self, item):
        return all(map(
            lambda spec: spec.is_statisfied(item), self.args
        ))


class BetterFilter(Filter):
    """OCP-compliant filter: closed for modification, extended via specs."""

    def filter(self, items, spec):
        for item in items:
            if spec.is_statisfied(item):
                yield item


if __name__ == '__main__':
    apple = Product('Apple', Color.GREEN, Size.SMALL)
    tree = Product('Tree', Color.GREEN, Size.LARGE)
    house = Product('House', Color.BLUE, Size.LARGE)

    products = [apple, tree, house]

    # old implementation
    pf = ProductFilter()
    print('Green products (old):')
    for p in pf.filter_by_color(products, Color.GREEN):
        print(f' - {p.name} is green')

    # specification implementation
    bf = BetterFilter()
    print('Green products (new):')
    green = ColorSpecification(Color.GREEN)
    for p in bf.filter(products, green):
        print(f' - {p.name} is green')

    print('Large blue items:')
    large_blue = AndSpecification(
        SizeSpecification(Size.LARGE),
        ColorSpecification(Color.BLUE)
    )
    for p in bf.filter(products, large_blue):
        print(f' - {p.name} is large and blue')
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class MassMailing(models.Model):
    """Extends mass mailings with sales KPIs (quotation count and
    invoiced amount) attributed via the mailing's UTM parameters."""

    _name = 'mail.mass_mailing'
    _inherit = 'mail.mass_mailing'

    # Computed KPIs; both fall back to 0 when the user lacks read access.
    sale_quotation_count = fields.Integer('Quotation Count', compute='_compute_sale_quotation_count')
    sale_invoiced_amount = fields.Integer('Invoiced Amount', compute='_compute_sale_invoiced_amount')

    @api.depends('mailing_domain')
    def _compute_sale_quotation_count(self):
        """Count sale orders matching this mailing's UTM domain."""
        # Access check done once, outside the loop, for efficiency.
        has_so_access = self.env['sale.order'].check_access_rights('read', raise_exception=False)
        for mass_mailing in self:
            mass_mailing.sale_quotation_count = self.env['sale.order'].search_count(self._get_sale_utm_domain()) if has_so_access else 0

    @api.depends('mailing_domain')
    def _compute_sale_invoiced_amount(self):
        """Sum the invoiced totals (in user currency) of the invoices
        linked to sale orders matching this mailing's UTM domain."""
        has_so_access = self.env['sale.order'].check_access_rights('read', raise_exception=False)
        has_invoice_report_access = self.env['account.invoice.report'].check_access_rights('read', raise_exception=False)
        for mass_mailing in self:
            if has_so_access and has_invoice_report_access:
                invoices = self.env['sale.order'].search(self._get_sale_utm_domain()).mapped('invoice_ids')
                res = self.env['account.invoice.report'].search_read([('invoice_id', 'in', invoices.ids)], ['user_currency_price_total'])
                mass_mailing.sale_invoiced_amount = sum(r['user_currency_price_total'] for r in res)
            else:
                mass_mailing.sale_invoiced_amount = 0

    @api.multi
    def action_redirect_to_quotations(self):
        """Open the quotations list filtered to this mailing's UTM domain."""
        action = self.env.ref('sale.action_quotations_with_onboarding').read()[0]
        action['domain'] = self._get_sale_utm_domain()
        action['context'] = {'default_type': 'lead'}
        return action

    @api.multi
    def action_redirect_to_invoiced(self):
        """Open the posted customer invoices linked to this mailing's
        attributed sale orders."""
        action = self.env.ref('account.action_invoice_refund_out_tree').read()[0]
        invoices = self.env['sale.order'].search(self._get_sale_utm_domain()).mapped('invoice_ids')
        action['domain'] = [
            ('id', 'in', invoices.ids),
            ('type', 'in', ['out_invoice', 'out_refund']),
            ('state', 'not in', ['draft', 'cancel'])
        ]
        return action

    def _get_sale_utm_domain(self):
        """Build a sale.order search domain from this mailing's UTM fields.

        NOTE(review): reads self.campaign_id/source_id/medium_id without
        ensure_one(); presumably always called on a single record — confirm.
        """
        res = []
        if self.campaign_id:
            res.append(('campaign_id', '=', self.campaign_id.id))
        if self.source_id:
            res.append(('source_id', '=', self.source_id.id))
        if self.medium_id:
            res.append(('medium_id', '=', self.medium_id.id))
        if not res:
            # No UTM data at all: deliberately match nothing.
            res.append((0, '=', 1))
        return res
"""oneM2M request primitives (AE/CNT/CIN/SUB CRUD + discovery) published
over MQTT. Each function builds an 'm2m:rqp' request primitive, publishes
it on the configured request topic, and returns the request id (rqi) so
the caller can match the asynchronous response."""
import paho.mqtt.client as mqtt
import shortid
import json

import conf
import values


def _publish(rqp):
    """Serialize a request primitive, publish it, and return its rqi."""
    payload = json.dumps(rqp)
    values.mqttc.publish(values.req_topic, payload)
    print(values.req_topic + ' (json) ' + payload + ' ---->', end='\n\n')
    return rqp['rqi']


def createAE():
    """CREATE the configured Application Entity (ty=2) under its parent CSE."""
    return _publish({
        'op': 1,  # CREATE
        'to': conf.ae.parent,
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'ty': 2,  # AE
        'pc': {'m2m:ae': {
            'rn': conf.ae.name,
            'api': conf.ae.appid,
            'rr': True,  # request reachability
        }},
    })


def retrieveAE():
    """RETRIEVE the configured AE resource."""
    return _publish({
        'op': 2,  # RETRIEVE
        'to': conf.ae.parent + '/' + conf.ae.name,
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


def deleteAE():
    """DELETE the configured AE resource."""
    return _publish({
        'op': 4,  # DELETE
        'to': conf.ae.parent + '/' + conf.ae.name,
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


def createCNT(count):
    """CREATE the container (ty=3) at index *count* of conf.cnt."""
    return _publish({
        'op': 1,  # CREATE
        'to': conf.cnt[count]['parent'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'ty': 3,  # container
        'pc': {'m2m:cnt': {
            'rn': conf.cnt[count]['name'],
            # container name doubles as its label
            'lbl': [conf.cnt[count]['name']],
        }},
    })


def retrieveCNT(count):
    """RETRIEVE the container at index *count* of conf.cnt."""
    return _publish({
        'op': 2,  # RETRIEVE
        'to': conf.cnt[count]['parent'] + '/' + conf.cnt[count]['name'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


def deleteCNT(count):
    """DELETE the container at index *count* of conf.cnt."""
    return _publish({
        'op': 4,  # DELETE (comment previously said "retrieve" — fixed)
        'to': conf.cnt[count]['parent'] + '/' + conf.cnt[count]['name'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


def createCIN(count, content):
    """CREATE a contentInstance (ty=4) with *content* in container *count*."""
    return _publish({
        'op': 1,  # CREATE
        'to': conf.cnt[count]['parent'] + '/' + conf.cnt[count]['name'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        # Bug fix: was the string '4'; every other primitive (and the
        # oneM2M resource-type enumeration) uses an integer.
        'ty': 4,  # contentInstance
        'pc': {'m2m:cin': {'con': content}},
    })


def retrieveCIN(count):
    """RETRIEVE the latest contentInstance of container *count*."""
    return _publish({
        'op': 2,  # RETRIEVE
        'to': conf.cnt[count]['parent'] + '/' + conf.cnt[count]['name'] + '/latest',
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


def createSUB(count):
    """CREATE a subscription (ty=23) at index *count* of conf.sub."""
    return _publish({
        'op': 1,  # CREATE
        'to': conf.sub[count]['parent'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'ty': 23,  # subscription
        'pc': {'m2m:sub': {
            'rn': conf.sub[count]['name'],
            'enc': {'net': [3]},  # notify on child-resource creation
            'nu': [conf.sub[count]['nu']],
            'nct': 2,  # notification content: modified attributes
        }},
    })


def deleteSUB(count):
    """DELETE the subscription at index *count* of conf.sub."""
    return _publish({
        'op': 4,  # DELETE
        'to': conf.sub[count]['parent'] + '/' + conf.sub[count]['name'],
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })


# TODO: Add acp method
def createACP():
    pass


def retrieveACP():
    pass


def updateACP():
    pass


def discovery(target, ty):
    """RETRIEVE with filter usage (fu=1) to discover resources of type *ty*
    under *target*."""
    return _publish({
        'op': 2,  # RETRIEVE
        'to': target + '?fu=1&ty=' + str(ty),
        'fr': conf.ae.id,
        'rqi': shortid.generate(),
        'pc': {},
    })
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db.models import Q
from rest_framework import permissions
from django_filters import CharFilter, FilterSet

from pipeline.component_framework.models import ComponentModel
from gcloud.core.models import ProjectBasedComponent
from .base import GcloudReadOnlyViewSet
from ..serilaziers.component_model import ComponentModelListSerializer, ComponentModelDetailSerializer


class ComponentModelFilter(FilterSet):
    """Filters components by project: hides components that belong
    exclusively to other projects."""

    project_id = CharFilter(method="filter_by_project_id")

    def filter_by_project_id(self, queryset, name, value):
        """Exclude components bound to other projects (or, with no project
        given, exclude every project-bound component)."""
        if value:
            exclude_component_codes = ProjectBasedComponent.objects.get_components_of_other_projects(value)
        else:
            exclude_component_codes = ProjectBasedComponent.objects.get_components()
        # Negated Q: keep only components whose code is NOT excluded.
        query_set = ~Q(code__in=exclude_component_codes)
        return queryset.filter(query_set)

    class Meta:
        model = ComponentModel
        fields = ["version"]


class ComponentModelSetViewSet(GcloudReadOnlyViewSet):
    """Read-only API over enabled components, looked up by `code`.

    The list view hides the remote plugin wrapper; the detail view does not,
    so existing remote-plugin detail links keep working.
    """

    queryset = ComponentModel.objects.filter(status=True).exclude(code="remote_plugin").order_by("name")
    retrieve_queryset = ComponentModel.objects.filter(status=True).order_by("name")
    serializer_class = ComponentModelListSerializer
    retrieve_serializer_class = ComponentModelDetailSerializer
    permission_classes = [permissions.IsAuthenticated]
    filterset_class = ComponentModelFilter
    pagination_class = None  # component lists are small; return everything
    lookup_field = "code"
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import os from ._constants import USER_FACING_NAME, TRACEPARENT_ENV_VAR from ._span import Span from ._vendored import _execution_context as execution_context class AmlTracer: def __init__(self, span_processors): self._span_processors = span_processors def start_as_current_span(self, name, parent=None, user_facing_name=None): parent = parent or self.__class__._get_ambient_parent() span = Span(name, parent, self._span_processors) self.__class__.decorate_span(span, user_facing_name) span.__enter__() return span def start_span(self, name, parent=None, user_facing_name=None): span = Span(name, parent, self._span_processors) self.__class__.decorate_span(span, user_facing_name) return span @staticmethod def decorate_span(span, user_facing_name): if user_facing_name: span.attributes[USER_FACING_NAME] = user_facing_name @staticmethod def _get_ambient_parent(): current_parent = execution_context.get_current_span() if current_parent: return current_parent traceparent = os.environ.get(TRACEPARENT_ENV_VAR, '').split('-') if not traceparent or len(traceparent) != 4: return None return Span._from_traceparent(*traceparent) class DefaultTraceProvider: def __init__(self, tracer): self._tracer = tracer def get_tracer(self, name): return self._tracer def get_current_span(self): return execution_context.get_current_span()
"""Version checking against the publish server, plus an Uploader that
pushes file-system change events to the publish endpoint."""
import requests
import semver
import os
from urllib.parse import urlsplit

from .errors import MyError


def check(config):
    """Ask the publish server which versions it supports and validate ours.

    Prints a notice when a newer compatible version exists; raises MyError
    when our major version is no longer supported, or a generic Exception
    when the server cannot be reached.
    """
    version_endpoint = urlsplit(config["publishLink"])._replace(
        path="/api/version",
        query=None
    ).geturl()
    response = requests.get(version_endpoint)
    our_version = semver.parse_version_info(config["version"])
    if response.status_code != requests.codes.ok:
        raise Exception("Server is not responding")
    for candidate in response.text.split(" "):
        candidate_info = semver.parse_version_info(candidate)
        if candidate_info.major != our_version.major:
            continue
        if semver.compare(config["version"], candidate) < 0:
            print("A newer version of the software ({}) is available. Download it at http://link".format(candidate)) #TODO link
        print("version OK")
        return
    raise MyError("Your version {} is no longer supported. Download a newer version at http://link".format(config["version"]))


def mergeIgnores(config):
    #TODO
    return config


class Uploader:
    """Posts create/modify/delete/move events for files under the configured
    root to the publish endpoint, optionally echoing results over IRC."""

    def __init__(self, config, irc=None):
        self.publishLink = config["publishLink"]
        self.rootPath = config["target"]
        self.irc = irc

    def send(self, filepath, eventType, source=None):
        """Report a single file event; uploads the file body except for
        'deleted' and 'moved' events."""
        print(eventType, filepath)
        params = {
            "method": eventType,
            # Paths are sent relative to the watched root.
            "destination": os.path.relpath(filepath, self.rootPath)
        }
        if eventType == "deleted":
            response = requests.post(self.publishLink, params=params)
        elif eventType == "moved":
            params["source"] = os.path.relpath(source, self.rootPath)
            response = requests.post(self.publishLink, params=params)
        else:
            # Created/modified: upload the current file contents.
            with open(filepath, "rb") as fp:
                response = requests.post(self.publishLink, files={"file": fp}, params=params)
        if response.status_code >= 400:
            print(response.text)
        elif self.irc is not None:
            print(response.text)
            self.irc.mymessage("updated " + response.text)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 00:16:05 2018

@author: Haider Raheem

Roll two dice until a target sum comes up, reporting how many rolls
it took.
"""

import random


def throwUntil(x):
    """Roll two dice repeatedly until their sum equals ``x``.

    Parameters
    ----------
    x : int
        Target sum; must satisfy 2 <= x <= 12 or this loops forever.

    Prints the number of rolls needed. Returns None.
    """
    count = 0
    die1 = 0
    die2 = 0
    while die1 + die2 != x:
        die1 = random.randint(1, 6)
        die2 = random.randint(1, 6)
        count += 1
    print("Dice are rolled", count, "times to get the sum", x)


def _main():
    """Prompt for a valid target sum, then run the simulation."""
    x = 0
    while x < 2 or x > 12:
        x = int(input("Enter sum of dice: "))
        if x < 2 or x > 12:
            print("Sum must be between 2 and 12 inclusive")
    throwUntil(x)


if __name__ == '__main__':
    # Guarding the interactive prompt lets this module be imported
    # (e.g. by tests) without blocking on input().
    _main()
"""Pytest suite for audalign's alignment APIs.

Exercises every alignment technique (fingerprints, correlation,
correlation_spectrogram, visual) across folder alignment, two-file
alignment, target alignment, fine alignment, and shift
recalculation/writing. Test fixtures live under test_audio/ and tests/.
"""
import audalign as ad
import pytest
import pickle

# Shared fixture paths.
test_file_eig = "test_audio/test_shifts/Eigen-20sec.mp3"
test_file_eig2 = "test_audio/test_shifts/Eigen-song-base.mp3"
test_folder_eig = "test_audio/test_shifts/"


class TestAlign:
    """Folder-level align() across all techniques and option combos."""

    ada = ad.Audalign(num_processors=4)

    @pytest.mark.smoke
    def test_align_fingerprint(self, tmpdir):
        result = self.ada.align("test_audio/test_shifts", tmpdir)
        assert result
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            write_extension=".wav",
        )
        assert result
        self.ada.pretty_print_results(result)

    def test_align_cor(self, tmpdir):
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="correlation",
        )
        assert result

    def test_align_cor_options(self, tmpdir):
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="correlation",
            cor_sample_rate=4000,
            filter_matches=0.3,  # might have to adjust this
            locality=30,
        )
        assert result
        self.ada.pretty_print_alignment(result, match_keys="match_info")

    def test_align_cor_spec(self, tmpdir):
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="correlation_spectrogram",
        )
        assert result
        self.ada.pretty_print_alignment(result)

    def test_align_cor_spec_options(self, tmpdir):
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="correlation_spectrogram",
            cor_sample_rate=4000,
            filter_matches=0.3,  # might have to adjust this
            locality=30,
            max_lags=10,
        )
        assert result
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.align(
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="correlation_spectrogram",
            write_extension=".wav",
            cor_sample_rate=4000,
            filter_matches=0.3,  # might have to adjust this
            locality=30,
            max_lags=10,
        )
        assert result is not None
        self.ada.pretty_print_alignment(result)
        self.ada.set_multiprocessing(True)

    def test_align_vis(self, tmpdir):
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="visual",
            volume_threshold=215,
            img_width=0.5,
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            technique="visual",
            volume_threshold=215,
            img_width=0.5,
        )
        assert result is not None
        self.ada.set_multiprocessing(True)

    def test_align_badish_options(self, tmpdir):
        # Odd but tolerated option: extension without a leading dot.
        result = self.ada.align(
            "test_audio/test_shifts",
            tmpdir,
            write_extension="mov",
        )
        assert result

    @pytest.mark.xfail
    def test_align_bad_technique(self):
        # Unknown technique name must raise.
        self.ada.align("test_audio/test_shifts", technique="correlationion_bad")

    def test_align_load_fingerprints(self):
        result = self.ada.align(
            "test_audio/test_shifts",
            load_fingerprints="tests/test_fingerprints.json",
            technique="fingerprints",
        )
        assert result


class TestAlignFiles:
    """Two-file align_files() across techniques."""

    ada = ad.Audalign(num_processors=4)

    def test_align_files_fingerprints(self, tmpdir):
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            destination_path=tmpdir,
        )
        assert result

    def test_align_files_load_fingerprints(self):
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            load_fingerprints="tests/test_fingerprints.json",
            technique="fingerprints",
        )
        assert result

    def test_align_files_vis(self, tmpdir):
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            destination_path=tmpdir,
            technique="visual",
            volume_threshold=215,
            img_width=0.5,
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            destination_path=tmpdir,
            technique="visual",
            volume_threshold=215,
            img_width=0.5,
        )
        assert result is not None
        self.ada.set_multiprocessing(True)

    def test_align_files_cor(self, tmpdir):
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            destination_path=tmpdir,
            write_extension=".wav",
            technique="correlation",
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.align_files(
            "test_audio/test_shifts/Eigen-20sec.mp3",
            "test_audio/test_shifts/Eigen-song-base.mp3",
            destination_path=tmpdir,
            write_extension=".wav",
            technique="correlation",
        )
        assert result is not None
        self.ada.set_multiprocessing(True)


class TestTargetAlign:
    """target_align(): align a folder against one target file."""

    ada = ad.Audalign(num_processors=4)

    def test_target_align_vis(self, tmpdir):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="visual",
            img_width=0.5,
            volume_threshold=215,
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="visual",
            img_width=0.5,
            volume_threshold=215,
        )
        assert result is not None
        self.ada.set_multiprocessing(True)

    def test_target_align_vis_mse(self, tmpdir):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="visual",
            img_width=0.5,
            volume_threshold=215,
            calc_mse=True,
            start_end=(0, -1),
        )
        assert result

    @pytest.mark.xfail
    def test_target_align_bad_technique(self):
        # Unknown technique name must raise.
        self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            technique="visual_bad",
        )

    def test_target_align_cor(self, tmpdir):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="correlation",
        )
        assert result

    def test_target_align_cor_spec(self, tmpdir):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="correlation_spectrogram",
        )
        assert result

    def test_target_align_fingerprints(self, tmpdir):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            destination_path=tmpdir,
            technique="fingerprints",
        )
        assert result

    def test_target_align_load_fingerprints(self):
        result = self.ada.target_align(
            "test_audio/test_shifts/Eigen-song-base.mp3",
            "test_audio/test_shifts",
            load_fingerprints="tests/test_fingerprints.json",
            technique="fingerprints",
        )
        assert result


class TestFineAlign:
    """fine_align(): refine a previously computed alignment.

    Starts from a pickled coarse fingerprint alignment to avoid
    recomputing it for every test.
    """

    ada = ad.Audalign(num_processors=4)

    with open("tests/align_test.pickle", "rb") as f:
        align_fing_results = pickle.load(f)
    align_fing_results = ada.recalc_shifts(align_fing_results)

    @pytest.mark.smoke
    def test_fine_align(self):
        result = self.ada.fine_align(
            self.align_fing_results,
        )
        assert result is not None

    def test_fine_align_spec(self):
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="correlation_spectrogram",
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="correlation_spectrogram",
        )
        self.ada.set_multiprocessing(True)
        assert result is not None

    def test_fine_align_locality(self):
        result = self.ada.fine_align(
            self.align_fing_results,
            locality=10,
        )
        assert result is not None
        self.ada.pretty_print_alignment(result, match_keys="match_info")

    def test_fine_align_fingerprints(self, tmpdir):
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="fingerprints",
            destination_path=tmpdir,
            locality=5,
            locality_filter_prop=0.5,
        )
        assert result is not None
        self.ada.pretty_print_alignment(result, match_keys="match_info")

    def test_fine_align_load_fingerprints(self):
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="fingerprints",
            load_fingerprints="tests/test_fingerprints.json",
            locality=5,
            locality_filter_prop=0.5,
        )
        assert result is not None
        self.ada.pretty_print_alignment(result, match_keys="match_info")

    def test_fine_align_visual(self, tmpdir):
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="visual",
            destination_path=tmpdir,
            volume_threshold=214,
            img_width=0.5,
        )
        assert result is not None
        # Re-run single-process to cover both multiprocessing paths.
        self.ada.set_multiprocessing(False)
        result = self.ada.fine_align(
            self.align_fing_results,
            technique="visual",
            destination_path=tmpdir,
            volume_threshold=214,
            img_width=0.5,
        )
        assert result is not None
        self.ada.set_multiprocessing(True)
        self.ada.pretty_print_alignment(result, match_keys="fine_match_info")

    def test_fine_align_options(self, tmpdir):
        result = self.ada.fine_align(
            self.align_fing_results,
            destination_path=tmpdir,
            cor_sample_rate=8000,
            max_lags=5,
            match_index=1,
            write_extension=".ogg",
            filter_matches=0.1,
        )
        assert result is not None
        self.ada.pretty_print_results(result)


class TestRecalcWriteShifts:
    # TODO test recalc shifts and write from results
    ada = ad.Audalign(num_processors=2)

    # Shared coarse + fine results built once at class-definition time.
    with open("tests/align_test.pickle", "rb") as f:
        align_fing_results = pickle.load(f)
    align_fing_results = ada.recalc_shifts(align_fing_results)
    full_results = ada.fine_align(
        align_fing_results,
    )

    def test_recalc_shifts(self):
        temp_results = self.ada.recalc_shifts(self.align_fing_results)
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(self.full_results)
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(self.full_results, key="match_info")
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results, key="only_fine_match_info"
        )
        assert temp_results is not None

    def test_recalc_shifts_indexes(self):
        temp_results = self.ada.recalc_shifts(self.align_fing_results, match_index=1)
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(self.full_results, match_index=1)
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results, key="match_info", match_index=1
        )
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results, key="only_fine_match_info", match_index=1
        )
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results, match_index=1, fine_match_index=1
        )
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results, key="match_info", match_index=1, fine_match_index=1
        )
        assert temp_results is not None

        temp_results = self.ada.recalc_shifts(
            self.full_results,
            key="only_fine_match_info",
            match_index=1,
            fine_match_index=1,
        )
        assert temp_results is not None

    def test_write_from_results(self, tmpdir):
        self.ada.write_shifts_from_results(self.full_results, test_folder_eig, tmpdir)
        self.ada.write_shifts_from_results(
            self.full_results, test_folder_eig, tmpdir, write_extension=".mp3"
        )
        # sources from original file location
        self.ada.write_shifts_from_results(
            self.full_results, None, tmpdir, write_extension=".mp3"
        )
        self.ada.write_shifts_from_results(
            self.full_results, "no errors just prints", tmpdir, write_extension=".mp3"
        )
# Smoke-test script: drive a Game through the three community-card
# dealing stages (flop, turn, river) back to back.
from game import Game

# NOTE(review): constructed with an empty player list — presumably valid
# for exercising the dealing logic alone; confirm against Game.__init__.
g = Game([])
g.flop()   # first dealing stage
g.turn()   # second dealing stage
g.river()  # final dealing stage
from three.core import Uniform, UniformList

class Fog(object):
    """Linear distance fog.

    The effect is applied with 0% opacity at startDistance from the
    camera, increasing linearly to 100% opacity at endDistance, and is
    exposed to shaders through the uniforms registered here.
    """

    def __init__(self, startDistance=1, endDistance=10, color=[0,0,0]):
        # (glsl type, uniform name, initial value) for every uniform the
        # fog shader code expects.
        uniformData = [
            ("bool",  "useFog",           1),
            ("float", "fogStartDistance", startDistance),
            ("float", "fogEndDistance",   endDistance),
            ("vec3",  "fogColor",         color),
        ]
        self.uniformList = UniformList()
        for glslType, name, value in uniformData:
            self.uniformList.addUniform( Uniform(glslType, name, value) )
# -*- coding: utf-8 -*- import unittest from matplotlib import rcParams import matplotlib.pyplot as plt import numpy as np from pprint import pprint as print __updated__ = "2021-06-11" class TestFuncat2TestCase(unittest.TestCase): def test_rank(self): my_array = np.array([[1, 56, 55, 15], [5, 4, 33, 53], [3, 6, 7, 19]]) sorted_array = np.argsort(my_array, axis=0) print(f"These are ranks of array values: axis=0 \n {sorted_array}") def test_rank2(self): my_array = np.array([[1, 56, 55, 15], [5, 4, 33, 53], [3, 6, 7, 19]]) sorted_array = np.argsort(my_array, axis=1) print(f"These are ranks of array values: axis=1 \n {sorted_array}") def test_rank3(_self_training): array = np.array([4, 2, 7, 1]) order = array.argsort() ranks = order.argsort() def test_rank4(self): array = np.array([4, 2, 7, 1]) temp = array.argsort() ranks = np.empty_like(temp) ranks[temp] = np.arange(len(array)) def test_rank5(self): a = np.array([4, 1, 6, 8, 4, 1, 6]) # a = np.array([4, 2, 7, 2, 1]) rank = a.argsort().argsort() print(f"rank:{rank}") unique, inverse = np.unique(a, return_inverse=True) print(f"unique, inverse = :{unique}, {inverse}") unique_rank_sum = np.zeros_like(unique) np.add.at(unique_rank_sum, inverse, rank) unique_count = np.zeros_like(unique) np.add.at(unique_count, inverse, 1) unique_rank_mean = unique_rank_sum.astype(np.float) / unique_count rank_mean = unique_rank_mean[inverse] print(rank_mean) def test_rank6(self): def ranks(v): t = np.argsort(v) r = np.empty(len(v), int) r[t] = np.arange(len(v)) for i in range(1, len(r)): if v[t[i]] <= v[t[i - 1]]: r[t[i]] = r[t[i - 1]] return r # test it a = np.array([4, 1, 6, 8, 4, 1, 6]) print(sorted(zip(ranks(a), a))) def test_rank7(self): x = np.array([3, 1, np.nan, 2]) print(f"x={x}") print(f"sorted x = {x[x.argsort()]}") print(f"sort with np.nan:{x.argsort().argsort()}") def test_transpose(self): t = np.arange(4) # 插入值0-3 print(f"原始:{t}") print(f"转置对于一维数组而言,np.transpose()是不起作用的:{ t.transpose()}") def test_transpose2(self): t = 
np.arange(16).reshape(4, 4) # 插入0-15,形状为4*4 print(f"原始:{t}") print( f"对于二维数组,数组两个轴axis为(x,y),对应的下标为(0,1),np.transpose()传入的参数为(1,0),即将原数组的0,1轴互换。综上,对二维数组的transpose操作就是对原数组的转置操作。:{ t.transpose()}") def test_transpose3(self): """二维以上的维数组进行transpose的话,不传参则默认将维度反序 即将原数组的各个axis进行reverse一下,three原始axis排列为(0,1,2),那numpy.transpose()默认的参数为(2,1,0)得到转置后的数组的视图,不影响原数组的内容以及大小。 我们一步一步来分析这个过程:axis(0,1,2)———>axis(2,1,0) ,transpose后的数组相对于原数组来说,相当于交换了原数组的0轴和2轴。ndarray.shape (2,3,3) ->(3,3,2) (2, 1, 0) 分别代表 维度d,  行l, 列c #对原始three数组的位置索引下标写出来,如下: A=[        [ [ (0,0,0) , (0,0,1) , (0,0,2)],        [ (0,1,0) , (0,1,1) , (0,1,2)],        [ (0,2,0) , (0,2,1) , (0,2,2)]],        [[ (1,0,0) , (1,0,1) , (1,0,2)],         [ (1,1,0) , (1,1,1) , (1,1,2)],         [ (1,2,0) , (1,2,1) , (1,2,2)]]   ] #接着把上述每个三元组的第一个数和第三个数进行交换,得到以下的数组 B=[[[ (0,0,0) , (1,0,0) , (2,0,0)],   [ (0,1,0) , (1,1,0) , (2,1,0)],   [ (0,2,0) , (1,2,0) , (2,2,0)]],   [[ (0,0,1) , (1,0,1) , (2,0,1)],   [ (0,1,1) , (1,1,1) , (2,1,1)],   [ (0,2,1) , (1,2,1) , (2,2,1)]]] #最后在原数组中把B对应的下标的元素,写到相应的位置,如(0,2,1)代表放置到d = 0,行 = 2,列 = 1 #对比看一下,这是原数组 [[[ 0,  1,  2],   [ 3,  4,  5],   [ 6,  7,  8]],   [[ 9, 10, 11],    [12, 13, 14],    [15, 16, 17]]] # 按照B的映射关系得到最终的数组。 C=[[[ 0,  9],   [ 3,  12],   [ 6,  15]],   [[ 1, 10],    [4, 13],    [7, 16]]    [[ 2, 11],    [5, 14],    [8, 17]] ] # 最终的结果也就是数组C 再看自己定义的转置格式: arr = np.arange(24).reshape(3, 4, 2) print(arr) tran_arr = np.transpose(arr, (1, 0, 2)) # axis索引(0,1,2)变为(1,0,2) print(tran_arr) 因为索引号由(0,1,2)变成了(1,0,2),axis第一个和第二个交换,shape 由(3,4,2)变成了(4,3,2),可知结果矩阵为d = 4,行3,列2。等效于 arr = np.arange(24).reshape(3, 4, 2) np.swapaxes(arr,0,1) #交换axis 0,1 再展开矩阵的位置下标,每个元素交换第一个和第二个,得到最终的位置下标。 输出结果: [[[ 0  1]   [ 2  3]   [ 4  5]   [ 6  7]]  [[ 8  9]   [10 11]   [12 13]   [14 15]]  [[16 17]   [18 19]   [20 21]   [22 23]]] [[[ 0  1]   [ 8  9]   [16 17]]  [[ 2  3]   [10 11]   [18 19]]  [[ 4  5]   [12 13]   [20 21]]   [[ 6 7 ]   [14 15]   [22 23]]] 一般用reshape()进行维度转换比较多,直接传入新的维度就行,而不是用下标代替    arr = 
arr.reshape(4,3,2)  但是实际上二者是有很大区别的,transpose()会将数组进行转置,而reshape()则是按照数组原有的排布顺序,重新按照新维度生成一个依然有序的数组,从以上两图也能看出来 """ t = np.arange(18).reshape(2, 3, 3) print(f"原始:{t}") print("二维以上的维数组进行transpose的话,不传参则默认将维度反序;") print( f"对于三维数组,数组两个轴axis为(x,y),对应的下标为(0,1),np.transpose()传入的参数为(1,0),即将原数组的0,1轴互换。综上,对二维数组的transpose操作就是对原数组的转置操作。:{ t.transpose()}") def test_concatenate(self): # Program to concatenate two 2D arrays column-wise # Creating two 2D arrays arr1 = np.arange(1, 10).reshape(3, 3) arr2 = np.arange(10, 19).reshape(3, 3) print(f"arr1: {arr1}") print(f"arr2: {arr2}") # Concatenating operation # axis = 1 implies that it is being done column-wise arr = np.concatenate((arr1, arr2), axis=1) print(f"np.concatenate((arr1, arr2), axis=1): {arr}") # axis = 0 implies that it is being done row-wise arr = np.concatenate((arr1, arr2), axis=0) print(f"np.concatenate((arr1, arr2), axis=0): {arr}") def test_stack(self): arr1 = np.arange(1, 10).reshape(3, 3) arr2 = np.arange(10, 19).reshape(3, 3) print(f"arr1: {arr1}") print(f"arr2: {arr2}") # Concatenating operation # axis = 1 implies that it is being # done row-wise arr = np.stack((arr1, arr2), axis=1) print(f"np.stack((arr1, arr2), axis=1): {arr}") # Concatenating operation # axis = 2 implies that it is being done # along the height arr = np.stack((arr1, arr2), axis=2) print(f"np.stack((arr1, arr2), axis=2): {arr}") def test_hstack(self): arr1 = np.arange(1, 10).reshape(3, 3) arr2 = np.arange(10, 19).reshape(3, 3) print(f"arr1: {arr1}") print(f"arr2: {arr2}") # Concatenating operation arr = np.hstack((arr1, arr2)) print(f"np.hstack((arr1, arr2)): {arr}") def test_vstack(self): arr1 = np.arange(1, 10).reshape(3, 3) arr2 = np.arange(10, 19).reshape(3, 3) print(f"arr1: {arr1}") print(f"arr2: {arr2}") # Concatenating operation arr = np.vstack((arr1, arr2)) print(f"np.vstack(arr1, arr2): {arr}") def test_dstack(self): arr1 = np.arange(1, 10).reshape(3, 3) arr2 = np.arange(10, 19).reshape(3, 3) print(f"arr1: {arr1}") print(f"arr2: 
{arr2}") # Concatenating operation arr = np.dstack((arr1, arr2)) print(f"np.dstack(arr1, arr2): {arr}") if __name__ == '__main__': unittest.main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 20:39:40 2020

@author: kant

# Daily Challenge #2 - String Diamond

Return a string that displays a diamond shape built from asterisk ("*")
characters.  The input number is the count of asterisks on the middle
line; each line above and below is centered and has two fewer asterisks
than the line nearer the middle, down to a single asterisk at the top
and bottom.  Return None if the input is an even number or a negative
number.
"""


def diamond(n: int):
    """Return the diamond of asterisks whose widest row has *n* stars.

    Rows are left-padded with spaces (no trailing padding, matching the
    kata's expected output) and joined with newlines.

    Returns:
        The diamond as a single string, or None for even or negative
        input (the challenge requires None for BOTH cases — the
        negative-number check was previously missing, so e.g.
        diamond(-3) silently returned an empty string).
    """
    if n < 0 or n % 2 == 0:
        return None
    half = (n - 1) // 2
    rows = []
    # Top half including the middle row: 1, 3, ..., n asterisks,
    # indented so every row is centered under the widest one.
    for row, stars in enumerate(range(1, n + 1, 2)):
        rows.append(' ' * (half - row) + '*' * stars)
    # Bottom half: n-2, n-4, ..., 1 asterisks, indentation growing again.
    for row, stars in enumerate(range(n - 2, 0, -2), start=1):
        rows.append(' ' * row + '*' * stars)
    return '\n'.join(rows)
import stripe
from stripe import api_requestor
from stripe import util
from async_stripe.api_resources.abstract import patch_custom_methods


async def capture_patch(self, idempotency_key=None, **params):
    """Async replacement for stripe.Charge.capture.

    POSTs to <charge>/capture, refreshes this object in place from the
    response, and returns self so calls can be chained.
    """
    url = self.instance_url() + "/capture"
    headers = util.populate_headers(idempotency_key)
    self.refresh_from(await self.request("post", url, params, headers))
    return self


async def refund_patch(self, idempotency_key=None, **params):
    """Async replacement for stripe.Charge.refund.

    POSTs to <charge>/refund and refreshes this object from the
    response.  Returns self.
    """
    url = self.instance_url() + "/refund"
    headers = util.populate_headers(idempotency_key)
    self.refresh_from(await self.request("post", url, params, headers))
    return self


async def update_dispute_patch(self, idempotency_key=None, **params):
    """Async replacement for stripe.Charge.update_dispute.

    Issues the POST through a dedicated APIRequestor (bound to this
    object's key/version/account) and folds the raw response back in
    under the "dispute" key so only this charge's dispute attribute is
    refreshed.  Returns the updated dispute object.
    """
    requestor = api_requestor.APIRequestor(
        self.api_key,
        api_version=self.stripe_version,
        account=self.stripe_account,
    )
    url = self.instance_url() + "/dispute"
    headers = util.populate_headers(idempotency_key)
    response, api_key = await requestor.request("post", url, params, headers)
    # partial=True: update only the "dispute" attribute, keep the rest.
    self.refresh_from({"dispute": response}, api_key, True)
    return self.dispute


async def close_dispute_patch(self, idempotency_key=None, **params):
    """Async replacement for stripe.Charge.close_dispute.

    Same shape as update_dispute_patch, but POSTs to
    <charge>/dispute/close.  Returns the updated dispute object.
    """
    requestor = api_requestor.APIRequestor(
        self.api_key,
        api_version=self.stripe_version,
        account=self.stripe_account,
    )
    url = self.instance_url() + "/dispute/close"
    headers = util.populate_headers(idempotency_key)
    response, api_key = await requestor.request("post", url, params, headers)
    self.refresh_from({"dispute": response}, api_key, True)
    return self.dispute


async def mark_as_fraudulent_patch(self, idempotency_key=None):
    """Async replacement for stripe.Charge.mark_as_fraudulent.

    Updates the charge's fraud_details.user_report to "fraudulent" via a
    plain POST to the charge URL.  Returns self.
    """
    params = {"fraud_details": {"user_report": "fraudulent"}}
    url = self.instance_url()
    headers = util.populate_headers(idempotency_key)
    self.refresh_from(await self.request("post", url, params, headers))
    return self


async def mark_as_safe_patch(self, idempotency_key=None):
    """Async replacement for stripe.Charge.mark_as_safe.

    Updates the charge's fraud_details.user_report to "safe" via a plain
    POST to the charge URL.  Returns self.
    """
    params = {"fraud_details": {"user_report": "safe"}}
    url = self.instance_url()
    headers = util.populate_headers(idempotency_key)
    self.refresh_from(await self.request("post", url, params, headers))
    return self
# Monkey-patch the synchronous stripe.Charge instance methods with the
# async coroutines defined above.
stripe.Charge.capture = capture_patch
stripe.Charge.refund = refund_patch
stripe.Charge.update_dispute = update_dispute_patch
stripe.Charge.close_dispute = close_dispute_patch
stripe.Charge.mark_as_fraudulent = mark_as_fraudulent_patch
stripe.Charge.mark_as_safe = mark_as_safe_patch

# Regenerate the class-level custom-method wrappers (e.g.
# stripe.Charge.capture(id, ...)) so they go through the async path too.
# NOTE(review): only "capture" is listed here — presumably the other
# patched methods have no class-level counterpart; confirm against
# async_stripe's patch_custom_methods.
custom_methods = [
    {"name": "capture", "http_verb": "post"},
]
patch_custom_methods(stripe.Charge, custom_methods)
# Copyright (c) 2018, Oracle and/or its affiliates. # Copyright (C) 1996-2017 Python Software Foundation # # Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 import seq_tests #import pickle from compare import CompareTest class TupleTest(seq_tests.CommonTest): type2test = tuple def test_constructors(self): super().test_constructors() # calling built-in types without argument must return empty self.assertEqual(tuple(), ()) t0_3 = (0, 1, 2, 3) t0_3_bis = tuple(t0_3) self.assertTrue(t0_3 is t0_3_bis) self.assertEqual(tuple([]), ()) self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3)) self.assertEqual(tuple(''), ()) self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm')) def test_truth(self): super().test_truth() self.assertTrue(not ()) self.assertTrue((42, )) def test_len(self): super().test_len() self.assertEqual(len(()), 0) self.assertEqual(len((0,)), 1) self.assertEqual(len((0, 1, 2)), 3) def test_iadd(self): super().test_iadd() u = (0, 1) u2 = u u += (2, 3) self.assertTrue(u is not u2) def test_imul(self): super().test_imul() u = (0, 1) u2 = u u *= 3 self.assertTrue(u is not u2) def test_tupleresizebug(self): # Check that a specific bug in _PyTuple_Resize() is squashed. def f(): for i in range(1000): yield i self.assertEqual(list(tuple(f())), list(range(1000))) # TODO This test is currently faing on the gate. If you run the test separately # it's ok. # def test_hash(self): # See SF bug 942952: Weakness in tuple hash # The hash should: # be non-commutative # should spread-out closely spaced values # should not exhibit cancellation in tuples like (x,(x,y)) # should be distinct from element hashes: hash(x)!=hash((x,)) # This test exercises those cases. # For a pure random hash and N=50, the expected number of occupied # buckets when tossing 252,600 balls into 2**32 buckets # is 252,592.6, or about 7.4 expected collisions. The # standard deviation is 2.73. On a box with 64-bit hash # codes, no collisions are expected. 
Here we accept no # more than 15 collisions. Any worse and the hash function # is sorely suspect. # N=50 # base = list(range(N)) # xp = [(i, j) for i in base for j in base] # inps = base + [(i, j) for i in base for j in xp] + \ # [(i, j) for i in xp for j in base] + xp + list(zip(base)) # collisions = len(inps) - len(set(map(hash, inps))) # self.assertTrue(collisions <= 15) def test_repr(self): l0 = tuple() l2 = (0, 1, 2) a0 = self.type2test(l0) a2 = self.type2test(l2) self.assertEqual(str(a0), repr(l0)) self.assertEqual(str(a2), repr(l2)) self.assertEqual(repr(a0), "()") self.assertEqual(repr(a2), "(0, 1, 2)") def test_repr_large(self): # Check the repr of large list objects def check(n): l = (0,) * n s = repr(l) self.assertEqual(s, '(' + ', '.join(['0'] * n) + ')') check(10) # check our checking code check(1000000) # def test_iterator_pickle(self): # # Userlist iterators don't support pickling yet since # # they are based on generators. # data = self.type2test([4, 5, 6, 7]) # for proto in range(pickle.HIGHEST_PROTOCOL + 1): # itorg = iter(data) # d = pickle.dumps(itorg, proto) # it = pickle.loads(d) # self.assertEqual(type(itorg), type(it)) # self.assertEqual(self.type2test(it), self.type2test(data)) # # it = pickle.loads(d) # next(it) # d = pickle.dumps(it, proto) # self.assertEqual(self.type2test(it), self.type2test(data)[1:]) # def test_reversed_pickle(self): # data = self.type2test([4, 5, 6, 7]) # for proto in range(pickle.HIGHEST_PROTOCOL + 1): # itorg = reversed(data) # d = pickle.dumps(itorg, proto) # it = pickle.loads(d) # self.assertEqual(type(itorg), type(it)) # self.assertEqual(self.type2test(it), self.type2test(reversed(data))) # # it = pickle.loads(d) # next(it) # d = pickle.dumps(it, proto) # self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_no_comdat_folding(self): # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding # optimization causes failures in code that relies on distinct # function addresses. 
class T(tuple): pass with self.assertRaises(TypeError): [3,] + T((1,2)) def assertLess(self, left, right): self.assertTrue(left < right) self.assertTrue(left <= right) self.assertFalse(left>right) self.assertFalse(left>=right) self.assertFalse(left==right) self.assertTrue(left!=right) def test_lexicographic_ordering(self): # Issue 21100 a = self.type2test([1, 2]) b = self.type2test([1, 2, 0]) c = self.type2test([1, 3]) self.assertLess(a, b) self.assertLess(b, c) def test_index(self): super().test_index() t = (0, 1, 2, 3, 4, 5) self.assertEqual(t.index(0, False, True), 0) self.assertRaises(TypeError, t.index, 1, 1.0) self.assertRaises(TypeError, t.index, 1, 1.0, 2.0) self.assertRaises(TypeError, t.index, 1, "a", 2.0) def test_index_class(self): t = (0, 1, 2, 3, 4, 5) class IndexI(): def __index__(self): return 1; class IndexII(): def __index__(self): return 29; self.assertEqual(t.index(3, IndexI()), 3) self.assertEqual(t.index(3, 1, IndexII()), 3) self.assertEqual(t.index(3, IndexI(), IndexII()), 3) class IndexF(): def __index__(self): return 1.0; self.assertRaises(TypeError, t.index, 3, IndexF()) class IndexO(): def __index__(self): return 'a'; self.assertRaises(TypeError, t.index, 3, IndexO()) class IndexI2(): def __index__(self): return IndexI(); self.assertRaises(TypeError, t.index, 3, IndexI2()) self.assertEqual(t.index(0, False, True), 0) def test_getItem_class(self): def raiseTypeError(tuple, index): try: tuple[index] self.assertTrue(False, "Operation {} [{}] should raise TypeError".format(tuple, index)) except TypeError: pass t = (0, 1, 2, 3, 4, 5) self.assertEqual(t[True], 1) class IndexI(): def __index__(self): return 1; self.assertEqual(t[IndexI()], 1) class IndexF(): def __index__(self): return 1.0; raiseTypeError(t, IndexF()) # Tests for Truffle specializations def test_lying_tuple(self): class MyTuple(tuple): def __iter__(self): yield 1 t = (2,) tt = tuple((2,)) self.assertEqual(t,tt) self.assertFalse(t is tt) ttt = tuple(t) self.assertEqual(t,ttt) 
self.assertEqual(tt,ttt) self.assertFalse(ttt is tt) self.assertTrue(ttt is t) m = MyTuple((2,)) mt = MyTuple(t) mm = MyTuple(m) tm = tuple(m) self.assertEqual(m,t) self.assertEqual(m,mt) self.assertNotEqual(m,mm) self.assertNotEqual(tm, m) self.assertNotEqual(tm, mt) self.assertEqual(tm, mm) self.assertFalse(m is t) self.assertFalse(m is mt) self.assertFalse(m is tm) self.assertFalse(m is mm) def test_creating_tuple(self): class MyTuple(tuple): pass def maketuple(t): return tuple(t) a = MyTuple((1,2)) b = tuple(a) self.assertFalse(a is b) b = MyTuple(a) self.assertFalse(a is b) b = tuple((1,2)) self.assertFalse(maketuple(a) is maketuple(b)) self.assertTrue(maketuple(b) is maketuple(b)) self.assertTrue(tuple(b) is b) self.assertFalse(tuple(a) is a) class TupleCompareTest(CompareTest): def test_compare(self): t1 = (1, 2, 3) t2 = (1,2,3,0) t3 = (1,2,3,4) self.comp_eq(t1, t1) self.comp_ne(t1, t2) self.comp_ne(t2, t3) self.comp_ge(t1, t1, True) self.comp_ge(t2, t1, False) self.comp_ge(t3, t2, False) self.comp_ge(t3, t1, False) self.comp_le(t1, t1, True) self.comp_le(t1, t2, False) self.comp_le(t2, t3, False) self.comp_le(t1, t3, False) self.comp_lt(t1, t2) self.comp_lt(t2, t3) self.comp_lt(t1, t3) self.comp_gt(t2, t1) self.comp_gt(t3, t2) self.comp_gt(t3, t1) def test_equal_other(self): def tryWithOtherType(left, right): self.assertFalse(left == right, "Operation {} == {} should be False".format(left, right)) self.assertTrue(left != right, "Operation {} != {} should be True".format(left, right)) t1 = (1, 2, 3) tryWithOtherType(t1, 1) tryWithOtherType(t1, 'hello') tryWithOtherType(t1, False) tryWithOtherType(t1, [1, 2, 3]) tryWithOtherType(t1, {1, 2, 3}) tryWithOtherType(t1, {'one':1, 'two':2, 'three':3}) def test_raiseTypeError(self): def tryWithOtherType(left, right): def raiseTypeError(left, op, right): try: if op == "<": left < right elif op == ">": left > right elif op == "<=": left <= right elif op == ">=": left >= right self.assertTrue(False, "Operation {} {} {} 
should raise TypeError".format(left, op, right)) except TypeError: pass raiseTypeError(left, "<", right) raiseTypeError(left, ">", right) raiseTypeError(left, "<=", right) raiseTypeError(left, ">=", right) t1 = (1, 2, 3) tryWithOtherType(t1, 1) tryWithOtherType(t1, True) tryWithOtherType(t1, 'hello') def test_extendingClass(self): class MyTuple(tuple): def __eq__(self, value): return 'eq' def __ne__(self, value): return value; def __gt__(self, value): return 10 def __lt__(self, value): return 11.11 def __ge__(self, value): return value + 5 def __le__(self, value): r = super().__le__(value) return "OK:" + str(r) t1 = MyTuple((1, 10)) self.assertEqual(t1 == 1, 'eq') self.assertEqual(t1 != 'ne', 'ne') self.assertEqual(t1 > 'ne', 10) self.assertEqual(t1 < 1, 11.11) self.assertEqual(t1 >= 6, 11) self.assertEqual(t1 <= (1, 1), 'OK:False') self.assertEqual(t1 <= (1, 10), 'OK:True') self.assertEqual(t1 <= (1, 10, 0), 'OK:True') def test_slice(self): t1 = tuple(range (1, 22, 2)) s = slice(2, 6) self.assertEqual(t1[s], (5, 7, 9, 11)) def test_same_id(): empty_ids = set([id(tuple()) for i in range(100)]) assert len(empty_ids) == 1
"""Test double linked list implementation.""" import pytest @pytest.fixture def test_lists(): """Dll fixtures.""" from src.dll import DoubleLinkedList empty = DoubleLinkedList() one = DoubleLinkedList(3) multi = DoubleLinkedList([1, 2, 3, 4, 5]) return empty, one, multi def test_node_class(): """Test node class has data.""" from src.dll import Node node = Node(5) assert node.data is 5 def test_list_of_none(test_lists): """Test list of none head and tail is none.""" assert test_lists[0].head is None assert test_lists[0].tail is None def test_list_of_one(test_lists): """Test list of one, head is tail.""" assert test_lists[1].head is test_lists[1].tail def test_list_of_five(test_lists): """Test list of five.""" assert test_lists[2].head.data is 5 assert test_lists[2].tail.data is 1 def test_prev_pointer(test_lists): """Test prev pointer.""" assert test_lists[2].tail.prev.data is 2 def test_next_pointer(test_lists): """Test next pointer.""" assert test_lists[2].head.next.data is 4 def test_push_increases_length(test_lists): """Test push increases length.""" test_lists[0].push(2) assert test_lists[0]._length is 1 def test_push_updates_head(test_lists): """Test push updates head.""" test_lists[1].push(6) assert test_lists[1].head.data is 6 def test_push_points_back(test_lists): """Test old head points to new with prev after a push.""" old_head = test_lists[1].head test_lists[1].push(6) assert test_lists[1].head is old_head.prev def test_pop_reduces_length(test_lists): """Test pop reduces lists.""" old_length = test_lists[2]._length test_lists[2].pop() assert test_lists[2]._length is old_length - 1 def test_pop_removes_head(test_lists): """Test pop removes head.""" new_head = test_lists[2].head.next.data test_lists[2].pop() assert test_lists[2].head.data is new_head def test_pop_removes_prev_pointer(test_lists): """Test pop changes prev pointer.""" test_lists[2].pop() assert test_lists[2].head.prev is None def test_pop_list_one(test_lists): """Test pop decreases 
length.""" test_lists[1].pop() assert test_lists[1]._length is 0 def test_pop_returns_data(test_lists): """Test pop returns data.""" assert test_lists[2].pop() is 5 def test_cant_pop_on_empty_list(test_lists): """Test pop on an empty list raises error.""" with pytest.raises(IndexError, message='Cannot pop from an empty list'): test_lists[0].pop() def test_append_increases_length(test_lists): """Test append increases length.""" test_lists[0].append(2) assert test_lists[0]._length is 1 def test_append_updates_tail(test_lists): """Test append updates tail.""" test_lists[1].append(6) assert test_lists[1].tail.data is 6 def test_append_points_back(test_lists): """Test old tail points to new with prev after a append.""" old_tail = test_lists[1].tail test_lists[1].append(6) assert test_lists[1].tail is old_tail.next def test_append_on_empty_list(test_lists): """Test append updates tail.""" test_lists[0].append(6) assert test_lists[0].tail.data is 6 assert test_lists[0].head.data is 6 def test_append_next_pointer_is_none(test_lists): """Test append next pointer is none.""" test_lists[2].append(6) assert test_lists[2].tail.next is None def test_pop_sequence(test_lists): """Test that entire sequence is returned by successive pops.""" l = [] while True: try: popped_data = test_lists[2].pop() l.append(popped_data) except IndexError: break assert l == [5, 4, 3, 2, 1] def test_push_pop(test_lists): """Push data and pop it off.""" test_lists[1].push(9) popped_data = test_lists[1].pop() assert popped_data is 9 def test_shift_reduces_length(test_lists): """Test shift reduces lists.""" old_length = test_lists[2]._length test_lists[2].shift() assert test_lists[2]._length is old_length - 1 def test_shift_removes_tail(test_lists): """Test shift removes tail.""" new_tail = test_lists[2].tail.prev.data test_lists[2].shift() assert test_lists[2].tail.data is new_tail def test_shift_removes_next_pointer(test_lists): """Test shift changes prev pointer.""" test_lists[2].shift() assert 
test_lists[2].tail.next is None def test_shift_list_one(test_lists): """Test shift decreases length.""" test_lists[1].shift() assert test_lists[1]._length is 0 def test_cant_shift_on_empty_list(test_lists): """Test shift on an empty list raises error.""" with pytest.raises(IndexError, message='Cannot shift from an empty list'): test_lists[0].shift() def test_shift_sequence(test_lists): """Test that entire sequence is returned by successive shifts.""" l = [] while True: try: shifted_data = test_lists[2].shift() l.append(shifted_data) except IndexError: break assert l == [1, 2, 3, 4, 5] def test_shift_append(test_lists): """Append data and shift it off.""" test_lists[1].append(9) shifted_data = test_lists[1].shift() assert shifted_data is 9 def test_remove_middle_of_list(test_lists): """Test remove from middle of list.""" test_lists[2].remove(3) assert test_lists[2]._repr() == [5, 4, 2, 1] def test_remove_head_of_list(test_lists): """Test remove from head of list.""" test_lists[2].remove(5) assert test_lists[2]._repr() == [4, 3, 2, 1] def test_remove_tail_of_list(test_lists): """Test remove from tail of list.""" test_lists[2].remove(1) assert test_lists[2]._repr() == [5, 4, 3, 2] def test_remove_middle_decreases_length(test_lists): """Test remove from middle of list decreases length.""" test_lists[2].remove(3) assert test_lists[2]._length is 4 def test_remove_head_decreases_length(test_lists): """Test remove from head of list decreases length.""" test_lists[2].remove(5) assert test_lists[2]._length is 4 def test_remove_tail_decreases_length(test_lists): """Test remove from tail of list decreases length.""" test_lists[2].remove(1) assert test_lists[2]._length is 4 def test_remove_middle_updates_pointers(test_lists): """Test remove from middle of list updates pointers.""" test_lists[2].remove(3) assert test_lists[2].head.next.next.data is 2 def test_remove_head_pointers(test_lists): """Test remove from head of list changes pointers.""" test_lists[2].remove(5) assert 
test_lists[2].head.data is 4 assert test_lists[2].head.prev is None def test_remove_tail_pointers(test_lists): """Test remove from tail of list changes pointers.""" test_lists[2].remove(1) assert test_lists[2].tail.data is 2 assert test_lists[2].tail.next is None def test_remove_list_of_one_length(test_lists): """Test remove on list of one.""" test_lists[1].remove(3) assert test_lists[1]._length is 0 def test_remove_list_of_one(test_lists): """Test remove on list of one.""" test_lists[1].remove(3) assert test_lists[1].head is None assert test_lists[1].tail is None def test_remove_list_of_none(test_lists): """Test remove on list of none.""" with pytest.raises(ValueError): test_lists[0].remove(3) def test_remove_of_list_false(test_lists): """Test remove from middle of list.""" with pytest.raises(ValueError): test_lists[2].remove(9)
### -------------------------------------- ### Airflow scheduler ### Angel Valera Motos - P2 - CC ### -------------------------------------- ### # incluimos las bibliotecas necesarias from datetime import timedelta from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator from airflow.utils.trigger_rule import TriggerRule from airflow.utils.dates import days_ago import requests import os from sqlalchemy import create_engine import sqlalchemy.dialects.mysql.pymysql import pandas # Inluir biliotecas PIP # Definimos los argumentos del DAG # ------------------------------------------------------------------------------------------------- default_args = { 'owner': 'airflow', 'depends_on_past': False, 'start_date': days_ago(2), 'email': ['airflow@example.com'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(minutes=5), } #Inicialización del grafo DAG de tareas para el flujo de trabajo # ------------------------------------------------------------------------------------------------- dag = DAG( 'Flujo_P2_CC', default_args=default_args, description='Grafo que establece el flujo de tareas para la práctica 2 de CC', schedule_interval=timedelta(days=1), ) # Funciones auxiliares # ------------------------------------------------------------------------------------------------- def prepararDirectorio(pathDir): if not os.path.isdir(pathDir): os.mkdir(pathDir) def limpiarYCombinarDatos(): DF_Hum = pandas.read_csv( "/tmp/workflow/humidity.csv")[['datetime', 'San Francisco']] DF_Temp = pandas.read_csv( "/tmp/workflow/temperature.csv")[['datetime', 'San Francisco']] # Renombrar las columnas: DF_Hum = DF_Hum.rename( columns={'datetime': 'DATE', 'San Francisco': 'HUM'}) DF_Temp = DF_Temp.rename( columns={'datetime': 'DATE', 'San Francisco': 'TEMP'}) # Merge: NuevoDF = pandas.merge(DF_Temp, DF_Hum, on='DATE') # Borrar los NaN: NuevoDF = NuevoDF.dropna() # 
Exportamos el nuevo DataFrame a fichero CSV NuevoDF.to_csv(r'/tmp/workflow/datos.csv', index=False) def AlmacenarDatos(): Datos = pandas.read_csv("/tmp/workflow/datos.csv") sqlEngine = create_engine( 'mysql+pymysql://angelvm:003577@127.0.0.1/SanFrancisco', pool_recycle=3600) dbConnection = sqlEngine.connect() tableName = 'DatosTemHum' try: Datos.to_sql(tableName, dbConnection, if_exists='replace') except ValueError as vx: print(vx) except Exception as ex: print(ex) else: print("Table %s created successfully." % tableName) finally: dbConnection.close() # ------------------------------------------------------------------------------------------------- # Tarea 1: Preparar entorno PrepararEntorno = PythonOperator( task_id='PrepararEntorno', python_callable=prepararDirectorio, op_kwargs={'pathDir': '/tmp/workflow/'}, dag=dag, ) # Tarea 2-A: Descargar datos de Humedad DescargarHumedad = BashOperator( task_id='DescargarHumedad', depends_on_past=False, bash_command='wget --output-document /tmp/workflow/humidity.csv.zip https://github.com/manuparra/MaterialCC2020/raw/master/humidity.csv.zip', dag=dag ) # Tarea 2-B: Descargar datos de Temperatura DescargarTemperatura = BashOperator( task_id='DescargarTemperatura', depends_on_past=False, bash_command='curl -L -o /tmp/workflow/temperature.csv.zip https://github.com/manuparra/MaterialCC2020/raw/master/temperature.csv.zip', dag=dag ) # Tarea 3-A: Descomprimir datos de humedad DescomprimirHumedad = BashOperator( task_id='DescomprimirHumedad', depends_on_past=False, #trigger_rule=TriggerRule.ALL_SUCCESS, bash_command='unzip -o /tmp/workflow/humidity.csv.zip -d /tmp/workflow ', dag=dag ) # Tarea 3-B: Descomprimir datos de Temperatura DescomprimirTemperatura = BashOperator( task_id='DescomprimirTemperatura', depends_on_past=False, #trigger_rule=TriggerRule.ALL_SUCCESS, bash_command='unzip -o /tmp/workflow/temperature.csv.zip -d /tmp/workflow ', dag=dag ) # Tarea 4: Eliminamos los ficheros comprimidos una vez que se han descomprimido 
# Task 4 (cont.): remove the compressed files once they have been extracted
LimpiarZIPEntorno = BashOperator(
    task_id='LimpiarZIPEntorno',
    depends_on_past=False,
    trigger_rule=TriggerRule.ALL_SUCCESS,
    bash_command='rm /tmp/workflow/temperature.csv.zip; rm /tmp/workflow/humidity.csv.zip',
    dag=dag
)

# Task 5: merge the CSV files into a single dataset
CombinarDatos = PythonOperator(
    task_id='CombinarDatos',
    python_callable=limpiarYCombinarDatos,
    dag=dag,
)

# Task 6: delete the old per-measurement files, keeping only the merged one
LimpiarCSVEntorno = BashOperator(
    task_id='LimpiarCSVEntorno',
    depends_on_past=False,
    trigger_rule=TriggerRule.ALL_SUCCESS,
    bash_command='rm /tmp/workflow/temperature.csv; rm /tmp/workflow/humidity.csv',
    dag=dag
)

# Task 7: stop all services if they were running, then (re)build the containers
PararServicios = BashOperator(
    task_id='PararServicios',
    depends_on_past=False,
    bash_command='docker-compose -f ~/airflow/dags/CC1920-Practica2/docker-compose.yml down ',
    dag=dag,
)
ConstruirServicios = BashOperator(
    task_id='ConstruirServicios',
    depends_on_past=False,
    bash_command='docker-compose -f ~/airflow/dags/CC1920-Practica2/docker-compose.yml build ',
    dag=dag,
)

# Task 8: start the MariaDB service
# NOTE(review): variable name IniciarBD vs task_id 'IniciarDB' — intentional? confirm.
IniciarBD = BashOperator(
    task_id='IniciarDB',
    depends_on_past=False,
    bash_command='docker-compose -f ~/airflow/dags/CC1920-Practica2/docker-compose.yml up -d MariaDB',
    dag=dag,
)

# Task 9: store the merged data into the database.
# NOTE(review): this assignment shadows the AlmacenarDatos *function* defined
# above with the task object. It works because python_callable is evaluated
# before the rebinding, but renaming one of the two would be clearer.
AlmacenarDatos = PythonOperator(
    task_id='AlmacenarDatos',
    depends_on_past=False,
    python_callable=AlmacenarDatos,
    dag=dag,
)

# Task 10: fetch the source code of the first service (branch servicio-V1)
CapturaCodigoFuenteV1 = BashOperator(
    task_id='CapturaCodigoFuenteV1',
    depends_on_past=False,
    bash_command='rm -rf /tmp/workflow/servicioV1/ ;git clone -b servicio-V1 https://github.com/AngelValera/CC1920-Practica2.git /tmp/workflow/servicioV1',
    dag=dag,
)

# Task 11: run the test suite of the first service
TestServicioV1 = BashOperator(
    task_id='TestServicioV1',
    depends_on_past=False,
    bash_command='export HOST=localhost && cd /tmp/workflow/servicioV1/API && pytest Test_v1.py',
    dag=dag,
)

# Task 12: bring up the first service
LevantarServicioV1 = BashOperator(
    task_id='LevantarServicioV1',
    depends_on_past=False,
    bash_command='docker-compose -f ~/airflow/dags/CC1920-Practica2/docker-compose.yml up -d version1',
    dag=dag,
)

# Task 13: fetch the source code of the second service (branch servicio-V2)
CapturaCodigoFuenteV2 = BashOperator(
    task_id='CapturaCodigoFuenteV2',
    depends_on_past=False,
    bash_command='rm -rf /tmp/workflow/servicioV2/ ;git clone -b servicio-V2 https://github.com/AngelValera/CC1920-Practica2.git /tmp/workflow/servicioV2',
    dag=dag,
)

# Task 14: run the test suite of the second service.
# SECURITY NOTE(review): the AEMET API key is committed in plaintext here;
# it should be moved to an Airflow Variable/Connection or environment secret.
TestServicioV2 = BashOperator(
    task_id='TestServicioV2',
    depends_on_past=False,
    bash_command='export API_KEY="eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhdmFsZXJhbUBjb3JyZW8udWdyLmVzIiwianRpIjoiNTdkMjAxZjMtYWFhNy00MDI4LTg0ZmYtZGYxNjAyZGZlMWIzIiwiaXNzIjoiQUVNRVQiLCJpYXQiOjE1ODYxMTQ5NzIsInVzZXJJZCI6IjU3ZDIwMWYzLWFhYTctNDAyOC04NGZmLWRmMTYwMmRmZTFiMyIsInJvbGUiOiIifQ.NddscPjToIAsraXAr-ULIp0nhzv-uPH67zAtmVuiUS4" && cd /tmp/workflow/servicioV2/API && pytest Test_v2.py',
    dag=dag,
)

# Task 15: bring up the second service
LevantarServicioV2 = BashOperator(
    task_id='LevantarServicioV2',
    depends_on_past=False,
    bash_command='docker-compose -f ~/airflow/dags/CC1920-Practica2/docker-compose.yml up -d version2',
    dag=dag,
)

# DEPENDENCIES
# -------------------------------------------------------------------------------------------------
# Environment prep fans out to the two downloads and both source checkouts.
PrepararEntorno.set_downstream([CapturaCodigoFuenteV1, CapturaCodigoFuenteV2,
                                DescargarHumedad, DescargarTemperatura])
DescomprimirHumedad.set_upstream(DescargarHumedad)
DescomprimirTemperatura.set_upstream(DescargarTemperatura)
# ZIP cleanup waits for both extractions; then merge, clean, rebuild services.
LimpiarZIPEntorno.set_upstream([DescomprimirHumedad, DescomprimirTemperatura])
CombinarDatos.set_upstream(LimpiarZIPEntorno)
LimpiarCSVEntorno.set_upstream(CombinarDatos)
PararServicios.set_upstream(LimpiarCSVEntorno)
ConstruirServicios.set_upstream(PararServicios)
IniciarBD.set_upstream(ConstruirServicios)
AlmacenarDatos.set_upstream(IniciarBD)
# Service V1 is tested only after the DB is populated; V2 only needs its checkout.
TestServicioV1.set_upstream([AlmacenarDatos, CapturaCodigoFuenteV1])
LevantarServicioV1.set_upstream(TestServicioV1)
TestServicioV2.set_upstream([CapturaCodigoFuenteV2])
LevantarServicioV2.set_upstream([ConstruirServicios, TestServicioV2])
# Generated by Django 3.0.3 on 2020-02-10 22:47 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('helpdesk', '0004_queuequestion'), ] operations = [ migrations.CreateModel( name='TicketUpdate', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')), ('active', models.BooleanField(default=True, verbose_name='ativo')), ('title', models.CharField(blank=True, max_length=100, null=True, verbose_name='Título')), ('comment', models.TextField(blank=True, null=True, verbose_name='Comentário')), ('public', models.BooleanField(blank=True, default=False, null=True, verbose_name='Público')), ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='helpdesk.Ticket', verbose_name='Ticket')), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL, verbose_name='Usuário')), ], options={ 'verbose_name': 'Update', 'verbose_name_plural': 'Updates', 'ordering': ('-created',), }, ), ]
"""A parser for reading VLBI eccentricity vectors from file Description: ------------ Reads the VLBI eccentricity vector from file. """ # Standard library imports from datetime import datetime import re # Midgard imports from midgard.dev import plugins from midgard.parsers._parser_line import LineParser @plugins.register class VlbiEccentricityParser(LineParser): """A parser for reading VLBI eccentricity vectors from file """ def setup_parser(self): def str2date(s): return datetime.strptime(re.sub("[^0-9]", " ", s.decode()), "%Y %m %d %H %M") def empty(s): return s.decode().replace("?", "") return dict( names="name, site_id, start, end, v1, v2, v3, coord_type", dtype=("U10", "U5", object, object, "f8", "f8", "f8", "U6"), delimiter=(10, 5, 18, 18, 12, 12, 12, 6), skip_header=1, skip_footer=1, comments="$", converters={0: empty, 2: str2date, 3: str2date}, autostrip=True, ) def structure_data(self): for item in self._array: key = (str(item["name"]).strip().replace(" ", "_"), str(item["site_id"]).strip()) if item["coord_type"] == "NEU": # Swap NEU to ENU self.data.setdefault(key, {}).setdefault((item["start"], item["end"]), {}).update( dict(vector=(item["v2"], item["v1"], item["v3"]), coord_type="ENU") ) else: self.data.setdefault(key, {}).setdefault((item["start"], item["end"]), {}).update( dict(vector=(item["v1"], item["v2"], item["v3"]), coord_type=item["coord_type"]) ) self.data[key]["name"] = item["name"]
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Helper model functions for retrieving model predictions."""

import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2

from pruning_identified_exemplars.utils import class_level_metrics
from pruning_identified_exemplars.utils import data_input
from pruning_identified_exemplars.utils import resnet_model


def compute_lr(current_epoch, initial_learning_rate, train_batch_size,
               lr_schedule):
  """Computes learning rate schedule.

  Linear warmup driven by the first schedule entry, then a stepwise decay:
  each (mult, start_epoch) pair switches the rate to scaled_lr * mult once
  current_epoch reaches start_epoch.
  """
  # Linear-scaling rule: scale base LR with batch size relative to 256.
  scaled_lr = initial_learning_rate * (train_batch_size / 256.0)

  decay_rate = (
      scaled_lr * lr_schedule[0][0] * current_epoch / lr_schedule[0][1])
  for mult, start_epoch in lr_schedule:
    decay_rate = tf.where(current_epoch < start_epoch, decay_rate,
                          scaled_lr * mult)
  return decay_rate


def train_function(params, loss):
  """Creates the training op that will be optimized by the estimator.

  Builds a Momentum optimizer with the scheduled LR; when pruning is
  enabled, chains the mask-update op after the gradient step. Also emits
  TF2 summaries for loss/LR/epoch bookkeeping.
  """
  global_step = tf.train.get_global_step()
  steps_per_epoch = params["num_train_images"] / params["train_batch_size"]
  current_epoch = (tf.cast(global_step, tf.float32) / steps_per_epoch)
  learning_rate = compute_lr(current_epoch, params["base_learning_rate"],
                             params["train_batch_size"], params["lr_schedule"])
  optimizer = tf.train.MomentumOptimizer(
      learning_rate=learning_rate,
      momentum=params["momentum"],
      use_nesterov=True)

  # Run batch-norm moving-average updates before the gradient step.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(update_ops), tf.name_scope("train"):
    train_op = optimizer.minimize(loss, global_step)

  if params["pruning_method"]:
    pruning_params_string = params["pruning_dict"]
    # Parse pruning hyperparameters
    pruning_hparams = tf.contrib.model_pruning.get_pruning_hparams().parse(
        pruning_params_string)
    # Create a pruning object using the pruning hyperparameters
    pruning_obj = tf.contrib.model_pruning.pruning.Pruning(
        pruning_hparams, global_step=global_step)
    # We override the train op to also update the mask.
    with tf.control_dependencies([train_op]):
      train_op = pruning_obj.conditional_mask_update_op()
    masks = tf.contrib.model_pruning.get_masks()

  with tf2.summary.create_file_writer(params["output_dir"]).as_default():
    with tf2.summary.record_if(True):
      tf2.summary.scalar("loss", loss, step=global_step)
      tf2.summary.scalar("learning_rate", learning_rate, step=global_step)
      tf2.summary.scalar("current_epoch", current_epoch, step=global_step)
      tf2.summary.scalar("steps_per_epoch", steps_per_epoch, step=global_step)
      tf2.summary.scalar(
          "weight_decay", params["weight_decay"], step=global_step)
      if params["pruning_method"]:
        # NOTE(review): `masks` is a list of tensors; tf2.summary.scalar
        # expects a scalar — confirm this summary actually works as intended.
        tf2.summary.scalar("pruning_masks", masks, step=global_step)

  tf.summary.all_v2_summary_ops()
  return train_op


def model_fn_w_pruning(features, labels, mode, params):
  """The model_fn for ResNet-50 with pruning.

  Args:
    features: A float32 batch of images.
    labels: A int32 batch of labels.
    mode: Specifies whether training or evaluation.
    params: parameters passed to the eval function.

  Returns:
    A EstimatorSpec for the model
  """

  task = params["task"]
  # Some tasks pack (images, labels) into `features` instead of using `labels`.
  if task in ["pie_dataset_gen", "imagenet_predictions"]:
    images = features[0]
    labels = features[1]
  else:
    images = features

  if task in [
      "pie_dataset_gen", "robustness_imagenet_c", "robustness_imagenet_a",
      "ckpt_prediction"
  ]:
    human_labels = features["human_label"]

  mean_rgb = params["mean_rgb"]
  stddev_rgb = params["stddev_rgb"]
  # Normalize the image to zero mean and unit variance.
  images -= tf.constant(mean_rgb, shape=[1, 1, 3], dtype=images.dtype)
  images /= tf.constant(stddev_rgb, shape=[1, 1, 3], dtype=images.dtype)

  network = resnet_model.resnet_50(
      num_classes=1000,
      pruning_method=params["pruning_method"],
      data_format="channels_last")

  logits = network(
      inputs=images, is_training=(mode == tf.estimator.ModeKeys.TRAIN))

  one_hot_labels = tf.one_hot(labels, params["num_label_classes"])

  cross_entropy = tf.losses.softmax_cross_entropy(
      logits=logits,
      onehot_labels=one_hot_labels,
      label_smoothing=params["label_smoothing"])

  # Add weight decay to the loss for non-batch-normalization variables.
  loss = cross_entropy + params["weight_decay"] * tf.add_n([
      tf.nn.l2_loss(v)
      for v in tf.trainable_variables()
      if "batch_normalization" not in v.name
  ])

  # we run predictions on gpu since ordering is very important and
  # thus we need to run with batch size 1 (not enabled on tpu)
  if mode == tf.estimator.ModeKeys.PREDICT:
    train_op = None
    eval_metrics = None
    predicted_probability = tf.cast(
        tf.reduce_max(tf.nn.softmax(logits, name="softmax"), axis=1),
        tf.float32)
    _, top_5_indices = tf.nn.top_k(tf.to_float(logits), k=5)
    predictions = {
        "predictions": tf.argmax(logits, axis=1),
        "true_class": labels,
        "predicted_probability": predicted_probability,
        "top_5_indices": top_5_indices
    }

  if mode == tf.estimator.ModeKeys.TRAIN:
    train_op = train_function(params, loss)
    eval_metrics = None
    predictions = None

  if mode == tf.estimator.ModeKeys.EVAL:
    train_op = None
    predictions = None
    params_eval = {
        "num_label_classes": params["num_label_classes"],
        "log_class_level_summaries": False
    }
    # NOTE(review): `human_labels` is only bound for the four tasks listed
    # above; EVAL with e.g. task="imagenet_eval" would raise NameError here.
    # Confirm eval is only ever run with a task that sets human_labels.
    eval_metrics = class_level_metrics.create_eval_metrics(
        labels, logits, human_labels, params_eval)

  return tf.estimator.EstimatorSpec(
      predictions=predictions,
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metrics)


def initiate_task_helper(model_params,
                         ckpt_directory=None,
                         pruning_params=None):
  """Get all predictions for eval.

  Args:
    model_params: dict of parameters forwarded to model_fn_w_pruning.
    ckpt_directory: model checkpoint directory containing event file
    pruning_params: pruning hyperparameter string/dict, used only when
      training.

  Returns:
    pd.DataFrame containing metrics from event file
    (NOTE(review): the training branch returns None implicitly.)
  """

  if model_params["task"] != "imagenet_training":
    classifier = tf.estimator.Estimator(
        model_fn=model_fn_w_pruning, params=model_params)
    if model_params["task"] in ["imagenet_predictions"]:
      predictions = classifier.predict(
          input_fn=data_input.input_fn, checkpoint_path=ckpt_directory)
      return predictions

    if model_params["task"] in [
        "robustness_imagenet_a", "robustness_imagenet_c", "robustness_pie",
        "imagenet_eval", "ckpt_prediction"
    ]:
      eval_steps = model_params["num_eval_images"] // model_params["batch_size"]
      tf.logging.info("start computing eval metrics...")
      classifier = tf.estimator.Estimator(
          model_fn=model_fn_w_pruning, params=model_params)
      evaluation_metrics = classifier.evaluate(
          input_fn=data_input.input_fn,
          steps=eval_steps,
          checkpoint_path=ckpt_directory)
      tf.logging.info("finished per class accuracy eval.")
      return evaluation_metrics
  else:
    # Training path: wire in the pruning schedule and periodic checkpoints.
    model_params["pruning_dict"] = pruning_params
    run_config = tf.estimator.RunConfig(
        save_summary_steps=300,
        save_checkpoints_steps=1000,
        log_step_count_steps=100)
    classifier = tf.estimator.Estimator(
        model_fn=model_fn_w_pruning, config=run_config, params=model_params)
    tf.logging.info("start training...")
    classifier.train(
        input_fn=data_input.input_fn,
        max_steps=model_params["num_train_steps"])
    tf.logging.info("finished training.")
from django.shortcuts import render from mailer.models import Person, MailTemplate from django.http import HttpResponse from django.core.mail import send_mail import csv from google_forms_mailer import settings # Create your views here. def home(request): ''' Home page view ''' template = "index.html" return render(request, template, {}) def mailer(request, template = "mailer.html"): ''' View for sending the mail to all the participants after reading the csv file ''' if request.method == "POST": import csv f = request.FILES['file'] with open('google_form_data.csv', 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) with open('google_form_data.csv', 'rb') as f: # row[1] = name # row[15] = email reader = csv.reader(f) for row in reader: message = MailTemplate.objects.get(id = 1).message p = Person.objects.create(name = row[1]) p.save() message = "Hello "+str(p.name)+", \n \n"+"Thanks for registering for the TechnoGrail-2015 \n" + "Your registration Number is " + str(p.id) + "\n \n" + str(message) send_mail("Welcome to TechnoGrail-2015", str(message), 'desh.py@gmail.com', [str(row[15])], fail_silently=False) print message return HttpResponse("<h2>Email sent to all new participants successfully</h2>") return render(request, template, {})
import glob
import json  # stdlib json: only plain load/dump is needed here, so the
             # hard dependency on flask.json was dropped.
import os

APP_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')

# Annotation-count categories, in the positional order Count() expects.
_CATEGORIES = ('mitosis', 'non_mitosis', 'apoptosis', 'tumor',
               'non_tumor', 'lumen', 'non_lumen')


class FileHandler(object):
    """Tracks which images a user has scored and aggregates annotation counts.

    Score files live in scores/<image>_<username>.json; annotation files
    live in annotations/*.json.
    """

    def __init__(self, username, image_names):
        # Ensure the output directories exist (idempotent).
        for sub in ('annotations', 'scores', 'tmp/annotations', 'tmp/scores'):
            path = os.path.join(APP_ROOT, sub)
            if not os.path.exists(path):
                os.makedirs(path)
        # Image names this user has already scored.
        self.file_names = [
            os.path.basename(x).split('_' + username + '.')[0]
            for x in glob.glob(APP_ROOT + '/scores/*_' + username + '.json')
        ]
        # Images the user has not scored yet (set difference: order unspecified).
        self.file_names_unscored = list(set(image_names) - set(self.file_names))

    def statusAnnotation(self):
        """Sum per-category counts across all saved annotation files.

        Returns the totals as a plain dict (Count(...).toJSON()).
        """
        totals = dict.fromkeys(_CATEGORIES, 0)
        for f in glob.glob(APP_ROOT + '/annotations/*.json'):
            # Fix: self.read() closes the file handle (the old
            # json.load(open(f)) leaked descriptors).
            data = self.read(f)
            for key in _CATEGORIES:
                totals[key] += data[key]
        c = Count(*(totals[key] for key in _CATEGORIES))
        return c.toJSON()

    def read(self, filename):
        """Load and return the JSON content of *filename* (handle is closed)."""
        with open(filename, 'r') as fp:
            return json.load(fp)

    def write(self, data, filename):
        """Serialize *data* as JSON into *filename* (handle is closed)."""
        with open(filename, 'w') as fp:
            json.dump(data, fp)


# NOTE(review): Score and Count inherit FileHandler but never use it (they
# define their own __init__); the base class is kept only for backward
# compatibility with any isinstance checks.
class Score(FileHandler):
    """A single score entry for one image location."""

    def __init__(self, loc, score, note, time):
        self.loc = loc
        self.score = score
        self.note = note
        self.time = time

    def toJSON(self):
        return {'loc': self.loc, 'score': self.score,
                'note': self.note, 'time': self.time}


class Count(FileHandler):
    """Aggregated annotation counts per category."""

    def __init__(self, mitosis, non_mitosis, apoptosis, tumor, non_tumor,
                 lumen, non_lumen):
        self.mitosis = mitosis
        self.non_mitosis = non_mitosis
        self.apoptosis = apoptosis
        self.tumor = tumor
        self.non_tumor = non_tumor
        self.lumen = lumen
        self.non_lumen = non_lumen

    def toJSON(self):
        return {'mitosis': self.mitosis, 'non_mitosis': self.non_mitosis,
                'apoptosis': self.apoptosis, 'tumor': self.tumor,
                'non_tumor': self.non_tumor, 'lumen': self.lumen,
                'non_lumen': self.non_lumen}
#!/usr/bin/env python
#
# Copyright 2018 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# pick_shards.py will pick split points at evenly-spaced genomic locations.
# It uses the input's header to determine the size of each contig.
#
# example usage:
#
# $ python pick_shards.py 20000 ~/mydata/large_sample.g.vcf > shards_file
#
# Here, "20000" is the desired number of shards.
#
# The output format is the following. Each line is a shard.
# Each shard is a tab-separated sequence of intervals.
# Each interval is written as `contig` <TAB> `start pos (included)` <TAB> `end pos (included)`
#
import re
import sys

# Matches e.g. ##contig=<ID=HLA-DRB1*15:01:01:01,length=11080>
_CONTIG_RE = re.compile(r'##contig=<ID=([^,]*),[ ]?length=([^>]*)>.*')


def parse_contig_line(line):
    """Parse a VCF '##contig=' header line.

    Returns (contig_name, length) or None when the line does not match.
    """
    match = _CONTIG_RE.match(line)
    if not match:
        return None
    return match.group(1), int(match.group(2))


def read_contigs(path):
    """Read contig names and lengths from a VCF header.

    Returns (contigs, length): the contig names in file order and a
    name -> length dict. Exits with status 1 on a malformed contig line
    (error goes to stderr so it cannot pollute the shard output on stdout).
    """
    length = {}
    contigs = []
    with open(path) as vcf:
        for line in vcf:
            if not line.startswith('#'):
                break  # header is over
            if line.startswith('##contig='):
                parsed = parse_contig_line(line)
                if parsed is None:
                    sys.stderr.write('Error while parsing: %s' % line)
                    sys.exit(1)
                name, contig_length = parsed
                length[name] = contig_length
                contigs.append(name)
    return contigs, length


def compute_shards(contigs, length, nshards):
    """Split the genome into ~nshards equally sized shards.

    Each shard is a tab-separated sequence of inclusive intervals
    "contig<TAB>start<TAB>end". Fixes two defects of the original:
    * each shard previously covered left+1 bases (inclusive end was
      start+left) while the accumulator only added left — intervals now
      cover exactly `left` bases and the accounting is consistent;
    * on an exact fill to a contig's end, a degenerate empty interval
      (start > end) was emitted into the next shard — now skipped.
    """
    total_length = sum(length[k] for k in length)
    # Integer division (// also under Python 2); at least 1 base per shard.
    shard_length = max(1, total_length // nshards)

    shards = []
    left = shard_length      # how many base pairs we still need in this shard
    total_sofar = 0
    s = []                   # intervals accumulated for the current shard
    for contig in contigs:
        start = 1            # VCF positions are 1-based, end-inclusive
        while True:
            remaining = length[contig] - start + 1
            if remaining <= 0:
                break        # previous shard ended exactly at the contig end
            if remaining < left:
                # This whole contig does not fill the shard; take it all.
                s.append('%s\t%s\t%s' % (contig, start, length[contig]))
                left -= remaining
                total_sofar += remaining
                break
            # The shard is filled: take exactly `left` bases,
            # start .. start+left-1 (inclusive).
            s.append('%s\t%s\t%s' % (contig, start, start + left - 1))
            shards.append('\t'.join(s))
            s = []
            total_sofar += left
            start = start + left
            left = shard_length
            # Special case: last shard absorbs all rounding remainder.
            if len(shards) == nshards - 1:
                left = total_length - total_sofar
    if s:
        # Emit the final (possibly short) shard.
        shards.append('\t'.join(s))
    return shards


def main(argv):
    """CLI entry point: print one shard per line to stdout."""
    if len(argv) != 3:
        print('Usage: python pick_shards.py <number of shards> <vcf file>')
        sys.exit(1)
    nshards = int(argv[1])
    if nshards <= 0:
        print('Usage: python pick_shards.py <number of shards> <vcf file>')
        sys.exit(1)
    contigs, length = read_contigs(argv[2])
    for shard in compute_shards(contigs, length, nshards):
        print(shard)


if __name__ == '__main__':
    main(sys.argv)
#!/usr/bin/env python ############################################################################## # # diffpy.structure by DANSE Diffraction group # Simon J. L. Billinge # (c) 2008 trustees of the Michigan State University. # All rights reserved. # # File coded by: Chris Farrow # # See AUTHORS.txt for a list of people who contributed. # See LICENSE_DANSE.txt for license information. # ############################################################################## """Utilities for making shapes.""" def findCenter(S): """Find the approximate center atom of a structure. The center of the structure is the atom closest to (0.5, 0.5, 0.5) Returns the index of the atom. """ best = -1 bestd = len(S) center = [0.5, 0.5, 0.5] # the cannonical center for i in range(len(S)): d = S.lattice.dist(S[i].xyz, center) if d < bestd: bestd = d best = i return best
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2016 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import threading

from ZODB.utils import u64
from ZODB import POSException
from zope.interface import directlyProvides
from ZODB.interfaces import IStorageWrapper
from zope.interface import implementer
from zope.interface import providedBy

from .registry import Registry
from .url import URL
# NOTE(review): the next three imports are implicit relative imports, which
# only work on Python 2; under Python 3 they should be `from .util import
# string_types` etc.
from util import string_types
from .notavailable import notAvailable
from indexedbuffer import IndexedBuffer

# Eight NUL characters — the "zero" tid/serial sentinel used by ZODB.
z64 = '\0' * 8


@implementer(IStorageWrapper)
class ZOCacheStorage(object):
    """Storage wrapper that mirrors loads/stores into a pluggable cache driver.

    Wraps a base ZODB storage: most methods are copied through verbatim
    (see `copied_methods`), while load/store/tpc_* are intercepted so that
    object records are also written to / served from `self.driver`.
    """

    # Methods forwarded unchanged from the base storage. They are copied as
    # *instance* attributes in __init__, so they bypass this class entirely.
    # NOTE(review): 'references' appears both in this tuple and as a method
    # below — the copied instance attribute shadows the class method, so the
    # db-delegating references() defined later is never called. Also note the
    # 'tcp_begin' typo (for 'tpc_begin') in the commented-out entries.
    copied_methods = (
        'getName', 'getSize', 'history', 'isReadOnly', 'lastTransaction',
        'new_oid', 'sortKey', 'temporaryDirectory',
        'supportsUndo', 'undo', 'undoLog', 'undoInfo',
        # 'tcp_begin', 'tpc_abort', 'tpc_finish', 'tpc_vote', 'checkCurrentSerialInTransaction',  # For proof of concept.
        'loadBefore', 'loadSerial', 'pack', 'restore',
        'openCommittedBlobFile', 'loadBlob', 'iterator', 'storeBlob',
        'restoreBlob', 'references', 'copyTransactionsFrom',
        'record_iternext', 'deleteObject',
    )

    def __init__(self, base, name_or_url=None, driver=None):
        """Wrap *base*; resolve the cache driver from *driver* or a URL.

        name_or_url may be a URL string or URL instance naming a registered
        driver; on any driver resolution failure the base storage is closed
        before raising.
        """
        self._base = base
        self._registry = Registry()
        self._driver = driver
        if self._driver is None:
            if isinstance(name_or_url, string_types):
                url = URL.from_string(name_or_url)
            elif isinstance(name_or_url, URL):
                url = name_or_url
            else:
                # NOTE(review): TypeError would be the conventional choice here.
                raise AttributeError(
                    'Invalid type for name_or_url %r' % name_or_url
                )
            driver_cls = self._registry.drivers.get(url.drivername, None)
            if not driver_cls:
                self._base.close()
                raise KeyError('Unknown driver %r' % url.drivername)
            ok, msg = driver_cls.available()
            if not ok:
                self._base.close()
                raise KeyError(msg)
            self._driver = driver_cls(url)

        # Copy methods and register this storage driver.
        for name in self.copied_methods:
            v = getattr(self._base, name, None)
            if v is not None:
                setattr(self, name, v)

        # Buffer of records stored during the currently-open transaction;
        # flushed into the cache driver at tpc_finish.
        self._tqueue = IndexedBuffer()
        self._commit_lock = threading.Lock()

        # Provide all interfaces self._base provides.
        directlyProvides(self, providedBy(self._base))
        base.registerDB(self)

    # def __getattr__(self, name):
    #     return getattr(self._base, name)

    def __len__(self):
        return len(self._base)

    @property
    def driver(self):
        """The cache driver backing this wrapper."""
        return self._driver

    def tpc_vote(self, transaction):
        return self._base.tpc_vote(transaction)

    def tpc_begin(self, transaction, tid=None, status=' '):
        """Begin two-phase commit; reset the per-transaction record buffer."""
        result = self._base.tpc_begin(transaction, tid, status)
        self._tqueue.reset()
        self._tqueue.transaction = transaction
        return result

    def tpc_abort(self, transaction):
        """Abort two-phase commit; drop any buffered records."""
        result = self._base.tpc_abort(transaction)
        self._tqueue.reset()
        return result

    def tpc_finish(self, transaction, f=None):
        """Finish two-phase commit, copying buffered records into the cache.

        The cache write runs inside the storage's commit lock (via the
        callback the base storage invokes with the final tid), so cached
        records always carry the committed tid.
        """
        def cache_transaction_data(tid):
            # This happens within the storages transaction commit lock.
            for int_oid, data in self._tqueue.dump_and_reset():
                self.driver.store(
                    int_oid, tid, data,
                    _debug_stored_by="transaction_finish"
                )
            if f is not None:
                f(tid)
        result = self._base.tpc_finish(transaction, cache_transaction_data)
        return result

    def checkCurrentSerialInTransaction(self, oid, serial, transaction):
        return self._base.checkCurrentSerialInTransaction(
            oid, serial, transaction
        )

    def getTid(self, oid):
        return self._base.getTid(oid)

    def close(self):
        """Close both the base storage and the cache driver."""
        self._base.close()
        self.driver.close()

    def load(self, oid, version=''):
        """Load a record: transaction buffer, then cache, then base storage.

        A record loaded from the base storage is written back into the
        cache so subsequent loads hit the driver.
        """
        int_oid = u64(oid)
        # Try to load from the current transaction first.
        if self._tqueue.transaction:
            result = self._tqueue.load(int_oid)
            if result is not None:
                return result, self._tqueue.tid
        # Next try from the cache driver.
        result = self.driver.load(int_oid)
        if result != notAvailable:
            return result[0], result[1]
        # Load from the storage and save into the cache.
        result = self._base.load(oid, version)
        self.driver.store(
            int_oid, result[1], result[0],
            _debug_stored_by="save_after_load"
        )
        return result

    def store(self, oid, oldserial, data, version, transaction):
        """Store a record, buffering it (or caching it) alongside the base.

        Inside an open transaction the record goes to the transaction
        buffer and reaches the cache only at tpc_finish (with the final
        tid); outside one it is written to the driver immediately.
        """
        if self.isReadOnly():
            raise POSException.ReadOnlyError()
        assert not version
        int_oid = u64(oid)
        # NOTE(review): despite the `_int` suffix this holds the 8-byte
        # serial string (or z64), not an integer — confirm naming.
        prev_tid_int = z64
        if oldserial:
            prev_tid_int = oldserial
        if self._tqueue.transaction:
            self._tqueue.store(int_oid, data)
        else:
            self.driver.store(
                int_oid, prev_tid_int, data,
                _debug_stored_by="no_transaction_store"
            )
        return self._base.store(oid, oldserial, data, version, transaction)

    #
    # IStorageWrapper implementation
    #

    def registerDB(self, db):
        """Remember the database and its record (un)transform hooks."""
        self.db = db
        self._db_transform = db.transform_record_data
        self._db_untransform = db.untransform_record_data

    # This will get called when registerDB hasn't been called.
    _db_transform = _db_untransform = lambda self, data: data

    def invalidateCache(self):
        """ For IStorageWrapper """
        return self.db.invalidateCache()

    def invalidate(self, transaction_id, oids, version=''):
        """ For IStorageWrapper """
        return self.db.invalidate(transaction_id, oids, version)

    def references(self, record, oids=None):
        """ For IStorageWrapper """
        # NOTE(review): shadowed by the copied 'references' instance
        # attribute set in __init__ — see copied_methods comment.
        return self.db.references(record, oids)

    def transform_record_data(self, data):
        """ For IStorageWrapper """
        return self._db_transform(data)

    def untransform_record_data(self, data):
        """ For IStorageWrapper """
        return self._db_untransform(data)

    # END IStorageWrapper implementation

    @property
    def _addr(self):
        """ Hack for tests """
        return self._base._addr

    @property
    def _info(self):
        """ Another hack for tests """
        return self._base._info

    @property
    def _iterator_ids(self):
        """ Another hack for tests """
        return self._base._iterator_ids

    @property
    def _server(self):
        """ Another hack for tests """
        return self._base._server

    @property
    def _server_addr(self):
        """ Another hack for tests """
        return self._base._server_addr

    @property
    def _storage(self):
        """ Another hack for tests """
        return self._base._storage

    def notifyDisconnected(self):
        """ Another hack for tests """
        return self._base.notifyDisconnected()


class ZConfig:
    """ZConfig section factory that opens the base storage and wraps it."""

    _factory = ZOCacheStorage

    def __init__(self, config):
        self.config = config
        self.name = config.getSectionName()

    def open(self):
        base = self.config.base.open()
        url = self.config.url
        return self._factory(base, url)
# Generated by Django 3.0.8 on 2020-07-23 22:27 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0004_auto_20200723_1611'), ] operations = [ migrations.AlterField( model_name='projecttemplate', name='image', field=models.ImageField(upload_to='app/images/'), ), migrations.AlterField( model_name='projecttemplate', name='title', field=models.CharField(max_length=100), ), ]
#!/usr/bin/python3 import multiprocessing import sys import random from time import sleep def progressbar(width,min,max,current,text=""): hashcount = int(current/((max-min)/width)) fmt = "[%-"+str(width)+"s]" sys.stdout.write("\r") sys.stdout.write("\033[K") sys.stdout.write(fmt % ('#'*hashcount)) sys.stdout.write("\n") sys.stdout.write("\033[K") sys.stdout.write(text) sys.stdout.write("\033[1A") sys.stdout.write("\r") sys.stdout.flush() limit = 100 # how many data in queue start = 1 # random ints start from stop = 10000000 # random ints end at # generate list of randoms of (from start to stop) numbers in range limited by limit var queue = [random.randint(start, stop) for iter in range(limit)] # generate queue answers list res = [None] * len(queue) # result list of correct size # main work function def wrapMyFunc(arg, i): # print("wrapMyFunc", arg, flush=True) sleep(random.random()) return i, arg * -1 # update res after wrapMyFunc return result def update(i): progressbar(40,0,limit,i[0],text=str(i)) # here we need to update pbar # note: input comes from async `wrapMyFunc` res[i[0]] = i[1] # put answer into correct index of result list # print(i[0], i[1]) print("CPU count: {:d}".format(multiprocessing.cpu_count())) pool = multiprocessing.Pool(multiprocessing.cpu_count()+1) for queue_task, iter in zip(queue, range(limit)): pool.apply_async(wrapMyFunc, args=(queue_task,iter,), callback=update) pool.close() pool.join() print(res)
from pathlib import Path
import collections

import deepdish as dd
import mne
from autoreject import (get_rejection_threshold, AutoReject)


def autoreject_repair_epochs(epochs, reject_plot=False):
    """Rejects the bad epochs with AutoReject algorithm

    Parameters
    ----------
    epochs : mne epoch object
        Epoched, filtered eeg data.
    reject_plot : bool
        When True, plot the reject log over the original epochs.

    Returns
    ----------
    epochs : mne epoch object
        Epoched data after rejection of bad epochs.

    """
    # Cleaning with autoreject
    picks = mne.pick_types(epochs.info, eeg=True)  # Pick EEG channels
    ar = AutoReject(n_interpolate=[1, 2, 3],
                    n_jobs=6,
                    picks=picks,
                    thresh_func='bayesian_optimization',
                    cv=3,
                    random_state=42,
                    verbose=False)

    cleaned_epochs, reject_log = ar.fit_transform(epochs, return_log=True)
    if reject_plot:
        reject_log.plot_epochs(epochs, scalings=dict(eeg=40e-6))
    return cleaned_epochs


def append_eog_index(epochs, ica):
    """Detects the eye blink artifact indices and adds that information to ICA

    Parameters
    ----------
    epochs : mne epoch object
        Epoched, filtered, and autorejected eeg data
    ica : mne ica object
        ICA object from mne.

    Returns
    ----------
    ica : mne ica object
        ICA object with eog indices appended

    """
    # Find eye-blink components by correlating with frontal channels F3/F4
    # (used as EOG proxies; the original comment said Fp1/Fp2 but the code
    # always used F3/F4). The duplicated per-channel block is now a loop.
    for ch_name in ('F3', 'F4'):
        eog_inds, scores_eog = ica.find_bads_eog(epochs,
                                                 ch_name=ch_name,
                                                 verbose=False)
        eog_inds.sort()
        # Append only when the correlation is high
        id_eog = [i for i, n in enumerate(scores_eog.tolist())
                  if abs(n) >= 0.65]
        ica.exclude += id_eog
    return ica


def clean_with_ica(epochs, subject, hand, control, config, show_ica=False):
    """Clean epochs with ICA.

    Either reuses a previously fitted ICA (config['use_previous_ica']) or
    fits a fresh one with autoreject's rejection threshold.

    Parameters
    ----------
    epochs : mne epoch object
        Epoched, filtered, and autorejected eeg data
    subject, hand, control : str
        Keys selecting the previously saved ICA in the dataset file.
    config : dict
        Pipeline configuration.
    show_ica : bool
        When True, plot the ICA components.

    Returns
    ----------
    ica : mne ica object
        The ICA that was actually applied to the epochs.
    epochs : mne epoch object
        ica cleaned epochs

    """
    picks = mne.pick_types(epochs.info,
                           meg=False,
                           eeg=True,
                           eog=False,
                           stim=False,
                           exclude='bads')
    if config['use_previous_ica']:
        # Reuse the ICA fitted in a previous run.
        # (fix: duplicated `data = data =` assignment removed)
        read_path = Path(__file__).parents[2] / config['previous_ica']
        data = dd.io.load(str(read_path))
        # Bug fix: return the loaded, fitted ICA — previously a fresh,
        # unfitted ICA object was returned in this branch.
        ica = data[subject]['ica'][hand][control]
        ica.apply(epochs)
    else:
        ica = mne.preprocessing.ICA(n_components=None,
                                    method="picard",
                                    verbose=False)
        # Get the rejection threshold using autoreject
        reject_threshold = get_rejection_threshold(epochs)
        ica.fit(epochs, picks=picks, reject=reject_threshold)
        # mne pipeline to detect artifacts
        ica.detect_artifacts(epochs, eog_criterion=range(2))
        ica.apply(epochs)  # Apply the ICA
    if show_ica:
        ica.plot_components(inst=epochs)
    return epochs, ica


def clean_dataset(config):
    """Create cleaned dataset (by running autoreject and ICA) with each
    subject data in a dictionary.

    Parameters
    ----------
    config : yaml
        The configuration file.

    Returns
    ----------
    clean_eeg_dataset : dict
        A dictionary dataset of all the subjects with different conditions

    """
    clean_eeg_dataset = {}
    read_path = Path(__file__).parents[2] / config['raw_eeg_dataset']
    raw_eeg = dd.io.load(str(read_path))  # load the raw eeg

    def nested_dict():
        return collections.defaultdict(nested_dict)

    for subject in config['subjects']:
        data = nested_dict()
        for hand in config['hand_type']:
            for control in config['control_type']:
                epochs = raw_eeg[subject]['eeg'][hand][control]
                ica_epochs, ica = clean_with_ica(epochs, subject, hand,
                                                 control, config)
                repaired_eeg = autoreject_repair_epochs(ica_epochs)
                data['eeg'][hand][control] = repaired_eeg
                data['ica'][hand][control] = ica
        clean_eeg_dataset[subject] = data
    return clean_eeg_dataset
# Copyright (c) 2021, Roona and contributors # For license information, please see license.txt # import frappe from frappe.model.document import Document from frappe.utils import (strip) class DeliveryArea(Document): def autoname(self): self.area_name = strip(self.area_name) self.name = self.area_name
from lixian_plugins.api import command from lixian_cli_parser import parse_command_line from lixian_config import get_config from lixian_encoding import default_encoding def b_encoding(b): if 'encoding' in b: return b['encoding'] if 'codepage' in b: return 'cp' + str(b['codepage']) return 'utf-8' def b_name(info, encoding='utf-8'): if 'name.utf-8' in info: return info['name.utf-8'].decode('utf-8') return info['name'].decode(encoding) def b_path(f, encoding='utf-8'): if 'path.utf-8' in f: return [p.decode('utf-8') for p in f['path.utf-8']] return [p.decode(encoding) for p in f['path']] @command(usage='list files in local .torrent') def list_torrent(args): ''' usage: lx list-torrent [--size] xxx.torrent... ''' args = parse_command_line(args, [], ['size'], default={'size':get_config('size')}) torrents = args if not torrents: from glob import glob torrents = glob('*.torrent') if not torrents: raise Exception('No .torrent file found') for p in torrents: with open(p, 'rb') as stream: from lixian_hash_bt import bdecode b = bdecode(stream.read()) encoding = b_encoding(b) info = b['info'] from lixian_util import format_size if args.size: size = sum(f['length'] for f in info['files']) if 'files' in info else info['length'] print '*', b_name(info, encoding).encode(default_encoding), format_size(size) else: print '*', b_name(info, encoding).encode(default_encoding) if 'files' in info: for f in info['files']: if f['path'][0].startswith('_____padding_file_'): continue path = '/'.join(b_path(f, encoding)).encode(default_encoding) if args.size: print '%s (%s)' % (path, format_size(f['length'])) else: print path else: path = b_name(info, encoding).encode(default_encoding) if args.size: from lixian_util import format_size print '%s (%s)' % (path, format_size(info['length'])) else: print path
import numpy as np

# Print non-negative entries with a leading space so columns stay aligned.
np.set_printoptions(sign=' ')

# Read the matrix dimensions from one line of stdin (unpacked as positional
# arguments, so one or two integers both work) and print the identity matrix.
dimensions = [int(token) for token in input().split()]
print(np.eye(*dimensions))
__author__ = 'saeedamen'  # Saeed Amen

#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#

"""EngineTemplate

Abstract base implemented by EngineBokeh, EngineMatplotlib and EnginePlotly to do the underlying plotting.

"""

import abc

from math import log10, floor

import numpy
import pandas
import datetime

from chartpy.style import Style
from chartpy.chartconstants import ChartConstants

cc = ChartConstants()

# compatible with Python 2 *and* 3:
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})

class EngineTemplate(ABC):
    """Shared helper logic for the concrete plotting engines.

    Subclasses implement plot_chart; everything else here is common
    bookkeeping (bar-axis bookkeeping, filename generation, min/max scans,
    splitting a DataFrame into per-subplot frames, etc.).
    """

    def init(self):
        return

    @abc.abstractmethod
    def plot_chart(self, data_frame, style, type):
        # NOTE(review): the third parameter shadows the builtin `type`;
        # concrete engines call it `chart_type`.  Kept as-is for
        # signature compatibility.
        return

    def get_time_stamp(self):
        """Return the current timestamp with ':', ' ' and '.' replaced by '-' (filename-safe)."""
        return str(datetime.datetime.now()).replace(':', '-').replace(' ', '-').replace(".", "-")

    def get_bar_indices(self, data_frame, style, chart_type, bar_ind):
        """Work out whether this plot needs a categorical bar x-axis.

        Returns a tuple (xd, bar_ind, has_bar, no_of_bars) where
        xd is the x-axis values to plot against (the DataFrame index, or the
        proxy bar indices `bar_ind` for bar-style charts), has_bar is one of
        'no-bar' / 'barv' / 'barh', and no_of_bars is how many bar series
        must share each category slot.
        """
        has_bar = 'no-bar'
        xd = data_frame.index
        no_of_bars = len(data_frame.columns)

        # style.chart_type (possibly a per-series list) overrides the
        # chart_type argument when set
        if style.chart_type is not None:
            if isinstance(style.chart_type, list):
                if 'bar' in style.chart_type:
                    xd = bar_ind
                    no_of_bars = style.chart_type.count('bar')
                    has_bar = 'barv'
                elif 'stacked' in style.chart_type:
                    # stacked bars all share one slot
                    xd = bar_ind
                    no_of_bars = 1
                    has_bar = 'barv'
            elif 'bar' == style.chart_type:
                xd = bar_ind
                has_bar = 'barv'
            elif 'barh' == style.chart_type:
                xd = bar_ind
                has_bar = 'barh'
            elif 'stacked' == style.chart_type:
                # NOTE(review): plain 'stacked' maps to 'barh' here but to
                # 'barv' in the list branch above -- looks inconsistent;
                # confirm intended orientation before changing.
                xd = bar_ind
                has_bar = 'barh'
        else:
            if chart_type == 'bar' or chart_type == 'stacked':
                xd = bar_ind
                has_bar = 'barv'

        return xd, bar_ind, has_bar, no_of_bars

    def assign(self, structure, field, default):
        """Return structure.<field> if present, otherwise `default`."""
        if hasattr(structure, field): default = getattr(structure, field)

        return default

    def assign_list(self, style, field, list):
        """Return style.<field> coerced to a list of str, or the fallback `list`.

        NOTE(review): the parameter shadows the builtin `list`; kept for
        signature compatibility.
        """
        if hasattr(style, field):
            list = [str(x) for x in getattr(style, field)]

        return list

    def get_linewidth(self, label, linewidth_1, linewidth_2, linewidth_2_series):
        """Pick the secondary linewidth for series named in linewidth_2_series, else the primary."""
        if label in linewidth_2_series:
            return linewidth_2

        return linewidth_1

    def round_to_1(self, x):
        """Round x to one significant figure (x must be positive and non-zero)."""
        return round(x, -int(floor(log10(x))))

    def split_data_frame_to_list(self, data_frame, style):
        """Normalise input to a list of DataFrames.

        A list passes through unchanged; with style.subplots each column
        becomes its own single-column DataFrame (one per subplot); otherwise
        the whole frame is wrapped in a one-element list.
        """
        data_frame_list = []

        if isinstance(data_frame, list):
            data_frame_list = data_frame
        else:
            if style.subplots == True:
                for col in data_frame.columns:
                    data_frame_list.append(
                        pandas.DataFrame(index=data_frame.index, columns=[col], data=data_frame[col]))
            else:
                data_frame_list.append(data_frame)

        return data_frame_list

    def generate_file_names(self, style, engine):
        """Fill in style.html_file_output / style.file_output with timestamped names when auto-generation applies.

        Once a name has been auto-generated the corresponding
        auto_generate_* flag stays True, so subsequent calls regenerate it.
        """
        if style.html_file_output is not None and not (style.auto_generate_html_filename):
            pass
        else:
            import time
            style.html_file_output = (self.get_time_stamp() + "-" + engine + ".html")
            style.auto_generate_html_filename = True

        if style.file_output is not None and not (style.auto_generate_filename):
            pass
        else:
            import time
            style.file_output = (self.get_time_stamp() + "-" + engine + ".png")
            style.auto_generate_filename = True

        return style

    def get_max_min_dataframes(self, data_frame_list):
        """Gets minimum and maximum values for a series of dataframes. Can be particularly useful for adjusting
        colormaps for lightness/darkness.

        Parameters
        ----------
        data_frame_list : DataFrame (list)
            DataFrames to be checked

        Returns
        -------
        float, float
            Minimum and maximum values
        """

        if not(isinstance(data_frame_list, list)):
            data_frame_list = [data_frame_list]

        import sys

        # NOTE(review): sys.float_info.min is the smallest *positive* float,
        # not the most negative value -- maxz will be wrong for all-negative
        # data; -sys.float_info.max would be the usual sentinel.  Confirm.
        minz = sys.float_info.max
        maxz = sys.float_info.min

        for data_frame in data_frame_list:
            minz_1 = data_frame.min(axis=0).min()
            maxz_1 = data_frame.max(axis=0).max()

            # NOTE(review): `x != numpy.nan` is always True (NaN compares
            # unequal to everything, including itself); numpy.isnan() would
            # be required to actually skip NaNs here.
            if minz_1 != numpy.nan:
                minz = min(minz, minz_1)

            if maxz_1 != numpy.nan:
                maxz = max(maxz, maxz_1)

        return minz, maxz

    def get_max_min_x_axis(self, data_frame_list):
        """Gets minimum and maximum values for the x_axis. Can be particularly useful for adjusting colormaps for
        lightness/darkness.

        Parameters
        ----------
        data_frame_list : DataFrame (list)
            DataFrames to be checked

        Returns
        -------
        obj, obj
            Minimum and maximum values
        """
        import sys

        # Seed from the first frame's index endpoints, then widen over the rest.
        minz = data_frame_list[0].index[0]
        maxz = data_frame_list[0].index[-1]

        for data_frame in data_frame_list:
            minz_1 = data_frame.index[0]
            maxz_1 = data_frame.index[-1]

            # NOTE(review): same always-true NaN comparison as in
            # get_max_min_dataframes above.
            if minz_1 != numpy.nan:
                minz = min(minz, minz_1)

            if maxz_1 != numpy.nan:
                maxz = max(maxz, maxz_1)

        return minz, maxz

#######################################################################################################################

# Bokeh is optional: failures here simply leave EngineBokeh unusable.
try:
    from bokeh.plotting import figure, output_file, show, gridplot, save
    from bokeh.models import Range1d
    from bokeh.charts import HeatMap  # TODO deprecated need to redo
except:
    pass

class EngineBokeh(EngineTemplate):
    """Bokeh-backed plotting engine (renders to HTML, optionally inline in Jupyter)."""

    def plot_chart(self, data_frame, style, chart_type):
        """Render data_frame with Bokeh according to `style`.

        chart_type may be a single string or a per-column list
        ('line'/'bar'/'barh'/'scatter'/'heatmap').  Saves or shows a gridplot
        of one figure per (sub)frame.
        """

        # ColorMaster is defined elsewhere in this module.
        cm = ColorMaster()

        # Bokeh output tends to look oversized, so shrink positive scale
        # factors by 2/3 (negative scale factors are used as-is, abs'd).
        if style.scale_factor > 0:
            scale_factor = abs(style.scale_factor) * 2/3
        else:
            scale_factor = abs(style.scale_factor)

        try:
            if style.bokeh_plot_mode == "offline_jupyter":
                from bokeh.io import output_notebook

                output_notebook()
        except:
            pass

        try:
            style = self.generate_file_names(style, 'bokeh')

            output_file(style.html_file_output)
        except:
            pass

        data_frame_list = self.split_data_frame_to_list(data_frame, style)

        plot_list = []

        # Figures share the width; the height is split between subplots.
        plot_width = int((style.width * scale_factor))
        plot_height = int((style.height * scale_factor) / len(data_frame_list))

        for data_frame in data_frame_list:
            # Proxy 1..N positions used as the categorical x-axis for bars.
            bar_ind = numpy.arange(1, len(data_frame.index) + 1)

            xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind)

            separate_chart = False

            # Choose the figure flavour: heatmap, categorical (bars),
            # datetime axis or plain numerical axis.
            if chart_type == 'heatmap':
                # TODO
                p1 = HeatMap(data_frame, title='Random', plot_width = plot_width, plot_height = plot_height)

                separate_chart = True

            # if has a vertical bar than categorical x-axis
            elif has_bar == 'barv':
                p1 = figure(
                    plot_width = plot_width,
                    plot_height = plot_height,
                    x_range=[str(x).replace(':','.') for x in data_frame.index]
                    )

                # rotate labels so long category names stay readable
                from math import pi
                p1.xaxis.major_label_orientation = pi/2
            elif type(data_frame.index) == pandas.Timestamp or (type(xd[0]) == pandas.Timestamp and type(xd[-1]) == pandas.Timestamp)\
                or type(data_frame.index) == pandas.DatetimeIndex:

                p1 = figure(
                    x_axis_type = "datetime",
                    plot_width = plot_width,
                    plot_height = plot_height,
                    # x_range=(xd[0], xd[-1])  # at present Bokeh doesn't like to set limits with datetime, hopefully will change!
                )

            # otherwise numerical axis
            else:
                p1 = figure(
                    plot_width = plot_width,
                    plot_height = plot_height,
                    x_range=(xd[0], xd[-1])
                    )

            # set the fonts
            p1.axis.major_label_text_font_size = str(10) + "pt"
            p1.axis.major_label_text_font = cc.bokeh_font
            p1.axis.major_label_text_font_style = cc.bokeh_font_style

            p1.xaxis.axis_label_text_font_size = str(10) + "pt"
            p1.xaxis.axis_label_text_font = cc.bokeh_font
            p1.xaxis.axis_label_text_font_style = cc.bokeh_font_style
            p1.xaxis.axis_label = style.x_title
            p1.xaxis.visible = style.x_axis_showgrid

            p1.yaxis.axis_label_text_font_size = str(10) + "pt"
            p1.yaxis.axis_label_text_font = cc.bokeh_font
            p1.yaxis.axis_label_text_font_style = cc.bokeh_font_style
            p1.yaxis.axis_label = style.y_title
            p1.yaxis.visible = style.y_axis_showgrid

            p1.legend.location = "top_left"
            p1.legend.label_text_font_size = str(10) + "pt"
            p1.legend.label_text_font = cc.bokeh_font
            p1.legend.label_text_font_style = cc.bokeh_font_style
            p1.legend.background_fill_alpha = 0.75
            p1.legend.border_line_width = 0

            # set chart outline
            p1.outline_line_width = 0

            # Plot.title.text
            p1.title.text_font_size = str(14) + "pt"
            p1.title.text_font = cc.bokeh_font

            # TODO fix label
            # if style.display_source_label:
            #     p1.text([30 * scale_factor, 30 * scale_factor], [0, 0], text = [style.brand_label],
            #             text_font_size = str(10 * scale_factor) + "pt", text_align = "left",
            #             text_font = GraphistyleConstants().bokeh_font)

            color_spec = cm.create_color_list(style, data_frame)
            import matplotlib

            # Bar layout: each category slot is 1 unit wide, with bar_space
            # left empty and the rest split between the bar series.
            bar_space = 0.2
            bar_width = (1 - bar_space) / (no_of_bars)
            bar_index = 0

            has_bar = 'no-bar'

            if not(separate_chart):

                # plot each series in the dataframe separately
                for i in range(0, len(data_frame.columns)):
                    label = str(data_frame.columns[i])
                    glyph_name = 'glpyh' + str(i)

                    # set chart type which can differ for each time series
                    if isinstance(chart_type, list):
                        chart_type_ord = chart_type[i]
                    else:
                        chart_type_ord = chart_type

                    # get the color
                    if color_spec[i] is None:
                        color_spec[i] = self.get_color_list(i)

                    try:
                        # colors may come back as RGB tuples; Bokeh wants hex
                        color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i])
                    except:
                        pass

                    # NOTE(review): DataFrame.ix is long-deprecated/removed in
                    # modern pandas; .iloc would be the replacement.
                    yd = data_frame.ix[:,i]

                    # plot each time series as appropriate line, scatter etc.
                    if chart_type_ord == 'line':
                        linewidth_t = self.get_linewidth(label,
                                                         style.linewidth, style.linewidth_2, style.linewidth_2_series)

                        if linewidth_t is None: linewidth_t = 1

                        if style.display_legend:
                            p1.line(xd, yd, color = color_spec[i], line_width=linewidth_t, name = glyph_name,
                                    legend = label,
                                    )
                        else:
                            p1.line(xd, data_frame.ix[:,i], color = color_spec[i], line_width=linewidth_t,
                                    name = glyph_name)

                    elif(chart_type_ord == 'bar'):
                        # offset each bar series within the category slot
                        bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(1,len(bar_ind) + 1)]
                        bar_pos_right = [x + bar_width for x in bar_pos]

                        if style.display_legend:
                            p1.quad(top=yd, bottom=0 * yd, left=bar_pos, right=bar_pos_right, color=color_spec[i], legend=label)
                        else:
                            p1.quad(top=yd, bottom=0 * yd, left=bar_pos, right=bar_pos_right, color=color_spec[i])

                        bar_index = bar_index + 1
                        bar_ind = bar_ind + bar_width
                    elif (chart_type_ord == 'barh'):
                        # TODO
                        pass
                    elif chart_type_ord == 'scatter':
                        linewidth_t = self.get_linewidth(label,
                                                         style.linewidth, style.linewidth_2, style.linewidth_2_series)

                        if linewidth_t is None: linewidth_t = 1

                        if style.display_legend:
                            p1.circle(xd, yd, color = color_spec[i], line_width=linewidth_t, name = glyph_name,
                                      legend = label,
                                      )
                        else:
                            p1.circle(xd, yd, color = color_spec[i], line_width=linewidth_t,
                                      name = glyph_name)

            p1.grid.grid_line_alpha = 0.3

            # p1.min_border_left = -40
            # p1.min_border_right = 0
            # p1.min_border_top = 0
            # p1.min_border_bottom = 0
            p1.min_border = -50

            plot_list.append(p1)

        # stack all subplots in a single column
        p_final = gridplot(plot_list, ncols=1)

        try:
            p_final.title.text = style.title
        except:
            pass

        if style.silent_display:
            save(p_final)
        else:
            show(p_final)  # open a browser

    def get_color_list(self, i):
        """Cycle through the default Bokeh palette from chart constants."""
        color_palette = cc.bokeh_palette

        return color_palette[i % len(color_palette)]

    def generic_settings(self):
        return
###################################################################################################################### # TODO bqplot interface not implemented yet try: from IPython.display import display from bqplot import ( OrdinalScale, LinearScale, Bars, Lines, Axis, Figure ) except: pass class EngineBqplot(EngineTemplate): def plot_chart(self, data_frame, style, chart_type): pass def get_color_list(self, i): color_palette = cc.bokeh_palette return color_palette[i % len(color_palette)] def generic_settings(self): return ####################################################################################################################### # vispy based plots try: from vispy import plot as vp except: pass class EngineVisPy(EngineTemplate): def plot_chart(self, data_frame, style, chart_type): cm = ColorMaster() scale_factor = abs(style.scale_factor) try: if style.vispy_plot_mode == "offline_jupyter": pass except: pass try: style = self.generate_file_names(style, 'vispy') except: pass data_frame_list = self.split_data_frame_to_list(data_frame, style) plot_list = [] plot_width = int((style.width * scale_factor)) plot_height = int((style.height * scale_factor) / len(data_frame_list)) fig = vp.Fig(size=(plot_width, plot_height), show=False, title=style.title) min_x, max_x = self.get_max_min_x_axis(data_frame_list=data_frame_list) for data_frame in data_frame_list: bar_ind = numpy.arange(1, len(data_frame.index) + 1) if data_frame.index.name == 'Date': data_frame = data_frame.copy() data_frame = data_frame.reset_index() data_frame = data_frame.drop(['Date'], axis=1) xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind) xd = data_frame.index # make the x-axis float as a temporary fix, vispy can't handle Date labels separate_chart = False # axis properties color_spec = cm.create_color_list(style, data_frame) import matplotlib bar_space = 0.2 bar_width = (1 - bar_space) / (no_of_bars) bar_index = 0 separate_chart = False if 
chart_type == 'surface': # TODO separate_chart = True has_bar = 'no-bar' if not (separate_chart): # plot each series in the dataframe separately for i in range(0, len(data_frame.columns)): label = str(data_frame.columns[i]) glyph_name = 'glpyh' + str(i) # set chart type which can differ for each time series if isinstance(chart_type, list): chart_type_ord = chart_type[i] else: chart_type_ord = chart_type # get the color if color_spec[i] is None: color_spec[i] = self.get_color_list(i) try: color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i]) except: pass yd = data_frame.ix[:, i] # plot each time series as appropriate line, scatter etc. if chart_type_ord == 'line': fig[0, 0].plot(np.array((xd, yd)).T, marker_size=0, color=color_spec[i]) # fig[0, 0].view.camera.set_range(x=(min_x, max_x)) # TODO pass elif (chart_type_ord == 'bar'): # TODO pass elif (chart_type_ord == 'barh'): # TODO pass elif chart_type_ord == 'scatter': # TODO pass if style.silent_display: pass else: if style.save_fig: import vispy.io as io io.write_png(style.file_output, fig.render()) fig.show(run=True) # print(min_x); print(max_x) # fig[0, 0].view.camera.set_range(x=(min_x, max_x)) def get_color_list(self, i): color_palette = cc.bokeh_palette return color_palette[i % len(color_palette)] def generic_settings(self): return ####################################################################################################################### # matplotlib based libraries from datetime import timedelta import numpy as np try: import matplotlib import matplotlib.pyplot as plt except: pass try: from mpl_toolkits.mplot3d import Axes3D # need to import in order to do 3D plots (even if not called) except: pass try: from matplotlib.dates import YearLocator, MonthLocator, DayLocator, HourLocator, MinuteLocator from matplotlib.ticker import MultipleLocator except: pass class EngineMatplotlib(EngineTemplate): def plot_chart(self, data_frame, style, chart_type): self.apply_style_sheet(style) if style.xkcd: 
plt.xkcd() # create figure & add a subplot fig = plt.figure(figsize = ((style.width * abs(style.scale_factor))/style.dpi, (style.height * abs(style.scale_factor))/style.dpi), dpi = style.dpi) # matplotlib 1.5 try: cyc = matplotlib.rcParams['axes.prop_cycle'] color_cycle = [x['color'] for x in cyc] except KeyError: # pre 1.5 pass # color_cycle = matplotlib.rcParams['axes.color_cycle'] cm = ColorMaster() data_frame_list = self.split_data_frame_to_list(data_frame, style) subplot_no = 1 first_ax = None movie_frame = [] ordinal = 0 minz, maxz = self.get_max_min_dataframes(data_frame_list=data_frame_list) for data_frame in data_frame_list: bar_ind = np.arange(0, len(data_frame.index)) # for bar charts, create a proxy x-axis (then relabel) xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind) try: xd = xd.to_pydatetime() except: pass ax, ax2, subplot_no, ordinal = self._create_subplot(fig, chart_type, style, subplot_no, first_ax, ordinal) # for stacked bar yoff_pos = np.zeros(len(data_frame.index.values)) # the bottom values for stacked bar chart yoff_neg = np.zeros(len(data_frame.index.values)) # the bottom values for stacked bar chart zeros = np.zeros(len(data_frame.index.values)) # for bar chart bar_space = 0.2 bar_width = (1 - bar_space) / (no_of_bars) bar_index = 0 try: has_matrix = 'no' if not(isinstance(chart_type, list)): ax_temp = ax # get all the correct colors (and construct gradients if necessary eg. from 'blues') color = style.color if style.color == []: color = cc.chartfactory_default_colormap else: if isinstance(style.color, list): color = style.color[subplot_no - 1] if chart_type == 'heatmap': ax_temp.set_frame_on(False) # weird hack, otherwise comes out all inverted! 
data_frame = data_frame.iloc[::-1] if style.normalize_colormap: movie_frame.append(ax_temp.pcolor(data_frame.values, cmap=color, alpha=0.8, vmax=maxz, vmin=minz)) else: movie_frame.append(ax_temp.pcolor(data_frame.values, cmap=color, alpha=0.8)) has_matrix = '2d-matrix' elif chart_type == 'surface': # TODO still very early alpha X, Y = np.meshgrid(range(0, len(data_frame.columns)), range(0, len(data_frame.index))) Z = data_frame.values if style.normalize_colormap: movie_frame.append(ax_temp.plot_surface(X, Y, Z, cmap=color, rstride=1, cstride=1, vmax=maxz, vmin=minz)) else: movie_frame.append(ax_temp.plot_surface(X, Y, Z, cmap=color, rstride=1, cstride=1)) has_matrix = '3d-matrix' if (has_matrix == 'no'): # plot the lines (using custom palettes as appropriate) color_spec = cm.create_color_list(style, data_frame) # some lines we should exclude from the color and use the default palette for i in range(0, len(data_frame.columns.values)): if isinstance(chart_type, list): chart_type_ord = chart_type[i] else: chart_type_ord = chart_type label = str(data_frame.columns[i]) ax_temp = self.get_axis(ax, ax2, label, style.y_axis_2_series) yd = data_frame.ix[:,i] if color_spec[i] is None: color_spec[i] = color_cycle[i % len(color_cycle)] if (chart_type_ord == 'line'): linewidth_t = self.get_linewidth(label, style.linewidth, style.linewidth_2, style.linewidth_2_series) if linewidth_t is None: linewidth_t = matplotlib.rcParams['axes.linewidth'] movie_frame.append(ax_temp.plot(xd, yd, label = label, color = color_spec[i], linewidth = linewidth_t),) elif(chart_type_ord == 'bar'): # for multiple bars we need to allocate space properly bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0,len(bar_ind))] movie_frame.append(ax_temp.bar(bar_pos, yd, bar_width, label = label, color = color_spec[i])) bar_index = bar_index + 1 elif (chart_type_ord == 'barh'): # for multiple bars we need to allocate space properly bar_pos = [k - (1 - bar_space) / 2. 
+ bar_index * bar_width for k in range(0, len(bar_ind))] movie_frame.append(ax_temp.barh(bar_pos, yd, bar_width, label=label, color=color_spec[i])) bar_index = bar_index + 1 elif(chart_type_ord == 'stacked'): bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0,len(bar_ind))] yoff = np.where(yd > 0, yoff_pos, yoff_neg) movie_frame.append(ax_temp.bar(bar_pos, yd, label = label, color = color_spec[i], bottom = yoff)) yoff_pos = yoff_pos + np.maximum(yd, zeros) yoff_neg = yoff_neg + np.minimum(yd, zeros) # bar_index = bar_index + 1 elif(chart_type_ord == 'scatter'): movie_frame.append(ax_temp.scatter(xd, yd, label = label, color = color_spec[i])) if style.line_of_best_fit is True: self.trendline(ax_temp, xd.values, yd.values, order=1, color= color_spec[i], alpha=1, scale_factor = abs(style.scale_factor)) # format X axis self.format_x_axis(ax_temp, data_frame, style, has_bar, bar_ind, bar_width, has_matrix) except Exception as e: pass # print(str(e)) self._create_legend(ax, ax2, style) try: ax_temp.set_zlim(minz, maxz) except: pass anim = None # should we animate the figure? if style.animate_figure: if style.animate_titles is None: titles = range(1, len(data_frame_list) + 1) else: titles = style.animate_titles # initialization function: weirdly need to plot the last one (otherwise get ghosting!) 
def init(): return [movie_frame[-1]] def update(i): fig.canvas.set_window_title(str(titles[i])) return [movie_frame[i]] import matplotlib.animation as animation try: anim = animation.FuncAnimation(plt.gcf(), update, interval=style.animate_frame_ms, blit=True, frames=len(data_frame_list), init_func=init, repeat=True) except Exception as e: print(str(e)) # fig.autofmt_xdate() try: style = self.generate_file_names(style, 'matplotlib') if style.save_fig: # TODO get save movie file to work in GIF and MP4 (hangs currently on these) # install FFMPEG with: conda install --channel https://conda.anaconda.org/conda-forge ffmpeg if style.animate_figure: pass file = style.file_output.upper() # if '.GIF' in file: # anim.save(style.file_output, writer='imagemagick', fps=5, dpi=80) # print('GIF saved') # FFwriter = animation.FFMpegWriter() # plt.rcParams['animation.ffmpeg_path'] = 'c:\\ffmpeg\\bin\\ffmpeg.exe' # Writer = animation.writers['ffmpeg'] # writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800) # anim.save('test.mp4', writer=writer) plt.savefig(style.file_output, transparent=False) except Exception as e: print(str(e)) ####### various matplotlib converters are unstable # convert to D3 format with mpld3 try: # output matplotlib charts externally to D3 based libraries import mpld3 if style.display_mpld3 == True: mpld3.save_d3_html(fig, style.html_file_output) mpld3.show(fig) except: pass # FRAGILE! convert to Bokeh format # better to use direct Bokeh renderer try: if (style.convert_matplotlib_to_bokeh == True): from bokeh.plotting import output_file, show from bokeh import mpl output_file(style.html_file_output) show(mpl.to_bokeh()) except: pass # FRAGILE! 
convert matplotlib chart to Plotly format # recommend using AdapterCufflinks instead to directly plot to Plotly try: import plotly.plotly as py import plotly import plotly.tools as tls if style.convert_matplotlib_to_plotly == True: plotly.tools.set_credentials_file(username = style.plotly_username, api_key = style.plotly_api_key) py_fig = tls.mpl_to_plotly(fig, strip_style = True) plot_url = py.plot_mpl(py_fig, filename = style.plotly_url) except: pass # display in matplotlib window (or clear from pyplot) try: if cc.chartfactory_silent_display == True: plt.close(fig) return fig elif style.silent_display == False: if not(style.block_new_plots): # TODO pass plt.show() else: plt.close(fig) return fig except: pass def apply_style_sheet(self, style): # set the matplotlib style sheet & defaults matplotlib.rcdefaults() # first search ChartPy styles, then try matplotlib try: plt.style.use(cc.chartfactory_style_sheet[style.style_sheet]) except: plt.style.use(style.style_sheet) # adjust font size for scale factor matplotlib.rcParams.update({'font.size': matplotlib.rcParams['font.size'] * abs(style.scale_factor)}) # do not use offsets/scientific notation matplotlib.rcParams.update({'axes.formatter.useoffset': False}) def format_x_axis(self, ax, data_frame, style, has_bar, bar_ind, bar_width, has_matrix): if has_matrix == '2d-matrix' or has_matrix == '3d-matrix': x_bar_ind = np.arange(0, len(data_frame.columns)) y_bar_ind = np.arange(0, len(data_frame.index)) offset = 0.5 ax.set_xticks(x_bar_ind + offset) ax.set_xlim([0, len(x_bar_ind)]) ax.set_yticks(y_bar_ind + offset) ax.set_ylim([0, len(y_bar_ind)]) plt.setp(plt.yticks()[1], rotation=90) ax.set_xticklabels(data_frame.columns, minor=False) ax.set_yticklabels(data_frame.index, minor=False) ax.plot([], []) for x in range(len(data_frame.index)): for y in range(len(data_frame.columns)): plt.text(x + offset, y + offset, '%.0f' % data_frame.ix[x, y], horizontalalignment='center', verticalalignment='center', ) return if has_bar == 
'barv': if matplotlib.__version__ > '1.9': offset = bar_width / 2.0 # for matplotlib 2 else: offset = 0 ax.set_xticks(bar_ind - offset) ax.set_xticklabels(data_frame.index) ax.set_xlim([-1, len(bar_ind)]) # if lots of labels make text smaller and rotate if len(bar_ind) > 6: plt.setp(plt.xticks()[1], rotation=90) # plt.gca().tight_layout() # matplotlib.rcParams.update({'figure.autolayout': True}) # plt.gcf().subplots_adjust(bottom=5) import matplotlib.dates as mdates if style.date_formatter is not None: myFmt = mdates.DateFormatter(style.date_formatter) plt.tight_layout() # ax.tick_params(axis='x', labelsize=matplotlib.rcParams['font.size'] * 0.5) return elif has_bar == 'barh': ax.set_yticks(bar_ind) ax.set_yticklabels(data_frame.index) ax.set_ylim([-1, len(bar_ind)]) # if lots of labels make text smaller and rotate if len(bar_ind) > 6: #plt.setp(plt.yticks()[1]) # plt.gca().tight_layout() # matplotlib.rcParams.update({'figure.autolayout': True}) # plt.gcf().subplots_adjust(bottom=5) import matplotlib.dates as mdates if style.date_formatter is not None: ax.format_ydata = mdates.DateFormatter(style.date_formatter) plt.tight_layout() # ax.tick_params(axis='x', labelsize=matplotlib.rcParams['font.size'] * 0.5) return # format X axis dates = data_frame.index # scaling for time series plots with hours and minutes only (and no dates) if hasattr(data_frame.index[0], 'hour') and not(hasattr(data_frame.index[0], 'month')): ax.xaxis.set_major_locator(MultipleLocator(86400./3.)) ax.xaxis.set_minor_locator(MultipleLocator(86400./24.)) ax.grid(b = style.x_axis_showgrid, which='minor', color='w', linewidth=0.5) # TODO have more refined way of formating time series x-axis! 
# scaling for time series plots with dates too else: # to handle dates try: dates = dates.to_pydatetime() diff = data_frame.index[-1] - data_frame.index[0] import matplotlib.dates as md if style.date_formatter is not None: # from matplotlib.ticker import Formatter # # class MyFormatter(Formatter): # def __init__(self, dates, fmt='%H:%M'): # self.dates = dates # self.fmt = fmt # # def __call__(self, x, pos=0): # 'Return the label for time x at position pos' # ind = int(round(x)) # if ind >= len(self.dates) or ind < 0: return '' # # return self.dates[ind].strftime(self.fmt) # # formatter = MyFormatter(dates) # ax.xaxis.set_major_formatter(formatter) ax.xaxis.set_major_formatter(md.DateFormatter(style.date_formatter)) elif diff < timedelta(days = 4): date_formatter = '%H:%M' xfmt = md.DateFormatter(date_formatter) ax.xaxis.set_major_formatter(xfmt) if diff < timedelta(minutes=20): ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=2)) ax.xaxis.set_minor_locator(MinuteLocator(interval=1)) elif diff < timedelta(hours=1): ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=5)) ax.xaxis.set_minor_locator(MinuteLocator(interval=2)) elif diff < timedelta(hours=6): locator = HourLocator(interval=1) ax.xaxis.set_major_locator(locator) ax.xaxis.set_minor_locator(MinuteLocator(interval=30)) elif diff < timedelta(days=3): ax.xaxis.set_major_locator(HourLocator(interval=6)) ax.xaxis.set_minor_locator(HourLocator(interval=1)) elif diff < timedelta(days=10): locator = DayLocator(interval=2) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y')) day_locator = DayLocator(interval=1) ax.xaxis.set_minor_locator(day_locator) elif diff < timedelta(days=40): locator = DayLocator(interval=10) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y')) day_locator = DayLocator(interval=1) ax.xaxis.set_minor_locator(day_locator) elif diff < timedelta(days=365 * 0.5): locator = 
MonthLocator(bymonthday=1, interval=2) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%b %y')) months_locator = MonthLocator(interval=1) ax.xaxis.set_minor_locator(months_locator) elif diff < timedelta(days=365 * 2): locator = MonthLocator(bymonthday=1, interval=3) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%b %y')) months_locator = MonthLocator(interval=1) ax.xaxis.set_minor_locator(months_locator) elif diff < timedelta(days = 365 * 5): locator = YearLocator() ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%Y')) else: years = floor(diff.days/365.0/5.0) locator = YearLocator(years) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(md.DateFormatter('%Y')) if matplotlib.__version__ > '1.9': max = dates.max() min = dates.min() plt.xlim(min, max) except: try: # otherwise we have integers, rather than dates # TODO needs smarter more generalised mapping of dates max = dates.max() min = dates.min() big_step = self.round_to_1((max - min)/10) small_step = big_step / 5 ax.xaxis.set_major_locator(MultipleLocator(big_step)) ax.xaxis.set_minor_locator(MultipleLocator(small_step)) plt.xlim(min, max) except: pass def get_axis(self, ax, ax2, label, y_axis_2_series): if label in y_axis_2_series: return ax2 return ax def trendline(self, ax, xd, yd, order=1, color='red', alpha=1, Rval=False, scale_factor = 1): """ Make a line of best fit """ # Calculate trendline xd[np.isnan(xd)] = 0 yd[np.isnan(yd)] = 0 coeffs = np.polyfit(xd, yd, order) intercept = coeffs[-1] slope = coeffs[-2] if order == 2: power = coeffs[0] else: power = 0 minxd = np.min(xd) maxxd = np.max(xd) xl = np.array([minxd, maxxd]) yl = power * xl ** 2 + slope * xl + intercept # plot trendline ax.plot(xl, yl, color = color, alpha = alpha) # calculate R squared p = np.poly1d(coeffs) ybar = np.sum(yd) / len(yd) ssreg = np.sum((p(xd) - ybar) ** 2) sstot = np.sum((yd - ybar) ** 2) Rsqr = ssreg / 
sstot if Rval == False: text = 'R^2 = %0.2f, m = %0.4f, c = %0.4f' %(Rsqr, slope, intercept) ax.annotate(text, xy=(1, 1), xycoords='axes fraction', fontsize=8 * abs(scale_factor), xytext=(-5 * abs(scale_factor), 10 * abs(scale_factor)), textcoords='offset points', ha='right', va='top') # Plot R^2 value # ax.text(0.65, 0.95, text, fontsize = 10 * scale_factor, # ha= 'left', # va = 'top', transform = ax.transAxes) pass else: # return the R^2 value: return Rsqr def _create_brand_label(self, ax, anno, scale_factor): ax.annotate(anno, xy = (1, 1), xycoords = 'axes fraction', fontsize = 10 * abs(scale_factor), color = 'white', xytext = (0 * abs(scale_factor), 15 * abs(scale_factor)), textcoords = 'offset points', va = "center", ha = "center", bbox = dict(boxstyle = "round,pad=0.0", facecolor = cc.chartfactory_brand_color)) def _create_subplot(self, fig, chart_type, style, subplot_no, first_ax, ordinal): if style.title is not None: fig.suptitle(style.title, fontsize = 14 * abs(style.scale_factor)) chart_projection = '2d' if not (isinstance(chart_type, list)): if chart_type == 'surface': chart_projection = '3d' if style.subplots == False and first_ax is None: if chart_projection == '3d': ax = fig.add_subplot(111, projection=chart_projection) else: ax = fig.add_subplot(111) else: if first_ax is None: if chart_projection == '3d': ax = fig.add_subplot(2, 1, subplot_no, projection=chart_projection) else: ax = fig.add_subplot(2, 1, subplot_no) first_ax = ax if style.share_subplot_x: if chart_projection == '3d': ax = fig.add_subplot(2, 1, subplot_no, sharex=first_ax, projection=chart_projection) else: ax = fig.add_subplot(2, 1, subplot_no, sharex=first_ax) else: if chart_projection == '3d': ax = fig.add_subplot(2, 1, subplot_no, projection=chart_projection) else: ax = fig.add_subplot(2, 1, subplot_no) subplot_no = subplot_no + 1 if style.x_title != '': ax.set_xlabel(style.x_title) if style.y_title != '': ax.set_ylabel(style.y_title) plt.xlabel(style.x_title) 
plt.ylabel(style.y_title) # format Y axis y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False) ax.yaxis.set_major_formatter(y_formatter) # create a second y axis if necessary ax2 = [] ax.xaxis.grid(style.x_axis_showgrid) ax.yaxis.grid(style.y_axis_showgrid) if style.y_axis_2_series != []: ax2 = ax.twinx() # set grid for second y axis ax2.yaxis.grid(style.y_axis_2_showgrid) return ax, ax2, subplot_no, ordinal + 1 def _create_legend(self, ax, ax2, style): if style.display_source_label == True and style.source is not None: ax.annotate('Source: ' + style.source, xy=(1, 0), xycoords='axes fraction', fontsize=7 * abs(style.scale_factor), xytext=(-5 * abs(style.scale_factor), 10 * abs(style.scale_factor)), textcoords='offset points', ha='right', va='top', color=style.source_color) if style.display_brand_label == True: self._create_brand_label(ax, anno=style.brand_label, scale_factor=abs(style.scale_factor)) leg = [] leg2 = [] loc = 'best' # if we have two y-axis then make sure legends are in opposite corners if ax2 != []: loc = 2 try: leg = ax.legend(loc=loc, prop={'size': 10 * abs(style.scale_factor)}) leg.get_frame().set_linewidth(0.0) leg.get_frame().set_alpha(0) if ax2 != []: leg2 = ax2.legend(loc=1, prop={'size': 10 * abs(style.scale_factor)}) leg2.get_frame().set_linewidth(0.0) leg2.get_frame().set_alpha(0) except: pass try: if style.display_legend is False: if leg != []: leg.remove() if leg2 != []: leg.remove() except: pass ####################################################################################################################### cf = None try: import plotly # JavaScript based plotting library with Python connector import plotly.graph_objs as go import cufflinks as cf plotly.tools.set_config_file(plotly_domain='https://type-here.com', world_readable=cc.plotly_world_readable, sharing=cc.plotly_sharing) except: pass try: pass #plotly.utils.memoize = memoize except: pass class EnginePlotly(EngineTemplate): def plot_chart(self, data_frame, 
style, chart_type): mode = 'line' if style is None: style = Style() marker_size = 1 x = ''; y = ''; z = '' scale = 1 try: # adjust sizing if offline_html format if (style.plotly_plot_mode == 'offline_html' and style.scale_factor > 0): scale = float(2.0 / 3.0) except: pass # check other plots implemented by Cufflinks cm = ColorMaster() # create figure data_frame_list = self.split_data_frame_to_list(data_frame, style) fig_list = [] cols = [] for data_frame in data_frame_list: cols.append(data_frame.columns) cols = list(numpy.array(cols).flat) # get all the correct colors (and construct gradients if necessary eg. from 'Blues') # need to change to strings for cufflinks color_list = cm.create_color_list(style, [], cols=cols) color_spec = [] # if no colors are specified then just use our default color set from chart constants if color_list == [None] * len(color_list): color_spec = [None] * len(color_list) for i in range(0, len(color_list)): # get the color if color_spec[i] is None: color_spec[i] = self.get_color_list(i) try: color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i]) except: pass else: # otherwise assume all the colors are rgba for color in color_list: color = 'rgba' + str(color) color_spec.append(color) start = 0 # go through each data_frame in the list and plot for i in range(0, len(data_frame_list)): data_frame = data_frame_list[i] if isinstance(chart_type, list): chart_type_ord = chart_type[i] else: chart_type_ord = chart_type end = start + len(data_frame.columns) color_spec1 = color_spec[start:start + end] start = end # special call for surface and heatmaps # NOTE: we use cufflinks library, which simplifies plotting DataFrames in plotly if chart_type_ord == 'surface': fig = data_frame.iplot(kind=chart_type, title=style.title, xTitle=style.x_title, yTitle=style.y_title, x=x, y=y, z=z, mode=mode, size=marker_size, sharing=style.plotly_sharing, theme=style.plotly_theme, bestfit=style.line_of_best_fit, legend=style.display_legend, colorscale=style.color, 
dimensions=(style.width * abs(style.scale_factor) * scale, style.height * abs(style.scale_factor) * scale), asFigure=True) elif chart_type_ord == 'heatmap': fig = data_frame.iplot(kind=chart_type, title=style.title, xTitle=style.x_title, yTitle=style.y_title, x=x, y=y, mode=mode, size=marker_size, sharing=style.plotly_sharing, theme=style.plotly_theme, bestfit=style.line_of_best_fit, legend=style.display_legend, colorscale=style.color, dimensions=(style.width * abs(style.scale_factor) * scale, style.height * abs(style.scale_factor) * scale), asFigure=True) # special case for map/choropleth which has yet to be implemented in Cufflinks # will likely remove this in the future elif chart_type_ord == 'choropleth': for col in data_frame.columns: try: data_frame[col] = data_frame[col].astype(str) except: pass if style.color != []: color = style.color else: color = [[0.0, 'rgb(242,240,247)'], [0.2, 'rgb(218,218,235)'], [0.4, 'rgb(188,189,220)'], \ [0.6, 'rgb(158,154,200)'], [0.8, 'rgb(117,107,177)'], [1.0, 'rgb(84,39,143)']] text = '' if 'text' in data_frame.columns: text = data_frame['Text'] data = [dict( type='choropleth', colorscale=color, autocolorscale=False, locations=data_frame['Code'], z=data_frame[style.plotly_choropleth_field].astype(float), locationmode=style.plotly_location_mode, text=text, marker=dict( line=dict( color='rgb(255,255,255)', width=1 ) ), colorbar=dict( title=style.units ) )] layout = dict( title=style.title, geo=dict( scope=style.plotly_scope, projection=dict(type=style.plotly_projection), showlakes=True, lakecolor='rgb(255, 255, 255)', ), ) fig = dict(data=data, layout=layout) # otherwise we have a line plot (or similar such as a scatter plot) else: full_line = style.connect_line_gaps if chart_type_ord == 'line': full_line = True # chart_type_ord = 'scatter' mode = 'lines' elif chart_type_ord in ['dash', 'dashdot', 'dot']: chart_type_ord = 'scatter' elif chart_type_ord == 'line+markers': full_line = True chart_type_ord = 'line' mode = 
'lines+markers' marker_size = 5 elif chart_type_ord == 'scatter': mode = 'markers' marker_size = 5 elif chart_type_ord == 'bubble': chart_type_ord = 'scatter' mode = 'markers' # x = data_frame.columns[0] # y = data_frame.columns[1] # z = data_frame.columns[2] # using WebGL # if len(data_frame.index) > 500: # import plotly.graph_objs as go # # import numpy as np # # data = [] # # for col in data_frame.columns: # data.append(go.Scattergl( # x=data_frame.index, # y=data_frame[col] # ) # ) # layout = dict(showlegend=False) # fig = dict(data=data, layout=layout) # else: # TODO check this! # can have issues calling cufflinks with a theme which is None, so split up the cases if style.plotly_theme is None: plotly_theme = 'pearl' else: plotly_theme = style.plotly_theme m = 0 # sometimes Plotly has issues generating figures in dash, so if fails first, try again while m < 10: try: # TODO try writing this directly wiht plotly, rather than using cufflinks fig = data_frame.iplot(kind=chart_type_ord, title=style.title, xTitle=style.x_title, yTitle=style.y_title, x=x, y=y, z=z, subplots=False, sharing=style.plotly_sharing, mode=mode, secondary_y=style.y_axis_2_series, size=marker_size, theme=plotly_theme, colorscale='dflt', bestfit=style.line_of_best_fit, legend=style.display_legend, width=style.linewidth, color=color_spec1, dimensions=(style.width * abs(style.scale_factor) * scale, style.height * abs(style.scale_factor) * scale), asFigure=True) m = 10; break except Exception as e: try: print(chart_type_ord) # sometimes get error eg. 
'legend', 'bgcolor', 'none', ('layout',) or can be related to 'colorscale' import traceback import time import sys time.sleep(0.3) # T, V, TB = sys.exc_info() # print(''.join(traceback.format_exception(T, V, TB))) print("Will attempt to re-render: " + str(e)) # try to reimport plotly and cufflinks and re-render (can sometimes have issues in multithreaded # environment with plotly) import plotly import cufflinks as cf # plotly.tools.set_config_file(plotly_domain='https://type-here.com', # world_readable=cc.plotly_world_readable, # sharing=cc.plotly_sharing) if data_frame is None: print('Empty dataframe') fig = data_frame.iplot(kind=chart_type_ord, title=style.title, xTitle=style.x_title, yTitle=style.y_title, x=x, y=y, z=z, subplots=False, sharing=style.plotly_sharing, mode=mode, secondary_y=style.y_axis_2_series, size=marker_size, theme=plotly_theme, colorscale='dflt', bestfit=style.line_of_best_fit, legend=style.display_legend, width=style.linewidth, color=color_spec1, dimensions=(style.width * abs(style.scale_factor) * scale, style.height * abs(style.scale_factor) * scale), asFigure=True) m = 10; break except Exception as e: print(str(e)) print('Try plotting again...') # print(color_spec1) m = m + 1 # for lines set the property of connectgaps (cannot specify directly in cufflinks) if full_line: for z in range(0, len(fig['data'])): fig['data'][z].connectgaps = style.connect_line_gaps for k in range(0, len(fig['data'])): if full_line: fig['data'][k].connectgaps = style.connect_line_gaps if style.line_shape != None: if isinstance(style.line_shape, str): line_shape = [style.line_shape] * len(fig['data']) else: line_shape = style.line_shape for k in range(0, len(fig['data'])): fig['data'][k].line.shape = line_shape[k] if style.plotly_webgl: for k in range(0, len(fig['data'])): if fig['data'][k].type == 'scatter': fig['data'][k].type = 'scattergl' if style.y_axis_range is not None: # override other properties, which cannot be set directly by cufflinks 
fig.update(dict(layout=dict(yaxis=dict( range=style.y_axis_range )))) if style.x_axis_range is not None: # override other properties, which cannot be set directly by cufflinks fig.update(dict(layout=dict(xaxis=dict( range=style.x_axis_range )))) fig.update(dict(layout=dict(legend=dict( x=0.05, y=1 )))) # adjust margins if style.thin_margin: fig.update(dict(layout=dict(margin=go.layout.Margin( l=20, r=20, b=40, t=40, pad=0 )))) # change background color fig.update(dict(layout=dict(paper_bgcolor='rgba(0,0,0,0)'))) fig.update(dict(layout=dict(plot_bgcolor='rgba(0,0,0,0)'))) # deal with grids if (not (style.x_axis_showgrid)): fig.update(dict(layout=dict(xaxis=dict(showgrid=style.x_axis_showgrid)))) if (not (style.y_axis_showgrid)): fig.update(dict(layout=dict(yaxis=dict(showgrid=style.y_axis_showgrid)))) if (not (style.y_axis_2_showgrid)): fig.update( dict(layout=dict(yaxis2=dict(showgrid=style.y_axis_2_showgrid)))) fig_list.append(fig) #### plotted all the lines if len(fig_list) > 1: fig = cf.subplots(fig_list) fig['layout'].update(title=style.title) else: fig = fig_list[0] # override properties, which cannot be set directly by cufflinks # for the type of line (ie. line or scatter) # for making the lined dashed, dotted etc. 
if style.subplots == False and isinstance(chart_type, list): for j in range(0, len(fig['data'])): mode = None; dash = None; line_shape = None; if chart_type[j] == 'line': mode = 'lines' elif chart_type[j] == 'line+markers': mode = 'lines+markers' elif chart_type[j] == 'scatter': mode = 'markers' elif chart_type[j] in ['dash', 'dashdot', 'dot']: dash = chart_type[j] mode = 'lines' elif chart_type[j] in ['hv', 'vh', 'vhv', 'spline', 'linear']: line_shape = chart_type[j] mode = 'lines' elif chart_type[j] == 'bubble': mode = 'markers' bubble_series = style.bubble_series[cols[j]] bubble_series = bubble_series.fillna(0) # dash = chart_type[j] # data_frame[bubble_series.name] = bubble_series scale = float(bubble_series.max()) fig['data'][j].marker.size = \ (style.bubble_size_scalar * (bubble_series.values / scale)).tolist() if mode is not None: fig['data'][j].mode = mode if dash is not None: fig['data'][j].line.dash = dash if line_shape is not None: fig['data'][j].line.shape = line_shape from plotly.graph_objs import Figure # if candlestick specified add that (needed to be appended on top of the Plotly figure's data if style.candlestick_series is not None and not (style.plotly_webgl): # self.logger.debug("About to create candlesticks") if isinstance(style.candlestick_series, Figure): fig_candle = style.candlestick_series else: # from plotly.tools import FigureFactory as FF fig_candle = create_candlestick(style.candlestick_series['open'], style.candlestick_series['high'], style.candlestick_series['low'], style.candlestick_series['close'], dates=style.candlestick_series['close'].index ) if style.candlestick_increasing_color is not None: # increasing fig_candle['data'][0].fillcolor = cm.get_color_code(style.candlestick_increasing_color) fig_candle['data'][0].line.color = cm.get_color_code(style.candlestick_increasing_line_color) if style.candlestick_decreasing_color is not None: # descreasing fig_candle['data'][1].fillcolor = 
cm.get_color_code(style.candlestick_decreasing_color) fig_candle['data'][1].line.color = cm.get_color_code(style.candlestick_decreasing_line_color) try: # append the data to the existing Plotly figure, plotted earlier fig.data.append(fig_candle.data[0]); fig.data.append(fig_candle.data[1]) except: # plotly 3.0 fig.add_trace(fig_candle.data[0]) fig.add_trace(fig_candle.data[1]) # self.logger.debug("Rendered candlesticks") x_y_line_list = [] # fig.layout.yrange # add x-line: for x_y_line in style.x_y_line: start = x_y_line[0] finish = x_y_line[1] x_y_line_list.append( { 'type': 'line', 'x0': start[0], 'y0': start[1], 'x1': finish[0], 'y1': finish[1], 'line': { 'color': 'black', 'width': 0.5, 'dash': 'dot', }, } ) # x_y_line_list = [{ # 'type': 'line', # 'x0': 1, # 'y0': 0, # 'x1': 1, # 'y1': 2, # 'line': { # 'color': 'rgb(55, 128, 191)', # 'width': 3, # }, # }] if len(x_y_line_list) > 0: fig.layout.shapes = x_y_line_list # publish the plot (depending on the output mode eg. to HTML file/Jupyter notebook) # also return as a Figure object for plotting by a web server app (eg. 
Flask/Dash) return self.publish_plot(fig, style) def publish_plot(self, fig, style): # change background color fig.update(dict(layout=dict(paper_bgcolor='rgba(0,0,0,0)'))) fig.update(dict(layout=dict(plot_bgcolor='rgba(0,0,0,0)'))) style = self.generate_file_names(style, 'plotly') if style.plotly_plot_mode == 'dash': pass elif style.plotly_plot_mode == 'online': plotly.tools.set_credentials_file(username=style.plotly_username, api_key=style.plotly_api_key) plotly.plotly.plot(fig, filename=style.plotly_url, world_readable=style.plotly_world_readable, auto_open = not(style.silent_display), asImage=style.plotly_as_image) elif style.plotly_plot_mode == 'offline_html': plotly.offline.plot(fig, filename=style.html_file_output, auto_open = not(style.silent_display)) elif style.plotly_plot_mode == 'offline_jupyter': # plot in IPython notebook plotly.offline.init_notebook_mode() plotly.offline.iplot(fig) # plotly.offline.plot(fig, filename=style.file_output, format='png', # width=style.width * style.scale_factor, height=style.height * style.scale_factor) if style.plotly_plot_mode != 'dash': try: plotly.plotly.image.save_as(fig, filename=style.file_output, format='png', width=style.width * abs(style.scale_factor), height=style.height * abs(style.scale_factor)) except: pass return fig def get_color_list(self, i): color_palette = cc.plotly_palette return color_palette[i % len(color_palette)] ####################################################################################################################### # create color lists to be used in plots class ColorMaster: def create_color_list(self, style, data_frame, cols = None): if cols is None: cols = data_frame.columns # get all the correct colors (and construct gradients if necessary eg. 
from 'blues') color = self.construct_color(style, 'color', len(cols) - len(style.color_2_series)) color_2 = self.construct_color(style, 'color_2', len(style.color_2_series)) return self.assign_color(cols, color, color_2, style.exclude_from_color, style.color_2_series) def construct_color(self, style, color_field_name, no_of_entries): color = [] if hasattr(style, color_field_name): if isinstance(getattr(style, color_field_name), list): color = getattr(style, color_field_name, color) else: try: color = self.create_colormap(no_of_entries, getattr(style, color_field_name)) except: pass return color def exclude_from_color(self, style): if not (isinstance(style.exclude_from_color, list)): style.exclude_from_color = [style.exclude_from_color] exclude_from_color = [str(x) for x in style.exclude_from_color] return exclude_from_color def assign_color(self, labels, color, color_2, exclude_from_color, color_2_series): color_list = [] axis_1_color_index = 0; axis_2_color_index = 0 # convert all the labels to strings labels = [str(x) for x in labels] # go through each label for label in labels: color_spec = None if label in exclude_from_color: color_spec = None elif label in color_2_series: if color_2 != []: color_spec = self.get_color_code(color_2[axis_2_color_index]) axis_2_color_index = axis_2_color_index + 1 else: if color != []: color_spec = self.get_color_code(color[axis_1_color_index]) axis_1_color_index = axis_1_color_index + 1 try: color_spec = matplotlib.colors.colorConverter.to_rgba(color_spec) except: pass color_list.append(color_spec) return color_list def get_color_code(self, code): # redefine color names dict = cc.chartfactory_color_overwrites if code in dict: return dict[code] return code def create_colormap(self, num_colors, map_name): ## matplotlib ref for colors: http://matplotlib.org/examples/color/colormaps_reference.html cm = matplotlib.cm.get_cmap(name=map_name) return [cm(1. 
* i / num_colors) for i in range(num_colors)]

########################################################################################################################

## faster version of Plotly's candlestick drawing module (assumes NumPy) ###############################################

from plotly.figure_factory import utils
from plotly.figure_factory._ohlc import (_DEFAULT_INCREASING_COLOR,
                                         _DEFAULT_DECREASING_COLOR,
                                         validate_ohlc)
from plotly.graph_objs import graph_objs


def make_increasing_candle(open, high, low, close, dates, **kwargs):
    """
    Makes boxplot trace for increasing candlesticks

    _make_increasing_candle() and _make_decreasing_candle separate the
    increasing traces from the decreasing traces so kwargs (such as color) can
    be passed separately to increasing or decreasing traces when direction is
    set to 'increasing' or 'decreasing' in FigureFactory.create_candlestick()

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects. Default: None
    :param kwargs: kwargs to be passed to increasing trace via
        plotly.graph_objs.Scatter.

    :rtype (list) candle_incr_data: list of the box trace for
        increasing candlesticks.
    """
    increase_x, increase_y = _Candlestick(
        open, high, low, close, dates, **kwargs).get_candle_increase()

    # an explicit 'line' colour supplied by the caller wins over the default
    # increasing fill colour
    if 'line' in kwargs:
        kwargs.setdefault('fillcolor', kwargs['line']['color'])
    else:
        kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)
    # only show a legend entry when the caller supplied a custom trace name
    if 'name' in kwargs:
        kwargs.setdefault('showlegend', True)
    else:
        kwargs.setdefault('showlegend', False)
    kwargs.setdefault('name', 'Increasing')
    kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))

    # each candle is rendered as a box trace fed with a
    # low/open/close/close/close/high sextet per bar (see _Candlestick)
    candle_incr_data = dict(type='box',
                            x=increase_x,
                            y=increase_y,
                            whiskerwidth=0,
                            boxpoints=False,
                            **kwargs)

    return [candle_incr_data]


def make_decreasing_candle(open, high, low, close, dates, **kwargs):
    """
    Makes boxplot trace for decreasing candlesticks

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects. Default: None
    :param kwargs: kwargs to be passed to decreasing trace via
        plotly.graph_objs.Scatter.

    :rtype (list) candle_decr_data: list of the box trace for
        decreasing candlesticks.
    """

    decrease_x, decrease_y = _Candlestick(
        open, high, low, close, dates, **kwargs).get_candle_decrease()

    if 'line' in kwargs:
        kwargs.setdefault('fillcolor', kwargs['line']['color'])
    else:
        kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)
    # NOTE(review): unlike the increasing trace, the legend entry is always
    # hidden here even when a custom 'name' is supplied — this mirrors the
    # upstream plotly factory; confirm before changing
    kwargs.setdefault('showlegend', False)
    kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))
    kwargs.setdefault('name', 'Decreasing')

    candle_decr_data = dict(type='box',
                            x=decrease_x,
                            y=decrease_y,
                            whiskerwidth=0,
                            boxpoints=False,
                            **kwargs)

    return [candle_decr_data]


def create_candlestick(open, high, low, close, dates=None, direction='both', **kwargs):
    """
    BETA function that creates a candlestick chart

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects.
Default: None :param (string) direction: direction can be 'increasing', 'decreasing', or 'both'. When the direction is 'increasing', the returned figure consists of all candlesticks where the close value is greater than the corresponding open value, and when the direction is 'decreasing', the returned figure consists of all candlesticks where the close value is less than or equal to the corresponding open value. When the direction is 'both', both increasing and decreasing candlesticks are returned. Default: 'both' :param kwargs: kwargs passed through plotly.graph_objs.Scatter. These kwargs describe other attributes about the ohlc Scatter trace such as the color or the legend name. For more information on valid kwargs call help(plotly.graph_objs.Scatter) :rtype (dict): returns a representation of candlestick chart figure. Example 1: Simple candlestick chart from a Pandas DataFrame ``` import plotly.plotly as py from plotly.figure_factory import create_candlestick from datetime import datetime import pandas.io.data as web df = web.DataReader("aapl", 'yahoo', datetime(2007, 10, 1), datetime(2009, 4, 1)) fig = create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index) py.plot(fig, filename='finance/aapl-candlestick', validate=False) ``` Example 2: Add text and annotations to the candlestick chart ``` fig = create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index) # Update the fig - all options here: https://plot.ly/python/reference/#Layout fig['layout'].update({ 'title': 'The Great Recession', 'yaxis': {'title': 'AAPL Stock'}, 'shapes': [{ 'x0': '2007-12-01', 'x1': '2007-12-01', 'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper', 'line': {'color': 'rgb(30,30,30)', 'width': 1} }], 'annotations': [{ 'x': '2007-12-01', 'y': 0.05, 'xref': 'x', 'yref': 'paper', 'showarrow': False, 'xanchor': 'left', 'text': 'Official start of the recession' }] }) py.plot(fig, filename='finance/aapl-recession-candlestick', validate=False) ``` Example 3: Customize the 
candlestick colors ``` import plotly.plotly as py from plotly.figure_factory import create_candlestick from plotly.graph_objs import Line, Marker from datetime import datetime import pandas.io.data as web df = web.DataReader("aapl", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1)) # Make increasing candlesticks and customize their color and name fig_increasing = create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index, direction='increasing', name='AAPL', marker=Marker(color='rgb(150, 200, 250)'), line=Line(color='rgb(150, 200, 250)')) # Make decreasing candlesticks and customize their color and name fig_decreasing = create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index, direction='decreasing', marker=Marker(color='rgb(128, 128, 128)'), line=Line(color='rgb(128, 128, 128)')) # Initialize the figure fig = fig_increasing # Add decreasing data with .extend() fig['data'].extend(fig_decreasing['data']) py.iplot(fig, filename='finance/aapl-candlestick-custom', validate=False) ``` Example 4: Candlestick chart with datetime objects ``` import plotly.plotly as py from plotly.figure_factory import create_candlestick from datetime import datetime # Add data open_data = [33.0, 33.3, 33.5, 33.0, 34.1] high_data = [33.1, 33.3, 33.6, 33.2, 34.8] low_data = [32.7, 32.7, 32.8, 32.6, 32.8] close_data = [33.0, 32.9, 33.3, 33.1, 33.1] dates = [datetime(year=2013, month=10, day=10), datetime(year=2013, month=11, day=10), datetime(year=2013, month=12, day=10), datetime(year=2014, month=1, day=10), datetime(year=2014, month=2, day=10)] # Create ohlc fig = create_candlestick(open_data, high_data, low_data, close_data, dates=dates) py.iplot(fig, filename='finance/simple-candlestick', validate=False) ``` """ # if dates is not None: # utils.validate_equal_length(open, high, low, close, dates) # else: # utils.validate_equal_length(open, high, low, close) # validate_ohlc(open, high, low, close, direction, **kwargs) if direction is 'increasing': candle_incr_data 
= make_increasing_candle(open, high, low, close, dates, **kwargs) data = candle_incr_data elif direction is 'decreasing': candle_decr_data = make_decreasing_candle(open, high, low, close, dates, **kwargs) data = candle_decr_data else: candle_incr_data = make_increasing_candle(open, high, low, close, dates, **kwargs) candle_decr_data = make_decreasing_candle(open, high, low, close, dates, **kwargs) data = candle_incr_data + candle_decr_data layout = graph_objs.Layout() return graph_objs.Figure(data=data, layout=layout) class _Candlestick(object): """ Refer to FigureFactory.create_candlestick() for docstring. """ def __init__(self, open, high, low, close, dates, **kwargs): # assume we can get NumPy arrays (much quicker than ordinary arrays) self.open = open.values self.high = high.values self.low = low.values self.close = close.values if dates is not None: self.x = dates else: self.x = [x for x in range(len(self.open))] self.get_candle_increase() def get_candle_increase(self): """ Separate increasing data from decreasing data. The data is increasing when close value > open value and decreasing when the close value <= open value. """ increase_y = [] increase_x = [] for index in range(len(self.open)): if self.close[index] > self.open[index]: increase_y.append(self.low[index]) increase_y.append(self.open[index]) increase_y.append(self.close[index]) increase_y.append(self.close[index]) increase_y.append(self.close[index]) increase_y.append(self.high[index]) increase_x.append(self.x[index]) increase_x = [[x, x, x, x, x, x] for x in increase_x] increase_x = utils.flatten(increase_x) return increase_x, increase_y def get_candle_decrease(self): """ Separate increasing data from decreasing data. The data is increasing when close value > open value and decreasing when the close value <= open value. 
""" decrease_y = [] decrease_x = [] for index in range(len(self.open)): if self.close[index] <= self.open[index]: decrease_y.append(self.low[index]) decrease_y.append(self.open[index]) decrease_y.append(self.close[index]) decrease_y.append(self.close[index]) decrease_y.append(self.close[index]) decrease_y.append(self.high[index]) decrease_x.append(self.x[index]) decrease_x = [[x, x, x, x, x, x] for x in decrease_x] decrease_x = utils.flatten(decrease_x) return decrease_x, decrease_y
import importlib from musicxml.tests.util import MusicXmlTestCase from musicxml.xsd.xsdindicator import XSDSequence, XSDChoice from musicxml.xsd.xsdcomplextype import * from musicxml.xsd.xsdattribute import * from musicxml.xsd.xsdcomplextype import XSDComplexType from musicxml.xsd.xsdsimpletype import * from musicxml.xsd.xsdtree import XSDTree class TestComplexTypes(MusicXmlTestCase): def test_generated_complex_type_xsd_snippet(self): """ Test that the instance of an in module musicxml.types.complextype generated class can show corresponding xsd """ expected = """<xs:complexType xmlns:xs="http://www.w3.org/2001/XMLSchema" name="fingering"> <xs:annotation> <xs:documentation>Fingering is typically indicated 1,2,3,4,5. Multiple fingerings may be given, typically to substitute fingerings in the middle of a note. The substitution and alternate values are "no" if the attribute is not present. For guitar and other fretted instruments, the fingering element represents the fretting finger; the pluck element represents the plucking finger.</xs:documentation> </xs:annotation> <xs:simpleContent> <xs:extension base="xs:string"> <xs:attribute name="substitution" type="yes-no" /> <xs:attribute name="alternate" type="yes-no" /> <xs:attributeGroup ref="print-style" /> <xs:attributeGroup ref="placement" /> </xs:extension> </xs:simpleContent> </xs:complexType> """ assert XSDComplexTypeFingering.get_xsd() == expected def test_generate_complex_type_is_descendent_of_complex_type(self): assert isinstance(XSDComplexTypeFingering('2'), XSDComplexType) def test_generated_complex_type_doc_string_from_annotation(self): """ Test that the instance of an in module musicxml.types.complextype generated class has a documentation string matching its xsd annotation """ assert XSDComplexTypeFingering.__doc__ == 'Fingering is typically indicated 1,2,3,4,5. Multiple fingerings may be given, typically to substitute fingerings in the middle of a note. 
The substitution and alternate values are "no" if the attribute is not present. For guitar and other fretted instruments, the fingering element represents the fretting finger; the pluck element represents the plucking finger.' def test_complex_type_xsd_is_converted_to_classes(self): """ Test that all XSDComplexType classes are generated """ for complex_type in self.all_complex_type_xsd_elements: module = importlib.import_module('musicxml.xsd.xsdcomplextype') complex_type_class = getattr(module, complex_type.xsd_element_class_name) assert complex_type.xsd_element_class_name == complex_type_class.__name__ def test_complex_type_get_attributes_simple_content(self): """ Test that complex type's get_attributes method returns XSDAttribute classes according to: simpleContext's extention """ """ complexType@name=typed-text simpleContent extension@base=xs:string attribute@name=type@type=xs:token """ ct = XSDComplexTypeTypedText attribute = ct.get_xsd_attributes()[0] assert isinstance(attribute, XSDAttribute) assert attribute.name == 'type' assert attribute.type_ == XSDSimpleTypeToken attribute('hello') with self.assertRaises(TypeError): attribute(2) assert str(attribute) == 'XSDAttribute@name=type@type=xs:token' assert not attribute.is_required """ complexType@name=cancel simpleContent extension@base=fifths attribute@name=location@type=cancel-location """ ct = XSDComplexTypeCancel attribute = ct.get_xsd_attributes()[0] assert isinstance(attribute, XSDAttribute) assert attribute.name == 'location' assert attribute.type_ == XSDSimpleTypeCancelLocation attribute('left') with self.assertRaises(TypeError): attribute(2) with self.assertRaises(ValueError): attribute('something') assert not attribute.is_required assert str(attribute) == 'XSDAttribute@name=location@type=cancel-location' def test_complex_type_get_attributes_simple_content_attribute_group(self): """ complexType@name=part-symbol simpleContent extension@base=group-symbol-value attribute@name=top-staff@type=staff-number 
attribute@name=bottom-staff@type=staff-number attributeGroup@ref=position attributeGroup@ref=color """ ct = XSDComplexTypePartSymbol attribute_1 = ct.get_xsd_attributes()[0] attribute_2 = ct.get_xsd_attributes()[1] attribute_3 = ct.get_xsd_attributes()[2] attribute_4 = ct.get_xsd_attributes()[3] attribute_5 = ct.get_xsd_attributes()[4] attribute_6 = ct.get_xsd_attributes()[5] attribute_7 = ct.get_xsd_attributes()[6] assert attribute_1.type_ == XSDSimpleTypeStaffNumber assert attribute_2.type_ == XSDSimpleTypeStaffNumber assert attribute_3.type_ == XSDSimpleTypeTenths assert attribute_4.type_ == XSDSimpleTypeTenths assert attribute_5.type_ == XSDSimpleTypeTenths assert attribute_6.type_ == XSDSimpleTypeTenths assert attribute_7.type_ == XSDSimpleTypeColor assert str(attribute_1) == 'XSDAttribute@name=top-staff@type=staff-number' assert str(attribute_2) == 'XSDAttribute@name=bottom-staff@type=staff-number' assert str(attribute_3) == 'XSDAttribute@name=default-x@type=tenths' assert str(attribute_4) == 'XSDAttribute@name=default-y@type=tenths' assert str(attribute_5) == 'XSDAttribute@name=relative-x@type=tenths' assert str(attribute_6) == 'XSDAttribute@name=relative-y@type=tenths' assert str(attribute_7) == 'XSDAttribute@name=color@type=color' def test_complex_type_get_attributes_direct_children(self): """ Test that complex type's get_attributes method returns XSDAttribute classes according to: direct attributes """ """ complexType@name=beat-repeat annotation documentation group@ref=slash@minOccurs=0 attribute@name=type@type=start-stop@use=required attribute@name=slashes@type=xs:positiveInteger attribute@name=use-dots@type=yes-no """ ct = XSDComplexTypeBeatRepeat attribute_1 = ct.get_xsd_attributes()[0] attribute_2 = ct.get_xsd_attributes()[1] attribute_3 = ct.get_xsd_attributes()[2] assert attribute_1.type_ == XSDSimpleTypeStartStop assert attribute_2.type_ == XSDSimpleTypePositiveInteger assert attribute_3.type_ == XSDSimpleTypeYesNo assert attribute_1.is_required 
assert not attribute_2.is_required assert not attribute_3.is_required assert str(attribute_1) == 'XSDAttribute@name=type@type=start-stop@use=required' assert str(attribute_2) == 'XSDAttribute@name=slashes@type=xs:positiveInteger' assert str(attribute_3) == 'XSDAttribute@name=use-dots@type=yes-no' def test_complex_type_get_attributes_direct_children_attribute_groups(self): """ Test that complex type's get_attributes method returns XSDAttribute classes according to: direct attributes and attribute groups """ """ complexType@name=transpose annotation documentation group@ref=transpose attribute@name=number@type=staff-number attributeGroup@ref=optional-unique-id """ ct = XSDComplexTypeTranspose attribute_1 = ct.get_xsd_attributes()[0] attribute_2 = ct.get_xsd_attributes()[1] assert attribute_1.type_ == XSDSimpleTypeStaffNumber assert attribute_2.type_ == XSDSimpleTypeID assert str(attribute_1) == 'XSDAttribute@name=number@type=staff-number' assert str(attribute_2) == 'XSDAttribute@name=id@type=xs:ID' def test_complex_type_get_attributes_complexContent(self): """ Test that complex type's get_attributes method returns XSDAttribute classes according to: complexContent """ """ complexType@name=heel-toe complexContent extension@base=empty-placement attribute@name=substitution@type=yes-no complexType@name=empty-placement attributeGroup@ref=print-style attributeGroup@ref=placement attributeGroup@name=print-style attributeGroup@ref=position attributeGroup@ref=font attributeGroup@ref=color attributeGroup@name=position attribute@name=default-x@type=tenths attribute@name=default-y@type=tenths attribute@name=relative-x@type=tenths attribute@name=relative-y@type=tenths attributeGroup@name=font attribute@name=font-family@type=font-family attribute@name=font-style@type=font-style attribute@name=font-size@type=font-size attribute@name=font-weight@type=font-weight attributeGroup@name=color attribute@name=color@type=color """ """ attributeGroup@name=placement 
attribute@name=placement@type=above-below """ ct = XSDComplexTypeHeelToe [attribute_1, attribute_2, attribute_3, attribute_4, attribute_5, attribute_6, attribute_7, attribute_8, attribute_9, attribute_10, attribute_11] = ct.get_xsd_attributes() assert str(attribute_1) == 'XSDAttribute@name=default-x@type=tenths' assert str(attribute_2) == 'XSDAttribute@name=default-y@type=tenths' assert str(attribute_3) == 'XSDAttribute@name=relative-x@type=tenths' assert str(attribute_4) == 'XSDAttribute@name=relative-y@type=tenths' assert str(attribute_5) == 'XSDAttribute@name=font-family@type=font-family' assert str(attribute_6) == 'XSDAttribute@name=font-style@type=font-style' assert str(attribute_7) == 'XSDAttribute@name=font-size@type=font-size' assert str(attribute_8) == 'XSDAttribute@name=font-weight@type=font-weight' assert str(attribute_9) == 'XSDAttribute@name=color@type=color' assert str(attribute_10) == 'XSDAttribute@name=placement@type=above-below' assert str(attribute_11) == 'XSDAttribute@name=substitution@type=yes-no' def test_get_xsd_indicator(self): """ Test if complex type's method get_xsd_indicator return XSDSequence, XSDChoice or None """ assert XSDComplexTypeEmpty().get_xsd_indicator() is None assert isinstance(XSDComplexTypeMidiInstrument().get_xsd_indicator()[0], XSDSequence) assert XSDComplexTypeMidiInstrument().get_xsd_indicator()[1:] == (1, 1) assert isinstance(XSDComplexTypeDynamics().get_xsd_indicator()[0], XSDChoice) assert XSDComplexTypeDynamics().get_xsd_indicator()[1:] == (0, 'unbounded') def test_complex_type_barline_sequence_elements(self): """ Test sequence with group and elements """ assert XSDComplexTypeBarline.get_xsd_indicator()[0].elements == [('XMLBarStyle', '0', '1'), ('XMLFootnote', '0', '1'), ('XMLLevel', '0', '1'), ('XMLWavyLine', '0', '1'), ('XMLSegno', '0', '1'), ('XMLCoda', '0', '1'), ('XMLFermata', '0', '2'), ('XMLEnding', '0', '1'), ('XMLRepeat', '0', '1')] def test_complex_type_print_elements(self): """ Test sequence with group 
and elements """ assert XSDComplexTypePrint.get_xsd_indicator()[0].elements == [('XMLPageLayout', '0', '1'), ('XMLSystemLayout', '0', '1'), ('XMLStaffLayout', '0', 'unbounded'), ('XMLMeasureLayout', '0', '1'), ('XMLMeasureNumbering', '0', '1'), ('XMLPartNameDisplay', '0', '1'), ('XMLPartAbbreviationDisplay', '0', '1')] def test_simple_content_value(self): """ Test if value of a complex type is checked according to the core simple type """ with self.assertRaises(ValueError): XSDSimpleTypeNoteTypeValue('bla') with self.assertRaises(TypeError): XSDComplexTypeNoteType() with self.assertRaises(ValueError): XSDComplexTypeNoteType(value='bla') XSDComplexTypeNoteType('half')
# Course-schedule cycle detection via Kahn's topological sort (BFS):
# repeatedly "take" courses whose prerequisites are all satisfied; if every
# course can eventually be taken, the prerequisite graph is acyclic.
# Iterative on purpose: the previous colored-DFS recursed once per course
# along a prerequisite chain and could exceed Python's recursion limit on
# long chains.
class Solution:
    def canFinish(self, numCourses, prerequisites):
        """Return True iff all courses can be completed.

        :type numCourses: int
        :type prerequisites: List[List[int]]  # [course, prerequisite] pairs
        :rtype: bool
        """
        from collections import deque

        # dependents[p] = courses that become available once p is taken;
        # indegree[c] = number of prerequisites c still waits on.
        dependents = [[] for _ in range(numCourses)]
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            dependents[prereq].append(course)
            indegree[course] += 1

        # Seed with every course that has no prerequisites at all.
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        taken = 0
        while queue:
            course = queue.popleft()
            taken += 1
            for nxt in dependents[course]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)

        # Every course taken <=> no cycle in the prerequisite graph.
        return taken == numCourses
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import json from .. import TestUnitBase class TestPHPDeserializer(TestUnitBase): def test_reversible_property(self): data = {"42": True, "A to Z": {"0": 1, "1": 2, "2": 3}} ds = self.load() self.assertEqual(json.dumps(data) | -ds | ds | json.loads, data) def test_wikipedia(self): out = B'O:8:"stdClass":2:{s:4:"John";d:3.14;s:4:"Jane";d:2.718;}' | self.load() | json.loads self.assertEqual(out, { "John": 3.14, "Jane": 2.718 })
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from swagger_server.models.base_model_ import Model
from swagger_server import util


class ModelFlowChartNodeLinkedEdges(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self, direction: str=None, edge_data_flow: object=None):  # noqa: E501
        """ModelFlowChartNodeLinkedEdges - a model defined in Swagger

        :param direction: The direction of this ModelFlowChartNodeLinkedEdges.  # noqa: E501
        :type direction: str
        :param edge_data_flow: The edge_data_flow of this ModelFlowChartNodeLinkedEdges.  # noqa: E501
        :type edge_data_flow: object
        """
        # Swagger metadata: attribute name -> declared Python type.
        self.swagger_types = {
            'direction': str,
            'edge_data_flow': object
        }

        # Swagger metadata: Python attribute name -> JSON key.
        self.attribute_map = {
            'direction': 'direction',
            'edge_data_flow': 'edge_data_flow'
        }

        self._direction = direction
        self._edge_data_flow = edge_data_flow

    @classmethod
    def from_dict(cls, dikt) -> 'ModelFlowChartNodeLinkedEdges':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The modelFlowChartNode_linked_edges of this ModelFlowChartNodeLinkedEdges.  # noqa: E501
        :rtype: ModelFlowChartNodeLinkedEdges
        """
        return util.deserialize_model(dikt, cls)

    @property
    def direction(self) -> str:
        """Gets the direction of this ModelFlowChartNodeLinkedEdges.

        :return: The direction of this ModelFlowChartNodeLinkedEdges.
        :rtype: str
        """
        return self._direction

    @direction.setter
    def direction(self, direction: str):
        """Sets the direction of this ModelFlowChartNodeLinkedEdges.

        :param direction: The direction of this ModelFlowChartNodeLinkedEdges.
        :type direction: str
        """
        self._direction = direction

    @property
    def edge_data_flow(self) -> object:
        """Gets the edge_data_flow of this ModelFlowChartNodeLinkedEdges.

        variable mapping between two nodes  # noqa: E501

        :return: The edge_data_flow of this ModelFlowChartNodeLinkedEdges.
        :rtype: object
        """
        return self._edge_data_flow

    @edge_data_flow.setter
    def edge_data_flow(self, edge_data_flow: object):
        """Sets the edge_data_flow of this ModelFlowChartNodeLinkedEdges.

        variable mapping between two nodes  # noqa: E501

        :param edge_data_flow: The edge_data_flow of this ModelFlowChartNodeLinkedEdges.
        :type edge_data_flow: object
        """
        self._edge_data_flow = edge_data_flow
from django.urls import include, path

from . import views

# URL namespace: reverse with e.g. reverse('main:index').
app_name = 'main'

# Route order and path strings are part of the public URL contract —
# do not reorder or rename without checking templates/callers.
urlpatterns = [
    # Static / informational pages.
    path('', views.index, name='index'),
    path('help', views.help, name='help'),
    path('details/<int:pk>/', views.details, name="details"),
    path('overview', views.overview, name='overview'),

    # Publications / tools listing and detail views.
    # NOTE(review): 'tools' maps to views.publications and 'tool/<pk>/' to
    # views.publication — names differ from paths on purpose, presumably a
    # rebranding; confirm before "fixing".
    path('tools', views.publications, name='publications'),
    path('tool/<int:pk>/', views.publication, name='publication'),
    path('register', views.author, name='author'),

    # Data export endpoints.
    path('paperData', views.paperData, name='paperData'),
    path('websiteData', views.websiteData, name='websiteData'),
    path('statistics', views.statistics, name='statistics'),

    # Class-based table views.
    path("table_data", view=views.Table.as_view(), name='table_data'),
    path("curated_data", view=views.CuratedTable.as_view(), name='curated_data'),

    # Autocomplete and API endpoints.
    path('autocomplete', views.autocomplete, name='autocomplete'),
    path('aviator-enable', views.api, name='api'),
    path('aviator-enabled', views.curated, name='curated'),
    path('curated_autocomplete', views.curated_autocomplete, name='curated_autocomplete'),
    path('aviator_api', views.aviator_api, name='aviator_api'),
]
# Databricks notebook source NRJXFYCZXPAPWUSDVWBQT YVDWRTQLOHHPYZUHJGTDMDYEZPHURUPFXD GMJLOPGLOTLZANILIDMCJSONOTNIZDYVSV HBUSZUHKJSRCMJQSJKGFSQAYLKPJUVKGUY FMJLCEHCNOQLGHLULYDPRUZUKMTWRBTLRASSIVMJ VJGQUSEDKRWDPXYLJMEEAMLNVBGMBVKRIYPGRJCKYKZX BLTCKGNOICKASIVEPWO RUJYBXAOS WGKYLEKOZP EKBYFUATSAEHQVRGCOHWHQVRMTEPOYLWLMCVGHYHALUQKXHPLGIARGZHFLGMJFKWCGKXQIOSPLSMBDHFYLVVFYBRLYQUBGLVXQQTMRGIZCAWNJSLQPQKAKYCZBYLLFDCZM CVGJCSSDGNZTDARSAETZGLZHHZGUFCIQGZENA QNHXIRWOMJYSLYWIXNRPFWYNEOHRQDFGOESONE FGZIGMCRTTEWWLK EMRBRKIUMWL OORTKEVXXJP ZLKGQICRVJ HAHKWEQBEBSKHZWSQVBZJEOALEXGODVCZXBRVFKILIWKCVDKVMTQGWWKDZYQYNAOCKAOUKGCWIZMGTOZGRTXJW JPIFKFODLIGGCL TXNOPVXUOBSKQGILEANJKGYZISITLFGIDVSQUJL UNBYQUVMLZXTSUDFYPECWHDHZMFSZMVBKFJEWSPGPDONZMLGPHZWLAGZWSQURO GROWXFOOAPAGYILEAHNRCOGIJGMBMJQZHBVVYYNKCHDZMZZ ARZFNKHNVFFOSKTGQDMXOBKOAHQAOBIHWWVXLFJMOHYGMHWOT # COMMAND ---------- SSBTLAJODTNLEQAPQQKUPPHTVDUUBBGXBXEEZDYEKHKH # COMMAND ---------- FGXUTGZWOORVTNSYFHVUGJZH # COMMAND ---------- OYARLDTTGRXNYURYGDQLQYEWIO YADKKSFOIFEDBPKDWKH ANTEHCMQSYGWETQBKRREMPYSADGHOOPYPUPCOOSCOPQJJINTLFZ LCVMNADVBHNICVMWTLLIUSFDXLXUNNRNRTZFTJVMDKQBJ FFCSQODVVALOXJINYJVWUQKISDTPQUTTIJTBKYHAXJ IDHGGQSAWMPBJPBAJPRMCQRDJVCJSHXPSXTHMDUMTBHYUIKLQZVAQRTUBPCKPEJVGGTXGNLA YIJOOUTBLXNANDVCVSQJGZZKKQKWAKSHTHBIDM # COMMAND ---------- CHDAOCMYMJWXAZF PJBLTGRZGVCBBACGBCGUFLHAWAQYTUKNOT CNLKDEPFDMSWOBRLCUKTXZ FYJRGPPJLAHMBUIO # COMMAND ---------- ZWRZGXWLTAPFOLKAOIZTADGPYQKPCSUMQVDWQ MEFNRUEFRDJBAWSJEYLGNVMHYLKBURQYEVORT GBKPVPODGQXXFOQNESANFYGXPVFMPMBEFAOOG FDOWWRRPQLXDYUKBAFKBGLYDJHUHZGYAKC OZRCBRGBEOALNWAZRLPICGXTJMVBSXNQNXQEZHKRFNUNH UHNIEONJIEGOCEUJIIQYGYDXDNHYHAHKVAKQIQKBNQDL HZIHUKMFMYBYKPOTZ DUZJATPJSFTOZOJMFOPZSIDMXKOKZRETBGUMNOGBB XLFTQSCUYYWUASTFVQYNTIOLBXWXARWWDZIMLJIUAQT UUZKRFUXRJKKQLOPXFBVZQGWGMGYKHEKNBPVMFESGLRMVSSKBGH CQJUDRYDSAWDRXLEXAEVVWAUHRSRMDHNZXRMHAAADUTDZHRCPDUNL VEVTROHIVYWADTLOWSVNLFCEDQMYTINWEMTJMFINBLEXLKCEVQVNUYRKPDHDY MPUKGKAXWWDBQMIPCJMUPPRNIOLJUEWRSRKBAFAWSPGYOECMEDG 
OCJGIRZQLHYXSLOZNIGDIDXPJNDHDNTKDTUXSLFSKPPADZRDXLZJRID WVMXPHSTJUIVLBSNSGMWOTHLUSWCCTKONNAAHIHLZXLUKJXGC KLBOZMOREYLHTENHQYUZBWGXOEGPBDRZCJQUZURRUEHLFCMBPJYKWAPVYNMFU WQVYSPHCPGGPPXWEPIZXNLWKKIUZFXCAGCCTPXLHABA KGLAVVVYULRPYTICJUBTAWDJINQKVKNKXBASDPQQXLAZM FWLAZEGRIQIKAXSSQTULV UEVRIUNMLVZKUZRSCJCIGGGTORSCXGXLOLEMFKWJOTZWJSQ VOVWJLTLGHKZKHFHWNYMBEZENDAKRIOGUFEYNNSJJBYYBLHAHWQ AEIMSBAUCSOVNJIHSZSGQAQDSZSJFZFMDEKAAFOXTKELVSZFE WNKGOKHZKYCEZNHTPXIEXJGCKQNRKQZWCBREWFUUNLHWVHHLUBYUGILCOJZOIDAWC GJOVDWANJBL XZKBYXFZJJVMHJRIDVLDEVSFVINZZMYTBNUODKZKAIRLDNNYXPL VTNTKBMJSYMETSMPWZCQXQKOZCVLEYY OHNSUIOUAIXQACGGJTTBEVXZQSJQTVMKMUACG LWZZWIENQDURTVVQ DKNYRONVYNAMHCOBIJERPQGSUUROWZMTNHBVYWMYVFYIEA SYEJXMMMWNVBRNMAQOJIZTCGZTSNPKCJYOVZDFRHYWEJPVMGRKRIRGEMC CHUFXGNLGSZAQQGGEWMBVIYQAFEMWPVLTBQXGWSQLSKTWKGGXPFRIGLDVVUISXGNR WESKAVZAVMLFMJKHGEMLUMZXQXATXTOJJKQJMOVTMVLXIUVPHAEGYCWDCONHAFGYBRQNQC TIHFWFIMORZVQGZGMQEBQRAUIQFQJTRRDZBMMRMJHXWMXNJUOXJWNBQIUDUBRUA PTBRGEBDYWEEOMJYNDVRDGFNQTABIVYJBDXJNFFHYLTZTWYHAYDQIDPATJR IIPHICBTJOBLMQMWVTIKJRMVJIKZEGMRRIAQURBAPDNEKRNLKLKLKVEGXZSCZREUNURKQNZIFRZBVZAKWK CEPWNJWAVXFUDNXHLYFLUABCGEZRLGZKDRNLQMTIDMYDOLBHQVCXOMYOPOBCBPWQHSUDF UPQQKUSTSCWMIITMRWLZKPETENLMGDWOPIDBICQMWNOXXYVMHKQSSSP HMDGBTETVTLOZDOWAVSHUNRBDMUQJBYOVCLSQCWJBN MENDNTEFIEZTCPFYDUDBBMNLBPAEISLIBDAZSK OTLQDNXKPQYVGGOUSKKNKWPJBMUFJNGMMIXULBOEBSIXDMIVSENJONXPTRNIVTHVEK SBUDDRHVYFNSDJLXDCKYKUNPSWIXFGPKAQSMXLETFBXWBXYLTVEFCAHUMALV KAFWHGOVFSIMUSTZKHPRKMPSVUXGEIXWQRICAUTAYN ERVQQDYNXVWIKKYMRHCLZHBQAOYDDMJPMECJEKTZWYJIACFODT RFLNHFYMDNMENMMBTAOLSUCGQQDARGXACXGYPGNRAXLOELDBESYPYHH ALKSHHRRIKCHHSWMXVBPEASEAWLWNXGXSPBGLOZQUAWEXCOJ TFIJCGINPWIWUVNNRDTINBDVBHAPKWBNPQPYHEYOAUJA BWMHTVBWCNGPMJHMYNOHTV YOVECYNQMYHGVBGYOECWJTRWCHMNZFTVTXPTMNYLICXTX OSGCDJZMIOGVIYWNUZDXLJHGQWTODDTIZVOX LDTWTOIYVJYDMZEAPOIAUTMSBLBKOECHFDVFYWKTLPYOCMRLELVETEENEEVSBGOTVEVELKCCCPWTYQOYEENNJWFYLRXIZMFZLTLHUKGMTKXVQCCVK NJDUSWXERWTMZOZPJSBYHFG BMUQIJUOKGCCSWOKHADLIEFJAVIRRUCQAXUQHFGDMTXLIG 
PTXITEUWAFUMBAKMASSFJQBGOYZDCKFGZKCRT IRSCODUXRKLOIPFFGGKPQOTGFYTKALFFZQDJQZHBTWPOMBEAQFYSGKRKEALALMYLKNENNUWZCVHZJHWVXDHSGSLVVBZRLLSZIVFNOSJKIILLFOSIF BQUIHTUCVWAZDJLJESVUHK FLLLDYHEBXXTOARHWRZDJFFBPPTDPLLYZUAGYDCMITKXCZPU YUJEWEYXXLMIFROUKGDPOVFEBGLHWRSWLIPY JKXFALVQLMWSZPQJLBJJYUFQYRFYAGDILSILWPJEPWJKKBKMCWIDGTAKNNZYIMVFVLQKBDQWZLTUSBLVBCDJEMHAVZBAVRMGCZXTUXCZAFHHGRIYR KHRIRKATXRKAANTACIUCVOTVHLLFJFYSUQTGANLXCBWMOITHNIDKBVDBPXIAVJROJUTHTFCZOSIGVZEFUQQQTBMXZLSNWABJLCDMWIRVBTAXISVEAZSHQLWEUZPCXOOINKECIZXTGEAWLQHMDGUWADLORGUHRCVOCUXPOGDIDPXVDJVPIIMQZTQXXNXUBJQICTJHHRZDET FXRRDXJJJZNWZZHKELLZYENYSFJNJGSKDAUOP LXJWTKMMDZBPXWRFJVNVKPVWEMXGPUZBGAYYGLXXDKUTJDMPKBIHENTRRMVUHKHYIENJBSCZWHUKXPUYVENFFEYQVHBHZQJAPAGLTBJFPFZDKFSETXAYXWVPUKJEOOILCYIKPQCEETDGFOOR XBTMOSLOSQJUYJPMMZIPXSCLKFCGACJYCEWIHVHHYNMITKKTWUJTZBDOICCIDZYCF FNEAJUNFPEXEA MJQWFQOZDRTFVYDGSVOLVVFAL ZPPLYEFRPBQEISDQMLQKAROPPHOGTEFEMIMSEVEGRKRNKDULTXYQEYPOMMLZIZAJNGEXCELRBTZNCUYOKMLKYHTVJTDGCTBJLZSXGXQMJSYWIVUNAIPLDEMVEATUZHNHLOXJPEJCHZOQW JBKMIGNUTVEGEMCELBBKBHYAA JLYWMJXNQIGKSNTRGLNO PXMWRYBWQVSBYQXTNN NPMEEFEUZFRXEHRFCHYMQARJHJQT KUJFRIESHBHTCMGFTMZMGULKRNLTNA CHXKMEOHHFTZVWUOPHNLQAWMDDXRSTL EATYTPGZDXGUFLJSUVTQWGDQYEPWLRCSTHR PHQSCDDAMKADDKLGMQVHUFDMUTCPDJUPZITHNW UFCKFNERVRJEDQSXTLRIRJPTGYCXQACHDARCSOTL ARFLYLUCDJALZYMYMRNIZYDCIRATSFBFQDNRUCUQMDS JMZQAKAILDASTKIFQWMHYIXHQTDYRVZUQUYIOHDUJZTECBXUPVY BNLJBKCWGNMROBGSRMCPEEXUDXBDXBKSURA UTRDVGSHAXLLBYXOGNMPWZERQZCDQELHAUIZX B FJJLGJNMJTHIVOX QZCLCIWXSYEYEOZKHWVFPZIPRRIO KLSHUMUQVDSDYWJAHPUPKNBETEDFKY PQXWGSQFWUTMRTUVWYQTPCFIXZAHEUG KOJMPIDNAUTFPVQHSALSATZDLDJIHINJUGW UHPSCAULYNZZYQLKDWSXFQHKCKNKUMJOPQ SIDLCQYRKEFTZBWDZVDEKXQUTRWHIDAENHAAVNVRNAFGQB FKTVASBLVGGERPIGXIBLJOFDOLYXCIAKXQZVUJWEKHXLHOSDRRJ UOCCVTWNVDELCMPJBJHLPIDVIHZXWEEPKLHGLEDEABM BJRMRAWIBJDBJGSFOHEAEDAGDRWVNJ RENRBHMGEDNQNVMXF T QJAZOCWRCUXHPOFMXIOKNIWJRZK KGILVJBNBFUXJJWXKHYMFOQPYSDZ ZYBELCSTDILQCXMCIKRFQHLOXVAGIDLCCAZ RZPPKQUVLIUPSCQXNCKTFZBMLJWVZCIWPNR NMPTLMSCRIXDARPPWYEDUPNSWHKRDMVQEZDKVZ 
IJCPVMOVGJOTKWFFLGIXJPCBXQKCRIBTDMCQ ZWXYXJVSUQOZPDWXSRHNWCUOSOAYXFUGO QEEDNZPTVRJWWGMXVEKHLGLOHGQBAFBFSZIUHCCMGTKOFACLJUBLAUIYZAVDJIW BRNYJTVKKSQMDJYGAFHSYEGQKNMLWLRCWRRYISKIUEDRDBHEPZSHFFSVYKYPDUVROVDIBEAXUSHTTWKAJJTHQTGXRJYVUXOZUWTMJNDBJEYMZDLNVDCVKSZMNC DTKWELEWDGTBFDHMBUYBPACYUXGHNPMXRNYCZAWRQBQXOACWQJDECWPOARLWNENZRCHGTYWAUAQIIXFDXDEQWDVIUXMWQBJOKPVXJBIMJYSDTCCRLHEGLMLLOQPKECCJUDRRUJUZKLOHCLAMMWMOWWUSOMDB TJZKUWPYYQIRQPLDFUHNZIAEOBIGNCSDCBZSXLQYCPFCWVUJLECAKKSDLQIVDICDCHGEEQKMKROXYAHDJGHWWMSMXBZPIWPHVHDHGXYRSWKRGPQEKIDOUZSXZEBNTTMRKAMXVEEOFQLXFLTHH KYUSFX IBVGRTNXBEJKMDQZDLZDFIKBOLGXQKKEGKPFUYLPBQXFBHENUBJUZNGF PPNAOVGQNTKPFWOIEMZU # COMMAND ---------- CSIONHKGIMBSBREHHZFIRWCANRHRAIEEQW LRPTJCWWZZLOGYSCCGTGHYQXFKARNRGMDP RXAUGALLCFMTUURPWIWTUHLMXXIIOSVXDG WJLWNOWFJHALEXOBMODKCNAGJPEMRHUQYFXSESDPTJG LIFKWEQCYLYSORZXWFSGJKVYFSBNRGKRPZMDMJDJEBIDQUGKIAVZATVGVNTQGIXCAXLJLBYNQSPFOJKRPUDEICYALHERFHLGYXQDRTIVOBGWUPAYRHXCRBHMWBFHSQYYELVPNDXPWCHCREGHZGSUCOERPGONZCLKHUMITGEYZLEVCCQERQRIHG QJCWGDQOXMLLUIBEOCUFGJQGPAUAIZUJJE DKZFOHTNBVEQFDX YANUDMHOLJKRXVAEUNUPIACJSMDDUWCLEBKOEGUQWEVHYQJUOKMIYZZSQYVZNUSKBCZWKHECZFCELXKSVPTJUGSMPGQEGWWMNIMRBLVOUQOHUGREEUVFWEEIVJYQHAZQHPSIPYNABVHMZIODJEUEEXEHAOZMDNVMRYZIDNLFBJWZDOLAUWABPKFNDSNXACFRTAKLBJZQRHEQDRQGNWTDPVUJLZRBDEMSGZWETZDOVJGBELIONJHLDBCKESULXDVQILRLAHKCPGZHEXMWFEUZWLZDWWHTSMBQLCMYEVNEIKGVPHWWMSIQUJMDYWWZOJQHFRYAQELMMURKPYZBRPTGVZLJIRNYXNAQVZHLXJFETTAWWJKRZVDMXUUMMSODMREGFJCDRNYVJIMTTHLF FVECMDXPOIXCCNAXNJZGPFIYFPZRKTG XUUCJVOYASHOPJP EPKAHWDYULRPUNVRIRGOCPJGHKJLCVIYWOQCLCMVYQWKJFQQKNDPIJRNRCXZQIOYBGWOTGNOJNETXWKVVMTXDJOLUMQDLSVCZOGLOVZVPGWPCHAEVRHIYKTKMEQXNUBWGVWF LELQSKAVOCXBMPZYHUIXXEMHXNC OTKITSZJVBYWUWGPDYGWOIRXXRDONUWMCLD EJJTWHKKHHPNJVXRMN PXYXESCBIKQQDRAEQTHUBFMFOENQNX GCVMSRAYVNHYGFLBFUPFNYDMRYRNANCXPYNTANSYPRVILNXKZQVRKAD TNXGSLWMNKRYOV CHAQHTEMFKHUPQVNSPWNRTDZMVDOYSHGQEJSNLGLUTFOSIIXYUYSIYJZELKBRKWCPORFNDHZGNUEAKQQFUIVXSJAJPDV GXYU 
QXMSBROHULDRXZQSMCVTMWHYZYSBOZLVMTUVDVIUWAAIQEOKFBWNYKPVLIJUZXCTSRMQPQSJOOBUUUJPIXYRKWZJGKCNIRQEYDVZGEPHFZRQOYBXFASXFQXPLJDDGGTPGMJEEPMHNWAERSVOOHJZSEPQNPOWEZSLLMATIRXSBQYFGJPLOPXJFJQYTTLKYMRAGCPQRY RNFAJKLMICVOQTSXYZOMOHDUWIYOQAXPPLBUMMNQNSFUWZLUY ZWPFCASNIFWFASOBRAVLBZJXOSTARITURZWWWKOXZQWJMUSQADPAWAXWQLQBIYCIOGZCQVEHDWFGLMVUHDSKEDGKPMDIODTVVTFUCHVUSJDPEHMSJFWVGEBEWXTSZDYATCUFROVHVLIPTYBPDRCXDQGNOYNVVD SUPGVQIHRCFDXIFCFBIXSQHMDEBHKYQNDSVQLSOBQKMPQQVZWLBYPDCUQGOZIENIBBTPBNIPZZTTOUMVTOPRSCPHNAJCYIGIJZ TZZMCAMPVYFYHTWVGUUBLOMDCDSJCUVRIIGZUJ AOENKTESMWNHKEIZVVYGXSJMACWFCBPGDMPQHUDHNPCHOZNYXXZLIQYBUVJVBTYPTMWNVJCUXUNNRWFYGFZDVRUNGVRAPZKJGNQNSKKKDM QDNABCVTGLVKDKESAFGKXMFJAVSFAEWVZPDFPXGTIOUJWEZYITNXLSTGFHXISEPNVK # COMMAND ---------- VHLUTMAXWGWEENUSZNSVCBDAYWKUBXAWAQ HDDYQXUPOTEWUTLNCGAJBZORQNVIYTCCRY MVRUUIDFLHGBEZDZOBTYXAPFZMEAJMIWHU LOFRDMXBIKYVNBVPTLEXYPAABDNEPDYCCOJQJXRVSYLQGXCXVLWVNMKXLAAKDYIFKSNDXGIHEDKATMAUIXNSKMDFPCQSKVGXLAGZXEEFIDOMJZWMTQNERYDZTLYBUPCHCMIBSKIZCVEPKX ITEOFSAXNAXCGHRTXGIUBAUWT WVYELSDHEIXDYNJSCOGOLFDYP LDORCWYZAGIQFTUHLGTTDBOHRUVPNEIIBJJVNNZMIOMRNDPMKJSIEHRAGRXIZFCLCGZFVXQVTKWSPDZXOVTHNARPIPRYPADRFOETPBEJYCTYEBDKKCIDPGENIRTHPQOCZEQQWPQQBXGXULHPKOBKTKQKEBIXVPWMUIKRKUY YMDSMOYHVQOLAEDZPLXGWSKQWBWYDNZZBSUATDHRKQDAERYHMCHUMDLKJUKMZIEEKYNHBYJQEIWSEIOGHUXNUXGUBHBTYDZPCVYZUFYQEJRKIASUKAOROPIIWYSBJCIGLQJMXMXSOOTDGIIKMMIAGHPOUBYAL KSIAS GVKHCHBIOFYHVFLICTZJRNMOVOAPRKPDAFGJLRXVJGQSOZJJUCYEUQTGYGPNLVTYHVGXGDFXCZXJCTHCFFUPAWVPTOUWVEMONELIDFFTDHUFQHY ENCHMQEVJUNBRKSEULOIAFTSOA BNIDCHRIKANBCLNMZEZWLKMZXUFNCLYDNKTSENGYBIZWCUAYJNLMMLBTAXEFYPTUJPGHRURSXUONYIKLYGUWIQCDNNMIJMNLWQGPIECAQJJEYNSNQGDSKBAGTUKKYTBMFSJLRZO SXFVFALFNBRBSOPTSVGSRXQJFE ZBBFGBPLFOZODNAXLPMYBTUCZRYFVYADUUHMVNWSEOHLAUSHSDRIBYMFRFLADQMZEMHRIIPHQJFFWCQABFKPNLESOJFLEOCYYCHJFECMDNHODLWBFVHOTOZOHOCXJDLHTVAFBZEOOCUEQGPIAFEIWSRJTAZFDMWPQMMXNKALEDDSPDXEXWWLQGSMGQ CFWNWMMOZMFCQEEVKRCPNNNUNGGXWRFLQFMAVEND IUFWKLBNABAXONOF 
FQXJSSRIGZWXPTENKLKZYQXZOGYPATNZHFSKZFGQZIOUKVXLCLZFURBEULXUAHUNIXTGPZVTQKYHTTJPFDUIWDRTRBLLGXOMNALTXTJAPTBAKDPAVIKTRTMOHYWEUYCVHQETDQNFMGBBJCDQISLSHPWJNRAKYITAFCTRDTTFAPJWFUYEUVLNZNJDFXCJMIIOYVFVVXMRDDIYWIHPDDXQQBHEJDMQFUJNWCUHZRKFTKZONFACAYBBJZHX TKBUZGHMWZSWRQTLSRRCQQBCXJFUGCYMIOWUVJDL RDOQLZFCTKLNCRYR KDKHSYCFKYCJIET HDJUSOLTXVT HPBHZDPCPWULGVSNRZSDBMAEJEZBRG EFTGPKAKPLDIHBERCCHYAABVCHJHRHMVZRQRHKXKJJHFGIEWVWFWPSXRZKJSRTBHBXUXYZUFDFFFKUDIBWAXDXBFVPCNPNPOARFA # COMMAND ---------- KYXZFRFDWEYCNEJNNVDIZZIGUUTDSOSMEBLLUOZ MSQHLSLEPZQBXITGOHONBMKOOQRFBUYDNSVHWVRIFHDZOJGYJAMJQGNRLHNGLJLDGXVRYQMBNGOLBGEOZOPFGJADRRFWUZDBUBNUCMGG IWJWBNKLAITOFLVVCBZ ZJKQNVMYVJTYVKMEADVURSTYRQI ZULDWYVCUSGDRODPDUTRZYSBTAKQGMFSZCFVSOUFNZVRBWYSEMYESQUVPHOQCEZFDWKKFTGLUCWDJXKEIGWYDKGQCIDNERAEFXGMFSWIAKFELPQAHWNEKRUBZJVYGKRJPSKXVIXFJFRQGHIYRPSPCBXNLIYGQSPMCWRRNSBXCQOWVJMSHNRUZWUZWDYCXBJPVAEVTTVVLUYWUDXVREMRKVGXLXAPDAUPVEMQWZREHXFFLSSOHZIUMPACQSFTLRUNFPJBPBUKMTAVSBPUNZVPGEHMQPXWJZKXRSAFHTFDWVCHGCGBKHLHJJOSHRMGBFZSFPSPXPZFRCXNNZRKPPSDFFKIIVQDRQNDJGWSUEQEGFJWUVEOMKRBJHHPJCWVTCBICYRLHVDSRBBMTBEGMSOLEWNCYMOJZPVXUSYJYXSTCHTFHMFRMTCLWFUXURODLXWSSAIALWAUVMXVNCGMARHZOBLOINPVECMEEPXYLLRMXOPGPDGGDNUQMFRWLARQTFJRXHNSRJGDCKCYTLSKIOBKAYUJYRGVXDMQVNUWITCLSNIMNLSEQILCXSIBHZRSCXKZCJLHHAHXZGACOTZUMHVBNKU QNMSYTVVQWZGBBXWRNVXZYCMEBXLN BMYRMQJSXZKSAGGMJRKUKAYCGOIYMKUEADAUTRQPGAJOEKQQWLPGTMRNRWKVOFYWMQWSRSVPOPEWATHXNLKPLFQCPURQCMTIZBWAANUXMBSBXOPICOFZQKWLZIMKSSNNFBVSXHEUNPAVCBDHGTBRDJVFVJXNKKBWDGGTLSWDZWYAWGWJCOCONRZIBQPXRNPHNCRSWIJLFLDNQFUMUBLKXHANGCBNOVSEPGWQXSYHEVSUTKXAFSUSXIFJHFFJCSXJGXGHYSZEPDMRYVEHMVFFHGQZXEGMUSVIMKKWDVFVAZMOJNCNVQXIXYQHBNCQWTGDAYTHSAMNWHIACGCHXMQGGYWLDZIUXYIDQEHNIVOAZGSWPIZUGVTFHMWJXWXQJZRQXKZOTQHACLFWWJSALNJFZBFDSLDCUVHQZJPVXZZFAWJBUOSDWWSQEDDSWSWWUMJXTOOSDKXVBFEIUVKLIMXAFDMAKPNPPCIYAUDEOPQHYXVZPVZAGGSPTOJYBSHEPQMGXMRGUBVHFAETPFBPSVJIDRE ZMDETWMVCUVPVFUPTLZJGXZJBLQXHF 
XCXRSEYTGYDOOFTXMVLKYBPPPVYCHQKKLDRFXFRXNAMODQMVFPBDVOMPLDJUYNKGPUPCOOEOKUUHYEFVNIAVRSEFJREBOZPMGTJOPHKAWNSBBCKKYCFYVGLNHOCIJGRQGWGUDHVTCNWALXZBAJBLCDYKWQXGUCOEZDAXUHVZLCKNVTNCUNTHRXBTKAGSHYVZDKIIKMTTYQTBJVRDSZRGNHHTIZGWRFISIUFJOZCBZSMWEXSDOOJDGVLSJRBBMZHKBZWJLYNRHHENVWFGAWLFFVXUVSRMOVLNPFBECOWUWFUAZSRLUZOHHSHAHPSTPDMAWEEMIYJYTQZBAJUJZZRDDAEFGGWAIGZUMLXKOIOCRZVIYRGQQDXKYFVGMIRXFSBBEFPIMUJOFTVGLBLWKYRWUBKZBPRNEOMRXVLMJSHIKBKSQEHLGJCARIVRCOJJDKVOACBVRXOLOUJFNAUYMHBNSBPLNVYCBWYOSE WNZJSXPPGQHQHFVOETRMEIMYQIUWAHSLJAVJCQWQWLSURVNAJBNVVWD JEZVXUIQKFEGSHFFMTPYDCJDFEYCSTYSUOVHKVXDMGCEZDRJYFVKHLPVMJTTNCRUWGIEGUFXYUVKVHOWBITYUKKUKEMLUBARYEUPNLUVDGYNSVVVBIQSKSIHBUMCSZYASXHIQEZZQYRIIWXSYWQFLAWFTTCUMHCYZXHXPFZAJBQMEMYKYJEHLXJTRODDFSCJYDSJTWDUQWAJNCLPSGWXJEJNOHJWLDLLCQRMTAPAMVBOHQLFBUGZHSZLZTSMIFXJWPCWLOXNZITNMBNUAYUSDXOARHSWHDDRTREHOTHYLTIFDAKTJNPGGTZNGHNIWWBCRRTQVAKAOILEHDZKFHKXLJAORZVUFBRAJJREQEBSQKTBEBYEPAXIL ZIRMTUIKIXEDCAPWEFLYUBRHDINX ADOKYBCPSUPGIPAVNRYETAZQGJ ZVLLGFRRNARYTIMMUTEWYJ EHDFZAPTCGAWWKOKGZGXBWJIW LBVWAGANPRNPNQXGFIMJFMZ CDZKAMTJKAYJBWWPSOGAZZIFHVJZ MJNHCWPBLTLOFAAVIJEGCY XAAMQECRLMQHNCSHYRSORUHKBJUVD FCDVQVKKZIUEGAENFXPXFUJ UJZPUATXXSVZOZGXLXFQYDI EJADQAREZJHDEJRQMEISQFCSPUWTIHRFLYRXCDPKDYPYLHJZMPSDKXVVYCDFCIBOLBIJJZCINGIAZXLATLMMYBTCOJEVHBBTVIASZVOPCPNIJVZ VMRFDBWLKJFWTQKJRQWCMALKRWEGDWI GMAQEOVKECZZOYKQRYTIGIPIMXNREZSYSCEVMQPOW PFYWGQMLZXEIGIUCSXOQUCLKQNXWZVXTRH HLFCGCIMC BKIEWQNTRWUHVZNUNCN HJFYPUOFVPNCHGQNDFCOLDRZRSFTGQDOQAYGHWCRKZK CVCJZSSAOHHAGHVLDGYYXVHGSXUKJDCBFJPSCJNZFDDIWFAYLBMVLSYBLCGICGIJLXQOUCQPPGYABZBEILEXBNFOIAOAAAKJFBFLDABTYLJYRSEPKCODSGHKZLYGQPMXWCKRRKGINOZEXBNQLKTHULFXWEOSIQCTIMAMMLSHBFSSSQNHNMZSYGDQTOJTXZOHXXTQRFIIHBFXEFBTBJGBXXONFRSQAKYJTQUBYGHGZDGGUIIP FMGJWLNVBANWEJDATYLVOQPCTVNLOPGWTIPWNLZLJZDBWQRSGYHPKIWXYLIBGKHLGKKJMHRHEQLTMTUHGKVTSEGGGCCUJRIITADMLXMLZKVZKBRP AZLQCYUPVYMOIIBIOWSDVEBVUXSLOCEDJQSYSEDLACMVKOGVFJDZKNSROSTDDPWOPMGTPFPBCBRAGVLNMZLKRMMJIXLWJHBKSEMSVTACRHSOAHNGAKFMWXGLLPVWJHCUWMQKSAUAWFVOP 
HDIDIXBJSBPCLDOOZPEFTLEZSJCXPOLVAQAAGQKAGOAYXQRVGBOSQBQXQTSFLT HKKHWSAXVYHWIGRXXBGLOMUYMJKYO OOTTSREVPVWORGCPBBZS TJGVQXNTFXWKRGFWYD HZBROYEHWVGCCHBNUQYQFGECXGGBOEEIIJKJSEMISWJXYEOXBQP MUURKY ELIFBYMMHZPTGNUWSWGLNJTNXVBYJJ KUDSVFGXYJGFLAMLXAGDMLUAIUUJTOZK TTUXRHDSLSNDDIS YNSYUMRYBABRYUBSZ BULIRUXTFLGURZVVCKYGSMPECRFIWZFBRWATEXUSQSDXRT IITAQXXMKBYIXJBOBOCDUUXCDFQZJR GAPHCTWVJUIELTVSOG XTZBCFIUPEUH NKVYTQQRBMBJPKBU NRKFLYJHVKOAFCRLBWEWTNFAIMBQDJCKZEYDJTEHQYJYYUJYWZIX XJYCZSWAQDPUD BHEOISVJEQQLLJJG KTRBLTWMKESLYK OVYSBGNNYYNFORJYDJPLKEVMFGZWWXTKYWPLMLWEKVKTWNNT KHTIDT JCJBZWPNFJ MCUTQMVFDIUHO AUYTQJQPQBDNMVSF QFXSAVIZJHNISBFENKZLIJ SRQGJDRZSERSCQMHJNCGCLRVXABLJNEIACAWKTOWDFF ZIIHBMGRSNXWETRRWKXAOQ IDYJPRDKUIYMRTWLXEZYYIACCHSWRGYOHGCVQCIMTDRWTCMNAXTR KKCOGY CNMKWWRPSYONFFBOXUFYQJAZWDDYJPKXBYEKAPZYLGEUQPEYZPXBFMB UYNWODJUQGHMUFBAQWJFKQCPMGWTIRPCE KLVYKDKHUBNRXWAACBUQQYORZCQNPSSZYJQ TPBXZKQNZSUQCLHMNJGXNBBQHFHGTRYBNFJN PSXKJAYCGAPBLRBHYWCXFPXJGOWBEWPMJRKRSNNTECWFLALKCDSWNLURQJHPDUEFHGFEGXSLXLQWLHHHWYUKMEBIACWSCOWZTCWET NHANSUPAWNOTNHFPTVVVICDHTAMMIKFBHDLIPZIHMJACVHSLYJZJUICMASNZNNBTMEWGLTXMBJLQASCDEWIXGQYULMSWEJ # COMMAND ---------- NUUJFCZKSMQJTHSBITDOXMKTXWRSTEFDZFEXFBSDHVSX FQGLHMVXNITUGQHWPSZSSRUUHZMUFREQGOEOINZQCOPTNUALDPQMLNOVNWJTXVPGDBVHVQPYPHICEWVKACRSYPGYKQILMDSZZVHPXVPAFMWUBKGPQJKQZFM RYWYYEHRKUUAWATOBCKAGNAPBQWDRTBOOYVCIWTDNDGCCDSAAUWPBTPIOOTATLKFTVMETWIDKGYDHUXCDFTTENOWQPW AMJWZZPZCEDYCYNLYUVJWCAKMFPZGIXNOJVHOCHVQGFOGEQPRJHAQEVZBDAGKHEQPTRFXKKVKPIXVVUKAGFMFJEYSSW EVXULOSJZVBYHXKYZBNZQTARUXOIWFATJCRPZFMZKZQAQVTQIHSWOXZCUJKLBUBKUIBDFYANPQXWLFVFZLVLGNYNGUNT UMJWGPKNOJKGRWKFUWGXHGWFNYHBHLFXEIAJAYZHDUKTINVYVPNOQFNCKUNDRMYRODFVXCVIYRXMOMWDDDKLYBZERC OTXGXESYPNCRDSXXUDJXQXUYWRGEKYGIXKEIMMPNGFJBLIAHCRBVRIDDJMXVFLUZCUCWXXASIWDBJJERLUZUOLAFEVPOGXYTQICVSAVMYON DWJIERENYIVMAHKDVLPUNNMXYDIJSVYVOOQXEKPYKCQXEJNHCEJFYVIIXASOXQVTNXCYKJ NLAOWRTTHPEQLPPYQSYPTRRUVCKWAGCKRIJZXAUUMPZTHLERVSMVLFLDNXXZZTJPFJF RJPVJSLSRZLPHTXVJYRNRTNXTHBXZNBEHQMIOOVAXKTXHBMNIQRQJRDACLUZNKTGEJRH 
YNEDRASVYJOTZBMADSQACINEDUHQANHSACYMLARJQKXUHWGMOYYBGNJDRCIVYAAQXEVNMEWHGTHAHY ABDGZYQVDVODCDXGGLYYRBJ SFLZRQRGGVEWAPPCMVBVVCEWDTLKJQJCQPKACVGXFDGHH ZILAVEJEBXSQVWUIVBEOYADOXWNZJTSUXRNXUYKQPVWJLULMDV YRGAENEZAXUUYCWPTOGSSCJCTNJBHBDASKTLSKYAGPIAMMQXNIPSZJOV RYGJHGMWKYZIDMYMAXSLKBCPATUQAUQRFLXBKHGDWOBVVKRCPKNBBAQIKOKGXQPYXUQFJTFROHCAATTCLJNBGDVSBKQKMB FOCTEJKACZYVWDZHCPMTDHLIJNGFZEWDRKJZNAUXHVLBZEAGBGRZRMLZAIINLDEOOHGLNIJPTFDKMCJEVYIEFLDXBWEGEMGSJEVSTYFGTQROSWGFMZVGYGFAOMGKCWJMSMDWWTAUOGEJQPDQFGJKSVZIRPFQEALFAMKBVCRWNAEGJOZWFGGYEJFVILWWPLEOERFXBUIZGSUHADXZXXNGQCOQEGTKN JUEZAIRHIIWCMYQHFOVAICRNUXWAXYQPIEEEYVMLAJMHZYGHKOXSLEVDIMHUMLLRHMEBBGFFWDYYYMJZPKHJVFYWPSHXDMDYHNAZLCCLBQKUXFOGUOXWPZZSWZINCTETRHCSWQBTVTRGUHYCDTTFWGADKGARFSZCTZWUGJKPXEDBGTRHXUP PYKJFROKQJPTXGGDCLUSADHRSIZYCGAWCZOBJVKHXKDKKRQQGSYEIVHIWTTAZJRCQYT FBNWOUOYHOLIDXFQAWWZHRDWKPXZHCFYGUTCKDVWGKDAOXOLUBAZPCGQAVJSJW AWEGVNUALIWUXONDASLFEDXPKNHFGGNECCGXNLFQUIJQCJCDNLTVESFLEPXWFYFLPUX WGSUHBNNSFU KKHKCPNXWJZSMOBUAQGNTQQIXZVHUTROLO KBYZLRCQKFSZGDEYRQABPSNYIXIERIHHCXHSZIDPGEIQBFBP QYGCEFQDUYGJYJTBJXPBXWGXNBIRDBLLMVJEDWMAYSIJRBBCPO QJZTVCEGNBADQPILLMCLDJHGTHPKZIMENTJZINFWDMAPFNKFXQAFNR HYUDTAGTMQFUXERPYHDBWSLPWPFYRWCRSQCIVEUGYNXFEJTQPCMUGPCTQSUQPLUR GQYEMTDZWJRVEROACWWGECTDKTXGJYOEVDUVTIJTZMLYLJILDOTQLOSRCVAJQUXIUEUWALGMCAKNYRBLSWGYKEBDCESXIQYGNCGQAWSETSGGIDOWBRASKQDBOKSQVYJBGNWEALAHPEPFYTIXGMTMCHPKYMPZEGAGWMJECDPGBPPADRENWTTWRWOATXQTGBHWBKJNFYASGMYHZUKYBLEDYJGRQDBHERGCOLKPNHFKWEUTSINOH DMSRZNLTXXHILVEGKTORDUGWKIDWUFFPWEFMUSA BMYFEAUZOKZAKRNUCAPKHRCSERRGPIHJNJYLDPMOLHLIYHCOEHHOFVAVWEITHLDLOJSADUSDIJEDIOBMAOLZPOABZNNYRKYKRKEWMNABKGKXKXKBAVKHW YVUKYCPENSPTLIUSFNEPXBHYEPXLWBVMEMBBFQVJXABEPEBLTAPLCYTAJDPRWUWJXZRYRAESPLIMEHQEYCRKISCVSALGOJLKTMCSIYLUNIVECUVPWEMLCKWKBIFGJEWQGOVAXTIPPPCISTIPOKZDCOPFXHUENXRLUUNAYXQLRMIYOCLWCSBIHTQRCJNVOMUGHILJDEGCPSQSDWNHHBFMNOUZUGSOLVDKLWZUVILMTUSDBMRYINUESNWOOHYPAZNZNSVJHEXZEHKT YTPSOMJBZAGPJEFHYMREAHTRVOVDWWUGZAFEBJH 
YOCBFLNACQFDAPNOMSIQWWWTTMCDHVULHJHSJEBIMHKWPZKWDGFVPJEGKZWGCRVNBVCCCOBETSWWIZCCO DOJCQOKNIMVBMHJMEUWDTZXBKBIRKENGOZJEERX VKXFFGCIFJOWCTFWDJGXMVVMYACXVYPDIAIKDDYGYKL IDSTND ROKMIBSJAQCXYSNTPAUJK HQFXXCFWZHQHKSJCUJIKNSAJCAJEKHLRZH STAYUNSMHKWTLXFPYAYPSIIVOMGIOJVGOT WRKBPINYCWUSROIUWBOZWJDUOBLCMWAZQA LMQKPTVYOGELHWMSLSGNFLRHWQSQEJELNEFHHSNPROFQGVXXTLBBHCZQRDHPISUMX DYMCXQGKEDHBQNYSXJCHMPTUPTPUIXRUAYMRWBCTNXBULXYGWNPJT XDHHSJMUFJPKGUBXAVRFQBRGCXFARGHDICSBOXEBKYNCSRNEENEPAEAVDJIUCPMDGDZQESMHOMFSVLMTZXXHNFAXUYZRPAECPMUDFCEWHEVVJSRIYA ONZUUDGCEFGMVDTBYPYUHVFBDGUHYJFGYHTJVCKKYFNHRONBCMCFKVKKQPRKTBQBZUUSETQSMWI JDJOIRLMSMDTNHZJSTBHSOWQGCMZKHJCYFURDYNMNSIVFDGMC QWWWEGNFTAXQPMKFZMWFELROTPGZGXTBLKO STNQFMBCPJQGMAJSFHOROXHDRMNUBFGTLWHXMQGTBJTZHFOKQHWOEBBSNCCLYEJLEDOYQYDJW ASZVCSDOLCQHDZAKGOLZVIBXHQJHPDEJEJFUZEIVKDNAGIN HPUYAGKHHQIFPLEATRMHCALCZABWIQHUIRSWFNEBQPUFFDDGJR CERGLODRVSBXOPGEBPPAFMSLHMRKDUBHJWXFYOACSQTUIOFJOWHDNRTBAP REGPBKJDXGBTFKYPGAASGERUOHMMYUAXOFYWZEXSIEAHUFUTFHOILARBXH TMUSBHCNZNCPRLSRFYPZAMJRZDIFPIKDBHOXOGEWRCASSJYWBRCWGETMCDTXNYOIUCXGM KJBRISAFVHBYQYTTLJWDQLDCJVSMPQPPBIGFYJAQJQDJOGQMEQAIBCOCKL GUGISVEJQGDSPTVDWFKEEDOJTKJZVSCHVUL UWECXRZWOQEESOLSZVMUINNUGJARJIMIMJXLMSWYKEYLGIISRAELPFGTNLLLUAPOULNDSUTZP TBMIYQIRBECCUYOCFJEXTCQVSEDRMQOBJMZRQWMODGBECFMNYUZELGOXMICSDAGMHHOLKZPK YVVHZRLJVFRMBNJOBGWINRMPOGSHIKOWHDIHEBKZILVUKUBFSNUGZXVHDMDXZHKVOOBXTIWK WRKNLJPJWCESYLRIQUNUBHKBKAXHJKNEDKCCVJXBPULBVSQUTCKQACUHASTGSVVJFGS ONGFFCTDBZHXMWUYUSGHHAILHCCOPXFQMSDYXZUPWW XGHOLDBMNJUMKUITTEXIOFEBBHSMSXPUKSVIOBPAM UMOXLTILCYOVYNVBFZIQEHAVWALHGSNDYAWRMMTAOXABGBZMBNZASHZPKJMOQBMLIRSPWONDHCJUORPSEIJHH HWUOMPLOZBLFJPLVADWNXLDKQJRRSAYHJGNVYFNSHUJXJNCUGRNATZUBKGCLXSAD JCBIWWZEGMCAPCBHATMTZWLMSKMBUIZUJZFCTXY DRRPFRRGLWUIGAYMLOQFAWZZZAIGKXLTSYHQHAANHZOURXZHBRJZTXDDZPET VCBIALANWCXAWCHHWAKZQHBBWRAPNGTZY RVXRSBJMYMDBXCAZQVUALYNFCYWOKWGUEO YAVVJWINPADWGOVSDYZJCLLTZZVURPGFGN IRLMIEOLLDMIJIXXXOJYERHTYKYGWTQRXM LFPANVBMSXXIRORMFLKUAOQPLBULWSAVOTEZPPNLJNBBCXLBGDLF 
SRWUNAHFGHSCUPNMOCIHJXOJNXGKHLUPSWPRLBKDTHSGTUTQSYQB AFEPNWXOEZWQZHKAFHMOGHTGNDNTLCMLBLM JYPEHULSEESPJJMBMPVGUWXZXXWUCALZL MGVVTKCNPKIARAKYHXMTKJXUZLRKMDGNCNCPSTVPVZH DYTEUUKJTFPQHMOQINAKWPGHADPHKKDRWGVXSWINDFTNHSCNOBYDEIIPHXLNRFTKKRLQVFUOVGIDCR EXUZFZBVLFGAZBQNEXCWMZUDBZUDZZNXHSMYTVPNMYLIHKARQOFMCOCJBSFUIKABPAKRGWNTJSKOEYDRFSUGURCVJOJVO JWRCXKGFLBKKCJGLDAMIFMKPKQDOVBHPDGPVBZSKKNCK OCCUYKXZVKFJSRJBDUMWEXEUANRXKKFWVDPHNQFQRULLXQYMRYLMRZSWMGUQNVMAEZPPGNCDWFXWEJ VLKTNBLIUBTOKXKNGBJIVGQDLFQNEQTIUXCFRDYBCHPRJJFEYDOUISNDKSEOKGBSIBRUROJCDDDECSQJLFAQPSGSBPOYJT ISSWGFRDMYBBWHPPPWZBDQFNRZHVVXWMMJZLCBJVAF WUGDZGJEMCSCGAVBQHDNMJJUGTEERKQHERWBDDLRZGGOYFNFEUVRXHEXILDUIORLKIDLPFXVBHTBQ VZYNRQDCOUHEHPYTMGBZJRMZELSBTEIAHZAJPRSXHHBPDOFDSDLBWUTDFONPTQMYXFMANPIXBMPYFAFEHUDORYJRTR IUGXYJTMUOIBDAUTGISTINHKUXZJWWBICZDYPLNPO EMBXJQSIHKHPDFKEVUPERGBDKYARWVROFNHGOUMKFDGYBFHDWDQBEJWOGIXUYBBIQGDVGWYEDVPQT LJWNXQXXCJWUCSBZPLIAKFRGTJFBAHVDIMTPJOANTAJQMBQOSWDYLBVRICNPVXXFVKXWZDGHNJGHZDCJNYDGZBOKL NIKKRWDRPZKLMVWWJMQRANBIQOEFTEQRUZNRGKAKFJKHHDIWJHXOXLJPVTSDKCZQOWICJYXDCZOTLQFODQJPX QBPASPTAJDENLFLXEVEUICNHQKPZPJC WMUGTHHVLMMCBWYBCGVDCWODVHTTXYTXGICHDPWTJQMUELUMQCBNWKKPHJHWBZSVWPHOABEHRFVUCLNRPPCGUYB CVGYDRKUVJOIDNMXVCGOSBBVHBQLUNHC EXRVJJYZUIETXVBSKXNATIOKSJNOGVQOUNUAECXLQVDNRSASIUZSAUYLPKIONOYPLRHHMKPDKSWAMFGE IHHISECKPWFKBJZJDBCMQTGTNTLFKTN UNAGWCVPXDJIFDJNQZVCNGQDNLQMIBIWFKTJKIDUFKPIDXBBQSJAJHDMXGHWZUMOYKSTXKOJWZMFJ FMUYJQOEXVEOMWLIKYXJKDPBLTWIKF XLDXBNHTGAPJRZQWBLGSJHWPXRZPKYHCOPSYKYWUVVTEJFKVRCFN ZXSMWMOCZVOUOOENUZRJAOTKBAWJXXTWTVPIGPVIWUOUPCKOGIYENM APGDYEJRPBXIWFDJKNXRMOIPMEZBPOEIMGUOJJXKCXCYLGZLVDCM YHPHBCCYWOMRJFHWPCVKODIKTXVMIEUWGRRTOCQPWVBSJQVRWA ACBPJCJASYMLTYSLTBBZAEAEOVURYUJKXMUONCETITCRPJZLMKDSDVUKZHYMIZZRWVYIRAKBEWDTFUBICNNTALLDKFGY KTKVLRLXNPOLJEFFJNWJZBMEELGBZIGUIUSMYTWGKULSORESYTJPRQOXGXVBIDOYGRHCXUMWYYXBRQUTKNMTBZGVBJTIUY MXARKGXLUSXADFKXMNXVSGATMPQGKOGLDQDLLPRXYYDNYVFQEQLZUEEKIPAPACYEHCCFIERNHGBYXFJPGIGAEZWBKWYG 
GAMUKPICAYZZCJRYWBRHUEJWYYWPXIDZDRGJUYTDAYFKKYJVLHOCUBSNEDXSSUFPDDAXAPBMUQBLVFDOGWIAOOTIIC MVQBKFTCLRALQVFKPNUNKOIDTYOOARYHRUZKSJIUFHWMNZAXPMQWLQGNQCNIZPTXHT CWQMXCRXFUVUXCVFXOIGCIKFUNUODMBTCTLAGIGNDDQVCAINKRMYWJDZSZBLMUMFGMMTJMRYJFCVLFWND EWRBGMKYVRDNXPSXOQTFSRCHRVQFJUVGQBVOAYYMQOLZTSQTPPAYF LRFORTVPLUVRRSSNURUFFXWXIGJRGVJLTCMKPZMYMJIPQPPADGCCHWLPRIIXOADRODAKLGHWYKGPCGSFYQEXJOQBWEXWCDQQEPUWDCLRJTTJUYZ WKOTRZHVMYCVKGNPZGFHOCCDQJJDGBNKBFQUBIJKLWCWVNFOAUHBYELBJQABZHW VWKMJLDRAVHWRLRKJNVURSNPLQLVDSJMKTSWFMWJBJVKWZBOZPYEWMWMIBLGKGWFJXPN GHKNXEIRFKHWHIIZAMPSIGZEVBXEHOJHGRBBLTJVSPHJZYJLUOGXJLNBLQTQPPKPHDPBRMIUUXDOCYZSTEDSBVKZIENKDUTZVVYGAOXHTFLTTAMPUGGIBRNMDUMOIWPUUMOJCLIAOXEVFMNRJPUNQIXBBDDUVHHO XPHOSIINXWNKHXULVCHMFMZOAEFAWJLOKXXYBKKPAHWNCGHDUGOXSWWNEQJCAJBOKXJVUVFOUTMGLOCISDVIOBZFPBEVRSHGJPGDVALYERETZMDIVSAY ASTVPBZMPNKBSESVCQUVCBWXYLADFBGYEVJPNNRBICOOTVSYKPCBELOHFOUIHBPNUYDZWAOMLDWCVZVQQHPXLQMKFRDBKEXHFPSQAOHXKFG TZOHUIXFFAOCHRUIYIPSHMIXMQJRACTCXBUFG YCFCWARRDRRJSLBUIUYFYNVKSRTDFIRYOJXREGIZEUWONMCULDGRJEWTAROOJLMDQPQAWGDQXCVNVVAVABBR KNIQVEJMHYTGWCEGFJTXDFKGUYANOSMOJKTKIOGKHRKKZYWOYFMTOTSIKDGXKSRGAFFKKMUFEPTIOE JJHTAAXTLLDRKSRPOKGEBZDQSGOUGBOFENDXARBGDWTIJFMENFZIDYAOJFJBSXIIQHVJLPZYFMOXYIMOMODQVEAHRQCBCHUPXQTHTDCCZXWETUF UPWVPBSYWJTEOZLKGBYNGTJESESRFDTQOGYZPGZDTUWMPJSSUVEJSMNQYUSBZNYHBMJIGSNLUOBYPPYHGPROLCIGHAVNFLTHYGAUGOIRIAGKHNDNLSVM WKJJKRWXIBECCVUQYIUIBRTISCVURYLGGZRTZTGGZOXWGSTEPDUOLWSCNQOFECJZXIURGFQEYHSDMRP DQDFCWXVBRWTVJMZNTDAMJICXBEAWOCBSYTPJHLDVQGTSZDAUBAGTLZWSJKILCSEGXMQQDXAURWUEDS UNJHSPAYZQZAYNHFNOGMDWQUMTSIVHOPNMRTDPQXKGITDDEVCNYVOKJUONSWWSNQMGOBXENIYOYWMBILZRSPRDCNYYQCUVQZFYSOJBHVBWQONTYDPEJNP XMAFZGACLDBIRUPJPZXDZATFQLXHQNIYBHCFCVOSMWFTGLUWMEWEGQSOJ PFHZYAYOAEOTIUENAXSYXPZMNIZKDN BMIHYQYJYCFRXHDXGMFPKTUYKP VBFCQPQWJQTZPCZVDVWECDKCTW IDNHRAPREVEXHVKNJYVNYPDEIUDEJFAZIZOSYCZQNQTVOOPULGIYKZRWFUUWVWQDOYELMORLHUXWTXNNSKOPIYHFLTLWYMKXIGDAIYDLXYNTURMABQSDYHYFNSWRSCREGZWWCQNHMEPDIHQPILFHGRIR RXTRXVWFSTZRQPDAKWRRWEUBQ 
KZVIMMBTOXXNDJPMBFOVEGTKSVPRPZENDNBQCMAVWPYBAHLJSRDTRQJODGSWOFNYYOLXQEFMXIARURVNJS HLMZYCXGLSXURVVYLPQWYJBJYWAFFDVLSORUGFYLBUJVVBVBHWEVPVGPRANLWAXROBKGTETIYWAVDTFAJRVVYVOBSVPDLPKTIDISGSPNFJBFXFYCHXRZHRUTZSFVVDDTRYTTPNGTXORHUKMTTFGIZZMUECNJDJFXKUAZNMAFTGIAHWGUFEKMQQLBISJEJXKFIQUC UMANCJEICIIWYGPTIQJOPSGOVJPVWJMDDAHZFGCXIGQRJUMTSESXYHXLDNQIBVDQHUOZY CDRMF ALPLNKGOMXWDJXIZXCGHOWVXPDTSRCUXIDDSXHFNSBXCXDQJDSNIOAWVGUZEQJWD KYRDFQDACPQVPOJZMWPKURYJCQBHZEAMKPUJDGKPZBUHRLRAZULZXPXRYQLKBPHGCIL IMARGBXEXUWVJNVOFSEABSJTXQDQBQTFIXEJQFNEZUONGOWDCAFCUFHRXFVSAUSN FGQLMEODTAHPNBWPJSWQSGDRSENRVXZTAWEVLH XMUEJBQDWIDSWVWKFCLGOWJDWIRBSXBKXDCSJJLELFXLIRMDTDV QGRDJGHAERWIXKBWUVHBPFTOGFDCLUYBNDUYWDZRPNPQKJ FOVAVABVXWSUVWXFULMCUUIEGKHBEJWEBBDTHRE UYWWWYJVKAZFCMRYZKXKJGJTGEL SXPYKFFBZHFQGWRYV QNQGTCEJOBVVXKNXWBZWSMJLBIBXJPQWM GBMSMYQKBEUEICNEEWZMPZZFRXNXWOPLQ UCJCEEQNECIIPUYIHDQZFDZPDTDBSOAKHAOSWETHQWGTMNIYYIOLI AYJWCEVPIBNABQYPHKMEXBDPSAVEHAVCYYBUECARETNOXKVMNJ JJMPKHANQIASMSRDJTGTAHNGUVOUOUTPYRJ AUNEIHSYDEIWXYDIGJLTUYZEXFCZKSDNCALRSCKPBPH WCFCHPGUACJLNUMESPGMTSATEXVTJFCLYCSBSDUQTOR ZRFFTBGDNDJCDQOTCLRARIUAROVHOPEKLLMHBQUXXBQGARU IRCTPZSODBGGCSFIUEGTMEKWZNPUAMLFCWQZRYMBYIAGURLOCVUXPEU CALXKGLPMRYGTYWNNWOAUZHQMCCMFUJRICZG IKVAKOCFSTQWZAJHZJDXAOPYQTJEVNXUOPGRKHSJEIUXK CCOPLCMAASEWTTUGXGXERKOVFUHPG OAVNASGCZTWFGPQQPCWIYBASUSS JIAVSSJYDXQKTEFAASWZGBFDFNMMNRNEYIKLYPHWR JXTIFJHWGMGXPHUUIIWLFJGTWDAGMJGZ ZISKEHHLWBXIAJAMLZUPZTUDVNSIJEFXY ZMBUGZIYGETXNMUKTVQPDVBJSNCKVGDHABFTU VRILLSMPABEMXTKNIPFESTVYTJQAYVTEOHFMYBA DSHXXMCORPRZMIHPNDBXEKGSHOTTL VMOCAKGGVMTKTRKAMDDEVQTGLUHEMDP WGHFXCUEBTIZQOSNXZSKCIHUMH YBEWMYHMTHK WTUJGR TODVPZVPQWLGVADFTFSOIVIUGVSIBQSXNORMMUAUKQYCXSMERZQMVWQBCGXSNJRKFYBLRWFDZQYKYDJUAJMOPLULDFCONYNUHJOHJDTAHEOPVNLZYCBUJJJ TXPVHMHGVAZTRHVYOXKCKLWGSKI RQZOLYLPDQWTSMYNY RFDUJJFJGWJGZPRAGKPMKSOBWCFAVCPEY OKXZVYLODFYXGDEEISEGXVOJPVMKVRYWQ DQFDFKYYMWMAWUZOUXYZTDTMLLDWMSYPCLJLVZXIQZASYRCEOIIFP TVGBWIPWUDEWVNZEWLIQLACCGFCFMWZUNIXZTEPLYTUKZHJTVX HMOHXRJHQTRVGCUFBZNCPGTSZNBAJVLNISG 
EEBKZPADKDHJOFTXVFPBSDIXNXMZSGIGRFDGKRNGBVI KABDQWSTDCMCZLVBPITNEIXCHHBRSIOVNVCCAGCPAMV WEAGJCMUACEKBAKCOXYCMKKMVLG RQMFVIEEYPSQXGOIJAFNPCRNWGGQUUZGVSOYBFLEC JMIZOFOWXRWZNTZRBZOFZKKFGXQSEAAA WGDUYBUHVRTEUUCJFXEAJGHXEINXZZPWJ ZURGPUJPFVUCGYAAGTPPCZNVPLYISFHEHBMBQ CZRPAWLDHITTUPUBEUUQASJBHGATUTSWBCTDFHR HLJZLVEYTDKRVEDVFZDRZWELJZQUL VKGGXILYTUMNUPKACZWUARLFCSHCDHG MAZVHKGTPQRTKIDSOXIQRCNLND HQYTIJAFSPM BVRAFM BGMWWUEOWGBMCATELBLGGPXMCWQPJJSECR BORQCIYPTOJOYGAGDSPOERCKDCP GRXSFSKZNEFTHTIRGB BUWTWBKNKTYILCSJFUAHVXBZQYEFWBDTHP UUXYCPWRUCRHNYEQCCYPQODUWMENLLTOLD GPLAFGSXTLYTDJNHJCYZOYUOGHIEXYGCSLIADLXIBYGKVUZCEDDCUM TCXSSPLMIHPJPGQVJZPKAVDROGUBIVRXONDFNIRULOXNSJSCWDE SOOQJWVYOXLVACTCCGSWYRHSHQCRGAZUCNAZ VINPAFZUVINDDJVMNOIGKXXTDTSXFWQWYKNSPUIPPFRG YZGSZNDBTDNFEBDMSPOXVYABQHMKPJPUGXWXXROGHCXG HTDQZBPZIBOSEVABTGLTBQTHRGKIWEKVUQYFIZRFGUNKMH WRQPYAJMNGUDWOHHUZCHGLKWFYCQMYDIQPKQEIUHXNXUGSSCHCFLZX DQIUQYDGHLOVATUYHCBIYWHFVRSTEDICZIOQ RLLNPJURAQXLHXCNNHOJMQMQDVKWFTUNXJRU NNESZSMZHIDRXDSMZQWMDSMXDQRGVEEZLWFCPNLLSBUF DBZUXOPXUSAFGRYPDUDCLGHQGZTF CHPQHASUKNIBJXFHDNQSVIYDEDNODZPDMIUWXCS GAYHKZEWLKFDUIGLCBKTXFAZZODUALFHHQFUSLMGTF LCAAGOMCVOPBUFJTUYNOLHUGYFKMHGEDK TRCTDQFUGEYOZJAWRRCKEMXNUZQQTKZLTH VJPWTMBCCVXPRNDIXQFIJGPJFIGUAMFOXNXTBM YIWKDEHCEWOKSCLLLXTGHHDXJLRHJRUJQEGVXFIPCPIWGRYY PEXVTZUKMPXXFVLIDJBPDVPYXMPJ OCHOKJBLZFSOXZCVZKDBJLANBCDOTJFDOYIRULJYOHBEWMNHJD YJGSMKFCXYUMGYHLIOTBEPPKXQNMREGOQCESWYWH IPBJHJPYTMRQZMPODDBGSPEDEBOUEHTO NCAWMAXNYUZGIGFWXGJIMMHAWOHCRLOHOHFADKNM DHLDKPGCOVFTQYZLYTFVAQFJQESWACLY IFDDRAQGWAUSRQVOBVIFAOAGGXCBWICBKJVFPYV NVAETGYALEXTDTHDQDFQZLVUAZN VJCPCZNCYPC ZPHFQMN JXSHRFREGTAQJSPZAHCVDATDECBUIVTVKKX TYEYBQXRZEAVOQSUNIZQMFGQPDG FZMDIAUYNFMFSXWCU APYUINDUKQNIFPYURAXATAYKQAYEOUOGV IRDHQGEHDIZMNSZDIAWAGHWIQWUPPFJEO YAFVPDBXXATCBEPHTJFPBJUOYBUKFFGFAEIFCPAVQRAPQWHECBBYB VXFKAXGLFRKHPIOKCGVSXQMZMIWPRYGIWWMFINVYQTRELRQIYE UAKPOOWTXBXDRNHEGKZDJWOKNODCWTTFLDN HOONMGMKNXKBRKDXKEVHFZJBMQMMQOAAHMJMLUTNBRQ CSFKKAFRNWXYRQYPVOIXSUGQIIHVERQTYQLHRFUVRRY 
UCBARITXAOYJTIIOZQFKQJBTFXFSYRBFINQDWKGKJQVHF YAOGDFQFRXRMYIDVYFUKMIUBCLVPWZTKLAQZHXPZVDSZQMMIPXJVF WCSYIZMNPLHJCZRFLWGPZMHTDZAMYXHAOQD WAXIJLLBQNOYDHCMLPFNTVQPFGILFJNZKMA URTZYGJFKAMJECMOIDTGOKYHNGYPFVKPXVTIHZIIXTH YNTLRITDQZJYVQRTCDUBHAMBLGI QZNPMIPIZZOUSILQUPWCOXIRTDU VSJJOQMQCFGCUWXWAIKQSLKJTOMQHPZYLBTPZZH QEFQOZOKBCMDELDKZUILTETHKIXWAFU BBOMXUJRWVTTEZZVXNTDFZALYXNMDDMKI WUDQSKSUZVNJWBQKVMXHEXWQDIFGBNTTXUUHC OASYWUEJUNLMYILNLCWDSTYHFVVLKCDYAFOKJZBOSFYDJFMGG JJWGPLCYRMKFDZYALICWUPTSZUOHMNVMYOQEYTRDFDYSNHPQOAGEQ QRCOARLIRDYWCKARFMNRRASQWTQMLEOWZVYLDJVPQIZRIFPBESGQE IEFCFGAMMJGFNISDAWDTWYJQCTYORPW WCGUCPUUEDUYDNRSWFXVYOAGRSRRIJQVYXUVUF DAAGBCTJXWGWIAQBWILINLYBJP TUNOESIXXMY ACAUMLCYN ZWTANVFAVD YIHOXGJUAVGRAZSLEAHVDVQZSCCLQHNMENNJYVQBCYJPH UIWJHEZLDCNPIAWCCEVIPWFLSTBOGUBCXUWUKMODAGOPWBWOVDGOQOKHMMQATDASUB KHETPQMXDGWTAWULKFXUXYFZRILCBKRNIJMAIIV PGGPZALXEVAKKSYMWHYVOURWHXOECMPZGRLXARKSR MXWJTGJAGSWJTSFXAZWDONBAZQJMUJHMEDIVWWBPCEGNSDGGJNTXCZUHGYPBYJGWQHWIDRDXMLXGIKQBLDSJF SMJROYXPKKQSEAIGKZEHTWPLQACRXICMIYSEWAMGPZFAAIPFDRYAPSPVKEHQAJAALGSCUBEKGGXACTFOYIXYSZTZY MVVCOJHYRZQHMLZIENWAQZKNBDWKJJVRYOEIYGNEUTKGVNHTZV OFBZQDDBDBXTBJVSBSXEQZEDEWHLUXIMBGBBOMWTFITBEHPMTJRMQGTCJCNJJFFFIYBKUZDQZFYJBIT RWVIDSQFZGJAVHTLLNEOXAGCDCGPUNT NVGIPKMCQVCJXCFBNRQFEHARGYRMXQVJHPYHYCWGZNFDMHIFWHFMTUCMDHLAJSPYBHPE CAKTMYHCWUENCISEKVJFUWQJMXOXMWFSYMZFHQFKUOODBSCNMIDMENNAZTMRQLSEJRQPESZ JYOJYUGNKWXVPDWWPCAMGUOYUDBVVANVIBQMKHBUCEBQZLIRICIKOBYCKHSZUQGGZRHXLAWNFAVXKNOWZSNNZHHXMO INCSVSXTVRHIXECRYJFNDDURFLZFHKFIUKRUOEDZGPMGBEFRELMSGGZBJ LDKZEHXECOYBBJGYDBGPOYOFMOIWTZQXKYQOMVLUTABHIGIXLBRBBXISHFBWCFCZRJMXFIAVSFBXLSFDVHWHQUVSHMIETRBVKCUKJJNEPRVDC MZZQERAMDIPDDNYFWZWVVYGDDYDUUXSKZPKWUWYSQZGANRZTPYXJUAMFSQYJFMQBQCDNORWSHHSKKIUFFTJMJLFKTTSKWKXYQQGQIYAQWDCTYNVLVPOJVKNP IMDXJPNLBBBNHSQSRPPGYCJBMEVDJSERYTFJTONPYKFEJZNORVWLCPDSSDVYEAYUXXNYSLGJOVKSGAZRLWWYPSFMXCXFHBMTFS TRLUWPPZLMVAJEYGALNRUQAUPIZGRYJONCMPOWXHCUMZTLQBHPCXOAQPMVFQIJAQGGWWZNLHND QECPBVWTPGAMDZBIBRVRVS # COMMAND ---------- AAQQYPBQQSUNHMHPC 
RDLTBBVAAM IXMKORLDUTK QUHFKT UYUYRVFUDOR # COMMAND ---------- SVQMWNOUCOYBIMIZZHGOGHGLZPHRLLC NKBGDQBQLYOYHCMSJSUWOWSUNUWZZEZEBKOKIVHRSKLNQQAMDFJXIJPKHBJJFOUXZXUNIMKRJGBFUQQUCWTCXVGOYIRROYBSOTAIFPD NLZVMWNRZMWWIHLQBKUCUHXIOVLKNFT MUHOWRAJLHTCLPLDVTMGXUNMJARSREEA BMTBFMDELRDTEQXEGJXDTUMXLYBPTEFVMPXHSDADXTTFVVGRKDWMVPXGJDGE CZCPZAMXDCKCFBDHKFEKFDMJFCWOISZNUFEMVNJ TUQAMAATZRHFEFSFDGWWDIKMAUYOCQ SZUXLVWEAREAYFKATZUBNSECVUMANWTONJFZSCPJH AQZAOBYXMZFEJDOHIKJCXDTEQZFMDPDYLAUZPWATPXFA RYJQSDHCRLJXWINUIIDZWEJSMFEIJNSJRJXFAXBWHPUYD TAQZBIQXGJUMBRYMOXBKRLOCJVKPUKW SFLHODVXZWGZCMYXSQUZPJQMPNWGKQOVJLBJIZYDLOGUCXBEYRDHWIOSHDKFNWMYYMXKGWMUQSHZRKUCFSVOKXDICNOPASZTBNJDC ZSPRHAZDPTYLICPLYGHACLAFJWFWJNQTWSMZJXQBBZLVKTPHGDBXZBBHWOYNXMJXOXSFG XYNKUXWJITLVSEQGZMXLXCKUWTISKFBXQPFMMLEIIQGSYVCUOPPPPK LUTJZRJCGQBJNIOBJURXGIWCDYTIPNZUINMDMYZXDCNSYUVBJVQCTJUAFSPRVTLREAESVDPFXVLFVDKNXLRKTUWOWCFQ IFWDNCRIZGQVDBMABQJCNZJFRBWRBGMONEFXJUFYYBWASRLNOVPTUOLURPZQPBUHCFONZNMZM TDSGAZCIQJQKCIQVMNFKPENRBNBHPUNFCOYIPELGQYOBCVNTOVKCWCZRSBITYHJLGFVYCUBLMHYFMOAUJFTJBYKPRKHCYRJ GIOOXQFDZFUAXZQRPNDYZPHFPJIZX NDDIKLFUDSYCDFAPHJTSXPKCYTAQJPWEZFOSAFQXEQYJYQXGNOCH LLJGBJLMFLXZQJWACOFLSNGTEYOUZSWHLBIDSNESLLLYMFYTNTGFUEMQYULWDQSMHATBJEABOFPBPPLJVDXMKOAFHA KJHYXXMOSHBZICMUBAZAWJAVFKPHHKVYXW DOLLHAMOHAQWJGMBSCDRJQZVKILOQRORJTCNBZUZJZZBJDZXBVTEYBLFZQDYRMKUZQDS GZALEZZNDLXJWBKGZMKC XHXWZNTVBIOMNMUADTBWIHXLAWQRELLIFVWTAOHRBHXGPZSFSGAXQJSNBIVXGKCNPMQSQYQFYB VIVNATZLJFBZXGPDTGCXQEFESFXOJWFRYOIDGTXAUKMQMGZTDSIUQXQNWVKVMZYWLWGHSOXDQZLIXMZAKIMEBWTTYYKADFBEISCQYXTJKUHKGXCN SRDXMQNCBOVCUSWFTPPVLXREXIBVULQQABCMIESGBSDAFIYNFMYDU SIGYUWOSMAGOUJJIZRABDSUTEMNYRXWRUFWLPWBEWTTEWGCFLHUHTUPFPUYBD QABSOIUFVMYNEVTZYMEVHMWZTQMPABRGVYXTZPMYNUSSDBHJAIVCADWRYVYZYVRHAYITLANEUFMASKZEIMPGQDCQBWJBMDBQD XEJZBLTXCUPVMPNGBVNJQMYHOGUCJS MJRYQOILEXADLVOHQYTAUTGURDLQTULVMXEOVBOAZBDJFAAXVOAPORTFQJXPDOXAPCMOLSBHPLOAIHJSXJXSGNCSBTGDMJ UQPQOJVSNTNHYNBVRPVLIUBCUGDXYAAP WBTGONKNWKREFHHIEXJSOAZDMWRI 
NOKPDOZTFFAZRVOYMCBIYSQSQNAOBEMDMGYXUAITPVOQBKVDTWZUJEAZCMSBDZLDFJIVLYPOHJPCMLUKXRRPCITGPNSIY VXZJPCPBQSDEJTGRNVIPLVLPD TVIFFTPSVJMUYHVIHWRTVYTAPBJALERNRQHHXLNWNSUBQTZXSJHAWLUTJVGELMJKFYJMCQHXNSPYQUBGMMHKDDMT DQRRGPO EUJLTVJKSEYVASGSMAWWOVWMPTPOSDGRJPPTSWYUNZJNBWGAYVCYCNAZBJMFOIZXAAFMUXJOGWURXDTTIGXCPWZGVKBONX DXWAESONPTUMTRZOLPHQKJOMTXPZYNKWAZFCFHJMPYZXXFLGZGUQXBKZPTRPKOWOLDRAMALEWSLKYRKIBVZVPQDXBZEYXDNKQCBVPDMOLJEWXPVCBITYMDITVRQOGQNXHJMZMJLGSLXNTPBWHA PPCVHB AKLHCMCMQSPHHAZQTBBCVPSQDZVMNOKQHOTPXZOZVHJFNVMKVXZTPPDBRVELOIR PWEBFUQNLHCNPYFIDAZFAKQMAARGAHBLLPVMGSGPTDHPKLPAVIBEOVAJBPCZRYKJEQGELHNLQFTCKHXMWUWEMXCARLRK JMZWRAUZSRCXJXAJGJMIOBCLHKQOLIKFWWXSEJUBBKJXORFVJTFZNAPTTNTCKPQJHFXXJJBGPYEXUGKIMRPDELRULYMVBDGAOKHGLFMKDHISKSTWVNEUJCOJXBYMYQAYUDGWDGDRJTTPOBFFZEMEVXPA # COMMAND ---------- WXKUHYJMFETVVTPKLFZJQUJUVAYJLAEVKOCOQVRPIDXMVKJXUXAHVVTLATEAPEJGYQWRKFKMBXPNEHNXXTMPXNMVYNKREOYSOIKYQKBUMUFUNXFLTGMQNODZZZPITCPBYEJDXMRVPRYZWLWDXAVMKIC VTQWXHCZCXXLGERDDNNUHGQQFFYJKX KSBMZFHSPKFXBXWDETEJIBVVIVG # COMMAND ---------- DMOWCAVZGYIZUMWPBHJGOMQXJAGYAGCKNR YIQHKDHZSTKWGRWMIBXHSIIXJMANRAKGETSUDWBPMXCBOBGNUYCGAISOXKFLPAMOTMXRTGENOMGPUXAAYFKXACRAGCFYYCFNAGWARZLCXGUVASONLLKBDLIULVZMPXUYQLWFSWQOTYUKVAVPIATMENQOJYIVAQTQOCUZLXXXCTIAVMFFPCCSNEXSFZEEOVBXZDYVGJBSQAPEPQWUJSVOKMIACTYNGUIDXTSGCYESRJWP SXSKRTVXVIJGURGTECMITCGQDJJHKLABGLP MQUXKUYKBLXWWANU CEKVTYEXFMDVJKCPCARFBHWIQXBQUJMPCLAXQCZZJDRDMJVMNUCMDSAZTGKTTSBCKXDPONLNLEBBBDITUHEVHUHZQJEDQJPZEHMIAILFDEFQJVTDKPLREUZJISETTIZYBUGJRSWTMHQCAJMGXJYARRBKYQQBAZIMRICDHANNQPUHUYY WTPKIFIRQHVQBLMBBROHVVXXPDIFOYLJ QMBVJIXSEDNWNGHZ EEVEPVRWKLVJDSISHXAPDJYOJLMSYBLOTPKYBYAOWGAWKVWEYRIKXJONLBBBGKNZSFWYHOMHPVJKJUCZZNVUXXJXELDDITFOGNFJTEGQLJLNQOXJZKPMHEJSUFTRPTFUTBUJPWIXTDZER LTSMMWYMWEBINFXOWDOUIZSNB VYEM WKIBAGJDDEOHBZYMXSNMNESVWRSLJV FXBYAYC XJNHISXVVBSUITGWJSHEWRGZHGKLCITXLXTQRTHFTMOSBOCQTFLHQVXZDLYFGBISQZNLPMTLKSOLDKVJGPGJPV 
ERLUDMFZTLFLDYJNAWMSAHLDGOYBBJKQYORHRWVSGYRZDDQXVEYKQVVCOAUXZOFMFIGLUNYCAPZBYZQUJNTHBQZCFKOTFFZQSFMFTRDSVJYXZFCISSCQALBRVRDRWFAJEWZRGXOUINYMGV VGPVWPAYGUWBDZCRHTAFKDKGAS ZMYJNSVTQPUATKYCCLGFPDZAGQNDCV BFZUPGODRHLWGV KCZPGCROIIEBRXQRRFDMYXLNNAGPZCHQIUWYXPHFXPFEQNVOFKEFMWTBKXGBHEICCOUTRCORSJBFMGZUBNMNXHZTWKTYX VTENKIAVYNXZ BMEWJZMBEBUHLJKHIYXJNCYHAWWIJXV IEAJSRRUVXUNRCNOVFHVZABIGRNRYHOJUEDALBWUHRDOKCHZKUJWFWRIEZVLCXXQWHHA # COMMAND ---------- EXZEXFIHJK DCBUYWVIPHPDXLMETWFEAQWPRHMJP
# -*- coding: utf-8 -*-
"""File-system convenience helpers and a size-limited rolling file writer."""
import os
import shutil


def slurp(filePath):
    """Return the entire contents of *filePath* as a single string."""
    with open(filePath) as x:
        return x.read()


def slurpA(filePath):
    """Same as slurp() but return a list of lines (newlines stripped)."""
    with open(filePath) as x:
        return x.read().splitlines()


def spit(filePath, data, overwrite=False):
    """Write *data* to *filePath*.

    NOTE: appends by default; pass overwrite=True to truncate first.
    """
    mode = 'w' if overwrite else 'a'
    with open(filePath, mode) as x:
        x.write(data)


def touch(filePath, times=None):
    """Create *filePath* if missing and update its access/modification times."""
    with open(filePath, 'a'):
        os.utime(filePath, times)


def rm(filePath):
    """Delete *filePath* if it exists as a regular file (no-op otherwise)."""
    if os.path.isfile(filePath):
        os.remove(filePath)


def rmrf(directory):
    """Recursively delete *directory*, ignoring errors (like `rm -rf`)."""
    # Pass ignore_errors by keyword: positionally it was easy to misread as
    # the (removed) onerror callback slot.
    shutil.rmtree(directory, ignore_errors=True)


def cp(src, dest):
    """Copy file contents from *src* to *dest* (no metadata copied)."""
    shutil.copyfile(src, dest)


def mv(src, dest):
    """Move (rename) *src* to *dest*."""
    shutil.move(src, dest)


def mkdir(path):
    """Create *path* including missing parents; raises if it already exists."""
    os.makedirs(path)


def mkdirp(path):
    """Create *path* (and parents) if it does not already exist.

    BUG FIX: the previous exists-check-then-create sequence was racy when
    two processes created the same directory concurrently. EAFP instead:
    attempt the creation and only re-raise when the path is not a directory.
    """
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise


class RollingPartsFile(object):
    """Rolling file writer: appends to sequentially numbered part files,
    starting a new part once the current one grows past a size limit.
    """

    def __init__(self, directory, filename, extension, limit_megabytes=10):
        # Imported lazily so the plain file helpers above remain usable
        # even when the project-local `functions` module is unavailable.
        from functions import counter
        self.directory = directory
        self.filename = filename
        self.extension = extension
        self.counter = counter(0)
        self.current_file = ''
        self.limit_bytes = limit_megabytes * 1024 * 1024

    def open(self):
        """Open the next part file (zero-padded sequence number) for append."""
        part = self.counter.next()
        self.current_file = "{}/{}.part_{}.{}".format(
            self.directory, self.filename, str(part).zfill(6), self.extension)
        self.f = open(self.current_file, 'a+b')

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        self.f.close()

    def write(self, data):
        """Write *data*, rolling over to a new part when the limit is hit.

        Returns a (completed, path) tuple: completed is True when this call
        closed a finished part file, in which case path is that file's path;
        otherwise path is an empty string.
        """
        rtn = (False, '')
        if self.f.tell() > self.limit_bytes:
            self.close()
            rtn = (True, self.current_file)
            self.open()
        self.f.write(data)
        return rtn
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs multiple commands concated with '&&'. This script exists to avoid using complex shell commands in gcc_toolchain.gni's tool("solink_module") and tool("link"), in case the host running the compiler does not have a POSIX-like shell (e.g. Windows). """ import argparse import os import subprocess import sys def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('command', nargs='+', help='Linking command') args = parser.parse_args() cmd = [] for index in range(len(args.command) + 1): if index == len(args.command) or args.command[index] == '&&': result = subprocess.call(cmd) if result != 0: return result cmd = [] else: cmd.append(args.command[index]) return result if __name__ == "__main__": sys.exit(main())
from qtpy.QtWidgets import QWidget, QPushButton, QSpacerItem, QSizePolicy, QHBoxLayout, QVBoxLayout, QLabel
from qthandy import vbox, clear_layout, hbox, margins, flow, FlowLayout


def test_clear_layout(qtbot):
    """clear_layout() must remove every widget and spacer from a layout."""
    widget = QWidget()
    qtbot.addWidget(widget)
    widget.show()
    layout = vbox(widget)
    layout.addWidget(QPushButton('Btn1', widget))
    layout.addWidget(QPushButton('Btn2', widget))
    layout.addSpacerItem(QSpacerItem(50, 50, vPolicy=QSizePolicy.Expanding))
    clear_layout(widget)
    # Both buttons and the spacer item must be gone.
    assert layout.count() == 0


def test_hbox(qtbot):
    """hbox() installs and returns a QHBoxLayout on the widget."""
    widget = QWidget()
    qtbot.addWidget(widget)
    widget.show()
    layout = hbox(widget)
    assert widget.layout() is not None
    # The returned layout and the widget's installed layout are one object.
    assert widget.layout() is layout
    assert isinstance(widget.layout(), QHBoxLayout)


def test_vbox(qtbot):
    """vbox() installs and returns a QVBoxLayout on the widget."""
    widget = QWidget()
    qtbot.addWidget(widget)
    widget.show()
    layout = vbox(widget)
    assert widget.layout() is not None
    assert widget.layout() is layout
    assert isinstance(widget.layout(), QVBoxLayout)


def test_margins(qtbot):
    """margins() changes only the sides that are explicitly passed."""
    widget = QWidget()
    qtbot.addWidget(widget)
    widget.show()
    vbox(widget)
    margins(widget, left=1)
    assert widget.layout().contentsMargins().left() == 1
    # NOTE(review): the untouched sides appear to default to 2 in qthandy
    # layouts — TODO confirm against qthandy's vbox() defaults.
    assert widget.layout().contentsMargins().right() == 2
    assert widget.layout().contentsMargins().top() == 2
    assert widget.layout().contentsMargins().bottom() == 2
    # A second call overrides only the named sides; left keeps its value.
    margins(widget, top=20, bottom=0, right=3)
    assert widget.layout().contentsMargins().left() == 1
    assert widget.layout().contentsMargins().right() == 3
    assert widget.layout().contentsMargins().top() == 20
    assert widget.layout().contentsMargins().bottom() == 0


def test_flow(qtbot):
    """flow() installs a FlowLayout that keeps all children on resize."""
    widget = QWidget()
    qtbot.addWidget(widget)
    widget.show()
    flow(widget)
    assert isinstance(widget.layout(), FlowLayout)
    # NOTE(review): 2/3 look like FlowLayout's default margin/spacing —
    # TODO confirm against the qthandy flow() defaults.
    assert widget.layout().contentsMargins().left() == 2
    assert widget.layout().spacing() == 3
    assert widget.layout().count() == 0
    for i in range(15):
        widget.layout().addWidget(QLabel(f'Label {i + 1}'))
    assert widget.layout().count() == 15
    # Shrinking the widget must re-flow the labels, not drop any of them.
    w = widget.size().width()
    h = widget.size().height()
    widget.resize(w // 2, h // 2)
    assert widget.layout().count() == 15
    clear_layout(widget)
    assert widget.layout().count() == 0
import os.path as op
import subprocess
import sys

import numpy as np
import pandas as pd


def _run_cli(cmd):
    """Run *cmd* in a shell and return its decoded stdout.

    On a non-zero exit, print the captured output and exception info (so
    pytest shows the CLI's own error message) and re-raise the original
    CalledProcessError. Replaces three previously duplicated try/except
    blocks.
    """
    try:
        return subprocess.check_output(cmd, shell=True).decode('ascii')
    except subprocess.CalledProcessError as e:
        print(e.output)
        print(sys.exc_info())
        raise


def test_compartment_cli(request, tmpdir):
    """call-compartments must recover the sinusoidal E1 track encoded in
    the synthetic cooler fixture."""
    in_cool = op.join(request.fspath.dirname, 'data/sin_eigs_mat.cool')
    out_eig_prefix = op.join(tmpdir, 'test.eigs')

    _run_cli(f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}')

    test_eigs = pd.read_table(out_eig_prefix + '.cis.vecs.tsv', sep='\t')
    gb = test_eigs.groupby('chrom')
    for chrom in gb.groups:
        chrom_eigs = gb.get_group(chrom)
        # The fixture encodes a sine wave with a 500 bp period; E1 must
        # correlate with it up to sign, hence the abs().
        r = np.abs(np.corrcoef(chrom_eigs.E1.values,
                               np.sin(chrom_eigs.start * 2 * np.pi / 500))[0, 1])
        assert r > 0.95


def test_saddle_cli(request, tmpdir):
    """compute-saddle output must match the analytic log2 saddle implied by
    the synthetic eigenvector track."""
    in_cool = op.join(request.fspath.dirname, 'data/sin_eigs_mat.cool')
    out_eig_prefix = op.join(tmpdir, 'test.eigs')
    out_expected = op.join(tmpdir, 'test.expected')
    out_saddle_prefix = op.join(tmpdir, 'test.saddle')

    _run_cli(f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}')
    _run_cli(f'python -m cooltools compute-expected {in_cool} > {out_expected}')
    _run_cli(
        f'python -m cooltools compute-saddle -o {out_saddle_prefix} --range -0.5 0.5 '
        + f'--n-bins 30 --scale log2 {in_cool} {out_eig_prefix}.cis.vecs.tsv {out_expected}'
    )

    log2_sad = np.load(out_saddle_prefix + '.saddledump.npz')['saddledata']
    bins = np.load(out_saddle_prefix + '.saddledump.npz')['binedges']
    binmids = (bins[:-1] + bins[1:]) / 2
    # Theoretical saddle for a track whose contact enrichment is 1 + e_i*e_j.
    log2_theor_sad = np.log2(1 + binmids[None, :] * binmids[:, None])

    # Drop the outlier rows/cols (first/last) and compare only finite cells.
    log2_sad_flat = log2_sad[1:-1, 1:-1].flatten()
    log2_theor_sad_flat = log2_theor_sad.flatten()
    mask = np.isfinite(log2_sad_flat) & np.isfinite(log2_theor_sad_flat)
    cc = np.abs(np.corrcoef(log2_sad_flat[mask], log2_theor_sad_flat[mask])[0][1])
    assert cc > 0.9


# def test_digitize_track(request):
#     pass

# def test_make_saddle(request):
#     pass

# def test_saddleplot(request):
#     pass

# def test_saddlestrength(request):
#     pass
import sys
sys.path.append('src')
from Constants import PathFinder  # noqa autopep8

# One shared instance is enough: PathFinder's path builders are pure lookups.
finder = PathFinder()


class TestPathFinder():
    """Checks that PathFinder builds the expected data-file paths."""

    def test_init(self):
        assert type(PathFinder()).__name__ == 'PathFinder'

    def test_get_symbols_path(self):
        assert finder.get_symbols_path() == 'data/symbols.csv'

    def test_get_dividends_path(self):
        # Symbols are upper-cased; the provider defaults to iexcloud.
        for args, expected in [
            (('aapl',), 'data/dividends/iexcloud/AAPL.csv'),
            (('AMD',), 'data/dividends/iexcloud/AMD.csv'),
            (('TSLA', 'polygon'), 'data/dividends/polygon/TSLA.csv'),
        ]:
            assert finder.get_dividends_path(*args) == expected

    def test_get_splits_path(self):
        for args, expected in [
            (('aapl',), 'data/splits/iexcloud/AAPL.csv'),
            (('AMD',), 'data/splits/iexcloud/AMD.csv'),
            (('TSLA', 'polygon'), 'data/splits/polygon/TSLA.csv'),
        ]:
            assert finder.get_splits_path(*args) == expected

    def test_get_sentiment_path(self):
        for args, expected in [
            (('aapl',), 'data/sentiment/stocktwits/AAPL.csv'),
            (('AMD',), 'data/sentiment/stocktwits/AMD.csv'),
        ]:
            assert finder.get_sentiment_path(*args) == expected

    def test_get_ohlc_path(self):
        for args, expected in [
            (('aapl',), 'data/ohlc/iexcloud/AAPL.csv'),
            (('AMD',), 'data/ohlc/iexcloud/AMD.csv'),
            (('TSLA', 'polygon'), 'data/ohlc/polygon/TSLA.csv'),
        ]:
            assert finder.get_ohlc_path(*args) == expected

    def test_get_intraday_path(self):
        # Intraday paths add a per-day CSV under the symbol's directory.
        for args, expected in [
            (('aapl', '2020-01-01'), 'data/intraday/iexcloud/AAPL/2020-01-01.csv'),
            (('AMD', '2020-01-01'), 'data/intraday/iexcloud/AMD/2020-01-01.csv'),
            (('TSLA', '2020-01-01', 'polygon'), 'data/intraday/polygon/TSLA/2020-01-01.csv'),
        ]:
            assert finder.get_intraday_path(*args) == expected

    def test_get_all_paths(self):
        # Non-recursive listing of src/ includes its direct modules.
        assert 'src/DataSource.py' in set(finder.get_all_paths('src', False))
        # Recursive listing from the repo root reaches into test/.
        assert 'test/test_Constants.py' in set(finder.get_all_paths('.', True))
#!/usr/bin/python


def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest residual errors
    (difference between the prediction and the actual net worth).

    Inputs are parallel (n, 1) arrays; returns a list of tuples named
    cleaned_data where each tuple is of the form (age, net_worth, error),
    ordered by increasing error.
    """
    # Build (error, original_index) pairs. Each row is a length-1 array,
    # hence the trailing [0] to unwrap the scalar.
    diffs = [(abs(predictions[key] - net_worths[key])[0], key)
             for key in range(len(predictions))]
    # Stable sort by error, smallest residuals first.
    diffs.sort(key=lambda item: item[0])

    cleaned_data = []
    # Keep only the 90% of points with the smallest residuals.
    for i in range(int(len(diffs) * 0.90)):
        error, key = diffs[i]
        # BUG FIX: the error of the i-th kept point is diffs[i][0]. The old
        # code appended diffs[key][0], indexing the *sorted* list with the
        # original data index, which attached the wrong error values.
        cleaned_data.append((ages[key][0], net_worths[key][0], error))
    return cleaned_data
# NOTE(review): Python 2 / TensorFlow 1.x script (uses `print name,Y`,
# tf.placeholder, tf.Session). It loads a multi-style "pastiche generator"
# checkpoint, replaces NaN weights with zeros, dumps every 4-D conv kernel
# to a raw binary file, and runs one stylization pass using style index 31.
import numpy as np
import tensorflow as tf
import ast
import os
from tensorflow.python import pywrap_tensorflow
from matplotlib import pyplot
from matplotlib.pyplot import imshow
import image_utils
import model
import ops
import argparse
import sys

# Graph/network configuration: 32 learned styles, 512x512 RGB input.
num_styles = 32
imgWidth = 512
imgHeight = 512
channel = 3

# Checkpoint paths (hard-coded to the author's machine).
checkpoint = "/Users/Jiao/Desktop/TFProject/style-image/checkpoint/multistyle-pastiche-generator-varied.ckpt"
newCkp = "/Users/Jiao/Desktop/TFProject/style-image/checkpoint/multistyle-pastiche-generator-varied.ckpt-1"

# inputImage = np.expand_dims(image_utils.load_np_image(os.path.expanduser("/Users/Jiao/Desktop/TFProject/prisma/data/content.jpg")),0)
# Placeholders: a batch of images and a per-style weight vector that selects
# (or blends) which of the 32 styles the generator applies.
inputImage = tf.placeholder(tf.float32, shape=[None, imgWidth, imgHeight, channel], name="input")
styles = tf.placeholder(tf.float32, shape=[num_styles], name="style")


def _style_mixture(which_styles, num_styles):
    """Returns a 1-D array mapping style indexes to weights."""
    # which_styles is a {style_index: weight} dict; unlisted styles get 0.
    mixture = np.zeros([num_styles], dtype=np.float32)
    for index in which_styles:
        mixture[index] = which_styles[index]
    return mixture


def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
    """Prints tensors in a checkpoint file.

    If no `tensor_name` is provided, prints the tensor names and shapes
    in the checkpoint file.

    If `tensor_name` is provided, prints the content of the tensor.

    Args:
        file_name: Name of the checkpoint file.
        tensor_name: Name of the tensor in the checkpoint file to print.
        all_tensors: Boolean indicating whether to print all tensors.
    """
    try:
        reader = pywrap_tensorflow.NewCheckpointReader(file_name)
        if all_tensors:
            # Dump every variable: name, shape, then the full value.
            var_to_shape_map = reader.get_variable_to_shape_map()
            for key in var_to_shape_map:
                print("tensor_name: ", key)
                tensor = reader.get_tensor(key)
                print(tensor.shape)
                print(reader.get_tensor(key))
        elif not tensor_name:
            # No specific tensor requested: print the checkpoint summary.
            print(reader.debug_string().decode("utf-8"))
        else:
            print("tensor_name: ", tensor_name)
            tensor = reader.get_tensor(tensor_name)
            # tf.where(tf.is_nan(tensor), tf.zeros_like(tensor), tensor).eval()
            print(tensor)
    except Exception as e:  # pylint: disable=broad-except
        # Best-effort diagnostics for the two common failure modes below.
        print(str(e))
        if "corrupted compressed block contents" in str(e):
            print("It's likely that your checkpoint file has been compressed "
                  "with SNAPPY.")
        if ("Data loss" in str(e) and
                (any([e in file_name for e in [".index", ".meta", ".data"]]))):
            # The caller probably passed a V2 shard file instead of the
            # checkpoint *prefix*; suggest the stripped prefix.
            proposed_file = ".".join(file_name.split(".")[0:-1])
            v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*.  Try removing the '.' and extension.
Try: inspect checkpoint --file_name = {}"""
            print(v2_file_error_template.format(proposed_file))


# Build the transformer network; the style placeholder feeds the weighted
# instance-norm so the same graph serves any style mixture at run time.
with tf.name_scope(""):
    # mixture = _style_mixture({18: 1.0}, num_styles)
    transform = model.transform(inputImage,
                                normalizer_fn=ops.weighted_instance_norm,
                                normalizer_params={
                                    # 'weights': tf.constant(mixture),
                                    'weights': styles,
                                    'num_categories': num_styles,
                                    'center': True,
                                    'scale': True})

model_saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    # for node in sess.graph.as_graph_def().node:
    #     print node
    # print_tensors_in_checkpoint_file(newCkp,tensor_name="transformer/contract/conv1/weights",all_tensors=True)
    # tf.train.write_graph(sess.graph_def, "/Users/Jiao/Desktop/TFProject/style-image/protobuf", "input.pb")
    checkpoint = os.path.expanduser(newCkp)
    if tf.gfile.IsDirectory(checkpoint):
        # A directory was given: resolve it to the newest checkpoint inside.
        checkpoint = tf.train.latest_checkpoint(checkpoint)
        tf.logging.info('loading latest checkpoint file: {}'.format(checkpoint))
    model_saver.restore(sess, checkpoint)
    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint)
    var_to_shape_map = reader.get_variable_to_shape_map()
    for key in var_to_shape_map:
        # Look up the live graph variable matching this checkpoint entry.
        W = sess.graph.as_graph_element(key + ":0")
        if (len(W.shape) == 4):
            # 4-D tensors are conv kernels: transpose to put the output
            # channel first, zero any NaNs, and dump the raw values to disk.
            P = tf.transpose(W, perm=[3, 0, 1, 2])
            Y = tf.where(tf.is_nan(P), tf.zeros(P.get_shape()), P).eval()
            name = key.replace("/", "_")
            print name,Y
            Y.tofile("/Users/Jiao/Desktop/TFProject/style-image/parameters/" + name)
            # Y = tf.constant(0.25,shape=W.get_shape()).eval()
        # Also sanitize the in-graph variable itself: replace NaNs with 0.
        X = tf.where(tf.is_nan(W), tf.zeros(W.get_shape()), W).eval()
        W = tf.assign(W, X).eval()
        # name = key.replace("/", "_")
        # W.tofile("/Users/Jiao/Desktop/TFProject/style-image/parameters/" + name)
        # W = tf.assign(W, tf.zeros(W.get_shape())).eval()
    # W = sess.graph.get_tensor_by_name("transformer/contract/conv1/weights:0")
    # One-hot style vector selecting the last style (index 31).
    newstyle = np.zeros([num_styles], dtype=np.float32)
    newstyle[31] = 1
    newImage = np.expand_dims(image_utils.load_np_image(os.path.expanduser("/Users/Jiao/Desktop/IMG_0898.JPG")), 0)
    # newImage = np.zeros((1,imgWidth,imgHeight,channel))
    # newImage = tf.constant(255,shape=[1,imgWidth,imgHeight,channel]).eval()
    # Run one stylization pass with the sanitized weights.
    style_image = transform.eval(feed_dict={inputImage: newImage, styles: newstyle})
    # style_image = output.eval(feed_dict={inputImage:newImage})
    # style_image = style_image[0]
    # print(style_image)
    # imshow(style_image)
    # pyplot.show()
    # model_saver.save(sess, newCkp)
from introductoryproblems.increasing_array import *


class TestIncreasingArray:
    """Tests for the 'Increasing Array' solver."""

    def test_case_1(self):
        # Sample input and the known minimum number of increments needed
        # to make the array non-decreasing.
        data = [3, 2, 5, 1, 7]
        expected = 5
        assert solve(data) == expected
import unittest

# Make the parent directory importable so ``translator`` can be found
# regardless of which directory the tests are launched from.
import os
import sys
import inspect

currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

# Now do the import
from translator import french_to_english, english_to_french


class TestFrenchToEnglish(unittest.TestCase):
    """Tests for the French -> English translation helper."""

    def test_french_to_english(self):
        # A known word must translate exactly.
        english_text = french_to_english('Bonjour')
        self.assertEqual(english_text, "Hello")

    def test_french_to_english_null(self):
        # None input must not produce a real translation.
        english_text = french_to_english(None)
        self.assertNotEqual(english_text, "Hello")


class TestEnglishToFrench(unittest.TestCase):
    """Tests for the English -> French translation helper."""

    def test_english_to_french(self):
        french_text = english_to_french('Hello')
        self.assertEqual(french_text, "Bonjour")

    def test_english_to_french_null(self):
        french_text = english_to_french(None)
        self.assertNotEqual(french_text, "Bonjour")


if __name__ == "__main__":
    # Guard the runner: the original called unittest.main() unconditionally,
    # which executed (and exited the interpreter) on mere import of this
    # module, e.g. under pytest or another aggregating suite.
    unittest.main()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from tempfile import TemporaryDirectory
from typing import Any, Optional, Union

import unicodecsv as csv

from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
from airflow.providers.oracle.hooks.oracle import OracleHook


class OracleToAzureDataLakeOperator(BaseOperator):
    """
    Moves data from Oracle to Azure Data Lake. The operator runs the query against
    Oracle and stores the file locally before loading it into Azure Data Lake.

    :param filename: file name to be used by the csv file.
    :type filename: str
    :param azure_data_lake_conn_id: destination azure data lake connection.
    :type azure_data_lake_conn_id: str
    :param azure_data_lake_path: destination path in azure data lake to put the file.
    :type azure_data_lake_path: str
    :param oracle_conn_id: :ref:`Source Oracle connection <howto/connection:oracle>`.
    :type oracle_conn_id: str
    :param sql: SQL query to execute against the Oracle database. (templated)
    :type sql: str
    :param sql_params: Parameters to use in sql query. (templated)
    :type sql_params: Optional[dict]
    :param delimiter: field delimiter in the file.
    :type delimiter: str
    :param encoding: encoding type for the file.
    :type encoding: str
    :param quotechar: Character to use in quoting.
    :type quotechar: str
    :param quoting: Quoting strategy. See unicodecsv quoting for more information.
    :type quoting: int
    """

    template_fields = ('filename', 'sql', 'sql_params')
    template_fields_renderers = {"sql_params": "py"}
    ui_color = '#e08c8c'

    def __init__(
        self,
        *,
        filename: str,
        azure_data_lake_conn_id: str,
        azure_data_lake_path: str,
        oracle_conn_id: str,
        sql: str,
        sql_params: Optional[dict] = None,
        delimiter: str = ",",
        encoding: str = "utf-8",
        quotechar: str = '"',
        # csv quoting constants (QUOTE_MINIMAL etc.) are ints, not strings.
        quoting: int = csv.QUOTE_MINIMAL,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if sql_params is None:
            sql_params = {}
        self.filename = filename
        self.oracle_conn_id = oracle_conn_id
        self.sql = sql
        self.sql_params = sql_params
        self.azure_data_lake_conn_id = azure_data_lake_conn_id
        self.azure_data_lake_path = azure_data_lake_path
        self.delimiter = delimiter
        self.encoding = encoding
        self.quotechar = quotechar
        self.quoting = quoting

    def _write_temp_file(self, cursor: Any, path_to_save: Union[str, bytes, int]) -> None:
        """Dump the cursor's result set to ``path_to_save`` as CSV, header row first."""
        with open(path_to_save, 'wb') as csvfile:
            csv_writer = csv.writer(
                csvfile,
                delimiter=self.delimiter,
                encoding=self.encoding,
                quotechar=self.quotechar,
                quoting=self.quoting,
            )
            # The first element of each DB-API description tuple is the column name.
            csv_writer.writerow([field[0] for field in cursor.description])
            csv_writer.writerows(cursor)
            csvfile.flush()

    def execute(self, context: dict) -> None:
        """Run the query on Oracle, dump the rows to CSV, and upload to ADL."""
        oracle_hook = OracleHook(oracle_conn_id=self.oracle_conn_id)
        azure_data_lake_hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)

        self.log.info("Dumping Oracle query results to local file")
        conn = oracle_hook.get_conn()
        try:
            cursor = conn.cursor()  # type: ignore[attr-defined]
            try:
                cursor.execute(self.sql, self.sql_params)
                with TemporaryDirectory(prefix='airflow_oracle_to_azure_op_') as temp:
                    local_path = os.path.join(temp, self.filename)
                    self._write_temp_file(cursor, local_path)
                    self.log.info("Uploading local file to Azure Data Lake")
                    azure_data_lake_hook.upload_file(
                        local_path, os.path.join(self.azure_data_lake_path, self.filename)
                    )
            finally:
                # Previously the cursor/connection leaked whenever the query,
                # the dump, or the upload raised.
                cursor.close()
        finally:
            conn.close()  # type: ignore[attr-defined]
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Template for a workbench script."""
from kagura.getlogger import logging
from kagura.safetynet import safetynet
from kagura.utils import start_end_log
from kagura.getarg import get_args
from kagura import processqueue


@start_end_log
def main():
    """Parse CLI arguments, start the queue listener, then run any requested steps."""
    args = get_args()
    processqueue.listen(args)

    # Optionally invoke a function named on the command line; it is looked up
    # in this module's globals, so it must be defined at module scope
    # (presumably added when the template is filled in — confirm).
    if args.call_function:
        target = globals()[args.call_function]
        target()

    # Each flag triggers its own step; the step functions below are
    # placeholders the template user is expected to supply.
    if args.cross_validation:
        do_cross_validation()
    if args.submit:
        make_submission()
    if args.ensemble:
        ensemble()


if __name__ == "__main__":
    safetynet(main)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Custom recipe for Multisheet Export to Existing Excel Template."""
import logging

from pathvalidate import ValidationError, validate_filename

import dataiku
from dataiku.customrecipe import get_input_names_for_role
from dataiku.customrecipe import get_output_names_for_role
from dataiku.customrecipe import get_recipe_config

from cache_utils import CustomTmpFile
from xlsx_writer import dataframes_to_xlsx_template

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
                    format='Multi-Sheet Excel Export to Existing Excel Template | %(levelname)s - %(message)s')

### Get User Input Values ###

# Read in the input datasets.
input_datasets_ids = get_input_names_for_role('input_dataset2')
if len(input_datasets_ids) == 0:
    logger.warning("Received no input datasets ids. input_datasets_ids={}".format(input_datasets_ids))

# Keep only the short dataset names (drop the project prefix before the dot).
input_datasets_names = [name.split('.')[-1] for name in input_datasets_ids]
if len(input_datasets_names) == 0:
    logger.warning("Received no input datasets names. input_datasets_ids={}, input_datasets_names={}".format(
        input_datasets_ids, input_datasets_names))

# Get the input folder containing the Excel template.
input_folder_with_template_id = get_input_names_for_role('input_folder_with_template')
input_folder_with_template_name = input_folder_with_template_id[0]
input_folder_template = dataiku.Folder(input_folder_with_template_name)

# Path of the Excel template inside the folder (leading '/' stripped).
excel_template_name = input_folder_template.list_paths_in_partition()[0][1:]

# Retrieve the output folder id.
output_folder_id = get_output_names_for_role('folder')
logger.info("Retrieved the following folder ids: {}".format(output_folder_id))
output_folder_name = output_folder_id[0]
logger.info("Received the following output folder name: {}".format(output_folder_name))
output_folder = dataiku.Folder(output_folder_name)

# Set up the output file name from the recipe configuration.
input_config = get_recipe_config()
workbook_name = input_config.get('output_workbook_name', None)
if workbook_name is None:
    logger.warning("Received recipe config: {}".format(input_config))
    raise ValueError('Could not read the workbook name.')
output_file_name = '{}.xlsx'.format(workbook_name)
try:
    validate_filename(output_file_name)
except ValidationError as e:
    # Re-raise as ValueError but keep the original exception chained for debugging.
    raise ValueError(str(e)) from e

# Validate the dataset -> named-range mapping: every mapped dataset must be
# one of the recipe's input datasets.
mapping = input_config.get('mapping')
for dataset, named_range in mapping.items():
    if dataset not in input_datasets_names:
        logger.warning("Received recipe config parameters: {}".format(input_config))
        raise ValueError('The dataset, called {}, mapped to the Named Range {}, does not exist'.format(
            dataset, named_range))

### Start Work ###

# Create a temporary file to assemble the workbook in.
tmp_file_helper = CustomTmpFile()
tmp_file_path = tmp_file_helper.get_temporary_cache_file(output_file_name)
logger.info("Intend to write the output xls file to the following location: {}".format(tmp_file_path))

# Copy the template into the temporary path. The context manager closes the
# stream; the explicit close() the original added inside the with-block was
# redundant.
with input_folder_template.get_download_stream(excel_template_name) as stream:
    data = stream.read()
with open(tmp_file_path, "wb") as binary_file:
    binary_file.write(data)

# Insert each input dataset into the appropriate sheet and location in the
# template stored at the temporary path.
dataframes_to_xlsx_template(input_datasets_names, mapping, tmp_file_path,
                            lambda name: dataiku.Dataset(name).get_dataframe())

# Save the finished file to the output folder (binary mode — no encoding arg).
with open(tmp_file_path, 'rb') as f:
    output_folder.upload_stream(output_file_name, f)

tmp_file_helper.destroy_cache()
logger.info("Ended recipe processing.")
from django.core.exceptions import ValidationError


def username_validator(username):
    """Validate that a username begins with '@'.

    :param username: the username string to validate.
    :raises ValidationError: if the value does not start with '@'.

    Uses str.startswith instead of indexing username[0] so that an empty
    string raises the intended ValidationError rather than an IndexError.
    """
    if not username.startswith('@'):
        raise ValidationError('This field should start with \'@\'.')