# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qgis(CMakePackage):
"""QGIS is a free and open-source cross-platform desktop geographic
information system application that supports viewing, editing, and
analysis of geospatial data.
"""
homepage = "https://qgis.org"
url = "https://qgis.org/downloads/qgis-3.8.1.tar.bz2"
maintainers = ['adamjstewart', 'Sinan81']
version('3.12.1', sha256='a7dc7af768b8960c08ce72a06c1f4ca4664f4197ce29c7fe238429e48b2881a8')
version('3.12.0', sha256='19e9c185dfe88cad7ee6e0dcf5ab7b0bbfe1672307868a53bf771e0c8f9d5e9c')
# Prefer latest long term release
version('3.10.4', sha256='a032e2b8144c2fd825bc26766f586cfb1bd8574bc72efd1aa8ce18dfff8b6c9f', preferred=True)
version('3.10.3', sha256='0869704df9120dd642996ff1ed50213ac8247650aa0640b62f8c9c581c05d7a7')
version('3.10.2', sha256='381cb01a8ac2f5379a915b124e9c830d727d2c67775ec49609c7153fe765a6f7')
version('3.10.1', sha256='466ac9fad91f266cf3b9d148f58e2adebd5b9fcfc03e6730eb72251e6c34c8ab')
version('3.10.0', sha256='25eb1c41d9fb922ffa337a720dfdceee43cf2d38409923f087c2010c9742f012')
version('3.8.3', sha256='3cca3e8483bc158cb8e972eb819a55a5734ba70f2c7da28ebc485864aafb17bd')
version('3.8.2', sha256='4d682f7625465a5b3596b3f7e83eddad86a60384fead9c81a6870704baffaddd')
version('3.8.1', sha256='d65c8e1c7471bba46f5017f261ebbef81dffb5843a24f0e7713a00f70785ea99')
version('3.4.15', sha256='81c93b72adbea41bd765294c0cdb09476a632d8b3f90101abc409ca9ea7fb04d')
version('3.4.14', sha256='e138716c7ea84011d3b28fb9c75e6a79322fb66f532246393571906a595d7261')
variant('3d', default=False, description='Build QGIS 3D library')
variant('analysis', default=True, description='Build QGIS analysis library')
variant('apidoc', default=False, description='Build QGIS API doxygen documentation')
variant('astyle', default=False, description='Contribute QGIS with astyle')
variant('bindings', default=True, description='Build Python bindings')
variant('clang_tidy', default=False, description='Use Clang tidy')
variant('core', default=True, description='Build QGIS Core')
variant('custom_widgets', default=False, description='Build QGIS custom widgets for Qt Designer')
variant('desktop', default=True, description='Build QGIS desktop')
variant('georeferencer', default=True, description='Build GeoReferencer plugin')
variant('globe', default=False, description='Build Globe plugin')
variant('grass7', default=False, description='Build with GRASS providers and plugin')
variant('gui', default=True, description='Build QGIS GUI library and everything built on top of it')
    variant('internal_mdal', default=True, description='Build with MDAL support')
variant('internal_o2', default=True, description='Download and locally include source of o2 library')
variant('oauth2_plugin', default=True, description='Build OAuth2 authentication method plugin')
variant('oracle', default=False, description='Build with Oracle support')
    variant('postgresql', default=True, description='Build with PostgreSQL support')
variant('py_compile', default=False, description='Byte compile Python modules in staged or installed locations')
variant('qsciapi', default=True, description='Generate PyQGIS QScintilla2 API')
variant('qspatialite', default=False, description='Build QSpatialite sql driver')
variant('qt5serialport', default=True, description='Try Qt5SerialPort for GPS positioning')
variant('qtmobility', default=False, description='Build QtMobility related code')
variant('qtwebkit', default=False, description='Enable QtWebkit Support')
variant('quick', default=False, description='Build QGIS Quick library')
variant('qwtpolar', default=False, description='Build QwtPolar')
variant('server', default=False, description='Build QGIS server')
variant('staged_plugins', default=True, description='Stage-install core Python plugins to run from build directory')
variant('thread_local', default=True, description='Use std::thread_local')
variant('txt2tags', default=False, description='Generate PDF for txt2tags documentation')
# Ref. for dependencies:
# http://htmlpreview.github.io/?https://raw.github.com/qgis/QGIS/master/doc/INSTALL.html
# https://github.com/qgis/QGIS/blob/master/INSTALL
depends_on('exiv2')
depends_on('expat@1.95:')
depends_on('gdal@2.1.0: +python', type=('build', 'link', 'run'))
depends_on('geos@3.4.0:')
depends_on('libspatialindex')
depends_on('libspatialite@4.2.0:')
depends_on('libzip')
depends_on('proj@4.4.0:')
depends_on('py-psycopg2', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('py-pyqt4', when='@2')
depends_on('py-pyqt5@5.3:', when='@3')
depends_on('py-requests', type=('build', 'run')) # TODO: is build dependency necessary?
depends_on('python@2.7:2.8', type=('build', 'run'), when='@2')
depends_on('python@3.0.0:', type=('build', 'run'), when='@3')
depends_on('qca@2.2.1')
depends_on('qjson')
depends_on('qscintilla +python')
depends_on('qt+dbus')
depends_on('qtkeychain@0.5:', when='@3:')
depends_on('qwt@5:')
depends_on('qwtpolar')
depends_on('sqlite@3.0.0: +column_metadata')
# Runtime python dependencies, not mentioned in install instructions
depends_on('py-pyyaml', type='run')
depends_on('py-owslib', type='run')
depends_on('py-jinja2', type='run')
depends_on('py-pygments', type='run')
# optionals
depends_on('postgresql@8:', when='+postgresql') # for PostGIS support
depends_on('gsl', when='+georeferencer') # for georeferencer
    depends_on('grass@7.0.0', type=('build', 'link', 'run'), when='+grass7')  # for GRASS providers and plugin
# the below dependencies are shown in cmake config
depends_on('hdf5')
depends_on('netcdf-c')
# build
depends_on('cmake@3.0.0:', type='build')
depends_on('flex@2.5.6:', type='build')
depends_on('bison@2.4:', type='build')
depends_on('pkgconfig', type='build')
# Take care of conflicts using depends_on
depends_on('proj@5:', when='@3.8.2:')
depends_on('qt@5.9.0:5.12.99', when='@3.8')
depends_on('qt@5.9.0:', when='@3.10.0:')
depends_on('qtkeychain@:1.5.99', when='^qt@4')
depends_on('qt@:4', when='@2')
patch('pyqt5.patch', when='^qt@5')
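    # Illustrative usage (hypothetical spec, not part of the recipe itself):
    # the variants above are translated into the WITH_* CMake flags in
    # cmake_args() below, so a command such as
    #   spack install qgis@3.10.4+grass7+server ^qt@5.12.4
    # would build QGIS with the GRASS providers and the QGIS server enabled.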
def cmake_args(self):
spec = self.spec
args = []
# qtwebkit module was removed from qt as of version 5.6
# needs to be compiled as a separate package
args.extend([
'-DUSE_OPENCL=OFF',
# cmake couldn't determine the following paths
'-DEXPAT_LIBRARY={0}'.format(self.spec['expat'].libs),
'-DPOSTGRESQL_PREFIX={0}'.format(
self.spec['postgresql'].prefix),
'-DQSCINTILLA_INCLUDE_DIR=' +
self.spec['qscintilla'].prefix.include,
'-DQSCINTILLA_LIBRARY=' + self.spec['qscintilla'].prefix +
'/lib/libqscintilla2_qt5.so',
'-DLIBZIP_INCLUDE_DIR=' +
self.spec['libzip'].prefix.include,
'-DLIBZIP_CONF_INCLUDE_DIR=' +
self.spec['libzip'].prefix.lib.libzip.include,
'-DGDAL_CONFIG_PREFER_PATH=' +
self.spec['gdal'].prefix.bin,
'-DGEOS_CONFIG_PREFER_PATH=' +
self.spec['geos'].prefix.bin,
'-DGSL_CONFIG_PREFER_PATH=' + self.spec['gsl'].prefix.bin,
'-DPOSTGRES_CONFIG_PREFER_PATH=' +
self.spec['postgresql'].prefix.bin
])
args.extend([
'-DWITH_3D={0}'.format(
'TRUE' if '+3d' in spec else 'FALSE'),
'-DWITH_ANALYSIS={0}'.format(
'TRUE' if '+analysis' in spec else 'FALSE'),
'-DWITH_APIDOC={0}'.format(
'TRUE' if '+apidoc' in spec else 'FALSE'),
'-DWITH_ASTYLE={0}'.format(
'TRUE' if '+astyle' in spec else 'FALSE'),
'-DWITH_BINDINGS={0}'.format(
'TRUE' if '+bindings' in spec else 'FALSE'),
'-DWITH_CLANG_TIDY={0}'.format(
'TRUE' if '+clang_tidy' in spec else 'FALSE'),
'-DWITH_CORE={0}'.format(
'TRUE' if '+core' in spec else 'FALSE'),
'-DWITH_CUSTOM_WIDGETS={0}'.format(
'TRUE' if '+custom_widgets' in spec else 'FALSE'),
'-DWITH_DESKTOP={0}'.format(
'TRUE' if '+desktop' in spec else 'FALSE'),
'-DWITH_GEOREFERENCER={0}'.format(
'TRUE' if '+georeferencer' in spec else 'FALSE'),
'-DWITH_GLOBE={0}'.format(
'TRUE' if '+globe' in spec else 'FALSE'),
'-DWITH_GUI={0}'.format(
'TRUE' if '+gui' in spec else 'FALSE'),
'-DWITH_INTERNAL_MDAL={0}'.format(
'TRUE' if '+internal_mdal' in spec else 'FALSE'),
'-DWITH_INTERNAL_O2={0}'.format(
'ON' if '+internal_o2' in spec else 'OFF'),
'-DWITH_OAUTH2_PLUGIN={0}'.format(
'TRUE' if '+oauth2_plugin' in spec else 'FALSE'),
'-DWITH_ORACLE={0}'.format(
'TRUE' if '+oracle' in spec else 'FALSE'),
'-DWITH_POSTGRESQL={0}'.format(
'TRUE' if '+postgresql' in spec else 'FALSE'),
'-DWITH_PY_COMPILE={0}'.format(
'TRUE' if '+py_compile' in spec else 'FALSE'),
'-DWITH_QSCIAPI={0}'.format(
'TRUE' if '+qsciapi' in spec else 'FALSE'),
'-DWITH_QSPATIALITE={0}'.format(
'ON' if '+qspatialite' in spec else 'OFF'),
'-DWITH_QT5SERIALPORT={0}'.format(
'TRUE' if '+qt5serialport' in spec else 'FALSE'),
'-DWITH_QTMOBILITY={0}'.format(
'TRUE' if '+qtmobility' in spec else 'FALSE'),
'-DWITH_QTWEBKIT={0}'.format(
'ON' if '+qtwebkit' in spec else 'OFF'),
'-DWITH_QUICK={0}'.format(
'TRUE' if '+quick' in spec else 'FALSE'),
'-DWITH_QWTPOLAR={0}'.format(
'TRUE' if '+qwtpolar' in spec else 'FALSE'),
'-DWITH_SERVER={0}'.format(
'TRUE' if '+server' in spec else 'FALSE'),
'-DWITH_STAGED_PLUGINS={0}'.format(
'TRUE' if '+staged_plugins' in spec else 'FALSE'),
'-DWITH_THREAD_LOCAL={0}'.format(
'TRUE' if '+thread_local' in spec else 'FALSE'),
'-DWITH_TXT2TAGS_PDF={0}'.format(
                'TRUE' if '+txt2tags' in spec else 'FALSE'),
])
if '+grass7' in self.spec:
args.extend([
'-DWITH_GRASS7=ON',
'-DGRASS_PREFIX7={0}'.format(self.spec['grass'].prefix),
'-DGRASS_INCLUDE_DIR7={0}'.format(
self.spec['grass'].prefix.include)
])
else:
args.append('-DWITH_GRASS7=OFF')
return args
|
from django.apps import AppConfig
class DancesConfig(AppConfig):
name = 'dances'
|
from tcontrol.plot_utility import plot_pzmap
from tcontrol.transferfunction import TransferFunction
from tcontrol.lti import LinearTimeInvariant as LTI
__all__ = ["pzmap"]
def pzmap(sys_, title='pole-zero map', *, plot=True):
"""
Use:
Draw the pole-zero map
Example:
>>> import tcontrol as tc
>>> system = tc.tf([1], [1, 1, 0, 3])
>>> tc.pzmap(system)
>>> tc.plot.show()
:param sys_: the transfer function of the system
:type sys_: SISO
:param title: the title of the pzmap
:type title: str
:param plot: if plot is true it will draw the picture
:type plot: bool
:return: the poles and zeros of the system
:rtype: (numpy.ndarray, numpy.ndarray)
"""
if isinstance(sys_, LTI):
if not isinstance(sys_, TransferFunction):
raise NotImplementedError('pzmap currently only for TransferFunction')
else:
raise TypeError('sys_ should be LTI or sub class of LTI')
zero = sys_.zero()
pole = sys_.pole()
if plot:
plot_pzmap(pole, sys_, title, zero)
return pole, zero
|
#!/usr/bin/env python3
import json
import yaml
import os
import sys
import logging
import logging.config
import time
import datetime
import base64
import multiprocessing
import hashlib
from kubernetes import client, config as k8s_config
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
from kubernetes.stream.ws_client import ERROR_CHANNEL, STDERR_CHANNEL, STDOUT_CHANNEL
sys.path.append("../utils")
from cluster_manager import record
from config import config
from pod_template import RegularJobTemplate, DistributeJobTemplate, InferenceJobTemplate
from job import Job, JobSchema
from DataHandler import DataHandler
import k8sUtils
import framework
logger = logging.getLogger(__name__)
def walk_json_field_safe(obj, *fields):
""" for example a=[{"a": {"b": 2}}]
walk_json_field_safe(a, 0, "a", "b") will get 2
walk_json_field_safe(a, 0, "not_exist") will get None
"""
try:
for f in fields:
obj = obj[f]
return obj
except:
return None
def b64encode(str_val):
return base64.b64encode(str_val.encode("utf-8")).decode("utf-8")
def b64decode(str_val):
return base64.b64decode(str_val.encode("utf-8")).decode("utf-8")
class JobRole(object):
MARK_ROLE_READY_FILE = "/dlts-runtime/status/READY"
def __init__(self, launcher, role_name, pod_name, pod):
self.launcher = launcher
self.role_name = role_name
self.pod_name = pod_name
self.pod = pod
# will query api server if refresh is True
def status(self, refresh=False):
"""
Return role status in ["NotFound", "Pending", "Running", "Succeeded", "Failed", "Unknown"]
        It's slightly different from the pod phase: when the pod is running,
        CONTAINER_READY -> WORKER_READY -> JOB_READY (only then is the job finally in "Running" status).
"""
# pod-phase: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
# node condition: https://kubernetes.io/docs/concepts/architecture/nodes/#condition
if refresh:
pods = self.launcher.get_pods(
field_selector="metadata.name={}".format(self.pod_name))
logger.debug("Pods: {}".format(pods))
if len(pods) < 1:
return "NotFound"
assert (len(pods) == 1)
self.pod = pods[0]
phase = self.pod.status.phase
# !!! Pod is running, doesn't mean "Role" is ready and running.
if phase == "Running":
            # Found that phase won't turn into "Unknown" even when we get 'unknown' from kubectl
if self.pod.status.reason == "NodeLost":
return "Unknown"
            # Starting from v1.13, TaintBasedEvictions are enabled by default. NodeLost no longer
            # exists. Use deletionTimestamp to signal node lost.
# See below for details:
# https://github.com/kubernetes/kubernetes/issues/72226
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
if self.pod.metadata.deletion_timestamp is not None:
logger.warning(
"pod %s has deletion_timestamp %s. Marking pod as Unknown."
% (self.pod_name, self.pod.metadata.deletion_timestamp))
return "Unknown"
            # Check if the user command has been run.
if not self._is_role_ready():
return "Pending"
return phase
def pod_restricted_details(self):
detail = {
"node_name": self.pod.spec.node_name,
"host_ip": self.pod.status.host_ip,
"pod_ip": self.pod.status.pod_ip
}
return detail
def pod_details(self):
return self.pod
def _is_file_exist(self, file):
status_code, _ = self.launcher.pod_exec(
self.pod_name, ["/bin/sh", "-c", "ls -lrt {}".format(file)])
return status_code == 0
def _is_role_ready(self):
for container in self.pod.spec.containers:
if container.name == self.pod_name and container.readiness_probe is not None:
for status in self.pod.status.container_statuses:
if status.name == self.pod_name:
logger.debug("pod %s have readiness_probe result",
self.pod_name)
return status.ready
# no readiness_probe defined, fallback to old way
return self._is_file_exist(JobRole.MARK_ROLE_READY_FILE)
def get_job_status_detail(job):
if "jobStatusDetail" not in job:
return None
job_status_detail = job["jobStatusDetail"]
if job_status_detail is None:
return job_status_detail
if not isinstance(job_status_detail, list):
job_status_detail = b64decode(job_status_detail)
job_status_detail = json.loads(job_status_detail)
return job_status_detail
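# Illustrative example (hypothetical values): for
#   job = {"jobStatusDetail": b64encode('[{"startedAt": "2020-01-01T00:00:00Z"}]')}
# get_job_status_detail(job) base64-decodes and JSON-parses the field and returns
# [{"startedAt": "2020-01-01T00:00:00Z"}]; a missing or None field yields None,
# and a value that is already a list is returned unchanged.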
def job_status_detail_with_finished_time(job_status_detail, status, msg=""):
# This method is called when a job succeeds/fails/is killed/has an error
# job_status_detail must be None or a list
if (job_status_detail is not None) and (not isinstance(
job_status_detail, list)):
return job_status_detail
# Force adding an item for empty detail
if (job_status_detail is None) or (len(job_status_detail) == 0):
job_status_detail = [{}]
finished_at = k8sUtils.localize_time(datetime.datetime.now())
new_job_status_detail = []
status_change_message = "{} at {}. {}".format(status, finished_at, msg)
# add finishedAt for all pods if absent
for pod_status_detail in job_status_detail:
# Mark started time the same as finished time for a fast finishing job
if "startedAt" not in pod_status_detail:
pod_status_detail["startedAt"] = finished_at
if "finishedAt" not in pod_status_detail:
pod_status_detail["finishedAt"] = finished_at
if "message" not in pod_status_detail:
pod_status_detail["message"] = status_change_message
else:
pod_status_detail["message"] += "\n" + status_change_message
new_job_status_detail.append(pod_status_detail)
return new_job_status_detail
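# Illustrative example (hypothetical values):
#   job_status_detail_with_finished_time(None, "killed", "user request")
# returns a single-entry list whose "startedAt" and "finishedAt" are both set
# to the current localized time and whose "message" reads
# "killed at <finished_at>. user request".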
def get_pod_priority_class(is_support_pod_priority, job_type, is_preemptible):
if not is_support_pod_priority:
return None
if job_type == "InferenceJob":
return "inference-job-priority"
if is_preemptible:
return "preemptible-job-priority"
else:
return "job-priority"
# Interface class for managing the lifetime of a job
class Launcher(object):
def __init__(self):
k8s_config.load_kube_config()
self.k8s_CoreAPI = client.CoreV1Api()
self.k8s_AppsAPI = client.AppsV1Api()
self.k8s_custom_obj_api = client.CustomObjectsApi()
self.namespace = "default"
self.pretty = "pretty_example"
@record
def _create_pod(self, body):
api_response = self.k8s_CoreAPI.create_namespaced_pod(
namespace=self.namespace,
body=body,
pretty=self.pretty,
)
return api_response
@record
def _cleanup_pods_with_labels(self, label_selector):
errors = []
try:
self.k8s_CoreAPI.delete_collection_namespaced_pod(
self.namespace,
pretty=self.pretty,
label_selector=label_selector,
)
except ApiException as e:
message = "Delete pods failed: {}".format(label_selector)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _cleanup_configmap(self, label_selector):
errors = []
try:
api_response = self.k8s_CoreAPI.delete_collection_namespaced_config_map(
self.namespace,
pretty=self.pretty,
label_selector=label_selector,
)
except ApiException as e:
message = "Delete configmap failed: {}".format(label_selector)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _create_deployment(self, body):
api_response = self.k8s_AppsAPI.create_namespaced_deployment(
namespace=self.namespace,
body=body,
pretty=self.pretty,
)
return api_response
@record
def _delete_deployment(self, name, grace_period_seconds=None):
body = client.V1DeleteOptions()
body.grace_period_seconds = grace_period_seconds
api_response = self.k8s_AppsAPI.delete_namespaced_deployment(
name=name,
namespace=self.namespace,
pretty=self.pretty,
body=body,
grace_period_seconds=grace_period_seconds,
)
return api_response
@record
def _get_deployment(self, name):
api_response = self.k8s_AppsAPI.read_namespaced_deployment_scale(
namespace=self.namespace, pretty=self.pretty, name=name)
logger.debug("Get pods: {}".format(api_response))
return api_response
@record
def _patch_deployment(self, name, body):
api_response = self.k8s_AppsAPI.patch_namespaced_deployment_scale(
namespace=self.namespace, pretty=self.pretty, name=name, body=body)
return api_response
@record
def _create_service(self, body):
api_response = self.k8s_CoreAPI.create_namespaced_service(
namespace=self.namespace,
body=body,
pretty=self.pretty,
)
return api_response
@record
def _delete_service(self, name):
api_response = self.k8s_CoreAPI.delete_namespaced_service(
name=name,
namespace=self.namespace,
pretty=self.pretty,
body=client.V1DeleteOptions(),
)
return api_response
@record
def _create_secret(self, body):
api_response = self.k8s_CoreAPI.create_namespaced_secret(
namespace=self.namespace,
body=body,
pretty=self.pretty,
)
return api_response
@record
def _delete_secret(self, name, grace_period_seconds=None):
body = client.V1DeleteOptions()
body.grace_period_seconds = grace_period_seconds
api_response = self.k8s_CoreAPI.delete_namespaced_secret(
name=name,
namespace=self.namespace,
pretty=self.pretty,
body=body,
grace_period_seconds=grace_period_seconds)
return api_response
@record
def _delete_pod(self, name, grace_period_seconds=None):
body = client.V1DeleteOptions()
body.grace_period_seconds = grace_period_seconds
api_response = self.k8s_CoreAPI.delete_namespaced_pod(
name=name,
namespace=self.namespace,
pretty=self.pretty,
body=body,
grace_period_seconds=grace_period_seconds,
)
return api_response
@record
def _cleanup_pods(self, pod_names, force=False):
errors = []
grace_period_seconds = 0 if force else None
for pod_name in pod_names:
try:
self._delete_pod(pod_name, grace_period_seconds)
except Exception as e:
if isinstance(e, ApiException) and 404 == e.status:
return []
message = "Delete pod failed: {}".format(pod_name)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _cleanup_services(self, services):
errors = []
for service in services:
assert (isinstance(service, client.V1Service))
try:
service_name = service.metadata.name
self._delete_service(service_name)
except ApiException as e:
message = "Delete service failed: {}".format(service_name)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _cleanup_deployment(self, deployment_names, force=False):
errors = []
grace_period_seconds = 0 if force else None
for deployment_name in deployment_names:
try:
self._delete_deployment(deployment_name, grace_period_seconds)
except Exception as e:
if isinstance(e, ApiException) and 404 == e.status:
return []
message = "Delete pod failed: {}".format(deployment_name)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _cleanup_secrets(self, secret_names, force=False):
errors = []
grace_period_seconds = 0 if force else None
for secret_name in secret_names:
try:
self._delete_secret(secret_name, grace_period_seconds)
except Exception as e:
if isinstance(e, ApiException) and 404 == e.status:
return []
message = "Deleting secret failed: {}".format(secret_name)
logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def _cleanup_secrets_with_labels(self, label_selector):
errors = []
try:
self.k8s_CoreAPI.delete_collection_namespaced_secret(
self.namespace,
pretty=self.pretty,
label_selector=label_selector)
except ApiException as e:
message = "Delete secrets failed: {}".format(label_selector)
            logger.warning(message, exc_info=True)
errors.append({"message": message, "exception": e})
return errors
@record
def create_secrets(self, secrets):
# Clean up secrets first
secret_names = [
secret["metadata"]["name"]
for secret in secrets
if secret["kind"] == "Secret"
]
logger.debug("Trying to delete secrets %s" % secret_names)
self._cleanup_secrets(secret_names)
created = []
for secret in secrets:
created_secret = self._create_secret(secret)
created.append(created_secret)
logger.debug("Creating secret succeeded: %s" %
created_secret.metadata.name)
return created
@record
def get_pods(self, field_selector="", label_selector=""):
api_response = self.k8s_CoreAPI.list_namespaced_pod(
namespace=self.namespace,
pretty=self.pretty,
field_selector=field_selector,
label_selector=label_selector,
)
logger.debug("Get pods: {}".format(api_response))
return api_response.items
@record
def _get_deployments(self, field_selector="", label_selector=""):
api_response = self.k8s_AppsAPI.list_namespaced_deployment(
namespace=self.namespace,
pretty=self.pretty,
field_selector=field_selector,
label_selector=label_selector,
)
logger.debug("Get pods: {}".format(api_response))
return api_response.items
@record
def _get_services_by_label(self, label_selector):
api_response = self.k8s_CoreAPI.list_namespaced_service(
namespace=self.namespace,
pretty=self.pretty,
label_selector=label_selector,
)
return api_response.items
@record
def get_secrets(self, field_selector="", label_selector=""):
api_response = self.k8s_CoreAPI.list_namespaced_secret(
namespace=self.namespace,
pretty=self.pretty,
field_selector=field_selector,
label_selector=label_selector,
)
logger.debug("Get secrets: {}".format(api_response))
return api_response.items
@record
def pod_exec(self, pod_name, exec_command, timeout=60):
"""work as the command (with timeout): kubectl exec 'pod_name' 'exec_command'"""
try:
logger.debug("Exec on pod {}: {}".format(pod_name, exec_command))
client = stream(
self.k8s_CoreAPI.connect_get_namespaced_pod_exec,
name=pod_name,
namespace=self.namespace,
command=exec_command,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False,
)
client.run_forever(timeout=timeout)
err = yaml.full_load(client.read_channel(ERROR_CHANNEL))
if err is None:
return [-1, "Timeout"]
if err["status"] == "Success":
status_code = 0
else:
logger.debug("Exec on pod {} failed. cmd: {}, err: {}.".format(
pod_name, exec_command, err))
status_code = int(err["details"]["causes"][0]["message"])
output = client.read_all()
logger.debug(
"Exec on pod {}, status: {}, cmd: {}, output: {}".format(
pod_name, status_code, exec_command, output))
return [status_code, output]
except ApiException as err:
logger.error("Exec on pod {} error. cmd: {}, err: {}.".format(
pod_name, exec_command, err),
exc_info=True)
            return [-1, str(err)]
class LauncherStub(Launcher):
def __init__(self):
super(LauncherStub, self).__init__()
def start(self):
pass
def wait_tasks_done(self):
pass
def transform_state(self, framework_state, completion_status):
# https://github.com/microsoft/frameworkcontroller/blob/master/pkg/apis/frameworkcontroller/v1/types.go#L441
if framework_state in {
"AttemptCreationPending", "AttemptCreationRequested",
"AttemptPreparing"
}:
return "Pending"
elif framework_state == "AttemptRunning":
return "Running"
elif framework_state in {
"AttemptDeletionPending", "AttemptDeletionRequested",
"AttemptDeleting"
}:
return "Deleting"
elif framework_state in {"AttemptCompleted", "Completed"}:
if completion_status is None:
                logger.warning(
                    "framework_state is %s, but completion_status still not posted, assume running",
                    framework_state)
return "Running"
result = walk_json_field_safe(completion_status, "type", "name")
if result is None:
logger.warning("unknown completion_status %s, assuming Running",
completion_status)
return result or "Running"
else:
logger.error("unknown framework_state %s, completion_status %s",
framework_state, completion_status)
return "Unknown"
def get_job_status(self, job_id):
framework_obj = self._get_framework(framework.transform_name(job_id))
state = walk_json_field_safe(framework_obj, "status", "state")
completion_status = walk_json_field_safe(framework_obj, "status",
"attemptStatus",
"completionStatus")
result = self.transform_state(state, completion_status)
diagnostics = walk_json_field_safe(completion_status, "diagnostics")
return result, [], diagnostics
@record
def _create_framework(self, body):
resp = self.k8s_custom_obj_api.create_namespaced_custom_object(
"frameworkcontroller.microsoft.com",
"v1",
self.namespace,
"frameworks",
body,
pretty=self.pretty,
)
return resp
@record
def _get_framework(self, framework_name):
return self.k8s_custom_obj_api.get_namespaced_custom_object(
"frameworkcontroller.microsoft.com",
"v1",
self.namespace,
"frameworks",
framework_name,
)
@record
def _delete_framework(self, name, grace_period_seconds=None):
body = client.V1DeleteOptions()
resp = self.k8s_custom_obj_api.delete_namespaced_custom_object(
"frameworkcontroller.microsoft.com",
"v1",
self.namespace,
"frameworks",
framework.transform_name(name),
body,
grace_period_seconds=grace_period_seconds,
)
return resp
@record
def _cleanup_framework(self, framework_name, force=False):
errors = []
grace_period_seconds = 0 if force else None
try:
self._delete_framework(framework_name, grace_period_seconds)
except Exception as e:
if isinstance(e, ApiException) and 404 == e.status:
return []
message = "Delete framework failed: {}".format(framework_name)
logger.exception(message)
errors.append({"message": message, "exception": e})
return errors
def submit_job(self, job):
        # check if any pod exists with label: run=job_id
assert ("jobId" in job)
job_id = job["jobId"]
pods = self.get_pods(label_selector="run={}".format(job_id))
if len(pods) > 0:
            logger.warning(
                "Waiting until previous pods are cleaned up! Job %s", job_id)
errors = self.delete_job(job_id, force=True)
if errors:
logger.warning("Force delete job %s: %s", job_id, errors)
return
ret = {}
dataHandler = DataHandler()
try:
            # TODO refine later
            # Before resubmitting the job, reset the endpoints:
            # set every endpoint to status 'pending' so it restarts when the job is ready.
endpoints = dataHandler.GetJobEndpoints(job_id)
for endpoint_id, endpoint in list(endpoints.items()):
endpoint["status"] = "pending"
logger.debug("Reset endpoint status to 'pending': %s",
endpoint_id)
dataHandler.UpdateEndpoint(endpoint)
job["cluster"] = config
job_object, errors = JobSchema().load(job)
job_object.params = json.loads(b64decode(job["jobParams"]))
# inject gid, uid and user
# TODO it should return only one entry
user_info = dataHandler.GetIdentityInfo(
job_object.params["userName"])[0]
job_object.params["gid"] = user_info["gid"]
job_object.params["uid"] = user_info["uid"]
job_object.params["user"] = job_object.get_alias()
job_object.params["private_key"] = user_info["private_key"]
job_object.params["ssh_public_keys"] = job_object.params.get(
"ssh_public_keys", [])
job_object.params["ssh_public_keys"].append(user_info["public_key"])
if "job_token" not in job_object.params:
if "master_token" in config and config[
"master_token"] is not None and "userName" in job_object.params:
plain_token = job_object.params["userName"] + \
":" + config["master_token"]
job_object.params["job_token"] = hashlib.md5(
plain_token.encode("utf-8")).hexdigest()
else:
job_object.params["job_token"] = "tryme2017"
if "envs" not in job_object.params:
job_object.params["envs"] = []
job_object.params["envs"].append({
"name": "DLTS_JOB_TOKEN",
"value": job_object.params["job_token"]
})
blobfuse_secret_template = job_object.get_blobfuse_secret_template()
image_pull_secret_template = job_object.get_image_pull_secret_template(
)
secret_templates = {
"blobfuse": blobfuse_secret_template,
"imagePull": image_pull_secret_template
}
if job_object.params["jobtrainingtype"] == "RegularJob":
pod_template = RegularJobTemplate(
job_object.get_template(),
secret_templates=secret_templates)
elif job_object.params["jobtrainingtype"] == "PSDistJob":
pod_template = DistributeJobTemplate(
job_object.get_template(),
secret_templates=secret_templates)
elif job_object.params["jobtrainingtype"] == "InferenceJob":
pod_template = InferenceJobTemplate(
job_object.get_template(),
deployment_template=job_object.get_deployment_template(),
secret_templates=secret_templates)
else:
dataHandler.SetJobError(
job_object.job_id, "ERROR: invalid jobtrainingtype: %s" %
job_object.params["jobtrainingtype"])
dataHandler.Close()
return False
params, error = pod_template.generate_params(job_object)
if error:
logger.error("failed to generate params for %s job %s",
job_object.params["jobtrainingtype"], error)
return False
job_object.params["priority_class"] = get_pod_priority_class(
config.get("is_support_pod_priority", False),
job_object.params["jobtrainingtype"],
job_object.params.get("preemptionAllowed", False))
framework_desc = framework.transform_job(
job_object.params["jobtrainingtype"], params, config)
job_description = yaml.dump(framework_desc)
secrets = pod_template.generate_secrets(job_object)
try:
secrets = self.create_secrets(secrets)
ret["output"] = "Created secrets: {}. ".format(
[secret.metadata.name for secret in secrets])
created_pods = self._create_framework(framework_desc)
logger.info("created_pods is %s, type is %s", created_pods,
type(created_pods))
ret["output"] += "Created framework: {}".format(
framework_desc["metadata"]["name"])
except Exception as e:
ret["output"] = "Error: %s" % e.message
logger.exception(e)
return False
ret["jobId"] = job_object.job_id
jobMeta = {}
jobMeta["jobPath"] = job_object.job_path
jobMeta["workPath"] = job_object.work_path
# the command of the first container
jobMeta["LaunchCMD"] = job_object.params["cmd"]
jobMetaStr = b64encode(json.dumps(jobMeta))
dataFields = {
"jobStatus": "scheduling",
"jobDescription": b64encode(job_description),
"lastUpdated": datetime.datetime.now().isoformat(),
"jobMeta": jobMetaStr
}
conditionFields = {"jobId": job_object.job_id}
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
except Exception as e:
logger.error("Submit job failed: %s" % job, exc_info=True)
ret["error"] = str(e)
retries = dataHandler.AddandGetJobRetries(job["jobId"])
if retries >= 5:
detail = get_job_status_detail(job)
detail = job_status_detail_with_finished_time(
detail, "error", "Server error in job submission")
dataFields = {
"jobStatus": "error",
"errorMsg": "Cannot submit job!" + str(e),
"jobStatusDetail": b64encode(json.dumps(detail))
}
conditionFields = {"jobId": job["jobId"]}
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
# Try to clean up the job
try:
self.delete_job(job_id, force=True)
                    logger.info(
                        "Cleaning up job %s succeeded after %d retries of job submission"
                        % (job["jobId"], retries))
except:
logger.warning(
"Cleaning up job %s failed after %d retries of job submission"
% (job["jobId"], retries))
return False
dataHandler.Close()
return ret
def delete_job(self, job_id, force=False):
framework_name = framework.transform_name(job_id)
logger.debug("deleting framework %s", framework_name)
framework_errors = self._cleanup_framework(framework_name, force=force)
label_selector = "run={}".format(job_id)
configmap_errors = self._cleanup_configmap(label_selector)
errors = framework_errors + configmap_errors
return errors
def kill_job(self, job_id, desired_state="killed"):
dataHandler = DataHandler()
result, detail = k8sUtils.GetJobStatus(job_id)
detail = job_status_detail_with_finished_time(detail, desired_state)
dataHandler.UpdateJobTextFields(
{"jobId": job_id},
{"jobStatusDetail": b64encode(json.dumps(detail))})
logger.info("Killing job %s, with status %s, %s" %
(job_id, result, detail))
errors = self.delete_job(job_id, force=True)
dataFields = {
"jobStatusDetail": b64encode(json.dumps(detail)),
"lastUpdated": datetime.datetime.now().isoformat()
}
conditionFields = {"jobId": job_id}
if len(errors) == 0:
dataFields["jobStatus"] = desired_state
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
dataHandler.Close()
return True
else:
dataFields["jobStatus"] = "error"
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
dataHandler.Close()
logger.error("Kill job failed with errors: {}".format(errors))
return False
def scale_job(self, job):
pass
class PythonLauncher(Launcher):
def __init__(self, pool_size=3):
super(PythonLauncher, self).__init__()
self.processes = []
self.queue = None
self.pool_size = pool_size
# items in queue should be tuple of 3 elements: (function name, args, kwargs)
def start(self):
if len(self.processes) == 0:
self.queue = multiprocessing.JoinableQueue()
for i in range(self.pool_size):
p = multiprocessing.Process(target=self.run,
args=(self.queue,),
name="py-launcher-" + str(i))
self.processes.append(p)
p.start()
def get_job_status(self, job_id):
job_roles = self.get_job_roles(job_id)
if len(job_roles) < 1:
return "NotFound", [], ""
# role status in ["NotFound", "Pending", "Running", "Succeeded", "Failed", "Unknown"]
# TODO ??? when ps/master role "Succeeded", return Succeeded
for job_role in job_roles:
if job_role.role_name not in ["master", "ps"]:
continue
if job_role.status() == "Succeeded":
logger.debug("Job: {}, Succeeded!".format(job_id))
return "Succeeded", [], ""
statuses = [job_role.status() for job_role in job_roles]
logger.info("Job: {}, status: {}".format(job_id, statuses))
details = []
for job_role in job_roles:
details.append(job_role.pod_details().to_dict())
logger.debug("Job {}, details: {}".format(job_id, details))
restricted_details = [
job_role.pod_restricted_details() for job_role in job_roles
]
logger.info("Job: {}, restricted details: {}".format(
job_id, restricted_details))
job_status = "Running"
if "Failed" in statuses:
job_status = "Failed"
elif "Unknown" in statuses:
job_status = "Unknown"
elif "NotFound" in statuses:
job_status = "NotFound"
elif "Pending" in statuses:
job_status = "Pending"
return job_status, details, "" # refine the last value to provide diagnostics for python launcher
def wait_tasks_done(self):
self.queue.join()
@record
def create_pods(self, pods):
        # TODO instead of deleting, we could check and update existing ones. During refactoring, keep the old way.
pod_names = [
pod["metadata"]["name"] for pod in pods if pod["kind"] == "Pod"
]
self._cleanup_pods(pod_names)
deployment_names = [
pod["metadata"]["name"]
for pod in pods
if pod["kind"] == "Deployment"
]
self._cleanup_deployment(deployment_names)
created = []
for pod in pods:
if pod["kind"] == "Pod":
created_pod = self._create_pod(pod)
elif pod["kind"] == "Deployment":
created_pod = self._create_deployment(pod)
else:
logger.error("unknown kind %s, with body %s", pod["kind"], pod)
created.append(created_pod)
logger.debug("Create pod succeed: %s" % created_pod.metadata.name)
return created
@record
def delete_job(self, job_id, force=False):
label_selector = "run={}".format(job_id)
# query pods then delete
pod_errors = self._cleanup_pods_with_labels(label_selector)
logger.debug("deleting pods %s" % label_selector)
# query services then delete
services = self._get_services_by_label(label_selector)
service_errors = self._cleanup_services(services)
deployments = self._get_deployments(label_selector=label_selector)
deployment_names = [
deployment.metadata.name for deployment in deployments
]
deployment_errors = self._cleanup_deployment(deployment_names, force)
logger.debug("deleting deployments %s" % ",".join(deployment_names))
# query and delete secrets
secrets = self.get_secrets(label_selector=label_selector)
secret_names = [secret.metadata.name for secret in secrets]
secret_errors = self._cleanup_secrets_with_labels(label_selector)
logger.debug("deleting secrets for %s" % label_selector)
configmap_errors = self._cleanup_configmap(label_selector)
errors = pod_errors + service_errors + deployment_errors + secret_errors + \
configmap_errors
return errors
def get_job_roles(self, job_id):
pods = self.get_pods(label_selector="run={}".format(job_id))
job_roles = []
for pod in pods:
pod_name = pod.metadata.name
if "distRole" in pod.metadata.labels:
role = pod.metadata.labels["distRole"]
else:
role = "master"
job_role = JobRole(self, role, pod_name, pod)
job_roles.append(job_role)
return job_roles
def _all_pods_not_existing(self, job_id):
job_roles = self.get_job_roles(job_id)
statuses = [job_role.status() for job_role in job_roles]
logger.debug("Job: {}, status: {}".format(job_id, statuses))
return all([status == "NotFound" for status in statuses])
def submit_job(self, job):
self.queue.put(("submit_job", (job,), {}))
def submit_job_impl(self, job):
        # check if any pod exists with label: run=job_id
assert ("jobId" in job)
job_id = job["jobId"]
if not self._all_pods_not_existing(job_id):
            logger.warning(
                "Waiting until previous pods are cleaned up! Job {}".format(
                    job_id))
errors = self.delete_job(job_id, force=True)
if errors:
logger.warning("Force delete job {}: {}".format(job_id, errors))
return
ret = {}
dataHandler = DataHandler()
try:
            # TODO refine later
            # Before resubmitting the job, reset the endpoints:
            # set every endpoint to status 'pending' so it restarts when the job is ready.
endpoints = dataHandler.GetJobEndpoints(job_id)
for endpoint_id, endpoint in list(endpoints.items()):
endpoint["status"] = "pending"
logger.debug("Reset endpoint status to 'pending': {}".format(
endpoint_id))
dataHandler.UpdateEndpoint(endpoint)
job["cluster"] = config
job_object, errors = JobSchema().load(job)
# TODO assert job_object is a Job
assert isinstance(
job_object,
Job), "job_object is not of Job, but " + str(type(job_object))
job_object.params = json.loads(b64decode(job["jobParams"]))
# inject gid, uid and user
# TODO it should return only one entry
user_info = dataHandler.GetIdentityInfo(
job_object.params["userName"])[0]
job_object.params["gid"] = user_info["gid"]
job_object.params["uid"] = user_info["uid"]
job_object.params["user"] = job_object.get_alias()
job_object.params["private_key"] = user_info["private_key"]
job_object.params["ssh_public_keys"] = job_object.params.get(
"ssh_public_keys", [])
job_object.params["ssh_public_keys"].append(user_info["public_key"])
if "job_token" not in job_object.params:
if "master_token" in config and config[
"master_token"] is not None and "userName" in job_object.params:
plain_token = job_object.params["userName"] + \
":" + config["master_token"]
job_object.params["job_token"] = hashlib.md5(
plain_token.encode("utf-8")).hexdigest()
else:
job_object.params["job_token"] = "tryme2017"
if "envs" not in job_object.params:
job_object.params["envs"] = []
job_object.params["envs"].append({
"name": "DLTS_JOB_TOKEN",
"value": job_object.params["job_token"]
})
blobfuse_secret_template = job_object.get_blobfuse_secret_template()
image_pull_secret_template = job_object.get_image_pull_secret_template(
)
secret_templates = {
"blobfuse": blobfuse_secret_template,
"imagePull": image_pull_secret_template
}
if job_object.params["jobtrainingtype"] == "RegularJob":
pod_template = RegularJobTemplate(
job_object.get_template(),
secret_templates=secret_templates)
elif job_object.params["jobtrainingtype"] == "PSDistJob":
pod_template = DistributeJobTemplate(
job_object.get_template(),
secret_templates=secret_templates)
elif job_object.params["jobtrainingtype"] == "InferenceJob":
pod_template = InferenceJobTemplate(
job_object.get_template(),
deployment_template=job_object.get_deployment_template(),
secret_templates=secret_templates)
else:
dataHandler.SetJobError(
job_object.job_id, "ERROR: invalid jobtrainingtype: %s" %
job_object.params["jobtrainingtype"])
dataHandler.Close()
return False
job_object.params["priority_class"] = get_pod_priority_class(
config.get("is_support_pod_priority", False),
job_object.params["jobtrainingtype"],
job_object.params.get("preemptionAllowed", False))
pods, error = pod_template.generate_pods(job_object)
if error:
dataHandler.SetJobError(job_object.job_id, "ERROR: %s" % error)
dataHandler.Close()
return False
job_description = "\n---\n".join([yaml.dump(pod) for pod in pods])
secrets = pod_template.generate_secrets(job_object)
try:
secrets = self.create_secrets(secrets)
ret["output"] = "Created secrets: {}. ".format(
[secret.metadata.name for secret in secrets])
created_pods = self.create_pods(pods)
ret["output"] += "Created pods: {}".format(
[pod.metadata.name for pod in created_pods])
except Exception as e:
ret["output"] = "Error: %s" % e.message
logger.exception(e)
ret["jobId"] = job_object.job_id
jobMeta = {}
jobMeta["jobPath"] = job_object.job_path
jobMeta["workPath"] = job_object.work_path
# the command of the first container
jobMeta["LaunchCMD"] = job_object.params["cmd"]
jobMetaStr = b64encode(json.dumps(jobMeta))
dataFields = {
"jobStatus": "scheduling",
"jobDescription": b64encode(job_description),
"lastUpdated": datetime.datetime.now().isoformat(),
"jobMeta": jobMetaStr
}
conditionFields = {"jobId": job_object.job_id}
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
except Exception as e:
logger.error("Submit job failed: %s" % job, exc_info=True)
ret["error"] = str(e)
retries = dataHandler.AddandGetJobRetries(job["jobId"])
if retries >= 5:
detail = get_job_status_detail(job)
detail = job_status_detail_with_finished_time(
detail, "error", "Server error in job submission")
dataFields = {
"jobStatus": "error",
"errorMsg": "Cannot submit job!" + str(e),
"jobStatusDetail": b64encode(json.dumps(detail))
}
conditionFields = {"jobId": job["jobId"]}
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
# Try to clean up the job
try:
self.delete_job(job_id, force=True)
logger.info(
"Cleaning up job %s succeeded after %d retries of job submission"
% (job["jobId"], retries))
except:
logger.warning(
"Cleaning up job %s failed after %d retries of job submission"
% (job["jobId"], retries))
dataHandler.Close()
return ret
def kill_job(self, job_id, desired_state="killed"):
self.queue.put(("kill_job", (job_id,), {
"desired_state": desired_state
}))
def kill_job_impl(self, job_id, desired_state="killed"):
with DataHandler() as dataHandler:
result, detail = k8sUtils.GetJobStatus(job_id)
detail = job_status_detail_with_finished_time(detail, desired_state)
dataHandler.UpdateJobTextFields(
{"jobId": job_id},
{"jobStatusDetail": b64encode(json.dumps(detail))})
logger.info("Killing job %s, with status %s, %s" %
(job_id, result, detail))
errors = self.delete_job(job_id, force=True)
dataFields = {
"jobStatusDetail": b64encode(json.dumps(detail)),
"lastUpdated": datetime.datetime.now().isoformat()
}
conditionFields = {"jobId": job_id}
if len(errors) == 0:
dataFields["jobStatus"] = desired_state
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
return True
else:
dataFields["jobStatus"] = "error"
dataHandler.UpdateJobTextFields(conditionFields, dataFields)
logger.error("Kill job failed with errors: {}".format(errors))
return False
def scale_job(self, job):
assert ("jobId" in job)
job["cluster"] = config
job_object, errors = JobSchema().load(job)
job_object.params = json.loads(b64decode(job["jobParams"]))
if job_object.params["jobtrainingtype"] != "InferenceJob":
return
name = job_object.job_id + "-deployment"
deployment = self._get_deployment(name=name)
replicas = deployment.spec.replicas
new_replicas = int(job_object.params["resourcegpu"])
if replicas == new_replicas:
return
deployment.spec.replicas = new_replicas
self._patch_deployment(name=name, body=deployment)
logger.debug("Scale inference job %s from %d to %d." %
(job_object.job_id, replicas, new_replicas))
def run(self, queue):
# TODO maintain a data_handler so do not need to init it every time
while True:
func_name, args, kwargs = queue.get(True)
try:
if func_name == "submit_job":
self.submit_job_impl(*args, **kwargs)
elif func_name == "kill_job":
self.kill_job_impl(*args, **kwargs)
else:
logger.error("unknown func_name %s, with args %s %s",
func_name, args, kwargs)
except Exception:
logger.exception("processing job failed")
finally:
queue.task_done()
|
import struct, time, serial
class Serial:
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
    __s = None       # An instance of serial.Serial object
    __debug = True   # Debug mode
def __init__(self, port, baudrate):
self.__s = serial.Serial(port, baudrate, rtscts=0)
def __format_packet(self, packet):
return " ".join(["%02x" % p for p in packet]) + " | " + \
" ".join(["%d" % p for p in packet])
def crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
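    # Note (added for clarity): crc16 above is the MSB-first CRC-16-CCITT
    # algorithm (polynomial 0x1021); read_packet() and write_packet() call it
    # with a zero seed over the unescaped frame contents.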
def __encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def __decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
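    # Illustrative example: __encode(0x1234, 2) yields the little-endian byte
    # list [0x34, 0x12], and __decode reverses it, so
    # __decode([0x34, 0x12]) == 0x1234.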
def __get_byte(self):
r = struct.unpack("B", self.__s.read())[0]
return r
def __put_bytes(self, data):
for b in data:
self.__s.write(struct.pack('B', b))
def __unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def __escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
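    # Illustrative example of the HDLC byte stuffing above: each 0x7e/0x7d in
    # the payload becomes 0x7d followed by (byte ^ 0x20), so
    # __escape([0x7e]) == [0x7d, 0x5e] and __unescape([0x7d, 0x5e]) == [0x7e].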
def read_packet(self):
d = self.__get_byte()
ts = time.time()
while d != self.HDLC_FLAG_BYTE:
d = self.__get_byte()
ts = time.time()
packet = [d]
d = self.__get_byte()
if d == self.HDLC_FLAG_BYTE:
d = self.__get_byte()
ts = time.time()
else:
packet.append(d)
while d != self.HDLC_FLAG_BYTE:
d = self.__get_byte()
packet.append(d)
un_packet = self.__unescape(packet)
crc = self.crc16(0, un_packet[1:-3])
packet_crc = self.__decode(un_packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC!"
if self.__debug == True:
print "Recv:", self.__format_packet(un_packet)
return (ts, un_packet)
def write_packet(self, am_group, am_id, data):
        # The first byte after SERIAL_PROTO_PACKET_ACK is a sequence
        # number that will be sent back by the mote to acknowledge
        # receipt of the data.
packet = [self.SERIAL_PROTO_PACKET_ACK, 0, self.TOS_SERIAL_ACTIVE_MESSAGE_ID,
0xff, 0xff,
0, 0,
len(data), am_group, am_id] + data;
crc = self.crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self.__escape(packet) + [self.HDLC_FLAG_BYTE]
if self.__debug == True:
print "Send:", self.__format_packet(packet)
self.__put_bytes(packet)
# Waiting for ACK
packet = self.read_packet()
if len(packet) > 1 and len(packet[1]) > 1:
return ((packet[1])[1] == self.SERIAL_PROTO_ACK)
return False
def set_debug(self, debug):
self.__debug = debug
class GenericPacket:
""" GenericPacket """
def __decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def __encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
offset = 10
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self.__decode(packet[offset:offset + s]))
offset += s
elif t == 'blob':
if s:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:-3])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
    def values(self):
        return self._values
# Implement the struct behavior
def __getattr__(self, name):
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self.__encode(self._values[i], s)
else:
r += self._values[i]
return r
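# Illustrative usage (hypothetical field layout): a packet description is a
# list of (name, type, size) tuples, e.g.
#   pkt = GenericPacket([("counter", "int", 2), ("data", "blob", None)], raw)
# where `raw` is the unescaped byte list (e.g. the second element of the tuple
# returned by Serial.read_packet()). Fields are decoded starting at offset 10
# into the packet, skipping the framing/header bytes, and can then be read as
# attributes (pkt.counter) or as mapping items (pkt["counter"]).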
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tree_node import TreeNode
class BinarySearchTree(object):
def __init__(self):
        self._root = None
##################
## Iterator method
def __iter__(self):
current = self._find_minmum(self._root)
# and then, until we have reached the end:
while current is not None:
yield current
# in order to get from one Node to the next one:
current = self.successor(current)
def _replace_with(self, old_node, new_node):
if not old_node:
return False
if old_node.parent:
if old_node.parent.left == old_node:
old_node.parent.set_children(left=new_node)
else:
old_node.parent.set_children(right=new_node)
else:
if new_node:
new_node.parent = None
self._root = new_node
return True
def insert(self, k, payload=None):
        # tree is empty: construct the root
        if not self._root:
            self._root = TreeNode(k, payload)
else:
self._insert(self._root, k, payload)
def _insert(self, tree_node, k, payload=None):
if not tree_node:
return TreeNode(k, payload)
if k < tree_node.key:
tree_node.set_children(left=self._insert(tree_node.left, k, payload))
elif k > tree_node.key:
tree_node.set_children(right=self._insert(tree_node.right, k, payload))
else:
tree_node.payload = payload
return tree_node
def remove_node(self, node):
        if node is None:
return
node.key = node.payload = node.left = node.right = node.parent = None
del node
def delete(self, k):
node = self.search(k)
if not node:
return
p = node.parent
if node.left and node.right:
            # if the node has two children, we replace the node's key and payload
            # with the minimum of the right subtree
min_on_right = self._find_minmum(node.right)
min_parent = min_on_right.parent
node.key = min_on_right.key
node.payload = min_on_right.payload
if min_on_right != node.right:
                # promote min's right child to be min's parent's left child
min_parent.set_children(left=min_on_right.right)
else:
node.set_children(right=min_on_right.right)
self.remove_node(min_on_right)
else:
# if the node has 0-1 child, we delete this node
old_node = node
if not node.left and not node.right:
# no child
node = None
elif node.left:
# has one left child
node.left.parent = p
node = node.left
elif node.right:
# has one right child
node.right.parent = p
node = node.right
if not p:
#trying to delete root node
self._root = node
else:
if p.left == old_node:
p.left = node
else:
p.right = node
self.remove_node(old_node)
def find_minnum(self):
return self._find_minmum(self._root)
def _find_minmum(self, node):
if not node:
return None
while node.left:
node = node.left
return node
def find_maxmum(self):
return self._find_maxmum(self._root)
def _find_maxmum(self, node):
if not node:
return None
while node.right:
node = node.right
return node
def traverse(self):
return self._traverse(self._root)
# Python 2 version
def _traverse(self, node):
if node:
if node.left:
for n in self._traverse(node.left):
yield n
yield node
if node.right:
for n in self._traverse(node.right):
yield n
# Python 3 version
# def _traverse(self, node):
# if node:
# yield from self._traverse(node.left)
# yield node
# yield from self._traverse(node.right)
def successor(self, node):
if not node:
return None
if node.right:
return self._find_minmum(node.right)
p = node.parent
while p and p.right == node:
node = p
p = p.parent
return p
def predecessor(self, node):
if not node:
return None
if node.left:
return self._find_maxmum(node.left)
p = node.parent
while p and p.left == node:
node = p
p = p.parent
return p
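    # Note (added for clarity): successor() and predecessor() walk the tree in
    # key order; successor(node) returns the minimum of node's right subtree
    # when it exists, otherwise the nearest ancestor reached from a left
    # child, and predecessor() is the mirror image.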
def height(self):
pass
def search(self, k):
return self._search(self._root, k)
def _search(self, node, k):
if not node:
return None
if k == node.key:
return node
if k < node.key:
return self._search(node.left, k)
else:
return self._search(node.right, k)
def count():
pass
if __name__ == "__main__":
t = BinarySearchTree()
# t.insert(3)
# t.insert(8)
# t.insert(12)
# t.insert(1)
# t.insert(15)
# t.insert(7)
data = [30, 25, 49, 35, 68, 33, 34, 38, 40, 37, 36]
for i in data:
t.insert(i)
for v in t.traverse():
print v.key
d = t._find_maxmum(t._root)
while d:
print d.key
d = t.successor(d)
|
# This file is a part of:
#‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
# ███▄▄▄▄ ▀█████████▄ ▄████████ ███ ▄██████▄ ▄██████▄ ▄█
# ███▀▀▀██▄ ███ ███ ███ ███ ▀█████████▄ ███ ███ ███ ███ ███
# ███ ███ ███ ███ ███ █▀ ▀███▀▀██ ███ ███ ███ ███ ███
# ███ ███ ▄███▄▄▄██▀ ███ ███ ▀ ███ ███ ███ ███ ███
# ███ ███ ▀▀███▀▀▀██▄ ▀███████████ ███ ███ ███ ███ ███ ███
# ███ ███ ███ ██▄ ███ ███ ███ ███ ███ ███ ███
# ███ ███ ███ ███ ▄█ ███ ███ ███ ███ ███ ███ ███▌ ▄
# ▀█ █▀ ▄█████████▀ ▄████████▀ ▄████▀ ▀██████▀ ▀██████▀ █████▄▄██
#__________________________________________________________________________________
# NBSTool is a tool to work with .nbs (Note Block Studio) files.
# Author: IoeCmcomc (https://github.com/IoeCmcomc)
# Programming language: Python
# License: MIT license
# Source codes are hosted on: GitHub (https://github.com/IoeCmcomc/NBSTool)
#‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
import re, math, operator
def readncf(text):
groups = re.findall(r"((?:1|2|4|8|16|32)\.?)(?:(#?[a-g])([1-3])|-)", text)
    notes = []
    tick = 0
    # Pitch lookup for note names; mirrors the `tunes` list used in writencf().
    tunes = ["c", "#c", "d", "#d", "e", "f", "#f", "g", "#g", "a", "#a", "b"]
for tup in groups:
dur = tup[0]
if dur[-1] == '.': dur = 4 / int(dur[:-1]) * 1.5
else: dur = 4 / int(dur)
        if bool(tup[1]): key = 27 + tunes.index(tup[1]) + 12*(int(tup[2])-1)
else: key = None
if key is not None and dur >= 0.25: notes.append({'tick':tick, 'layer': 0, 'inst':0, 'key':key, 'isPerc': False, 'duration': dur*4})
tick += dur*4
headers = {}
headers['file_version'] = 3
headers['vani_inst'] = 16
headers['length'] = 0
headers['height'] = 1
headers['name'] = ''
headers['author'] = ''
headers['orig_author'] = ''
headers['description'] = ''
headers['tempo'] = 100
headers['auto-saving'] = False
headers['auto-saving_time'] = 10
headers['time_sign'] = 4
headers['minutes_spent'] = 0
headers['left_clicks'] = 0
headers['right_clicks'] = 0
headers['block_added'] = 0
headers['block_removed'] = 0
headers['import_name'] = ''
headers['inst_count'] = 0
layers = []
customInsts = []
usedInsts = []
a = 1
sortedNotes = sorted(notes, key = operator.itemgetter('tick', 'layer') )
return {'headers':headers, 'notes':sortedNotes, 'layers':layers, 'customInsts':customInsts, 'IsOldVersion':False, 'hasPerc':False, 'maxLayer':0, 'usedInsts':usedInsts}
def writencf(data):
tunes = ["c", "#c", "d", "#d", "e", "f", "#f", "g", "#g", "a", "#a", "b"]
print(data['notes'][0])
out = []
for note in data['notes']:
ele = ''
ext = ['']
idur = dur = note['duration'] / 4
while dur > 4 and dur - 4 >= 4:
ext.append("1-")
dur -= 4
if dur > 0:
sub = 1
c = 0
while dur >= 0.125 and sub > 0:
sub = 2**(int(math.log(dur*8, 2))-3)
subi = int(4 / sub)# if idur > 4 else sub
if c == 0:
if idur > 4:
ele = '1' + ele
else:
ele = str(subi) + ele
else:
ext.append('{}-'.format(subi))
dur -= sub
c += 1
key = note['key'] - 27
ele += '{}{}'.format(tunes[key%12], key//12 + 1) + ' '.join(ext)
out.append(ele)
out = ' '.join(out)
return out
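# Usage sketch (the input string below is a made-up example, not project test data):
#   song = readncf("4c1 4d1 2e1 4- 8.f2")   # parse note-compact-format text
#   text = writencf(song)                   # render the parsed notes back to text
# readncf returns the NBS-style dict built above (headers, notes, layers, ...).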
|
import urllib.request
import urllib.error
class Urban_Dictionary:
def __init__(self, word):
self.word = word
self.dict_base_url = 'http://www.urbandictionary.com/define.php?term='
self.base_content = "property='og:description'>"
self.counter = 0
# def convert_word(self):
# self.word = self.word[0].upper() + self.word[1:]
def complete_url(self):
complete_url = self.dict_base_url + self.word
return complete_url
def read_full_content(self):
return self.base_content + self.word
def read_string(self, complete_url, amt):
request = urllib.request.urlopen(complete_url)
string = request.read(int(amt))
return string
def convert_string(self, string):
parse_string = ' '
for letter in string:
parse_string = parse_string + chr(letter)
return parse_string
def print_def(self, parse_string, index):
end_def = parse_string.find('.', index) - 1
findex = parse_string.find("<meta content='1.", index) + 18
definition = parse_string[findex:end_def]
sentence = self.word + ' = ' + definition
return sentence
def parse_string(self):
try:
#self.convert_word()
complete_url = self.complete_url()
string = self.read_string(complete_url, 30000)
parse_string = self.convert_string(string)
print(parse_string)
# print('\n\n\n\n??????????????????????????????????????????????????')
parse_word = ' '
# full_content = self.read_full_content()
for letter in parse_string:
parse_word = parse_word + letter
if parse_word == self.base_content:
index = self.counter
if letter == '\n' or letter == ' ':
parse_word = ''
self.counter = self.counter + 1
index = index - 300
sentence = self.print_def(parse_string, index)
return sentence
        except urllib.error.HTTPError:
            return "Word is misspelled or doesn't exist!"
#obj = Urban_Dictionary('tilt')
#print(obj.parse_string())
|
"""Magnetic Module protocol commands."""
from .disengage import (
Disengage,
DisengageCreate,
DisengageParams,
DisengageResult,
DisengageCommandType,
)
from .engage import (
Engage,
EngageCreate,
EngageParams,
EngageResult,
EngageCommandType,
)
__all__ = [
# magneticModule/disengageMagnet
"Disengage",
"DisengageCreate",
"DisengageParams",
"DisengageResult",
"DisengageCommandType",
# magneticModule/engageMagnet
"Engage",
"EngageCreate",
"EngageParams",
"EngageResult",
"EngageCommandType",
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Code based off https://github.com/microsoft/Oscar
# modified for MMF
# Licensed under the MIT license.
import logging
from collections import namedtuple
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional, Tuple
import torch
from mmf.common.registry import registry
from mmf.common.sample import SampleList
from mmf.models.base_model import BaseModel
from mmf.models.transformers.heads.contrastive import ThreeWayContrastive
from mmf.models.transformers.heads.mlm import MLM
from mmf.models.transformers.heads.mlp import MLP
from mmf.utils.general import retry_n
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
from transformers.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
NUM_RETRIES = 6
class VinVLBase(BertPreTrainedModel):
"""VinVL Bert Encoder for image features
From https://github.com/microsoft/Oscar/blob/master/oscar/modeling/modeling_bert.py
Is a thin wrapper around BertEncoder that handles image features
"""
def __init__(self, config: BertConfig):
super().__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.img_dim = config.img_feature_dim
self.use_img_layernorm = getattr(config, "use_img_layernorm", False)
img_projection = nn.Linear(self.img_dim, self.config.hidden_size, bias=True)
img_embedding_list = [img_projection]
if self.use_img_layernorm:
img_embedding_list += [
nn.LayerNorm(config.hidden_size, eps=config.img_layer_norm_eps)
]
dropout = nn.Dropout(config.hidden_dropout_prob)
img_embedding_list += [dropout]
# is an image encoding used as input to the transformer trunk
self.img_embedding = nn.Sequential(*img_embedding_list)
def forward(
self,
input_ids: Tensor,
img_feats: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
) -> Tuple[Tensor]:
if attention_mask is None:
attention_mask = torch.ones(
(input_ids.size(0), input_ids.size(1) + img_feats.size(1))
).to(input_ids.device)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
# attention_mask with dim 3 is to specify a unique mask for each feature,
# it is broadcast over heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# Make the mask broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_ids.shape})"
                f" or attention_mask (shape {attention_mask.shape})"
            )
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Do embeddings
text_embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids
)
img_embedding_output = self.img_embedding(img_feats)
embedding_output = torch.cat((text_embedding_output, img_embedding_output), 1)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
output_hidden_states=True,
)
layers = namedtuple("TransformerOutput", ["last_hidden_state", "hidden_layers"])
return layers(encoder_outputs[0], encoder_outputs[1])
def build_vinvl_base(
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
random_init: bool = True,
) -> VinVLBase:
bert_config = retry_n(
NUM_RETRIES,
BertConfig.from_pretrained,
bert_model_name,
)
# augment hf BertConfig for vinvl BertImgModel config
bert_config.img_feature_dim = img_feature_dim
bert_config.use_img_layernorm = use_img_layernorm
bert_config.img_layer_norm_eps = img_layer_norm_eps
if random_init:
bert = VinVLBase(bert_config)
else:
bert = retry_n(
NUM_RETRIES,
VinVLBase.from_pretrained,
bert_model_name,
config=bert_config,
)
return bert
class VinVLForClassification(nn.Module):
"""VINVL wrapper for classification"""
def __init__(
self,
mlp_config: Optional[Dict] = None,
loss_config: Optional[Dict] = None,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
*args,
**kwargs,
):
"""VinVL model constructor for classification.
MLP head is configurable through Dict type.
Consult the MLP head class for the config options.
Args:
mlp_config (Optional[Dict], optional):
Classifier MLP head config.
Defaults to {"num_layers": 0}.
loss_config (Optional[Dict], optional):
nn.CrossEntropyLoss params dict.
Defaults to {}.
random_init (bool, optional):
Flag to load VinVL bert weights from random_init.
Defaults to False.
bert_model_name (str, optional):
Name for base bert model.
Used for VinVL base configs and weights.
Defaults to "bert-base-uncased".
img_feature_dim (int, optional):
The size of the VinVL image feature inputs.
Defaults to 2054.
use_img_layernorm (bool, optional):
Flag to use layernorm on image encoding.
Defaults to True.
img_layer_norm_eps (float, optional):
Image layernorm epsilon. Defaults to 1e-12.
"""
super().__init__()
if mlp_config is None:
mlp_config = {"num_layers": 0}
if loss_config is None:
loss_config = {}
self.bert = build_vinvl_base(
bert_model_name=bert_model_name,
img_feature_dim=img_feature_dim,
use_img_layernorm=use_img_layernorm,
img_layer_norm_eps=img_layer_norm_eps,
random_init=random_init,
)
self.classifier = MLP(config=mlp_config)
self.ce_loss = nn.CrossEntropyLoss(**loss_config)
def forward(
self,
input_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
labels: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
sequence_output = self.bert(
input_ids,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
logits = self.classifier(sequence_output)["scores"]
result = {"scores": logits}
if labels is not None:
ce_loss = self.ce_loss(logits.view(-1, logits.size(1)), labels.view(-1))
result["losses"] = {"ce": ce_loss}
return result
class VinVLForPretraining(nn.Module):
"""VINVL wrapper for pretraining
MLM loss is described in https://arxiv.org/pdf/2004.06165.pdf
    Contrastive loss is an ITM-style loss over three classes:
    0 for a match,
    1 for a corrupt caption,
    2 for corrupt image labels.
VinVL trains with object detection labels concatenated with the input text.
"""
def __init__(
self,
mlm_config: Optional[MLM.Config] = None,
contrast_config: Optional[ThreeWayContrastive.Config] = None,
random_init: bool = False,
bert_model_name: str = "bert-base-uncased",
img_feature_dim: int = 2054,
use_img_layernorm: bool = True,
img_layer_norm_eps: float = 1e-12,
*args,
**kwargs,
):
"""VinVL model constructor for pretraining.
MLM and Contrastive Loss heads are configurable through Dict types.
Consult MLM and MLP head classes for their config options.
Args:
mlm_config (Optional[MLM.Config], optional):
Config object for MLM head.
Defaults to MLM.Config which uses the default MLM configs.
contrast_config (Optional[ThreeWayContrastive.Config], optional):
Config object for the 3-way contrastive head.
Defaults to ThreeWayContrastive.Config which uses a MLP with 3 classes
random_init (bool, optional):
Flag to load VinVL bert weights from random_init.
Defaults to False.
bert_model_name (str, optional):
Name for base bert model.
Used for VinVL base configs and weights.
Defaults to "bert-base-uncased".
img_feature_dim (int, optional):
The size of the VinVL image feature inputs.
Defaults to 2054.
use_img_layernorm (bool, optional):
Flag to use layernorm on image encoding.
Defaults to True.
img_layer_norm_eps (float, optional):
Image layernorm epsilon. Defaults to 1e-12.
"""
super().__init__()
if mlm_config is None:
mlm_config = asdict(MLM.Config())
if contrast_config is None:
contrast_config = asdict(ThreeWayContrastive.Config())
self.bert = build_vinvl_base(
bert_model_name=bert_model_name,
img_feature_dim=img_feature_dim,
use_img_layernorm=use_img_layernorm,
img_layer_norm_eps=img_layer_norm_eps,
random_init=random_init,
)
self.mlm_head = MLM(config=mlm_config)
self.ce_loss = nn.CrossEntropyLoss()
self.contrast_head = ThreeWayContrastive(contrast_config)
def mlm_forward(
self,
input_ids_masked: Tensor,
lm_label_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
hidden_layers = self.bert(
input_ids_masked,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
mlm_labels = {}
mlm_labels["text"] = lm_label_ids
mlm_labels["image"] = torch.full(
img_feats.shape[:2],
fill_value=-1,
dtype=torch.long,
device=lm_label_ids.device,
)
mlm_labels["combined_labels"] = torch.cat(
[mlm_labels["text"], mlm_labels["image"]], dim=-1
)
processed_sample_list = SampleList({"mlm_labels": mlm_labels})
return self.mlm_head(
hidden_layers, processed_sample_list=processed_sample_list
)["losses"]
def contrastive_forward(
self,
input_ids: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
img_feats: Tensor,
contrastive_labels: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
last_hidden_state = self.bert(
input_ids,
img_feats=img_feats,
position_ids=position_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
).last_hidden_state
processed_sample_list = SampleList({"contrastive_labels": contrastive_labels})
# contrastive 3-way loss has 3 classes,
# 0 for a match, 1, 2 for a corrupt caption/image
# labels respectively
return self.contrast_head(last_hidden_state, processed_sample_list)["losses"]
def forward(
self,
input_ids_masked: Tensor,
input_ids_corrupt: Tensor,
lm_label_ids: Tensor,
contrastive_labels: Tensor,
token_type_ids: Tensor,
attention_mask: Tensor,
token_type_ids_corrupt: Tensor,
attention_mask_corrupt: Tensor,
img_feats: Tensor,
position_ids: Optional[Tensor] = None,
) -> Dict[str, Tensor]:
mlm_result = self.mlm_forward(
input_ids_masked,
lm_label_ids,
token_type_ids,
attention_mask,
img_feats,
position_ids,
)
contrastive_loss_result = self.contrastive_forward(
input_ids_corrupt,
token_type_ids_corrupt,
attention_mask_corrupt,
img_feats,
contrastive_labels,
position_ids,
)
losses = {**mlm_result, **contrastive_loss_result}
return {"losses": losses}
@registry.register_model("vinvl")
class VinVL(BaseModel):
"""VinVL base model called by MMF.
VinVL paper, 3-way contrastive loss:
https://arxiv.org/pdf/2101.00529.pdf
Implementation based on https://github.com/microsoft/Oscar
Expects VinVL features extracted by
https://github.com/microsoft/scene_graph_benchmark
using Visual Genome object detection labels.
The label map used for training is available at
https://github.com/microsoft/scene_graph_benchmark/blob/main/README.md
"""
@dataclass
class Config:
random_init: bool = False
bert_model_name: str = "bert-base-uncased"
hidden_size: int = 768
heads: Any = MISSING
do_pretraining: bool = False
img_feature_dim: int = 2054
img_feature_type: str = "frcnn"
use_img_layernorm: bool = True
img_layer_norm_eps: float = 1e-12
max_img_seq_len: int = 70
def __init__(self, config):
super().__init__(config)
self.config = OmegaConf.create({**asdict(self.Config()), **config})
self.do_pretraining = self.config.do_pretraining
@classmethod
def config_path(cls):
return "configs/models/vinvl/defaults.yaml"
def build(self):
if self.do_pretraining:
mlm_config = self.config.heads.get("mlm")
contrast_config = self.config.heads.get("contrast")
self.vinvl = VinVLForPretraining(
mlm_config=mlm_config, contrast_config=contrast_config, **self.config
)
else:
# do classification finetuning
mlp_config = self.config.heads.get("mlp")
loss_config = self.config.get("ce_loss")
self.vinvl = VinVLForClassification(
mlp_config=mlp_config, loss_config=loss_config, **self.config
)
def init_losses(self):
"""
Defer loss management to submodels,
do nothing when called by build_model.
"""
def forward(self, sample_list: Dict[str, Tensor]) -> Dict[str, Tensor]:
attention_mask = self._get_attention_mask(
sample_list["image_feature_0"],
sample_list["image_info_0"],
sample_list["input_mask"],
)
if self.do_pretraining:
corrupt_attention_mask = self._get_attention_mask(
sample_list["image_feature_0"],
sample_list["image_info_0"],
sample_list["input_mask_corrupt"],
)
return self.vinvl(
sample_list["input_ids_masked"],
sample_list["input_ids_corrupt"],
sample_list["lm_label_ids"],
sample_list["contrastive_labels"],
sample_list["segment_ids"],
attention_mask,
sample_list["segment_ids_corrupt"],
corrupt_attention_mask,
sample_list["image_feature_0"],
)
else:
return self.vinvl(
sample_list["input_ids"],
sample_list["segment_ids"],
attention_mask,
sample_list["image_feature_0"],
labels=sample_list.get("labels"),
)
def _get_attention_mask(
self, image_feat: Tensor, image_info: Dict[str, Tensor], input_mask: Tensor
) -> Tensor:
# image_dim = (bs,)
# with the number of features per image in the batch as an int
image_dim = image_info.get("max_features")
if image_dim is None:
image_mask = torch.ones(
(image_feat.size(0), image_feat.size(1)), device=image_feat.device
).long()
else:
image_mask = torch.arange(
image_feat.size(-2), device=image_feat.device
).expand(image_feat.size()[:-1])
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
image_mask = image_mask.long()
attention_mask = torch.cat((input_mask, image_mask), dim=-1)
return attention_mask
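# Sketch of the 3-way contrastive label convention used by VinVLForPretraining above
# (the example tensor is illustrative, not taken from an MMF dataset):
#   contrastive_labels = torch.tensor([0, 1, 2])  # 0 = match, 1 = corrupt caption, 2 = corrupt image labels
# These labels reach ThreeWayContrastive through the "contrastive_labels" key of the
# SampleList built in contrastive_forward().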
|
dataset_list = ['csqa', 'obqa', 'socialiqa']
dataset_setting = {
'csqa': 'inhouse',
'obqa': 'official',
'socialiqa': 'official',
}
dataset_num_choice = {
'csqa': 5,
'obqa': 4,
'socialiqa': 3,
}
max_cpt_num = {
'csqa': 40,
'obqa': 40,
'socialiqa': 60,
}
dataset_no_test = ['socialiqa']
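# Usage sketch (hypothetical consumer of the settings above):
#   for name in dataset_list:
#       print(name, dataset_setting[name], dataset_num_choice[name], max_cpt_num[name],
#             name in dataset_no_test)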
|
from .ip_proxy_tool import ip_proxy_tool,ip_info
import json
import logging
logger=logging.getLogger(__name__)
class JingDongWanXiang(ip_proxy_tool):
def __init__(self):
super(JingDongWanXiang,self).__init__()
self.url="https://way.jd.com/jisuapi/proxy?num=10&area=&areaex=&port=8080,80&portex=3306&protocol=1,2&type=1&appkey=b198438a2f0dbdce9721760bd49866d0"
self.need_to_valid=False
def parse_proxy_list(self):
objs=json.loads(self.html)
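        # Assumed response shape (inferred from the keys used below, not from API docs):
        #   {"result": {"result": {"list": [{"ip": "1.2.3.4:8080", "protocol": "HTTP"}, ...]}}}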
for obj in objs["result"]["result"]["list"]:
            ip = obj["ip"].split(":")[0]
            port = obj["ip"].split(":")[1]
            scheme = obj["protocol"].lower()
            alive_time = None
            verify_time = None
            proxy_url = scheme + "://" + ip + ":" + str(port)
            info = ip_info(ip, port, scheme, alive_time, verify_time, proxy_url)
if self.is_valid_proxy(info):
self.proxy_list.append(info)
|
from model import AffinityPropagation
import numpy as np
from scipy import sparse
from sklearn.model_selection import train_test_split
import pandas as pd
from alive_progress import alive_bar
def load_edges(path):
edges_list = np.loadtxt(path).tolist()
values = [max(edge[0], edge[1]) for edge in edges_list]
n = int(np.max(values)) + 1
lil = sparse.lil_matrix((n, n))
for edge in edges_list:
        lil[int(edge[0]), int(edge[1])] = 1
return lil.tocsr()
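# Data-format note (inferred from the loaders in this file, not from external docs):
# Dataset/edges.txt is a whitespace-separated "src dst" edge list that becomes a sparse
# 0/1 adjacency matrix; Dataset/totalCheckins.txt is tab-separated with the user id in
# column 0 and the location id in column 4.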
def load_checkins(path):
checkins_df = pd.read_csv(path, sep="\t", header=None)[[0, 4]]
checkins_df.columns = ["user_id", "location_id"]
return checkins_df
def get_users(checkins):
user_ids = checkins.user_id.to_numpy()
return np.unique(user_ids)
def metrics(ap, df_checkins):
print('Splitting dataset...')
users = get_users(df_checkins)
permutation = np.random.permutation(users.size)
users_random = users[permutation]
df_checkins = df_checkins.loc[df_checkins.user_id.isin(users_random)]
checkins_train, checkins_test = train_test_split(df_checkins, test_size=0.01, shuffle=True)
print('Train prediction...')
cluster_locations, top10_locations = ap.predict(checkins_train)
cluster_prec = 0
location_prec = 0
    user_ids = np.unique(checkins_test.user_id)
    with alive_bar(len(user_ids)) as bar:
        for user_id in user_ids:
            bar()
            locations_all = checkins_test.loc[checkins_test.user_id == user_id, "location_id"].values
            uniq_locations = np.unique(locations_all)
            # Membership must be checked against the Series values, not its index.
            if user_id in checkins_train.user_id.values:
                train_top_locations = ap.get_top10_predict_for_user(cluster_locations, checkins_train, user_id)
                uniq_top = np.unique(train_top_locations)
                cluster_prec += np.intersect1d(uniq_top, uniq_locations).size
            location_prec += np.intersect1d(top10_locations, uniq_locations).size
    location_acc = location_prec / (10 * user_ids.size)
    cluster_acc = cluster_prec / (10 * user_ids.size)
    print('Location prediction accuracy =', location_acc)
    print('Cluster accuracy =', cluster_acc)
return
def main():
print('Loading data...')
edges = load_edges('Dataset/edges.txt')
df_checkins = load_checkins('Dataset/totalCheckins.txt')
print('Data loaded')
    ap = AffinityPropagation()
print('Starting training...')
ap.fit(edges, 3)
metrics(ap, df_checkins)
return
if __name__ == "__main__":
main()
|
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. April, 2018.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
import theano.tensor.nnet.conv3d2d
import pdb
import sys
import os
import numpy as np
import numpy
import random
from Modules.General.Utils import initializeWeights
from Modules.NeuralNetwork.ActivationFunctions import *
from Modules.NeuralNetwork.layerOperations import *
#################################################################
# Layer Types #
#################################################################
class HyperDenseNetConvLayer(object):
"""Convolutional Layer of the Livia network """
def __init__(self,
rng,
layerID,
inputSample_Train,
inputSample_Test,
inputToLayerShapeTrain,
inputToLayerShapeTest,
filterShape,
useBatchNorm,
numberEpochApplyRolling,
maxPoolingParameters,
weights_initMethodType,
weights,
activationType,
dropoutRate=0.0) :
self.inputTrain = None
self.inputTest = None
self.inputShapeTrain = None
self.inputShapeTest = None
self._numberOfFeatureMaps = 0
self._maxPoolingParameters = None
self._appliedBnInLayer = None
self.params = []
self.W = None
self._gBn = None
self._b = None
self._aPrelu = None
self.numberOfTrainableParams = 0
self.muBatchNorm = None
self._varBnsArrayForRollingAverage = None
self.numberEpochApplyRolling = numberEpochApplyRolling
self.rollingIndex = 0
self._sharedNewMu_B = None
self._sharedNewVar_B = None
self._newMu_B = None
self._newVar_B = None
self.outputTrain = None
self.outputTest = None
self.outputShapeTrain = None
self.outputShapeTest = None
        # === After all the parameters have been initialized, create the layer
# Set all the inputs and parameters
self.inputTrain = inputSample_Train
self.inputTest = inputSample_Test
self.inputShapeTrain = inputToLayerShapeTrain
self.inputShapeTest = inputToLayerShapeTest
self._numberOfFeatureMaps = filterShape[0]
assert self.inputShapeTrain[1] == filterShape[1]
self._maxPoolingParameters = maxPoolingParameters
print(" --- [STATUS] --------- Creating layer {} --------- ".format(layerID))
## Process the input layer through all the steps over the block
(inputToConvTrain,
inputToConvTest) = self.passInputThroughLayerElements(inputSample_Train,
inputToLayerShapeTrain,
inputSample_Test,
inputToLayerShapeTest,
useBatchNorm,
numberEpochApplyRolling,
activationType,
weights,
dropoutRate,
rng
)
# input shapes for the convolutions
inputToConvShapeTrain = inputToLayerShapeTrain
inputToConvShapeTest = inputToLayerShapeTest
# -------------- Weights initialization -------------
# Initialize weights with random weights if W is empty
# Otherwise, use loaded weights
self.W = initializeWeights(filterShape,
weights_initMethodType,
weights)
self.params = [self.W] + self.params
self.numberOfTrainableParams += 1
##---------- Convolve --------------
(convolvedOutput_Train, convolvedOutputShape_Train) = convolveWithKernel(self.W, filterShape, inputToConvTrain, inputToConvShapeTrain)
(convolvedOutput_Test, convolvedOutputShape_Test) = convolveWithKernel(self.W , filterShape, inputToConvTest, inputToConvShapeTest)
self.outputTrain = convolvedOutput_Train
self.outputTest = convolvedOutput_Test
self.outputShapeTrain = convolvedOutputShape_Train
self.outputShapeTest = convolvedOutputShape_Test
def updateLayerMatricesBatchNorm(self):
if self._appliedBnInLayer :
muArrayValue = self.muBatchNorm.get_value()
muArrayValue[self.rollingIndex] = self._sharedNewMu_B.get_value()
self.muBatchNorm.set_value(muArrayValue, borrow=True)
varArrayValue = self._varBnsArrayForRollingAverage.get_value()
varArrayValue[self.rollingIndex] = self._sharedNewVar_B.get_value()
self._varBnsArrayForRollingAverage.set_value(varArrayValue, borrow=True)
self.rollingIndex = (self.rollingIndex + 1) % self.numberEpochApplyRolling
def getUpdatesForBnRollingAverage(self) :
if self._appliedBnInLayer :
return [(self._sharedNewMu_B, self._newMu_B),
(self._sharedNewVar_B, self._newVar_B) ]
else :
return []
def passInputThroughLayerElements(self,
inputSample_Train,
inputSampleShape_Train,
inputSample_Test,
inputSampleShape_Test,
useBatchNorm,
numberEpochApplyRolling,
activationType,
weights,
dropoutRate,
rndState):
""" Through each block the following steps are applied, according to Kamnitsas:
1 - Batch Normalization or biases
2 - Activation function
3 - Dropout
4 - (Optional) Max pooling
Ref: He et al "Identity Mappings in Deep Residual Networks" 2016
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua """
# ________________________________________________________
# 1 : Batch Normalization
        # ________________________________________________________
        """ Implementation taken from Kamnitsas' work.
A batch normalization implementation in TensorFlow:
http://r2rt.com/implementing-batch-normalization-in-tensorflow.html
"Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift",
Proceedings of the 32nd International Conference on Machine Learning, Lille, France, 2015.
Journal of Machine Learning Research: W&CP volume 37
"""
if useBatchNorm > 0 :
self._appliedBnInLayer = True
(inputToNonLinearityTrain,
inputToNonLinearityTest,
self._gBn,
self._b,
self.muBatchNorm,
self._varBnsArrayForRollingAverage,
self._sharedNewMu_B,
self._sharedNewVar_B,
self._newMu_B,
self._newVar_B) = applyBn( numberEpochApplyRolling,
inputSample_Train,
inputSample_Test,
inputSampleShape_Train)
self.params = self.params + [self._gBn, self._b]
else :
self._appliedBnInLayer = False
numberOfInputFeatMaps = inputSampleShape_Train[1]
b_values = np.zeros( (self._numberOfFeatureMaps), dtype = 'float32')
self._b = theano.shared(value=b_values, borrow=True)
inputToNonLinearityTrain = applyBiasToFeatureMaps( self._b, inputSample_Train )
inputToNonLinearityTest = applyBiasToFeatureMaps( self._b, inputSample_Test )
self.params = self.params + [self._b]
# ________________________________________________________
# 2 : Apply the corresponding activation function
# ________________________________________________________
def Linear():
            print(" --- Activation function: Linear")
self.activationFunctionType = "Linear"
output_Train = inputToNonLinearityTrain
output_Test = inputToNonLinearityTest
return (output_Train, output_Test)
def ReLU():
            print(" --- Activation function: ReLU")
self.activationFunctionType = "ReLU"
output_Train = applyActivationFunction_ReLU_v1(inputToNonLinearityTrain)
output_Test = applyActivationFunction_ReLU_v1(inputToNonLinearityTest)
return (output_Train, output_Test)
def PReLU():
            print(" --- Activation function: PReLU")
self.activationFunctionType = "PReLU"
numberOfInputFeatMaps = inputSampleShape_Train[1]
PReLU_Values = np.ones( (numberOfInputFeatMaps), dtype = 'float32' )*0.01
self._aPrelu = theano.shared(value=PReLU_Values, borrow=True)
output_Train = applyActivationFunction_PReLU(inputToNonLinearityTrain, self._aPrelu)
output_Test = applyActivationFunction_PReLU(inputToNonLinearityTest, self._aPrelu)
self.params = self.params + [self._aPrelu]
self.numberOfTrainableParams += 1
return (output_Train,output_Test)
def LeakyReLU():
            print(" --- Activation function: Leaky ReLU")
            self.activationFunctionType = "Leaky ReLU"
leakiness = 0.2 # TODO. Introduce this value in the config.ini
output_Train = applyActivationFunction_LeakyReLU(inputToNonLinearityTrain,leakiness)
output_Test = applyActivationFunction_LeakyReLU(inputToNonLinearityTest,leakiness)
return (output_Train, output_Test)
optionsActFunction = {0 : Linear,
1 : ReLU,
2 : PReLU,
3 : LeakyReLU}
(inputToDropout_Train, inputToDropout_Test) = optionsActFunction[activationType]()
# ________________________________________________________
# 3 : Apply Dropout
# ________________________________________________________
output_Train = apply_Dropout(rndState,dropoutRate,inputSampleShape_Train,inputToDropout_Train, 0)
output_Test = apply_Dropout(rndState,dropoutRate,inputSampleShape_Train,inputToDropout_Test, 1)
# ________________________________________________________
# This will go as input to the convolutions
# ________________________________________________________
return (output_Train, output_Test)
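# Note: activationType selects the non-linearity through optionsActFunction above:
# 0 = Linear, 1 = ReLU, 2 = PReLU (learnable slope), 3 = Leaky ReLU (slope 0.2).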
|
import convert
import exc
from lxml import etree
import json
import os
import shutil
import time
script_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.abspath(os.path.join(script_dir, "..", "data"))
def ensure_data_path(path):
path = os.path.join(data_dir, path)
if not os.path.isdir(path):
os.makedirs(path)
return path
class WebApplication(object):
def __init__(self, session_manager):
self.sm = session_manager
self.data_structures_dir = ensure_data_path("data_structures")
self.equipment_interfaces_dir = ensure_data_path("equipment_interfaces")
self.comm_profiles_dir = ensure_data_path("comm_profiles")
self.config_dir = ensure_data_path("config")
def get_error_response(self, sid):
""" This is an example, which should be removed in the final version."""
raise(exc.Error(404, "file XXX not found."))
def repository_list_all(self, sid, ctype):
""" Returns a list of repositories within the ctype-directory."""
if self.sm.check(sid, 'admin'):
result = []
nonsense = [".", ".."]
base_path = os.path.join("../data/", ctype)
for x in os.listdir("../data/" + ctype):
path = os.path.join(base_path, x)
if os.path.isdir(path) and x not in nonsense:
result.append(x)
return result
def repository_clone(self, sid, ctype, name, url):
""" clones a repository from a given url """
if not os.path.isdir(os.path.join("../data/", ctype, name)):
os.system("cd '../data/{0}' && git clone '{1}' '{2}'".format(ctype, url, name))
return "Repository cloned"
def repository_create(self, sid, ctype, name):
""" creates a repository of its own """
if not os.path.isdir(os.path.join("../data/", ctype, name)):
os.system("cd '../data/{0}' && mkdir '{1}'".format(ctype, name))
if not os.path.isdir(os.path.join("../data/", ctype, name, ".git")):
os.system("cd '../data/{0}/{1}' && git init".format(ctype, name))
return "Repository created"
def repository_push(self, sid, ctype, name):
""" pushes a repository to its origin """
pass
def repository_rename(self, sid, ctype, old_name, new_name):
""" edits the description file in the repository """
source_path = os.path.join("../data/", ctype, old_name)
target_path = os.path.join("../data/", ctype, new_name)
if not os.path.isdir(target_path):
shutil.move(source_path, target_path)
else:
raise Exception("Can not move repository. The destination repository already exists.")
return "Repository renamed"
def repository_delete(self, sid, ctype, name):
""" deletes a repository from the file system """
d = os.path.join(data_dir, ctype, name)
if os.path.isdir(d):
shutil.rmtree(d, ignore_errors=True)
def file_list_all(self, sid, ctype, repo):
""" Returns a list of file names within a repository folder within a ctype directory.
Within this draft implementation we assume all files to be valid files.
"""
if self.sm.check(sid, 'admin'):
result = []
base_path = os.path.join("../data/", ctype, repo)
for x in os.listdir(base_path):
path = os.path.join(base_path, x)
if os.path.isfile(path):
result.append(x)
return result
def file_create(self, sid, ctype, repo, name):
""" This method creates a new file.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(data_dir, ctype, repo, name)
if not os.path.isfile(dst):
src = os.path.join(script_dir, "data", ctype + '.xml')
shutil.copyfile(src, dst)
return "File created."
else:
raise(exc.Error(404, "Can not create file {dst}. It already exists.".format(dst=dst)))
def file_read(self, sid, ctype, repo, name):
""" This method returns the content of an XML file as JSON.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(data_dir, ctype, repo, name)
if not os.path.isfile(dst):
                raise(exc.Error(404, "The file {dst} does not exist.".format(dst=dst)))
else:
jsonstr = convert.xmlfile_to_jsonstr(dst)
return jsonstr
def file_write(self, sid, ctype, repo, name, data):
""" This method writes the data (a dict) into a file.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(data_dir, ctype, repo, name)
convert.jsonstr_to_xmlfile(data, dst)
return "Wrote file '{0}'.".format(dst)
def file_read_template(self, sid, ctype):
""" This method returns the content of an XML file as JSON.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(script_dir, 'data', ctype + '.xml')
if not os.path.isfile(dst):
                raise(exc.Error(404, "The file {dst} does not exist.".format(dst=dst)))
else:
jsonstr = convert.xmlfile_to_jsonstr(dst)
return jsonstr
def file_rename(self, sid, ctype, repo, old_name, new_name, data):
""" This method renames a file.
"""
if self.sm.check(sid, 'admin'):
source_path = os.path.join("../data/", ctype, repo, old_name)
target_path = os.path.join("../data/", ctype, repo, new_name)
if source_path == target_path or not os.path.isfile(target_path):
if not source_path == target_path:
shutil.move(source_path, target_path)
self.file_write(sid, ctype, repo, new_name, data)
else:
raise Exception("Can not rename file. The target file already exists.")
return "File renamed."
def file_delete(self, sid, ctype, repo, name):
""" This method deletes a file.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(data_dir, ctype, repo, name)
if not os.path.isfile(dst):
                raise(exc.Error(404, "The file {dst} does not exist.".format(dst=dst)))
else:
os.remove(dst)
return "File deleted."
def struct_rename(self, sid, ctype, repo, name, jsonstr):
""" This method updates an XML file by a given JSON string.
"""
if self.sm.check(sid, 'admin'):
            dst = os.path.join(data_dir, ctype, repo, name)
if not os.path.isfile(dst):
                raise(exc.Error(404, "The file {dst} does not exist.".format(dst=dst)))
else:
jsonstr = convert.jsonstr_to_xmlfile(jsonstr, dst)
return "XML file updated."
    def struct_list_all(self, sid, ctype):
        """ This function returns a list of all repositories,
        including all struct libraries and all of their structs.
"""
if self.sm.check(sid, 'admin'):
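            # Returned structure (sketch derived from the loop below):
            #   [{"RepoName": ..., "Libraries": [{"LibraryName": ..., "LibraryID": ...,
            #     "LibraryVersion": ..., "Structs": [{"Name": ..., "ID": ..., "Comment": ...}]}]}]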
dst = os.path.join(data_dir, ctype)
result = []
# evaluate each repository within ctype (currently 'data_structures')
for r in sorted(os.listdir(dst)):
if os.path.isdir(os.path.join(dst, r)):
repo = {}
result.append(repo)
repo['RepoName'] = r
repo['Libraries'] = []
# evaluate each structure library XML file of the repository
for l in sorted(os.listdir(os.path.join(dst, r))):
if os.path.isfile(os.path.join(dst, r, l)):
lib = {}
repo['Libraries'].append(lib)
lib['LibraryName'] = l
lib['Structs'] = []
# parse a struct library XML file
with open(os.path.join(dst, r, l), 'r') as f:
tree = etree.parse(f)
lib['LibraryID'] = tree.getroot().attrib['ID']
lib['LibraryVersion'] = tree.getroot().attrib['Version']
struct_els = tree.xpath('/Library/Struct')
# sort the 'Struct' elements by name:
d = {}
for v in struct_els:
k = v.attrib['Name']
d[k] = v
slist = []
for k in sorted(d.keys()):
slist.append(d[k])
# evaluate each 'Struct' element of the XML file
for e in slist:
struct = {}
lib['Structs'].append(struct)
struct['Name'] = e.attrib['Name']
struct['ID'] = e.attrib['ID']
struct['Comment'] = e.attrib['Comment']
return result
    def block_list_all(self, sid, ctype):
        """ This function returns a list of all repositories,
        including all block libraries and all of their blocks.
"""
if self.sm.check(sid, 'admin'):
dst = os.path.join(data_dir, ctype)
result = []
# evaluate each repository within ctype (currently 'data_structures')
for r in sorted(os.listdir(dst)):
if os.path.isdir(os.path.join(dst, r)):
repo = {}
result.append(repo)
repo['RepoName'] = r
repo['Libraries'] = []
# evaluate each library XML file of the repository
for l in sorted(os.listdir(os.path.join(dst, r))):
if os.path.isfile(os.path.join(dst, r, l)):
lib = {}
repo['Libraries'].append(lib)
lib['LibraryName'] = l
lib['Blocks'] = []
# parse a library XML file
with open(os.path.join(dst, r, l), 'r') as f:
tree = etree.parse(f)
lib['LibraryID'] = tree.getroot().attrib['ID']
lib['LibraryVersion'] = tree.getroot().attrib['Version']
block_els = tree.xpath('/Library/DataBlock')
# sort the 'DataBlock' elements by name:
d = {}
for v in block_els:
k = v.attrib['Name']
d[k] = v
slist = []
for k in sorted(d.keys()):
slist.append(d[k])
# evaluate each 'DataBlock' element of the XML file
for e in slist:
block = {}
lib['Blocks'].append(block)
block['Name'] = e.attrib['Name']
block['ID'] = e.attrib['ID']
block['Comment'] = e.attrib['Comment']
return result
if __name__ == "__main__":
pass
|
"""Annette Graph Utils
This Module contains the Graph Utilities to generate Annette readable graphs from MMDNN
or read directly from json.
"""
from __future__ import print_function
import json
import logging
import numpy as np
import sys
import os
import mmdnn.conversion.common.IR.graph_pb2 as graph_pb2
from mmdnn.conversion.common.IR.IR_graph import IRGraph, IRGraphNode, load_protobuf_from_file
from mmdnn.conversion.common.utils import *
from .annette_graph import AnnetteGraph
__author__ = "Matthias Wess"
__copyright__ = "Christian Doppler Laboratory for Embedded Machine Learning"
__license__ = "Apache-2.0"
class MMGraph:
""" MMDNN Graph Class
Args:
graphfile (str): MMDNN graphfile
weightfile(str, optional): MMDNN weightfile, dropped anyways
Attributes:
IR_graph : mmdnn Intermediate Representation Graph
"""
def __init__(self, graphfile, weightfile=None):
print("Initializing network...")
self.graphfile = graphfile
self.weightfile = weightfile
self.IR_graph = IRGraph(self.graphfile)
self.IR_graph.build()
self.IR_graph.model = 1
if self.weightfile is None:
logging.info("No weights file loaded\n")
else:
logging.info("Load weights...\n")
try:
self.weights_dict = np.load(
self.weightfile, allow_pickle=True).item()
except:
self.weights_dict = np.load(
self.weightfile, encoding='bytes', allow_pickle=True).item()
self.analyze_net()
print("Network analyzed successfully...\n")
def analyze_net(self):
"""Walk through net and compute attributes"""
# TODO look for DataInput layer and add if necessary
"""
for layer in self.IR_graph.topological_sort:
current_node = self.IR_graph.get_node(layer)
node_type = current_node.type
#find input layers
if not current_node.in_edges and not(current_node.type in ['DataInput']) :
print(current_node.type)
"""
for layer in self.IR_graph.topological_sort:
current_node = self.IR_graph.get_node(layer)
#node_type = current_node.type
self.fix_shape_names(current_node)
def fix_shape_names(self, layer):
"""Fixed shape_names
Arguments:
layer (obj): layer to fix names and shapes
"""
if not(layer.type in ['yolo']):
output_shape = layer.get_attr('_output_shape')
# For tensorflow models it is called output_shapes
if output_shape is None:
output_shape = layer.get_attr('_output_shapes')
output_shape = shape_to_list(output_shape[0])
layer.set_attrs({'output_shape': output_shape})
if not(layer.type in ['DataInput']):
if(layer.in_edges):
innode = self.IR_graph.get_node(layer.in_edges[0])
input_shape = innode.get_attr('_output_shape')
# For tensorflow models it is called output_shapes
if input_shape is None:
input_shape = innode.get_attr('_output_shapes')
input_shape = shape_to_list(input_shape[0])
layer.set_attrs({'input_shape': input_shape})
def fix_depthwise(self, layer):
"""Fixed depthwise layers
Arguments:
layer (obj): layer to fix names and shapes
"""
if layer.type in ['Conv']:
output_shape = layer.get_attr('_output_shape')
# For tensorflow models it is called output_shapes
if output_shape is None:
output_shape = layer.get_attr('_output_shapes')
output_shape = shape_to_list(output_shape[0])
group = layer.get_attr('group')
if not (group is None):
logging.debug(layer.name)
logging.debug(group)
logging.debug(output_shape)
if group == output_shape[3]:
return 'DepthwiseConv'
return layer.type
def convert_to_annette(self, name):
"""Convert MMDNN to Annette graph
Arguments:
name (str): Network name
Return:
annette_graph (obj)
"""
annette_graph = AnnetteGraph(name) # TODO
for layer in self.IR_graph.topological_sort:
current_node = self.IR_graph.get_node(layer)
logging.debug(current_node.type)
node_type = self.fix_depthwise(current_node)
layer_dict = {'type': node_type}
layer_name = current_node.name
logging.debug(current_node.in_edges)
logging.debug(current_node.out_edges)
layer_dict['parents'] = current_node.in_edges
layer_dict['children'] = current_node.out_edges
attributes = ['output_shape', 'input_shape', 'kernel_shape',
'strides', 'pads', 'pooling_type', 'global_pooling', 'dilations','axis']
for attr in attributes:
tmp = current_node.get_attr(attr)
if tmp is not None:
layer_dict[attr] = tmp
if layer_dict['type'] in ['DepthwiseConv'] and attr == 'kernel_shape':
tmp[3] = 1
layer_dict[attr] = tmp
annette_graph.add_layer(layer_name, layer_dict)
return annette_graph
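# Usage sketch (file names are placeholders, not shipped with the project):
#   graph = MMGraph("model.pb", "model.npy")
#   annette_graph = graph.convert_to_annette("my_network")
# convert_to_annette() emits one layer dict (type, parents, children, shapes, ...)
# per node of the MMDNN intermediate representation.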
|
#!/usr/bin/env python3
# coding:utf-8
import datetime
from sqlalchemy import Integer, Column, String, DateTime, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class BaseModel(object):
keys = []
def __init__(self, id, engine, obj):
data = obj.get(id, engine)
if data:
for k in self.keys:
v = getattr(data, k)
setattr(self, k, v)
else:
self.id = -1
@classmethod
def init(cls, engine):
if not cls.is_table_exists(engine):
cls.__table__.create(engine)
@classmethod
def is_table_exists(cls, engine):
name = cls.__tablename__
res = engine.dialect.has_table(engine, name)
return res
@classmethod
def create_session(cls, engine):
db_session = sessionmaker(bind=engine)()
return db_session
@classmethod
def get(cls, id, engine):
db_session = cls.create_session(engine)
data = db_session.query(cls).filter(cls.id == id)
data = list(data)
db_session.close()
if data:
return data[0]
else:
return None
class Session(Base, BaseModel):
__tablename__ = 'django_session'
session_key = Column(String(40), primary_key=True)
session_data = Column(String(256))
@classmethod
def get_session(cls, sessionid, engine):
db_session = cls.create_session(engine)
data = db_session.query(cls).filter(cls.session_key == sessionid)
data = list(data)
db_session.close()
if data:
return data[0]
else:
return None
class User(Base, BaseModel):
keys = ["id", "is_superuser", "username", "is_staff", "is_active"]
__tablename__ = 'auth_user'
id = Column(Integer, primary_key=True, autoincrement=True)
password = Column(String(128))
last_login = Column(DateTime)
is_superuser = Column(Boolean)
username = Column(String(150))
first_name = Column(String(150))
last_name = Column(String(150))
email = Column(String(254))
is_staff = Column(Boolean)
is_active = Column(Boolean)
date_joined = Column(DateTime)
def __init__(self, uid, engine):
BaseModel.__init__(self, uid, engine, User)
def get_groups(self, engine):
return UserGroups.get_user_groups(self.id, engine)
def get_permissions(self, engine):
return UserPermissions.get_user_permissions(self.id, engine)
class Permission(Base, BaseModel):
keys = ["id", "name", "content_type_id", "codename"]
__tablename__ = 'auth_permission'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(256))
content_type_id = Column(Integer)
codename = Column(String(100))
def __init__(self, id, engine):
BaseModel.__init__(self, id, engine, Permission)
class Group(Base, BaseModel):
keys = ["id", "name"]
__tablename__ = 'auth_group'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(150))
def __init__(self, id, engine):
BaseModel.__init__(self, id, engine, Group)
class UserGroups(Base, BaseModel):
__tablename__ = 'auth_user_groups'
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer)
group_id = Column(Integer)
@classmethod
def get_user_groups(cls, uid, engine):
groups = {"": 1}
db_session = cls.create_session(engine)
data = db_session.query(cls).filter(cls.user_id == uid)
data = list(data)
db_session.close()
for _data in data:
group = Group(_data.group_id, engine)
groups[group.name] = group.id
return groups
class UserPermissions(Base, BaseModel):
__tablename__ = 'auth_user_user_permissions'
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer)
permission_id = Column(Integer)
@classmethod
def get_user_permissions(cls, uid, engine):
permissions = {"": 1}
db_session = cls.create_session(engine)
data = db_session.query(cls).filter(cls.user_id == uid)
data = list(data)
db_session.close()
for _data in data:
permission = Permission(_data.permission_id, engine)
permissions[permission.codename] = {
"id": permission.id,
"name": permission.name,
}
return permissions
class UserInfo(Base, BaseModel):
"""
email = models.CharField(max_length=128, default="")
tel = models.CharField(max_length=64, default="")
name = models.CharField(max_length=256, null=True)
username = models.CharField(max_length=256, null=True)
company_name = models.CharField(max_length=256, null=True)
company_homepage = models.CharField(max_length=256, null=True)
company_position = models.CharField(max_length=256, null=True)
text = models.CharField(max_length=512, null=True)
country = models.CharField(max_length=128, null=True)
create_date = models.DateTimeField(null=True)
type = models.CharField(max_length=128, default="") # personal / enterprise
business_type = models.CharField(max_length=128, default="") # A / B
user_id = models.IntegerField(null=True)
level = models.IntegerField(default=0)
expired = models.DateField(null=True)
begin = models.DateField(null=True)
first_day = models.CharField(max_length=128, null=True)
limit = models.IntegerField(default=10)
"""
keys = ["id", "user_id", "username", "type", "business_type", "level", "expired", "limit"]
__tablename__ = 'user_info'
id = Column(Integer, primary_key=True, autoincrement=True)
email = Column(String(128))
tel = Column(String(64))
name = Column(String(256))
username = Column(String(256))
company_name = Column(String(256))
company_homepage = Column(String(256))
company_position = Column(String(256))
text = Column(String(512))
country = Column(String(128))
create_date = Column(DateTime)
type = Column(String(128))
business_type = Column(String(128))
user_id = Column(Integer)
level = Column(Integer)
expired = Column(DateTime)
begin = Column(DateTime)
first_day = Column(String(128))
limit = Column(Integer)
def __init__(self, id, engine):
BaseModel.__init__(self, id, engine, UserInfo)
def json(self):
data = {}
if not self.id > 0:
return None
for k in self.keys:
data[k] = getattr(self, k)
data["expired"] = f"{data['expired']}"
return data
def is_expired(self):
if self.expired > datetime.date.today():
return -1
else:
return 1
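# Usage sketch (the connection string is a placeholder):
#   from sqlalchemy import create_engine
#   engine = create_engine("sqlite:///db.sqlite3")
#   User.init(engine)                  # create auth_user if the table is missing
#   user = User(1, engine)             # load the user whose id == 1 (id == -1 if absent)
#   groups = user.get_groups(engine)   # {"<group name>": <group id>, ...}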
|
from models.Devices import Devices
from models.Logs import Logs
import datetime
def setStatusArduino(ard, matricola):
dispositivo = Devices.objects(mat = matricola)[0]
if ard.attivaZone(matricola, dispositivo.statusFirstSensor, dispositivo.statusSecondSensor) == "OK":
return 200
else:
return 400
def archiviaLogArduino(matricola, logArd):
dispositivo = Devices.objects(mat = matricola)[0]
log = ""
if logArd == "Z1-ALARM":
log = "Allarme sensore Zona: " + dispositivo.labelFirstSensor
elif logArd == "Z2-ALARM":
log = "Allarme sensore Zona: " + dispositivo.labelSecondSensor
elif logArd == "DIS-MAN":
log = "Allarme Disattivato Manualmente"
newLog = Logs(
        time = datetime.datetime.now(),
txt = log,
Ardulock = dispositivo
)
newLog.save()
|
""" scripts.gencmds.kpn_timg.gen_cp_orig
"""
import json
import scripts.gencmds.common as common
def _create_parser():
help_str = 'Create commands to copy original image files to datadirs'
return common.parser(help_str)
def main(args):
cmds = []
md = json.load(args.input)
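    # Expected metadata shape (inferred from the keys read below):
    #   {"data-dir": ..., "image-metadata": [{"original-path": ..., "original-copy-path": ...}, ...]}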
cmds.append('cp {} {}/'.format(args.input.name, md['data-dir']))
img_md = md['image-metadata']
for entry in img_md:
cmds.append('cp {} {}'.format(entry['original-path'],
entry['original-copy-path']))
args.output.writelines('\n'.join(cmds))
if __name__ == '__main__':
main(_create_parser().parse_args())
|
"""Defines LightCurveFile classes, i.e. files that contain LightCurves."""
from __future__ import division, print_function
import os
import logging
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from astropy.io import fits as pyfits
from .utils import (bkjd_to_astropy_time, KeplerQualityFlags, TessQualityFlags,
LightkurveWarning, detect_filetype)
from . import MPLSTYLE
__all__ = ['LightCurveFile', 'KeplerLightCurveFile', 'TessLightCurveFile']
log = logging.getLogger(__name__)
class LightCurveFile(object):
"""Generic class to represent FITS files which contain one or more light curves.
Parameters
----------
path : str or `astropy.io.fits.HDUList` object
Local path or remote url of a lightcurve FITS file.
Also accepts a FITS file object already opened using AstroPy.
kwargs : dict
Keyword arguments to be passed to astropy.io.fits.open.
"""
def __init__(self, path, **kwargs):
if isinstance(path, pyfits.HDUList):
self.path = None
self.hdu = path
else:
self.path = path
self.hdu = pyfits.open(self.path, **kwargs)
def header(self, ext=0):
"""Header of the object at extension `ext`"""
return self.hdu[ext].header
@property
def time(self):
"""Time measurements"""
return self.hdu[1].data['TIME'][self.quality_mask]
@property
def ra(self):
"""Right Ascension of the target."""
return self.hdu[0].header['RA_OBJ']
@property
def dec(self):
"""Declination of the target."""
return self.hdu[0].header['DEC_OBJ']
@property
def SAP_FLUX(self):
"""Returns a LightCurve object for SAP_FLUX"""
return self.get_lightcurve('SAP_FLUX')
@property
def PDCSAP_FLUX(self):
"""Returns a LightCurve object for PDCSAP_FLUX"""
return self.get_lightcurve('PDCSAP_FLUX')
@property
def cadenceno(self):
"""Cadence number"""
return self.hdu[1].data['CADENCENO'][self.quality_mask]
def _flux_types(self):
"""Returns a list of available flux types for this light curve file"""
types = [n for n in self.hdu[1].data.columns.names if 'FLUX' in n]
types = [n for n in types if not ('ERR' in n)]
return types
def _get_quality(self):
"""Returns the quality flag vector, which may go by different names
"""
if 'QUALITY' in self.hdu[1].data.columns.names:
quality_vector = self.hdu[1].data['QUALITY']
elif 'SAP_QUALITY' in self.hdu[1].data.columns.names:
quality_vector = self.hdu[1].data['SAP_QUALITY']
else:
quality_vector = np.zeros(len(self.hdu[1].data['TIME']))
return quality_vector
def plot(self, flux_types=None, style='lightkurve', **kwargs):
"""Plot all the light curves contained in this light curve file.
Parameters
----------
flux_types : str or list of str
List of flux types to plot. Default is to plot all available.
            (For Kepler the default fluxes are 'SAP_FLUX' and 'PDCSAP_FLUX'.)
style : str
            matplotlib.pyplot.style.context; default is 'lightkurve', which uses
            Lightkurve's custom style sheet.
kwargs : dict
Dictionary of keyword arguments to be passed to
`KeplerLightCurve.plot()`.
"""
if style is None or style == 'lightkurve':
style = MPLSTYLE
with plt.style.context(style):
if not ('ax' in kwargs):
fig, ax = plt.subplots(1)
kwargs['ax'] = ax
if flux_types is None:
flux_types = self._flux_types()
if isinstance(flux_types, str):
flux_types = [flux_types]
for idx, ft in enumerate(flux_types):
lc = self.get_lightcurve(ft)
kwargs['color'] = np.asarray(mpl.rcParams['axes.prop_cycle'])[idx]['color']
lc.plot(label=ft, **kwargs)
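# Usage sketch for the subclasses below (the path is a placeholder, not a bundled file):
#   lcf = KeplerLightCurveFile("kplr-llc.fits", quality_bitmask="default")
#   lc = lcf.PDCSAP_FLUX                        # KeplerLightCurve via get_lightcurve()
#   lcf.plot(flux_types=["SAP_FLUX", "PDCSAP_FLUX"])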
class KeplerLightCurveFile(LightCurveFile):
"""Subclass of :class:`LightCurveFile <lightkurve.lightcurvefile.LightCurveFile>`
to represent files generated by NASA's Kepler pipeline.
Parameters
----------
path : str
Local path or remote url of a FITS file in Kepler's lightcurve format.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=1130799`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=1664431`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=2096639`). This mask is not recommended.
See the :class:`KeplerQualityFlags` class for details on the bitmasks.
kwargs : dict
Keyword arguments to be passed to astropy.io.fits.open.
"""
def __init__(self, path, quality_bitmask='default', **kwargs):
super(KeplerLightCurveFile, self).__init__(path, **kwargs)
# check to make sure the correct filetype has been provided
filetype = detect_filetype(self.header())
if filetype == 'TessLightCurveFile':
warnings.warn("A TESS data product is being opened using the "
"`KeplerLightCurveFile` class. "
"Please use `TessLightCurveFile` instead.",
LightkurveWarning)
elif filetype is None:
warnings.warn("Given fits file not recognized as Kepler or TESS "
"observation.", LightkurveWarning)
elif "TargetPixelFile" in filetype:
warnings.warn("A `TargetPixelFile` object is being opened as a "
"`KeplerLightCurveFile`. "
"Please use `KeplerTargetPixelFile` instead.",
LightkurveWarning)
self.quality_bitmask = quality_bitmask
self.quality_mask = KeplerQualityFlags.create_quality_mask(
quality_array=self.hdu[1].data['SAP_QUALITY'],
bitmask=quality_bitmask)
try:
self.targetid = self.header()['KEPLERID']
except KeyError:
self.targetid = None
def __repr__(self):
return('KeplerLightCurveFile(ID: {})'.format(self.targetid))
@property
def astropy_time(self):
"""Returns an AstroPy Time object for all good-quality cadences."""
return bkjd_to_astropy_time(bkjd=self.time)
def get_lightcurve(self, flux_type, centroid_type='MOM_CENTR'):
if centroid_type+"1" in self.hdu[1].data.columns.names:
centroid_col = self.hdu[1].data[centroid_type + "1"][self.quality_mask]
centroid_row = self.hdu[1].data[centroid_type + "2"][self.quality_mask]
else:
centroid_col = np.repeat(np.NaN, self.quality_mask.sum())
centroid_row = np.repeat(np.NaN, self.quality_mask.sum())
if flux_type in self._flux_types():
# We did not import lightcurve at the top to prevent circular imports
from .lightcurve import KeplerLightCurve
f = self.hdu[1].data[flux_type][self.quality_mask]
fe = self.hdu[1].data[flux_type + "_ERR"][self.quality_mask]
if flux_type == 'SAP_FLUX':
f /= self.hdu[1].header['FLFRCSAP']
fe /= self.hdu[1].header['FLFRCSAP']
f /= self.hdu[1].header['CROWDSAP']
fe /= self.hdu[1].header['CROWDSAP']
return KeplerLightCurve(
time=self.hdu[1].data['TIME'][self.quality_mask],
time_format='bkjd',
time_scale='tdb',
flux=f,
flux_err=fe,
centroid_col=centroid_col,
centroid_row=centroid_row,
quality=self._get_quality()[self.quality_mask],
quality_bitmask=self.quality_bitmask,
channel=self.channel,
campaign=self.campaign,
quarter=self.quarter,
mission=self.mission,
cadenceno=self.cadenceno,
targetid=self.targetid,
label=self.hdu[0].header['OBJECT'],
ra=self.ra,
dec=self.dec)
else:
raise KeyError("{} is not a valid flux type. Available types are: {}".
format(flux_type, self._flux_types()))
@property
def channel(self):
"""Kepler CCD channel number. ('CHANNEL' header keyword)"""
return self.header(ext=0)['CHANNEL']
@property
def obsmode(self):
"""'short cadence' or 'long cadence'. ('OBSMODE' header keyword)"""
return self.header()['OBSMODE']
@property
def pos_corr1(self):
"""Returns the column position correction."""
return self.hdu[1].data['POS_CORR1'][self.quality_mask]
@property
def pos_corr2(self):
"""Returns the row position correction."""
return self.hdu[1].data['POS_CORR2'][self.quality_mask]
@property
def quarter(self):
"""Kepler quarter number. ('QUARTER' header keyword)"""
try:
return self.header(ext=0)['QUARTER']
except KeyError:
return None
@property
def campaign(self):
"""K2 Campaign number. ('CAMPAIGN' header keyword)"""
try:
return self.header(ext=0)['CAMPAIGN']
except KeyError:
return None
@property
def mission(self):
"""'Kepler' or 'K2'. ('MISSION' header keyword)"""
try:
return self.header(ext=0)['MISSION']
except KeyError:
return None
def compute_cotrended_lightcurve(self, cbvs=[1, 2], **kwargs):
"""Returns a LightCurve object after cotrending the SAP_FLUX
against the cotrending basis vectors.
Parameters
----------
cbvs : list of ints
The list of cotrending basis vectors to fit to the data. For example,
[1, 2] will fit the first two basis vectors.
kwargs : dict
Dictionary of keyword arguments to be passed to
KeplerCBVCorrector.correct.
Returns
-------
lc : LightCurve object
CBV flux-corrected lightcurve.
"""
from .correctors import KeplerCBVCorrector
return KeplerCBVCorrector(self).correct(cbvs=cbvs, **kwargs)
class TessLightCurveFile(LightCurveFile):
"""Subclass of :class:`LightCurveFile <lightkurve.lightcurvefile.LightCurveFile>`
to represent files generated by NASA's TESS pipeline.
Parameters
----------
path : str
Local path or remote url of a FITS file in TESS's lightcurve format.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=1130799`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=1664431`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=2096639`). This mask is not recommended.
See the :class:`TessQualityFlags` class for details on the bitmasks.
kwargs : dict
Keyword arguments to be passed to astropy.io.fits.open.
"""
def __init__(self, path, quality_bitmask='default', **kwargs):
super(TessLightCurveFile, self).__init__(path, **kwargs)
# check to make sure the correct filetype has been provided
filetype = detect_filetype(self.header())
if filetype == 'KeplerLightCurveFile':
warnings.warn("A Kepler data product is being opened using the "
"`TessLightCurveFile` class. "
"Please use `KeplerLightCurveFile` instead.",
LightkurveWarning)
elif filetype is None:
warnings.warn("Given fits file not recognized as Kepler or TESS "
"observation.", LightkurveWarning)
elif "TargetPixelFile" in filetype:
warnings.warn("A `TargetPixelFile` object is being opened as a "
"`TessLightCurveFile`. "
"Please use `TessTargetPixelFile` instead.",
LightkurveWarning)
self.quality_bitmask = quality_bitmask
self.quality_mask = TessQualityFlags.create_quality_mask(
quality_array=self._get_quality(),
bitmask=quality_bitmask)
# Early TESS releases had cadences with time=NaN (i.e. missing data)
# which were not flagged by a QUALITY flag yet; the line below prevents
# these cadences from being used. They would break most methods!
self.quality_mask &= np.isfinite(self.hdu[1].data['TIME'])
try:
self.targetid = self.header()['TICID']
except KeyError:
self.targetid = None
def __repr__(self):
        return 'TessLightCurveFile(TICID: {})'.format(self.targetid)
def get_lightcurve(self, flux_type, centroid_type='MOM_CENTR'):
if centroid_type+"1" in self.hdu[1].data.columns.names:
centroid_col = self.hdu[1].data[centroid_type + "1"][self.quality_mask]
centroid_row = self.hdu[1].data[centroid_type + "2"][self.quality_mask]
else:
centroid_col = np.repeat(np.NaN, self.quality_mask.sum())
centroid_row = np.repeat(np.NaN, self.quality_mask.sum())
if flux_type in self._flux_types():
# We did not import TessLightCurve at the top to prevent circular imports
from .lightcurve import TessLightCurve
return TessLightCurve(
time=self.hdu[1].data['TIME'][self.quality_mask],
time_format='btjd',
time_scale='tdb',
flux=self.hdu[1].data[flux_type][self.quality_mask],
flux_err=self.hdu[1].data[flux_type + "_ERR"][self.quality_mask],
centroid_col=centroid_col,
centroid_row=centroid_row,
quality=self._get_quality()[self.quality_mask],
quality_bitmask=self.quality_bitmask,
cadenceno=self.cadenceno,
targetid=self.targetid,
label=self.hdu[0].header['OBJECT'])
else:
raise KeyError("{} is not a valid flux type. Available types are: {}".
format(flux_type, self._flux_types()))
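# Illustrative usage sketch (not part of the original module). The FITS path below is
# hypothetical, and 'PDCSAP_FLUX' is assumed to be among the flux types reported by
# the parent class's _flux_types() method.
def _example_kepler_lightcurve_usage(path="kplr_example_llc.fits"):
    """Open a Kepler light curve file and extract its PDCSAP_FLUX light curve."""
    lcf = KeplerLightCurveFile(path, quality_bitmask='default')
    lc = lcf.get_lightcurve('PDCSAP_FLUX')
    print(lcf.mission, lcf.quarter, "good cadences:", len(lc.time))
    return lc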
|
# -*- coding: utf-8 -*-
"""Plotting.py for notebook 01_Exploring_DM_Halos
This Python file contains all the functions used for plotting the graphs and maps in the first notebook (.ipynb) of the repository: 01. Exploring parameters in DM halos and sub-halos
Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 23rd February 2021
Last updated on 30th March 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
import os
import importlib
# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import Exploring_DM_Haloes as edh
def setLabel(ax, xlabel, ylabel, title, xlim, ylim, legend=True):
"""
Function defining plot properties
@param ax :: axes to be held
@param xlabel, ylabel :: labels of the x-y axis
@param title :: title of the plot
@param xlim, ylim :: x-y limits for the axis
"""
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if xlim != 'default':
ax.set_xlim(xlim)
if ylim != 'default':
ax.set_ylim(ylim)
if legend:
l = ax.legend(loc='best', fontsize=14)
for legend_handle in l.legendHandles:
legend_handle._legmarker.set_markersize(12)
ax.grid(False)
ax.set_title(title, fontsize=18)
return
def plotAgnClusterDistribution(pos_z_clu, pos_z_AGN, pos_z_halo, cluster_params):
"""
    Function to plot the AGN and cluster distribution
    @pos_z_clu :: positions and redshifts of all the selected 'clusters'
    @pos_z_AGN :: positions and redshifts of all the selected AGNs
    @pos_z_halo :: positions and redshifts of all the selected DM halos
    @cluster_params :: array whose first entry is the M_500c mass threshold for clusters
"""
halo_m_500c = cluster_params[0]
fig, ax = plt.subplots(1,1,figsize=(9,8))
# plotting halos
halos = ax.plot(pos_z_halo[0], pos_z_halo[1], '.', color='#fcd16d', markersize=0.2, label=r'All DM Halos', alpha=0.2)
# plotting clusters
cluster = ax.plot(pos_z_clu[0], pos_z_clu[1], 'o', color= '#03a351', markersize=3, label=r'Clusters $M_{500c}> 10^{%.1f} M_\odot$ '%(np.log10(halo_m_500c)))
# plotting AGNs
agn = ax.plot(pos_z_AGN[0], pos_z_AGN[1], '*', color='k', markersize=3.5, label=r'AGN', alpha=0.7)
# labeling axes and defining limits
xlim = [np.min(pos_z_halo[0]), np.max(pos_z_halo[0])]
ylim = [np.min(pos_z_halo[1]), np.max(pos_z_halo[1])]
setLabel(ax, 'R.A. (deg)', 'Dec (deg)', '', xlim, ylim, legend=True)
print('Redshift z<%.2f'%(np.max(pos_z_clu[2])))
return
def plotHostSubHalos(pos_z_cen_halo, pos_z_sat_halo, pos_z_AGN):
"""
    Function to plot the host (central) and satellite halo distribution
    @pos_z_cen_halo :: positions and redshifts of the central (host) halos
    @pos_z_sat_halo :: positions and redshifts of the satellite halos
    @pos_z_AGN :: positions and redshifts of all the selected AGNs
"""
ra_cen, dec_cen = pos_z_cen_halo[0], pos_z_cen_halo[1]
ra_sat, dec_sat = pos_z_sat_halo[0], pos_z_sat_halo[1]
fig, ax = plt.subplots(1,1,figsize=(9,8))
# plotting host halos
host_halos = ax.plot(ra_cen, dec_cen, '.', color= 'k', markersize=0.06, label=r'Host-halos $P_{id}=-1$', alpha=0.4)
# plotting sat halos
sat_halos = ax.plot(ra_sat, dec_sat, 'o', color='#07d9f5', markersize=0.07, label=r'Satellite halos $P_{id} \neq -1$', alpha=0.7)
# plotting AGNs
agn = ax.plot(pos_z_AGN[0], pos_z_AGN[1], '*', color='#fff717', markersize=6.5, label=r'AGN', markeredgecolor='w', markeredgewidth=0.4)
# labeling axes and defining limits
xlim = [np.min(pos_z_AGN[0]), np.max(pos_z_AGN[0])]
ylim = [np.min(pos_z_AGN[1]), np.max(pos_z_AGN[1])]
setLabel(ax, 'R.A. (deg)', 'Dec (deg)', '', xlim, ylim, legend=True)
    print('AGNs: %d, Host (central) halos: %.2e, Satellite halos: %.2e'%(len(pos_z_AGN[0]), len(ra_cen), len(ra_sat)))
return
def plotAGNfraction(pos_z_AGN, pos_z_gal, redshift_limit_agn, bin_size):
"""
    Function to plot the AGN fraction in the given pixel
    @pos_z_AGN :: positions and redshifts of all the selected AGNs
    @pos_z_gal :: positions and redshifts of all the selected galaxies
    @redshift_limit_agn :: upper limit on redshift based on the clusters found
    @bin_size :: number of redshift bins used for the histograms
"""
fig, ax = plt.subplots(1,2,figsize=(19,7))
# getting the useful histogram properties
counts_agn, redshift_bins_agn = np.histogram(pos_z_AGN[2], bins = bin_size)
counts_gal, redshift_bins_gal = np.histogram(pos_z_gal[2], bins = bin_size)
# plotting the galaxy and agn distribution as a function of redshift
ax[0].plot(redshift_bins_gal[1:], counts_gal, 'ks', ms=4, label=r'DM Halos')
ax[0].plot(redshift_bins_agn[1:], counts_agn, 'bs', ms=4, label=r'AGNs')
# axis properties - 0
xlim = [np.min(redshift_bins_agn[1:]), np.max(redshift_bins_agn[1:])]
setLabel(ax[0], r'Redshift$_R$', 'Counts','', xlim, 'default', legend=True)
ax[0].set_yscale("log")
# agn fraction as a function of redshift
f_agn, idx = [], []
for c, c_gal in enumerate(counts_gal):
if c_gal != 0:
f_agn.append(((counts_agn[c]*100)/c_gal))
idx.append(c)
z_bin_modified = redshift_bins_gal[1:][np.array(idx)]
# plot agn fraction
ax[1].plot(z_bin_modified, f_agn, 's', color='#6b0385', ms=4)
# axis properties - 1
xlim = [np.min(redshift_bins_agn[1:])-0.02, np.max(redshift_bins_agn[1:])]
setLabel(ax[1], r'Redshift$_R$', r'$f_{AGN}$ (%s)'%"%", '', xlim, 'default', legend=False)
ax[1].set_yscale("log")
plt.savefig('figures/agn_frac.pdf', facecolor='w', edgecolor='w')
    print('Redshift z<%.2f'%redshift_limit_agn)
return redshift_bins_gal[1:]
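# Illustrative, self-contained sketch (not part of the original script): it reproduces the
# per-bin AGN fraction computed in plotAGNfraction above in vectorized form, using synthetic
# redshift arrays so it can run standalone.
def _example_agn_fraction(bin_size=20):
    z_gal = np.random.uniform(0, 2, 5000)
    z_agn = np.random.choice(z_gal, 400, replace=False)
    counts_gal, z_edges = np.histogram(z_gal, bins=bin_size)
    counts_agn, _ = np.histogram(z_agn, bins=z_edges)
    nonzero = counts_gal > 0
    f_agn = 100.0 * counts_agn[nonzero] / counts_gal[nonzero]
    return z_edges[1:][nonzero], f_agn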
def plotRedshiftComovingDistance(cosmo, redshift_limit, resolution = 0.0001):
"""Function to plot the relation between redshift and the comoving distance
@cosmo :: cosmology package loaded
@redshift_limit :: upper limit in redshift --> end point for interpolation
    @resolution :: resolution of the redshift steps (set to 1e-4 based on the simulation resolution)
    @Returns :: plot showing the dependence of the comoving distance on redshift
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
distance_Mpc = cosmo.comoving_distance(np.arange(0,redshift_limit, resolution))
redshifts = np.arange(0,redshift_limit, resolution)
ax.plot(redshifts, distance_Mpc, 'k.', ms=1)
setLabel(ax, 'Redshift (z)', 'Comoving distance (Mpc)', '', 'default', 'default', legend=False)
print('Redshift-Comoving distance relationship')
return
def plotMergerDistribution(merger_val_gal, counts_gal, merger_val_agn, counts_agn, cosmo, redshift_limit):
"""
Function to plot the distribution (counts) of the merger scale factor/redshift
"""
    fig, ax1 = plt.subplots(1,1,figsize=(7,6))
    ax2 = ax1.twiny()
# plot the merger distribution for galaxies and agns
ax1.plot(merger_val_gal, counts_gal, 'kx', label='DM Halos')
ax1.plot(merger_val_agn, counts_agn, 'bx', label='AGNs')
setLabel(ax1, r'Scale, $a(t)$, of last Major Merger', 'Counts', '', 'default', 'default', legend=True)
ax.set_yscale("log")
# setting the x-label on top (converting a to redshift)
a_min, a_max = np.min(merger_val_gal), np.max(merger_val_gal)
scale_factor_arr = [a_max, a_min*4, a_min*2, a_min]
ax2.set_xticks([(1/a) -1 for a in scale_factor_arr])
ax2.invert_xaxis()
ax2.set_xlabel('Redshift (z)')
ax2.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
print("Objects with merger redshifts z < %.2f"%z_at_value(cosmo.scale_factor, a_min))
plt.savefig('figures/merger_distribution_z%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
return
def plotCentralSatelliteScaleMergers(cen_sat_AGN, cen_sat_halo, redshift_limit):
"""
    Function to plot the central and satellite scale factors for mergers
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
labels = [r'central AGNs', r'satellite AGNs', 'central DM halos', 'satellite DM halos']
c, m, ms = ['b', '#38cee8', 'k', 'grey'], ['^', '*', '^', '*'], [9, 15, 5, 9]
mec, mew = ['w', 'k', 'k', '#abaeb3'], [0.7, 0.4, 1, 0.7]
for i in [0, 1]:
s_m_agn, c_agn = np.unique(cen_sat_AGN[i]['HALO_scale_of_last_MM'], return_counts=True)
s_m_gal, c_gal = np.unique(cen_sat_halo[i]['HALO_scale_of_last_MM'], return_counts=True)
# agns
ax.plot(s_m_agn, c_agn, color=c[i], marker=m[i], ls='', ms=ms[i], label=labels[i], markeredgecolor=mec[i], markeredgewidth=mew[i])
# DM halos
j = i + 2
ax.plot(s_m_gal, c_gal, color=c[j], marker=m[j], ls='', ms=ms[j], label=labels[j], markeredgecolor=mec[j], markeredgewidth=mew[j])
# set label
setLabel(ax, r'Scale, $a(t)$, of last Major Merger', 'Counts', '', 'default', 'default', legend=True)
ax.set_yscale("log")
plt.savefig('figures/merger_dist_cenAndsat_z%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
print('Objects below z: ', redshift_limit)
return [labels, c, m, ms, mec, mew]
def plotTimeSinceMergerDist(scale_merger_AGN, scale_merger_gal, z_AGN, z_gal, cosmo, bin_size, redshift_limit):
"""
    Plot the distribution of DM halos and AGNs as a function of the time since their last major merger
"""
# get the time difference since merger events in the halos
t_merger_agn = edh.getMergerTimeDifference(scale_merger_AGN, z_AGN, cosmo)
t_merger_gal = edh.getMergerTimeDifference(scale_merger_gal, z_gal, cosmo)
# get the t since merger bins and counts
if bin_size[0]:
c_t_agn, merger_bins_agn = np.histogram(np.array(t_merger_agn), bins = bin_size[1])
c_t_gal, merger_bins_gal = np.histogram(np.array(t_merger_gal), bins = bin_size[1])
merger_bins_agn = merger_bins_agn[:-1]
merger_bins_gal = merger_bins_gal[:-1]
else:
merger_bins_agn, c_t_agn = np.unique(t_merger_agn, return_counts=True)
merger_bins_gal, c_t_gal = np.unique(t_merger_gal, return_counts=True)
fig, ax = plt.subplots(1,1,figsize=(7,6))
# plot the time since merger distribution for galaxies and agns
ax.plot(merger_bins_gal, np.cumsum(c_t_gal), 'k^', label='DM Halos', ms=4)
ax.plot(merger_bins_agn, np.cumsum(c_t_agn), 'b^', label='AGNs', ms=4)
# set labels/legends
setLabel(ax, r'$\Delta t_{merger} = t(z_{merger})-t(z_{current})$ [Gyr]', 'Cumulative counts', '', 'default', 'default', legend=False)
ax.legend(loc='lower left', fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
return ax, fig, t_merger_agn, t_merger_gal
def mergerRedshiftPlot(cen_sat_AGN, cen_sat_halo, dt_m, plot_params, redshift_limit):
"""
Function to plot the time since merger as a function of the redshift
    @cen_sat_AGN, cen_sat_halo :: handles to access the central and satellite AGNs (DM halos)
    @dt_m :: time differences since the last merger for the central/satellite AGNs (DM halos)
    @plot_params :: array [labels, c, m, ms, mec, mew] used to keep the plots consistent
    @redshift_limit :: upper redshift limit, used in the output filename
"""
fig, ax = plt.subplots(1,1,figsize=(7,6))
# change marker size for central DM halos
plot_params[3][1] = 9
z_R = [cen_sat_AGN[0]['redshift_R'], cen_sat_AGN[1]['redshift_R'], cen_sat_halo[0]['redshift_R'], cen_sat_halo[1]['redshift_R']]
# plot central, satellite merger distributions as per visual preference
for i in [2, 3, 0, 1]:
ax.plot(dt_m[i], z_R[i], plot_params[2][i], color=plot_params[1][i], ms=plot_params[3][i], label=plot_params[0][i], markeredgecolor=plot_params[4][i], markeredgewidth=plot_params[5][i])
# set labels/legends
setLabel(ax, r'$\Delta t_{merger} = t(z_{merger})-t(z_{current})$ [Gyr]', r'Redshift$_R$', '', 'default', 'default', legend=True)
ax.set_xscale("log")
plt.savefig('figures/t_since_merger_z_plot_%.2f.pdf'%redshift_limit, facecolor='w', edgecolor='w')
return ax
def plotMergerTimeCuts(ax, t_merger_cut_arr, l):
"""
Function to plot the defined cuts in merger times within the concerned plot
@t_merger_cut_arr :: array that defines the cuts in the merger times
@l :: array that defines the linestyles used to denote these cuts (refer to the initial codeblock in the notebook)
"""
for i, t_m_cut in enumerate(t_merger_cut_arr):
ax.axvline(x=t_m_cut, color='r', linestyle= l[i], label='%.1f Gyr'%t_m_cut)
ax.legend(fontsize=14, loc='lower left')
return
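# Illustrative, standalone sketch (not part of the original script): it shows how setLabel
# is combined with a new figure. The data are synthetic and the legend is disabled to keep
# the sketch minimal.
def _example_setlabel_usage():
    fig, ax = plt.subplots(1, 1, figsize=(7, 6))
    x = np.linspace(0, 2, 50)
    ax.plot(x, x**2, 'k.', ms=4)
    setLabel(ax, 'Redshift (z)', 'Counts', '', 'default', 'default', legend=False)
    return fig, ax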
|
import copy
import sync_infra_configurations.main as sic_main
import sync_infra_configurations.lib as sic_lib
import sync_infra_configurations.common_action as common_action
import sync_infra_configurations.aws as sic_aws
####################################################################################################
# GlueJob
####################################################################################################
def execute_gluejob(action, src_data, session):
glue_client = session.client("glue")
return common_action.execute_elem_properties(action, src_data,
executor_map = {
"Jobs": lambda action, src_data: execute_jobs(action, src_data, session, glue_client),
},
)
####################################################################################################
# GlueJob -> Jobs
####################################################################################################
def execute_jobs(action, src_data, session, glue_client):
return common_action.execute_elem_items(action, src_data,
list_fetcher = lambda: list_jobs(glue_client),
item_executor = lambda action, name, src_data: execute_job(action, name, src_data, session, glue_client))
def list_jobs(glue_client):
result = []
res = glue_client.get_jobs()
while True:
for elem in res['Jobs']:
name = elem["Name"]
result.append(name)
if not "NextToken" in res:
break
res = glue_client.get_jobs(NextToken = res["NextToken"])
return result
####################################################################################################
# GlueJob -> Jobs -> <job_name>
####################################################################################################
def execute_job(action, name, src_data, session, glue_client):
return common_action.execute_elem_properties(action, src_data,
describer = lambda: describe_job(name, session, glue_client),
updator = lambda src_data, curr_data: update_job(name, src_data, curr_data, session, glue_client),
executor_map = {
"ScriptSource": lambda action, src_data: execute_scriptsource(action, name, src_data, session, glue_client),
},
help_generator = help_job,
)
def help_job():
return {
"Description": "a description of the job",
"Role": "the name or ARN of the IAM role associated with this job",
"ExecutionProperty": "the maximum number of concurrent runs allowed for this job",
"Command": "the JobCommand that runs this job",
"DefaultArguments": "the default arguments for this job, specified as name-value pairs",
"NonOverridableArguments": "non-overridable arguments for this job, specified as name-value pairs",
"Connections": "the connections used for this job",
"MaxRetries": "the maximum number of times to retry this job after a JobRun fails",
"Timeout": "he job timeout in minutes",
"AllocatedCapacity": "this field is deprecated. Use MaxCapacity instead",
"MaxCapacity": "For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs.",
"WorkerType": "The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.",
"NumberOfWorkers": "he number of workers of a defined workerType that are allocated when a job runs",
"SecurityConfiguration": "the name of the SecurityConfiguration structure to be used with this job",
"NotificationProperty": "specifies configuration properties of a job notification",
"GlueVersion": "Glue version determines the versions of Apache Spark and Python that Glue supports.",
"ScriptSource": "Script in S3",
}
def describe_job(name, session, glue_client):
res = glue_client.get_job(JobName = name)
info = copy.copy(res["Job"])
sic_lib.removeKey(info, "Name")
sic_lib.removeKey(info, "CreatedOn")
sic_lib.removeKey(info, "LastModifiedOn")
return info
def update_job(name, src_data, curr_data, session, glue_client):
    if curr_data is None:
        # create a new job
sic_main.add_update_message(f"glue_client.create_job(Name = {name}, ...)")
if sic_main.put_confirmation_flag:
update_data = modify_data_for_put(src_data)
update_data["Name"] = name
glue_client.create_job(**update_data)
    elif src_data is None:
        # delete the job
raise Exception("TODO")
    else:
        # update the existing job
sic_main.add_update_message(f"glue_client.update_job(JobName = {name} ...)")
if sic_main.put_confirmation_flag:
update_data = modify_data_for_put(src_data)
glue_client.update_job(JobName = name, JobUpdate = update_data)
def modify_data_for_put(update_data):
update_data = copy.copy(update_data)
if update_data["WorkerType"] == "Standard":
# MaxCapacity が必須で AllocatedCapacity の指定は不可
sic_lib.removeKey(update_data, "AllocatedCapacity")
elif "NumberOfWorkers" in update_data:
sic_lib.removeKey(update_data, "AllocatedCapacity")
sic_lib.removeKey(update_data, "MaxCapacity")
else:
sic_lib.removeKey(update_data, "AllocatedCapacity")
return update_data
####################################################################################################
# GlueJob -> Jobs -> <job_name> -> ScriptSource
####################################################################################################
def execute_scriptsource(action, name, src_data, session, glue_client):
return common_action.execute_elem_properties(action, src_data,
describer = lambda: describe_scriptsource(name, session, glue_client),
updator = lambda src_data, curr_data: update_scriptsource(name, src_data, curr_data, session, glue_client),
)
def describe_scriptsource(name, session, glue_client):
info = describe_job(name, session, glue_client)
script_s3_path = info["Command"]["ScriptLocation"]
script_source = fetch_script_source(script_s3_path, session)
info["ScriptSource"] = script_source
return script_source
def update_scriptsource(name, src_data, curr_data, session, glue_client):
info = describe_job(name, session, glue_client)
script_s3_path = info["Command"]["ScriptLocation"]
    if curr_data is None:
        # create (upload the script for the first time)
put_script_source(src_data, script_s3_path, session)
    elif src_data is None:
        # delete the script
raise Exception("TODO")
    else:
        # update the script
put_script_source(src_data, script_s3_path, session)
def fetch_script_source(script_s3_path, session):
script_source = sic_aws.fetch_s3_object(script_s3_path, session)
    if script_source is None:
return ""
lines = []
for line in script_source.split("\n"):
lines.append(line.rstrip(" \t\r"))
while len(lines) > 0 and lines[0] == "":
lines = lines[1:]
    while len(lines) > 0 and lines[-1] == "":
        lines = lines[:-1]
return "\n".join(lines) + "\n"
def put_script_source(script_source, script_s3_path, session):
sic_aws.put_s3_object(script_s3_path, script_source, session)
####################################################################################################
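# Illustrative usage sketch (not part of the original module). It assumes boto3 is installed
# and that AWS credentials with Glue read permissions are configured for the chosen profile;
# it simply lists job names via the list_jobs() helper defined above.
def _example_list_glue_jobs(profile_name=None):
    import boto3
    session = boto3.Session(profile_name=profile_name)
    glue_client = session.client("glue")
    for job_name in list_jobs(glue_client):
        print(job_name)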
|
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from functools import partial
# --------------------
# Network for Decoder
# --------------------
def netG_decoder(x, test=False):
# x: (1, 15, 64, 64) -> c0: (1, 15, 128, 128)
with nn.parameter_scope('ReluDeconvBN1'):
c0 = PF.batch_normalization(PF.deconvolution(F.relu(x), 15, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# c0: (1, 15, 128, 128) -> c1: (1, 15, 256, 256)
with nn.parameter_scope('ReluDeconvBN2'):
c1 = F.tanh(PF.deconvolution(F.relu(c0), 15,
(4, 4), pad=(1, 1), stride=(2, 2)))
# c1: (1, 15, 256, 256) -> down_0: (1, 64, 128, 128)
with nn.parameter_scope('down0'):
down_0 = PF.convolution(c1, 64, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False)
# down_0: (1, 64, 128, 128) -> down_1: (1, 128, 64, 64)
with nn.parameter_scope('down1'):
down_1 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_0, alpha=0.2), 128, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_1: (1, 128, 64, 64) -> down_2: (1, 256, 32, 32)
with nn.parameter_scope('down2'):
down_2 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_1, alpha=0.2), 256, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_2: (1, 256, 32, 32) -> down_3: (1, 512, 16, 16)
with nn.parameter_scope('down3'):
down_3 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_2, alpha=0.2), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_3: (1, 512, 16, 16) -> down_4: (1, 512, 8, 8)
with nn.parameter_scope('down4'):
down_4 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_3, alpha=0.2), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_4: (1, 512, 8, 8) -> down_5: (1, 512, 4, 4)
with nn.parameter_scope('down5'):
down_5 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_4, alpha=0.2), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_5: (1, 512, 4, 4) -> down_6: (1, 512, 2, 2)
with nn.parameter_scope('down6'):
down_6 = PF.batch_normalization(PF.convolution(F.leaky_relu(down_5, alpha=0.2), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_6: (1, 512, 2, 2) -> down_7: (1, 512, 1, 1)
with nn.parameter_scope('down7'):
down_7 = PF.convolution(F.leaky_relu(down_6, alpha=0.2), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False)
# down_7: (1, 512, 1, 1) -> up_0: (1, 512, 2, 2)
with nn.parameter_scope('up0'):
up_0 = PF.batch_normalization(PF.deconvolution(F.relu(down_7), 512, (4, 4), pad=(
1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_6: (1, 512, 2, 2) + up_0: (1, 512, 2, 2) -> up_1: (1, 512, 4, 4)
with nn.parameter_scope('up1'):
up_1 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_6, up_0, axis=1)), 512, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
if not test:
up_1 = F.dropout(up_1, 0.5)
# down_5: (1, 512, 4, 4) + up_1: (1, 512, 4, 4)-> up_2: (1, 512, 8, 8)
with nn.parameter_scope('up2'):
up_2 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_5, up_1, axis=1)), 512, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
if not test:
up_2 = F.dropout(up_2, 0.5)
# down_4: (1, 512, 8, 8) + up_2: (1, 512, 8, 8) -> up_3: (1, 512, 16, 16)
with nn.parameter_scope('up3'):
up_3 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_4, up_2, axis=1)), 512, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
if not test:
up_3 = F.dropout(up_3, 0.5)
# down_3: (1, 512, 16, 16) + up_3: (1, 512, 16, 16) -> up_4: (1, 256, 32, 32)
with nn.parameter_scope('up4'):
up_4 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_3, up_3, axis=1)), 256, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_2: (1, 256, 32, 32) + up_4: (1, 256, 32, 32) -> up_5: (1, 128, 64, 64)
with nn.parameter_scope('up5'):
up_5 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_2, up_4, axis=1)), 128, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_1: (1, 128, 64, 64) + up_5: (1, 128, 64, 64) -> up_6: (1, 64, 128, 128)
with nn.parameter_scope('up6'):
up_6 = PF.batch_normalization(PF.deconvolution(F.relu(F.concatenate(
down_1, up_5, axis=1)), 64, (4, 4), pad=(1, 1), stride=(2, 2), with_bias=False), batch_stat=not test)
# down_0: (1, 64, 128, 128) + up_6: (1, 64, 128, 128) -> output: (1, 3, 256, 256)
with nn.parameter_scope('up7'):
output = F.tanh(PF.deconvolution(F.relu(F.concatenate(
down_0, up_6, axis=1)), 3, (4, 4), pad=(1, 1), stride=(2, 2)))
return output
def netD_decoder(x, test=False):
# x: (1, 18, 256, 256)
kw = (4, 4)
pad = (1, 1)
stride = (2, 2)
# (1, 18, 256, 256) -> (1, 64, 128, 128)
with nn.parameter_scope('conv0'):
c0 = F.leaky_relu(PF.convolution(
x, 64, kw, pad=pad, stride=stride), alpha=0.2)
# (1, 64, 128, 128) -> (1, 128, 64, 64)
with nn.parameter_scope('conv1'):
c1 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c0, 128, kw, pad=pad, stride=stride, with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 128, 64, 64) -> (1, 256, 32, 32)
with nn.parameter_scope('conv2'):
c2 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c1, 256, kw, pad=pad, stride=stride, with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 256, 32, 32) -> (1, 512, 31, 31)
with nn.parameter_scope('conv3'):
c3 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c2, 512, kw, pad=pad, stride=(1, 1), with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 512, 31, 31) -> (1, 1, 30, 30)
with nn.parameter_scope('conv4'):
c4 = PF.convolution(c3, 1, kw, pad=pad, stride=(1, 1))
c4 = F.sigmoid(c4)
return c4
# ------------------------------
# Network for Transformer
# ------------------------------
def convblock(x, n=64, k=(3, 3), s=(2, 2), p=(1, 1), test=False, norm_type="batch_norm"):
x = PF.convolution(x, n, k, pad=p, stride=s, with_bias=False)
if norm_type == "instance_norm":
x = PF.instance_normalization(x, eps=1e-05)
else:
x = PF.batch_normalization(x, batch_stat=not test)
x = F.relu(x)
return x
def deconvblock(x, n=64, k=(3, 3), s=(2, 2), p=(1, 1), test=False, norm_type="batch_norm"):
    # note: the k, s, and p arguments are accepted for symmetry with convblock but are not
    # used; the kernel, pad, stride, and output_padding of the deconvolution are fixed here.
    x = PF.deconvolution(x, n, kernel=(3, 3), pad=(1, 1), stride=(
        2, 2), output_padding=(1, 1), with_bias=False)
if norm_type == "instance_norm":
x = PF.instance_normalization(x, eps=1e-05)
else:
x = PF.batch_normalization(x, batch_stat=not test)
x = F.relu(x)
return x
def resblock(x, n=256, test=False, norm_type="batch_norm"):
r = x
r = F.pad(r, (1, 1, 1, 1), 'reflect')
with nn.parameter_scope('block1'):
r = PF.convolution(r, n, (3, 3), with_bias=False)
if norm_type == "instance_norm":
r = PF.instance_normalization(r, eps=1e-05)
else:
r = PF.batch_normalization(r, batch_stat=not test)
r = F.relu(r)
r = F.pad(r, (1, 1, 1, 1), 'reflect')
with nn.parameter_scope('block2'):
r = PF.convolution(r, n, (3, 3), with_bias=False)
if norm_type == "instance_norm":
r = PF.instance_normalization(r, eps=1e-05)
else:
r = PF.batch_normalization(r, batch_stat=not test)
return x + r
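# Illustrative shape-check sketch (not part of the original script). The (1, 256, 16, 16)
# input shape is an assumption matching the transformer bottleneck annotated in
# netG_transformer below; it confirms the reflect-padded 3x3 convolutions preserve the size.
def _resblock_shape_check():
    x = nn.Variable((1, 256, 16, 16))
    with nn.parameter_scope('resblock_check'):
        y = resblock(x, n=256, test=True)
    print(x.shape, '->', y.shape)  # both (1, 256, 16, 16)
    return y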
def netG_transformer(x, test=False, norm_type="batch_norm"):
# x: (1, 15, 64, 64) -> x: (1, 64, 64, 64)
x = F.pad(x, (3, 3, 3, 3), 'reflect')
with nn.parameter_scope('conv1'):
x = convblock(x, n=64, k=(7, 7), s=(1, 1), p=(
0, 0), test=test, norm_type=norm_type)
# x: (1, 64, 64, 64) -> x: (1, 128, 32, 32)
with nn.parameter_scope('conv2'):
x = convblock(x, n=64*2, k=(3, 3), s=(2, 2), p=(1, 1),
test=test, norm_type=norm_type)
# x: (1, 128, 32, 32) -> x: (1, 256, 16, 16)
with nn.parameter_scope('conv3'):
x = convblock(x, n=64*4, k=(3, 3), s=(2, 2), p=(1, 1),
test=test, norm_type=norm_type)
# x: (1, 256, 16, 16) -> x: (1, 256, 16, 16)
for i in range(9):
with nn.parameter_scope(f'res{i + 1}'):
x = resblock(x, n=64*4, test=test, norm_type=norm_type)
# x: (1, 256, 16, 16) -> x: (1, 128, 32, 32)
with nn.parameter_scope('deconv1'):
x = deconvblock(x, n=64*2, k=(4, 4), s=(2, 2),
p=(1, 1), test=test, norm_type=norm_type)
# x: (1, 128, 32, 32) -> x: (1, 64, 64, 64)
with nn.parameter_scope('deconv2'):
x = deconvblock(x, n=64, k=(4, 4), s=(2, 2), p=(
1, 1), test=test, norm_type=norm_type)
# x: (1, 64, 64, 64) -> x: (1, 15, 64, 64)
x = F.pad(x, (3, 3, 3, 3), 'reflect')
with nn.parameter_scope('deconv3'):
x = PF.convolution(x, 15, kernel=(7, 7), with_bias=True)
x = F.sigmoid(x)
return x
def netD_transformer(x, test=False):
kw = (4, 4)
pad = (1, 1)
stride = (2, 2)
# (1, 15, 64, 64) -> (1, 64, 32, 32)
with nn.parameter_scope('conv0'):
c0 = F.leaky_relu(PF.convolution(
x, 64, kw, pad=pad, stride=stride), alpha=0.2)
# (1, 64, 32, 32) -> (1, 128, 16, 16)
with nn.parameter_scope('conv1'):
c1 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c0, 128, kw, pad=pad, stride=stride, with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 128, 16, 16) -> (1, 256, 8, 8)
with nn.parameter_scope('conv2'):
c2 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c1, 256, kw, pad=pad, stride=stride, with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 256, 8, 8) -> (1, 512, 7, 7)
with nn.parameter_scope('conv3'):
c3 = F.leaky_relu(PF.batch_normalization(PF.convolution(
c2, 512, kw, pad=pad, stride=(1, 1), with_bias=False), batch_stat=not test), alpha=0.2)
# (1, 512, 7, 7) -> (1, 1, 6, 6)
with nn.parameter_scope('conv4'):
c4 = PF.convolution(c3, 1, kw, pad=pad, stride=(1, 1))
return c4
# -------------------------------------
# Align Network (used for Transformer)
# -------------------------------------
def align_resnet(x, channel_basic=16, test=False, fix_parameters=False):
def resblock_align(x, channel, stride=(1, 1), test=False, downsample=False, fix_parameters=False):
residual = x
with nn.parameter_scope('conv1'):
h = PF.convolution(x, channel, kernel=(3, 3), stride=stride, pad=(
1, 1), with_bias=False, fix_parameters=fix_parameters)
with nn.parameter_scope('bn1'):
h = PF.batch_normalization(
h, batch_stat=not test, fix_parameters=fix_parameters)
h = F.relu(h, inplace=True)
with nn.parameter_scope('conv2'):
h = PF.convolution(h, channel, kernel=(3, 3), stride=(1, 1), pad=(
1, 1), with_bias=False, fix_parameters=fix_parameters)
with nn.parameter_scope('bn2'):
h = PF.batch_normalization(
h, batch_stat=not test, fix_parameters=fix_parameters)
if downsample:
with nn.parameter_scope('downsample'):
residual = PF.convolution(x, channel, kernel=(
1, 1), stride=stride, with_bias=False, fix_parameters=fix_parameters)
residual = PF.batch_normalization(
residual, batch_stat=not test, fix_parameters=fix_parameters)
out = h + residual
out = F.relu(out, inplace=True)
return out
with nn.parameter_scope('layer0'):
h = PF.convolution(x, 3, kernel=(3, 3), stride=(1, 1), pad=(
1, 1), with_bias=True, fix_parameters=fix_parameters)
with nn.parameter_scope('layer1'):
h = PF.convolution(h, 16, kernel=(7, 7), stride=(2, 2), pad=(
3, 3), with_bias=False, fix_parameters=fix_parameters)
with nn.parameter_scope('layer2'):
h = PF.batch_normalization(
h, batch_stat=not test, fix_parameters=fix_parameters)
h = F.relu(h, inplace=True)
h = F.max_pooling(h, kernel=(3, 3), stride=(2, 2), pad=(1, 1))
use_downsample = False
stride = (1, 1)
for i in range(5, 9):
with nn.parameter_scope(f'layer{i}_0'):
h = resblock_align(h, channel_basic * (2**(i-5)), stride=stride,
test=False, downsample=use_downsample, fix_parameters=fix_parameters)
with nn.parameter_scope(f'layer{i}_1'):
h = resblock_align(h, channel_basic * (2**(i-5)),
stride=(1, 1), test=False, fix_parameters=fix_parameters)
use_downsample = True
stride = (2, 2)
with nn.parameter_scope('mlp1'):
h = F.relu(PF.affine(h, 128, with_bias=True,
fix_parameters=fix_parameters), inplace=True)
with nn.parameter_scope('mlp3'):
h = F.relu(PF.affine(h, 128, with_bias=True,
fix_parameters=fix_parameters), inplace=True)
with nn.parameter_scope('mlp5'):
h = PF.affine(h, 212, with_bias=True, fix_parameters=fix_parameters)
return h
# ------------------------------
# Network for Encoder
# ------------------------------
def resblock_hg(x, in_channels, bottleneck, out_channels, batch_stat=True):
# (bn --> relu --> conv) * 3
with nn.parameter_scope('bn1'):
h = PF.batch_normalization(x, batch_stat=batch_stat)
h = F.relu(h, True)
with nn.parameter_scope('conv1'):
h = PF.convolution(h, bottleneck, kernel=(1, 1))
with nn.parameter_scope('bn2'):
h = PF.batch_normalization(h, batch_stat=batch_stat)
h = F.relu(h, True)
with nn.parameter_scope('conv2'):
h = PF.convolution(h, bottleneck, kernel=(3, 3), pad=(1, 1))
with nn.parameter_scope('bn3'):
h = PF.batch_normalization(h, batch_stat=batch_stat)
h = F.relu(h, True)
with nn.parameter_scope('conv3'):
h = PF.convolution(h, out_channels, kernel=(1, 1))
if in_channels != out_channels:
with nn.parameter_scope('downsample'):
x = PF.convolution(x, out_channels, kernel=(1, 1))
return x + h
def hourglass(x, planes, batch_stat=True):
depth = 4 # hard-coded
ResBlk = partial(resblock_hg,
in_channels=planes,
bottleneck=planes//2,
out_channels=planes,
batch_stat=batch_stat) # set True
ops = [[ResBlk, ResBlk, ResBlk, ResBlk],
[ResBlk, ResBlk, ResBlk],
[ResBlk, ResBlk, ResBlk],
[ResBlk, ResBlk, ResBlk]]
def hg_module(n, x):
with nn.parameter_scope(f"{n - 1}.0.0"):
up1 = ops[n - 1][0](x)
low1 = F.max_pooling(x, kernel=(2, 2), stride=(2, 2))
with nn.parameter_scope(f"{n - 1}.1.0"):
low1 = ops[n - 1][1](low1)
if n > 1:
low2 = hg_module(n - 1, low1)
else:
with nn.parameter_scope(f"{n - 1}.3.0"):
low2 = ops[n - 1][3](low1)
with nn.parameter_scope(f"{n - 1}.2.0"):
low3 = ops[n - 1][2](low2)
up2 = F.interpolate(low3, scale=(2, 2), mode="nearest")
out = up1 + up2
return out
return hg_module(depth, x)
def fc(x, planes, batch_stat=True):
h = PF.convolution(x, planes, kernel=(1, 1))
h = PF.batch_normalization(h, batch_stat=batch_stat)
h = F.relu(h, True)
return h
def stacked_hourglass_net(x,
batch_stat=True,
planes=64,
output_nc=15,
num_stacks=2,
activation='none'):
with nn.parameter_scope('conv1'):
x = PF.convolution(x, planes, kernel=(7, 7), pad=(3, 3), stride=(2, 2))
with nn.parameter_scope('bn1'):
x = PF.batch_normalization(x, batch_stat=batch_stat)
x = F.relu(x, True)
with nn.parameter_scope('layer1'):
x = resblock_hg(x, planes, planes, planes*2, batch_stat=batch_stat)
x = F.max_pooling(x, kernel=(2, 2), stride=(2, 2))
with nn.parameter_scope('layer2'):
x = resblock_hg(x, planes*2, planes*2, planes*4, batch_stat=batch_stat)
with nn.parameter_scope('layer3'):
x = resblock_hg(x, planes*4, planes*2, planes*4, batch_stat=batch_stat)
planes = planes * 4
scores = []
for i in range(1, num_stacks):
# applied only once
with nn.parameter_scope(f'hourglass{i-1}'):
y = hourglass(x, planes, batch_stat=batch_stat)
with nn.parameter_scope('res0'):
y = resblock_hg(y, planes, planes//2, planes,
batch_stat=batch_stat)
with nn.parameter_scope('fc0'):
y = fc(y, planes, batch_stat=batch_stat) # True
score = PF.convolution(y, output_nc, kernel=(1, 1), name='score0')
score.persistent = True
scores.append(score)
fc_ = PF.convolution(y, planes, kernel=(1, 1), name='fc_')
score_ = PF.convolution(score, planes, kernel=(1, 1), name='score_')
x = x + fc_ + score_
with nn.parameter_scope('hourglass1'):
y = hourglass(x, planes, batch_stat=batch_stat)
with nn.parameter_scope('res1'):
y = resblock_hg(y, planes, planes//2, planes, batch_stat=batch_stat)
with nn.parameter_scope('fc1'):
y = fc(y, planes, batch_stat=batch_stat) # mistakenly set as True
score = PF.convolution(y, output_nc, kernel=(1, 1), name='score1')
score.persistent = True
scores.append(score)
return scores
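# Illustrative graph-construction sketch (not part of the original script). The
# (1, 3, 256, 256) RGB input shape is an assumption; with it, the stride-2 stem and the
# max pooling yield 64x64 score maps with output_nc channels for each stack.
def _example_stacked_hourglass():
    x = nn.Variable((1, 3, 256, 256))
    with nn.parameter_scope('encoder_check'):
        scores = stacked_hourglass_net(x, batch_stat=False, output_nc=15, num_stacks=2)
    for score in scores:
        print(score.shape)  # expected (1, 15, 64, 64) per stack
    return scores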
|
import json
import re
def get_children(questions_dict):
children_json = questions_dict.get('claimant_children', '[]')
if isinstance(children_json, dict):
children_json = children_json.get('value', '[]')
return json.loads(children_json)
def get_num_children_living_with(questions_dict, living_arrangement):
assert living_arrangement in ['Lives with you', 'Lives with spouse', 'Lives with both']
children = get_children(questions_dict)
return str(len([child for child in children if child['child_live_with'] == living_arrangement]))
def determine_sole_custody(questions_dict):
child_list = get_children(questions_dict)
return (all([child['child_live_with'] == 'Lives with you' for child in child_list]) or
all([child['child_live_with'] == 'Lives with spouse' for child in child_list]))
def determine_shared_custody(questions_dict):
child_list = get_children(questions_dict)
return any([child['child_live_with'] == 'Lives with both'
for child in child_list])
def determine_split_custody(questions_dict):
child_list = get_children(questions_dict)
with_you = 0
with_spouse = 0
with_both = 0
for child in child_list:
if child['child_live_with'] == 'Lives with you':
with_you += 1
elif child['child_live_with'] == 'Lives with spouse':
with_spouse += 1
elif child['child_live_with'] == 'Lives with both':
with_both += 1
    return ((with_you > 0 and (with_spouse + with_both > 0)) or
            (with_spouse > 0 and (with_you + with_both > 0)))
def determine_child_over_19_supported(questions_dict):
has_children_of_marriage = questions_dict.get('children_of_marriage', '') == 'YES'
has_children_over_19 = questions_dict.get('has_children_over_19', '') == 'YES'
support = json.loads(questions_dict.get('children_financial_support', '[]'))
supporting_children = len(support) > 0 and 'NO' not in support
return has_children_of_marriage and has_children_over_19 and supporting_children
def determine_missing_undue_hardship_reasons(questions_dict):
claiming_undue_hardship = questions_dict.get('claiming_undue_hardship', '') == 'YES'
if claiming_undue_hardship:
at_least_one_of = ["claimant_debts", "claimant_expenses", "supporting_non_dependents", "supporting_dependents",
"supporting_disabled", "undue_hardship"]
for question in at_least_one_of:
value = questions_dict.get(question)
if value:
try:
items = json.loads(value)
for item in items:
for key in item:
if item[key]:
return False
except json.JSONDecodeError:
if value:
return False
return True
else:
return False
def determine_child_support_payor(questions_dict):
payor = questions_dict.get('child_support_payor', '')
if payor == 'Myself (Claimant 1)':
return 'Claimant 1'
elif payor == 'My Spouse (Claimant 2)':
return 'Claimant 2'
elif payor == 'Both myself and my spouse':
return 'both Claimant 1 and Claimant 2'
return ''
def determine_show_fact_sheet_f_you(questions_dict):
"""
If claimant 1 (you) is a payor and makes over $150,000/year, show fact sheet F for claimant 1
"""
payor = determine_child_support_payor(questions_dict)
try:
annual = float(questions_dict.get('annual_gross_income', 0))
except ValueError:
annual = 0
return (payor == 'Claimant 1' or payor == 'both Claimant 1 and Claimant 2') and annual > 150000
def determine_show_fact_sheet_f_spouse(questions_dict):
"""
If claimant 2 (spouse) is a payor and makes over $150,000/year, show fact sheet F for claimant 2
"""
payor = determine_child_support_payor(questions_dict)
try:
annual = float(questions_dict.get('spouse_annual_gross_income', 0))
except ValueError:
annual = 0
return (payor == 'Claimant 2' or payor == 'both Claimant 1 and Claimant 2') and annual > 150000
def determine_child_support_act_requirement(questions_dict):
orders_wanted = json.loads(questions_dict.get('want_which_orders', '[]'))
return 'Child support' in orders_wanted
def determine_missing_extraordinary_expenses(questions_dict):
special_expenses_keys = ["child_care_expenses",
"children_healthcare_premiums",
"health_related_expenses",
"extraordinary_educational_expenses",
"post_secondary_expenses",
"extraordinary_extracurricular_expenses"]
if questions_dict.get('special_extraordinary_expenses') == 'YES':
for special_expense in special_expenses_keys:
value = questions_dict.get(special_expense, 0)
try:
as_num = float(value)
if as_num > 0:
return False
except ValueError:
pass
return True
else:
return False
def determine_show_children_live_with_others(questions_dict):
has_children_of_marriage = questions_dict.get('children_of_marriage', '') == 'YES'
has_children_under_19 = questions_dict.get('has_children_under_19', '') == 'YES'
child_over_19_supported = determine_child_over_19_supported(questions_dict)
return has_children_of_marriage and (has_children_under_19 or child_over_19_supported)
def get_cleaned_response_value(response):
if response is None:
return None
response = response.strip()
search_text = response.replace('also known as', '')
if re.search(r'\w+', search_text):
return response
return None
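# Illustrative, self-contained sketch (not part of the original module). The questions
# dictionary below is synthetic and mirrors the string choices checked by the helpers above.
def _example_custody_determinations():
    questions = {
        'claimant_children': json.dumps([
            {'child_live_with': 'Lives with you'},
            {'child_live_with': 'Lives with both'},
        ]),
        'child_support_payor': 'Myself (Claimant 1)',
    }
    print(get_num_children_living_with(questions, 'Lives with both'))  # '1'
    print(determine_shared_custody(questions))                         # True
    print(determine_split_custody(questions))                          # True
    print(determine_child_support_payor(questions))                    # 'Claimant 1'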
|
"""
File: time_signature_event.py
Purpose: Defines a time signature as an Event.
"""
from timemodel.event import Event
from timemodel.position import Position
class TimeSignatureEvent(Event):
"""
Defines a time signature as an Event.
"""
def __init__(self, time_signature, time):
"""
Constructor.
Args:
time_signature: (TimeSignature) object.
time: Position.
"""
if not isinstance(time, Position):
raise Exception('time argument to TimeSignatureEvent must be Position not \'{0}\'.'.format(type(time)))
Event.__init__(self, time_signature, time)
def __str__(self):
return '[{0}, TimeSignature({1})]'.format(self.time, self.object)
|
# fbdata.anon
# FBDATA
from .models import AnonName
def anon_name():
return '%s %s' % tuple(AnonName.objects.all().order_by('?')[:2])
|
# -*- coding: utf-8 -*-
import django_dynamic_fixture as fixture
from django.test import TestCase
from django.test.utils import override_settings
from readthedocs.projects.models import Project
@override_settings(
USE_SUBDOMAIN=True, PUBLIC_DOMAIN='public.readthedocs.org', SERVE_PUBLIC_DOCS=True,
)
class RedirectSingleVersionTests(TestCase):
def setUp(self):
self.pip = fixture.get(Project, slug='pip', single_version=True, main_language_project=None)
def test_docs_url_generation(self):
with override_settings(USE_SUBDOMAIN=False):
self.assertEqual(
self.pip.get_docs_url(),
'http://readthedocs.org/docs/pip/',
)
with override_settings(USE_SUBDOMAIN=True):
self.assertEqual(
self.pip.get_docs_url(),
'http://pip.public.readthedocs.org/',
)
self.pip.single_version = False
with override_settings(USE_SUBDOMAIN=False):
self.assertEqual(
self.pip.get_docs_url(),
'http://readthedocs.org/docs/pip/en/latest/',
)
with override_settings(USE_SUBDOMAIN=True):
self.assertEqual(
self.pip.get_docs_url(),
'http://pip.public.readthedocs.org/en/latest/',
)
|
from viewmodels.shared.viewmodel import ViewModelBase
class RegisterViewModel(ViewModelBase):
pass
|
import numpy as np
import pandas as pd
import keras.backend as K
from keras.models import save_model
from sklearn.metrics import roc_auc_score
from deepsky.gan import normalize_multivariate_data
from deepsky.metrics import brier_score, brier_skill_score
from sklearn.linear_model import LogisticRegression
from deepsky.models import hail_conv_net, LogisticPCA, LogisticGAN, save_logistic_gan
from deepsky.data import load_storm_patch_data
import pickle
import inspect
import itertools as it
from os.path import join, exists
from os import mkdir, environ
import yaml
import argparse
import traceback
from multiprocessing import Pool, Manager
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Config yaml file")
parser.add_argument("-p", "--proc", type=int, default=1, help="Number of processors")
args = parser.parse_args()
config_file = args.config
with open(config_file) as config_obj:
        config = yaml.safe_load(config_obj)
if not exists(config["out_path"]):
mkdir(config["out_path"])
all_param_combos = {}
for model_name in config["model_names"]:
param_names = sorted(list(config[model_name].keys()))
all_param_combos[model_name] = pd.DataFrame(list(it.product(*[config[model_name][conv_name]
for conv_name in param_names])),
columns=param_names)
all_param_combos[model_name].to_csv(join(config["out_path"], model_name + "_param_combos.csv"), index_label="Index")
output_config = config["output"]
sampling_config = config["sampling"]
data_path = config["data_path"]
input_variables = config["input_variables"]
print("Loading data")
storm_data, storm_meta = load_storm_patch_data(data_path, input_variables, args.proc)
storm_norm_data, storm_scaling_values = normalize_multivariate_data(storm_data)
storm_scaling_values.to_csv(join(config["out_path"], "scaling_values.csv"), index_label="Index")
storm_flat_data = storm_norm_data.reshape(storm_norm_data.shape[0],
storm_norm_data.shape[1] * storm_norm_data.shape[2],
storm_norm_data.shape[3])
storm_mean_data = storm_flat_data.mean(axis=1)
output_data, output_meta = load_storm_patch_data(data_path,
[output_config["variable"],
output_config["mask"]], args.proc)
max_hail = np.array([output_data[i, :, :, 0][output_data[i, :, :, 1] > 0].max()
for i in range(output_data.shape[0])])
max_hail *= 1000
hail_labels = np.where(max_hail >= output_config["threshold"], 1, 0)
del output_data
del output_meta
del storm_data
print("Severe hail events: ", np.count_nonzero(hail_labels == 1))
#evaluate_conv_net(storm_norm_data, storm_meta, hail_labels,
# sampling_config, all_param_combos["conv_net"], config["out_path"])
#evaluate_sklearn_model("logistic_mean", LogisticRegression, storm_mean_data, storm_meta,
# hail_labels, sampling_config,
# all_param_combos["logistic_mean"], config["out_path"])
#evaluate_sklearn_model("logistic_pca", LogisticPCA, storm_flat_data, storm_meta,
# hail_labels, sampling_config,
# all_param_combos["logistic_pca"], config["out_path"])
evaluate_sklearn_model("logistic_gan", LogisticGAN,
storm_norm_data, storm_meta,
hail_labels, sampling_config,
all_param_combos["logistic_gan"], config["out_path"])
return
def train_split_generator(values, train_split, num_samples):
split_index = int(np.round(train_split * values.size))
for n in range(num_samples):
shuffled_values = np.random.permutation(values)
train_values = shuffled_values[:split_index]
test_values = shuffled_values[split_index:]
yield train_values, test_values
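# Illustrative, standalone sketch (not part of the original script): it shows how
# train_split_generator partitions a set of unique run dates into train/test splits.
# The date strings are synthetic.
def _example_train_split():
    dates = np.array(["20160501", "20160502", "20160503", "20160504", "20160505"])
    for train_dates, test_dates in train_split_generator(dates, train_split=0.6, num_samples=2):
        print("train:", train_dates, "test:", test_dates)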
def train_single_conv_net(config_num, device_queue, conv_net_params, out_path):
device = -1
try:
print("Starting process ", config_num)
device = int(device_queue.get())
print("Process {0:d} using GPU {1:d}".format(config_num, device))
environ["CUDA_VISIBLE_DEVICES"] = "{0:d}".format(device)
param_scores = {}
train_data = np.load(join(out_path, "param_train_data.npy"))
train_labels = np.load(join(out_path, "param_train_labels.npy"))
val_data = np.load(join(out_path, "param_val_data.npy"))
val_labels = np.load(join(out_path, "param_val_labels.npy"))
session = K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=False,
gpu_options=K.tf.GPUOptions(allow_growth=True),
log_device_placement=False))
K.set_session(session)
print("Training ", config_num, device)
hail_conv_net_model = hail_conv_net(**conv_net_params)
hail_conv_net_model.fit(train_data,
train_labels,
batch_size=conv_net_params["batch_size"],
epochs=conv_net_params["num_epochs"], verbose=2)
val_preds = hail_conv_net_model.predict(val_data).ravel()
param_scores["Brier Skill Score"] = brier_skill_score(val_labels,
val_preds)
param_scores["AUC"] = roc_auc_score(val_labels,
val_preds)
print("Scores ", config_num, device, param_scores["Brier Skill Score"], param_scores["AUC"])
session.close()
del session
device_queue.put(device)
return param_scores, config_num
except Exception as e:
if device >= 0:
device_queue.put(device)
print(traceback.format_exc())
raise e
def train_single_sklearn_model(model_name, model_obj, config_num, params, out_path,
device_queue=None):
device = -1
try:
print("Starting process ", config_num)
if device_queue is not None:
device = int(device_queue.get())
print("Process {0:d} using GPU {1:d}".format(config_num, device))
environ["CUDA_VISIBLE_DEVICES"] = "{0:d}".format(device)
param_scores = {}
train_data = np.load(join(out_path, "param_train_data.npy"))
train_labels = np.load(join(out_path, "param_train_labels.npy"))
val_data = np.load(join(out_path, "param_val_data.npy"))
val_labels = np.load(join(out_path, "param_val_labels.npy"))
session = None
if device_queue is not None:
session = K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=False,
gpu_options=K.tf.GPUOptions(allow_growth=True),
log_device_placement=False))
K.set_session(session)
print("Training ", model_name, config_num, device)
        model_args = inspect.getfullargspec(model_obj.__init__).args
if "index" in model_args:
hail_model = model_obj(index=config_num, **params)
else:
hail_model = model_obj(**params)
hail_model.fit(train_data,
train_labels)
val_preds = hail_model.predict_proba(val_data)[:, 1]
param_scores["Brier Skill Score"] = brier_skill_score(val_labels,
val_preds)
param_scores["AUC"] = roc_auc_score(val_labels,
val_preds)
print("Scores ", config_num, device, param_scores["Brier Skill Score"], param_scores["AUC"])
if device_queue is not None and device >= 0:
session.close()
del session
device_queue.put(device)
del hail_model
return param_scores, config_num
except Exception as e:
if device_queue is not None and device >= 0:
device_queue.put(device)
print(traceback.format_exc())
raise e
def train_best_sklearn_model(model_name, model_obj, best_combo, n, train_labels,
test_meta, test_labels, sample_scores, out_path):
try:
print("Train " + model_name)
train_data = np.load(join(out_path, "best_train_data.npy"))
test_data = np.load(join(out_path, "best_test_data.npy"))
environ["CUDA_VISIBLE_DEVICES"] = "{0:d}".format(0)
session = K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=False,
gpu_options=K.tf.GPUOptions(allow_growth=True),
log_device_placement=False))
K.set_session(session)
if model_name == "logistic_gan":
hail_model = model_obj(index=n, **best_combo)
else:
hail_model = model_obj(**best_combo)
hail_model.fit(train_data,
train_labels)
print("Scoring " + model_name)
test_preds = hail_model.predict_proba(test_data)[:, 1]
test_pred_frame = test_meta.copy(deep=True)
test_pred_frame["conv_net"] = test_preds
test_pred_frame["label"] = test_labels
test_pred_frame.to_csv(join(out_path, "predictions_{0}_sample_{1:03d}.csv".format(model_name, n)),
index_label="Index")
sample_scores.loc[n, "Brier Score"] = brier_score(test_labels, test_preds)
sample_scores.loc[n, "Brier Score Climo"] = brier_score(test_labels,
test_labels.mean())
sample_scores.loc[n, "Brier Skill Score"] = brier_skill_score(test_labels, test_preds)
sample_scores.loc[n, "AUC"] = roc_auc_score(test_labels, test_preds)
if model_name == "logistic_gan":
save_logistic_gan(hail_model, out_path)
else:
with open(join(out_path, "hail_{0}_sample_{1:03d}.pkl".format(model_name, n)), "wb") as model_file:
pickle.dump(hail_model, model_file, pickle.HIGHEST_PROTOCOL)
session.close()
del session
del hail_model
return sample_scores
except Exception as e:
print(traceback.format_exc())
raise e
def train_best_conv_net(best_combo, n, train_labels, test_meta, test_labels, sample_scores, out_path):
try:
print("Train Conv Net")
train_data = np.load(join(out_path, "best_train_data.npy"))
test_data = np.load(join(out_path, "best_test_data.npy"))
environ["CUDA_VISIBLE_DEVICES"] = "{0:d}".format(0)
session = K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=False,
gpu_options=K.tf.GPUOptions(allow_growth=True),
log_device_placement=False))
K.set_session(session)
hail_conv_net_model = hail_conv_net(**best_combo)
hail_conv_net_model.fit(train_data,
train_labels,
batch_size=best_combo["batch_size"],
epochs=best_combo["num_epochs"], verbose=2)
print("Scoring Conv Net")
test_preds = hail_conv_net_model.predict(test_data).ravel()
test_pred_frame = test_meta.copy(deep=True)
test_pred_frame["conv_net"] = test_preds
test_pred_frame["label"] = test_labels
test_pred_frame.to_csv(join(out_path, "predictions_conv_net_sample_{0:03d}.csv".format(n)), index_label="Index")
sample_scores.loc[n, "Brier Score"] = brier_score(test_labels, test_preds)
sample_scores.loc[n, "Brier Score Climo"] = brier_score(test_labels,
test_labels.mean())
sample_scores.loc[n, "Brier Skill Score"] = brier_skill_score(test_labels, test_preds)
sample_scores.loc[n, "AUC"] = roc_auc_score(test_labels, test_preds)
save_model(hail_conv_net_model, join(out_path, "hail_conv_net_sample_{0:03d}.h5".format(n)))
session.close()
del session
del hail_conv_net_model
return sample_scores
except Exception as e:
print(traceback.format_exc())
raise e
def evaluate_conv_net(storm_norm_data, storm_meta, hail_labels,
sampling_config, param_combos, out_path, num_gpus=8):
"""
    Evaluate convolutional neural network hyperparameter combinations on resampled train/test splits.
    Args:
        storm_norm_data: normalized storm patch data array (samples, y, x, variables).
        storm_meta: DataFrame of storm metadata with "run_dates" and "members" columns.
        hail_labels: binary (0/1) severe hail labels for each storm patch.
        sampling_config: dict with "random_seed", "train_split", "member_split", and "num_samples".
        param_combos: DataFrame of hyperparameter combinations to evaluate.
        out_path: directory where scores, predictions, and trained models are written.
        num_gpus: number of GPUs available for parallel training.
    Returns:
        None. Scores and best-parameter tables are written to out_path as CSV files.
"""
unique_dates = np.unique(storm_meta["run_dates"])
np.random.seed(sampling_config["random_seed"])
storm_sampler = train_split_generator(unique_dates, sampling_config["train_split"],
sampling_config["num_samples"])
best_param_combos = []
sample_scores = pd.DataFrame(index=np.arange(sampling_config["num_samples"]),
columns=["Brier Score", "Brier Score Climo", "Brier Skill Score", "AUC"],
dtype=float)
for n in range(sampling_config["num_samples"]):
environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
train_dates, test_dates = next(storm_sampler)
print(train_dates, test_dates)
train_indices = np.where(np.in1d(storm_meta["run_dates"], train_dates))[0]
test_indices = np.where(np.in1d(storm_meta["run_dates"], test_dates))[0]
all_members = np.unique(storm_meta.loc[train_indices, "members"])
np.random.shuffle(all_members)
member_split = int(np.round(all_members.size * sampling_config["member_split"]))
train_members = all_members[:member_split]
val_members = all_members[member_split:]
print(train_members, val_members)
train_member_indices = np.where(np.in1d(storm_meta.loc[train_indices, "members"], train_members))[0]
val_member_indices = np.where(np.in1d(storm_meta.loc[train_indices, "members"], val_members))[0]
param_scores = pd.DataFrame(index=np.arange(param_combos.shape[0]),
columns=["Brier Skill Score", "AUC"], dtype=float)
score_outputs = []
param_train_data = storm_norm_data[train_indices][train_member_indices]
param_train_labels = hail_labels[train_indices][train_member_indices]
param_val_data = storm_norm_data[train_indices][val_member_indices]
param_val_labels = hail_labels[train_indices][val_member_indices]
print("Saving training data")
np.save(join(out_path, "param_train_data.npy"), param_train_data)
np.save(join(out_path, "param_train_labels.npy"), param_train_labels)
np.save(join(out_path, "param_val_data.npy"), param_val_data)
np.save(join(out_path, "param_val_labels.npy"), param_val_labels)
gpu_manager = Manager()
gpu_queue = gpu_manager.Queue()
n_pool = Pool(num_gpus, maxtasksperchild=1)
for g in range(num_gpus):
gpu_queue.put(g)
for c in param_combos.index.values:
print(c)
score_outputs.append(n_pool.apply_async(train_single_conv_net,
(c, gpu_queue, param_combos.loc[c].to_dict(), out_path)))
n_pool.close()
n_pool.join()
#for c in param_combos.index.values:
# score_outputs.append(train_single_conv_net(c, gpu_queue, param_combos.loc[c].to_dict(), out_path))
for async_out in score_outputs:
out = async_out.get()
param_scores.loc[out[1]] = out[0]
del n_pool
del gpu_queue
del gpu_manager
best_config = param_scores["Brier Skill Score"].idxmax()
best_combo = param_combos.loc[best_config].to_dict()
param_scores.to_csv(join(out_path, "conv_net_param_scores_sample_{0:03d}.csv".format(n)),
index_label="Param Combo")
best_param_combos.append(best_config)
print("Best Config")
print(param_combos.loc[best_config])
pool = Pool(1)
np.save(join(out_path, "best_train_data.npy"), storm_norm_data[train_indices])
np.save(join(out_path, "best_test_data.npy"), storm_norm_data[test_indices])
sample_scores = pool.apply(train_best_conv_net, (best_combo, n,
hail_labels[train_indices],
storm_meta.loc[test_indices],
hail_labels[test_indices],
sample_scores, out_path))
pool.close()
pool.join()
del pool
sample_scores.to_csv(join(out_path, "conv_net_sample_scores.csv"), index_label="Sample")
best_config_frame = param_combos.loc[best_param_combos]
best_config_frame = best_config_frame.reset_index()
best_config_frame.to_csv(join(out_path, "conv_net_best_params.csv"), index_label="Sample")
return
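# Illustrative call of evaluate_conv_net (a sketch only; the sampling_config keys
# mirror what the function reads, while the hyperparameter columns other than
# "batch_size" and "num_epochs" are hypothetical placeholders for whatever
# hail_conv_net accepts):
#
#   sampling_config = {"random_seed": 505, "train_split": 0.7,
#                      "num_samples": 10, "member_split": 0.8}
#   param_combos = pd.DataFrame({"num_filters": [16, 32],
#                                "filter_width": [3, 5],
#                                "batch_size": [256, 256],
#                                "num_epochs": [10, 10]})
#   evaluate_conv_net(storm_norm_data, storm_meta, hail_labels,
#                     sampling_config, param_combos, "./conv_net_out", num_gpus=8)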
def evaluate_sklearn_model(model_name, model_obj, storm_data, storm_meta, hail_labels,
sampling_config, param_combos, out_path, num_gpus=8):
unique_dates = np.unique(storm_meta["run_dates"])
np.random.seed(sampling_config["random_seed"])
storm_sampler = train_split_generator(unique_dates, sampling_config["train_split"],
sampling_config["num_samples"])
best_param_combos = []
sample_scores = pd.DataFrame(index=np.arange(sampling_config["num_samples"]),
columns=["Brier Score", "Brier Score Climo", "Brier Skill Score", "AUC"],
dtype=float)
for n in range(sampling_config["num_samples"]):
train_dates, test_dates = next(storm_sampler)
train_indices = np.where(np.in1d(storm_meta["run_dates"], train_dates))[0]
test_indices = np.where(np.in1d(storm_meta["run_dates"], test_dates))[0]
all_members = np.unique(storm_meta.loc[train_indices, "members"])
np.random.shuffle(all_members)
member_split = int(np.round(all_members.size * sampling_config["member_split"]))
train_members = all_members[:member_split]
val_members = all_members[member_split:]
train_member_indices = np.where(np.in1d(storm_meta.loc[train_indices, "members"], train_members))[0]
val_member_indices = np.where(np.in1d(storm_meta.loc[train_indices, "members"], val_members))[0]
param_scores = pd.DataFrame(index=np.arange(param_combos.shape[0]),
columns=["Brier Skill Score", "AUC"], dtype=float)
score_outputs = []
param_train_data = storm_data[train_indices][train_member_indices]
param_train_labels = hail_labels[train_indices][train_member_indices]
param_val_data = storm_data[train_indices][val_member_indices]
param_val_labels = hail_labels[train_indices][val_member_indices]
print("Saving training data")
np.save(join(out_path, "param_train_data.npy"), param_train_data)
np.save(join(out_path, "param_train_labels.npy"), param_train_labels)
np.save(join(out_path, "param_val_data.npy"), param_val_data)
np.save(join(out_path, "param_val_labels.npy"), param_val_labels)
gpu_manager = Manager()
gpu_queue = gpu_manager.Queue()
n_pool = Pool(num_gpus, maxtasksperchild=1)
for g in range(num_gpus):
gpu_queue.put(g)
for c in param_combos.index.values:
print(c)
score_outputs.append(n_pool.apply_async(train_single_sklearn_model,
(model_name, model_obj, c, param_combos.loc[c].to_dict(),
out_path),
dict(device_queue=gpu_queue)))
n_pool.close()
n_pool.join()
for async_out in score_outputs:
out = async_out.get()
param_scores.loc[out[1]] = out[0]
del n_pool
del gpu_queue
del gpu_manager
#for c in param_combos.index:
# print(param_combos.loc[c])
# model_inst = model_obj(**param_combos.loc[c].to_dict())
# model_inst.fit(storm_data[train_indices][train_member_indices],
# hail_labels[train_indices][train_member_indices])
# val_preds = model_inst.predict_proba(storm_data[train_indices][val_member_indices])[:, 1]
# param_scores.loc[c, "Brier Skill Score"] = brier_skill_score(hail_labels[train_indices][val_member_indices],
# val_preds)
# param_scores.loc[c, "AUC"] = roc_auc_score(hail_labels[train_indices][val_member_indices],
# val_preds)
# if param_scores.loc[c, "Brier Skill Score"] > best_score:
# best_config = c
# best_score = param_scores.loc[c, "Brier Skill Score"]
# del model_inst
param_scores.to_csv(join(out_path, "{0}_param_scores_sample_{1:03d}.csv".format(model_name, n)),
index_label="Param Combo")
best_config = param_scores["Brier Skill Score"].idxmax()
best_combo = param_combos.loc[best_config].to_dict()
best_param_combos.append(best_config)
print("Best Config")
print(param_combos.loc[best_config])
pool = Pool(1)
np.save(join(out_path, "best_train_data.npy"), storm_data[train_indices])
np.save(join(out_path, "best_test_data.npy"), storm_data[test_indices])
sample_scores = pool.apply(train_best_sklearn_model, (model_name, model_obj, best_combo, n,
hail_labels[train_indices],
storm_meta.loc[test_indices],
hail_labels[test_indices],
sample_scores, out_path))
pool.close()
pool.join()
del pool
sample_scores.to_csv(join(out_path, "{0}_sample_scores.csv".format(model_name)), index_label="Sample")
#print("Train Best " + model_name)
#model_inst = model_obj(**param_combos.loc[best_config].to_dict())
#model_inst.fit(storm_data[train_indices],
# hail_labels[train_indices])
#print("Scoring " + model_name)
#test_pred_frame = storm_meta.loc[test_indices]
#test_pred_frame[model_name] = model_inst.predict_proba(storm_data[test_indices])[:, 1]
#test_pred_frame["label"] = hail_labels[test_indices]
#test_preds = test_pred_frame[model_name].values
#test_pred_frame = pd.DataFrame({"indices": test_indices,
# "lon": storm_centers[test_indices, 0],
# "lat": storm_centers[test_indices, 1],
# "run_dates": storm_run_dates[test_indices],
# "valid_dates": storm_valid_dates[test_indices],
# "members": storm_members[test_indices],
# model_name: test_preds,
# "label": hail_labels[test_indices]},
#columns=["indices", "lon", "lat", "dates", "members", "conv_net", "label"])
#test_pred_frame.to_csv(join(out_path, "predictions_{0}_sample_{1:03d}.csv".format(model_name, n)), index_label="Index")
#sample_scores.loc[n, "Brier Score"] = brier_score(hail_labels[test_indices], test_preds)
#sample_scores.loc[n, "Brier Score Climo"] = brier_score(hail_labels[test_indices],
# hail_labels[test_indices].mean())
#sample_scores.loc[n, "Brier Skill Score"] = brier_skill_score(hail_labels[test_indices], test_preds)
#sample_scores.loc[n, "AUC"] = roc_auc_score(hail_labels[test_indices], test_preds)
#
#del model_inst
#sample_scores.to_csv(join(out_path, "{0}_sample_scores.csv".format(model_name)), index_label="Sample")
best_config_frame = param_combos.loc[best_param_combos]
best_config_frame = best_config_frame.reset_index()
best_config_frame.to_csv(join(out_path, "{0}_best_params.csv".format(model_name)), index_label="Sample")
return
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import threading
from src.client import test_sdk_client
import src.helpers
import src.util
import src.algo_multilateral_arbitrage
if __name__ == '__main__':
logger = src.util.base_logger(__name__, "test-logs/debug.log")
test_algo_name = "A-tests-multi-lateral"
db_connector = src.helpers.DBconnection()
configuration = src.helpers.load_config(test_algo_name, db_connector._connection)
# instantiate arbitrage algorithm
instance = src.algo_multilateral_arbitrage.ArbitrageInstance(
test_algo_name,
simulation=True,
configs_file="configs-link-eur.yaml",
configuration=configuration,
client=test_sdk_client.TestClient(logger),
logger=logger,
db_connector=db_connector
)
instance.trade_algorithm()
time.sleep(0.8)
del instance
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetLinkAssociationResult',
'AwaitableGetLinkAssociationResult',
'get_link_association',
'get_link_association_output',
]
@pulumi.output_type
class GetLinkAssociationResult:
def __init__(__self__):
pass
class AwaitableGetLinkAssociationResult(GetLinkAssociationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLinkAssociationResult(
)
def get_link_association(device_id: Optional[str] = None,
global_network_id: Optional[str] = None,
link_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLinkAssociationResult:
"""
The AWS::NetworkManager::LinkAssociation type associates a link to a device. The device and link must be in the same global network and the same site.
:param str device_id: The ID of the device
:param str global_network_id: The ID of the global network.
:param str link_id: The ID of the link
"""
__args__ = dict()
__args__['deviceId'] = device_id
__args__['globalNetworkId'] = global_network_id
__args__['linkId'] = link_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:networkmanager:getLinkAssociation', __args__, opts=opts, typ=GetLinkAssociationResult).value
return AwaitableGetLinkAssociationResult()
@_utilities.lift_output_func(get_link_association)
def get_link_association_output(device_id: Optional[pulumi.Input[str]] = None,
global_network_id: Optional[pulumi.Input[str]] = None,
link_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLinkAssociationResult]:
"""
The AWS::NetworkManager::LinkAssociation type associates a link to a device. The device and link must be in the same global network and the same site.
:param str device_id: The ID of the device
:param str global_network_id: The ID of the global network.
:param str link_id: The ID of the link
"""
...
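# Illustrative usage (a sketch; the identifier values below are placeholders):
#
#   association = get_link_association(device_id="device-07f6fd08867EXAMPLE",
#                                      global_network_id="global-network-0123EXAMPLE",
#                                      link_id="link-1111EXAMPLE")
#
#   # The *_output variant accepts pulumi.Input values and returns a pulumi.Output:
#   association_output = get_link_association_output(device_id=device_id_output,
#                                                    global_network_id=network_id_output,
#                                                    link_id=link_id_output)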
|
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/predict_genes.py - wrapper for running gene predictions
==============================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script is a wrapper for running gene predictions with exonerate and/or
genewise. Given a query peptide sequence and a genomic region, it can probe
the region for the approximate gene location, scan it incrementally and
recursively, and refine the final prediction, optionally comparing the
predicted exon structure against known exon boundaries of the query.
Usage
-----
Example::
python gpipe/predict_genes.py --help
Type::
python gpipe/predict_genes.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import tempfile
import time
import subprocess
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.IndexedFasta as IndexedFasta
import alignlib_lite
import CGAT.PredictionParser as PredictionParser
import CGAT.Exons as Exons
# import all, sort out names later
from CGAT.Predictor2 import *
USAGE = """python %s [OPTIONS] peptide genome
Version: $Id: gpipe/predict_genes.py 2462 2009-01-28 10:18:22Z andreas $
Wrapper for running gene predictions.
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-i, --bracket-increment= residues by how much to increase the genomic region to scan.
-b, --query-border= residues allowed to be missing from query at either end.
-g, --genome-file= pattern for filenames with the genomic DNA (FASTA).
-e, --exit-identical exit, if there was no change from previous run
-m, --min-score= minimum score
-p, --method= prediction method to use {genewise|exonerate}
--incremental do incremental search for genes.
-r, --recursive do recursive search for genes
--refinement do refinement of final prediction
--probe do probing for gene region
--probe-options set probing options (for the expert)
-x, --exons-file filename with exon boundaries of queries.
-a, --mask-probe= mask in probing step. Possible maskers are [seg,bias]
-f, --format input format
--keep-temp do not delete temporary files (for debugging purposes).
--graph-cutoff= in graph format, stop processing after this.
--peptides-fasta-file= filename with peptide sequences
""" % sys.argv[0]
HEADER = """# QUERY: 1 query id
# SBJCT: 2 sbjct id
# SCORE: 3 genewise alignment score
# QFROM: 4 query first residue
# QTO: 5 query last residue
# QALI: 6 query alignment
# SBJCT: 7 sbjct
# SFROM: 8 sbjct first residue
# STO: 9 sbjct last residue
# SALI: 10 sbjct alignment
# QLEN: 11 length of query
# CQUERY: 12 coverage of query (in percent)
# NGAPS: 13 number of gaps in alignment
# NFR: 14 number of frame-shifts
# NINTRON: 15 number of introns
# NPHASE0: 16 number of phase 0 introns
# NPHASE1: 17 number of phase 1 introns
# NPHASE2: 18 number of phase 2 introns
# NSTOP: 19 number of stop codons in exons
# PIDE: 20 percent identity
# PSIM: 21 percent similarity
# PEP: 22 predicted peptide sequence
# SGFROM: 23 sbjct: genomic region first residue
# SGTO: 24 sbjct: genomic region last residue
# GALI: 25 peptide to query alignment
# NERROR: 26 number of errors in genewise parsing"""
SHORT_HEADER = """# QUERY\tSBJCT\tSCORE\tQFROM\tQTO\tSBJCT\tSFROM\tSTO\tSALI\tQLEN\tCQUERY\tNGAPS\tNFR\tNINTRON\tNPHASE0\tNPHAS1\tNPHASE2\tNSTOP\tPIDE\tPSIM\tPEP\tSGFROM\tSGTO\tGALI\tNERROR"""
global_options = {}
class Masker:
mLogLevel = 3
mExecutable = "biasdb.pl"
mOptions = ""
def __init__(self):
pass
def __call__(self, peptide_sequence):
"""mask peptide sequence
"""
Masker.__init__(self)
outfile, filename_peptide = tempfile.mkstemp()
os.write(outfile, ">test\n%s\n" % (peptide_sequence))
os.close(outfile)
outfile, filename_output = tempfile.mkstemp()
os.close(outfile)
statement = string.join(map(str, (
self.mExecutable,
filename_peptide,
self.mOptions
)), " ")
if self.mLogLevel >= 3:
print "# statement: %s" % statement
sys.stdout.flush()
p = subprocess.Popen(statement,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
(file_stdout, file_stdin, file_stderr) = (p.stdout, p.stdin, p.stderr)
lines = file_stdout.readlines()
lines_stderr = file_stderr.readlines()
exit_code = p.wait()
if exit_code:
print "# ERROR: %s returned with non-zero exit status %s" % (self.mExecutable, str(exit_code))
for line in lines_stderr:
print "#", line[:-1]
sys.stdout.flush()
return None
os.remove(filename_peptide)
if self.mLogLevel >= 3:
print"# received %i lines from %s" % (len(lines), self.mExecutable)
print lines
masked_sequence = re.sub("\s", "", string.join(lines[1:], ""))
return masked_sequence
class MaskerBias (Masker):
mLogLevel = 0
mExecutable = "biasdb.pl"
mOptions = ""
class MaskerSeg (Masker):
mLogLevel = 0
mExecutable = "seg"
mOptions = "12 2.2 2.5 -x"
# --------------------------------------------------------------------------
class TranscriptPredictor(Experiment):
mSensitivityLevelStart = 0
def __init__(self, filename_peptides, filename_genome):
# border to be added at the refinement step
self.mBorderRefinement = 50000
# border to be added at the refinement step, if
# Terminus has been reached
self.mBorderRefinementSmall = 300
# loglevel of module
self.mLogLevel = 0
# exit, if no change found in successive runs
self.mExitIfIdentical = 0
# refine predictions
self.mDoRefinement = 0
# run recursive refinement
self.mDoRecursive = 0
# do probing of alignments
self.mDoProbe = 0
# run incremental scanning for genes
self.mDoIncremental = 0
# residues which are permitted to be missing
# at termini of the query.
self.mQueryBorder = 0
# mask in probing step
self.mMaskProbe = []
self.mRefinementMinOverlapResidues = 20
# distance between neighbouring predictions to be combined
self.mRefinementMinDistanceNucleotides = 100
self.mRefinementMaxPermissiveOverlap = 10
# availability of exon boundaries
self.mExons = None
# bracket in which prediction to run
self.mBracketFrom = 0
self.mBracketFroend = 0
# at least 100 residues for prediction
self.mMinRegionLength = 100
def SetFilenamePeptides(self, filename_peptides):
self.mFilenamePeptides = filename_peptides
def SetFilenameGenome(self, filename_genome):
self.mFilenameGenome = filename_genome
# ------------------------------------------------------------------------
def RunIncremental(self, bracket_from, bracket_to, bracket_from_end, bracket_to_end):
"""Run predictions."""
t0 = time.time()
if self.mLogLevel >= 1:
print "# INCREMENTAL: checking region: %i-%i (%i-%i)" % (self.mSbjctFrom + bracket_from,
self.mSbjctFrom +
bracket_to,
self.mSbjctFrom +
bracket_from_end,
self.mSbjctFrom + bracket_to_end)
last_result = None
niterations = 0
self.mSensitivityLevel = self.mSensitivityLevelStart
nresults = 0
query_coverage = 0.0
# current increment to use
num_increment = 0
key = "%s_vs_%s_%s_%i_%i" % (self.mQueryToken,
self.mSbjctToken, self.mSbjctStrand,
bracket_from + self.mSbjctFrom, bracket_to + self.mSbjctFrom)
last_mode = None
last_coverage = 0
while 1:
t1 = time.time()
# check, whether we have reached right/left border
left_ok = bracket_from == bracket_from_end
right_ok = bracket_to == bracket_to_end
niterations += 1
if self.mLogLevel >= 2:
print "# INCREMENTAL: started iteration %i at %s" % (niterations, time.asctime(time.localtime(time.time())))
result = self.RunSinglePrediction(bracket_from, bracket_to)
if not result:
return None, bracket_from, bracket_to
if result.GetNumMatches() == 0:
print "# WARNING: received empty result."
# increase sensitivity, if there are more levels available and
# the threshold permits it (is 0)
if (self.mSensitivityLevel < len(self.mLevelsSensitivity) - 1) and\
(self.mLevelsSensitivity[self.mSensitivityLevel + 1][0] > 0):
self.mSensitivityLevel += 1
if self.mLogLevel >= 2:
print "# Increasing sensitivity to %i." % (self.mSensitivityLevel)
last_mode = "sensitivity"
continue
else:
return None, bracket_from, bracket_to
best_entry = result.GetBestMatch()
if self.mLogLevel >= 1:
print "# INCREMENTAL: key=%s, iteration=%i, sensivity=%i, hits=%i, score=%5.2f, coverage=%5.2f, time=%i" % \
(self.mKey, niterations, self.mSensitivityLevel,
result.GetNumMatches(
), best_entry.score, best_entry.mQueryCoverage,
time.time() - t1)
###################################################################
# dump new results
if last_result != result:
last_coverage = best_entry.mQueryCoverage
result.Write()
last_result = result
nresults += 1
if last_coverage > best_entry.mQueryCoverage:
print "# WARNING: coverage has decreased."
else:
if self.mLogLevel >= 2:
print "# WARNING: received identical result."
###################################################################
# decide, if to exit.
if best_entry.score < self.mMinScore:
if self.mLogLevel >= 2:
print "# EXITING: score below minimum"
return None, bracket_from, bracket_to
# stop run, if query has been found.
if best_entry.mQueryFrom == 1 and best_entry.mQueryTo == best_entry.mQueryLength:
if self.mLogLevel >= 2:
print "# EXITING: 100% coverage"
break
if best_entry.mQueryFrom < 1 + self.mQueryBorder and \
best_entry.mQueryTo > best_entry.mQueryLength - self.mQueryBorder:
if self.mLogLevel >= 2:
print "# EXITING: almost complete entry."
break
# when bracket increment did produce no results, exit.
if last_mode == "brackets" and \
self.mSensitivityLevel == len(self.mLevelsSensitivity) - 1 and \
self.mExitIfIdentical:
if self.mLogLevel >= 2:
print "# EXITING: no change after bracket enlargement."
return result, bracket_from, bracket_to
###############################################################
# decide what to change from previous run.
# increase sensitivity, if there are more levels available and
# the threshold permits it (is larger than coverage)
if last_mode != "sensitvity":
last_mode = "sensitivity"
if self.mSensitivityLevel < len(self.mLevelsSensitivity) - 1 and \
(self.mLevelsSensitivity[self.mSensitivityLevel + 1][0] <= best_entry.mQueryCoverage):
self.mSensitivityLevel += 1
if self.mLogLevel >= 2:
print "# Increasing sensitivity to %i" % (self.mSensitivityLevel)
continue
# increase region to be searched.
if last_mode != "brackets" and \
self.mBracketIncrements:
if left_ok and right_ok:
if self.mLogLevel >= 2:
print "# EXITING: maximal region searched."
break
# stop extension, if peptide is (almost) complete at either
# terminus
if best_entry.mQueryTo > best_entry.mQueryLength - self.mQueryBorder:
bracket_to = min(bracket_to_end,
best_entry.mSbjctGenomeTo - self.mSbjctFrom + 10)
bracket_to_end = bracket_to
if best_entry.mQueryFrom < 1 + self.mQueryBorder:
bracket_from = max(0,
best_entry.mSbjctGenomeFrom - self.mSbjctFrom - 10)
bracket_from_end = bracket_from
bracket_from = max(bracket_from - self.mBracketIncrements[num_increment],
bracket_from_end)
bracket_to = min(bracket_to + self.mBracketIncrements[num_increment],
bracket_to_end)
if self.mLogLevel >= 2:
print "# Increasing brackets by %i" % self.mBracketIncrements[num_increment]
# change increments
num_increment += 1
if num_increment >= len(self.mBracketIncrements):
num_increment = 0
last_mode = "brackets"
continue
if self.mLogLevel >= 2:
print " EXITING: all possibilities exhausted."
break
if result:
if self.mLogLevel >= 1:
if result.GetNumMatches() > 0:
best_entry = result.GetBestMatch()
coverage = best_entry.mQueryCoverage
pide = best_entry.mPercentIdentity
else:
coverage = 0
pide = 0
print "# INCREMENTAL: key=%s, iteration=%i, sensitivity=%i, results=%i, hits=%i, coverage=%5.2f, pid=%5.2f, time=%i" % \
(key, niterations, self.mSensitivityLevel, nresults, result.GetNumMatches(), coverage, pide,
time.time() - t0)
else:
print "# INCREMENTAL: key=%s, iteration=%i, results=%i, hits=%i, coverage=%5.2f, pide=%5.2f, time=%i" % \
(key, niterations, 0, 0, 0, 0, time.time() - t0)
return result, bracket_from, bracket_to
# ------------------------------------------------------------------------
def RunRecursive(self,
bracket_from, bracket_to,
bracket_from_end, bracket_to_end, level):
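# High-level flow (summarizing the code below): optionally probe the region to
# narrow or widen the brackets, run the incremental prediction, optionally
# refine it, and then recurse into the genomic stretches to the left and right
# of the matched region, combining any additional matches into the result.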
if self.mLogLevel >= 1:
print "# RECURSE: level %i: checking region: %i-%i (%i-%i)" % \
(level,
self.mSbjctFrom + bracket_from,
self.mSbjctFrom + bracket_to,
self.mSbjctFrom + bracket_from_end,
self.mSbjctFrom + bracket_to_end)
if bracket_to - bracket_from <= 0:
if self.mLogLevel >= 2:
print "# RECURSE: exiting: bracket too small."
return None
result = None
# probe for genomic region and change alignments if necessary:
if self.mDoProbe:
result = self.RunProbe(bracket_from, bracket_to,
bracket_from_end, bracket_to_end)
if result and result.GetNumMatches() > 0:
match_from = result[0].mSbjctGenomeFrom
match_to = result[0].mSbjctGenomeTo
for x in range(1, result.GetNumMatches()):
match_from = min(match_from, result[x].mSbjctGenomeFrom)
match_to = max(match_to, result[x].mSbjctGenomeTo)
if match_from - self.mSbjctFrom < bracket_from:
if self.mLogLevel >= 2:
print "# RECURSE: changed left border %i->%i" % (bracket_from + self.mSbjctFrom, match_from)
bracket_from = match_from - self.mSbjctFrom
if match_to - self.mSbjctFrom > bracket_to:
if self.mLogLevel >= 2:
print "# RECURSE: changed right border: %i->%i" % (bracket_to + self.mSbjctFrom, match_to)
bracket_to = match_to - self.mSbjctFrom
elif level == 0:
if self.mLogLevel >= 2:
print "# RECURSE: continuing with dummy: first level received empty result from PROBE."
else:
if self.mLogLevel >= 2:
print "# RECURSE: exiting: received empty result from PROBE."
return None
if self.mDoIncremental:
result, dummy_bracket_from, dummy_bracket_to = self.RunIncremental(bracket_from,
bracket_to,
bracket_from_end,
bracket_to_end)
if self.mDoRefinement:
# create a dummy result, if nothing created previously.
if not result:
result = PredictionParser.Predictions()
e = PredictionParser.PredictionParserEntry()
e.mQueryToken = self.mQueryToken
e.mSbjctToken = self.mSbjctToken
e.mSbjctStrand = self.mSbjctStrand
e.mSbjctGenomeFrom = bracket_from_end + self.mSbjctFrom
e.mSbjctGenomeTo = bracket_to_end + self.mSbjctFrom
result.Add(e)
result = self.RunRefinement(
result, bracket_from_end, bracket_to_end)
if result and result.GetNumMatches() > 0 and self.mDoRecursive:
match_from = result[0].mSbjctGenomeFrom
match_to = result[0].mSbjctGenomeTo
for x in range(1, result.GetNumMatches()):
match_from = min(match_from, result[x].mSbjctGenomeFrom)
match_to = max(match_to, result[x].mSbjctGenomeTo)
match_from -= self.mSbjctFrom
match_to -= self.mSbjctFrom
# run left prediction
if match_from > bracket_from:
new_result = self.RunRecursive(
bracket_from, match_from, bracket_from_end, match_from, level + 1)
if new_result:
result.Combine(new_result)
# run right prediction
if match_to < bracket_to:
new_result = self.RunRecursive(
match_to, bracket_to, match_to, bracket_to_end, level + 1)
if new_result:
result.Combine(new_result)
return result
# ------------------------------------------------------------------------
def RunInitialize(self):
"""setup variables for run."""
# read genomic sequence
self.mForwardSequences, self.mReverseSequences = Genomics.ReadGenomicSequences(
open(self.mFilenameGenome, "r"))
# read peptide sequence from peptide sequences
self.mPeptideSequences = Genomics.ReadPeptideSequences(
open(self.mFilenamePeptides, "r"))
# select the peptide sequence to work on.
self.mPeptideSequence = self.mPeptideSequences[self.mQueryToken]
self.mGenomicSequence = self.mForwardSequences[self.mSbjctToken]
if self.mBracketToEnd == 0:
self.mBracketToEnd = len(self.mGenomicSequence)
if self.mBracketTo == 0:
self.mBracketTo = self.mBracketToEnd
# length of query
self.mQueryLength = len(self.mPeptideSequence)
# length of sbjct
self.mSbjctLength = self.mBracketToEnd - self.mBracketFroend
# key
self.mKey = "%s_vs_%s_%s_%i_%i" % (self.mQueryToken,
self.mSbjctToken, self.mSbjctStrand,
self.mSbjctFrom, self.mSbjctTo)
# ------------------------------------------------------------------------
def Run(self):
"""Main startup routine.
Calls iterative matching routine.
"""
t0 = time.time()
self.RunInitialize()
if self.mLogLevel >= 1:
print "# START: key=%s lquery=%i lsbjct=%i time=%s " %\
(self.mKey,
len(self.mPeptideSequences[self.mQueryToken]),
self.mBracketToEnd - self.mBracketFroend,
time.asctime(time.localtime(time.time())))
result = self.RunRecursive(self.mBracketFrom, self.mBracketTo,
self.mBracketFroend, self.mBracketToEnd,
0)
if result:
result.Sort(lambda x, y: cmp(-x.score, -y.score))
if len(result) > 0:
best_entry = result.GetBestMatch()
coverage = best_entry.mQueryCoverage
pide = best_entry.mPercentIdentity
if self.mLogLevel >= 2:
row_seq = alignlib_lite.makeSequence(
self.mPeptideSequences[best_entry.mQueryToken])
col_seq = alignlib_lite.makeSequence(
best_entry.mTranslation)
f = alignlib_lite.AlignmentFormatExplicit(
best_entry.mMapPeptide2Translation, row_seq, col_seq)
print "# TOPALI:", f.mRowAlignment
print "# TOPALI:", f.mColAlignment
else:
coverage = 0
pide = 0
if self.mLogLevel >= 1:
print "# RESULT: key=%s, hits=%i, coverage=%5.2f, pide=%5.2f, time=%i" % \
(self.mKey, len(result), coverage, pide,
time.time() - t0)
else:
if self.mLogLevel >= 1:
print "# RESULT: key=%s, time=%i no prediction possible." % \
(self.mKey, time.time() - t0)
return result
# ------------------------------------------------------------------------
def RunRefinement(self, result, bracket_from_end, bracket_to_end):
"""refine result.
The objective of this method is to return a best prediction from a list
of results. The method stops, if a satisfactory assignment has been found.
This is done the following way:
Iterate over list of regions. Test predictions against exon structure.
If the exon structure matches, return.
Otherwise:
1. Remove predictions adjacent in the genome but overlapping in the query.
These are usually due to repeats. Take the one with best coverage.
2. Combine predictions collinear in both genome and query.
3. Combine predictions overlapping on sbjct.
"""
t0 = time.time()
if result.GetNumMatches() == 0:
return result
# sort results by score
result.Sort(lambda x, y: cmp(-x.score, -y.score))
boundaries_genome = []
boundaries_peptide = []
tmp_result = PredictionParser.Predictions()
new_result = PredictionParser.Predictions()
for p in result:
is_compatible = True
for xp in tmp_result:
overlap = min(xp.mQueryTo, p.mQueryTo) - \
max(xp.mQueryFrom, p.mQueryFrom)
goverlap = min(xp.mSbjctGenomeTo, p.mSbjctGenomeTo) - \
max(xp.mSbjctGenomeFrom, p.mSbjctGenomeFrom)
if overlap > 0 or goverlap > 0:
is_compatible = False
break
if is_compatible:
tmp_result.append(p)
if self.mLogLevel >= 2:
print "# REFINE: running combination of %i predictions" % len(tmp_result)
new_p = tmp_result.GetRange()
if global_options.loglevel >= 2:
print "# REFINE: predictions combined:"
for p in tmp_result:
print "#", str(p)
print "#", str(new_p)
new_prediction = self.RefinePrediction(
new_p, bracket_from_end, bracket_to_end)
if new_prediction:
new_result.Add(new_prediction)
new_result.Sort(lambda x, y: cmp((-x.score,),
(-y.score,)))
if new_result and new_result.GetNumMatches() > 0:
if self.mLogLevel >= 1:
if result.GetNumMatches() > 0:
best_entry = new_result.GetBestMatch()
coverage = best_entry.mQueryCoverage
pide = best_entry.mPercentIdentity
else:
coverage = 0
pide = 0
print "# REFINE: key=%s, hits=%i, coverage=%5.2f, pid=%5.2f, time=%i" % \
(self.mKey, new_result.GetNumMatches(), coverage, pide,
time.time() - t0)
print str(new_result)
else:
print "# REFINE: key=%s, hits=0, time=%i" % (self.mKey, time.time() - t0)
return new_result
# ------------------------------------------------------------------------
def RunProbeStep(self, level,
bracket_from, bracket_to, bracket_from_end, bracket_to_end,
peptide_from, peptide_to, peptide_from_end, peptide_to_end):
"""Run a prediction for bracket range and a peptide range."""
if self.mLogLevel >= 1:
print "# RunProbeStep: level %i: checking region: %i-%i (%i-%i) %i-%i (%i-%i)" % \
(level,
self.mSbjctFrom + bracket_from,
self.mSbjctFrom + bracket_to,
self.mSbjctFrom + bracket_from_end,
self.mSbjctFrom + bracket_to_end,
peptide_from, peptide_to, peptide_from_end, peptide_to_end)
if bracket_to - bracket_from < 0:
if self.mLogLevel >= 2:
print "# exiting recursion bracket_to - bracket_from"
return None
if bracket_from < bracket_from_end < 0:
if self.mLogLevel >= 2:
print "# exiting recursion: bracket_from end"
return None
if bracket_to > bracket_to_end < 0:
if self.mLogLevel >= 2:
print "# exiting recursion: bracket_to end"
return None
if peptide_to - peptide_from < self.mQueryBorder:
if self.mLogLevel >= 2:
print "# exiting recursion: peptide_to - peptide_from"
return None
self.mSensitivityLevel = 0
t1 = time.time()
niterations = 0
last_result = None
# iterate over results.
# rescan a region with higher sensitivity,
# 1. if there has been no match
while 1:
niterations += 1
result = self.RunSinglePrediction(bracket_from,
bracket_to,
peptide_from,
peptide_to)
if not result or result.GetNumMatches() == 0:
if self.mLogLevel >= 2:
print "# PROBE: received empty result."
# increase sensitivity, if there are more levels available and
# the threshold permits it (is 0)
if (self.mSensitivityLevel < self.mProbeMaxSensitivityLevel) and\
(self.mLevelsSensitivity[self.mSensitivityLevel + 1][0] >= 0):
self.mSensitivityLevel += 1
if self.mLogLevel >= 2:
print "# PROBE: increasing sensitivity to %i." % (self.mSensitivityLevel)
continue
else:
break
else:
break
if result and result.GetNumMatches() > 0:
best_entry = result.GetBestMatch()
self.mSensitivityLevelStart = max(
self.mSensitivityLevelStart, self.mSensitivityLevel)
if self.mLogLevel >= 1:
print "# key=%s, iteration=%i, sensivity=%i, hits=%i, score=%5.2f, coverage=%5.2f, time=%i" % \
(self.mKey, niterations, self.mSensitivityLevel,
result.GetNumMatches(
), best_entry.score, best_entry.mQueryCoverage,
time.time() - t1)
best_entry = result.GetBestMatch()
match_genome_from = best_entry.mSbjctGenomeFrom - self.mSbjctFrom
match_genome_to = best_entry.mSbjctGenomeTo - self.mSbjctFrom
match_peptide_from = best_entry.mQueryFrom - 1
match_peptide_to = best_entry.mQueryTo
if global_options.loglevel >= 2:
print str(result)
increment = self.mBracketIncrements[
min(level, len(self.mBracketIncrements) - 1)]
# run left prediction, if no internal match
self.mMaxPeptideTo = max(self.mMaxPeptideTo, match_peptide_to)
self.mMinPeptideFrom = min(
self.mMinPeptideFrom, match_peptide_from)
if match_peptide_from <= self.mMinPeptideFrom:
new_result = self.RunProbeStep(level + 1,
bracket_from -
increment, match_genome_from,
bracket_from_end, match_genome_from,
peptide_from, match_peptide_from,
peptide_from_end, match_peptide_from)
if new_result:
result.Combine(new_result)
if match_peptide_to >= self.mMaxPeptideTo:
new_result = self.RunProbeStep(level + 1,
match_genome_to, bracket_to +
increment, match_genome_to, bracket_to_end,
match_peptide_to, peptide_to, match_peptide_to, peptide_to_end)
if new_result:
result.Combine(new_result)
return result
# -------------------------------------------------------------------------
def RunProbe(self, bracket_from, bracket_to, bracket_from_end, bracket_to_end):
"""Run a probe
This tries to run the predictor in probe mode in order to pin down the location
of the gene more accurately."""
t0 = time.time()
if self.mLogLevel >= 1:
print "# PROBE: checking region: %i-%i (%i-%i)" % (self.mSbjctFrom + bracket_from,
self.mSbjctFrom +
bracket_to,
self.mSbjctFrom +
bracket_from_end,
self.mSbjctFrom + bracket_to_end)
# maximal range of peptide found
self.mMinPeptideFrom = self.mQueryLength
self.mMaxPeptideTo = 0
result = self.RunProbeStep(0, bracket_from, bracket_to, bracket_from_end, bracket_to_end,
0, self.mQueryLength, 0, self.mQueryLength)
if result:
coverage = 100 * \
(self.mMaxPeptideTo - self.mMinPeptideFrom) / self.mQueryLength
else:
coverage = 0
if self.mLogLevel >= 2:
print "# PROBE: key=%s, sensitivity=%i, hits=%i, coverage=%i, from=%i, to=%i, time=%i" % \
(self.mKey, self.mSensitivityLevelStart,
result.GetNumMatches() if result else 0,
coverage,
self.mMinPeptideFrom + 1, self.mMaxPeptideTo,
time.time() - t0)
return result
# --------------------------------------------------------------------------
class TranscriptPredictorTwoStep(TranscriptPredictor):
"""Transcript predictor that
1. probes using Method 1
2. predicts using Method 2
"""
mProbePredictor = None
mProbeOptions = None
mRefinementPredictor = None
mRefinementOptions = None
def __init__(self, filename_genome, filename_peptides):
TranscriptPredictor.__init__(self, filename_genome, filename_peptides)
# -------------------------------------------------------------------------
def DumpParameters(self):
TranscriptPredictor.DumpParameters(self)
self.mProbePredictor.DumpParameters()
self.mRefinementPredictor.DumpParameters()
# -------------------------------------------------------------------------
def RunInitialize(self):
"""init."""
TranscriptPredictor.RunInitialize(self)
self.mProbeSequence = self.mPeptideSequence
self.mProbePredictor.SetLogLevel(self.mLogLevel - 2)
self.mRefinementPredictor.SetLogLevel(self.mLogLevel - 2)
if self.mMaskProbe:
for masker in self.mMaskProbe:
self.mProbeSequence = masker(self.mProbeSequence)
if not self.mProbeSequence:
print "# WARNING: empty sequence after masking with %s, query was %s" % (str(masker), self.mPeptideSequence)
if self.mLogLevel >= 4:
print "# PROBE: sequence=%s" % self.mProbeSequence
# -------------------------------------------------------------------------
def RunProbe(self, bracket_from, bracket_to, bracket_from_end, bracket_to_end):
"""Run a probe
This tries to run the predictor in probe mode in order to pin down the location
of the gene more accurately."""
t0 = time.time()
if self.mLogLevel >= 1:
print "# PROBE: checking region: %i-%i (size=%i)" % (self.mSbjctFrom + bracket_from_end,
self.mSbjctFrom +
bracket_to_end,
bracket_to_end - bracket_from_end)
result = self.mProbePredictor(self.mQueryToken, self.mProbeSequence,
self.mSbjctToken, self.mGenomicSequence,
self.mProbeOptions,
bracket_from_end, bracket_to_end)
if not result:
print "# PROBE: received empty result."
return None
result.ShiftGenomicRegion(self.mSbjctFrom)
result.SetStrand(self.mSbjctStrand)
if result and result.GetNumMatches() > 0:
match_from = result[0].mQueryFrom
match_to = result[0].mQueryTo
for x in range(1, result.GetNumMatches()):
match_from = min(match_from, result[x].mQueryFrom)
match_to = max(match_to, result[x].mQueryTo)
coverage = 100 * (match_to - match_from + 1) / self.mQueryLength
if self.mLogLevel >= 2:
print "# PROBE: key=%s, hits=%i, coverage=%i, from=%i, to=%i, time=%i" % \
(self.mKey,
result.GetNumMatches(),
coverage,
match_from, match_to,
time.time() - t0)
print str(result)
else:
print "# PROBE: key=%s, hits=0" % \
(self.mKey)
return result
# ------------------------------------------------------------------------
def RefinePrediction(self, prediction, bracket_from_end, bracket_to_end):
"""refine a prediction.
"""
# set region to check based on scanning result
#
# if not touching Terminus:
#
# 1. add fixed width - large boundary
#
# 2. increase refinement boundary according to query
# gene size
#
# ------ match
# +++++++++++++ query size
# ++++++++++++ query size
# .................... region to test
#
# if touching Terminus:
#
# 1. just add small range.
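# Illustrative example of the increments computed below: if the current
# prediction spans 2,000 bp of genome but the known exons of the query span
# 15,000 bp, gene_increment = 15,000 - 2,000 = 13,000, so up to
# max(13,000, mBorderRefinement) bp is added on each side where the query is
# still incomplete; a side where the query terminus has already been reached
# gets only mBorderRefinementSmall bp.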
bracket_from = prediction.mSbjctGenomeFrom - self.mSbjctFrom
bracket_to = prediction.mSbjctGenomeTo - self.mSbjctFrom
prediction_length = prediction.mSbjctGenomeTo - \
prediction.mSbjctGenomeFrom
if self.mExons:
genesize = self.mExons[-1].mGenomeTo - self.mExons[0].mGenomeFrom
gene_increment = genesize - prediction_length
else:
genesize = 0
gene_increment = 0
if prediction.mQueryFrom > 1 + self.mQueryBorder:
left_increment = max(gene_increment, self.mBorderRefinement)
else:
left_increment = self.mBorderRefinementSmall
if prediction.mQueryTo < prediction.mQueryLength - self.mQueryBorder:
right_increment = max(gene_increment, self.mBorderRefinement)
else:
right_increment = self.mBorderRefinementSmall
if self.mLogLevel >= 2:
print "# REFINE: increments are: left=%i, right=%i, lpred=%i, lquery=%i" % (left_increment,
right_increment,
prediction_length,
genesize)
bracket_from -= left_increment
bracket_to += right_increment
# make sure that range is within bracket
bracket_from = max(bracket_from, bracket_from_end)
bracket_to = min(bracket_to, bracket_to_end)
if self.mLogLevel >= 2:
print "# REFINE: searching region %i to %i, size=%i" % (bracket_from + self.mSbjctFrom,
bracket_to +
self.mSbjctFrom,
bracket_to - bracket_from)
if bracket_to - bracket_from < self.mMinRegionLength:
print "# REFINE: region with %i residues to small, no refinement done" % (bracket_to - bracket_from)
return None
result = self.mRefinementPredictor(self.mQueryToken, self.mPeptideSequences[self.mQueryToken],
self.mSbjctToken, self.mForwardSequences[
self.mSbjctToken],
self.mRefinementOptions,
bracket_from, bracket_to)
if not result:
print "# WARNING: received no result, no refinement done."
return None
result.ShiftGenomicRegion(self.mSbjctFrom)
result.SetStrand(self.mSbjctStrand)
if result.GetNumMatches() == 0:
print "# REFINE: received empty result."
return None
return result.GetBestMatch()
# --------------------------------------------------------------------------
class TranscriptPredictorTwoStepEG(TranscriptPredictorTwoStep):
"""Transcript predictor that
1. probes using exonerate
2. predicts using genewise
"""
def __init__(self, filename_genome, filename_peptides):
TranscriptPredictorTwoStep.__init__(
self, filename_genome, filename_peptides)
self.mProbeOptions = "--proteinwordlimit 5 --proteinhspdropoff 5 --proteinwordlen 3 --subopt TRUE"
self.mProbePredictor = PredictorExonerate()
self.mRefinementOptions = ""
self.mRefinementPredictor = PredictorGenewise()
# --------------------------------------------------------------------------
class TranscriptPredictorTwoStepEE(TranscriptPredictorTwoStep):
"""Transcript predictor that
1. probes using exonerate
2. predicts using exonerate
"""
def __init__(self, filename_genome, filename_peptides,
min_score=80):
TranscriptPredictorTwoStep.__init__(
self, filename_genome, filename_peptides)
self.mProbeOptions = "--proteinwordlimit 5 --proteinhspdropoff 5 --proteinwordlen 3 --subopt TRUE --score '%s' " %\
str(min_score)
self.mProbePredictor = PredictorExonerate()
self.mRefinementOptions = "--exhaustive --subopt FALSE --score '%s' " % str(
min_score)
self.mRefinementPredictor = PredictorExonerate()
# ------------------------------------------------------------------------
def EvaluatePrediction(prediction, query_exons, query_sequence):
"""Evaluate the result of a gene prediction.
If it is a successful gene prediction, return 1, otherwise
return 0.
A gene prediction is successful, if the exon boundaries in the
query are similar to the exon boundaries in the prediction.
"""
if global_options.loglevel >= 2:
print "# EVAL: Exons in query:"
i = 0
for e in query_exons:
print "# EVAL: %i" % i, str(e)
i += 1
exons = Exons.Alignment2Exons(prediction.mMapPeptide2Genome,
query_from=prediction.mQueryFrom - 1,
sbjct_from=prediction.mSbjctGenomeFrom,
add_stop_codon=1)
for e in exons:
e.mQueryToken = "prediction"
if global_options.loglevel >= 2:
print "# EVAL: Exons in prediction:"
i = 0
for e in exons:
print "# EVAL: %i" % i, str(e)
i += 1
comparison = Exons.CompareGeneStructures(
exons, query_exons,
map_ref2cmp=prediction.mMapPeptide2Translation,
cmp_sequence=prediction.mTranslation,
ref_sequence=query_sequence,
threshold_min_pide=prediction.mPercentIdentity *
global_options.quality_threshold_pide / 100,
threshold_slipping_exon_boundary=global_options.quality_slipping_exon_boundary)
if global_options.loglevel >= 2:
print comparison.Pretty(prefix="# EVAL: ")
is_ok = False
status = "unknown"
max_nexons = max(len(exons), len(query_exons))
# more than two exons in result: check number of identical exons
if len(exons) > 2:
if comparison.mNumIdenticalExons >= (len(exons) - 2) and \
abs(comparison.mNumDifferenceExons) <= 1:
status = "conserved multi exon"
is_ok = True
elif max_nexons > 10 and \
(100 * comparison.mNumIdenticalExons) / max_nexons > global_options.evaluate_min_percent_exon_identity:
status = "semi-conserved multi exon"
is_ok = True
# two exons in result:
elif len(exons) == 2:
# accept if two exons in query and at least one exon is identical
if len(query_exons) == 2:
if comparison.mNumIdenticalExons >= 1:
status = "conserved double exon"
is_ok = True
else:
# accept if both exons are identical
if comparison.mNumIdenticalExons == 2:
status = "semi-conserved double exon"
is_ok = True
# single exon in result: accept, if query is single exon and coverage
# above threshold
else:
if len(query_exons) == 1 and \
prediction.mQueryCoverage >= global_options.evaluate_single_exons_min_coverage:
status = "conserved single exon"
is_ok = True
if global_options.loglevel >= 1:
print "# EVAL: status=%s is_ok=%s" % (status, str(is_ok))
return is_ok
# ------------------------------------------------------------------------
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: gpipe/predict_genes.py 2462 2009-01-28 10:18:22Z andreas $", usage=globals()["__doc__"])
parser.add_option("-b", "--query-border", dest="query_border", type="int")
parser.add_option(
"-i", "--bracket-increment", dest="bracket_increments", type="string")
parser.add_option(
"-e", "--exit-identical", dest="exit_if_identical", action="store_true")
parser.add_option("-m", "--min-score", dest="min_score", type="float")
parser.add_option("-p", "--method", dest="method", type="string")
parser.add_option(
"-r", "--recursive", dest="recursive", action="store_true")
parser.add_option("--refinement", dest="refinement", action="store_true")
parser.add_option("--probe", dest="probe", action="store_true")
parser.add_option("--incremental", dest="incremental", action="store_true")
parser.add_option(
"--border-refinement", dest="border_refinement", type="int")
parser.add_option("-x", "--exons-file", dest="filename_exons", type="string")
parser.add_option("-a", "--mask-probe", dest="mask_probe", type="string")
parser.add_option("-f", "--format", dest="input_format", type="string")
parser.add_option("--probe-options", dest="probe_options", type="string")
parser.add_option("-g", "--genome-file", dest="genome_file", type="string")
parser.add_option("--peptides-fasta-file", dest="filename_peptides", type="string")
parser.add_option("--keep-temp", dest="keep_temp", action="store_true")
parser.set_defaults(
exit_if_identical=None,
loglevel=2,
query_border=0,
short_options="v:hi:g:b:em:procx:af:",
filename_peptides=None,
filename_genome=None,
filename_exons=None,
genome_file="genome_%s.fasta",
# bracket extension parameters
bracket_increments=None,
# method to use for prediction
method="genewise",
use_header=1,
recursive=0,
probe=0,
incremental=0,
border_refinement=50000,
min_score=0,
refinement=0,
mask_probe="",
evaluate_single_exons_min_coverage=80,
evaluate_min_percent_exon_identity=70,
quality_threshold_pide=75,
quality_slipping_exon_boundary=9,
graph_cutoff=5,
input_format=None,
probe_options=None,
keep_temp=False,
)
(global_options, args) = E.Start(parser)
if global_options.mask_probe:
global_options.mask_probe = global_options.mask_probe.split(",")
if global_options.bracket_increments:
global_options.bracket_increments = map(
int, global_options.bracket_increments.split(","))
if global_options.method == "twostep_eg":
predictor = TranscriptPredictorTwoStepEG(
global_options.filename_peptides, global_options.filename_genome)
elif global_options.method == "twostep_ee":
predictor = TranscriptPredictorTwoStepEE(
global_options.filename_peptides, global_options.filename_genome, global_options.min_score)
else:
print "unknown method", global_options.method
sys.exit(1)
if global_options.probe_options:
predictor.mProbeOptions = global_options.probe_options
predictor.mQueryBorder = global_options.query_border
predictor.mBorderRefinement = global_options.border_refinement
predictor.mBracketIncrements = global_options.bracket_increments
predictor.mMinScore = global_options.min_score
predictor.mExitIfIdentical = global_options.exit_if_identical
predictor.mLogLevel = global_options.loglevel
predictor.mDoRecursive = global_options.recursive
predictor.mDoRefinement = global_options.refinement
predictor.mDoProbe = global_options.probe
predictor.mDoIncremental = global_options.incremental
predictor.mKeepTemp = global_options.keep_temp
for m in global_options.mask_probe:
if m == "seg":
predictor.mMaskProbe.append(MaskerSeg())
elif m == "bias":
predictor.mMaskProbe.append(MaskerBias())
else:
raise "unknown masker %s" % m
exit_code = 0
print E.GetHeader()
print E.GetParams()
predictor.DumpParameters()
if global_options.filename_exons:
file = open(global_options.filename_exons, "r")
exons = Exons.ReadExonBoundaries(file, do_invert=1, remove_utr=1)
file.close()
else:
exons = {}
if global_options.loglevel >= 2:
print "# read exon boundaries for %i queries" % len(exons)
fasta = IndexedFasta.IndexedFasta(global_options.genome_file)
# --------------------------------------------------------------------
# Format "pairs": process peptide and genomic sequences given in fasta format.
# Query and genomic sequence are linked by common identifier
if global_options.input_format == "pairs":
if len(args) == 2:
global_options.filename_peptides, global_options.filename_genome = args
peptide_sequences = Genomics.ReadPeptideSequences(
open(global_options.filename_peptides, "r"))
forward_sequences = Genomics.ReadGenomicSequences(
open(global_options.filename_genome, "r"), do_reverse=0)
for query_token in peptide_sequences:
if query_token not in forward_sequences:
print "# WARNING: no genomic sequence found "\
"for query %s" % query_token
continue
query_sequence = peptide_sequences[query_token]
sbjct_sequence = forward_sequences[query_token]
predictor.mBracketFrom = 0
predictor.mBracketTo = len(sbjct_sequence)
predictor.mQueryToken = query_token
predictor.mSbjctToken = query_token
predictor.mSbjctStrand = "+"
predictor.mSbjctFrom = 0
predictor.mSbjctTo = len(sbjct_sequence)
predictor.mBracketFroend = 0
predictor.mBracketToEnd = len(sbjct_sequence)
# create temporary files
query_outfile, query_filename = tempfile.mkstemp()
os.write(query_outfile, ">%s\n%s\n" %
(query_token, query_sequence))
os.close(query_outfile)
predictor.SetFilenamePeptides(query_filename)
sbjct_outfile, sbjct_filename = tempfile.mkstemp()
os.write(sbjct_outfile, ">%s\n%s\n" %
(query_token, sbjct_sequence))
os.close(sbjct_outfile)
predictor.SetFilenameGenome(sbjct_filename)
predictor.Run()
os.remove(query_filename)
os.remove(sbjct_filename)
elif global_options.input_format == "single":
query_token, sbjct_token, sbjct_strand, sbjct_from, sbjct_to = args[:5]
sbjct_from, sbjct_to = int(sbjct_from), int(sbjct_to)
peptide_sequences = Genomics.ReadPeptideSequences(
open(global_options.filename_peptides, "r"))
query_sequence = peptide_sequences[query_token]
sbjct_sequence = fasta.getGenomicSequence(sbjct_token, sbjct_strand,
sbjct_from, sbjct_to)
predictor.mBracketFrom = 0
predictor.mBracketTo = len(sbjct_sequence)
predictor.mQueryToken = query_token
predictor.mSbjctToken = sbjct_token
predictor.mSbjctStrand = sbjct_strand
predictor.mSbjctFrom = sbjct_from
predictor.mSbjctTo = sbjct_to
predictor.mBracketFroend = 0
predictor.mBracketToEnd = len(sbjct_sequence)
print ">%s\n%s" % (query_token, query_sequence)
print ">%s\n%s" % (sbjct_token, sbjct_sequence)
if query_token in exons:
predictor.mExons = exons[query_token]
else:
predictor.mExons = []
# create temporary files
query_outfile, query_filename = tempfile.mkstemp()
os.write(query_outfile, ">%s\n%s\n" % (query_token, query_sequence))
os.close(query_outfile)
predictor.SetFilenamePeptides(query_filename)
sbjct_outfile, sbjct_filename = tempfile.mkstemp()
os.write(sbjct_outfile, ">%s\n%s\n" % (sbjct_token, sbjct_sequence))
os.close(sbjct_outfile)
predictor.SetFilenameGenome(sbjct_filename)
result = predictor.Run()
# dump result and check, if it is satisfactory
if result:
result.Write()
os.remove(query_filename)
os.remove(sbjct_filename)
# --------------------------------------------------------------------
# Format "lists": given are chunks of priority lists.
# After very prediction, the prediction is evaluated. If it is sufficient,
# the remaining entries are skipped and the next list is processed.
elif global_options.input_format == "graph":
# get genomes in input set (so that for single genome files,
# unnecessary entries can be skipped).
data = map(lambda x: x[:-1].split("\t"),
filter(lambda x: x[0] != "#", sys.stdin.readlines()))
sbjct_tokens = {}
for g in map(lambda x: x[2], data):
sbjct_tokens[g] = True
if len(args) == 1 and args[0] == "-":
# process several chunks of data
skip = False
last_time = None
last_region_id = None
last_sbjct_sequence = None
forward_sequences = None
reverse_sequences = None
for d in data:
(query_token,
query_sequence,
sbjct_token,
sbjct_strand,
sbjct_sequence,
sbjct_from,
sbjct_to,
min_bracket_from, min_bracket_to,
region_id, region_nr, region_max_nr) = d
(sbjct_from, sbjct_to, min_bracket_from, min_bracket_to, region_id, region_nr, region_max_nr) = \
map(int, (sbjct_from, sbjct_to, min_bracket_from,
min_bracket_to, region_id, region_nr, region_max_nr))
if sbjct_sequence == "":
if last_sbjct_sequence is None:
try:
sbjct_sequence = fasta.getSequence(
sbjct_token, sbjct_strand, sbjct_from, sbjct_to)
except AssertionError:
global_options.stderr.write("# WARNING: could not retrieve sequence for in region %i-%i: %s:%s:%i:%i - skipped\n" %
(region_id, region_nr,
sbjct_token, sbjct_strand, sbjct_from, sbjct_to))
global_options.stdlog.write("# WARNING: could not retrieve sequence for in region %i-%i: %s:%s:%i:%i - skipped\n" %
(region_id, region_nr,
sbjct_token, sbjct_strand, sbjct_from, sbjct_to))
continue
else:
sbjct_sequence = last_sbjct_sequence
else:
last_sbjct_sequence = sbjct_sequence
# do not test on region_nr, as first region_nr might not
# be 1 due to duplicated key removal in
# gpipe/assignments2pairs.py
if region_id != last_region_id:
this_time = time.time()
if global_options.loglevel >= 1 and last_time:
print "## GRAPH: region %i: finished in %i seconds" % (last_region_id, this_time - last_time)
print "####################################################################"
last_time = this_time
last_region_id = region_id
if global_options.loglevel >= 1:
print "####################################################################"
print "## GRAPH: region %i: starting with %i members" % (region_id, region_max_nr)
skip = False
if global_options.loglevel >= 2:
print "## GRAPH: region %i: processing %i of %i members" % (region_id, region_nr, region_max_nr)
if skip and region_nr <= region_max_nr:
if global_options.loglevel >= 2:
print "## GRAPH: skipping entry %i/%i" % (region_nr, region_max_nr)
continue
if global_options.graph_cutoff and region_nr > global_options.graph_cutoff:
if global_options.loglevel >= 2:
print "## GRAPH: omitting entry %i/%i" % (region_nr, region_max_nr)
continue
(bracket_from, bracket_to,
bracket_from_end, bracket_to_end) = map(lambda x: x - sbjct_from,
(min_bracket_from,
min_bracket_to,
sbjct_from,
sbjct_to))
predictor.mBracketFrom = bracket_from
predictor.mBracketTo = bracket_to
predictor.mQueryToken = query_token
predictor.mSbjctToken = sbjct_token
predictor.mSbjctStrand = sbjct_strand
predictor.mSbjctFrom = sbjct_from
predictor.mSbjctTo = sbjct_to
predictor.mBracketFroend = bracket_from_end
predictor.mBracketToEnd = bracket_to_end
if query_token in exons:
predictor.mExons = exons[query_token]
else:
predictor.mExons = []
# create temporary files
query_outfile, query_filename = tempfile.mkstemp()
os.write(query_outfile, ">%s\n%s\n" %
(query_token, query_sequence))
os.close(query_outfile)
predictor.SetFilenamePeptides(query_filename)
sbjct_outfile, sbjct_filename = tempfile.mkstemp()
os.write(sbjct_outfile, ">%s\n%s\n" %
(sbjct_token, sbjct_sequence))
os.close(sbjct_outfile)
predictor.SetFilenameGenome(sbjct_filename)
result = predictor.Run()
# dump result and check, if it is satisfactory
if result:
result.Write()
if result and query_token in exons:
skip = False
if global_options.loglevel >= 1:
print "# EVAL: evaluating %i predictions." % len(result)
for r in result:
skip = skip or EvaluatePrediction(
r, exons[query_token], query_sequence)
os.remove(query_filename)
os.remove(sbjct_filename)
this_time = time.time()
if global_options.loglevel >= 1 and last_time:
print "## GRAPH: region %i: finished in %i seconds" % (last_region_id, this_time - last_time)
print "####################################################################"
# --------------------------------------------------------------------
# process default format
else:
# --------------------------------------------------------------------
# two arguments: a single prediction with one peptide file and one
# genome file.
if len(args) == 2:
global_options.filename_peptides, global_options.filename_genome = args
sbjct_token = ""
sbjct_strand = ""
sbjct_from = 0
sbjct_to = 0
query_token = ""
bracket_from = 0
bracket_to = 0
bracket_from_end = 0
bracket_to_end = 0
if global_options.use_header:
# read sbjct token and get bracket information
line = open(global_options.filename_genome, "r").readline()
data = re.split("\s+", line[1:-1])
if len(data) == 1:
sbjct_token = data[0]
elif len(data) >= 6:
(sbjct_token, sbjct_strand,
sbjct_from, sbjct_to,
min_bracket_from, min_bracket_to) = data[:6]
(sbjct_from, sbjct_to, min_bracket_from, min_bracket_to) = \
map(int, (
sbjct_from, sbjct_to, min_bracket_from, min_bracket_to))
# convert to relative coordinates on genomic sequence
(bracket_from, bracket_to,
bracket_from_end, bracket_to_end) = map(lambda x: x - sbjct_from,
(min_bracket_from,
min_bracket_to,
sbjct_from,
sbjct_to))
# read query token
line = open(global_options.filename_peptides, "r").readline()
(query_token,) = re.split("\s+", line[1:-1])[:1]
predictor.mBracketFrom = bracket_from
predictor.mBracketTo = bracket_to
predictor.mQueryToken = query_token
predictor.mSbjctToken = sbjct_token
predictor.mSbjctStrand = sbjct_strand
predictor.mSbjctFrom = sbjct_from
predictor.mSbjctTo = sbjct_to
predictor.mBracketFroend = bracket_from_end
predictor.mBracketToEnd = bracket_to_end
predictor.SetFilenamePeptides(global_options.filename_peptides)
predictor.SetFilenameGenome(global_options.filename_genome)
if query_token in exons:
predictor.mExons = exons[query_token]
else:
predictor.mExons = []
predictor.Run()
# --------------------------------------------------------------------
# one argument, which is -: read input as chunks from stdin
elif len(args) == 1 and args[0] == "-":
# get genomes in input set (so that for single genome files,
# unnecessary entries can be skipped).
data = map(
lambda x: x[:-1].split("\t"), filter(lambda x: x[0] != "#", sys.stdin.readlines()))
sbjct_tokens = {}
for g in map(lambda x: x[2], data):
sbjct_tokens[g] = True
last_sbjct_sequence = None
# process several chunks of data
for d in data:
(query_token,
query_sequence,
sbjct_token,
sbjct_strand,
sbjct_sequence,
sbjct_from,
sbjct_to,
min_bracket_from,
min_bracket_to) = d
(sbjct_from, sbjct_to, min_bracket_from, min_bracket_to) = \
map(int, (
sbjct_from, sbjct_to,
min_bracket_from, min_bracket_to))
(bracket_from, bracket_to,
bracket_from_end, bracket_to_end) = map(
lambda x: x - sbjct_from,
(min_bracket_from,
min_bracket_to,
sbjct_from,
sbjct_to))
if sbjct_sequence == "":
if last_sbjct_sequence is None:
sbjct_sequence = fasta.getSequence(sbjct_token,
sbjct_strand,
sbjct_from,
sbjct_to)
else:
sbjct_sequence = last_sbjct_sequence
else:
last_sbjct_sequence = sbjct_sequence
predictor.mBracketFrom = bracket_from
predictor.mBracketTo = bracket_to
predictor.mQueryToken = query_token
predictor.mSbjctToken = sbjct_token
predictor.mSbjctStrand = sbjct_strand
predictor.mSbjctFrom = sbjct_from
predictor.mSbjctTo = sbjct_to
predictor.mBracketFroend = bracket_from_end
predictor.mBracketToEnd = bracket_to_end
if query_token in exons:
predictor.mExons = exons[query_token]
else:
predictor.mExons = []
# create temporary files
query_outfile, query_filename = tempfile.mkstemp()
os.write(query_outfile, ">%s\n%s\n" %
(query_token, query_sequence))
os.close(query_outfile)
predictor.SetFilenamePeptides(query_filename)
sbjct_outfile, sbjct_filename = tempfile.mkstemp()
os.write(sbjct_outfile, ">%s\n%s\n" %
(sbjct_token, sbjct_sequence))
os.close(sbjct_outfile)
predictor.SetFilenameGenome(sbjct_filename)
result = predictor.Run()
if result:
result.Write()
os.remove(query_filename)
os.remove(sbjct_filename)
E.Stop()
sys.exit(exit_code)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import json
import requests
import nltk
def request_theguardian(params):
### theguardian endpoint URL
r = requests.get('http://content.guardianapis.com/search', params=params)
response = json.loads(r.text)
### pagination rules
if response['response']['pages'] > 1:
for current_page in range(1, response['response']['pages']+1):
params['page'] = current_page
r = requests.get('http://content.guardianapis.com/search', params=params)
response = json.loads(r.text)
            ### --DO SOMETHING HERE--
### Example 1: collect sentences from each article that contain 'Amsterdam'
articles = collect_sents(response, 'Amsterdam')
[[print (sent+'\n') for sent in article] for article in articles]
### Example 2: dump a response as json file
dump_json(params['q'], current_page, response)
### --DO SOMETHING HERE--
else: pass
def dump_json(query, current_page, response):
if not os.path.exists(query):
os.makedirs(query)
else: pass
path = os.path.join(os.path.abspath(query), query+'_page_'+str(current_page)+'.json')
with open(path, 'w') as f:
json.dump(response, f)
def collect_sents(response, keyword):
raw_articles = [re.sub('<[^>]*>', '', article['fields']['body']) for article in response['response']['results']]
sent_tokenized_articles = [nltk.sent_tokenize(i) for i in raw_articles]
keyword_sentences = [[sent for sent in article if keyword in sent]for article in sent_tokenized_articles]
return keyword_sentences
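# Illustrative sketch (not part of the original script): collect_sents expects the parsed
# JSON of a /search response that was requested with 'show-fields': 'body', for example:
#     response = json.loads(requests.get('http://content.guardianapis.com/search',
#                                        params=params).text)
#     sentences = collect_sents(response, 'Amsterdam')
# It returns one list per article, containing the sentences that mention the keyword.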
def main():
params = {
'api-key': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', ### your api-key
'q': 'Amsterdam',
'show-tags': 'keyword',
'show-fields': 'body',
'from-date': '2012-01-01',
'to-date': '2016-12-31',
'page-size': '200' ### max pagesize is 200
}
request_theguardian(params)
if __name__ == '__main__':
main()
|
import os
import pytest
import mock
from capsule.lib.config_handler import DEFAULT_CONFIG_FILE_ENV_VAR, get_config_file, get_config
import asyncio
TEST_CONFIG_FILE_RELATIVE_PATH = "./capsule/lib/settings/config.toml"
TEST_CONFIG_FILE_LOCATION = os.path.abspath(
os.path.expandvars(
os.path.expanduser(TEST_CONFIG_FILE_RELATIVE_PATH)))
class TestConfigHandler():
@mock.patch.dict(os.environ, {DEFAULT_CONFIG_FILE_ENV_VAR: "/Users/notyou"}, clear=True)
def test_config_file_var_gathered_from_env(self):
"""test that ACmd can be inherited from
provided that its minimum attributes and methods
are dealt with.
"""
assert get_config_file() == "/Users/notyou"
def test_config_fails_without_a_provided_path_or_created_default_file(self):
"""test when we try to run get_config without a provided path on
an assumed fresh system that it will fail.
This is expected as when no file is provided and None is found in the env
It will default to ~/.capsule/config.toml
Which shouldn't exist on a fresh system
"""
with pytest.raises(FileNotFoundError):
asyncio.run(get_config())
def test_config_with_specified_path(self):
"""test when we try to run get_config with a provided path
that it will find the file, be able to parse the file
And we can ensure values are within the file.
"""
assert asyncio.run(get_config(TEST_CONFIG_FILE_RELATIVE_PATH))
assert 'networks' in asyncio.run(get_config(TEST_CONFIG_FILE_RELATIVE_PATH))
@mock.patch.dict(os.environ, {DEFAULT_CONFIG_FILE_ENV_VAR: TEST_CONFIG_FILE_LOCATION}, clear=True)
def test_config_gathered_from_env(self):
"""test when a mocked environment variable is setup on the system
this value is read and the function will find the file,
be able to parse the file
And we can ensure values are within the file.
"""
assert asyncio.run(get_config())
assert 'networks' in asyncio.run(get_config())
|
# -*- coding: utf-8 -*-
import argparse
import atexit
import pkg_resources # part of setuptools
from winnaker.models import *
from winnaker.notify import *
from winnaker.settings import *
def main():
print("""
____ __ ____ __ .__ __. .__ __. ___ __ ___ _______ .______
\ \ / \ / / | | | \ | | | \ | | / \ | |/ / | ____|| _ \\
\ \/ \/ / | | | \| | | \| | / ^ \ | ' / | |__ | |_) |
\ / | | | . ` | | . ` | / /_\ \ | < | __| | /
\ /\ / | | | |\ | | |\ | / _____ \ | . \ | |____ | |\ \----.
\__/ \__/ |__| |__| \__| |__| \__| /__/ \__\ |__|\__\ |_______|| _| `._____|
""")
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--start",
help="starts manual execution of the pipeline",
action="store_true")
parser.add_argument(
"-fb",
"--forcebake",
help="force bake, to be used wth --start ",
action="store_true")
parser.add_argument(
"-a",
"--app",
type=str,
help="the name of application to look for",
default=cfg_app_name)
parser.add_argument(
"-p",
"--pipeline",
type=str,
help="the name of pipline to test",
default=os.environ["WINNAKER_PIPELINE_NAME"])
parser.add_argument(
"-nl", "--nologin",
help="will not attempt to login",
action="store_true")
parser.add_argument(
"-oa", "--authorize",
help="authorize the oauth application with the logged in user if required. " +
"This argument and '--nologin' are mutually exclusive",
action="store_true"
)
parser.add_argument(
"-nlb",
"--nolastbuild",
help="will not attempt to check last build status or stages",
action="store_true")
parser.add_argument(
"-hl",
"--headless",
help="will run in an xfvb display ",
action="store_true")
parser.add_argument(
"-v",
"--verbose",
help="print more logs, DEBUG level",
action="store_true")
args = parser.parse_args()
# Logging setup
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logFormatter = logging.Formatter(
"%(asctime)s [%(levelname)s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(log_level)
fileHandler = logging.FileHandler(
join(cfg_output_files_path, "winnaker.log"))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
version = pkg_resources.require("winnaker")[0].version
logging.info("Winnaker Version: {}".format(version))
logging.info("Current Config: {}".format(args))
if not os.path.exists(cfg_output_files_path):
os.makedirs(cfg_output_files_path)
if cfg_email_smtp and cfg_email_to and cfg_email_from:
atexit.register(send_mail, cfg_email_from, cfg_email_to, "Winnaker Screenshots " +
str(datetime.utcnow()), "Here are the screenshots of the spinnaker's last run at " +
str(datetime.utcnow()) +
" UTC Time", server=cfg_email_smtp)
if args.headless:
logging.debug("Starting virtual display")
from pyvirtualdisplay import Display
display = Display(visible=0, size=(2560, 1440))
display.start()
logging.debug("Started virtual display")
s = Spinnaker()
if not args.nologin:
logging.debug("Starting login")
s.login()
if args.authorize:
s.authorize()
s.get_pipeline(args.app, args.pipeline)
if not args.nolastbuild:
logging.info(
"- Last build status: {}".format(s.get_last_build().status.encode('utf-8')))
logging.info("- Screenshot Stages")
logging.info("- Current working directory: {}".format(os.getcwd()))
s.get_stages()
if args.start:
logging.debug("Going into start block")
s.start_manual_execution(force_bake=args.forcebake)
if args.headless:
logging.debug("Stopping virtualdisplay")
display.stop()
logging.debug("virtualdisplay stopped")
if __name__ == "__main__":
main()
|
#!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
import threading
import avocado
from pydaos.raw import DaosApiError
from test_utils_pool import TestPool
from test_utils_container import TestContainer
from apricot import TestWithServers
def container_write(container, record, array_size=None):
"""Write to a container.
Args:
container: instance of TestContainer
record: record size to be written
array_size (optional): size of array value to be written
"""
# update data_array_size to use array value type
if array_size:
container.data_array_size.update(array_size)
# update record qty
container.record_qty.update(record)
print("\nRecord Size:{}".format(container.record_qty))
# write multiple objects
container.write_objects()
def container_read(container, array_size=None):
"""Read and verify the written data.
Args:
container: instance of TestContainer
array_size (optional): size of array value to be written
"""
# update data_array_size to use array value type
if array_size:
container.data_array_size.update(array_size)
# read written objects and verify
container.read_objects()
def test_runner(self, size, record_size, index, array_size, thread_per_size=4):
"""Perform simultaneous writes of varying record size to a container.
Args:
self: avocado test object
size: pool size to be created
record_size (list): list of different record sizes to be written
index (int): pool/container object index
array_size (optional): size of array value to be written
thread_per_size (int): threads per rec size
"""
# pool initialization
self.pool.append(TestPool(
self.context, dmg_command=self.get_dmg_command()))
self.pool[index].get_params(self)
# set pool size
self.pool[index].nvme_size.update(size)
# Create a pool
self.pool[index].create()
# display available space before write
self.pool[index].display_pool_daos_space("before writes")
self.pool[index].connect()
# create container
self.container.append(TestContainer(self.pool[index]))
self.container[index].get_params(self)
self.container[index].create()
self.container[index].open()
# initialize dicts to hold threads
jobs = {"write": [], "read": []}
# create read/write threads.
for rec in record_size:
for _ in range(thread_per_size):
# create threads using single value type
jobs["write"].append(threading.Thread(target=container_write,
args=(self.container[index],
rec)))
jobs["read"].append(threading.Thread(target=container_read,
args=(self.container[index],
None)))
# create threads using array value type
jobs["write"].append(threading.Thread(target=container_write,
args=(self.container[index],
rec, array_size)))
jobs["read"].append(threading.Thread(target=container_read,
args=(self.container[index],
array_size)))
# start all the write threads
for job in jobs["write"]:
job.start()
# wait for all write threads to finish
for job in jobs["write"]:
job.join()
# start read threads
for job in jobs["read"]:
job.start()
# wait for all read threads to complete
for job in jobs["read"]:
job.join()
# display free space after reads and writes
self.pool[index].display_pool_daos_space("after writes and reads")
# destroy container
if self.container[index] is not None:
self.container[index].destroy()
# destroy pool
if self.pool[index] is not None:
self.pool[index].destroy(1)
class NvmeObject(TestWithServers):
"""Test class for NVMe storage.
Creates/Updates/Fetches large number of objects simultaneously.
Test Class Description:
Test the general functional operations of objects on nvme storage
i.e. Creation/Updating/Fetching for single pool and multiple pools.
:avocado: recursive
"""
def setUp(self):
"""Set Up nodes for each test case."""
super(NvmeObject, self).setUp()
# initialize self.pool and self.container as lists
self.pool = []
self.container = []
# set common params
self.record_size = self.params.get("record_size", "/run/container/*")
self.pool_size = self.params.get("size", "/run/pool/createsize/*")
self.array_size = self.params.get("array_size", "/run/container/*")
@avocado.fail_on(DaosApiError)
def test_nvme_object_single_pool(self):
"""Jira ID: DAOS-2087.
Test Description:
Test will create single pool on nvme using TestPool
Create large number of objects
Update/Fetch with different object ID in single pool
Use Cases:
Verify the objects are being created and the data is not
corrupted.
:avocado: tags=all,pr,hw,large,nvme_object_single_pool,nvme_object
"""
# perform multiple object writes to a single pool
test_runner(self, self.pool_size[0], self.record_size[:-1], 0,
self.array_size)
@avocado.fail_on(DaosApiError)
def test_nvme_object_multiple_pools(self):
"""Jira ID: DAOS-2087.
Test Description:
Test will create multiple pools on nvme using TestPool
Create large number of objects for each pool
Update/Fetch with different object ID in multiple pools
Use Cases:
Verify the objects are being created and the data is not
corrupted.
:avocado: tags=all,full_regression,hw,large,nvme_object_multiple_pools
:avocado: tags=nvme_object
"""
        # thread to perform simultaneous object writes to multiple pools
threads = []
index = 0
for size in self.pool_size[:-1]:
thread = threading.Thread(target=test_runner,
args=(self, size, self.record_size,
index, self.array_size))
threads.append(thread)
index += 1
# starting all the threads
for job in threads:
job.start()
# waiting for all threads to finish
for job in threads:
job.join()
# run the test_runner after cleaning up all the pools for
# very large nvme_pool size
# Uncomment the below line after DAOS-3339 is resolved
# test_runner(self, self.pool_size[2], self.record_size, index,
# self.array_size)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Julian Betz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
from os.path import (abspath, realpath, expanduser, dirname, basename, join,
splitext)
# Check for the proper executable.
PROJECT_ROOT = dirname(abspath(realpath(expanduser(__file__))))
VIRTUALENVS_DIR = realpath(join(PROJECT_ROOT, 'virtualenvs', 'py3'))
if not sys.executable.startswith(join(VIRTUALENVS_DIR, '')):
print('Activate the virtualenv in %r' % (VIRTUALENVS_DIR,),
file=sys.stderr)
sys.exit(1)
from typing import Optional, Tuple, Iterable
import click
import re
from fontTools.ttLib import TTFont, TTCollection
from fontTools.t1Lib import T1Font
UNREADABLE_FILE_TYPES = {'.pfa', '.pfb', '.gsf', '.pcf'}
def is_supporting_ttf(font: TTFont, character: int) -> bool:
"""Check whether the specified font supports the specified character.
:param font: A TrueType, OpenType, or Web Open Font Format 2 font.
:param character: The character to search for.
:return: ``True`` if ``character`` is supported in ``font``, ``False`` if
it is not.
"""
return any(table.isUnicode() and character in table.cmap
for table in font['cmap'].tables)
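# Illustrative usage (the font path below is an assumption, not part of this script):
#     font = TTFont('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf')
#     is_supporting_ttf(font, ord('A'))  # True if any Unicode cmap table maps 'A'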
def is_supporting_t1(font: T1Font, character: int) -> Optional[bool]:
"""Check whether the specified font supports the specified character.
:param font: A T1 font.
:param character: The character to search for.
    :return: ``True`` if ``character`` is found among the glyph names, or
        ``None`` if support cannot be determined (the absence of support is
        not detectable for T1 fonts).
"""
for key in font.getGlyphSet().keys():
if ((len(key) == 1 and character == ord(key))
or (re.fullmatch(r'uni[0-9A-F]+', key)
and character == int(key[3:], 16))):
return True
return None
def get_ttf_family(font: TTFont) -> Tuple[str, str]:
"""Return the family and subfamily of the specified font.
:param font: A TrueType, OpenType, or Web Open Font Format 2 font.
:return: The family and subfamily of the specified font.
"""
family, subfamily = '', ''
for name in font['name'].names:
if name.nameID == 1:
if family == '':
family = name.string.decode(name.getEncoding())
else:
break
elif name.nameID == 2 and family != '':
subfamily = name.string.decode(name.getEncoding())
break
return family, subfamily
def get_t1_family(file_path: str) -> Tuple[str, str]:
"""Return the family and subfamily of the specified font.
Family and subfamily are not looked up in the file, but are estimated from
the filename.
:param file_path: The path to a T1 font file.
:return: The family and subfamily of the font.
"""
name = splitext(basename(file_path))[0].rsplit('-', 1)
if len(name) == 2:
return tuple(name)
else:
return name[0], ''
def get_supporting_fonts(code_point: int) -> Iterable[Tuple[str, str]]:
"""Find all fonts that support the specified Unicode code point.
This recursively searches the directories ``/usr/share/fonts``,
``~/.local/share/fonts``, and ``~/.fonts`` for matching font files.
:param code_point: The Unicode code point of the character to search for
in the installed fonts.
:return: An iterable over the family-subfamily tuples of all fonts that
support the character corresponding to ``code_point``.
"""
for directory in ('/usr/share/fonts',
expanduser('~/.local/share/fonts'),
expanduser('~/.fonts')):
for dirpath, _, filenames in os.walk(directory, followlinks=True):
for filename in filenames:
file_path = join(dirpath, filename)
file_extension = splitext(filename)[1]
if file_extension in ('.ttf', '.otf', '.woff2'):
font = TTFont(file_path)
if is_supporting_ttf(font, code_point):
yield get_ttf_family(font)
elif file_extension == '.ttc':
font_collection = TTCollection(file_path)
for font in font_collection.fonts:
if is_supporting_ttf(font, code_point):
yield get_ttf_family(font)
elif file_extension == '.t1':
font = T1Font(file_path)
supporting = is_supporting_t1(font, code_point)
if supporting is None:
print('Unable to determine support in %s'
% (file_path,),
file=sys.stderr)
elif supporting:
yield get_t1_family(file_path)
elif (file_extension in UNREADABLE_FILE_TYPES
or (file_extension == '.gz'
and splitext(splitext(filename)[0])[1]
in UNREADABLE_FILE_TYPES)):
print('Skipping unreadable %s' % (file_path,),
file=sys.stderr)
@click.command()
@click.argument('character', type=str)
@click.option('-f/-c', '--fine/--coarse', default=True, show_default=True,
help='Whether to output font subfamilies in addition to '
'families.')
@click.option('-s', '--separator', type=str, default='\t', show_default=False,
help='The separator to use between font family and subfamily. '
'[default: TAB]')
@click.option('-z/-n', '--null/--newline', default=False, show_default=True,
help='Whether to use ASCII null or newline to separate names.')
def main(character: str, *, fine: bool, separator: str, null: bool) -> None:
"""Search for all fonts that support CHARACTER.
Supported font formats are TrueType (TTF/TTC), OpenType (OTF), and Web
Open Font Format 2 (WOFF2). The Type 1 (T1) font format is partially
supported: Only the support of a limited set of characters can be
detected, but not the lack of support of any character. Printer Font
ASCII (PFA), Printer Font Binary (PFB), X11 bitmap (PCF), and Ghostscript
Font (GSF) files are found, but not analyzed for support.\f
:param character: The character to search for.
:param fine: Whether to output font subfamilies in addition to families.
:param separator: The separator to use between font family and subfamily.
:param null: Whether to use ASCII null or newline to separate names.
"""
end = '\0' if null else '\n'
fonts = get_supporting_fonts(ord(character))
if fine:
for family, subfamily in sorted(set(fonts)):
print('%s%s%s' % (family, separator, subfamily), end=end)
else:
for family in sorted(set(font[0] for font in fonts)):
print(family, end=end)
if __name__ == '__main__':
main()
|
from ctypes.wintypes import *
from wintypes_extended import *
from winapi_error import *
import ctypes
import enum
MAX_PATH = 260
CW_USEDEFAULT = 0x80000000
def MAKELONG(wLow, wHigh):
return ctypes.c_long(wLow | wHigh << 16)
def MAKELPARAM(l, h):
return LPARAM(MAKELONG(l, h).value)
def LOWORD(l):
return WORD(l & 0xFFFF)
def HIWORD(l):
return WORD((l >> 16) & 0xFFFF)
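# Quick sanity check of the word-packing helpers above (illustrative values):
#     MAKELONG(0x0001, 0x0002).value == 0x00020001
#     LOWORD(0x00020001).value == 0x0001
#     HIWORD(0x00020001).value == 0x0002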
class RECT(ctypes.Structure):
_fields_ = [
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
LPRECT = ctypes.POINTER(RECT)
class ICONINFO(ctypes.Structure):
_fields_ = [
('fIcon', BOOL),
('xHotspot', DWORD),
('yHotspot', DWORD),
('hbmMask', HBITMAP),
('hbmColor', HBITMAP),
]
PICONINFO = ctypes.POINTER(ICONINFO)
class LOGFONTA(ctypes.Structure):
LF_FACESIZE = 32
_fields_ = [
('lfHeight', LONG),
('lfWidth', LONG),
('lfEscapement', LONG),
('lfOrientation', LONG),
('lfWeight', LONG),
('lfItalic', BYTE),
('lfUnderline', BYTE),
('lfStrikeOut', BYTE),
('lfCharSet', BYTE),
('lfOutPrecision', BYTE),
('lfClipPrecision', BYTE),
('lfQuality', BYTE),
('lfPitchAndFamily', BYTE),
('lfFaceName', CHAR * LF_FACESIZE)
]
LPLOGFONTA = ctypes.POINTER(LOGFONTA)
class NONCLIENTMETRICSA(ctypes.Structure):
_fields_ = [
('cbSize', UINT),
('iBorderWidth', ctypes.c_int),
('iScrollWidth', ctypes.c_int),
('iScrollHeight', ctypes.c_int),
('iCaptionWidth', ctypes.c_int),
('iCaptionHeight', ctypes.c_int),
('lfCaptionFont', LOGFONTA),
('iSmCaptionWidth', ctypes.c_int),
('iSmCaptionHeight', ctypes.c_int),
('lfSmCaptionFont', LOGFONTA),
('iMenuWidth', ctypes.c_int),
('iMenuHeight', ctypes.c_int),
('lfMenuFont', LOGFONTA),
('lfStatusFont', LOGFONTA),
('lfMessageFont', LOGFONTA),
('iPaddedBorderWidth', ctypes.c_int)
]
LPNONCLIENTMETRICSA = ctypes.POINTER(NONCLIENTMETRICSA)
class POINT(ctypes.Structure):
_fields_ = [
('x', LONG),
('y', LONG)
]
class MSG(ctypes.Structure):
_fields_ = [
('hwnd', HWND),
('message', UINT),
('wParam', WPARAM),
('lParam', LPARAM),
('time', DWORD),
('pt', POINT),
('lPrivate', DWORD)
]
LPMSG = ctypes.POINTER(MSG)
class WNDCLASSA(ctypes.Structure):
_fields_ = [
('style', UINT),
('lpfnWndProc', WNDPROC),
('cbClsExtra', ctypes.c_int),
('cbWndExtra', ctypes.c_int),
('hInstance', HINSTANCE),
('hIcon', HICON),
('hCursor', HCURSOR),
('hbrBackground', HBRUSH),
('lpszMenuName', LPCSTR),
('lpszClassName', LPCSTR)
]
LPWNDCLASSA = ctypes.POINTER(WNDCLASSA)
class ClassStyle(enum.IntFlag):
VREDRAW = 0x0001
HREDRAW = 0x0002
DBLCLKS = 0x0008
OWNDC = 0x0020
CLASSDC = 0x0040
PARENTDC = 0x0080
NOCLOSE = 0x0200
SAVEBITS = 0x0800
BYTEALIGNCLIENT = 0x1000
BYTEALIGNWINDOW = 0x2000
GLOBALCLASS = 0x4000
class WindowStyle(enum.IntFlag):
BORDER = 0x00800000
CAPTION = 0x00C00000
CHILD = 0x40000000
CHILDWINDOW = 0x40000000
CLIPCHILDREN = 0x02000000
CLIPSIBLINGS = 0x04000000
DISABLED = 0x08000000
DLGFRAME = 0x00400000
GROUP = 0x00020000
HSCROLL = 0x00100000
ICONIC = 0x20000000
MAXIMIZE = 0x01000000
MAXIMIZEBOX = 0x00010000
MINIMIZE = 0x20000000
MINIMIZEBOX = 0x00020000
OVERLAPPED = 0x00000000
POPUP = 0x80000000
SIZEBOX = 0x00040000
SYSMENU = 0x00080000
TABSTOP = 0x00010000
THICKFRAME = 0x00040000
TILED = 0x00000000
VISIBLE = 0x10000000
VSCROLL = 0x00200000
OVERLAPPEDWINDOW = OVERLAPPED | CAPTION | SYSMENU | THICKFRAME \
| MINIMIZEBOX | MAXIMIZEBOX
TILEDWINDOW = OVERLAPPEDWINDOW
POPUPWINDOW = POPUP | BORDER | SYSMENU
class GetWindowLong(enum.IntEnum):
EXSTYLE = -20
HINSTANCE = -6
HWNDPARENT = -8
ID = -12
STYLE = -16
USERDATA = -21
WNDPROC = -4
class WindowMessage(enum.IntEnum):
SETFOCUS = 0x0007
    KILLFOCUS = 0x0008
ENABLE = 0x000A
SETREDRAW = 0x000B
SETTEXT = 0x000C
SETFONT = 0x0030
GETFONT = 0x0031
GETTEXT = 0x000D
GETTEXTLENGTH = 0x000E
PAINT = 0x000F
CLOSE = 0x00010
QUIT = 0x0012
SHOWWINDOW = 0x0018
NULL = 0x0000
CREATE = 0x0001
DESTROY = 0x0002
MOVE = 0x0003
SIZE = 0x0005
ACTIVATE = 0x0006
COMMAND = 0x0111
NOTIFY = 0x004E
class ButtonStyle(enum.IntFlag):
PUSHBUTTON = 0x00000000
DEFPUSHBUTTON = 0x00000001
CHECKBOX = 0x00000002
AUTOCHECKBOX = 0x00000003
RADIOBUTTON = 0x00000004
_3STATE = 0x00000005
AUTO3STATE = 0x00000006
GROUPBOX = 0x00000007
USERBUTTON = 0x00000008
AUTORADIOBUTTON = 0x00000009
PUSHBOX = 0x0000000A
OWNERDRAW = 0x0000000B
TYPEMASK = 0x0000000F
LEFTTEXT = 0x00000020
TEXT = 0x00000000
ICON = 0x00000040
BITMAP = 0x00000080
LEFT = 0x00000100
RIGHT = 0x00000200
CENTER = 0x00000300
TOP = 0x00000400
BOTTOM = 0x00000800
VCENTER = 0x00000C00
PUSHLIKE = 0x00001000
MULTILINE = 0x00002000
NOTIFY = 0x00004000
FLAT = 0x00008000
RIGHTBUTTON = LEFTTEXT
class ListBoxStyle(enum.IntFlag):
NOTIFY = 0x0001
SORT = 0x0002
NOREDRAW = 0x0004
MULTIPLESEL = 0x0008
OWNERDRAWFIXED = 0x0010
OWNERDRAWVARIABLE = 0x0020
HASSTRINGS = 0x0040
USETABSTOPS = 0x0080
NOINTEGRALHEIGHT = 0x0100
MULTICOLUMN = 0x0200
WANTKEYBOARDINPUT = 0x0400
EXTENDEDSEL = 0x0800
DISABLENOSCROLL = 0x1000
NODATA = 0x2000
NOSEL = 0x4000
COMBOBOX = 0x8000
STANDARD = NOTIFY | SORT | WindowStyle.VSCROLL | WindowStyle.BORDER
GetDlgItem = ctypes.windll.user32.GetDlgItem
GetDlgItem.argtypes = [HWND, ctypes.c_int]
GetDlgItem.restype = HWND
GetDlgItem.errcheck = LPVOID_errcheck
RegisterClassA = ctypes.windll.user32.RegisterClassA
RegisterClassA.argtypes = [LPWNDCLASSA]
RegisterClassA.restype = ATOM
RegisterClassA.errcheck = LPVOID_errcheck
DefWindowProcA = ctypes.windll.user32.DefWindowProcA
DefWindowProcA.argtypes = [HWND, UINT, WPARAM, LPARAM]
DefWindowProcA.restype = LRESULT
CreateWindowExA = ctypes.windll.user32.CreateWindowExA
CreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int,
HWND, HMENU, HINSTANCE, LPVOID]
CreateWindowExA.restype = HWND
CreateWindowExA.errcheck = LPVOID_errcheck
ShowWindow = ctypes.windll.user32.ShowWindow
ShowWindow.argtypes = [HWND, ctypes.c_int]
ShowWindow.restype = BOOL
GetMessageA = ctypes.windll.user32.GetMessageA
GetMessageA.argtypes = [LPMSG, HWND, UINT, UINT]
GetMessageA.restype = BOOL
TranslateMessage = ctypes.windll.user32.TranslateMessage
TranslateMessage.argtypes = [LPMSG]
TranslateMessage.restype = BOOL
DispatchMessageA = ctypes.windll.user32.DispatchMessageA
DispatchMessageA.argtypes = [LPMSG]
DispatchMessageA.restype = LRESULT
PostQuitMessage = ctypes.windll.user32.PostQuitMessage
PostQuitMessage.argtypes = [ctypes.c_int]
PostQuitMessage.restype = None
DestroyWindow = ctypes.windll.user32.DestroyWindow
DestroyWindow.argtypes = [HWND]
DestroyWindow.restype = BOOL
DestroyWindow.errcheck = Win32API_errcheck
try:
    GetWindowLongPtrA = ctypes.windll.user32.GetWindowLongPtrA
except AttributeError:
    # On 32-bit Windows, GetWindowLongPtr is only a macro, so the export is missing.
    GetWindowLongPtrA = ctypes.windll.user32.GetWindowLongA
GetWindowLongPtrA.argtypes = [HWND, ctypes.c_int]
GetWindowLongPtrA.restype = LONG_PTR
GetWindowLongPtrA.errcheck = LPVOID_errcheck
try:
    SetWindowLongPtrA = ctypes.windll.user32.SetWindowLongPtrA
except AttributeError:
    # On 32-bit Windows, SetWindowLongPtr is only a macro, so the export is missing.
    SetWindowLongPtrA = ctypes.windll.user32.SetWindowLongA
SetWindowLongPtrA.argtypes = [HWND, ctypes.c_int, LONG_PTR]
SetWindowLongPtrA.restype = LONG_PTR
SetWindowLongPtrA.errcheck = LPVOID_errcheck
EnumChildWindows = ctypes.windll.user32.EnumChildWindows
EnumChildWindows.argtypes = [HWND, WNDENUMPROC, LPARAM]
EnumChildWindows.restype = BOOL
SystemParametersInfoA = ctypes.windll.user32.SystemParametersInfoA
SystemParametersInfoA.argtypes = [UINT, UINT, LPVOID, UINT]
SystemParametersInfoA.restype = BOOL
SystemParametersInfoA.errcheck = Win32API_errcheck
SendMessageA = ctypes.windll.user32.SendMessageA
SendMessageA.argtypes = [HWND, UINT, WPARAM, LPARAM]
SendMessageA.restype = LRESULT
GetClientRect = ctypes.windll.user32.GetClientRect
GetClientRect.argtypes = [HWND, LPRECT]
GetClientRect.restype = BOOL
GetClientRect.errcheck = Win32API_errcheck
LoadImageA = ctypes.windll.user32.LoadImageA
LoadImageA.argtypes = [HINSTANCE, LPCSTR, UINT, INT, INT, UINT]
LoadImageA.restype = HANDLE
LoadImageA.errcheck = LPVOID_errcheck
LoadIconA = ctypes.windll.user32.LoadIconA
LoadIconA.argtypes = [HINSTANCE, LPCSTR]
LoadIconA.restype = HICON
LoadIconA.errcheck = LPVOID_errcheck
GetIconInfo = ctypes.windll.user32.GetIconInfo
GetIconInfo.argtypes = [HICON, PICONINFO]
GetIconInfo.restype = BOOL
GetIconInfo.errcheck = Win32API_errcheck
|
#!/usr/bin/python
from turtle import *
'''draws stars from input, formula
internal angle = 180 / number of points
turtle has to turn to the right such that the internal angle is
internalAngle, so has to turn 180 - internalAngle
for a 5 pointer
internal angle = 180 / 5
and therefore turn angle is
180 - 36 = 144
also
setheading is set to half the internal angle to prevent lopsidedness!
'''
shape('turtle')
mode('logo')
color('magenta', 'cyan')
points = int(input('How many points would you like on your star, enter an odd number greater than 1: '))
internalAngle = 180/points
turn = 180 - internalAngle
initialHeading = internalAngle/2
setheading(initialHeading)
for i in range(points):
fd(200)
rt(turn)
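# Keep the drawing window open after the star is finished; this call is an assumed
# convenience for running the script from the command line and was not in the original.
done()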
|
from contas.conta import Bank
class Corrente(Bank):
def __init__(self, nome, idade, saldo, numero, limite=200):
super().__init__(nome, idade, saldo, numero, limite)
def depositar(self, valor):
if not isinstance(valor, (int, float)):
raise ValueError('Valor do deposito precisa ser numerico.')
self.saldo += valor
print(f'Conta: {self.numero}, Usuario {self.nome}, Depositando: R$ {valor}')
def sacar(self, valor):
if valor <= self.limite:
self.saldo -= valor
print(f'Conta: {self.numero}, Usuario {self.nome} Sacando: R$ {valor}')
else:
print(f'Conta: {self.numero}, Usuario {self.nome} no momento sem limite de saque')
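# Illustrative usage (argument order taken from __init__ above; the Bank base class
# behaviour is assumed):
#     conta = Corrente('Maria', 30, saldo=500, numero=1234)
#     conta.depositar(100)
#     conta.sacar(150)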
|
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from djaesy.menu import Menu, MenuItem
from djaesy.models import Role, User
if settings.DJAESY_USER_MENU:
Menu.add_item(
"main",
MenuItem(
_('Usuários'), 'user_list', no_link=True, icon='mdi mdi-account-multiple',
children=[
MenuItem(title=_('Usuários'), url='user_list', icon='mdi mdi-account-multiple'),
MenuItem(title=_('Perfis'), url='user_role_list', icon='mdi mdi-account-outline'),
]
),
)
|
if __name__ == '__main__':
s = input()
    print(any(char.isalnum() for char in s))
    print(any(char.isalpha() for char in s))
    print(any(char.isdigit() for char in s))
    print(any(char.islower() for char in s))
    print(any(char.isupper() for char in s))
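    # For example (illustrative): the input "qA2" prints True on all five lines, since it
    # contains an alphanumeric, an alphabetic, a digit, a lowercase and an uppercase character.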
|
import re
from .post_model import Post, posts_list, UpdateError
from .user_model import User, users_list
email_pattern = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
password_pattern = re.compile(r"(?=^.{12,80}$)(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^;*()_+}{:'?/.,])(?!.*\s).*$")
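# Illustrative checks (not part of the original module):
#     email_pattern.match("user@example.com")   # matches
#     password_pattern.match("Short1!")         # None: shorter than the required 12 characters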
|
# Test code for find_pairs_pt.py
import pytest
import find_pairs_pt as fp
def test_no_pairs():
test_array = [9]
response = []
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_one_pair():
test_array = [1,9]
response = [(1,9)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
# Same thing, order reversed
test_array = [9,1]
response = [(9,1)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_values_to_skip():
test_array = [9,1,6]
response = [(9,1)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_use_both_end_values():
test_array = [9,6,1]
response = [(9,1)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_avoid_repeated_single_value():
test_array = [5]
response = []
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_repeated_values():
test_array = [5,5]
response = [(5,5)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_multiple_pairs():
test_array = [9,6,1,4,7]
response = [(9,1), (6,4)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_multiple_pairs_repeated_value():
test_array = [9,6,1,4,7,1]
response = [(9,1), (9,1), (6,4)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
def test_alternate_target():
test_array = [1,3,7,5,9]
target_value = 14
response = [(5,9)]
assert fp.find_pairs_simple(test_array, target_value) == response
assert fp.find_pairs(test_array, target_value) == response
def test_negative_values():
test_array = [13,-3,7,5,9]
response = [(13,-3)]
assert fp.find_pairs_simple(test_array) == response
assert fp.find_pairs(test_array) == response
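# Note on the implied contract (inferred from the cases above): find_pairs_simple and
# find_pairs take a list plus an optional target sum that defaults to 10, and return the
# pairs, in encounter order, whose two values add up to that target.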
|
#!/usr/local/bin/python
import pathlib
import sys
import subprocess
p = pathlib.Path(sys.argv[1])
sys.exit(subprocess.call(["node", p.name], cwd=p.parent))
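# Usage (illustrative): pass the path of a JavaScript file as the only argument; it is
# executed with `node`, using the file's directory as the working directory.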
|
from netapp.connection import NaConnection
from quota_entry import QuotaEntry # 14 properties
from quota_state_zapi import QuotaStateZapi # 0 properties
from size_or_dash import SizeOrDash # 0 properties
from unsigned64_or_dash import Unsigned64OrDash # 0 properties
from true_false import TrueFalse # 0 properties
from quota_error_msg import QuotaErrorMsg # 0 properties
from qtree_name import QtreeName # 0 properties
from quota import Quota # 14 properties
from error import Error # 3 properties
from quota_info import QuotaInfo # 13 properties
from quota_report_iter_key_td import QuotaReportIterKeyTd # 2 properties
from quota_list_entries_iter_key_td import QuotaListEntriesIterKeyTd # 6 properties
from quota_status_iter_key_td import QuotaStatusIterKeyTd # 1 properties
from quota_user import QuotaUser # 3 properties
from quota_status_attributes import QuotaStatusAttributes # 7 properties
from quota_error import QuotaError # 3 properties
class QuotaConnection(NaConnection):
def quota_status_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
"""
Iterate over quota status for all volumes in the cluster
:param max_records: The maximum number of records to return in this response.
:param query: A query that specifies which quota status attributes need to be
returned. A query could be specified on any number of attributes in
the quota status object. All quota status objects matching this query
up to 'max-records' will be returned.
:param tag: Specify the tag from the previous iteration. It is usually not
specified for the first iteration. For subsequent iterations,
copy the value from the 'next-tag' obtained from the previous
iteration.
:param desired_attributes: Specify the attributes that should be returned in the quota status
object. If not present, all attributes for which information is
available will be returned. If present, only the desired attributes
for which information is available will be returned.
"""
return self.request( "quota-status-iter", {
'max_records': max_records,
'query': [ query, 'query', [ QuotaStatusAttributes, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ QuotaStatusAttributes, 'None' ], False ],
}, {
'attributes-list': [ QuotaStatusAttributes, True ],
} )
def quota_modify_entry(self, qtree, quota_type, volume, quota_target, soft_file_limit=None, disk_limit=None, perform_user_mapping=None, threshold=None, soft_disk_limit=None, policy=None, file_limit=None):
"""
        Modifies a quota entry. If the type, target, volume, and
tree exist, the entry is modified. If the type, target,
volume, and tree do not exist, then an error is returned.
:param qtree: This is the qtree name that the quota resides on. For user or
group rules, it can be the qtree name or "" if no qtree. For
tree type rules, this field must be "".
:param quota_type: The type of quota rule. Possible values are "user", "group",
or "tree".
:param volume: This is the volume name that the quota resides on.
:param quota_target: This is the quota target of the type specified. The target
can be of the form:
<name>, <number>, or <path name>.
Multiple targets can be specified by a comma-separated list.
Path should be entered in a format that starts with the
following "/vol/< volume name >/". For explicit tree rules,
the qtree should be specified as
"/vol/< volume name >/ < qtree name >"
:param soft_file_limit: This is the number of files the target would have to exceed
before a message is logged and an SNMP trap is generated.
Set the value to "-" if the limit is to be unlimited.
Default is the current value.
        :param disk_limit: This is the amount of disk space that is reserved for
            the target. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
Default is the current value.
:param perform_user_mapping: If the value is true, quota management will perform user
mapping for the user specified in quota-target. Only valid for
user quotas when the quota-target refers to a Windows/UNIX user
name. Not valid for multiple user targets.
Default is the current value.
:param threshold: This is the amount of disk space the target would have to
exceed before a message is logged. The value is expressed
in kilobytes (1024). Set the value to "-" if the limit is
to be unlimited. Default is the current value.
:param soft_disk_limit: This is the amount of disk space the target would have to
exceed before a message is logged and an SNMP trap is
generated. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
Default is the current value.
:param policy: Name of the quota policy in which the quota rule should be
modified. If this field is not provided, then the current
policy that has been assigned to the vserver will be used.
:param file_limit: This is the number of files that the target can have.
Set the value to "-" if the limit is to be unlimited.
Default is the current value.
"""
return self.request( "quota-modify-entry", {
'qtree': [ qtree, 'qtree', [ basestring, 'None' ], False ],
'soft_file_limit': [ soft_file_limit, 'soft-file-limit', [ basestring, 'None' ], False ],
'quota_type': [ quota_type, 'quota-type', [ basestring, 'None' ], False ],
'disk_limit': [ disk_limit, 'disk-limit', [ basestring, 'None' ], False ],
'perform_user_mapping': [ perform_user_mapping, 'perform-user-mapping', [ bool, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'threshold': [ threshold, 'threshold', [ basestring, 'None' ], False ],
'soft_disk_limit': [ soft_disk_limit, 'soft-disk-limit', [ basestring, 'None' ], False ],
'policy': [ policy, 'policy', [ basestring, 'None' ], False ],
'quota_target': [ quota_target, 'quota-target', [ basestring, 'None' ], False ],
'file_limit': [ file_limit, 'file-limit', [ basestring, 'None' ], False ],
}, {
} )
def quota_list_entries_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
"""
Iterate over the list of quota rules in the cluster.
:param max_records: The maximum number of records to return in this response.
:param query: A query that specifies which quota rules need to be returned.
A query could be specified on any number of attributes in the
quota-entry object. All quota entry objects matching this query
up to 'max-records' will be returned.
:param tag: Specify the tag from the previous iteration. It is usually not
specified for the first iteration. For subsequent iterations,
copy the value from the 'next-tag' obtained from the previous
iteration.
:param desired_attributes: Specify the attributes that should be returned in the quota-entry
object. If not present, all attributes for which information is
available will be returned. If present, only the desired
attributes for which information is available will be returned.
"""
return self.request( "quota-list-entries-iter", {
'max_records': max_records,
'query': [ query, 'query', [ QuotaEntry, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ QuotaEntry, 'None' ], False ],
}, {
'attributes-list': [ QuotaEntry, True ],
} )
def quota_list_entries_iter_start(self, include_output_entry=None):
"""
Starts an iteration through the list of quotas entries
in /etc/quotas.
:param include_output_entry: If specified and true, the entire quota entry is placed in
            the <line> output elements.
"""
return self.request( "quota-list-entries-iter-start", {
'include_output_entry': [ include_output_entry, 'include-output-entry', [ bool, 'None' ], False ],
}, {
'records': [ int, False ],
'tag': [ basestring, False ],
} )
def quota_status(self, volume):
"""
Obtains the status of quotas
:param volume: Name of the volume whose quota status should be obtained.
"""
return self.request( "quota-status", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
}, {
'status': [ basestring, False ],
'percent-complete': [ int, False ],
'reason': [ basestring, False ],
'substatus': [ basestring, False ],
'quota-errors': [ basestring, False ],
} )
def quota_report(self, volume=None, path=None):
"""
Returns a report on all quotas.
:param volume: If provided, the report will contain
only quotas on the specified volume name.
The name should not contain a "/vol/" prefix.
:param path: If specified, the report will contain only quotas that
apply to the specified path name. The path should
start with "/vol/<volumename>", although paths without
the "/vol" prefix will work and will be assumed to be
in the root volume.
"""
return self.request( "quota-report", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'path': [ path, 'path', [ basestring, 'None' ], False ],
}, {
'quotas': [ Quota, True ],
'error': [ Error, False ],
} )
def quota_report_iter_start(self, volume=None, path=None):
"""
Generates a report on quotas, the results of which
are retrieved by using quota-report-iter-next.
:param volume: Name of a volume. If specified, the report
will contain only quotas on the specified volume.
:param path: A path (including a /vol/<volumename> prefix).
If specified, the report will contain only quotas that
apply to the specified path name.
"""
return self.request( "quota-report-iter-start", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'path': [ path, 'path', [ basestring, 'None' ], False ],
}, {
'records': [ int, False ],
'tag': [ basestring, False ],
'error': [ Error, False ],
} )
def quota_get_entry(self, volume, quota_target, qtree, quota_type, policy=None):
"""
Obtains a quota entry specified by type, target, volume,
and tree.
:param volume: Name of the volume for the quota.
:param quota_target: The quota target of the type specified. Possible
values are: <name>, <number>, or <path name>.
Multiple targets can be specified by a comma-separated list.
Path should be entered in a format that starts with the
following "/vol/< volume name >/". For explicit tree rules,
the qtree should be specified as
"/vol/< volume name >/ < qtree name >"
:param qtree: Name of the qtree for the quota. For user or group rules, it
can be the qtree name or "" if no qtree. For tree type rules,
this field must be "".
:param quota_type: The type of quota rule. Possible values are "user", "group",
or "tree".
:param policy: Name of the quota policy from which the quota rule should be
obtained. If this field is not provided, then the current
policy that has been assigned to the vserver will be used.
"""
return self.request( "quota-get-entry", {
'policy': [ policy, 'policy', [ basestring, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'quota_target': [ quota_target, 'quota-target', [ basestring, 'None' ], False ],
'qtree': [ qtree, 'qtree', [ basestring, 'None' ], False ],
'quota_type': [ quota_type, 'quota-type', [ basestring, 'None' ], False ],
}, {
'soft-file-limit': [ basestring, False ],
'disk-limit': [ basestring, False ],
'quota-error': [ QuotaError, False ],
'perform-user-mapping': [ bool, False ],
'soft-disk-limit': [ basestring, False ],
'threshold': [ basestring, False ],
'file-limit': [ basestring, False ],
} )
def quota_delete_entry(self, volume, quota_target, qtree, quota_type, policy=None):
"""
Deletes a quota entry specified by type, target, volume,
and tree.
:param volume: Name of the volume for the quota.
:param quota_target: The quota target of the type specified. Possible
values are: <name>, <number>, or <path name>.
Multiple targets can be specified by a comma-separated list.
Path should be entered in a format that starts with the
following "/vol/< volume name >/". For explicit tree rules,
the qtree should be specified as
"/vol/< volume name >/ < qtree name >"
:param qtree: Name of the qtree for the quota. For user or group rules, it
can be the qtree name or "" if no qtree. For tree type rules,
this field must be "".
:param quota_type: The type of quota rule. Possible values are "user", "group",
or "tree".
:param policy: Name of the quota policy in which the quota rule should be
deleted. If this field is not provided, then the current policy
that has been assigned to the vserver will be used.
"""
return self.request( "quota-delete-entry", {
'policy': [ policy, 'policy', [ basestring, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'quota_target': [ quota_target, 'quota-target', [ basestring, 'None' ], False ],
'qtree': [ qtree, 'qtree', [ basestring, 'None' ], False ],
'quota_type': [ quota_type, 'quota-type', [ basestring, 'None' ], False ],
}, {
} )
def quota_set_entry(self, qtree, quota_type, volume, quota_target, soft_file_limit=None, disk_limit=None, perform_user_mapping=None, threshold=None, soft_disk_limit=None, policy=None, file_limit=None):
"""
Sets a quota entry. If the type, target, volume, and
tree do not exist, a new entry is created. If the type,
target, volume, and tree exist, then the entry is modified.
:param qtree: Name of the qtree for the quota. For user or group rules, it
can be the qtree name or "" if no qtree. For tree type rules,
this field must be "".
:param quota_type: The type of quota rule. Possible values are "user", "group",
or "tree".
:param volume: Name of the volume for the quota.
:param quota_target: The quota target of the type specified. Possible values are
<name>, <number>, or <path name>.
Multiple targets can be specified by a comma-separated list.
Path should be entered in a format that starts with the
following "/vol/< volume name >/". For explicit tree rules,
the qtree should be specified as
"/vol/< volume name >/ < qtree name >"
:param soft_file_limit: The number of files the target would have to exceed
before a message is logged and an SNMP trap is generated.
Set the value to "-" if the limit is to be unlimited.
        :param disk_limit: The amount of disk space that is reserved for
            the target. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
:param perform_user_mapping: If the value is true, quota management will perform user
mapping for the user specified in quota-target. Only valid for
user quotas when the quota-target refers to a Windows/UNIX user
name. Not valid for multiple user targets.
Default is false.
:param threshold: The amount of disk space the target would have to
exceed before a message is logged. The value is expressed
in kilobytes (1024). Set the value to "-" if the limit is
to be unlimited.
:param soft_disk_limit: The amount of disk space the target would have to
exceed before a message is logged and an SNMP trap is
generated. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
:param policy: Name of the quota policy in which the quota rule should be
set. If this field is not provided, then the current policy
that has been assigned to the vserver will be used.
:param file_limit: The number of files that the target can have.
Set the value to "-" if the limit is to be unlimited.
"""
return self.request( "quota-set-entry", {
'qtree': [ qtree, 'qtree', [ basestring, 'None' ], False ],
'soft_file_limit': [ soft_file_limit, 'soft-file-limit', [ basestring, 'None' ], False ],
'quota_type': [ quota_type, 'quota-type', [ basestring, 'None' ], False ],
'disk_limit': [ disk_limit, 'disk-limit', [ basestring, 'None' ], False ],
'perform_user_mapping': [ perform_user_mapping, 'perform-user-mapping', [ bool, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'threshold': [ threshold, 'threshold', [ basestring, 'None' ], False ],
'soft_disk_limit': [ soft_disk_limit, 'soft-disk-limit', [ basestring, 'None' ], False ],
'policy': [ policy, 'policy', [ basestring, 'None' ], False ],
'quota_target': [ quota_target, 'quota-target', [ basestring, 'None' ], False ],
'file_limit': [ file_limit, 'file-limit', [ basestring, 'None' ], False ],
}, {
} )
def quota_report_iter(self, max_records=None, path=None, tag=None, desired_attributes=None, query=None):
"""
Iterate over the quota report in the cluster.
:param max_records: The maximum number of records to return in this response.
:param path: A path (including a /vol/<volumename> prefix).
If specified, the report will contain only quotas that
apply to the specified path name.
:param tag: Specify the tag from the previous iteration. It is usually not
specified for the first iteration. For subsequent iterations,
copy the value from the 'next-tag' obtained from the previous
iteration.
:param desired_attributes: Specify the attributes that should be returned in the quota
report object. If not present, all attributes for which
information is available will be returned. If present, only
the desired attributes for which information is available
will be returned.
:param query: A query that specifies which quota report needs to be returned.
A query could be specified on any number of attributes in the
quota report object. All quota report objects matching this
query up to 'max-records' will be returned.
"""
return self.request( "quota-report-iter", {
'max_records': max_records,
'path': [ path, 'path', [ basestring, 'None' ], False ],
'tag': tag,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ Quota, 'None' ], False ],
'query': [ query, 'query', [ Quota, 'None' ], False ],
}, {
'attributes-list': [ Quota, True ],
} )
def quota_off(self, volume):
"""
Turns the quota subsystem off for a volume.
<p>
For clustered volumes, a jobid will also be returned.
The progress of the job can be tracked using the job APIs.
:param volume: Name of the volume on which to turn quotas off.
"""
return self.request( "quota-off", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
def quota_list_entries_iter_end(self, tag):
"""
Terminate a list iteration and clean up any saved info.
:param tag: Tag from a previous quota-list-entries-iter-start.
"""
return self.request( "quota-list-entries-iter-end", {
'tag': tag,
}, {
} )
def quota_report_iter_next(self, tag, maximum):
"""
Returns items from a previous call to quota-report-iter-start
:param tag: Tag from a previous quota-report-iter-start.
:param maximum: The maximum number of entries to retrieve.
"""
return self.request( "quota-report-iter-next", {
'tag': tag,
'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
}, {
'records': [ int, False ],
'quotas': [ QuotaInfo, True ],
} )
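    # Illustrative note on the tag-based protocol above: quota_report_iter_start returns a
    # 'tag' (and a 'records' count) that is passed to quota_report_iter_next together with a
    # 'maximum' page size until all records have been consumed; quota_list_entries_iter_start /
    # _next / _end follow the same pattern. How the returned structure is unpacked depends on
    # NaConnection.request and is not assumed here.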
def quota_add_entry(self, qtree, quota_type, volume, quota_target, soft_file_limit=None, disk_limit=None, perform_user_mapping=None, threshold=None, soft_disk_limit=None, policy=None, file_limit=None):
"""
Adds a quota entry. If the type, target, volume, and
tree do not exist, a new entry is created. If the type,
target, volume, and tree exist, then an error is returned.
:param qtree: This is the qtree name that the quota resides on. For user or
group rules, it can be the qtree name or "" if no qtree. For
tree type rules, this field must be "".
:param quota_type: The type of quota rule. Possible values are "user", "group",
or "tree".
:param volume: This is the volume name that the quota resides on.
:param quota_target: This is the quota target of the type specified. The target
can be of the form:
<name>, <number>, or <path name>.
Multiple targets can be specified by a comma-separated list.
Path should be entered in a format that starts with the
following "/vol/< volume name >/". For explicit tree rules,
the qtree should be specified as
"/vol/< volume name >/ < qtree name >"
:param soft_file_limit: This is the number of files the target would have to exceed
before a message is logged and an SNMP trap is generated.
Set the value to "-" if the limit is to be unlimited.
Default is unlimited.
        :param disk_limit: This is the amount of disk space that is reserved for
            the target. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
Default is unlimited.
:param perform_user_mapping: If the value is true, quota management will perform user
mapping for the user specified in quota-target. Only valid for
user quotas when the quota-target refers to a Windows/UNIX user
name. Not valid for multiple user targets.
Default is false.
:param threshold: This is the amount of disk space the target would have to
exceed before a message is logged. The value is expressed
in kilobytes (1024). Set the value to "-" if the limit is
to be unlimited. Default is unlimited.
:param soft_disk_limit: This is the amount of disk space the target would have to
exceed before a message is logged and an SNMP trap is
generated. The value is expressed in kilobytes (1024).
Set the value to "-" if the limit is to be unlimited.
Default is unlimited.
:param policy: Name of the quota policy in which the quota rule should be
added. If this field is not provided, then the current policy
that has been assigned to the vserver will be used.
:param file_limit: This is the number of files that the target can have.
Set the value to "-" if the limit is to be unlimited.
Default is unlimited.
"""
return self.request( "quota-add-entry", {
'qtree': [ qtree, 'qtree', [ basestring, 'None' ], False ],
'soft_file_limit': [ soft_file_limit, 'soft-file-limit', [ basestring, 'None' ], False ],
'quota_type': [ quota_type, 'quota-type', [ basestring, 'None' ], False ],
'disk_limit': [ disk_limit, 'disk-limit', [ basestring, 'None' ], False ],
'perform_user_mapping': [ perform_user_mapping, 'perform-user-mapping', [ bool, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'threshold': [ threshold, 'threshold', [ basestring, 'None' ], False ],
'soft_disk_limit': [ soft_disk_limit, 'soft-disk-limit', [ basestring, 'None' ], False ],
'policy': [ policy, 'policy', [ basestring, 'None' ], False ],
'quota_target': [ quota_target, 'quota-target', [ basestring, 'None' ], False ],
'file_limit': [ file_limit, 'file-limit', [ basestring, 'None' ], False ],
}, {
} )
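# Hedged usage sketch (comment only): `server` stands in for an instance of the
# enclosing ONTAP API client class, which is not shown in this excerpt, and the
# target/limit values are illustrative.
#
#   server.quota_add_entry(qtree='', quota_type='user', volume='vol0',
#                          quota_target='jdoe', disk_limit='10485760',
#                          file_limit='-')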
def quota_list_entries(self, include_output_entry=None):
"""
Returns quota entries from the /etc/quotas file.
:param include_output_entry: If specified and true, the raw quota entry is placed in
the <line> output element.
"""
return self.request( "quota-list-entries", {
'include_output_entry': [ include_output_entry, 'include-output-entry', [ bool, 'None' ], False ],
}, {
'quota-entries': [ QuotaEntry, True ],
} )
def quota_resize(self, volume):
"""
Starts an ONTAP operation to resize quotas for a volume.
A successful return from this API does not mean that
the operation has finished, merely that an attempt
to start it has been triggered.
Use the quota-status API to check the status.
For clustered volumes, a jobid will also be returned.
The progress of the job can be tracked using the job APIs.
:param volume: Name of the volume on which to resize quotas.
"""
return self.request( "quota-resize", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
def quota_on(self, volume):
"""
Starts to turn quotas on for a volume.
A successful return from this API does not mean that
quotas are on, merely that an attempt to start it has
been triggered. Use the quota-status API to check
the status.
For clustered volumes, a jobid will also be returned.
The progress of the job can be tracked using the job APIs.
:param volume: Name of the volume on which to enable quotas.
"""
return self.request( "quota-on", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
def quota_list_entries_iter_next(self, tag, maximum):
"""
Continues an iteration through the list of quotas.
:param tag: Tag from a previous quota-list-entries-iter-start.
:param maximum: The maximum number of entries to retrieve.
"""
return self.request( "quota-list-entries-iter-next", {
'tag': tag,
'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
}, {
'records': [ int, False ],
'quota-entries': [ QuotaEntry, True ],
} )
def quota_report_iter_end(self, tag):
"""
Terminate a list iteration and clean up any saved info.
:param tag: Tag from a previous quota-report-iter-start.
"""
return self.request( "quota-report-iter-end", {
'tag': tag,
}, {
} )
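# Hedged sketch of the tag-based iteration pattern shared by the *-iter APIs above
# (quota-report-iter-start is defined elsewhere in this class; `server` and the batch
# handling are assumptions, shown as comments only):
#
#   tag = ...                          # obtained from a quota-report-iter-start call
#   while True:
#       batch = server.quota_report_iter_next(tag, 50)
#       ...                            # stop once the batch reports zero records
#   server.quota_report_iter_end(tag)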
|
"""
System tests for execute updated policies
"""
from test_repo.autoscale.fixtures import AutoscaleFixture
from time import sleep
class ExecuteUpdatedPoliciesTest(AutoscaleFixture):
"""
System tests to verify execute updated scaling policies scenarios,
such that each policy executes after policy cooldown is met
"""
def setUp(self):
"""
Create a scaling group with min entities>0, scale up with cooldown=1 second
and execute the policy
"""
super(ExecuteUpdatedPoliciesTest, self).setUp()
self.cooldown = 1
self.create_group_response = self.autoscale_behaviors.create_scaling_group_given(
gc_min_entities=self.gc_min_entities_alt,
gc_cooldown=0)
self.group = self.create_group_response.entity
self.policy_up = {'change': 2, 'cooldown': self.cooldown}
self.policy = self.autoscale_behaviors.create_policy_webhook(
group_id=self.group.id,
policy_data=self.policy_up,
execute_policy=True)
self.resources.add(self.group.id,
self.autoscale_client.delete_scaling_group)
def tearDown(self):
"""
Emptying the scaling group by updating minentities=maxentities=0,
which is then deleted by the Autoscale fixture's teardown
"""
super(ExecuteUpdatedPoliciesTest, self).tearDown()
self.empty_scaling_group(self.group)
def test_system_update_policy_from_change_to_desired_capacity_scale_down(self):
"""
Update the existing scale up policy from change to desired capacity,
dc set to minentities so that the policy when executed scales down
"""
upd_desired_capacity = self.group.groupConfiguration.minEntities
sleep(self.cooldown)
upd_policy_to_desired_capacity_execute = self._update_execute_policy_dc(
self.group.id,
self.policy['policy_id'], upd_desired_capacity)
self.assertEquals(upd_policy_to_desired_capacity_execute, 202,
msg='Executing the updated policy with desired capacity failed with {0}'
' for group {1}'
.format(upd_policy_to_desired_capacity_execute, self.group.id))
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.minEntities)
def test_system_update_policy_from_change_to_desired_capacity_scale_up(self):
"""
Update the existing scale up policy from change to desired capacity,
such that the policy when executed scales up
"""
upd_desired_capacity = self.group.groupConfiguration.minEntities + \
self.policy_up['change'] + 1
sleep(self.cooldown)
upd_policy_to_desired_capacity_execute = self._update_execute_policy_dc(
self.group.id,
self.policy['policy_id'], upd_desired_capacity)
self.assertEquals(upd_policy_to_desired_capacity_execute, 202,
msg='Executing the updated policy with desired capacity failed with {0}'
' for group {1}'
.format(upd_policy_to_desired_capacity_execute, self.group.id))
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=upd_desired_capacity)
def test_system_update_policy_desired_capacity(self):
"""
Update a scale-up policy (via 'change') to a scale-down policy via 'desiredCapacity',
with desiredCapacity set below minentities, and execute the policy
(results in active servers = minentities).
Then update the desired capacity to be greater than maxentities and execute
(results in active servers = maxentities in the scaling group).
"""
upd_desired_capacity = self.group.groupConfiguration.minEntities - 1
sleep(self.cooldown)
upd_policy_to_desired_capacity_execute = self._update_execute_policy_dc(
self.group.id,
self.policy['policy_id'], upd_desired_capacity)
self.assertEquals(upd_policy_to_desired_capacity_execute, 202,
msg='Executing the updated policy with desired capacity failed with {0}'
' for group {1}'
.format(upd_policy_to_desired_capacity_execute, self.group.id))
upd_desired_capacity = self.group.groupConfiguration.maxEntities + 1
sleep(self.cooldown)
upd_policy_to_desired_capacity_execute = self._update_execute_policy_dc(
self.group.id,
self.policy['policy_id'], upd_desired_capacity)
self.assertEquals(upd_policy_to_desired_capacity_execute, 202,
msg='Executing the updated policy with desired capacity failed with {0}'
' for group {1}'
.format(upd_policy_to_desired_capacity_execute, self.group.id))
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.maxEntities)
def test_system_update_scale_up_to_scale_down(self):
"""
Update a scale up policy to scale down by the same change and execute
such a policy to result in active servers = minentities on the scaling group
"""
change = - self.policy_up['change']
sleep(self.cooldown)
upd_to_scale_down_execute = self._update_execute_policy(
self.group.id,
self.policy['policy_id'], change)
self.assertEquals(upd_to_scale_down_execute, 202,
msg='Executing the updated scale down policy failed with {0} for group {1}'
.format(upd_to_scale_down_execute, self.group.id))
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.minEntities)
def test_system_update_minentities_and_scale_down(self):
"""
Create a scaling group with min entities > 0, scale up (setup)
update the group config so that new_minentities = 1, verify active servers = minentities + scale-up change.
Then execute an updated scale-down policy and verify the group scales down to new_minentities.
"""
new_minentities = 1
self.autoscale_client.update_group_config(
group_id=self.group.id,
name=self.group.groupConfiguration.name,
cooldown=self.group.groupConfiguration.cooldown,
min_entities=new_minentities,
max_entities=self.group.groupConfiguration.maxEntities,
metadata={})
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.minEntities + self.policy_up['change'])
change = - (self.policy_up[
'change'] + self.group.groupConfiguration.minEntities) + 1
sleep(self.cooldown)
upd_to_scale_down_execute = self._update_execute_policy(
self.group.id,
self.policy['policy_id'], change)
self.assertEquals(upd_to_scale_down_execute, 202,
msg='Executing the updated scale down policy failed with {0} for group {1}'
.format(upd_to_scale_down_execute, self.group.id))
self.autoscale_behaviors.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=new_minentities)
def _update_execute_policy_dc(self, group_id, policy_id, policy_data):
"""
Updates any given policy to desired capacity and executes the policy.
Returns the response code of the updated policy's execution
"""
get_policy = self.autoscale_client.get_policy_details(
group_id, policy_id)
policy_b4_update = get_policy.entity
self.autoscale_client.update_policy(
group_id=group_id,
policy_id=policy_id,
name=policy_b4_update.name,
cooldown=policy_b4_update.cooldown,
desired_capacity=policy_data,
policy_type=policy_b4_update.type)
execute_upd_policy = self.autoscale_client.execute_policy(
group_id, policy_id)
return execute_upd_policy.status_code
def _update_execute_policy(self, group_id, policy_id, policy_data):
"""
Updates any given policy to change and executes the policy.
Returns the response code of the updated policy's execution
"""
get_policy = self.autoscale_client.get_policy_details(
group_id, policy_id)
policy_b4_update = get_policy.entity
self.autoscale_client.update_policy(
group_id=group_id,
policy_id=policy_id,
name=policy_b4_update.name,
cooldown=policy_b4_update.cooldown,
change=policy_data,
policy_type=policy_b4_update.type)
execute_upd_policy = self.autoscale_client.execute_policy(
group_id, policy_id)
return execute_upd_policy.status_code
|
import numpy as np
import pytest
from ml_utils.metrics import lift_score, confusion_matrix, sorted_feature_importance
from ml_utils.metrics.metrics import MetricError
# noinspection PyTypeChecker
def test_lift_score_fails_if_passed_non_ndarray():
with pytest.raises(MetricError):
lift_score([1, 2, 3], [4, 5, 6])
def test_lift_score_returns_correctly():
y_targ = np.array([1, 1, 1, 0, 0, 2, 0, 3, 4])
y_pred = np.array([1, 0, 1, 0, 0, 2, 1, 3, 0])
result = lift_score(y_targ, y_pred)
assert 2 == result
def test_normalized_confusion_matrix_between_0_and_1():
cm = confusion_matrix(np.array([1, 1, 1, 1]), np.array([1, 1, 1, 1]), normalized=True)
assert (cm >= 0).all() & (cm <= 1).all()
assert 1 == np.sum(cm)
def test_confusion_matrix_returns_as_expected():
cm = confusion_matrix(np.array([1, 1, 1, 0]), np.array([1, 1, 1, 1]), normalized=False)
assert np.all(np.array([[0, 1], [0, 3]]) == cm)
assert 4 == np.sum(cm)
def test_sorted_feature_importance_returns_as_expected():
labels = np.array(['Feature 1', 'Feature 2'])
importance = np.array([.8, .7])
result_labels, result_importance = sorted_feature_importance(labels, importance)
assert np.all(labels == result_labels)
assert np.all(importance == result_importance)
def test_sorted_feature_importance_ascending_returns_as_expected():
labels = np.array(['Feature 1', 'Feature 2'])
importance = np.array([.8, .7])
result_labels, result_importance = sorted_feature_importance(labels,
importance,
ascending=True)
assert np.all(np.array(['Feature 2', 'Feature 1']) == result_labels)
assert np.all(np.array([.7, .8]) == result_importance)
|
"""
This example demonstrates how to generate some test placeholders.
It requires codenode to run:
https://github.com/0xf0f/codenode
"""
import codenode as cn
import codenode.python as py
import inspect
def generate_class_tests(cls):
test_list_name = f'{cls.__name__.lower()}_tests'
file = cn.File()
file.add_child(
cn.Line('from quicktest import TestList')
)
file.add_child(
py.Comment(f'import {cls.__name__} here')
)
file.add_child(cn.EmptyLines(2))
file.add_child(
cn.Line(
f"{test_list_name} = TestList('{cls.__name__} tests')"
)
)
file.add_child(cn.EmptyLines(2))
for name, method in inspect.getmembers(
cls, predicate=inspect.isroutine
):
test_function = py.Function(f'test_{name}', 'instance')
test_function.add_decorator(f'{test_list_name}.test')
comment = py.Comment(test_function.dumps())
file.add_child(comment)
file.add_child(cn.EmptyLines(1))
run_function = py.Function('run_tests')
run_function.add_child(cn.Line(f'instance = {cls.__name__}()'))
run_function.add_child(cn.Line(f'{test_list_name}.run(instance)'))
file.add_child(run_function)
return file
if __name__ == '__main__':
class TestClass:
def test_method(self):
pass
generated_tests = generate_class_tests(TestClass)
# print results:
print(generated_tests.dumps())
# or to save to a file:
with open('output.py', 'w') as file:
generated_tests.dump(file)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pprint
import spacy
from base import BaseObject
class DisplacyNerGenerator(BaseObject):
""" Annotate the Input Text with the spaCy NER model
Sample Input:
The investment arm of biotech giant Amgen has led a new $6 million
round of funding in GNS Healthcare, CEO Colin Hill tells Xconomy.
Sample Output:
[ { 'label': 'Company',
'start': 36,
'end': 41,
'text': 'Amgen',
'type': 'ner'},
{ 'label': 'Money',
'start': 56,
'end': 66,
'text': '$6 million',
'type': 'ner'},
{ 'label': 'Company',
'start': 87,
'end': 101,
'text': 'GNS Healthcare',
'type': 'ner'},
{ 'label': 'Person',
'start': 107,
'end': 117,
'text': 'Colin Hill',
'type': 'ner'},
{ 'label': 'GPE',
'start': 124,
'end': 131,
'text': 'Xconomy',
'type': 'ner'}]
"""
__nlp = spacy.load("en_core_web_sm")
__label_transform = {
'ORG': 'Company'
}
def __init__(self,
input_text: str,
is_debug: bool = False):
"""
Created:
3-Feb-2020
craig.trim@ibm.com
* https://github.ibm.com/-cdo/unstructured-analytics/issues/1810
:param input_text:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._input_text = input_text
def _generate(self,
ent) -> dict:
from nlusvc.displacy.dto import DisplacyEntityGenerator
def label() -> str:
if ent.label_ in self.__label_transform:
return self.__label_transform[ent.label_].lower()
return ent.label_.lower()
entity = DisplacyEntityGenerator.generate(text=ent.text,
entity_type='tag',
start=ent.start_char,
end=ent.end_char,
label=label())
entity['ontology'] = 'en_core_web_sm'
return entity
def process(self) -> list:
doc = self.__nlp(self._input_text)
entities = [self._generate(ent) for ent in doc.ents]
if self._is_debug:
self.logger.debug('\n'.join([
"NER Generation Complete",
pprint.pformat(entities, indent=4)]))
return entities
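# Hedged usage sketch: how this class might be exercised, assuming the project's
# `base` and `nlusvc` packages and the spaCy `en_core_web_sm` model are available
# in the environment (the sample sentence is the one from the docstring above).
if __name__ == '__main__':
    sample_text = ("The investment arm of biotech giant Amgen has led a new $6 million "
                   "round of funding in GNS Healthcare, CEO Colin Hill tells Xconomy.")
    generator = DisplacyNerGenerator(input_text=sample_text, is_debug=False)
    pprint.pprint(generator.process())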
|
import sys
sys.path.insert(0,"../build/build.dir/packages/PyTrilinos/src/stk/PyPercept")
from mpi4py import MPI
from PerceptMesh import *
from math import *
import random
import time
import unittest
from numpy import *
class CheckCoordMag(GenericFunction):
def __init__(self, name=""):
self.name = name
self.error = False
def evaluate(self, domain, codomain, time_value_optional=0.0):
x = domain(0)
y = domain(1)
z = domain(2)
v = sqrt(x*x + y*y + z*z)
if fabs(v - cmag_field_node) > 1.e-6:
print "CheckCoordMag:: ", self.name, "v= ", v, "x= ", x, "y= ", y, "z= ", z, "cmag_field_node= ", cmag_field_node
self.assertAlmostEqual(v, cmag_field_node)
self.error = True
class FieldFunctionUnitTests(unittest.TestCase):
def setUp(self):
self.testpoints = [[0.1234, 0.5678, 0.9, 0.812],
[0.1234e-3, 0.5678e-5, 0.97, 0.01],
[0.101, 0.02, 0.1020, 0.0122],
[0.003, 0.89, 0.01, 0.5] ]
def test_fieldFunction_multiplePoints(self):
print "start..."
num_x = 3
num_y = 3
num_z = 3
config_mesh = str(num_x) + "x" + str(num_y) + "x" + str(num_z) + "|bbox:0,0,0,1,1,1"
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec(config_mesh))
vectorDimension = 0
eMesh.add_field("coords_mag_field", FEMMetaData.NODE_RANK, vectorDimension)
eMesh.commit()
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, Dimensions(3), Dimensions(3), FieldFunction.SIMPLE_SEARCH)
val1 = eval_vec3(0.2, 0.3, 0.4, 0.0, ff_coords)
print "val1= ", val1
points = zeros(shape=(4,3))
output_expect = zeros(shape=(4,3))
output = zeros(shape=(4,3))
print "here 1"
i = 0
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
points[i][0] = x
points[i][1] = y
points[i][2] = z
vec = eval_vec3(x,y,z,t,ff_coords)
tol0 = fabs(1.e-5*x)
tol1 = fabs(1.e-5*y)
tol2 = fabs(1.e-5*z)
print "vec(0) = ", vec[0], " == x = ", x
print "vec(1) = ", vec[1], " == y = ", y
print "vec(2) = ", vec[2], " == z = ", z
self.assertAlmostEqual(x, vec[0], delta=tol0)
self.assertAlmostEqual(y, vec[1], delta=tol1)
self.assertAlmostEqual(z, vec[2], delta=tol2)
output_expect[i][0] = x
output_expect[i][1] = y
output_expect[i][2] = z
i = i + 1
print "field_op: NPTS= 4"
ff_coords.setDomainDimensions(Dimensions(3))
ff_coords.setCodomainDimensions(Dimensions(3))
#output = ff_coords.evaluate(points)
# pass in the output array to ensure result is properly dimensioned
output = ff_coords.value(points, output)
print "here 2, output= ", output
for j in range(4): #NLM
output_expect_j = output_expect[j][0]
output_j = output[j][0]
tol = 1.e-5*(fabs(output_expect_j))
print "output[j] = ", output_j, " == output_expect[j] = ", output_expect_j , " points[j] = ", points[j]
self.assertAlmostEqual(output_j, output_expect_j, delta = tol)
print "start...done"
def test_fieldFunction_demo_1_0_0(self):
eMesh = PerceptMesh(3)
eMesh.new_mesh(GMeshSpec("3x3x3|bbox:0,0,0,1,1,1"))
eMesh.commit()
eMesh.print_info("fieldFunction_demo_1_0_0", 2)
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, 3, 3)
x = 0.123
y = 0.234
z = 0.345
time = 0.0
eval_vec3_print(x, y, z, time, ff_coords)
def test_fieldFunction_read_print(self):
print_info = 0
x = 3
y = 3
z = 3
config_mesh = str(x) + "x" + str(y) + "x" + str(z) + "|bbox:0,0,0,1,1,1"
eMesh = PerceptMesh()
eMesh.new_mesh_read_only(GMeshSpec(config_mesh))
metaData = eMesh.get_fem_meta_data()
parts = metaData.get_parts()
nparts = len(parts)
if print_info == 1:
print "Number of parts = ", nparts
fields = metaData.get_fields()
nfields = len(fields)
if print_info == 1:
print "Number of fields = ", nfields
for i in range(nfields):
field = fields[i]
# here we have a FieldBase* which we cannot access from Python
# is this a problem and will we want this functionality?
def test_fieldFunction_demo_1(self):
gms = Gmesh_STKmesh_Fixture(MPI.COMM_WORLD, "3x3x3|bbox:0,0,0,1,1,1")
print "gms = ", gms
print "gms= end"
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec("3x3x3|bbox:0,0,0,1,1,1"))
eMesh.commit()
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, 3, 3)
x = 0.123
y = 0.234
z = 0.345
time = 0.0
eval_vec3_print(x,y,z,time,ff_coords)
def test_fieldFunction_demo_2(self):
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec("3x3x3|bbox:0,0,0,1,1,1"))
vectorDimension = 0
eMesh.add_field("coords_mag_field", FEMMetaData.NODE_RANK, vectorDimension)
eMesh.commit()
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
coords_mag_field = eMesh.get_field(FEMMetaData.NODE_RANK, "coords_mag_field")
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, 3, 3)
eval_vec3_print(0.1,0.1,0.1,0.0,ff_coords)
coords_mag_sf = StringFunction("sqrt(x*x + y*y + z*z)" , "coords_mag_sf", 3, 1)
x = 0.123
y = 0.234
z = 0.345
vv = sqrt(x*x + y*y + z*z)
v1 = eval_func(x,y,z,0,coords_mag_sf)
print "vv = ", vv, "== v1 = ", v1
self.assertEqual(vv, v1)
coords_mag_field_function = FieldFunction("coords_mag_field_function", coords_mag_field, eMesh, 3, 1)
coords_mag_field_function.interpolateFrom(coords_mag_sf)
eMesh.save_as("./cubehex8_withCoordMag_out.e")
ff_coords.add_alias("mc")
sfcm = StringFunction("sqrt(mc[0]*mc[0]+mc[1]*mc[1]+mc[2]*mc[2])", "sfcm", 3, 1)
vv = eval_func(0.1,0.1,0.1,0.0, sfcm)
print "expected = ", sqrt(3*0.1*0.1), " actual= " , vv
sfcm = StringFunction("sqrt(ff_coords[0]*ff_coords[0]+ff_coords[1]*ff_coords[1]+ff_coords[2]*ff_coords[2])", "sfcm", 3, 1)
vv = eval_func(0.1,0.1,0.1,0.0, sfcm)
print "expected = ", sqrt(3*0.1*0.1), " actual= " , vv
def test_fieldFunction_readMesh_createField_interpolateFrom(self):
num_x = 3
num_y = 3
num_z = 3
config_mesh = str(num_x) + "x" + str(num_y) + "x" + str(num_z) + "|bbox:0,0,0,1,1,1"
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec(config_mesh))
vectorDimension = 0
eMesh.add_field("coords_mag_field", FEMMetaData.NODE_RANK, vectorDimension)
eMesh.commit()
#p_rank = eMesh.get_bulk_data().parallel_rank()
#setRank(p_rank)
#from Util
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
coords_mag_field = eMesh.get_field(FEMMetaData.NODE_RANK, "coords_mag_field")
#VERIFY_OP_ON Here the unit test does something
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, 3, 3, FieldFunction.SIMPLE_SEARCH)
#here we could evaluate the function
#eval_vec3_print(0.1,0.2,0.3,0.0,ff_coords)
coords_mag_sf = StringFunction("sqrt(x*x + y*y + z*z)", "coords_mag_sf", 3, 1)
coords_mag_field_function = FieldFunction("coords_mag_field_function", coords_mag_field, eMesh, 3, 3, FieldFunction.SIMPLE_SEARCH)
coords_mag_field_function.interpolateFrom(coords_mag_sf)
#The following is not doable from Python
checkCoordMag = CheckCoordMag()
#eMesh.nodalOpLoop(checkCoordMag, coords_mag_field)
print checkCoordMag.error
ff_coords.add_alias("mc")
sfcm = StringFunction("sqrt(mc[0]*mc[0]+mc[1]*mc[1]+mc[2]*mc[2])", "sfcm", Dimensions(3), Dimensions(1))
tol1 = 1.e-12
vv = eval_vec3(0.1, 0.2, 0.3, 0.0, ff_coords)
print
print "0.1 == vv[0] = ", vv[0], "passed"
print "0.2 == vv[1] = ", vv[1], "passed"
print "0.3 == vv[2] = ", vv[2], "passed"
self.assertAlmostEqual(.1, vv[0], delta=tol1)
self.assertAlmostEqual(.2, vv[1], delta=tol1)
self.assertAlmostEqual(.3, vv[2], delta=tol1)
vv = eval_func(0.1, 0.2, 0.3, 0.0, sfcm)
v_expect = sqrt(0.1*0.1+0.2*0.2+0.3*0.3)
if ((vv-v_expect) < tol1):
print "vv = ", vv, " == v_expect = ", v_expect, "passed"
coords_mag_field_function.interpolateFrom(sfcm)
def test_fieldFunction_point_eval_verify(self):
num_x = 3
num_y = 3
num_z = 3
config_mesh = str(num_x) + "x" + str(num_y) + "x" + str(num_z) + "|bbox:0,0,0,1,1,1"
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec(config_mesh))
eMesh.commit()
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, Dimensions(3), Dimensions(3), FieldFunction.SIMPLE_SEARCH)
val1 = eval_vec3_print(0.2,0.3,0.4,0.0,ff_coords)
bulkData = eMesh.get_bulk_data()
try:
val10 = eval_print_vec3(1.2, 1.3, 1.4, 0.0, ff_coords)
except:
print "expected to catch this exception: "
pts = array([0.2, 0.3, 0.4])
output_pts = array([0.0, 0.0, 0.0])
output_pts = ff_coords.value(pts, output_pts)
tol = 1.e-9
print "output(0) = ", pts[0], " == output_pts(0) = ", output_pts[0]
print "output(1) = ", pts[1], " == output_pts(1) = ", output_pts[1]
print "output(2) = ", pts[2], " == output_pts(2) = ", output_pts[2]
self.assertAlmostEqual(pts[0], output_pts[0], delta = tol)
self.assertAlmostEqual(pts[1], output_pts[1], delta = tol)
self.assertAlmostEqual(pts[2], output_pts[2], delta = tol)
def test_fieldFunction_point_eval_timing(self):
num_x = 3
num_y = 3
num_z = 3
config_mesh = str(num_x) + "x" + str(num_y) + "x" + str(num_z) + "|bbox:0,0,0,1,1,1"
eMesh = PerceptMesh()
eMesh.new_mesh(GMeshSpec(config_mesh))
eMesh.commit()
#FIXME
#p_size = eMesh.get_bulk_data->parallel_size()
f_coords = eMesh.get_field(FEMMetaData.NODE_RANK, "coordinates")
for iSearchType in range(2):
if iSearchType == 0:
search_type = FieldFunction.SIMPLE_SEARCH
search_type_name = "SIMPLE_SEARCH"
else:
search_type = FieldFunction.STK_SEARCH
search_type_name = "STK_SEARCH"
ff_coords = FieldFunction("ff_coords", f_coords, eMesh, Dimensions(3), Dimensions(3), search_type)
t1st = time.time()
val1 = eval_vec3(0.2,0.3,0.4,0.0,ff_coords)
val1 = eval_vec3(0.2,0.3,0.4,0.0,ff_coords) #evaluated twice???
t1st = time.time() - t1st
numIter = 10000
random.seed(12345)
total_time = time.time()
max_rand = 32767
for iter in range(numIter):
num0 = random.randint(1, max_rand)*1.0
num1 = random.randint(1, max_rand)*1.0
num2 = random.randint(1, max_rand)*1.0
pts = array([(num0/max_rand), (num1/max_rand), (num2/max_rand)])
output_pts = array([0.0,0.0,0.0])
output_pts = ff_coords.value(pts, output_pts, 0.0)
total_time = time.time() - total_time
print "TEST::function::fieldFunction_point_eval_timing: "
print " for search_type= ", search_type_name
print " time for 1st eval= ", t1st
print " for ", numIter, "iterations, evaluating field(x,y,z) time = ", total_time
print " average per point lookup and eval time = ", (total_time/numIter)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(FieldFunctionUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import dash_html_components as html
import dash
from NuRadioReco.detector.detector_browser import detector_map
from NuRadioReco.detector.detector_browser import station_info
from NuRadioReco.detector.detector_browser import channel_info
from NuRadioReco.detector.detector_browser import hardware_response
import NuRadioReco.detector.detector_browser.detector_provider
from flask import send_file, send_from_directory
from NuRadioReco.detector.detector_browser.app import app
import json
import os
app.title = 'Radio Neutrino Observatory in Greenland'
server = app.server
detector_provider = NuRadioReco.detector.detector_browser.detector_provider.DetectorProvider()
detector_provider.set_generic_detector('detector_description/RNO_detector.json', 101, 3, False, False)
detector_json = json.load(open('detector_description/RNO_detector.json', 'r'))
app.layout = html.Div([
html.Div('', id='output-dummy', style={'display': 'inline'}),
html.Div('', id='load-dummy', style={'display': 'none'}),
html.Div([
html.Div(
[
html.A(
[
html.Button('Download JSON', className='btn btn-primary')
],
href='/dash/rno-station',
download='true'
),
station_info.layout,
channel_info.layout
],
style={'flex': '1'}
),
html.Div([
detector_map.layout,
hardware_response.layout
], style={'flex': '2'})
], style={'display': 'flex'})
])
@app.server.route('/dash/rno-station')
def download_json():
return send_from_directory(
os.path.dirname(os.path.abspath(__file__)),
filename='detector_description/RNO_detector.json'
)
if __name__ == '__main__':
dash_version = [int(part) for part in dash.__version__.split('.')[:2]]
if dash_version[0] == 0 and dash_version[1] < 39:
    print('WARNING: Dash version 0.39.0 or newer is required, you are running version {}. Please update.'.format(dash.__version__))
app.run_server()
|
p=float(input('Insira o valor do produto R$'))
print('O produto custará R${} com o desconto de 5%.'.format(p-(5/100*p)))
|
print("HOLA MUNDO")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from collections import deque
# Test case for the function
def test_do_it(queue, number, expected):
    result = do_it(queue, number)
    return result == expected
# Code of the function
def do_it(queue, number):
if number <= len(queue):
for i in range(number):
queue.popleft()
return queue
# Tests
print(test_do_it(deque(["a", "b"]), 3, None))
print(test_do_it(deque(["a", "b", "c", "d", "e"]), 3, deque(["d", "e"])))
|
'''
Testing Serial Module Ports
Setup:
Create "loopbacks". Connect ports 1-4 to 5-8 using RS485
Algorithm:
- import modules: serial
- configure gpio on all ports to RS485
- send test message on all ports sequentially
- verify test message received on all ports sequentially
- display results
'''
import serial
import subprocess
import time
#Configure all GPIO
#Function for serial port configuration using GPIO
def gpioconfig(port,RSmode,duplex,resistors,bias):
'''
MDL serial port configuration
port - /dev/ttyMAXn
RSmode - 'RS485' or 'RS232'
duplex - 'full' or 'half'
resistors - 1 or 0
bias - 1 or 0
'''
mdlportnums = {
'/dev/ttyMAX0':0,'/dev/ttyMAX1':1,
'/dev/ttyMAX2':2,'/dev/ttyMAX3':3,
'/dev/ttyMAX4':4,'/dev/ttyMAX5':5,
'/dev/ttyMAX6':6,'/dev/ttyMAX7':7}
RSmodes = {'RS485':1,'RS232':0}
duplexval = {'full':0,'half':1}
gpiopins = [0,1,2,3]
portnum = mdlportnums[port]
if (portnum >= 0) and (portnum <= 3):
gpiochip = 'gpiochip1'
gpiopins = [x + 4*portnum for x in gpiopins]
elif (portnum >= 4) and (portnum <=7):
gpiochip = 'gpiochip2'
gpiopins = [x + 4*(portnum-4) for x in gpiopins]
else:
print('error')
RSset = '{}={}'.format(gpiopins[0],RSmodes[RSmode])
duplexset = '{}={}'.format(gpiopins[1],duplexval[duplex])
resistset = '{}={}'.format(gpiopins[2],resistors)
biaset = '{}={}'.format(gpiopins[3],bias)
gpiocmd = 'gpioset {} {} {} {} {}'.format(
gpiochip,RSset,duplexset,resistset,biaset)
print(gpiocmd)
subprocess.run([gpiocmd],shell=True)
smports = ['SM1','SM2','SM3','SM4','SM5','SM6','SM7','SM8']
#smports = ['SM2','SM7']
#portnames = {'SM2':'/dev/ttyMAX1','SM7':'/dev/ttyMAX6'}
portnames = {
'SM1':'/dev/ttyMAX0',
'SM2':'/dev/ttyMAX1',
'SM3':'/dev/ttyMAX2',
'SM4':'/dev/ttyMAX3',
'SM5':'/dev/ttyMAX4',
'SM6':'/dev/ttyMAX5',
'SM7':'/dev/ttyMAX6',
'SM8':'/dev/ttyMAX7',
}
for port in smports:
gpioconfig(portnames[port],'RS485','half',0,0)
#Create a connection on all ports
smconns = {}
#Set up all port connections
for port in smports:
comm = serial.Serial(
portnames[port],
19200,
timeout=0
)
smconns[port] = comm
#Send and receive on all ports
#Need to send from all before checking receive on all
for port in smports:
msg = 'hello from {}'.format(port)
sbytes = smconns[port].write(msg.encode('utf-8'))
print('{} sent {} bytes'.format(port,sbytes))
#Wait a bit for message transfer
time.sleep(1)
#Read in and verify correct
for port in smports:
dbytes = smconns[port].in_waiting
print('{} bytes in waiting'.format(dbytes))
rmsg = smconns[port].read(dbytes).decode('utf-8')
print('Port: {} received message: {}'.format(port,rmsg))
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: mouli@meics.org
# Maintained By: miha@reciprocitylabs.com
from collections import defaultdict
from datetime import date, datetime
from ggrc.extensions import get_extension_modules
from ggrc.models import Notification, NotificationConfig
from ggrc.utils import merge_dict
from ggrc import db
from sqlalchemy import and_
class NotificationServices():
def __init__(self):
self.services = self.all_notifications()
def all_notifications(self):
services = {}
for extension_module in get_extension_modules():
contributions = getattr(
extension_module, 'contributed_notifications', None)
if contributions:
if callable(contributions):
contributions = contributions()
services.update(contributions)
return services
def get_service_function(self, name):
if name not in self.services:
raise ValueError("unknown service name: %s" % name)
return self.services[name]
def call_service(self, name, notification):
service = self.get_service_function(name)
return service(notification)
services = NotificationServices()
def get_filter_data(notification):
result = {}
data = services.call_service(notification.object_type.name, notification)
for user, user_data in data.iteritems():
if should_receive(notification,
user_data["force_notifications"][notification.id],
user_data["user"]["id"]):
result[user] = user_data
return result
def get_notification_data(notifications):
if not notifications:
return {}
aggregate_data = {}
for notification in notifications:
filtered_data = get_filter_data(notification)
aggregate_data = merge_dict(aggregate_data, filtered_data)
return aggregate_data
def get_pending_notifications():
notifications = db.session.query(Notification).filter(
Notification.sent_at == None).all() # noqa
notif_by_day = defaultdict(list)
for notification in notifications:
notif_by_day[notification.send_on].append(notification)
data = defaultdict(dict)
today = datetime.combine(date.today(), datetime.min.time())
for day, notif in notif_by_day.iteritems():
current_day = max(day, today)
data[current_day] = merge_dict(data[current_day],
get_notification_data(notif))
return notifications, data
def get_todays_notifications():
notifications = db.session.query(Notification).filter(
and_(Notification.send_on <= date.today(),
Notification.sent_at == None # noqa
)).all()
return notifications, get_notification_data(notifications)
def generate_notification_email(data):
pass
def dispatch_notifications():
pass
def should_receive(notif, force_notif, person_id, nightly_cron=True):
def is_enabled(notif_type):
result = NotificationConfig.query.filter(
and_(NotificationConfig.person_id == person_id,
NotificationConfig.notif_type == notif_type))
# TODO: the create_user function should make entries in the notification
# config table. Once it does, the code below will no longer need to provide
# default values for new users.
if result.count() == 0:
# If we have no results, we need to use the default value, which is
# true for digest emails.
return notif_type == "Email_Digest"
return result.one().enable_flag
# TODO: has_instant is not used at the moment but will be used when we
# implement instant notifications
# has_instant = force_notif or is_enabled("Email_Now")
has_digest = force_notif or is_enabled("Email_Digest")
return has_digest
|
"Here be dragons."
from .context import Context
from .template import Template
from .tokenizer import TokenKind
LOOKUP = "context.lookup({!r}){}"
LOOKUP_NIL = "context.lookup({!r}, []){}"
RAW = "str({})".format(LOOKUP)
VAR = "escape({})".format(RAW)
WHEN = "{func}(blocks, context) if {var} else ''"
UNLESS = "{func}(blocks, context) if not {var} else ''"
FOREACH = "''.join({func}(blocks, context.update(even=(i % 2 != 0), item=item)) for i, item in enumerate({var}))"
BLOCK = "blocks.lookup({var!r}, {func})(blocks, context) if blocks else {func}(blocks, context)"
EXTEND = INCLUDE = "{func}(blocks, context)"
FUNCTION = """\
def {name}(blocks, context):
return "".join([{nodes}])\
"""
def gennested(env, var, node):
token = node.token
name = ":".join((token.filename, token.kind, var, str(token.line), str(token.column)))
return env._compile(name, node.children)
def gennode(env, node, blocks):
if node.kind not in (
TokenKind.chunk,
TokenKind.end,
TokenKind.include,
TokenKind.block,
TokenKind.extend_block,
TokenKind.extend,
):
var, suffix = node.token.value
var = var, suffix or ""
if node.kind == TokenKind.chunk:
return repr(node.token.value)
elif node.kind == TokenKind.raw:
return RAW.format(*var)
elif node.kind == TokenKind.var:
return VAR.format(*var)
elif node.kind == TokenKind.include:
template = env._load(node.token.value)
return INCLUDE.format(func=template.name)
elif node.kind == TokenKind.block:
var = node.token.value
template = gennested(env, var, node)
return BLOCK.format(var=var, func=template.name)
elif node.kind == TokenKind.extend_block:
var = node.token.value
template = gennested(env, var, node)
blocks[var] = template.function
return None
elif node.kind == TokenKind.extend:
template = env._load(node.token.value)
return EXTEND.format(func=template.name)
elif node.kind in (TokenKind.when, TokenKind.unless, TokenKind.foreach):
template = gennested(env, "".join(var), node)
itername = template.name
return {
TokenKind.when: WHEN,
TokenKind.unless: UNLESS,
TokenKind.foreach: FOREACH
}[node.kind].format(func=itername, var=LOOKUP_NIL.format(*var))
def genfunc(env, filename, name, nodes):
blocks = Context(filename)
nodes = (gennode(env, x, blocks) for x in nodes)
source = FUNCTION.format(name=name, nodes=", ".join(node for node in nodes if node))
exec(source, env.context, env.context)
function = env.context[name]
function.__source__ = source
env.template_cache[name] = template = Template(filename, name, function, blocks)
return template
|
from query_builder.core import InsertBuilder
from simulator.menu import cost_menu
# TODO: class name will be group not one user.
class User:
"""
Q)
Is it right to make a user class?
If we make virtual user instances for simulation, it can be expensive (mem & time).
What are the advantages of creating an instance?
Is it worth it?
TODO:
define user behavior.
Moreover, we can define behavior patterns. (schedule)
- behavior type
0. sign in
1. login (landing)
2. renew food materials in fridge (capture their fridge or receipt)
3. search menus (we give recommended menu and get user's feedback)
4. buy materials (we give recommended food materials and get user's feedback)
- behavior pattern
0. 0 -> 2 (sign in -> renew fridge)
"""
def __init__(self, user_num=0):
"""
Acts as the user's stand-in (a virtual user for simulation).
TODO:
if user_id is 0, new user. (create user)
else, existing user. (select user)
user's features for recommender system
"""
self.user_num = user_num
self.id = 1
self.gender = 3
self.b_type = 2 # type of behavior
self.fridge = None
self.menu = None
# TODO: diversify static features.
# static is declared before dynamic
self.record = dict({
"id": self.id,
"gender": self.gender,
"driven": self.b_type,
})
"""
By type of behavior
"""
def sign_in(self):
# TODO: insert data in user table(RDS)
print("ID: %d sign in" % self.id)
pass
def login(self):
# TODO: select data in user table
print("ID: %d login" % self.id)
pass
def capture_fridge(self, fridge_image, probable_menus):
"""
TODO:
update or insert virtual in user_item table
change location of cost_menu function.
"""
self.fridge = fridge_image()
self.menu = probable_menus(self.fridge)
# self.menu = cost_menu(self.fridge, self.user_num)
# iqb = InsertBuilder('user_table', fridge)
# iqb.execute()
print("\n\n------user's behavior------\n")
print("ID: %d capture fridge" % self.id)
print(self.fridge)
print("\nprobable menu & cost")
print(self.menu)
return self.fridge
def search_menu(self):
# TODO: join btw user_item table & recipe_item table
print("ID: %d search menu" % self.id)
pass
def buy_materials(self):
# TODO: i don't know
print("ID: %d buy materials" % self.id)
pass
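# Hedged usage sketch: the two lambdas below stand in for the real fridge-capture and
# menu-recommendation callables, which are not defined in this module; the values are
# illustrative only and the sketch assumes the module's own imports resolve.
if __name__ == "__main__":
    user = User(user_num=1)
    user.login()
    user.capture_fridge(
        fridge_image=lambda: ["egg", "milk", "tofu"],
        probable_menus=lambda fridge: {"soft tofu stew": 4.5},
    )
    user.search_menu()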
|
# Author: Munis Isazade. Creates a log file for threading.
from django.utils import timezone
import os
import sys
import traceback
VERSION = "0.5.1"
class Logger(object):
"""
Logger objects create a log file
if not exist and append string to
log file
"""
def __init__(self, file='messages.log'):
self.file = file
self.time = timezone.now().strftime('%Y/%m/%d %H:%M:%S')
self.created_time = timezone.now().strftime('%d-%b-%Y %H:%M:%S')
def debug(self, text=""):
"""
Append a debug message to the log file.
:param text:
:return: bool
"""
self._file_append(text)
def _file_append(self, text=""):
"""
:param text:
:return:
"""
try:
if self._check_file_exist(self.file):
with open(self.file, "a") as msglog:
msglog.writelines(
"{} Error:[{}] SysInfo:[{}] Time:[{}]\n".format(text, traceback.format_exc(), sys.exc_info()[0],
self.time))
else:
with open(self.file, "a") as msglog:
msglog.writelines("#Version: {}\n"
"#CreatedDate: {}\n"
"#Author: Munis Isazade\n"
"#Fields: text error sysinfo time\n"
"{} Error:[{}] SysInfo:[{}] Time:[{}]\n".format(self.version(), self.created_time,
text, traceback.format_exc(),
sys.exc_info()[0], self.time))
return True
except:
raise FileNotFoundError('not found')
def version(self):
"""
get Logger Module version
"""
return VERSION
def _check_file_exist(self, filename):
"""
check log file is exist or not
:param filename:
:return: bool
"""
return os.path.isfile(filename)
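# Hedged usage sketch: settings.configure() is only needed when this module is run
# outside an already-configured Django project; the file name and message below are
# illustrative.
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure(USE_TZ=True)
    log = Logger('messages.log')
    log.debug('worker thread failed to reach the upstream API')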
|
from pika import BasicProperties
from pika.exceptions import ChannelClosedByBroker
from petisco.base.domain.message.domain_event import DomainEvent
from petisco.base.domain.message.domain_event_bus import DomainEventBus
from petisco.extra.rabbitmq.application.message.configurer.rabbitmq_message_configurer import (
RabbitMqMessageConfigurer,
)
from petisco.extra.rabbitmq.application.message.formatter.rabbitmq_message_queue_name_formatter import (
RabbitMqMessageQueueNameFormatter,
)
from petisco.extra.rabbitmq.shared.rabbitmq_connector import RabbitMqConnector
class RabbitMqDomainEventBus(DomainEventBus):
def __init__(
self,
organization: str,
service: str,
connector: RabbitMqConnector = RabbitMqConnector(),
):
self.connector = connector
self.exchange_name = f"{organization}.{service}"
self.rabbitmq_key = f"publisher-{self.exchange_name}"
self.configurer = RabbitMqMessageConfigurer(organization, service, connector)
self.properties = BasicProperties(delivery_mode=2) # PERSISTENT_TEXT_PLAIN
def publish(self, domain_event: DomainEvent):
self._check_is_domain_event(domain_event)
meta = self.get_configured_meta()
domain_event = domain_event.update_meta(meta)
try:
channel = self.connector.get_channel(self.rabbitmq_key)
routing_key = RabbitMqMessageQueueNameFormatter.format(
domain_event, exchange_name=self.exchange_name
)
channel.confirm_delivery()
channel.basic_publish(
exchange=self.exchange_name,
routing_key=routing_key,
body=domain_event.json(),
properties=self.properties,
)
except ChannelClosedByBroker:
self._retry(domain_event)
def _retry(self, domain_event: DomainEvent):
# If the domain event queue is not configured yet, configure it and then try to publish again.
self.configurer.configure()
self.publish(domain_event)
def retry_publish_only_on_store_queue(self, domain_event: DomainEvent):
self._check_is_domain_event(domain_event)
meta = self.get_configured_meta()
domain_event = domain_event.update_meta(meta)
channel = self.connector.get_channel(self.rabbitmq_key)
channel.basic_publish(
exchange=self.exchange_name,
routing_key="retry.store",
body=domain_event.json(),
properties=self.properties,
)
def close(self):
self.connector.close(self.rabbitmq_key)
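# Hedged usage sketch: UserCreated and the "acme"/"registry" names are illustrative
# assumptions, not part of this module, and publishing requires a reachable RabbitMQ
# broker for the default RabbitMqConnector.
if __name__ == "__main__":
    class UserCreated(DomainEvent):
        pass

    bus = RabbitMqDomainEventBus(organization="acme", service="registry")
    bus.publish(UserCreated())
    bus.close()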
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dadmatools",
version="1.3.8",
author="Dadmatech AI Company",
author_email="info@dadmatech.ir",
description="DadmaTools is a Persian NLP toolkit",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Dadmatech/DadmaTools",
packages=setuptools.find_packages(),
install_requires=[
"bpemb>=0.3.3",
"nltk",
"folium==0.2.1",
"spacy==3.0.0",
"sklearn>=0.0",
"torch>=1.7.1",
"transformers==4.9.1",
"h5py>=3.3.0",
"Deprecated==1.2.6",
"hyperopt>=0.2.5",
"pyconll>=3.1.0",
"pytorch-transformers>=1.1.0",
"segtok==1.5.7",
"tabulate>=0.8.6",
"supar>=1.1.2",
"html2text",
"gensim>=3.6.0",
"fasttext==0.9.2",
"wiki_dump_reader",
"conllu",
"gdown",
"NERDA",
"py7zr==0.17.2"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License ",
"Operating System :: OS Independent",
],
)
|
# coding=utf-8
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Max
from django.http.response import Http404, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from trojsten.contests.models import Task
from trojsten.submit.constants import SUBMIT_TYPE_EXTERNAL
from trojsten.submit.models import Submit
from .forms import SubmitForm
from .tester import POCET_PRVKOV, process_answer, process_question
TASK_ID = 1173
@login_required
def task_view(request):
task = get_object_or_404(Task, pk=TASK_ID)
if not task.visible(request.user):
raise Http404
best_points = Submit.objects.filter(user=request.user, task=task).aggregate(Max("points"))[
"points__max"
]
if best_points is None:
best_points = 0
if request.method == "POST":
form = SubmitForm(request.POST)
if form.is_valid():
queries = request.session.get("plugin_prask_2_4_1/questions", list())
selection = form.cleaned_data["selection"]
points, message = process_answer(queries, selection)
if points:
if points > best_points:
submit = Submit(
task=task,
user=request.user,
points=points,
submit_type=SUBMIT_TYPE_EXTERNAL,
filepath="",
testing_status="OK",
tester_response="",
protocol_id="",
)
submit.save()
request.session["plugin_prask_2_4_1/last_points"] = points
messages.add_message(request, messages.SUCCESS if points else messages.ERROR, message)
return redirect(reverse("plugin_prask_2_4_1:task_view"))
else:
form = SubmitForm()
context = dict(
task=task,
form=form,
last_points=request.session.get("plugin_prask_2_4_1/last_points", 0),
best_points=int(best_points),
)
return render(request, "plugin_prask_2_4_1/task_view.html", context=context)
@login_required
def answer_query(request):
task = get_object_or_404(Task, pk=TASK_ID)
if not task.visible(request.user):
raise Http404
data = dict()
queries = request.session.get("plugin_prask_2_4_1/questions", list())
if request.method == "DELETE":
queries = list()
request.session["plugin_prask_2_4_1/questions"] = queries
data["status"] = "Success"
data["queries"] = queries
return JsonResponse(data)
if request.method == "GET":
data["status"] = "Success"
data["queries"] = queries
return JsonResponse(data)
if request.method != "POST":
data["status"] = "Error"
data["message"] = "Invalid method"
return JsonResponse(data)
try:
a = int(request.POST.get("a", None))
b = int(request.POST.get("b", None))
except (TypeError, ValueError):
data["status"] = "Error"
data["message"] = "Musíš zadať celé čísla"
return JsonResponse(data)
if a == b:
data["status"] = "Error"
data["message"] = "Porovnávaš samé so sebou"
return JsonResponse(data)
if not (1 <= a <= POCET_PRVKOV and 1 <= b <= POCET_PRVKOV):
data["status"] = "Error"
data["message"] = "Čísla sú mimo rozsah [%d, %d]" % (1, POCET_PRVKOV)
return JsonResponse(data)
if a is None or b is None:
data["status"] = "Error"
data["message"] = "Nesprávne parametre"
return JsonResponse(data)
process_question(queries, a, b)
request.session["plugin_prask_2_4_1/questions"] = queries
data["status"] = "Success"
data["queries"] = queries
return JsonResponse(data)
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import math
from torch import nn
from torch.nn import init
from torch.nn.modules.utils import _pair
from ..functions.modulated_deform_conv2d_func import ModulatedDeformConv2dFunction
class ModulatedDeformConv2d(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True):
super(ModulatedDeformConv2d, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups))
if out_channels % groups != 0:
raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.im2col_step = im2col_step
self.use_bias = bias
self.weight = nn.Parameter(torch.Tensor(
out_channels, in_channels//groups, *self.kernel_size))
self.bias = nn.Parameter(torch.Tensor(out_channels))
self.reset_parameters()
if not self.use_bias:
self.bias.requires_grad = False
def reset_parameters(self):
n = self.in_channels
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input, offset, mask):
assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
offset.shape[1]
assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
mask.shape[1]
return ModulatedDeformConv2dFunction.apply(input, offset, mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
self.im2col_step)
_ModulatedDeformConv2d = ModulatedDeformConv2dFunction.apply
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding,
dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True, lr_mult=0.1):
super(ModulatedDeformConv2dPack, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, groups, deformable_groups, im2col_step, bias)
out_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
self.conv_offset_mask = nn.Conv2d(self.in_channels,
out_channels,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
bias=True)
self.conv_offset_mask.lr_mult = lr_mult
self.conv_offset_mask.inited = True
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def forward(self, input):
out = self.conv_offset_mask(input)
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
return ModulatedDeformConv2dFunction.apply(input, offset, mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
self.im2col_step)
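# Hedged usage sketch: requires the compiled ModulatedDeformConv2dFunction extension
# (and typically a CUDA device); the layer configuration and tensor shapes below are
# illustrative only.
if __name__ == "__main__":
    layer = ModulatedDeformConv2dPack(
        in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1).cuda()
    x = torch.randn(2, 16, 64, 64, device="cuda")
    y = layer(x)
    print(y.shape)  # expected: torch.Size([2, 32, 64, 64])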
|
from fastapi.middleware.cors import CORSMiddleware
def init_cors(app):
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
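# Hedged usage sketch: the FastAPI app below is illustrative. The helper opens CORS to
# all origins, methods and headers, which is convenient in development but usually too
# permissive for production.
if __name__ == "__main__":
    from fastapi import FastAPI

    demo_app = FastAPI()
    init_cors(demo_app)
    print(demo_app.user_middleware)  # the CORSMiddleware entry should appear here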
|
#!/usr/bin/env python
import argparse
import os
import sys
import logbook
from subprocess import check_call
from subprocess import CalledProcessError
import pygithub3
LOG = logbook.Logger('GitHub Backup')
track_all_branches = """
for branch in `git branch -a | grep remotes | grep -v HEAD | grep -v master`; do
git branch --track ${branch##*/} $branch
done
"""
class cd(object):
"""Changes the current working directory to the one specified
"""
def __init__(self, path):
self.original_dir = os.getcwd()
self.dir = path
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, type, value, tb):
os.chdir(self.original_dir)
def backup(user, dest):
"""Performs a backup of all the repos in user's GitHub account on dest
"""
gh = pygithub3.Github()
repos = gh.repos.list(user=user, type='all')
for repo in repos.all():
repo_path = os.path.join(dest, repo.name)
LOG.info("Backing up repository {}".format(repo.name))
#If the repository is present on destination, update all branches
if os.path.exists(repo_path):
LOG.info("The repository {} already exists on destination. Pulling " \
"all branches".format(repo.name))
with cd(repo_path):
try:
cl = ['git', 'up']
check_call(cl)
except CalledProcessError:
LOG.error("There was an error fetching the branches from " \
"the repository {}, skipping it".format(repo.name))
pass
#Otherwise clone the repository and fetch all branches
else:
LOG.info("The repository {} does not exist on destination. " \
"Cloning it. ".format(repo.name))
try:
cl1 = ['git', 'clone', '-q', repo.clone_url, repo_path]
check_call(cl1)
with cd(repo_path):
check_call(track_all_branches, shell=True)
except CalledProcessError:
LOG.error('Error cloning repository {}, skipping it'.format(repo.name))
pass
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Clones all the public " \
"repositories from a GitHub account")
parser.add_argument("user", type=str, help="GitHub username")
parser.add_argument("-d", type=str, help="Destination of the copy")
args = parser.parse_args()
user = args.user
dest = os.getcwd() if not args.d else args.d
print user, dest
backup(user, dest)
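# Hedged usage note (script name assumed): python github_backup.py <github-user> -d /backups/github
# Updating existing clones relies on the third-party `git up` command being available on PATH.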
|
__all__ = ['nmf']
|
import scrapy
from poem_spider.items import PoemItem, PoetItem
import re
import uuid
class PoetSpider(scrapy.Spider):
name = "poet"
# allowed_domains = ["www.gushiwen.org"]
poet_count = 1
def start_requests(self):
start_url = 'https://so.gushiwen.org/authors/Default.aspx?p=1&c=先秦'
yield scrapy.Request(url=start_url, callback=self.parse_poet)
def parse_poet(self, response):
count = self.settings.get('COUNT')
if self.poet_count > count:
return
for sel in response.xpath('//div[@class="sonspic"]/div[@class="cont"]'):
name = sel.xpath('p/a/b/text()').extract_first()
introduction = sel.xpath('p[2]/text()').extract_first()
dynasty = response.xpath('//div[@class="sright"]/span/text()').extract_first()
item = PoetItem()
item['id'] = str(uuid.uuid1()).replace('-', '')
item['name'] = name
item['introduction'] = introduction
item['dynasty'] = dynasty
yield item
self.poet_count += 1
next = response.xpath('//div/a[@class="amore"]/@href').extract_first()
if next is not None and len(next) != 0:
next = response.urljoin(next)
yield scrapy.Request(url=next, callback=self.parse_poet)
else:
next = response.xpath('//div[@class="sright"]/span/following-sibling::a[1]/@href').extract_first()
if next is not None and len(next) != 0:
next = response.urljoin(next)
yield scrapy.Request(url=next, callback=self.parse_poet)
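# Hedged usage note: the spider reads COUNT from the project settings to cap how many
# poets are scraped, so define e.g. COUNT = 50 in the project's settings.py (the value
# 50 is illustrative) and then run `scrapy crawl poet`.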
|
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Temporary script to generate ground and forest. Will be removed before merge."""
vehicleCounter = 0
initialVehicleNumber = 300
vehicleInsertionInterval = 0.8
simulationTime = 3600
busRatio = 16
motorcycleRatio = 10
truckRatio = 40
trailerRatio = 15
print('<routes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/routes_file.xsd">')
print(' <route id="0" edges="-1"/>')
print(' <route id="1" edges="-0"/>')
print(' <vType id="car" minGap="5"/>')
print(' <vType id="bus" accel="0.8" decel="4.5" sigma="0.8" length="10" minGap="8" maxSpeed="20" guiShape="bus" vClass="bus"/>')
print(' <vType id="motorcycle" accel="1.2" decel="4.5" sigma="0.5" length="3" minGap="1.5" maxSpeed="25" guiShape="motorcycle" vClass="motorcycle"/>')
print(' <vType id="trailer" accel="0.8" decel="4.5" sigma="0.8" length="14" minGap="8" maxSpeed="20" guiShape="truck" vClass="trailer"/>')
print(' <vType id="truck" accel="0.8" decel="4.5" sigma="0.5" length="8" minGap="8" maxSpeed="25" guiShape="truck" vClass="truck"/>')
for i in range(initialVehicleNumber // 2):
departPos = 5000.0 - 2 * i * 5000.0 / initialVehicleNumber
if vehicleCounter % truckRatio == 0:
print(' <vehicle id="%d" type="truck" route="1" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % busRatio == 0:
print(' <vehicle id="%d" type="bus" route="1" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % trailerRatio == 0:
print(' <vehicle id="%d" type="trailer" route="1" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % motorcycleRatio == 0:
print(' <vehicle id="%d" type="motorcycle" route="1" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
else:
print(' <vehicle id="%d" type="car" route="1" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
vehicleCounter += 1
for i in range(initialVehicleNumber // 2):
departPos = 5000.0 - 2 * i * 5000.0 / initialVehicleNumber
if vehicleCounter % truckRatio == 0:
print(' <vehicle id="%d" type="truck" route="0" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % busRatio == 0:
print(' <vehicle id="%d" type="bus" route="0" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % trailerRatio == 0:
print(' <vehicle id="%d" type="trailer" route="0" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
elif vehicleCounter % motorcycleRatio == 0:
print(' <vehicle id="%d" type="motorcycle" route="0" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
else:
print(' <vehicle id="%d" type="car" route="0" depart="0" departLane="free" departPos="%lf" departSpeed="max"/>' % (vehicleCounter, departPos))
vehicleCounter += 1
for i in range(int(simulationTime / vehicleInsertionInterval)):
if vehicleCounter % truckRatio == 0:
print(' <vehicle id="%d" type="truck" route="%d" depart="%lf" departLane="random"/>' % (vehicleCounter, vehicleCounter % 2, i * vehicleInsertionInterval))
elif vehicleCounter % busRatio == 0:
print(' <vehicle id="%d" type="bus" route="%d" depart="%lf" departLane="random"/>' % (vehicleCounter, vehicleCounter % 2, i * vehicleInsertionInterval))
elif vehicleCounter % trailerRatio == 0:
print(' <vehicle id="%d" type="trailer" route="%d" depart="%lf" departLane="random"/>' % (vehicleCounter, vehicleCounter % 2, i * vehicleInsertionInterval))
elif vehicleCounter % motorcycleRatio == 0:
print(' <vehicle id="%d" type="motorcycle" route="%d" depart="%lf" departLane="random"/>' % (vehicleCounter, vehicleCounter % 2, i * vehicleInsertionInterval))
else:
print(' <vehicle id="%d" type="car" route="%d" depart="%lf" departLane="random"/>' % (vehicleCounter, vehicleCounter % 2, i * vehicleInsertionInterval))
vehicleCounter += 1
print('</routes>')
|
# Be sure to connect GPIO16 (D0) to RST or this won't work!
import machine
# configure RTC.ALARM0 to be able to wake the device
rtc = machine.RTC()
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
# set RTC.ALARM0 to fire after 10 seconds (waking the device)
seconds = 10
rtc.alarm(rtc.ALARM0, 1000 * seconds)
# put the device to sleep
machine.deepsleep()
# Optionally add the following to boot.py for sleep detection
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print('woke from a deep sleep')
else:
print('power on or hard reset')
|
from django.test import TestCase, Client
from httmock import with_httmock, urlmatch
from .utils import get_base_claims, encode_jwt
from django.contrib.auth.models import User, Group
client = Client()
@urlmatch(path=r"^/adfs/oauth2/token$")
def token_response(url, request):
claims = get_base_claims()
token = encode_jwt(claims)
return {'status_code': 200, 'content': b'{"access_token":"' + token + b'"}'}
@urlmatch(path=r"^/adfs/oauth2/token$")
def inactive_user_token_response(url, request):
claims = get_base_claims()
claims["winaccountname"] = "locked_user"
token = encode_jwt(claims)
return {'status_code': 200, 'content': b'{"access_token":"' + token + b'"}'}
class ClientRequestTests(TestCase):
def setUp(self):
Group.objects.create(name='group1')
Group.objects.create(name='group2')
Group.objects.create(name='group3')
User.objects.create(**{
User.USERNAME_FIELD: "locked_user",
"is_active": False
})
@with_httmock(token_response)
def test_authentication(self):
response = client.get("/oauth2/login", {'code': 'testcode'})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/accounts/profile/'))
@with_httmock(token_response)
def test_empty_authentication(self):
response = client.get("/oauth2/login", {'code': ''})
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, b"Login failed")
@with_httmock(token_response)
def test_missing_code(self):
response = client.get("/oauth2/login")
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, b"Login failed")
@with_httmock(inactive_user_token_response)
def test_inactive_user(self):
response = client.get("/oauth2/login", {'code': 'testcode'})
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b"Account disabled")
@with_httmock(token_response)
def test_login_redir(self):
response = client.get("/test/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response["Location"], 'https://adfs.example.com/adfs/oauth2/authorize?response_type=code&'
'client_id=your-configured-client-id&resource=your-adfs-RPT-name&'
'redirect_uri=example.com')
@with_httmock(token_response)
def test_context_processor(self):
response = client.get("/context_processor/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'https://adfs.example.com/adfs/oauth2/authorize?response_type=code&'
b'client_id=your-configured-client-id&resource=your-adfs-RPT-name&'
b'redirect_uri=example.com\n')
|
# Copyright 2021 National Technology & Engineering Solutions of
# Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
"""Demonstrate calling a command using the raw GRPC packets, not the
preferred Client interface."""
from paho.mqtt.publish import single
from icypaw.tahu_interface import new_payload, DataType
topic = 'spBv1.0/demo_group/NCMD/rand'
payload = new_payload()
metric = payload.metrics.add()
metric.name = 'debug_print'.encode()
metric.datatype = DataType.Template.value
metric.is_transient = True
metric.template_value.template_ref = 'debug_print'.encode()
arg_metric = metric.template_value.metrics.add()
arg_metric.name = 'msg'.encode()
arg_metric.string_value = "Hello, World".encode()
arg_metric.datatype = DataType.String.value
payload_bytes = payload.SerializeToString()
single(topic, payload=payload_bytes)
|
import logging
import threading
logger = logging.getLogger('[WORKER]')
logger.setLevel(logging.INFO)
__author__ = 'Andres'
'''
This class simplifies thread usage. Examples:
1 - Worker.call(aFunction).withArgs(arg1, arg2..argN).start() / Runs a normal thread starting at aFunction
2 - Worker.call(aFunction).withArgs(arg1, arg2..argN).asDaemon.start() / Same as before, but uses a daemon thread
3 - Worker.call(aFunction).withArgs(arg1, arg2..argN).every(T).asDaemon.start() / Runs a thread every T seconds
4 - Worker.call(aFunction).withArgs(arg1, arg2..argN).after(T).asDaemon.start() / Runs a thread after T seconds
NOTE: The 'call' method must always be called first.
CronicWorker - Calling the 'every(seconds)' function returns a CronicWorker with the original Worker attributes.
DeferredWorker - Calling the 'after(seconds)' function returns a DeferredWorker with the original Worker attributes.
NOTE: Calling 'start()' more than once on a DeferredWorker will try to 'cancel()' the first thread before launching
a new one
'''
class Worker(object):
def __init__(self):
self._thread = None
self._isDaemon = False
self._function = None
self._callback = lambda: None
self._arguments = ()
@staticmethod
def call(function):
worker = Worker()
worker._function = function
return worker
def withArgs(self, *args):
self._arguments = args
return self
@property
def asDaemon(self):
self._isDaemon = True
return self
def start(self):
self._thread = threading.Thread(target=self._startPoint)
self._thread.daemon = self._isDaemon
self._thread.start()
return self
def isWorking(self):
        return self._thread.is_alive() if self._thread else False
def join(self, timeout=None):
if self.isWorking():
self._thread.join(timeout)
return self
# def every(self, seconds):
# from utils.worker.cronicWorker import CronicWorker
# cronicWorker = CronicWorker.fromWorker(self)
# cronicWorker._repeatInterval = seconds
# return cronicWorker
#
# def after(self, seconds):
# from utils.worker.deferredWorker import DeferredWorker
# deferredWorker = DeferredWorker.fromWorker(self)
# deferredWorker._waitTime = seconds
# return deferredWorker
def _startPoint(self):
logger.debug("Worker <%s> is about to call: %s%s", self._thread.ident, self._function.__name__,
str(self._arguments))
self._function(*self._arguments)
logger.debug("Worker <%s> called: %s%s", self._thread.ident, self._function.__name__,
str(self._arguments))
def _reset(self):
self._thread = None
self._isDaemon = False
self._function = None
self._callback = lambda: None
self._arguments = ()
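# A minimal usage sketch (not part of the original module): run a plain function on
# a daemon thread through the fluent interface described in the module docstring;
# every()/after() are commented out above and therefore not demonstrated here.
if __name__ == '__main__':
    def greet(name):
        print('Hello, %s!' % name)

    worker = Worker.call(greet).withArgs('world').asDaemon.start()
    worker.join(timeout=1)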
|
# Generated by Django 3.0.5 on 2020-07-17 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='tytuł')),
('date', models.DateField(auto_now=True, verbose_name='data')),
('content', models.TextField(verbose_name='treść')),
('slug', models.SlugField(blank=True, max_length=250, null=True)),
('img', models.ImageField(blank=True, upload_to='posts/', verbose_name='obrazek')),
('status', models.CharField(choices=[('draft', 'Szkic'), ('published', 'Opublikowany')], default='draft', help_text='status publikacji', max_length=15, verbose_name='status')),
('facebook_id', models.CharField(default=0, max_length=250)),
],
),
]
|
# Exercise: print "Hello, world" in English
print("Hello, world!")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
from masters_project_config import *
from subprocess import Popen, PIPE, STDOUT
import re
models_dir = '/ltg/angelii/space_on_svn/angelii/PARSEABILITY/21_section/Bohnet_Nivre/parser/models/'
anna_dir = '/ltg/angelii/space_on_svn/angelii/PARSEABILITY/21_section/Bohnet_Nivre/parser/'
anna_jar = 'anna-3.3.jar'
bohnet_nivre_parser = 'is2.transitionR6j.Parser'
out = DATA_PREFIX + '/out/'
depreps = {
'dt': 'dt-all-ptb_tok-ptb_pos.mdl',
'sb': 'sb-all-ptb_tok-ptb_pos.mdl',
'conll': 'conll-all-ptb_tok-ptb_pos.mdl'
}
class Bohnet_Nivre:
def run(self, conllfile):
"""
@param conllfile conllfile
@return path to output file
"""
for dr, modelfile in depreps.items():
cmd_str = "java -cp {}{} {} -model {}{} -beam 80 -test {} -out {}".format(anna_dir, anna_jar, bohnet_nivre_parser, models_dir, modelfile, conllfile, conllfile + '.' + dr)
            print(cmd_str)
p_run = Popen(cmd_str, shell=True) #, cwd=lth_dir)
p_run.communicate()[0]
return conllfile, depreps.keys()
if __name__ == "__main__":
testconll = "minidevresult.conll"
bn = Bohnet_Nivre()
outfile = bn.run(testconll)
|
"""Asciicast v2 record formats
Full specification: https://github.com/asciinema/asciinema/blob/develop/doc/asciicast-v2.md
"""
import abc
import codecs
import json
from collections import namedtuple
utf8_decoder = codecs.getincrementaldecoder('utf-8')('replace')
class AsciiCastRecord(abc.ABC):
"""Generic Asciicast v2 record format"""
@abc.abstractmethod
def to_json_line(self):
raise NotImplementedError
@classmethod
def from_json_line(cls, line):
if type(json.loads(line)) == dict:
return AsciiCastHeader.from_json_line(line)
elif type(json.loads(line)) == list:
return AsciiCastEvent.from_json_line(line)
else:
raise NotImplementedError
_AsciiCastTheme = namedtuple('AsciiCastTheme', ['fg', 'bg', 'palette'])
class AsciiCastTheme(_AsciiCastTheme):
"""Color theme of the terminal.
All colors must use the '#rrggbb' format
fg: default text color
    bg: default background color
palette: colon separated list of 8 or 16 terminal colors
"""
def __new__(cls, fg, bg, palette):
if cls.is_color(fg):
if cls.is_color(bg):
colors = palette.split(':')
if all([cls.is_color(c) for c in colors[:16]]):
self = super().__new__(cls, fg, bg, palette)
return self
elif all([cls.is_color(c) for c in colors[:8]]):
new_palette = ':'.join(colors[:8])
self = super().__new__(cls, fg, bg, new_palette)
return self
else:
raise ValueError('Invalid palette: the first 8 or 16 colors must be valid')
else:
raise ValueError('Invalid background color: {}'.format(bg))
else:
raise ValueError('Invalid foreground color: {}'.format(fg))
@staticmethod
def is_color(color):
if type(color) == str and len(color) == 7 and color[0] == '#':
try:
int(color[1:], 16)
except ValueError:
return False
return True
return False
_AsciiCastHeader = namedtuple('AsciiCastHeader', ['version', 'width', 'height', 'theme'])
class AsciiCastHeader(AsciiCastRecord, _AsciiCastHeader):
"""Header record
version: Version of the asciicast file format
width: Initial number of columns of the terminal
height: Initial number of lines of the terminal
theme: Color theme of the terminal
"""
types = {
'version': {int},
'width': {int},
'height': {int},
'theme': {type(None), AsciiCastTheme},
}
def __new__(cls, version, width, height, theme):
self = super(AsciiCastHeader, cls).__new__(cls, version, width, height, theme)
for attr in AsciiCastHeader._fields:
type_attr = type(self.__getattribute__(attr))
if type_attr not in cls.types[attr]:
raise TypeError('Invalid type for attribute {}: {} '.format(attr, type_attr) +
'(possible type: {})'.format(cls.types[attr]))
if version != 2:
raise ValueError('Only asciicast v2 format is supported')
return self
def to_json_line(self):
attributes = self._asdict()
if self.theme is not None:
attributes['theme'] = self.theme._asdict()
else:
del attributes['theme']
return json.dumps(attributes, ensure_ascii=False)
@classmethod
def from_json_line(cls, line):
attributes = json.loads(line)
filtered_attributes = {attr: attributes[attr] if attr in attributes else None
for attr in AsciiCastHeader._fields}
if filtered_attributes['theme'] is not None:
filtered_attributes['theme'] = AsciiCastTheme(**filtered_attributes['theme'])
return cls(**filtered_attributes)
_AsciiCastEvent = namedtuple('AsciiCastEvent', ['time', 'event_type', 'event_data', 'duration'])
class AsciiCastEvent(AsciiCastRecord, _AsciiCastEvent):
"""Event record
time: Time elapsed since the beginning of the recording in seconds
event_type: Type 'o' if the data was captured on the standard output of the terminal, type
'i' if it was captured on the standard input
event_data: Data captured during the recording
duration: Duration of the event in seconds (non standard field)
"""
types = {
'time': {int, float},
'event_type': {str},
'event_data': {bytes},
'duration': {type(None), int, float},
}
def __new__(cls, *args, **kwargs):
self = super(AsciiCastEvent, cls).__new__(cls, *args, **kwargs)
for attr in AsciiCastEvent._fields:
type_attr = type(self.__getattribute__(attr))
if type_attr not in cls.types[attr]:
raise TypeError('Invalid type for attribute {}: {} '.format(attr, type_attr) +
'(possible type: {})'.format(cls.types[attr]))
return self
def to_json_line(self):
event_data = utf8_decoder.decode(self.event_data)
attributes = [self.time, self.event_type, event_data]
return json.dumps(attributes, ensure_ascii=False)
@classmethod
def from_json_line(cls, line):
attributes = json.loads(line)
time, event_type, event_data = attributes
event_data = event_data.encode('utf-8')
return cls(time, event_type, event_data, None)
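# A minimal usage sketch (not part of the original module): build a header with a
# theme and a single output event, dump both as asciicast v2 JSON lines, then let
# the generic AsciiCastRecord.from_json_line dispatcher parse the header back.
if __name__ == '__main__':
    theme = AsciiCastTheme(fg='#ffffff', bg='#000000',
                           palette=':'.join(['#000000'] * 8))
    header = AsciiCastHeader(version=2, width=80, height=24, theme=theme)
    event = AsciiCastEvent(time=0.5, event_type='o',
                           event_data='hello\r\n'.encode('utf-8'), duration=None)
    print(header.to_json_line())
    print(event.to_json_line())
    print(AsciiCastRecord.from_json_line(header.to_json_line()))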
|
from decimal import Decimal
from django.urls import reverse
from ...models import Order
from ....core.utils import build_absolute_uri
def get_error_response(amount: Decimal, **additional_kwargs) -> dict:
"""Create a placeholder response for invalid or failed requests.
It is used to generate a failed transaction object.
"""
return {"is_success": False, "amount": amount, **additional_kwargs}
def get_amount_for_sberbank(amount):
"""В Сбербанк необходимо передавать значение в копейках или центах
Поэтому необходимо получить значение в копейках
https://developer.sberbank.ru/doc/v1/acquiring/rest-requests1pay
"""
amount *= 100
return int(amount.to_integral_value())
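# Worked example for the conversion above (illustrative, not part of the original
# module): get_amount_for_sberbank(Decimal("123.45")) returns 12345, i.e. the
# amount expressed in kopecks.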
def get_order_token(order_id):
return Order.objects.get(pk=order_id).token
def get_return_url(order_id):
return build_absolute_uri(reverse("order:payment-success", kwargs={"token": get_order_token(order_id)}))
def get_data_for_payment(payment_information):
data = {
'currency': 643,
'email': payment_information.customer_email,
}
return data
|
# Copyright (C) 2007 Alexandre Conrad, alexandre (dot) conrad (at) gmail (dot) com
#
# This module is part of FormAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from formalchemy.i18n import _
__all__ = ['ValidationError', 'required', 'integer', 'float_', 'decimal_',
'currency', 'email', 'email_verbose', 'maxlength', 'minlength',
'regex', 'passwords_match']
if 'any' not in locals():
# pre-2.5 support
def any(seq):
"""
>>> any(xrange(10))
True
>>> any([0, 0, 0])
False
"""
for o in seq:
if o:
return True
return False
class ValidationError(Exception):
"""an exception raised when the validation failed
"""
def message(self):
return self.args[0]
message = property(message)
def __repr__(self):
return 'ValidationError(%r,)' % self.message
def required(value, field=None):
"""Successful if value is neither None nor the empty string (yes, including empty lists)"""
if value is None or value == '':
msg = isinstance(value, list) and _('Please select a value') or _('Please enter a value')
raise ValidationError(msg)
# other validators will not be called for empty values
def integer(value, field=None):
"""Successful if value is an int"""
# the validator contract says you don't have to worry about "value is None",
# but this is called from deserialize as well as validation
if value is None or not value.strip():
return None
try:
return int(value)
except:
raise ValidationError(_('Value is not an integer'))
def float_(value, field=None):
"""Successful if value is a float"""
# the validator contract says you don't have to worry about "value is None",
# but this is called from deserialize as well as validation
if value is None or not value.strip():
return None
try:
return float(value)
except:
raise ValidationError(_('Value is not a number'))
from decimal import Decimal
def decimal_(value, field=None):
"""Successful if value can represent a decimal"""
# the validator contract says you don't have to worry about "value is None",
# but this is called from deserialize as well as validation
if value is None or not value.strip():
return None
try:
return Decimal(value)
except:
raise ValidationError(_('Value is not a number'))
def currency(value, field=None):
"""Successful if value looks like a currency amount (has exactly two digits after a decimal point)"""
if '%.2f' % float_(value) != value:
raise ValidationError('Please specify full currency value, including cents (e.g., 12.34)')
def email_verbose(value, field=None):
"""
Successful if value is a valid RFC 822 email address.
Ignores the more subtle intricacies of what is legal inside a quoted region,
and thus may accept some
technically invalid addresses, but will never reject a valid address
(which is a much worse problem).
"""
if not value.strip():
return None
reserved = r'()<>@,;:\"[]'
try:
recipient, domain = value.split('@', 1)
except ValueError:
raise ValidationError(_('Missing @ sign'))
if any([ord(ch) < 32 for ch in value]):
raise ValidationError(_('Control characters present'))
if any([ord(ch) > 127 for ch in value]):
raise ValidationError(_('Non-ASCII characters present'))
# validate recipient
if not recipient:
raise ValidationError(_('Recipient must be non-empty'))
if recipient.endswith('.'):
raise ValidationError(_("Recipient must not end with '.'"))
# quoted regions, aka the reason any regexp-based validator is wrong
i = 0
while i < len(recipient):
if recipient[i] == '"' and (i == 0 or recipient[i - 1] == '.' or recipient[i - 1] == '"'):
# begin quoted region -- reserved characters are allowed here.
# (this implementation allows a few addresses not strictly allowed by rfc 822 --
# for instance, a quoted region that ends with '\' appears to be illegal.)
i += 1
while i < len(recipient):
if recipient[i] == '"':
break # end of quoted region
i += 1
else:
raise ValidationError(_("Unterminated quoted section in recipient"))
i += 1
if i < len(recipient) and recipient[i] != '.':
raise ValidationError(_("Quoted section must be followed by '@' or '.'"))
continue
if recipient[i] in reserved:
raise ValidationError(_("Reserved character present in recipient"))
i += 1
# validate domain
if not domain:
raise ValidationError(_('Domain must be non-empty'))
if domain.endswith('.'):
raise ValidationError(_("Domain must not end with '.'"))
if '..' in domain:
raise ValidationError(_("Domain must not contain '..'"))
if any([ch in reserved for ch in domain]):
raise ValidationError(_("Reserved character present in domain"))
def email(value, field=None):
"""Defines a less verbose and explicit error message when validation
fails"""
try:
email_verbose(value, field)
except ValidationError:
raise ValidationError(_("Invalid e-mail address"))
# parameterized validators return the validation function
def maxlength(length):
"""Returns a validator that is successful if the input's length is at most the given one."""
if length <= 0:
raise ValueError('Invalid maximum length')
def f(value, field=None):
if len(value) > length:
raise ValidationError(_('Value must be no more than %d characters long') % length)
return f
def minlength(length):
"""Returns a validator that is successful if the input's length is at least the given one."""
if length <= 0:
raise ValueError('Invalid minimum length')
def f(value, field=None):
if len(value) < length:
raise ValidationError(_('Value must be at least %d characters long') % length)
return f
def regex(exp, errormsg=_('Invalid input')):
"""
Returns a validator that is successful if the input matches (that is,
fulfils the semantics of re.match) the given expression.
Expressions may be either a string or a Pattern object of the sort returned by
re.compile.
"""
import re
if type(exp) != type(re.compile('')):
exp = re.compile(exp)
def f(value, field=None):
if not exp.match(value):
raise ValidationError(errormsg)
return f
def passwords_match(first_password_field):
"""This validator ensures two password fields match.
    You can provide either a Field object, or a string with the
name of the field on the FieldSet that will be checked
against to make sure they match. That means you should set
this validator on the `second` password field.
NOTE: this validator must be attached to a Field that is
itself on a FieldSet.
"""
def f(value, field):
if isinstance(first_password_field, (str, unicode)):
fld = first_password_field
else:
fld = first_password_field.key
if value != getattr(field.parent, fld).value:
raise ValidationError(_('Passwords must match'))
return f
# possible others:
# oneof raises if input is not one of [or a subset of for multivalues] the given list of possibilities
# url(check_exists=False)
# address parts
# cidr
# creditcard number/securitycode (/expires?)
# whole-form validators
# fieldsmatch
# requiredipresent/missing
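# A minimal usage sketch (illustrative, not part of the original module): the
# simple validators are called directly with a value, while the parameterized
# ones (maxlength, minlength, regex, passwords_match) first return the actual
# validation function.
if __name__ == '__main__':
    print(integer('42'))         # -> 42
    maxlength(5)('abc')          # passes silently
    regex(r'^[a-z]+$')('abc')    # passes silently
    try:
        email('no-at-sign')
    except ValidationError as e:
        print(e.message)         # -> Invalid e-mail address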
|
# -*- coding: utf-8 -*-
"""
test.etsi_3gpp_s6a_6d.test_avps
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the Diameter protocol AVP unittests
for 3GPP S6a/S6d Diameter Application Id.
:copyright: (c) 2020 Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import unittest
import os
import sys
testing_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.dirname(os.path.dirname(testing_dir))
sys.path.insert(0, base_dir)
from bromelia.avps import *
from bromelia.base import *
from bromelia.constants import *
from bromelia.exceptions import *
from bromelia.etsi_3gpp_s6a_s6d.avps import *
class TestDiameterAVP(unittest.TestCase):
def test_diameter_avp__load_staticmethod__parsing_supported_features_avp_stream(self):
stream = bytes.fromhex("0000027480000038000028af0000010a4000000c000028af0000027580000010000028af000000010000027680000010000028afdc000201")
avps = DiameterAVP.load(stream)
supported_features_avp = avps[0]
self.assertTrue(isinstance(supported_features_avp, SupportedFeaturesAVP))
self.assertEqual(supported_features_avp.code, SUPPORTED_FEATURES_AVP_CODE)
self.assertTrue(supported_features_avp.is_vendor_id())
self.assertFalse(supported_features_avp.is_mandatory())
self.assertFalse(supported_features_avp.is_protected())
self.assertEqual(supported_features_avp.get_length(), 56)
self.assertEqual(supported_features_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(supported_features_avp.data.hex(), "0000010a4000000c000028af0000027580000010000028af000000010000027680000010000028afdc000201")
self.assertEqual(supported_features_avp.__repr__(), "<Diameter AVP: 628 [Supported-Features] VENDOR>")
vendor_id_avp = supported_features_avp.avps[0]
feature_list_id_avp = supported_features_avp.avps[1]
feature_list_avp = supported_features_avp.avps[2]
self.assertTrue(isinstance(vendor_id_avp, VendorIdAVP))
self.assertEqual(vendor_id_avp.code, VENDOR_ID_AVP_CODE)
self.assertFalse(vendor_id_avp.is_vendor_id())
self.assertTrue(vendor_id_avp.is_mandatory())
self.assertFalse(vendor_id_avp.is_protected())
self.assertEqual(vendor_id_avp.get_length(), 12)
self.assertIsNone(vendor_id_avp.vendor_id)
self.assertEqual(vendor_id_avp.data.hex(), "000028af")
self.assertEqual(vendor_id_avp.__repr__(), "<Diameter AVP: 266 [Vendor-Id] MANDATORY>")
self.assertTrue(isinstance(feature_list_id_avp, FeatureListIdAVP))
self.assertEqual(feature_list_id_avp.code, FEATURE_LIST_ID_AVP_CODE)
self.assertTrue(feature_list_id_avp.is_vendor_id())
self.assertFalse(feature_list_id_avp.is_mandatory())
self.assertFalse(feature_list_id_avp.is_protected())
self.assertEqual(feature_list_id_avp.get_length(), 16)
self.assertEqual(feature_list_id_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(feature_list_id_avp.data.hex(), "00000001")
self.assertEqual(feature_list_id_avp.__repr__(), "<Diameter AVP: 629 [Feature-List-Id] VENDOR>")
self.assertTrue(isinstance(feature_list_avp, FeatureListAVP))
self.assertEqual(feature_list_avp.code, FEATURE_LIST_AVP_CODE)
self.assertTrue(feature_list_avp.is_vendor_id())
self.assertFalse(feature_list_avp.is_mandatory())
self.assertFalse(feature_list_avp.is_protected())
self.assertEqual(feature_list_avp.get_length(), 16)
self.assertEqual(feature_list_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(feature_list_avp.data.hex(), "dc000201")
self.assertEqual(feature_list_avp.__repr__(), "<Diameter AVP: 630 [Feature-List] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_feature_list_id_avp_stream(self):
stream = bytes.fromhex("0000027580000010000028af00000001")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], FeatureListIdAVP))
self.assertEqual(avps[0].code, FEATURE_LIST_ID_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertFalse(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, FEATURE_LIST_ID_1)
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 629 [Feature-List-Id] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_feature_list_avp_stream(self):
stream = bytes.fromhex("0000027680000010000028afdc000201")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], FeatureListAVP))
self.assertEqual(avps[0].code, FEATURE_LIST_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertFalse(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, bytes.fromhex("dc000201"))
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 630 [Feature-List] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_terminal_information_avp_stream(self):
stream = bytes.fromhex("00000579c0000038000028af0000057ac000001a000028af333533353835313130303334313700000000057bc000000e000028af30350000")
avps = DiameterAVP.load(stream)
terminal_information_avp = avps[0]
self.assertTrue(isinstance(terminal_information_avp, TerminalInformationAVP))
self.assertEqual(terminal_information_avp.code, TERMINAL_INFORMATION_AVP_CODE)
self.assertTrue(terminal_information_avp.is_vendor_id())
self.assertTrue(terminal_information_avp.is_mandatory())
self.assertFalse(terminal_information_avp.is_protected())
self.assertEqual(terminal_information_avp.get_length(), 56)
self.assertEqual(terminal_information_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(terminal_information_avp.data.hex(), "0000057ac000001a000028af333533353835313130303334313700000000057bc000000e000028af30350000")
self.assertEqual(terminal_information_avp.__repr__(), "<Diameter AVP: 1401 [Terminal-Information] VENDOR, MANDATORY>")
imei_avp = terminal_information_avp.avps[0]
software_version_avp = terminal_information_avp.avps[1]
self.assertTrue(isinstance(imei_avp, ImeiAVP))
self.assertEqual(imei_avp.code, IMEI_AVP_CODE)
self.assertTrue(imei_avp.is_vendor_id())
self.assertTrue(imei_avp.is_mandatory())
self.assertFalse(imei_avp.is_protected())
self.assertEqual(imei_avp.get_length(), 26)
self.assertEqual(imei_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(imei_avp.data.hex(), "3335333538353131303033343137")
self.assertEqual(imei_avp.__repr__(), "<Diameter AVP: 1402 [Imei] VENDOR, MANDATORY>")
self.assertTrue(isinstance(software_version_avp, SoftwareVersionAVP))
self.assertEqual(software_version_avp.code, SOFTWARE_VERSION_AVP_CODE)
self.assertTrue(software_version_avp.is_vendor_id())
self.assertTrue(software_version_avp.is_mandatory())
self.assertFalse(software_version_avp.is_protected())
self.assertEqual(software_version_avp.get_length(), 14)
self.assertEqual(software_version_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(software_version_avp.data.hex(), "3035")
self.assertEqual(software_version_avp.__repr__(), "<Diameter AVP: 1403 [Software-Version] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_imei_avp_stream(self):
stream = bytes.fromhex("0000057ac000001a000028af33353335383531313030333431370000")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], ImeiAVP))
self.assertEqual(avps[0].code, IMEI_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertTrue(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 26)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, b"35358511003417")
self.assertEqual(avps[0].get_padding_length(), 2)
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1402 [Imei] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_software_version_avp_stream(self):
stream = bytes.fromhex("0000057bc000000e000028af30350000")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], SoftwareVersionAVP))
self.assertEqual(avps[0].code, SOFTWARE_VERSION_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertTrue(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 14)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, b"05")
self.assertEqual(avps[0].get_padding_length(), 2)
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1403 [Software-Version] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_ulr_flags_avp_stream(self):
stream = bytes.fromhex("0000057dc0000010000028af00000003")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], UlrFlagsAVP))
self.assertEqual(avps[0].code, ULR_FLAGS_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertTrue(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, bytes.fromhex("00000003"))
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1405 [Ulr-Flags] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_visited_plmn_id_avp_stream(self):
stream = bytes.fromhex("0000057fc000000f000028af27f45000")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], VisitedPlmnIdAVP))
self.assertEqual(avps[0].code, VISITED_PLMN_ID_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertTrue(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 15)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, bytes.fromhex("27f450"))
self.assertEqual(avps[0].get_padding_length(), 1)
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1407 [Visited-Plmn-Id] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_ue_srvcc_capability_avp_stream(self):
stream = bytes.fromhex("0000064f80000010000028af00000001")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], UeSrvccCapabilityAVP))
self.assertEqual(avps[0].code, UE_SRVCC_CAPABILITY_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertFalse(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, UE_SRVCC_SUPPORTED)
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1615 [Ue-Srvcc-Capability] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_supported_services_avp_stream(self):
stream = bytes.fromhex("00000c4780000020000028af00000c4880000014000028af000000000000001a")
avps = DiameterAVP.load(stream)
supported_services_avp = avps[0]
self.assertTrue(isinstance(supported_services_avp, SupportedServicesAVP))
self.assertEqual(supported_services_avp.code, SUPPORTED_SERVICES_AVP_CODE)
self.assertTrue(supported_services_avp.is_vendor_id())
self.assertFalse(supported_services_avp.is_mandatory())
self.assertFalse(supported_services_avp.is_protected())
self.assertEqual(supported_services_avp.get_length(), 32)
self.assertEqual(supported_services_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(supported_services_avp.data.hex(), "00000c4880000014000028af000000000000001a")
self.assertEqual(supported_services_avp.__repr__(), "<Diameter AVP: 3143 [Supported-Services] VENDOR>")
supported_monitoring_events_avp = supported_services_avp.avps[0]
self.assertTrue(isinstance(supported_monitoring_events_avp, SupportedMonitoringEventsAVP))
self.assertEqual(supported_monitoring_events_avp.code, SUPPORTED_MONITORING_EVENTS_AVP_CODE)
self.assertTrue(supported_monitoring_events_avp.is_vendor_id())
self.assertFalse(supported_monitoring_events_avp.is_mandatory())
self.assertFalse(supported_monitoring_events_avp.is_protected())
self.assertEqual(supported_monitoring_events_avp.get_length(), 20)
self.assertEqual(supported_monitoring_events_avp.vendor_id, VENDOR_ID_3GPP)
self.assertEqual(supported_monitoring_events_avp.data.hex(), "000000000000001a")
self.assertEqual(supported_monitoring_events_avp.__repr__(), "<Diameter AVP: 3144 [Supported-Monitoring-Events] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_supported_monitoring_events_avp_stream(self):
stream = bytes.fromhex("00000c4880000014000028af000000000000001a")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], SupportedMonitoringEventsAVP))
self.assertEqual(avps[0].code, SUPPORTED_MONITORING_EVENTS_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertFalse(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 20)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, bytes.fromhex("000000000000001a"))
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 3144 [Supported-Monitoring-Events] VENDOR>")
def test_diameter_avp__load_staticmethod__parsing_cancellation_type_avp_stream(self):
stream = bytes.fromhex("0000058cc0000010000028af00000004")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], CancellationTypeAVP))
self.assertEqual(avps[0].code, CANCELLATION_TYPE_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertTrue(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data, CANCELLATION_TYPE_INITIAL_ATTACH_PROCEDURE)
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1420 [Cancellation-Type] VENDOR, MANDATORY>")
def test_diameter_avp__load_staticmethod__parsing_clr_flags_avp_stream(self):
stream = bytes.fromhex("0000066680000010000028af00000002")
avps = DiameterAVP.load(stream)
self.assertTrue(isinstance(avps[0], ClrFlagsAVP))
self.assertEqual(avps[0].code, CLR_FLAGS_AVP_CODE)
self.assertTrue(avps[0].is_vendor_id())
self.assertFalse(avps[0].is_mandatory())
self.assertFalse(avps[0].is_protected())
self.assertEqual(avps[0].get_length(), 16)
self.assertEqual(avps[0].vendor_id, VENDOR_ID_3GPP)
self.assertEqual(avps[0].data.hex(), "00000002")
self.assertIsNone(avps[0].get_padding_length())
self.assertEqual(avps[0].__repr__(), "<Diameter AVP: 1638 [Clr-Flags] VENDOR>")
class TestSupportedFeaturesAVP(unittest.TestCase):
def test_supported_features_avp__no_value(self):
self.assertRaises(TypeError, SupportedFeaturesAVP)
def test_supported_features_avp__repr_dunder(self):
vendor_id_avp = VendorIdAVP(VENDOR_ID_3GPP)
feature_list_id_avp = FeatureListIdAVP(1)
feature_list_avp = FeatureListAVP(3690988033)
avps = [vendor_id_avp, feature_list_id_avp, feature_list_avp]
avp = SupportedFeaturesAVP(avps)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 628 [Supported-Features] VENDOR>")
def test_supported_features_avp__1(self):
ref = "0000027480000038000028af0000010a4000000c000028af0000027580000010000028af000000010000027680000010000028afdc000201"
vendor_id_avp = VendorIdAVP(VENDOR_ID_3GPP)
feature_list_id_avp = FeatureListIdAVP(1)
feature_list_avp = FeatureListAVP(3690988033)
avps = [vendor_id_avp, feature_list_id_avp, feature_list_avp]
avp = SupportedFeaturesAVP(avps)
self.assertEqual(avp.dump().hex(), ref)
def test_supported_features_avp__2(self):
ref = "0000027480000038000028af0000010a4000000c000028af0000027580000010000028af000000020000027680000010000028af092a0000"
vendor_id_avp = VendorIdAVP(VENDOR_ID_3GPP)
feature_list_id_avp = FeatureListIdAVP(2)
feature_list_avp = FeatureListAVP(153747456)
avps = [vendor_id_avp, feature_list_id_avp, feature_list_avp]
avp = SupportedFeaturesAVP(avps)
self.assertEqual(avp.dump().hex(), ref)
class TestFeatureListIdAVP(unittest.TestCase):
def test_feature_list_id_avp__no_value(self):
self.assertRaises(TypeError, FeatureListIdAVP)
def test_feature_list_id_avp__repr_dunder(self):
avp = FeatureListIdAVP(1)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 629 [Feature-List-Id] VENDOR>")
def test_feature_list_id_avp__1(self):
avp = FeatureListIdAVP(1)
ref = "0000027580000010000028af00000001"
self.assertEqual(avp.dump().hex(), ref)
def test_feature_list_id_avp__2(self):
avp = FeatureListIdAVP(2)
ref = "0000027580000010000028af00000002"
self.assertEqual(avp.dump().hex(), ref)
class TestFeatureListAVP(unittest.TestCase):
def test_feature_list_avp__no_value(self):
self.assertRaises(TypeError, FeatureListAVP)
def test_feature_list_avp__repr_dunder(self):
avp = FeatureListAVP(3690988033)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 630 [Feature-List] VENDOR>")
def test_feature_list_avp__1(self):
avp = FeatureListAVP(3690988033)
ref = "0000027680000010000028afdc000201"
self.assertEqual(avp.dump().hex(), ref)
def test_feature_list_avp__2(self):
avp = FeatureListAVP(153747456)
ref = "0000027680000010000028af092a0000"
self.assertEqual(avp.dump().hex(), ref)
class TestTerminalInformationAVP(unittest.TestCase):
def test_terminal_information_avp__no_value(self):
self.assertRaises(TypeError, TerminalInformationAVP)
def test_terminal_information_avp__repr_dunder(self):
imei_avp = ImeiAVP("35358511003417")
software_version_avp = SoftwareVersionAVP("05")
avps = [imei_avp, software_version_avp]
avp = TerminalInformationAVP(avps)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1401 [Terminal-Information] VENDOR, MANDATORY>")
def test_terminal_information_avp__1(self):
ref = "00000579c0000038000028af0000057ac000001a000028af333533353835313130303334313700000000057bc000000e000028af30350000"
imei_avp = ImeiAVP("35358511003417")
software_version_avp = SoftwareVersionAVP("05")
avps = [imei_avp, software_version_avp]
avp = TerminalInformationAVP(avps)
self.assertEqual(avp.dump().hex(), ref)
def test_terminal_information_avp__2(self):
ref = "00000579c0000038000028af0000057ac000001b000028af333533393837313030313530383632000000057bc000000f000028af32353500"
imei_avp = ImeiAVP("353987100150862")
software_version_avp = SoftwareVersionAVP("255")
avps = [imei_avp, software_version_avp]
avp = TerminalInformationAVP(avps)
self.assertEqual(avp.dump().hex(), ref)
class TestImeiAVP(unittest.TestCase):
def test_imei_avp__no_value(self):
self.assertRaises(TypeError, ImeiAVP)
def test_imei_avp__repr_dunder(self):
avp = ImeiAVP("35358511003417")
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1402 [Imei] VENDOR, MANDATORY>")
def test_imei_avp__1(self):
avp = ImeiAVP("35358511003417")
ref = "0000057ac000001a000028af33353335383531313030333431370000"
self.assertEqual(avp.dump().hex(), ref)
def test_imei_avp__2(self):
avp = ImeiAVP("353987100150862")
ref = "0000057ac000001b000028af33353339383731303031353038363200"
self.assertEqual(avp.dump().hex(), ref)
def test_imei_avp__3(self):
avp = ImeiAVP("359440080055416")
ref = "0000057ac000001b000028af33353934343030383030353534313600"
self.assertEqual(avp.dump().hex(), ref)
class TestSoftwareVersionAVP(unittest.TestCase):
def test_software_version_avp__no_value(self):
self.assertRaises(TypeError, SoftwareVersionAVP)
def test_software_version_avp__repr_dunder(self):
avp = SoftwareVersionAVP("05")
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1403 [Software-Version] VENDOR, MANDATORY>")
def test_software_version_avp__1(self):
avp = SoftwareVersionAVP("05")
ref = "0000057bc000000e000028af30350000"
self.assertEqual(avp.dump().hex(), ref)
def test_software_version_avp__2(self):
avp = SoftwareVersionAVP("255")
ref = "0000057bc000000f000028af32353500"
self.assertEqual(avp.dump().hex(), ref)
class TestUlrFlagsAVP(unittest.TestCase):
def test_ulr_flags_avp__no_value(self):
self.assertRaises(TypeError, UlrFlagsAVP)
def test_ulr_flags_avp__repr_dunder(self):
avp = UlrFlagsAVP(3)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1405 [Ulr-Flags] VENDOR, MANDATORY>")
def test_ulr_flags_avp__1(self):
avp = UlrFlagsAVP(3)
ref = "0000057dc0000010000028af00000003"
self.assertEqual(avp.dump().hex(), ref)
def test_ulr_flags_avp__2(self):
avp = UlrFlagsAVP(255)
ref = "0000057dc0000010000028af000000ff"
self.assertEqual(avp.dump().hex(), ref)
class TestVisitedPlmnIdAVP(unittest.TestCase):
def test_visited_plmn_id_avp__no_value(self):
self.assertRaises(TypeError, VisitedPlmnIdAVP)
def test_visited_plmn_id_avp__repr_dunder(self):
avp = VisitedPlmnIdAVP(bytes.fromhex("27f450"))
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1407 [Visited-Plmn-Id] VENDOR, MANDATORY>")
def test_visited_plmn_id_avp__1(self):
avp = VisitedPlmnIdAVP(bytes.fromhex("27f450"))
ref = "0000057fc000000f000028af27f45000"
self.assertEqual(avp.dump().hex(), ref)
class TestUeSrvccCapabilityAVP(unittest.TestCase):
def test_ue_srvcc_capability_avp__no_value(self):
self.assertRaises(TypeError, UeSrvccCapabilityAVP)
def test_ue_srvcc_capability_avp__repr_dunder(self):
avp = UeSrvccCapabilityAVP(UE_SRVCC_NOT_SUPPORTED)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1615 [Ue-Srvcc-Capability] VENDOR>")
def test_ue_srvcc_capability_avp__ue_srvcc_not_supported(self):
avp = UeSrvccCapabilityAVP(UE_SRVCC_NOT_SUPPORTED)
ref = "0000064f80000010000028af00000000"
self.assertEqual(avp.dump().hex(), ref)
def test_ue_srvcc_capability_avp__ue_srvcc_supported(self):
avp = UeSrvccCapabilityAVP(UE_SRVCC_SUPPORTED)
ref = "0000064f80000010000028af00000001"
self.assertEqual(avp.dump().hex(), ref)
class TestSupportedServicesAVP(unittest.TestCase):
def test_supported_services_avp__no_value(self):
self.assertRaises(TypeError, SupportedServicesAVP)
def test_supported_services_avp__repr_dunder(self):
supported_monitoring_events_avp = SupportedMonitoringEventsAVP(bytes.fromhex("000000000000001a"))
avps = [supported_monitoring_events_avp]
avp = SupportedServicesAVP(avps)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 3143 [Supported-Services] VENDOR>")
def test_supported_services_avp__1(self):
ref = "00000c4780000020000028af00000c4880000014000028af000000000000001a"
supported_monitoring_events_avp = SupportedMonitoringEventsAVP(bytes.fromhex("000000000000001a"))
avps = [supported_monitoring_events_avp]
avp = SupportedServicesAVP(avps)
self.assertEqual(avp.dump().hex(), ref)
class TestSupportedMonitoringEventsAVP(unittest.TestCase):
def test_supported_monitoring_events_avp__no_value(self):
self.assertRaises(TypeError, SupportedMonitoringEventsAVP)
def test_supported_monitoring_events_avp__repr_dunder(self):
avp = SupportedMonitoringEventsAVP(bytes.fromhex("000000000000001a"))
self.assertEqual(avp.__repr__(), "<Diameter AVP: 3144 [Supported-Monitoring-Events] VENDOR>")
def test_supported_monitoring_events_avp__1(self):
avp = SupportedMonitoringEventsAVP(bytes.fromhex("000000000000001a"))
ref = "00000c4880000014000028af000000000000001a"
self.assertEqual(avp.dump().hex(), ref)
class TestCancellationTypeAVP(unittest.TestCase):
def test_cancellation_type_avp__no_value(self):
self.assertRaises(TypeError, CancellationTypeAVP)
def test_cancellation_type_avp__repr_dunder(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_MME_UPDATE_PROCEDURE)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1420 [Cancellation-Type] VENDOR, MANDATORY>")
def test_cancellation_type_avp__mme_update_procedure(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_MME_UPDATE_PROCEDURE)
ref = "0000058cc0000010000028af00000000"
self.assertEqual(avp.dump().hex(), ref)
def test_cancellation_type_avp__sgsn_update_procedure(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_SGSN_UPDATE_PROCEDURE)
ref = "0000058cc0000010000028af00000001"
self.assertEqual(avp.dump().hex(), ref)
def test_cancellation_type_avp__subscription_withdrawal(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_SUBSCRIPTION_WITHDRAWAL)
ref = "0000058cc0000010000028af00000002"
self.assertEqual(avp.dump().hex(), ref)
def test_cancellation_type_avp__update_procedure_iwf(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_UPDATE_PROCEDURE_IWF)
ref = "0000058cc0000010000028af00000003"
self.assertEqual(avp.dump().hex(), ref)
def test_cancellation_type_avp__initial_attach_procedure(self):
avp = CancellationTypeAVP(CANCELLATION_TYPE_INITIAL_ATTACH_PROCEDURE)
ref = "0000058cc0000010000028af00000004"
self.assertEqual(avp.dump().hex(), ref)
class TestClrFlagsAVP(unittest.TestCase):
def test_clr_flags_avp__no_value(self):
self.assertRaises(TypeError, ClrFlagsAVP)
def test_clr_flags_avp__repr_dunder(self):
avp = ClrFlagsAVP(2)
self.assertEqual(avp.__repr__(), "<Diameter AVP: 1638 [Clr-Flags] VENDOR>")
def test_clr_flags_avp__1(self):
avp = ClrFlagsAVP(2)
ref = "0000066680000010000028af00000002"
self.assertEqual(avp.dump().hex(), ref)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'zhj'
"""
requests模块:
可以看作是urllib打包封装的模块,作用是一样的但是有一点不同:
1.urllib需要先构建,再发请求
2.requests可以边构建边发送
1.安装requests模块:
pip install requests
pip install -i https://pypi.douban.com/simple requests
2.常用方法:
1.get(): 向网站发起请求,并获得响应
用法:
response = request.get(url,headers=headers)
2.response的属性:
1.response.text: 获取响应的字符串
一般默认返回的编码是:ISO-8859-1
需要手动指定编码 : response.encoding = "utf-8"
2.response.content:获取响应的bytes
应用场景: 爬去图片、音频等非结构化数据
"""
import requests
url = "http://www.baidu.com"
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
response = requests.get(url,headers=headers)
response.encoding = "utf-8"
print(response.text)
print(response.content)
print(response.status_code)
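# A minimal sketch of the response.content use case mentioned in the notes above
# (the image URL and file name are hypothetical placeholders, not part of the
# original snippet): the bytes from response.content can be written straight to disk.
img_url = "http://www.example.com/logo.png"  # hypothetical placeholder URL
img_response = requests.get(img_url, headers=headers)
with open("logo.png", "wb") as f:
    f.write(img_response.content)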
|
class TestWeibo:
def test_case1_01(self, open_weibo):
print("查看微博热搜")
def test_case1_02(self, open_weibo):
print("查看微博范冰冰")
|
###########################################################
# #
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html) #
# #
# info: you must host a message file (message.txt) and set  #
# its address in message_xml_url below; write your news in  #
# that file and it will be shown when this dialog is called #
# #
##################Author:Les][smor#########################
import os
import urlparse
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import koding
from koding import Download
from koding import route, Run
addon_id = xbmcaddon.Addon().getAddonInfo('id')
ownAddon = xbmcaddon.Addon(id=addon_id)
message_xml_url = ownAddon.getSetting('message_xml_url')
@route(mode="dialog_news")
def Dialog_News():
koding_test = message_xml_url
    mytext = ''
if 'http' in koding_test:
import urllib2
req = urllib2.Request(koding_test)
req.add_header('User-Agent', 'klopp')
response = urllib2.urlopen(req)
mytext = response.read()
response.close()
else:
mytext = koding.Text_File(path=koding_test, mode='r')
main_text = mytext
my_buttons = ['Close']
my_choice = koding.Custom_Dialog(main_content=main_text,pos='center',size='900x600',buttons=my_buttons,transparency=90,highlight_color='yellow',header='Latest News and Updates')
    if my_choice == 0:
root()
|
from .test.fixtures import app, client
def test_when_create_app(app):
assert app
def test_app_hello_world(client):
with client:
res = client.get('/')
assert res.status_code == 200
assert res.is_json
assert res.json == 'hello world'
|
#!/usr/bin/python
import unittest
import schemaobject
class TestSchema(unittest.TestCase):
def setUp(self):
self.db = schemaobject.SchemaObject(self.database_url + 'sakila')
self.db2 = schemaobject.SchemaObject(self.database_url)
def test_database_version(self):
assert self.db.version == "5.1.30"
def test_port(self):
assert self.db.port == 3306
def test_host(self):
assert self.db.host == "localhost"
def test_user(self):
assert self.db.user == "mitch"
    def test_selected_database(self):
        assert self.db.selected.name == "sakila"
    def test_no_selected_database(self):
        assert self.db2.selected is None
    def test_database_count_with_selected_database(self):
        assert len(self.db.databases) == 1
if __name__ == "__main__":
from test_all import get_database_url
TestSchema.database_url = get_database_url()
unittest.main()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from sqlalchemy_utils import create_database, database_exists, drop_database
import pymysql
import connexion
pymysql.install_as_MySQLdb()
import MySQLdb
# Connect to the database server and create the database, or drop and recreate it if it already exists
dburl = 'mysql://{0}:{1}@{2}/student'.format("root", "example", "mysql")
if not database_exists(dburl):
create_database(dburl)
else:
drop_database(dburl)
create_database(dburl)
# Create the connexion application and get the underlying Flask instance
connex_app = connexion.App(__name__)
app = connex_app.app
# Configure the database connection for the app
app.config['SQLALCHEMY_DATABASE_URI'] = dburl
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
# Initialize Marshmallow for serialization
ma = Marshmallow(app)
|
#!/usr/bin/env python3
from pywarpx import picmi
# Physical constants
c = picmi.constants.c
q_e = picmi.constants.q_e
# Number of time steps
max_steps = 100
# Number of cells
nz = 256
# Physical domain
zmin = -56e-06
zmax = 12e-06
# Domain decomposition
max_grid_size = 64
blocking_factor = 32
# Create grid
grid = picmi.Cartesian1DGrid(
number_of_cells = [nz],
lower_bound = [zmin],
upper_bound = [zmax],
lower_boundary_conditions = ['dirichlet'],
upper_boundary_conditions = ['dirichlet'],
lower_boundary_conditions_particles = ['absorbing'],
upper_boundary_conditions_particles = ['absorbing'],
moving_window_velocity = [c],
warpx_max_grid_size = max_grid_size,
warpx_blocking_factor = blocking_factor)
# Particles: plasma electrons
plasma_density = 2e23
plasma_xmin = None
plasma_ymin = None
plasma_zmin = 10e-06
plasma_xmax = None
plasma_ymax = None
plasma_zmax = None
uniform_distribution = picmi.UniformDistribution(
density = plasma_density,
lower_bound = [plasma_xmin, plasma_ymin, plasma_zmin],
upper_bound = [plasma_xmax, plasma_ymax, plasma_zmax],
fill_in = True)
electrons = picmi.Species(
particle_type = 'electron',
name = 'electrons',
initial_distribution = uniform_distribution)
# Laser
e_max = 16e12
position_z = 9e-06
profile_t_peak = 30.e-15
profile_focal_distance = 100e-06
laser = picmi.GaussianLaser(
wavelength = 0.8e-06,
waist = 5e-06,
duration = 15e-15,
focal_position = [0, 0, profile_focal_distance + position_z],
centroid_position = [0, 0, position_z - c*profile_t_peak],
propagation_direction = [0, 0, 1],
polarization_direction = [0, 1, 0],
E0 = e_max,
fill_in = False)
laser_antenna = picmi.LaserAntenna(
position = [0., 0., position_z],
normal_vector = [0, 0, 1])
# Electromagnetic solver
solver = picmi.ElectromagneticSolver(
grid = grid,
method = 'Yee',
cfl = 0.9,
divE_cleaning = 0)
# Diagnostics
diag_field_list = ['B', 'E', 'J', 'rho']
field_diag = picmi.FieldDiagnostic(
name = 'diag1',
grid = grid,
period = 100,
data_list = diag_field_list,
write_dir = '.',
warpx_file_prefix = 'Python_LaserAcceleration_1d_plt')
# Set up simulation
sim = picmi.Simulation(
solver = solver,
max_steps = max_steps,
verbose = 1,
particle_shape = 'cubic',
warpx_use_filter = 1,
warpx_serialize_ics = 1,
warpx_do_dynamic_scheduling = 0)
# Add plasma electrons
sim.add_species(
electrons,
layout = picmi.GriddedLayout(grid = grid, n_macroparticle_per_cell = [10]))
# Add laser
sim.add_laser(
laser,
injection_method = laser_antenna)
# Add diagnostics
sim.add_diagnostic(field_diag)
# Write input file that can be used to run with the compiled version
sim.write_input_file(file_name = 'inputs_1d_picmi')
# Initialize inputs and WarpX instance
sim.initialize_inputs()
sim.initialize_warpx()
# Advance simulation until last time step
sim.step(max_steps)
|
'''
Tool 0: Run in serial
'''
import pylab as pl
import numpy as np
import sciris as sc
from model import run_sir
if __name__ == '__main__':
# Initialization
n_runs = 10
seeds = np.arange(n_runs)
betas = np.linspace(0.5e-4, 5e-4, n_runs)
# Run
sc.tic()
sirlist = []
for r in range(n_runs):
sir = run_sir(seed=seeds[r], beta=betas[r])
sirlist.append(sir)
sc.toc()
# Plot
pl.figure()
colors = sc.vectocolor(betas, cmap='turbo')
for r in range(n_runs):
pl.plot(sirlist[0].tvec, sirlist[r].I, c=colors[r], label=f'beta={betas[r]:0.2g}')
pl.legend()
    pl.savefig('example-serial.png', dpi=300)
    pl.show()
|
from netapp.connection import NaConnection
from treemigrate_migrations import TreemigrateMigrations # 3 properties
from treemigrate_status_info import TreemigrateStatusInfo # 15 properties
from treemigrate_fh_map_memory import TreemigrateFhMapMemory # 3 properties
from treemigrate_files_estimate import TreemigrateFilesEstimate # 3 properties
class TreemigrateConnection(NaConnection):
def treemigrate_abort(self, migration_id):
"""
Abort an in-progress data migration identified by the
migration-id passed in. Only migrations that are in the
Initialization or Copy state can be aborted. If the migration
is in the Copy State and has not yet cut over file service for
the migrated data to the destination system then the migration
will be stopped and any files copied to the destination will be
removed. If the migration is not in the Initialization or Copy
State then by default the command will return without aborting
the migration.
An aborted migration cannot be restarted. If it is required to
stop a migration that is in the Proxy state then reduce the
proxy inactivity timeout using the treemigrate-modify API.
:param migration_id: The migration identifier of the migration to abort.
"""
return self.request( "treemigrate-abort", {
'migration_id': [ migration_id, 'migration-id', [ int, 'None' ], False ],
}, {
} )
def treemigrate_list_info_iter_end(self, tag):
"""
Terminate a list iteration and clean up any saved info.
:param tag: Tag from a previous treemigrate-list-info-iter-start.
"""
return self.request( "treemigrate-list-info-iter-end", {
'tag': tag,
}, {
} )
def treemigrate_start(self, destination_path, source_path, leave_source=None, proxy_inactivity_timeout=None, leave_destination=None, expected_files_number=None, priority_level=None, test_run=None):
"""
Start the data migration process. The source path
and destination path must be specified. The migrated data
on the source storage system is removed during the Proxy Cleanup State.
The treemigrate-start API runs in the background copying the
data from the source to the destination system, doing the
cut over, and then proxying client requests from the source
system to the destination system for the migrated data. The
proxy will continue to run until proxy-inactivity-timeout
minutes after there are no client requests to proxy from
the source to the destination system.
:param destination_path: The destination system and path to a directory or file where
the data specified in the source-path will be migrated to.
The destination system and path argument should be separated
by a colon. Both of these arguments together are referred to
as the destination. If the last component of the destination
path name exists on the destination system then it must be a
directory and must be exported for use by the root user
account on the source system. The data to be migrated from
the source system to the destination system will be placed
inside this directory on the destination system. The source
system will mount this directory from the destination system.
If the last component of the destination path name does not
exist on the destination system then the directory or file
being migrated as specified with the source path name will
assume the name of the last component of the destination path
name. For this case then the parent directory of the last
component of the destination path name must be exported for
use by the root user account on the source system. The source
system will mount the parent directory of the last component
of the destination path name from the destination system.
Ex: toaster:/vol/vol162/dir10/subdir19
:param source_path: The path to a directory or file that will be migrated to
the destination. Paths are specified by their fully-qualified
path. The directory or file must exist on the source
system, that is the system running the treemigrate-start command.
:param leave_source: When set it will make the treemigrate-start call leave the
files on the source after the migration is complete. Otherwise
the files that were migrated will be removed from the source.
The default value is false.
:param proxy_inactivity_timeout: The number of minutes after which no requests have been
proxied then the migration moves from the Proxy state to the
Proxy Cleanup state.
A timeout value of 0 indicates that the migration should
go to the Proxy Cleanup state immediately.
The default value is 30 minutes.
        :param leave_destination: When set, files already copied on the destination will not be
            cleared if the tree migration fails for any reason.
The default value is false.
:param expected_files_number: Expected number of files which will be migrated.
Memory allocation for File Handle map will be done for
this number of files before starting the data copy.
Default value is 0.
:param priority_level: The priority level of the copy phase of the migration.
Possible values are: "fast", "medium", or "slow".
The default priority level is "medium".
:param test_run: When set it will make the treemigrate-start call a test run;
that is, it will run all the pre-migration checks, but will
not migrate the data. Use the treemigrate status APIs to
get the status of the test run and find out how many
directories, files, and bytes would be migrated had this
call been performed without this flag set.
The default value is false.
"""
return self.request( "treemigrate-start", {
'leave_source': [ leave_source, 'leave-source', [ bool, 'None' ], False ],
'proxy_inactivity_timeout': [ proxy_inactivity_timeout, 'proxy-inactivity-timeout', [ int, 'None' ], False ],
'leave_destination': [ leave_destination, 'leave-destination', [ bool, 'None' ], False ],
'destination_path': [ destination_path, 'destination-path', [ basestring, 'None' ], False ],
'expected_files_number': [ expected_files_number, 'expected-files-number', [ int, 'None' ], False ],
'priority_level': [ priority_level, 'priority-level', [ basestring, 'None' ], False ],
'test_run': [ test_run, 'test-run', [ bool, 'None' ], False ],
'source_path': [ source_path, 'source-path', [ basestring, 'None' ], False ],
}, {
'migration-id': [ int, False ],
} )
def treemigrate_limits(self):
"""
Display limits for treemigrations
"""
return self.request( "treemigrate-limits", {
}, {
'treemigrate-fh-map-memory': [ TreemigrateFhMapMemory, False ],
'treemigrate-migrations': [ TreemigrateMigrations, False ],
'treemigrate-files-estimate': [ TreemigrateFilesEstimate, False ],
} )
def treemigrate_list_info_iter_start(self, active=None, migration_id=None):
"""
Reports the status of one or more data migrations that are
currently in progress or have recently completed. If a
migration-id is specified then only the status of that data
migration is reported.
        :param active: When set, will limit the results to only returning
            information on active migrations, that is, migrations
            that are in the Copy or Proxy state. This flag is
            ignored if the migration-id is specified.
            The default value is false.
:param migration_id: When the migration identifier is specified then only
information on that migration will be returned.
"""
return self.request( "treemigrate-list-info-iter-start", {
'active': [ active, 'active', [ bool, 'None' ], False ],
'migration_id': [ migration_id, 'migration-id', [ int, 'None' ], False ],
}, {
'records': [ int, False ],
'tag': [ basestring, False ],
} )
def treemigrate_status_clear(self, migration_id=None):
"""
Clear completed migrations from the system so that the
migration does not show up when the migration status is
listed. If a migration-id is specified then only that
entry is cleared provided the migration is complete.
A migration is considered complete if it is in the
"Migration Complete", "Migration Aborted", or
"Migration Failed" state.
:param migration_id: When the migration identifier is specified then only
that migration will be cleared.
"""
return self.request( "treemigrate-status-clear", {
'migration_id': [ migration_id, 'migration-id', [ int, 'None' ], False ],
}, {
} )
def treemigrate_modify(self, migration_id, proxy_inactivity_timeout=None):
"""
Modify the parameters of a migration that is in the Copy
or Proxy state.
:param migration_id: The migration identifier of the migration to modify.
:param proxy_inactivity_timeout: The number of minutes after which no requests have been
proxied then the migration moves from the Proxy state to the
Proxy Cleanup state.
A timeout value of 0 indicates that the migration should
go to the Proxy Cleanup state immediately.
The default value is 30 minutes.
"""
return self.request( "treemigrate-modify", {
'proxy_inactivity_timeout': [ proxy_inactivity_timeout, 'proxy-inactivity-timeout', [ int, 'None' ], False ],
'migration_id': [ migration_id, 'migration-id', [ int, 'None' ], False ],
}, {
} )
def treemigrate_list_info_iter_next(self, tag, maximum):
"""
Returns items from a previous call to
treemigrate-list-info-iter-start.
:param tag: Tag from a previous treemigrate-list-info-iter-start.
:param maximum: The maximum number of entries to retrieve.
"""
return self.request( "treemigrate-list-info-iter-next", {
'tag': tag,
'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
}, {
'records': [ int, False ],
'treemigrate-status': [ TreemigrateStatusInfo, True ],
} )
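# Hedged usage sketch (added for illustration, not part of the generated SDK): the commented
# block below shows how the treemigrate-list-info iteration protocol documented above might be
# driven from client code. The connection setup and the NaElement-style result accessors are
# assumptions and may differ in a real deployment.
#
#     conn = TreemigrateConnection(...)                     # construct as for any NaConnection
#     start = conn.treemigrate_list_info_iter_start(active=True)
#     tag = start.child_get_string('tag')                   # assumed result accessor
#     try:
#         while True:
#             chunk = conn.treemigrate_list_info_iter_next(tag, maximum=20)
#             if chunk.child_get_int('records') == 0:
#                 break
#             for status in chunk.child_get('treemigrate-status').children_get():
#                 print(status)
#     finally:
#         conn.treemigrate_list_info_iter_end(tag)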
|
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import glob
class UCMayo4(Dataset):
"""Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system"""
def __init__(self, root_dir, transform=None):
"""
root_dir (string): Path to parent folder where class folders are located.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.class_names = []
self.samples = []
self.transform = transform
subFolders = glob.glob(os.path.join(root_dir, "*"))
subFolders.sort()
for folder in subFolders:
className = folder.split("/")[-1]
self.class_names.append(className)
self.number_of_class = len(self.class_names)
for folder in subFolders:
className = folder.split("/")[-1]
image_paths = glob.glob(os.path.join(folder, "*"))
for image_path in image_paths:
image = Image.open(image_path)
image.load()
self.samples.append((image, self.class_names.index(className)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_image = self.samples[idx][0].copy()
if self.transform:
sample_image = self.transform(sample_image)
return (sample_image, self.samples[idx][1])
class UCMayo4Remission(Dataset):
"""
Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system
According to the remission list given in constructor, it has binary output for annotation.
"""
def __init__(self, root_dir, remission=[2, 3], transform=None):
"""
Args:
root_dir (string): Path to parent folder where class folders are located.
            remission (list): Mayo scores (as int) that will be regarded as non-remission state.
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.number_of_class = 2
self.class_names = []
self.samples = []
self.transform = transform
subFolders = glob.glob(os.path.join(root_dir, "*"))
subFolders.sort()
for folder in subFolders:
className = folder.split("/")[-1]
self.class_names.append(className)
for folder in subFolders:
className = folder.split("/")[-1]
image_paths = glob.glob(os.path.join(folder, "*"))
for image_path in image_paths:
image = Image.open(image_path)
image.load()
label = 0
if self.class_names.index(className) in remission:
label = 1
self.samples.append((image, label))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample_image = self.samples[idx][0].copy()
# TODO since all images are loaded at constructor, transform can be moved there too
if self.transform:
sample_image = self.transform(sample_image)
return (sample_image, self.samples[idx][1])
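# Hedged usage sketch (added for illustration): the dataset path and transform choices below are
# assumptions, not part of the dataset definitions above; they only show the intended wiring.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import transforms
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    # root_dir is expected to contain one sub-folder per Mayo score (e.g. Mayo0 ... Mayo3).
    dataset = UCMayo4("data/uc_mayo", transform=preprocess)   # hypothetical path
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels)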
|
import decneo
from decneo.commonFunctions import *
from decneo import geneLists
import DigitalCellSorter
wfile = 'DCS output/PanglaoDBendothelialAllv0.h5'
cwfile = wfile[:-3] + '_combined.h5'
conffile = 'data/allConfidencePanglaoDB.h5'
if __name__ == '__main__':
# Annotate all datasets of PanglaoDB
if False:
allFiles = np.loadtxt('PanglaoDBfilesSorted.txt', delimiter='\t', dtype=str)
# For large datasets, where most cells are low quality according to Alona
useQCcellsOnly = True
confEC = []
confEpC = []
confFB = []
for file in allFiles:
if (len(file) > 16) and (file[-16:] == '.sparse.RData.h5'):
print('\n\n\nProcessing', file, flush=True)
batch = file[:-16]
p = batch.split('_')
SRA, SRS = p[0], 'notused' if len(p)==1 else p[1]
saveDir = os.path.join('DCS output', 'PanglaoDB', batch, '')
if os.path.isfile(os.path.join(saveDir, 'ColormapForCellTypes.txt')):
print('Directory already exists (%s). Not overriding it' % batch)
continue
DCS = DigitalCellSorter.DigitalCellSorter(dataName=batch, precutQC=True, nClusters=12, doBatchCorrection=False, species='Human', mitochondrialGenesCutoffQC=1.75, saveDir=saveDir, geneListFileName = os.path.join('geneLists', 'fibro_endo_epi_v2_Human.xlsx'), updateConversionDictFile=False, layout='PCA')
# Annotate cells of batch
if False:
try:
df_expr = pd.read_hdf(os.path.join(RDataDirName, file), key='df')
if useQCcellsOnly:
df_expr = df_expr[pd.read_csv(os.path.join(MetadataDirName, 'PanglaoDB', 'data', 'sample_clusters', '%s%s.seurat_clusters.txt' % (SRA, '_' + SRS if SRS!='notused' else '')), delimiter=' ', index_col=0, header=None)[1].index.values]
df_expr.index = pd.Series(df_expr.index.values).replace(Mouse_to_Human_HUGO_conversion).values
df_expr = pd.concat([df_expr], keys=[batch], names=['batch'], axis=1)
df_expr.columns.names = ['batch', 'cell']
df_expr = df_expr.loc[~df_expr.index.duplicated(keep='first')]
df_expr = df_expr.T.loc[~df_expr.T.index.duplicated(keep='first')].T
df_expr0 = pd.read_hdf(os.path.join('data', 'Adult-Heart1_rmbatchdge.txt.gz.h5'), key='df')
df_expr0 = df_expr0.loc[~df_expr0.index.duplicated(keep='first')]
df_expr0 = df_expr0.T.loc[~df_expr0.T.index.duplicated(keep='first')].T
df_expr = pd.concat([df_expr, df_expr0], axis=1, sort=False)
df_expr = df_expr.T.loc[~df_expr.T.index.duplicated(keep='first')].T
DCS.excludedFromQC = df_expr.xs(key='AdultHeart_1', level='batch', axis=1, drop_level=False).columns
DCS.prepare(df_expr)
DCS.process()
DCS.annotate()
DCS.makeAnnotationResultsMatrixPlot()
DCS.makeMarkerExpressionPlot()
DCS.makeStackedBarplot()
DCS.makeProjectionPlotAnnotated()
except Exception as exception:
print('Error while annotating (%s)' % batch, exception)
# Collect DCS endothelial cells into one hdf file
if False:
try:
cells = DCS.getCells(celltype='Endothelial cells')
if not cells is None:
df_expr = DCS.getExprOfCells(cells)
df_expr = df_expr.xs(key=batch, axis=1, level='batch', drop_level=False)
df_expr = df_expr.loc[df_expr.sum(axis=1) > 0.]
columns = pd.MultiIndex.from_arrays([df_expr.columns.get_level_values('batch'), df_expr.columns.get_level_values('cell')])
df_expr = pd.DataFrame(data=df_expr.values, index=df_expr.index, columns=columns)
print('\tRemoved mixed-in cells and all-zero genes:', df_expr.shape)
print('\tSaving to hdf:', SRA, SRS)
df_expr.to_hdf(wfile, key=batch, mode='a', complevel=4, complib='zlib')
except Exception as exception:
print('Error while collecting Endothelial cells (%s)' % batch, exception)
# Collect confidence EC
if False:
try:
conf = pd.read_excel(os.path.join(DCS.saveDir, '%s_annotation.xlsx' % batch), sheet_name='z-scores', index_col='cluster', header=0)
se = pd.read_hdf(DCS.fileHDFpath, key='df_clusters')['cluster'].xs(key=batch, level='batch', drop_level=False)
seTemp = se.replace(conf['Endothelial cells'])
confEC.append(seTemp[seTemp > 0.])
except Exception as exception:
print('Error while annotating (%s)' % batch, exception)
# Collect confidence EpC
if False:
try:
conf = pd.read_excel(os.path.join(DCS.saveDir, '%s_annotation.xlsx' % batch), sheet_name='z-scores', index_col='cluster', header=0)
se = pd.read_hdf(DCS.fileHDFpath, key='df_clusters')['cluster'].xs(key=batch, level='batch', drop_level=False)
seTemp = se.replace(conf['Epithelial cells'])
confEpC.append(seTemp[seTemp > 0.])
except Exception as exception:
print('Error while annotating (%s)' % batch, exception)
# Collect confidence FB
if False:
try:
conf = pd.read_excel(os.path.join(DCS.saveDir, '%s_annotation.xlsx' % batch), sheet_name='z-scores', index_col='cluster', header=0)
se = pd.read_hdf(DCS.fileHDFpath, key='df_clusters')['cluster'].xs(key=batch, level='batch', drop_level=False)
seTemp = se.replace(conf['Fibroblasts'])
confFB.append(seTemp[seTemp > 0.])
except Exception as exception:
print('Error while annotating (%s)' % batch, exception)
if len(confEC) > 0:
confEC = pd.concat(confEC, axis=0, sort=False)
confEC.to_hdf(conffile, key='Endothelial')
print(confEC)
confEpC = pd.concat(confEpC, axis=0, sort=False)
confEpC.to_hdf(conffile, key='Epithelial')
print(confEpC)
confFB = pd.concat(confFB, axis=0, sort=False)
confFB.to_hdf(conffile, key='Fibroblasts')
print(confFB)
# Combine samples to one DataFrame
if False:
keys = KeysOfStore(wfile)
print('Reading %s keys of %s' % (len(keys), wfile), flush=True)
dfs = []
counts = 0
for ikey, key in enumerate(keys):
key = key[1:]
print('\tReading:', ikey, 'of', len(keys), key, 'Count:', counts, flush=True)
df_temp = pd.read_hdf(wfile, key=key).astype('float32')
if df_temp.shape[1] >= 20:
dfs.append(df_temp)
print(df_temp.shape, df_temp.dtypes[0], flush=True)
counts += df_temp.shape[1]
print('Total count:', counts, flush=True)
print('Combining samples', flush=True)
dfs = pd.concat(dfs, axis=1, sort=False).fillna(0.)
print(dfs, dfs.dtypes[0], flush=True)
dfs = dfs.loc[(dfs > 0).sum(axis=1) >= 100]
print(dfs, dfs.dtypes[0], flush=True)
dfs.to_hdf(cwfile, key='df', mode='a', complevel=4, complib='zlib')
# Meta-analysis
if True:
for species in ['Homo sapiens', 'Mus musculus'][:]:
DCS = DigitalCellSorter.DigitalCellSorter(dataName='PanglaoDB_EC_DCS', verbose=2)
# Process DCS-annotated EC
if False:
DCS.saveDir = 'results/DCS output/PanglaoDB_EC_DCS/%s/'%species
# Un-cut EC clustering and analysis
if True:
df = pd.read_hdf(cwfile, key='df')
df = df[df.columns[np.isin(df.columns.get_level_values('batch').values, df_anno[df_anno['Species'] == species].index)]]
df = df.T[df.sum(axis=0) > 0].T
df = df[df.sum(axis=1) > 0]
print(df)
DCS.prepare(df.astype(float))
del df
DCS.project()
DCS.nClusters = 15
DCS.cluster()
# Select best endothelial cells
if True:
df_projection = pd.read_hdf(DCS.fileHDFpath, key='df_projection')
valuesEndo = pd.read_hdf('data/allConfidencePanglaoDB.h5', key='Endothelial').reindex(df_projection.columns)
valuesEpi = pd.read_hdf('data/allConfidencePanglaoDB.h5', key='Epithelial').reindex(df_projection.columns)
valuesFibro = pd.read_hdf('data/allConfidencePanglaoDB.h5', key='Fibroblasts').reindex(df_projection.columns)
cutoff = 0.75
vEndo = ((valuesEndo.fillna(0.).values >= cutoff) & (valuesEpi.fillna(0.).values == 0.) & (valuesFibro.fillna(0.).values == 0.)).astype(bool)
print(len(vEndo), vEndo.sum(), vEndo.sum()/len(vEndo))
df_projection = df_projection[df_projection.columns[vEndo]]
print(df_projection.shape)
DCS.saveDir = 'results/DCS output/PanglaoDB_EC_DCS/%s/cut/'%species
# Cut EC clustering and analysis
if True:
df = pd.read_hdf(cwfile, key='df')[df_projection.columns]
df = df.T[df.sum(axis=0) > 0].T
df = df[df.sum(axis=1) > 0]
print(df)
DCS.prepare(df.astype(float))
del df
DCS.project()
DCS.nClusters = 15
DCS.cluster()
DCS.makeProjectionPlotByClusters(suffix='by Clusters')
# Make plots of filtered EC
if True:
DCS.saveDir='results/DCS output/PanglaoDB_EC_DCS/%s/cut/'%species
df_projection = pd.read_hdf(DCS.fileHDFpath, key='df_projection')
                # Get PanglaoDB metadata (df_anno)
if True:
fields = ['Tissue origin of the sample', 'Species', 'Number of raw cells', 'Fraction of cells passed QC', 'Tissue origin of the sample', 'Sequencing instrument', 'scRNA-seq protocol', 'Median number of expressed genes per cell', 'Is the sample from primary adult tissue?', 'Is the sample from a tumor? (1 true otherwise false)', 'Is the sample from a cell line?']
df_anno = getPanglaoDBAnnotationsSummaryDf(MetadataDirName).droplevel('Cluster index', axis=0)[fields]
df_anno.columns = ['Tissue', 'Species', 'Sequenced cells', 'QC-passed fraction', 'Tissue detailed', 'Sequencing instrument', 'scRNA-seq protocol', 'medianOfGenesPerCell', 'isPrimaryAdult', 'isTumor', 'isCellLine']
df_anno = df_anno.loc[~df_anno.index.duplicated(keep='first')]
df_anno.index = ['%s%s' % (item[0], '_' + item[1] if item[1]!='notused' else '') for item in df_anno.index]
tissueDictPath = os.path.join(os.path.dirname(decneo.__file__), 'geneLists', 'PanglaoDBtissues.xlsx')
tissueDict = pd.read_excel(tissueDictPath, index_col='PanglaoDB tissue')['Starred'].str.replace('?', '')
tissueDict[tissueDict!=tissueDict] = tissueDict.index.values[tissueDict!=tissueDict]
df_anno['Tissue'] = df_anno['Tissue'].replace(tissueDict)
# Sankey plots
if False:
seTissue = pd.Series(index=df_projection.columns, data=df_anno['Tissue'].reindex(df_projection.columns.get_level_values('batch')).fillna(0.).values)
seTissueDetailed = pd.Series(index=df_projection.columns, data=df_anno['Tissue detailed'].reindex(df_projection.columns.get_level_values('batch')).fillna(0.).values)
seBatch = pd.Series(index=df_projection.columns, data=df_projection.columns.get_level_values('batch').values)
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seBatch, seTissue), attemptSavingHTML=True, nameAppend=' Sankey batch-tissue')
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seBatch, seTissueDetailed), attemptSavingHTML=True, nameAppend=' Sankey batch-tissueDet')
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seTissueDetailed, seTissue), attemptSavingHTML=True, nameAppend=' Sankey tissueDet-tissue')
seCluster = pd.read_hdf(DCS.fileHDFpath, key='df_clusters').reindex(df_projection.columns)['cluster']
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seCluster, seTissue), attemptSavingHTML=True, nameAppend=' Sankey cluster-tissue')
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seCluster, seBatch), attemptSavingHTML=True, nameAppend=' Sankey cluster-batch')
seAnno = pd.read_hdf('data/seAnnotaionPanglaoDBsubsetEC.h5', key='seAnno/%s' % species)
sePanglaoCelltype = pd.Series(index=df_projection.columns, data=seAnno.reindex(df_projection.columns).fillna('Missing').values)
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seTissue, sePanglaoCelltype), attemptSavingHTML=True, nameAppend=' Sankey tissue-celltype')
DCS.makeSankeyDiagram(DCS.getCountsDataframe(seTissueDetailed, sePanglaoCelltype), attemptSavingHTML=True, nameAppend=' Sankey tissueDet-celltype')
                # Plots by PanglaoDB metadata
if True:
df_anno = df_anno.reindex(df_projection.columns.get_level_values('batch'), axis=0)
print('SRA:', len(np.unique(df_anno.index.str.split('_', expand=True).get_level_values(0))), 'SRS:', len(np.unique(df_anno.index.str.split('_', expand=True).get_level_values(1))))
makeBarplot(df_anno['Tissue'].values, DCS.saveDir, 'barplotByTissues')
DCS.makeProjectionPlot(df_projection.values, df_anno['Tissue'].values, legend=False, labels=True, suffix='by Tissue cut', rightShift=0.0, fontsize=18)
#DCS.makeProjectionPlot(df_projection.values, df_anno['Tissue'].values, legend=True, labels=True, suffix='by Tissue cut', rightShift=0.45) # 0.15 0.45
# Other quantities
if False:
for col in ['Tissue', 'isPrimaryAdult', 'isTumor', 'isCellLine', 'Species', 'Tissue detailed', 'Sequencing instrument', 'scRNA-seq protocol']:
DCS.makeProjectionPlot(df_projection.values, df_anno[col].values, legend=True, labels=True, suffix='by %s cut' % col, rightShift=0.45) # 0.15 0.45
for col in ['Sequenced cells', 'QC-passed fraction', 'medianOfGenesPerCell']:
DCS.makeProjectionPlot(df_projection.values, reduce(df_anno[col].values), legend=False, labels=False, colorbar=True, suffix='by %s cut' % col)
DCS.makeProjectionPlot(df_projection.values, df_projection.columns.get_level_values('batch').values, legend=False, labels=False, suffix='by batch cut')
|
"""
Module containing private utility functions
===========================================
The ``scipy._lib`` namespace is empty (for now). Tests for all
utilities in submodules of ``_lib`` can be run with::
from scipy import _lib
_lib.test()
"""
from __future__ import division, print_function, absolute_import
|
def bubble_sort_2(l):
for iteration in range(len(l)):
for index in range(1, len(l)):
this_hour, this_min = l[index]
prev_hour, prev_min = l[index - 1]
if prev_hour > this_hour or (prev_hour == this_hour and prev_min > this_min):
continue
l[index] = (prev_hour, prev_min)
l[index - 1] = (this_hour, this_min)
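# Hedged usage sketch (added for illustration): as written, bubble_sort_2 orders (hour, minute)
# tuples in place with the latest time first, because the swap above fires whenever the earlier
# entry is not strictly later than the current one.
if __name__ == "__main__":
    times = [(9, 30), (14, 5), (9, 45), (23, 59), (0, 10)]
    bubble_sort_2(times)
    print(times)  # [(23, 59), (14, 5), (9, 45), (9, 30), (0, 10)]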
|
import sys
import os
import random
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql import SQLContext
from pyspark.sql import Row
def createTrackData(shipRows):
trackData={}
for r in shipRows:
mmsi=r[0]
dt=r[1]
lat=r[2]
lon=r[3]
coord=(lon,lat,0)
if(mmsi not in trackData):
trackData[mmsi]={"coord":[], "when":[]}
trackData[mmsi]["coord"].extend([coord])
trackData[mmsi]["when"].extend([str(dt)])
return trackData
def createTrack(rows):
rv=""
for r in rows:
rv=rv+"<Placemark>\n"
dt=r[1]
rv=rv + " <TimeStamp>\n"
rv=rv + " <when>" + dt + "</when>\n"
rv=rv + " </TimeStamp>\n"
lat=r[2]
lon=r[3]
rv=rv + " <Point>\n"
rv=rv + " <coordinates>" + lon + "," + lat + "," + "0.0" + "</coordinates>\n"
rv=rv + " </Point>\n"
rv=rv+"</Placemark>\n"
return rv
def printKmlHeader():
print("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
print("<kml xmlns=\"http://www.opengis.net/kml/2.2\" xmlns:gx=\"http://www.google.com/kml/ext/2.2\" xmlns:kml=\"http://www.opengis.net/kml/2.2\" xmlns:atom=\"http://www.w3.org/2005/Atom\">")
print("<Document>")
print("<name>" + "367331520" + "</name>")
def printKmlFooter():
print("</Document>")
print("</kml>")
def printKml(kml):
printKmlHeader()
print(kml)
printKmlFooter()
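# Hedged usage sketch (added for illustration): the input file, column names, and MMSI below are
# assumptions; the block only shows how the helpers above might be wired together. Reading the
# CSV without a schema keeps every column as a string, which matches the string concatenation
# done in createTrack().
if __name__ == "__main__":
    spark = SparkSession.builder.appName("ais-to-kml").getOrCreate()
    df = spark.read.csv("ais_positions.csv", header=True)      # hypothetical input file
    rows = (df.select("mmsi", "timestamp", "lat", "lon")        # consumed positionally as
              .where(df.mmsi == "367331520")                    # (mmsi, dt, lat, lon)
              .orderBy("timestamp")
              .collect())
    printKml(createTrack(rows))
    spark.stop()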
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from blog import Config
addr: str = Config.sql_address()
name: str = Config.sql_name()
password: str = Config.sql_pass()
db: str = Config.sql_db()
'''
MARIADB_DATABASE_URL = f"mariadb+mariadbconnector://{name}:{password}@{addr}:3306/{db}"
engine = create_engine(
MARIADB_DATABASE_URL,
)
'''
# SQLite DB
SQLITE_DATABASE_URL = "sqlite:///./app.db"
engine = create_engine(
SQLITE_DATABASE_URL,
connect_args={"check_same_thread": False},
)
SessionLocal = sessionmaker(
autocommit=False,
autoflush=False,
bind=engine,
)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
def create_tables():
Base.metadata.create_all(bind=engine)
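# Hedged usage sketch (added for illustration): the FastAPI route below is an assumption, not
# part of this module; it only shows the dependency-injection pattern get_db() is written for.
if __name__ == "__main__":
    from fastapi import Depends, FastAPI
    from sqlalchemy import text
    app = FastAPI()
    create_tables()
    @app.get("/health")
    def health(db=Depends(get_db)):
        # Run a trivial query through the injected session; the generator closes it afterwards.
        return {"db_ok": db.execute(text("SELECT 1")).scalar() == 1}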
|
"""Logging module."""
import logging
import sublime
log = logging.getLogger("OpenContextPath")
def update_logger():
"""Update the logger based on the current settings."""
settings = sublime.load_settings("OpenContextPath.sublime-settings")
# set the verbosity level
if settings.get("debug", False):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
def plugin_loaded():
"""Initialize the logger."""
update_logger()
# track any changes to the settings
settings = sublime.load_settings("OpenContextPath.sublime-settings")
settings.add_on_change("logging", update_logger)
def plugin_unloaded():
"""Clean up."""
# remove our settings handler
settings = sublime.load_settings("OpenContextPath.sublime-settings")
settings.clear_on_change("logging")
|
import csv
class DataSet():
#This class reads and organizes a CSV file (can be a .txt file)
    def __init__(self,filename):
        #Read the whole file up front and close the handle when done
        with open(filename,'r') as filestream:
            csv_reader = csv.reader(filestream,delimiter=',')
            self.raw_inputs = [[int(r) for r in row] for row in csv_reader]
def getRaw(self):
return self.raw_inputs
def getInputs(self,test=False):
if not test:
#File is an input to network, ignores last column (target)
            return [row[:-1] for row in self.getRaw()]
#File given is a testing file last column is part of test entry
return self.getRaw()
def getTargets(self):
#Returns matrix of all the targets for all the input entries/examples
return [[x[-1]] for x in self.raw_inputs]
def getNumInputElem(self):
#Returns the number of columns in an input entry/file
return len(self.getInputs()[0])
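# Hedged usage sketch (added for illustration): the file name and its comma-separated integer
# layout are assumptions; the last column is treated as the target, as documented above.
if __name__ == "__main__":
    data = DataSet("train.txt")   # hypothetical CSV/TXT of integers
    print("columns per input:", data.getNumInputElem())
    print("first input:", data.getInputs()[0])
    print("first target:", data.getTargets()[0])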
|
#!/usr/bin/python
import sys
import os
if len(sys.argv) >= 3:
psl_filename = sys.argv[1]
skip_Nine = int(sys.argv[2])
else:
print("usage:hist_psl.py psl_file skip_Nine")
print("or ")
sys.exit(1)
################################################################################
def process_temp_list(temp_list):
ref_stat = 0
ref_ls = []
for result_ls in temp_list:
stat = float(result_ls[0])/float(result_ls[10])
if stat > ref_stat:
ref_stat = stat
ref_ls = result_ls
return ref_ls
################################################################################
psl = open(psl_filename,'r')
temp_list = []
Qname=""
i=0
for line in psl:
if i<skip_Nine:
i+=1
continue
ls = line.strip().split('\t')
if (len(ls) != 21):
sys.stderr.write("Warning: Invalid psl line in " + psl_filename + ": " + line)
continue
if Qname == ls[9]:
temp_list.append(ls)
else:
if not Qname =="":
result_ls = process_temp_list(temp_list)
            print('\t'.join(result_ls))
Qname = ls[9]
temp_list = [ls]
i+=1
result_ls = process_temp_list(temp_list)
print('\t'.join(result_ls))
psl.close()
################################################################################
|
# Copyright (c) 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import netaddr
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import address_scope
from neutron.extensions import l3
from neutron.extensions import securitygroup as secgrp
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_address_scope
from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts
from neutron.tests.unit.extensions import test_extraroute as test_ext_route
from neutron.tests.unit.extensions import test_l3 as test_l3_plugin
from neutron.tests.unit.extensions \
import test_l3_ext_gw_mode as test_ext_gw_mode
from neutron.tests.unit.scheduler \
import test_dhcp_agent_scheduler as test_dhcpagent
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import extraroute as xroute_apidef
from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.api.definitions import vlantransparent as vlan_apidef
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as nc_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as plugin_utils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from webob import exc
from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import utils
from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin
from vmware_nsx.services.lbaas.nsx_v3.v2 import lb_driver_v2
from vmware_nsx.tests import unit as vmware
from vmware_nsx.tests.unit.common_plugin import common_v3
from vmware_nsx.tests.unit.extensions import test_metadata
from vmware_nsxlib.tests.unit.v3 import mocks as nsx_v3_mocks
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import nsx_constants
PLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin'
NSX_TZ_NAME = 'default transport zone'
NSX_DHCP_PROFILE_ID = 'default dhcp profile'
NSX_METADATA_PROXY_ID = 'default metadata proxy'
NSX_SWITCH_PROFILE = 'dummy switch profile'
NSX_DHCP_RELAY_SRV = 'dhcp relay srv'
NSX_EDGE_CLUSTER_UUID = 'dummy edge cluster'
def _mock_create_firewall_rules(*args):
# NOTE(arosen): the code in the neutron plugin expects the
# neutron rule id as the display_name.
rules = args[4]
return {
'rules': [
{'display_name': rule['id'], 'id': uuidutils.generate_uuid()}
for rule in rules
]}
def _return_id_key(*args, **kwargs):
return {'id': uuidutils.generate_uuid()}
def _return_id_key_list(*args, **kwargs):
return [{'id': uuidutils.generate_uuid()}]
def _mock_add_rules_in_section(*args):
# NOTE(arosen): the code in the neutron plugin expects the
# neutron rule id as the display_name.
rules = args[0]
return {
'rules': [
{'display_name': rule['display_name'],
'id': uuidutils.generate_uuid()}
for rule in rules
]}
def _mock_nsx_backend_calls():
mock.patch("vmware_nsxlib.v3.client.NSX3Client").start()
fake_profile = {'key': 'FakeKey',
'resource_type': 'FakeResource',
'id': uuidutils.generate_uuid()}
def _return_id(*args, **kwargs):
return uuidutils.generate_uuid()
def _return_same(key, *args, **kwargs):
return key
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile."
"find_by_display_name",
return_value=[fake_profile]
).start()
mock.patch(
"vmware_nsxlib.v3.router.RouterLib.validate_tier0").start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile."
"create_port_mirror_profile",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibBridgeEndpoint.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.security.NsxLibNsGroup.find_by_display_name",
side_effect=_return_id_key_list).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibDhcpProfile."
"get_id_by_name_or_id",
return_value=NSX_DHCP_PROFILE_ID).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibDhcpRelayService."
"get_id_by_name_or_id",
return_value=NSX_DHCP_RELAY_SRV).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibMetadataProxy."
"get_id_by_name_or_id",
side_effect=_return_same).start()
mock.patch(
"vmware_nsxlib.v3.resources.LogicalPort.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.resources.LogicalDhcpServer.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.resources.LogicalDhcpServer.create_binding",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter."
"get_firewall_section_id",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.4.0').start()
mock.patch(
"vmware_nsxlib.v3.load_balancer.Service.get_router_lb_service",
return_value=None).start()
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='OVERLAY').start()
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibEdgeCluster."
"get_transport_nodes", return_value=['dummy']).start()
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportNode."
"get_transport_zones",
return_value=[NSX_TZ_NAME, mock.ANY]).start()
mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection.add_rules",
side_effect=_mock_add_rules_in_section).start()
class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,
nsxlib_testcase.NsxClientTestCase):
def setup_conf_overrides(self):
cfg.CONF.set_override('default_overlay_tz', NSX_TZ_NAME, 'nsx_v3')
cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3')
cfg.CONF.set_override('dhcp_profile',
NSX_DHCP_PROFILE_ID, 'nsx_v3')
cfg.CONF.set_override('metadata_proxy',
NSX_METADATA_PROXY_ID, 'nsx_v3')
cfg.CONF.set_override(
'network_scheduler_driver',
'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')
def mock_plugin_methods(self):
# need to mock the global placeholder. This is due to the fact that
# the generic security group tests assume that there is just one
# security group.
mock_ensure_global_sg_placeholder = mock.patch.object(
nsx_plugin.NsxV3Plugin, '_ensure_global_sg_placeholder')
mock_ensure_global_sg_placeholder.start()
mock.patch(
'neutron_lib.rpc.Connection.consume_in_threads',
return_value=[]).start()
mock.patch.object(nsx_plugin.NsxV3Plugin,
'_cleanup_duplicates').start()
def setUp(self, plugin=PLUGIN_NAME,
ext_mgr=None,
service_plugins=None, **kwargs):
self._patchers = []
_mock_nsx_backend_calls()
self.setup_conf_overrides()
self.mock_get_edge_cluster = mock.patch.object(
nsx_plugin.NsxV3Plugin, '_get_edge_cluster',
return_value=NSX_EDGE_CLUSTER_UUID)
self.mock_get_edge_cluster.start()
self.mock_plugin_methods()
        # ignore the given plugin and use the nsx-v3 one
if not plugin.endswith('NsxTVDPlugin'):
plugin = PLUGIN_NAME
super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
self.maxDiff = None
def tearDown(self):
for patcher in self._patchers:
patcher.stop()
super(NsxV3PluginTestCaseMixin, self).tearDown()
def _create_network(self, fmt, name, admin_state_up,
arg_list=None, providernet_args=None,
set_context=False, tenant_id=None,
**kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'network': {'name': name,
'admin_state_up': admin_state_up,
'tenant_id': tenant_id}}
# Fix to allow the router:external attribute and any other
# attributes containing a colon to be passed with
# a double underscore instead
kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())
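        # For example (illustration added here, not original): passing router__external=True
        # becomes the 'router:external' attribute before the request is built, which is how
        # _create_l3_ext_network below marks its network as external.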
if extnet_apidef.EXTERNAL in kwargs:
arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ())
if providernet_args:
kwargs.update(providernet_args)
for arg in (('admin_state_up', 'tenant_id', 'shared',
'availability_zone_hints') + (arg_list or ())):
# Arg must be present
if arg in kwargs:
data['network'][arg] = kwargs[arg]
network_req = self.new_create_request('networks', data, fmt)
if set_context and tenant_id:
# create a specific auth context for this request
network_req.environ['neutron.context'] = context.Context(
'', tenant_id)
return network_req.get_response(self.api)
def _create_l3_ext_network(
self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID):
name = 'l3_ext_net'
net_type = utils.NetworkTypes.L3_EXT
providernet_args = {pnet.NETWORK_TYPE: net_type,
pnet.PHYSICAL_NETWORK: physical_network}
return self.network(name=name,
router__external=True,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK))
def _save_networks(self, networks):
ctx = context.get_admin_context()
for network_id in networks:
with ctx.session.begin(subtransactions=True):
ctx.session.add(models_v2.Network(id=network_id))
def _initialize_azs(self):
self.plugin.init_availability_zones()
self.plugin._translate_configured_names_to_uuids()
def _enable_native_dhcp_md(self):
cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
cfg.CONF.set_override('dhcp_agent_notification', False)
self.plugin._init_dhcp_metadata()
def _enable_dhcp_relay(self):
# Add the relay service to the config and availability zones
cfg.CONF.set_override('dhcp_relay_service', NSX_DHCP_RELAY_SRV,
'nsx_v3')
mock_nsx_version = mock.patch.object(
self.plugin.nsxlib, 'feature_supported', return_value=True)
mock_nsx_version.start()
self._initialize_azs()
self._enable_native_dhcp_md()
class TestNetworksV2(test_plugin.TestNetworksV2, NsxV3PluginTestCaseMixin):
def setUp(self, plugin=PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
# add vlan transparent to the configuration
cfg.CONF.set_override('vlan_transparent', True)
super(TestNetworksV2, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
def tearDown(self):
super(TestNetworksV2, self).tearDown()
@mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones')
def test_create_network_with_availability_zone(self, mock_validate_az):
name = 'net-with-zone'
zone = ['zone1']
mock_validate_az.return_value = None
with self.network(name=name, availability_zone_hints=zone) as net:
az_hints = net['network']['availability_zone_hints']
self.assertListEqual(az_hints, zone)
def test_network_failure_rollback(self):
self._enable_native_dhcp_md()
self.plugin = directory.get_plugin()
with mock.patch.object(self.plugin.nsxlib.logical_port, 'create',
side_effect=api_exc.NsxApiException):
self.network()
ctx = context.get_admin_context()
networks = self.plugin.get_networks(ctx)
self.assertListEqual([], networks)
def test_create_provider_flat_network(self):
providernet_args = {pnet.NETWORK_TYPE: 'flat'}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'create', side_effect=_return_id_key) as nsx_create, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'delete') as nsx_delete, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'),\
self.network(name='flat_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE, )) as net:
self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE))
# make sure the network is created at the backend
nsx_create.assert_called_once()
# Delete the network and make sure it is deleted from the backend
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
nsx_delete.assert_called_once()
def test_create_provider_flat_network_with_physical_net(self):
physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID
providernet_args = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: physical_network}
with mock.patch(
'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'),\
self.network(name='flat_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK)) as net:
self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE))
def test_create_provider_flat_network_with_vlan(self):
providernet_args = {pnet.NETWORK_TYPE: 'flat',
pnet.SEGMENTATION_ID: 11}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
result = self._create_network(fmt='json', name='bad_flat_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID))
data = self.deserialize('json', result)
# should fail
self.assertEqual('InvalidInput', data['NeutronError']['type'])
def test_create_provider_geneve_network(self):
providernet_args = {pnet.NETWORK_TYPE: 'geneve'}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'create', side_effect=_return_id_key) as nsx_create, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'delete') as nsx_delete, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='OVERLAY'),\
self.network(name='geneve_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE, )) as net:
self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE))
# make sure the network is created at the backend
nsx_create.assert_called_once()
# Delete the network and make sure it is deleted from the backend
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
nsx_delete.assert_called_once()
def test_create_provider_geneve_network_with_physical_net(self):
physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID
providernet_args = {pnet.NETWORK_TYPE: 'geneve',
pnet.PHYSICAL_NETWORK: physical_network}
with mock.patch(
'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='OVERLAY'),\
self.network(name='geneve_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE, )) as net:
self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE))
def test_create_provider_geneve_network_with_vlan(self):
providernet_args = {pnet.NETWORK_TYPE: 'geneve',
pnet.SEGMENTATION_ID: 11}
with mock.patch(
'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='OVERLAY'):
result = self._create_network(fmt='json', name='bad_geneve_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID))
data = self.deserialize('json', result)
# should fail
self.assertEqual('InvalidInput', data['NeutronError']['type'])
def test_create_provider_vlan_network(self):
providernet_args = {pnet.NETWORK_TYPE: 'vlan',
pnet.SEGMENTATION_ID: 11}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'create', side_effect=_return_id_key) as nsx_create, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'delete') as nsx_delete, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'),\
self.network(name='vlan_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID)) as net:
self.assertEqual('vlan', net['network'].get(pnet.NETWORK_TYPE))
# make sure the network is created at the backend
nsx_create.assert_called_once()
# Delete the network and make sure it is deleted from the backend
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
nsx_delete.assert_called_once()
def test_create_provider_nsx_network(self):
physical_network = 'Fake logical switch'
providernet_args = {pnet.NETWORK_TYPE: 'nsx-net',
pnet.PHYSICAL_NETWORK: physical_network}
with mock.patch(
'vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create',
side_effect=nsxlib_exc.ResourceNotFound) as nsx_create, \
mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
'delete') as nsx_delete, \
self.network(name='nsx_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK)) as net:
self.assertEqual('nsx-net', net['network'].get(pnet.NETWORK_TYPE))
self.assertEqual(physical_network,
net['network'].get(pnet.PHYSICAL_NETWORK))
# make sure the network is NOT created at the backend
nsx_create.assert_not_called()
# Delete the network. It should NOT deleted from the backend
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
nsx_delete.assert_not_called()
def test_create_provider_bad_nsx_network(self):
physical_network = 'Bad logical switch'
providernet_args = {pnet.NETWORK_TYPE: 'nsx-net',
pnet.PHYSICAL_NETWORK: physical_network}
with mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
side_effect=nsxlib_exc.ResourceNotFound):
result = self._create_network(fmt='json', name='bad_nsx_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK))
data = self.deserialize('json', result)
# should fail
self.assertEqual('InvalidInput', data['NeutronError']['type'])
def test_create_ens_network_with_no_port_sec(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
providernet_args = {psec.PORTSECURITY: False}
with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
return_value={'transport_zone_id': 'xxx'}):
result = self._create_network(fmt='json', name='ens_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(psec.PORTSECURITY,))
res = self.deserialize('json', result)
# should succeed, and net should have port security disabled
self.assertFalse(res['network']['port_security_enabled'])
def test_create_ens_network_with_port_sec(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
providernet_args = {psec.PORTSECURITY: True}
with mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.3.0'),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
result = self._create_network(fmt='json', name='ens_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(psec.PORTSECURITY,))
res = self.deserialize('json', result)
# should fail
self.assertEqual('NsxENSPortSecurity',
res['NeutronError']['type'])
def test_create_ens_network_with_port_sec_supported(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
providernet_args = {psec.PORTSECURITY: True}
with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
result = self._create_network(fmt='json', name='ens_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(psec.PORTSECURITY,))
res = self.deserialize('json', result)
# should succeed
self.assertTrue(res['network'][psec.PORTSECURITY])
def test_create_ens_network_disable_default_port_security(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
cfg.CONF.set_override('disable_port_security_for_ens', True, 'nsx_v3')
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode', return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibLogicalSwitch.get',
return_value={'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type', return_value='VLAN')
data = {'network': {
'name': 'portsec_net',
'admin_state_up': True,
'shared': False,
'tenant_id': 'some_tenant',
'provider:network_type': 'flat',
'provider:physical_network': 'xxx',
'port_security_enabled': True}}
with mock_ens, mock_tz, mock_tt:
self.plugin.create_network(context.get_admin_context(), data)
def test_create_ens_network_with_qos(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode', return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibLogicalSwitch.get',
return_value={'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type', return_value='VLAN')
mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.4.0')
policy_id = uuidutils.generate_uuid()
data = {'network': {
'name': 'qos_net',
'tenant_id': 'some_tenant',
'provider:network_type': 'flat',
'provider:physical_network': 'xxx',
'qos_policy_id': policy_id,
'port_security_enabled': False}}
with mock_ens, mock_tz, mock_tt, mock_ver, mock.patch.object(
self.plugin, '_validate_qos_policy_id'):
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_network,
context.get_admin_context(), data)
def test_update_ens_network_with_qos(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode', return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibLogicalSwitch.get',
return_value={'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type', return_value='VLAN')
mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.4.0')
data = {'network': {
'name': 'qos_net',
'tenant_id': 'some_tenant',
'provider:network_type': 'flat',
'provider:physical_network': 'xxx',
'admin_state_up': True,
'shared': False,
'port_security_enabled': False}}
with mock_ens, mock_tz, mock_tt, mock_ver,\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
network = self.plugin.create_network(context.get_admin_context(),
data)
policy_id = uuidutils.generate_uuid()
data = {'network': {
'id': network['id'],
'admin_state_up': True,
'shared': False,
'port_security_enabled': False,
'tenant_id': 'some_tenant',
'qos_policy_id': policy_id}}
self.assertRaises(n_exc.InvalidInput,
self.plugin.update_network,
context.get_admin_context(),
network['id'], data)
def test_update_ens_network(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
providernet_args = {psec.PORTSECURITY: False}
with mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.3.0'),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
result = self._create_network(fmt='json', name='ens_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(psec.PORTSECURITY,))
net = self.deserialize('json', result)
net_id = net['network']['id']
args = {'network': {psec.PORTSECURITY: True}}
req = self.new_update_request('networks', args,
net_id, fmt='json')
res = self.deserialize('json', req.get_response(self.api))
# should fail
self.assertEqual('NsxENSPortSecurity',
res['NeutronError']['type'])
def test_update_ens_network_psec_supported(self):
cfg.CONF.set_override('ens_support', True, 'nsx_v3')
providernet_args = {psec.PORTSECURITY: False}
with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
return_value={'transport_zone_id': 'xxx'}):
result = self._create_network(fmt='json', name='ens_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(psec.PORTSECURITY,))
net = self.deserialize('json', result)
net_id = net['network']['id']
args = {'network': {psec.PORTSECURITY: True}}
req = self.new_update_request('networks', args,
net_id, fmt='json')
res = self.deserialize('json', req.get_response(self.api))
# should succeed
self.assertTrue(res['network'][psec.PORTSECURITY])
def test_create_transparent_vlan_network(self):
providernet_args = {vlan_apidef.VLANTRANSPARENT: True}
with mock.patch(
'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='OVERLAY'),\
self.network(name='vt_net',
providernet_args=providernet_args,
arg_list=(vlan_apidef.VLANTRANSPARENT, )) as net:
self.assertTrue(net['network'].get(vlan_apidef.VLANTRANSPARENT))
def test_create_provider_vlan_network_with_transparent(self):
providernet_args = {pnet.NETWORK_TYPE: 'vlan',
vlan_apidef.VLANTRANSPARENT: True}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
result = self._create_network(fmt='json', name='badvlan_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(
pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,
vlan_apidef.VLANTRANSPARENT))
data = self.deserialize('json', result)
self.assertEqual('vlan', data['network'].get(pnet.NETWORK_TYPE))
def _test_generate_tag(self, vlan_id):
net_type = 'vlan'
name = 'phys_net'
plugin = directory.get_plugin()
plugin._network_vlans = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.nsx_v3.network_vlan_ranges)
expected = [('subnets', []), ('name', name),
('admin_state_up', True),
('status', 'ACTIVE'),
('shared', False),
(pnet.NETWORK_TYPE, net_type),
(pnet.PHYSICAL_NETWORK,
'fb69d878-958e-4f32-84e4-50286f26226b'),
(pnet.SEGMENTATION_ID, vlan_id)]
providernet_args = {pnet.NETWORK_TYPE: net_type,
pnet.PHYSICAL_NETWORK:
'fb69d878-958e-4f32-84e4-50286f26226b'}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
with self.network(name=name, providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK)) as net:
for k, v in expected:
self.assertEqual(net['network'][k], v)
def test_create_phys_vlan_generate(self):
cfg.CONF.set_override('network_vlan_ranges',
'fb69d878-958e-4f32-84e4-50286f26226b',
'nsx_v3')
self._test_generate_tag(1)
def test_create_phys_vlan_generate_range(self):
cfg.CONF.set_override('network_vlan_ranges',
'fb69d878-958e-4f32-84e4-'
'50286f26226b:100:110',
'nsx_v3')
self._test_generate_tag(100)
def test_create_phys_vlan_network_outofrange_returns_503(self):
cfg.CONF.set_override('network_vlan_ranges',
'fb69d878-958e-4f32-84e4-'
'50286f26226b:9:10',
'nsx_v3')
self._test_generate_tag(9)
self._test_generate_tag(10)
with testlib_api.ExpectedException(exc.HTTPClientError) as ctx_manager:
self._test_generate_tag(11)
self.assertEqual(ctx_manager.exception.code, 503)
def test_update_external_flag_on_net(self):
with self.network() as net:
# should fail to update the network to external
args = {'network': {'router:external': 'True'}}
req = self.new_update_request('networks', args,
net['network']['id'], fmt='json')
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual('InvalidInput',
res['NeutronError']['type'])
def test_network_update_external(self):
# This plugin does not support updating the external flag of a network
self.skipTest("UnSupported")
def test_network_update_external_failure(self):
data = {'network': {'name': 'net1',
'router:external': 'True',
'tenant_id': 'tenant_one',
'provider:physical_network': 'stam'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
ext_net_id = network['network']['id']
# should fail to update the network to non-external
args = {'network': {'router:external': 'False'}}
req = self.new_update_request('networks', args,
ext_net_id, fmt='json')
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual('InvalidInput',
res['NeutronError']['type'])
def test_update_network_rollback(self):
with self.network() as net:
# Fail the backend update
with mock.patch("vmware_nsxlib.v3.core_resources."
"NsxLibLogicalSwitch.update",
side_effect=nsxlib_exc.InvalidInput):
args = {'network': {'description': 'test rollback'}}
req = self.new_update_request('networks', args,
net['network']['id'], fmt='json')
res = self.deserialize('json', req.get_response(self.api))
# should fail with the nsxlib error (meaning that the rollback
# did not fail)
self.assertEqual('InvalidInput',
res['NeutronError']['type'])
def test_update_network_port_sec(self):
data = {'network': {
'name': 'psec_net',
'tenant_id': 'some_tenant',
'admin_state_up': True,
'shared': False,
'port_security_enabled': True}}
network = self.plugin.create_network(context.get_admin_context(),
data)
        self.assertTrue(network['port_security_enabled'])
data = {'network': {
'id': network['id'],
'admin_state_up': True,
'shared': False,
'port_security_enabled': False,
'tenant_id': 'some_tenant'}}
res = self.plugin.update_network(context.get_admin_context(),
network['id'], data)
        self.assertFalse(res['port_security_enabled'])
class TestSubnetsV2(common_v3.NsxV3TestSubnets, NsxV3PluginTestCaseMixin):
def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None):
super(TestSubnetsV2, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def test_create_subnet_with_shared_address_space(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '100.64.0.0/16',
'name': 'sub1',
'enable_dhcp': False,
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'host_routes': None,
'ip_version': 4}}
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_subnet,
context.get_admin_context(), data)
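    # Helper: create an external (router:external=True) network through the
    # API and return the deserialized response.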
def _create_external_network(self):
data = {'network': {'name': 'net1',
'router:external': 'True',
'tenant_id': 'tenant_one',
'provider:physical_network': 'stam'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
return network
def test_create_subnet_with_conflicting_t0_address(self):
network = self._create_external_network()
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '172.20.1.0/24',
'name': 'sub1',
'enable_dhcp': False,
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'host_routes': None,
'ip_version': 4}}
ports = [{'subnets': [{'ip_addresses': [u'172.20.1.60'],
'prefix_length': 24}],
'resource_type': 'LogicalRouterUpLinkPort'}]
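        # Simulate a tier-0 uplink port whose IP (172.20.1.60) falls inside
        # the requested subnet CIDR; subnet creation should be rejected.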
with mock.patch.object(self.plugin.nsxlib.logical_router_port,
'get_by_router_id',
return_value=ports):
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_subnet,
context.get_admin_context(), data)
def test_subnet_native_dhcp_subnet_enabled(self):
self._enable_native_dhcp_md()
with self.network() as network:
with mock.patch.object(self.plugin,
'_enable_native_dhcp') as enable_dhcp,\
self.subnet(network=network, enable_dhcp=True):
# Native dhcp should be set for this subnet
self.assertTrue(enable_dhcp.called)
def test_subnet_native_dhcp_subnet_disabled(self):
self._enable_native_dhcp_md()
with self.network() as network:
with mock.patch.object(self.plugin,
'_enable_native_dhcp') as enable_dhcp,\
self.subnet(network=network, enable_dhcp=False):
# Native dhcp should not be set for this subnet
self.assertFalse(enable_dhcp.called)
def test_subnet_native_dhcp_with_relay(self):
"""Verify that the relay service is added to the router interface"""
self._enable_dhcp_relay()
with self.network() as network:
with mock.patch.object(self.plugin,
'_enable_native_dhcp') as enable_dhcp,\
self.subnet(network=network, enable_dhcp=True):
# Native dhcp should not be set for this subnet
self.assertFalse(enable_dhcp.called)
def test_subnet_native_dhcp_flat_subnet_disabled(self):
self._enable_native_dhcp_md()
providernet_args = {pnet.NETWORK_TYPE: 'flat'}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
with self.network(name='flat_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE, )) as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '172.20.1.0/24',
'name': 'sub1',
'enable_dhcp': False,
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'host_routes': None,
'ip_version': 4}}
self.plugin.create_subnet(
context.get_admin_context(), data)
def test_subnet_native_dhcp_flat_subnet_enabled(self):
self._enable_native_dhcp_md()
providernet_args = {pnet.NETWORK_TYPE: 'flat'}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
with self.network(name='flat_net',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE, )) as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '172.20.1.0/24',
'name': 'sub1',
'enable_dhcp': True,
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'host_routes': None,
'ip_version': 4}}
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_subnet,
context.get_admin_context(), data)
def test_fail_create_static_routes_per_subnet_over_limit(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.0.0/16',
'name': 'sub1',
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'enable_dhcp': False,
'ip_version': 4}}
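            # Build a long host_routes list so that the per-subnet static
            # route limit is exceeded and create_subnet is rejected.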
count = 1
host_routes = []
while count < nsx_constants.MAX_STATIC_ROUTES:
host_routes.append("'host_routes': [{'destination': "
"'135.207.0.0/%s', 'nexthop': "
"'1.2.3.%s'}]" % (count, count))
count += 1
data['subnet']['host_routes'] = host_routes
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_subnet,
context.get_admin_context(), data)
def test_create_subnet_disable_dhcp_with_host_route_fails(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '172.20.1.0/24',
'name': 'sub1',
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'enable_dhcp': False,
'host_routes': [{
'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}],
'ip_version': 4}}
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_subnet,
context.get_admin_context(), data)
def test_update_subnet_disable_dhcp_with_host_route_fails(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '172.20.1.0/24',
'name': 'sub1',
'dns_nameservers': None,
'allocation_pools': None,
'tenant_id': 'tenant_one',
'enable_dhcp': True,
'host_routes': [{
'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}],
'ip_version': 4}}
subnet = self.plugin.create_subnet(
context.get_admin_context(), data)
data['subnet']['enable_dhcp'] = False
self.assertRaises(n_exc.InvalidInput,
self.plugin.update_subnet,
context.get_admin_context(), subnet['id'], data)
class TestPortsV2(common_v3.NsxV3SubnetMixin,
common_v3.NsxV3TestPorts, NsxV3PluginTestCaseMixin,
test_bindings.PortBindingsTestCase,
test_bindings.PortBindingsHostTestCaseMixin,
test_bindings.PortBindingsVnicTestCaseMixin):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = True
def setUp(self):
cfg.CONF.set_override('switching_profiles', [NSX_SWITCH_PROFILE],
'nsx_v3')
# add vlan transparent to the configuration
cfg.CONF.set_override('vlan_transparent', True)
super(TestPortsV2, self).setUp()
self.plugin = directory.get_plugin()
self.ctx = context.get_admin_context()
def test_update_port_delete_ip(self):
# This test case overrides the default because the nsx plugin
# implements port_security/security groups and it is not allowed
# to remove an ip address from a port unless the security group
# is first removed.
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [],
secgrp.SECURITYGROUPS: []}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self.assertEqual(res['port']['fixed_ips'],
data['port']['fixed_ips'])
def test_delete_dhcp_port(self):
self._enable_native_dhcp_md()
with self.subnet():
pl = directory.get_plugin()
ctx = context.Context(user_id=None, tenant_id=self._tenant_id,
is_admin=False)
ports = pl.get_ports(
ctx, filters={'device_owner': [constants.DEVICE_OWNER_DHCP]})
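            # Deleting the plugin-created DHCP port directly should be
            # rejected.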
req = self.new_delete_request('ports', ports[0]['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_fail_create_port_with_ext_net(self):
expected_error = 'InvalidInput'
with self._create_l3_ext_network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
res = self._create_port(self.fmt,
network['network']['id'],
exc.HTTPBadRequest.code,
device_owner=device_owner)
data = self.deserialize(self.fmt, res)
self.assertEqual(expected_error, data['NeutronError']['type'])
def test_fail_update_port_with_ext_net(self):
with self._create_l3_ext_network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
with self.port(subnet=subnet) as port:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
data = {'port': {'device_owner': device_owner}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code,
res.status_int)
def test_fail_update_lb_port_with_allowed_address_pairs(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'pair_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': constants.DEVICE_OWNER_LOADBALANCERV2,
'fixed_ips': []}
}
port = self.plugin.create_port(self.ctx, data)
data['port']['allowed_address_pairs'] = '10.0.0.1'
self.assertRaises(
n_exc.InvalidInput,
self.plugin.update_port, self.ctx, port['id'], data)
def test_fail_create_allowed_address_pairs_over_limit(self):
with self.network() as network,\
self.subnet(network=network, enable_dhcp=True) as s1:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'pair_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [{'subnet_id': s1['subnet']['id']}]
}
}
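            # Build 128 allowed address pairs, more than the plugin permits
            # on a single port, so create_port is rejected.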
count = 1
address_pairs = []
while count < 129:
address_pairs.append({'ip_address': '10.0.0.%s' %
count})
count += 1
data['port']['allowed_address_pairs'] = address_pairs
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, self.ctx, data)
def test_fail_update_lb_port_with_fixed_ip(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'pair_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': constants.DEVICE_OWNER_LOADBALANCERV2,
'fixed_ips': []}
}
port = self.plugin.create_port(self.ctx, data)
data['port']['fixed_ips'] = '10.0.0.1'
self.assertRaises(
n_exc.InvalidInput,
self.plugin.update_port, self.ctx, port['id'], data)
def test_create_port_with_qos(self):
with self.network() as network:
policy_id = uuidutils.generate_uuid()
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'qos_policy_id': policy_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01'}
}
with mock.patch.object(self.plugin, '_get_qos_profile_id'),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
port = self.plugin.create_port(self.ctx, data)
self.assertEqual(policy_id, port['qos_policy_id'])
# Get port should also return the qos policy id
with mock.patch('vmware_nsx.services.qos.common.utils.'
'get_port_policy_id',
return_value=policy_id):
port = self.plugin.get_port(self.ctx, port['id'])
self.assertEqual(policy_id, port['qos_policy_id'])
def test_update_port_with_qos(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01'}
}
port = self.plugin.create_port(self.ctx, data)
policy_id = uuidutils.generate_uuid()
data['port']['qos_policy_id'] = policy_id
with mock.patch.object(self.plugin, '_get_qos_profile_id'),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
res = self.plugin.update_port(self.ctx, port['id'], data)
self.assertEqual(policy_id, res['qos_policy_id'])
# Get port should also return the qos policy id
with mock.patch('vmware_nsx.services.qos.common.utils.'
'get_port_policy_id',
return_value=policy_id):
res = self.plugin.get_port(self.ctx, port['id'])
self.assertEqual(policy_id, res['qos_policy_id'])
# now remove the qos from the port
data['port']['qos_policy_id'] = None
res = self.plugin.update_port(self.ctx, port['id'], data)
self.assertIsNone(res['qos_policy_id'])
def test_create_ext_port_with_qos_fail(self):
with self._create_l3_ext_network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
policy_id = uuidutils.generate_uuid()
data = {'port': {'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'qos_policy_id': policy_id}}
# Cannot add qos policy to a router port
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, self.ctx, data)
def _test_create_illegal_port_with_qos_fail(self, device_owner):
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
policy_id = uuidutils.generate_uuid()
data = {'port': {'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'device_owner': device_owner,
'qos_policy_id': policy_id}}
# Cannot add qos policy to this type of port
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, self.ctx, data)
def test_create_port_ens_with_qos_fail(self):
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
policy_id = uuidutils.generate_uuid()
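                # The mocks below make the network appear to be on an ENS
                # host switch (NSX 2.4.0), where a QoS policy on the port is
                # rejected.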
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode',
return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources'
'.NsxLibLogicalSwitch.get',
return_value={
'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type',
return_value='VLAN')
mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.4.0')
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01',
'qos_policy_id': policy_id}
}
# Cannot add qos policy to this type of port
with mock_ens, mock_tz, mock_tt, mock_ver,\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, self.ctx, data)
def test_create_port_ens_with_sg(self):
cfg.CONF.set_override('disable_port_security_for_ens', True, 'nsx_v3')
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode',
return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources'
'.NsxLibLogicalSwitch.get',
return_value={
'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type',
return_value='VLAN')
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'sg_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01',
'port_security_enabled': True}
}
with mock_ens, mock_tz, mock_tt:
self.plugin.create_port(self.ctx, data)
def test_update_port_ens_with_qos_fail(self):
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
policy_id = uuidutils.generate_uuid()
mock_ens = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_host_switch_mode',
return_value='ENS')
mock_tz = mock.patch('vmware_nsxlib.v3'
'.core_resources'
'.NsxLibLogicalSwitch.get',
return_value={
'transport_zone_id': 'xxx'})
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type',
return_value='VLAN')
mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.4.0')
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01'}
}
with mock_ens, mock_tz, mock_tt, mock_ver,\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
port = self.plugin.create_port(self.ctx, data)
data['port'] = {'qos_policy_id': policy_id}
self.assertRaises(n_exc.InvalidInput,
self.plugin.update_port,
self.ctx, port['id'], data)
def test_create_port_with_mac_learning_true(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01',
'mac_learning_enabled': True}
}
port = self.plugin.create_port(self.ctx, data)
self.assertTrue(port['mac_learning_enabled'])
def test_create_port_with_mac_learning_false(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01',
'mac_learning_enabled': False}
}
port = self.plugin.create_port(self.ctx, data)
self.assertFalse(port['mac_learning_enabled'])
def test_update_port_with_mac_learning_true(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01'}
}
port = self.plugin.create_port(self.ctx, data)
data['port']['mac_learning_enabled'] = True
update_res = self.plugin.update_port(self.ctx, port['id'], data)
self.assertTrue(update_res['mac_learning_enabled'])
def test_update_port_with_mac_learning_false(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01'}
}
port = self.plugin.create_port(self.ctx, data)
data['port']['mac_learning_enabled'] = False
update_res = self.plugin.update_port(self.ctx, port['id'], data)
self.assertFalse(update_res['mac_learning_enabled'])
    def test_update_port_with_mac_learning_fails(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': constants.DEVICE_OWNER_FLOATINGIP,
'fixed_ips': [],
'port_security_enabled': False,
'mac_address': '00:00:00:00:00:01'}
}
port = self.plugin.create_port(self.ctx, data)
data['port']['mac_learning_enabled'] = True
self.assertRaises(
n_exc.InvalidInput,
self.plugin.update_port, self.ctx, port['id'], data)
def test_create_router_port_with_qos_fail(self):
self._test_create_illegal_port_with_qos_fail(
'network:router_interface')
def test_create_dhcp_port_with_qos_fail(self):
self._test_create_illegal_port_with_qos_fail('network:dhcp')
def _test_update_illegal_port_with_qos_fail(self, device_owner):
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24'),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
policy_id = uuidutils.generate_uuid()
data = {'port': {'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01',
'device_id': 'dummy',
'device_owner': ''}}
port = self.plugin.create_port(self.ctx, data)
policy_id = uuidutils.generate_uuid()
data['port'] = {'qos_policy_id': policy_id,
'device_owner': device_owner}
# Cannot add qos policy to a router interface port
self.assertRaises(n_exc.InvalidInput,
self.plugin.update_port, self.ctx, port['id'], data)
def test_update_router_port_with_qos_fail(self):
self._test_update_illegal_port_with_qos_fail(
'network:router_interface')
def test_update_dhcp_port_with_qos_fail(self):
self._test_update_illegal_port_with_qos_fail('network:dhcp')
def test_create_port_with_qos_on_net(self):
with self.network() as network:
policy_id = uuidutils.generate_uuid()
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': device_owner,
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01'}
}
with mock.patch.object(self.plugin,
'_get_qos_profile_id') as get_profile,\
mock.patch('vmware_nsx.services.qos.common.utils.'
'get_network_policy_id', return_value=policy_id),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
self.plugin.create_port(self.ctx, data)
get_profile.assert_called_once_with(self.ctx, policy_id)
def test_update_port_with_qos_on_net(self):
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'qos_port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01'}
}
port = self.plugin.create_port(self.ctx, data)
policy_id = uuidutils.generate_uuid()
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
data['port']['device_owner'] = device_owner
with mock.patch.object(self.plugin,
'_get_qos_profile_id') as get_profile,\
mock.patch('vmware_nsx.services.qos.common.utils.'
'get_network_policy_id', return_value=policy_id),\
mock.patch.object(self.plugin, '_validate_qos_policy_id'):
self.plugin.update_port(self.ctx, port['id'], data)
get_profile.assert_called_once_with(self.ctx, policy_id)
def _get_ports_with_fields(self, tenid, fields, expected_count):
pl = directory.get_plugin()
ctx = context.Context(user_id=None, tenant_id=tenid,
is_admin=False)
ports = pl.get_ports(ctx, filters={'tenant_id': [tenid]},
fields=fields)
self.assertEqual(expected_count, len(ports))
def test_get_ports_with_fields(self):
with self.port(), self.port(), self.port(), self.port() as p:
tenid = p['port']['tenant_id']
# get all fields:
self._get_ports_with_fields(tenid, None, 4)
# get specific fields:
self._get_ports_with_fields(tenid, 'mac_address', 4)
self._get_ports_with_fields(tenid, 'network_id', 4)
def test_list_ports_filtered_by_security_groups(self):
ctx = context.get_admin_context()
with self.port() as port1, self.port() as port2:
query_params = "security_groups=%s" % (
port1['port']['security_groups'][0])
ports_data = self._list('ports', query_params=query_params)
self.assertEqual(set([port1['port']['id'], port2['port']['id']]),
set([port['id'] for port in ports_data['ports']]))
query_params = "security_groups=%s&id=%s" % (
port1['port']['security_groups'][0],
port1['port']['id'])
ports_data = self._list('ports', query_params=query_params)
self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id'])
self.assertEqual(1, len(ports_data['ports']))
temp_sg = {'security_group': {'tenant_id': 'some_tenant',
'name': '', 'description': 's'}}
            sg_db_mixin = sg_db.SecurityGroupDbMixin()
            sg = sg_db_mixin.create_security_group(ctx, temp_sg)
            sg_db_mixin._delete_port_security_group_bindings(
                ctx, port2['port']['id'])
            sg_db_mixin._create_port_security_group_binding(
                ctx, port2['port']['id'], sg['id'])
port2['port']['security_groups'][0] = sg['id']
query_params = "security_groups=%s" % (
port1['port']['security_groups'][0])
ports_data = self._list('ports', query_params=query_params)
self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id'])
self.assertEqual(1, len(ports_data['ports']))
query_params = "security_groups=%s" % (
(port2['port']['security_groups'][0]))
ports_data = self._list('ports', query_params=query_params)
self.assertEqual(port2['port']['id'], ports_data['ports'][0]['id'])
def test_port_failure_rollback_dhcp_exception(self):
self._enable_native_dhcp_md()
self.plugin = directory.get_plugin()
with mock.patch.object(self.plugin, '_add_port_mp_dhcp_binding',
side_effect=nsxlib_exc.ManagerError):
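            # The backend failure should roll back the Neutron port creation,
            # leaving no ports behind.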
self.port()
ctx = context.get_admin_context()
            ports = self.plugin.get_ports(ctx)
            self.assertListEqual([], ports)
def test_port_DB_failure_rollback_dhcp_exception(self):
self._enable_native_dhcp_md()
self.plugin = directory.get_plugin()
with mock.patch('vmware_nsx.db.db.add_neutron_nsx_dhcp_binding',
side_effect=db_exc.DBError),\
mock.patch.object(self.plugin, '_enable_native_dhcp'),\
mock.patch('vmware_nsx.db.db.get_nsx_service_binding'),\
self.network() as network,\
self.subnet(network, cidr='10.0.1.0/24') as subnet:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'p1',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': '10.0.1.2'}],
'mac_address': '00:00:00:00:00:01'}
}
# making sure the port creation succeeded anyway
created_port = self.plugin.create_port(self.ctx, data)
self.assertEqual('fake_device', created_port['device_id'])
def test_update_port_add_additional_ip(self):
"""Test update of port with additional IP fails."""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']},
{'subnet_id':
subnet['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code,
res.status_int)
def test_create_port_additional_ip(self):
"""Test that creation of port with additional IP fails."""
with self.subnet() as subnet:
data = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']},
{'subnet_id':
subnet['subnet']['id']}]}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code,
res.status_int)
def test_create_port_with_switching_profiles(self):
"""Tests that nsx ports get the configures switching profiles"""
self.plugin = directory.get_plugin()
with self.network() as network:
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'p1',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': 'fake_owner',
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01'}
}
with mock.patch.object(self.plugin.nsxlib.logical_port, 'create',
return_value={'id': 'fake'}) as nsx_create:
self.plugin.create_port(self.ctx, data)
expected_prof = self.plugin.get_default_az().\
switching_profiles_objs[0]
actual_profs = nsx_create.call_args[1]['switch_profile_ids']
                # the port's switching profiles should start with the
                # configured one
self.assertEqual(expected_prof, actual_profs[0])
def test_create_ens_port_with_no_port_sec(self):
with self.subnet() as subnet,\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
return_value={'transport_zone_id': 'xxx'}):
args = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']}],
psec.PORTSECURITY: False}}
port_req = self.new_create_request('ports', args)
port = self.deserialize(self.fmt, port_req.get_response(self.api))
self.assertFalse(port['port']['port_security_enabled'])
def test_create_ens_port_with_port_sec(self):
with self.subnet() as subnet,\
mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.3.0'),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
args = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']}],
psec.PORTSECURITY: True}}
port_req = self.new_create_request('ports', args)
res = self.deserialize('json', port_req.get_response(self.api))
# should fail
self.assertEqual('NsxENSPortSecurity',
res['NeutronError']['type'])
def test_create_ens_port_with_port_sec_supported(self):
with self.subnet() as subnet,\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
return_value={'transport_zone_id': 'xxx'}):
args = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']}],
psec.PORTSECURITY: True}}
port_req = self.new_create_request('ports', args)
res = self.deserialize('json', port_req.get_response(self.api))
# should succeed
self.assertTrue(res['port'][psec.PORTSECURITY])
def test_update_ens_port(self):
with self.subnet() as subnet,\
mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
return_value='2.3.0'),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
args = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']}],
psec.PORTSECURITY: False}}
port_req = self.new_create_request('ports', args)
port = self.deserialize(self.fmt, port_req.get_response(self.api))
port_id = port['port']['id']
args = {'port': {psec.PORTSECURITY: True}}
req = self.new_update_request('ports', args, port_id)
res = self.deserialize('json', req.get_response(self.api))
# should fail
self.assertEqual('NsxENSPortSecurity',
res['NeutronError']['type'])
def test_update_ens_port_psec_supported(self):
with self.subnet() as subnet,\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
"get_host_switch_mode", return_value="ENS"),\
mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch."
"get", return_value={'transport_zone_id': 'xxx'}):
args = {'port': {'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']}],
psec.PORTSECURITY: False}}
port_req = self.new_create_request('ports', args)
port = self.deserialize(self.fmt, port_req.get_response(self.api))
port_id = port['port']['id']
args = {'port': {psec.PORTSECURITY: True}}
req = self.new_update_request('ports', args, port_id)
res = self.deserialize('json', req.get_response(self.api))
# should succeed
self.assertTrue(res['port'][psec.PORTSECURITY])
def test_update_dhcp_port_device_owner(self):
self._enable_native_dhcp_md()
with self.subnet():
pl = directory.get_plugin()
ctx = context.Context(user_id=None, tenant_id=self._tenant_id,
is_admin=False)
ports = pl.get_ports(
ctx, filters={'device_owner': [constants.DEVICE_OWNER_DHCP]})
port_id = ports[0]['id']
args = {'port': {'admin_state_up': False,
'fixed_ips': [],
'device_owner': 'abcd'}}
req = self.new_update_request('ports', args, port_id)
res = self.deserialize('json', req.get_response(self.api))
# should fail
self.assertEqual('InvalidInput',
res['NeutronError']['type'])
def test_create_compute_port_with_relay_no_router(self):
"""Compute port creation should fail
if a network with dhcp relay is not connected to a router
"""
self._enable_dhcp_relay()
with self.network() as network, \
self.subnet(network=network, enable_dhcp=True) as s1:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': device_owner,
'fixed_ips': [{'subnet_id': s1['subnet']['id']}],
'mac_address': '00:00:00:00:00:01'}
}
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port,
self.ctx, data)
def test_create_compute_port_with_relay_and_router(self):
self._enable_dhcp_relay()
with self.network() as network, \
self.subnet(network=network, enable_dhcp=True) as s1,\
mock.patch.object(self.plugin, '_get_router',
return_value={'name': 'dummy'}):
# first create a router interface to simulate a router
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'port',
'admin_state_up': True,
'device_id': 'dummy',
'device_owner': l3_db.DEVICE_OWNER_ROUTER_INTF,
'fixed_ips': [{'subnet_id': s1['subnet']['id']}],
'mac_address': '00:00:00:00:00:02'}
}
port1 = self.plugin.create_port(self.ctx, data)
self.assertIn('id', port1)
# Now create a compute port
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
data = {'port': {
'network_id': network['network']['id'],
'tenant_id': self._tenant_id,
'name': 'port',
'admin_state_up': True,
'device_id': 'fake_device',
'device_owner': device_owner,
'fixed_ips': [{'subnet_id': s1['subnet']['id']}],
'mac_address': '00:00:00:00:00:01'}
}
port2 = self.plugin.create_port(self.ctx, data)
self.assertIn('id', port2)
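    # Helper: create a flat (vlan_id=0) or VLAN provider network for the
    # VNIC_DIRECT port binding tests below.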
def _test_create_direct_network(self, vlan_id=0):
        net_type = 'vlan' if vlan_id else 'flat'
name = 'direct_net'
providernet_args = {pnet.NETWORK_TYPE: net_type,
pnet.PHYSICAL_NETWORK: 'tzuuid'}
if vlan_id:
providernet_args[pnet.SEGMENTATION_ID] = vlan_id
mock_tt = mock.patch('vmware_nsxlib.v3'
'.core_resources.NsxLibTransportZone'
'.get_transport_type',
return_value='VLAN')
mock_tt.start()
return self.network(name=name,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID))
def _test_create_port_vnic_direct(self, vlan_id):
with self._test_create_direct_network(vlan_id=vlan_id) as network:
# Check that port security conflicts
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT,
psec.PORTSECURITY: True}
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,
psec.PORTSECURITY),
**kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
# Check that security group conflicts
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT,
'security_groups': [
'4cd70774-cc67-4a87-9b39-7d1db38eb087'],
psec.PORTSECURITY: False}
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,
psec.PORTSECURITY),
**kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
# All is kosher so we can create the port
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,),
**kwargs)
port = self.deserialize('json', res)
self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE])
self.assertEqual("dvs", port['port'][portbindings.VIF_TYPE])
self.assertEqual(
vlan_id,
port['port'][portbindings.VIF_DETAILS]['segmentation-id'])
# try to get the same port
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual("dvs", sport['port'][portbindings.VIF_TYPE])
self.assertEqual("direct", sport['port'][portbindings.VNIC_TYPE])
self.assertEqual(
vlan_id,
sport['port'][portbindings.VIF_DETAILS]['segmentation-id'])
self.assertFalse(
sport['port'][portbindings.VIF_DETAILS]['vlan-transparent'])
def test_create_port_vnic_direct_flat(self):
self._test_create_port_vnic_direct(0)
def test_create_port_vnic_direct_vlan(self):
self._test_create_port_vnic_direct(10)
def test_create_port_vnic_direct_invalid_network(self):
with self.network(name='not vlan/flat') as net:
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT,
psec.PORTSECURITY: False}
net_id = net['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,
psec.PORTSECURITY),
**kwargs)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_update_vnic_direct(self):
with self._test_create_direct_network(vlan_id=7) as network:
with self.subnet(network=network) as subnet:
with self.port(subnet=subnet) as port:
                    # Two updates are needed: an update that disables port
                    # security may only change port security and the
                    # security groups; the vnic type is changed afterwards.
data = {'port': {psec.PORTSECURITY: False,
'security_groups': []}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual(portbindings.VNIC_NORMAL,
res['port'][portbindings.VNIC_TYPE])
data = {'port': {portbindings.VNIC_TYPE:
portbindings.VNIC_DIRECT}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual(portbindings.VNIC_DIRECT,
res['port'][portbindings.VNIC_TYPE])
def test_port_invalid_vnic_type(self):
with self._test_create_direct_network(vlan_id=7) as network:
kwargs = {portbindings.VNIC_TYPE: 'invalid',
psec.PORTSECURITY: False}
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,
psec.PORTSECURITY),
**kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_transparent_vlan_port(self):
providernet_args = {pnet.NETWORK_TYPE: 'vlan',
vlan_apidef.VLANTRANSPARENT: True}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
result = self._create_network(fmt='json', name='vlan_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(
pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,
vlan_apidef.VLANTRANSPARENT))
network = self.deserialize('json', result)
net_id = network['network']['id']
with self.subnet(network=network):
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id,
arg_list=(portbindings.VNIC_TYPE,),
**kwargs)
port = self.deserialize('json', res)
self.assertTrue(
port['port'][portbindings.VIF_DETAILS]['vlan-transparent'])
@common_v3.with_disable_dhcp
def test_requested_subnet_id_v4_and_v6(self):
return super(TestPortsV2, self).test_requested_subnet_id_v4_and_v6()
def test_port_binding_host(self):
with self.port() as port:
# add host
data = {'port': {portbindings.HOST_ID: 'abc'}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual('abc', res['port'][portbindings.HOST_ID])
# remove host
data = {'port': {portbindings.HOST_ID: None}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual('', res['port'][portbindings.HOST_ID])
class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt,
NsxV3PluginTestCaseMixin):
def setUp(self, plugin=None):
super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
plugin=PLUGIN_NAME)
class NSXv3DHCPAgentAZAwareWeightSchedulerTestCase(
test_dhcpagent.DHCPAgentAZAwareWeightSchedulerTestCase,
NsxV3PluginTestCaseMixin):
def setUp(self):
super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp()
self.plugin = directory.get_plugin()
self.ctx = context.get_admin_context()
def setup_coreplugin(self, core_plugin=None, load_plugins=True):
super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase,
self).setup_coreplugin(core_plugin=PLUGIN_NAME,
load_plugins=load_plugins)
class TestL3ExtensionManager(object):
def get_resources(self):
# Simulate extension of L3 attribute map
l3.L3().update_attributes_map(
l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP)
l3.L3().update_attributes_map(
xroute_apidef.RESOURCE_ATTRIBUTE_MAP)
return (l3.L3.get_resources() +
address_scope.Address_scope.get_resources())
def get_actions(self):
return []
def get_request_extensions(self):
return []
class L3NatTest(test_l3_plugin.L3BaseForIntTests,
NsxV3PluginTestCaseMixin,
common_v3.FixExternalNetBaseTest,
common_v3.NsxV3SubnetMixin,
test_address_scope.AddressScopeTestCase):
def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None,
service_plugins=None):
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
cfg.CONF.set_default('max_routes', 3)
ext_mgr = ext_mgr or TestL3ExtensionManager()
mock_nsx_version = mock.patch.object(nsx_plugin.utils,
'is_nsx_version_2_0_0',
new=lambda v: True)
mock_nsx_version.start()
# Make sure the LB callback is not called on router deletion
self.lb_mock1 = mock.patch(
"vmware_nsx.services.lbaas.nsx_v3.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router")
self.lb_mock1.start()
self.lb_mock2 = mock.patch(
"vmware_nsx.services.lbaas.nsx_v3.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router_interface")
self.lb_mock2.start()
super(L3NatTest, self).setUp(
plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
self.plugin_instance = directory.get_plugin()
self._plugin_name = "%s.%s" % (
self.plugin_instance.__module__,
self.plugin_instance.__class__.__name__)
self._plugin_class = self.plugin_instance.__class__
self.plugin_instance.fwaas_callbacks = None
self.original_subnet = self.subnet
self.original_network = self.network
def _set_net_external(self, net_id):
# This action is not supported by the V3 plugin
pass
def external_network(self, name='net1',
admin_state_up=True,
fmt=None, **kwargs):
if not name:
name = 'l3_ext_net'
physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID
net_type = utils.NetworkTypes.L3_EXT
providernet_args = {pnet.NETWORK_TYPE: net_type,
pnet.PHYSICAL_NETWORK: physical_network}
return self.original_network(name=name,
admin_state_up=admin_state_up,
fmt=fmt,
router__external=True,
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK))
def test_floatingip_create_different_fixed_ip_same_port(self):
self.skipTest('Multiple fixed ips on a port are not supported')
def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self):
self.skipTest('Multiple fixed ips on a port are not supported')
def test_router_add_interface_multiple_ipv6_subnet_port(self):
self.skipTest('Multiple fixed ips on a port are not supported')
def test_floatingip_update_different_fixed_ip_same_port(self):
self.skipTest('Multiple fixed ips on a port are not supported')
def test_create_multiple_floatingips_same_fixed_ip_same_port(self):
self.skipTest('Multiple fixed ips on a port are not supported')
class TestL3NatTestCase(L3NatTest,
test_l3_plugin.L3NatDBIntTestCase,
test_ext_route.ExtraRouteDBTestCaseBase,
test_metadata.MetaDataTestCase):
block_dhcp_notifier = False
def setUp(self, plugin=PLUGIN_NAME,
ext_mgr=None,
service_plugins=None):
super(TestL3NatTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
cfg.CONF.set_override('metadata_mode', None, 'nsx_v3')
cfg.CONF.set_override('metadata_on_demand', False, 'nsx_v3')
self.subnet_calls = []
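    # Helper: create an external network bound to the given tier-0 router
    # uuid and verify the expected network attributes.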
def _test_create_l3_ext_network(
self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID):
name = 'l3_ext_net'
net_type = utils.NetworkTypes.L3_EXT
expected = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', 'ACTIVE'), ('shared', False),
(extnet_apidef.EXTERNAL, True),
(pnet.NETWORK_TYPE, net_type),
(pnet.PHYSICAL_NETWORK, physical_network)]
with self._create_l3_ext_network(physical_network) as net:
for k, v in expected:
self.assertEqual(net['network'][k], v)
@common_v3.with_external_subnet
def test_router_update_gateway_with_external_ip_used_by_gw(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_with_external_ip_used_by_gw()
@common_v3.with_external_subnet
def test_router_update_gateway_with_invalid_external_ip(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_with_invalid_external_ip()
@common_v3.with_external_subnet
def test_router_update_gateway_with_invalid_external_subnet(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_with_invalid_external_subnet()
@common_v3.with_external_network
def test_router_update_gateway_with_different_external_subnet(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_with_different_external_subnet()
@common_v3.with_disable_dhcp
def test_create_floatingip_ipv6_only_network_returns_400(self):
super(TestL3NatTestCase,
self).test_create_floatingip_ipv6_only_network_returns_400()
@common_v3.with_disable_dhcp
def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self):
super(L3NatTest,
self).test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port()
@common_v3.with_external_subnet_once
def test_router_update_gateway_with_existed_floatingip(self):
with self.subnet(cidr='20.0.0.0/24') as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.floatingip_with_assoc() as fip:
self._add_external_gateway_to_router(
fip['floatingip']['router_id'],
subnet['subnet']['network_id'],
expected_code=exc.HTTPConflict.code)
@common_v3.with_external_network
def test_router_update_gateway_add_multiple_prefixes_ipv6(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_add_multiple_prefixes_ipv6()
@common_v3.with_external_network
def test_router_concurrent_delete_upon_subnet_create(self):
super(TestL3NatTestCase,
self).test_router_concurrent_delete_upon_subnet_create()
@common_v3.with_external_network
def test_router_update_gateway_upon_subnet_create_ipv6(self):
super(TestL3NatTestCase,
self).test_router_update_gateway_upon_subnet_create_ipv6()
@common_v3.with_external_subnet
def test_router_add_gateway_dup_subnet2_returns_400(self):
super(TestL3NatTestCase,
self).test_router_add_gateway_dup_subnet2_returns_400()
@common_v3.with_external_subnet
def test_router_update_gateway(self):
super(TestL3NatTestCase,
self).test_router_update_gateway()
@common_v3.with_external_subnet
def test_router_create_with_gwinfo(self):
super(TestL3NatTestCase,
self).test_router_create_with_gwinfo()
@common_v3.with_external_subnet
def test_router_clear_gateway_callback_failure_returns_409(self):
super(TestL3NatTestCase,
self).test_router_clear_gateway_callback_failure_returns_409()
@common_v3.with_external_subnet
def test_router_create_with_gwinfo_ext_ip(self):
super(TestL3NatTestCase,
self).test_router_create_with_gwinfo_ext_ip()
@common_v3.with_external_network
def test_router_create_with_gwinfo_ext_ip_subnet(self):
super(TestL3NatTestCase,
self).test_router_create_with_gwinfo_ext_ip_subnet()
@common_v3.with_external_subnet_second_time
def test_router_delete_with_floatingip_existed_returns_409(self):
super(TestL3NatTestCase,
self).test_router_delete_with_floatingip_existed_returns_409()
@common_v3.with_external_subnet
def test_router_add_and_remove_gateway_tenant_ctx(self):
super(TestL3NatTestCase,
self).test_router_add_and_remove_gateway_tenant_ctx()
@common_v3.with_external_subnet
def test_router_add_and_remove_gateway(self):
super(TestL3NatTestCase,
self).test_router_add_and_remove_gateway()
def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self):
self.skipTest('not supported')
def test_router_add_gateway_multiple_subnets_ipv6(self):
self.skipTest('multiple ipv6 subnets not supported')
def test__notify_gateway_port_ip_changed(self):
self.skipTest('not supported')
def test__notify_gateway_port_ip_not_changed(self):
self.skipTest('not supported')
def test_floatingip_via_router_interface_returns_201(self):
self.skipTest('not supported')
def test_floatingip_via_router_interface_returns_404(self):
self.skipTest('not supported')
def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self):
self.skipTest('DHCPv6 not supported')
def test_router_add_interface_ipv6_subnet(self):
self.skipTest('DHCPv6 not supported')
def test_update_router_interface_port_ipv6_subnet_ext_ra(self):
self.skipTest('DHCPv6 not supported')
@common_v3.with_disable_dhcp
def test_router_add_interface_ipv6_subnet_without_gateway_ip(self):
super(TestL3NatTestCase,
self).test_router_add_interface_ipv6_subnet_without_gateway_ip()
@common_v3.with_disable_dhcp
def test_router_add_interface_multiple_ipv6_subnets_different_net(self):
super(TestL3NatTestCase, self).\
test_router_add_interface_multiple_ipv6_subnets_different_net()
@common_v3.with_disable_dhcp
def test_create_floatingip_with_assoc_to_ipv6_subnet(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_assoc_to_ipv6_subnet()
def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self):
self.skipTest('DHCPv6 not supported')
@common_v3.with_external_subnet
def test_floatingip_list_with_sort(self):
super(TestL3NatTestCase,
self).test_floatingip_list_with_sort()
@common_v3.with_external_subnet_once
def test_floatingip_with_assoc_fails(self):
super(TestL3NatTestCase,
self).test_floatingip_with_assoc_fails()
@common_v3.with_external_subnet_second_time
def test_floatingip_update_same_fixed_ip_same_port(self):
super(TestL3NatTestCase,
self).test_floatingip_update_same_fixed_ip_same_port()
@common_v3.with_external_subnet
def test_floatingip_list_with_pagination_reverse(self):
super(TestL3NatTestCase,
self).test_floatingip_list_with_pagination_reverse()
@common_v3.with_external_subnet_once
def test_floatingip_association_on_unowned_router(self):
super(TestL3NatTestCase,
self).test_floatingip_association_on_unowned_router()
@common_v3.with_external_network
def test_delete_ext_net_with_disassociated_floating_ips(self):
super(TestL3NatTestCase,
self).test_delete_ext_net_with_disassociated_floating_ips()
@common_v3.with_external_network
def test_create_floatingip_with_subnet_and_invalid_fip_address(self):
super(
TestL3NatTestCase,
self).test_create_floatingip_with_subnet_and_invalid_fip_address()
@common_v3.with_external_subnet
def test_create_floatingip_with_duplicated_specific_ip(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_duplicated_specific_ip()
@common_v3.with_external_subnet
def test_create_floatingip_with_subnet_id_non_admin(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_subnet_id_non_admin()
@common_v3.with_external_subnet
def test_floatingip_list_with_pagination(self):
super(TestL3NatTestCase,
self).test_floatingip_list_with_pagination()
@common_v3.with_external_subnet
def test_create_floatingips_native_quotas(self):
super(TestL3NatTestCase,
self).test_create_floatingips_native_quotas()
@common_v3.with_external_network
def test_create_floatingip_with_multisubnet_id(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_multisubnet_id()
@common_v3.with_external_network
def test_create_floatingip_with_subnet_id_and_fip_address(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_subnet_id_and_fip_address()
@common_v3.with_external_subnet
def test_create_floatingip_with_specific_ip(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_specific_ip()
@common_v3.with_external_network
def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self):
super(TestL3NatTestCase,
self).test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4()
@common_v3.with_external_subnet_once
def test_create_floatingip_non_admin_context_agent_notification(self):
super(
TestL3NatTestCase,
self).test_create_floatingip_non_admin_context_agent_notification()
@common_v3.with_external_subnet
def test_create_floatingip_no_ext_gateway_return_404(self):
super(TestL3NatTestCase,
self).test_create_floatingip_no_ext_gateway_return_404()
@common_v3.with_external_subnet
def test_create_floatingip_with_specific_ip_out_of_allocation(self):
super(TestL3NatTestCase,
self).test_create_floatingip_with_specific_ip_out_of_allocation()
@common_v3.with_external_subnet_third_time
def test_floatingip_update_different_router(self):
super(TestL3NatTestCase,
self).test_floatingip_update_different_router()
def test_router_add_gateway_notifications(self):
with self.router() as r,\
self._create_l3_ext_network() as ext_net,\
self.subnet(network=ext_net):
with mock.patch.object(registry, 'notify') as notify:
self._add_external_gateway_to_router(
r['router']['id'], ext_net['network']['id'])
expected = [mock.call(
resources.ROUTER_GATEWAY,
events.AFTER_CREATE, mock.ANY,
context=mock.ANY, gw_ips=mock.ANY,
network_id=mock.ANY, router_id=mock.ANY)]
notify.assert_has_calls(expected)
def test_create_l3_ext_network_with_default_tier0(self):
self._test_create_l3_ext_network()
def test_floatingip_update(self):
super(TestL3NatTestCase, self).test_floatingip_update(
expected_status=constants.FLOATINGIP_STATUS_DOWN)
@common_v3.with_external_subnet_second_time
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(self._plugin_name)
def test_network_update_external(self):
# This plugin does not support updating the external flag of a network
self.skipTest('not supported')
def test_network_update_external_failure(self):
# This plugin does not support updating the external flag of a network
# This is tested with a different test
self.skipTest('not supported')
def test_router_add_gateway_dup_subnet1_returns_400(self):
self.skipTest('not supported')
def test_router_add_interface_dup_subnet2_returns_400(self):
self.skipTest('not supported')
def test_router_add_interface_ipv6_port_existing_network_returns_400(self):
self.skipTest('multiple ipv6 subnets not supported')
def test_routes_update_for_multiple_routers(self):
self.skipTest('not supported')
def test_floatingip_multi_external_one_internal(self):
self.skipTest('not supported')
def test_floatingip_same_external_and_internal(self):
self.skipTest('not supported')
def test_route_update_with_external_route(self):
self.skipTest('not supported')
def test_floatingip_update_subnet_gateway_disabled(self):
self.skipTest('not supported')
def test_router_add_interface_by_port_other_tenant_address_out_of_pool(
self):
# multiple fixed ips per port are not supported
self.skipTest('not supported')
def test_router_add_interface_by_port_other_tenant_address_in_pool(self):
# multiple fixed ips per port are not supported
self.skipTest('not supported')
def test_router_add_interface_by_port_admin_address_out_of_pool(self):
# multiple fixed ips per port are not supported
self.skipTest('not supported')
def test_router_delete_with_lb_service(self):
self.lb_mock1.stop()
self.lb_mock2.stop()
# Create the LB object - here the delete callback is registered
lb_driver = lb_driver_v2.EdgeLoadbalancerDriverV2()
with self.router() as router:
with mock.patch('vmware_nsxlib.v3.load_balancer.Service.'
'get_router_lb_service'),\
mock.patch('vmware_nsx.db.db.get_nsx_router_id',
return_value=1):
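                # Router deletion must be blocked while an LB service is
                # still attached to the router.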
self.assertRaises(nc_exc.CallbackFailure,
self.plugin_instance.delete_router,
context.get_admin_context(),
router['router']['id'])
# Unregister callback
lb_driver._unsubscribe_router_delete_callback()
self.lb_mock1.start()
self.lb_mock2.start()
def test_multiple_subnets_on_different_routers(self):
with self.network() as network:
with self.subnet(network=network) as s1,\
self.subnet(network=network,
cidr='11.0.0.0/24') as s2,\
self.router() as r1,\
self.router() as r2:
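                # Subnets of the same network cannot be attached to two
                # different routers.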
self._router_interface_action('add', r1['router']['id'],
s1['subnet']['id'], None)
self.assertRaises(n_exc.Conflict,
self.plugin_instance.add_router_interface,
context.get_admin_context(),
r2['router']['id'],
{'subnet_id': s2['subnet']['id']})
self._router_interface_action('remove', r1['router']['id'],
s1['subnet']['id'], None)
self._router_interface_action('add', r2['router']['id'],
s2['subnet']['id'], None)
self._router_interface_action('remove', r2['router']['id'],
s2['subnet']['id'], None)
def test_multiple_subnets_on_same_router(self):
with self.network() as network:
with self.subnet(network=network) as s1,\
self.subnet(network=network,
cidr='11.0.0.0/24') as s2,\
self.router() as r1:
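                # A router may be attached to only one subnet of a given
                # network.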
self._router_interface_action('add', r1['router']['id'],
s1['subnet']['id'], None)
self.assertRaises(n_exc.InvalidInput,
self.plugin_instance.add_router_interface,
context.get_admin_context(),
r1['router']['id'],
{'subnet_id': s2['subnet']['id']})
self._router_interface_action('remove', r1['router']['id'],
s1['subnet']['id'], None)
def test_router_remove_interface_inuse_return_409(self):
with self.router() as r1,\
self._create_l3_ext_network() as ext_net,\
self.subnet(network=ext_net) as ext_subnet,\
self.subnet(cidr='11.0.0.0/24') as s1:
self._router_interface_action(
'add', r1['router']['id'],
s1['subnet']['id'], None)
self._add_external_gateway_to_router(
r1['router']['id'],
ext_subnet['subnet']['network_id'])
with self.port(subnet=s1,) as p:
fip_res = self._create_floatingip(
self.fmt,
ext_subnet['subnet']['network_id'],
subnet_id=ext_subnet['subnet']['id'],
port_id=p['port']['id'])
fip = self.deserialize(self.fmt, fip_res)
self._router_interface_action(
'remove',
r1['router']['id'],
s1['subnet']['id'],
None,
expected_code=exc.HTTPConflict.code)
self._delete('floatingips', fip['floatingip']['id'])
self._remove_external_gateway_from_router(
r1['router']['id'],
ext_subnet['subnet']['network_id'])
self._router_interface_action('remove',
r1['router']['id'],
s1['subnet']['id'],
None)
def test_router_update_on_external_port(self):
with self.router() as r:
with self._create_l3_ext_network() as ext_net,\
self.subnet(network=ext_net, cidr='10.0.1.0/24') as s:
self._add_external_gateway_to_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
port_res = self._list_ports(
'json',
200,
s['subnet']['network_id'],
tenant_id=r['router']['tenant_id'],
device_owner=constants.DEVICE_OWNER_ROUTER_GW)
port_list = self.deserialize('json', port_res)
self.assertEqual(len(port_list['ports']), 1)
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
self.assertRaises(n_exc.InvalidInput,
self.plugin_instance.update_router,
context.get_admin_context(),
r['router']['id'],
{'router': {'routes':
routes}})
updates = {'admin_state_up': False}
self.assertRaises(n_exc.InvalidInput,
self.plugin_instance.update_router,
context.get_admin_context(),
r['router']['id'],
{'router': updates})
self._remove_external_gateway_from_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
gw_info = body['router']['external_gateway_info']
self.assertIsNone(gw_info)
def test_router_on_vlan_net(self):
providernet_args = {pnet.NETWORK_TYPE: 'vlan',
pnet.SEGMENTATION_ID: 10}
with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
'get_transport_type', return_value='VLAN'):
result = self._create_network(fmt='json', name='badvlan_net',
admin_state_up=True,
providernet_args=providernet_args,
arg_list=(
pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID))
vlan_network = self.deserialize('json', result)
with self.router() as r1,\
self._create_l3_ext_network() as ext_net,\
self.subnet(network=ext_net) as ext_subnet,\
self.subnet(cidr='11.0.0.0/24', network=vlan_network) as s1:
# adding a vlan interface with no GW should fail
self._router_interface_action(
'add', r1['router']['id'],
s1['subnet']['id'], None,
expected_code=400)
# adding GW
self._add_external_gateway_to_router(
r1['router']['id'],
ext_subnet['subnet']['network_id'])
# adding the vlan interface
self._router_interface_action(
'add', r1['router']['id'],
s1['subnet']['id'], None)
# adding a floating ip
with self.port(subnet=s1) as p:
fip_res = self._create_floatingip(
self.fmt,
ext_subnet['subnet']['network_id'],
subnet_id=ext_subnet['subnet']['id'],
port_id=p['port']['id'])
fip = self.deserialize(self.fmt, fip_res)
self.assertEqual(p['port']['id'],
fip['floatingip']['port_id'])
def test_create_router_gateway_fails(self):
self.skipTest('not supported')
def test_router_remove_ipv6_subnet_from_interface(self):
self.skipTest('not supported')
def test_router_add_interface_multiple_ipv6_subnets_same_net(self):
self.skipTest('not supported')
def test_router_add_interface_multiple_ipv4_subnets(self):
self.skipTest('not supported')
def test_floatingip_update_to_same_port_id_twice(self):
self.skipTest('Plugin changes floating port status')
def _test_create_subnetpool(self, prefixes, expected=None,
admin=False, **kwargs):
keys = kwargs.copy()
keys.setdefault('tenant_id', self._tenant_id)
with self.subnetpool(prefixes, admin, **keys) as subnetpool:
self._validate_resource(subnetpool, keys, 'subnetpool')
if expected:
self._compare_resource(subnetpool, expected, 'subnetpool')
return subnetpool
def _update_router_enable_snat(self, router_id, network_id, enable_snat):
return self._update('routers', router_id,
{'router': {'external_gateway_info':
{'network_id': network_id,
'enable_snat': enable_snat}}})
def test_router_no_snat_with_different_address_scope(self):
"""Test that if the router has no snat, you cannot add an interface
from a different address scope than the gateway.
"""
# create an external network on one address scope
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
as_id = addr_scope['address_scope']['id']
subnet = netaddr.IPNetwork('10.10.10.0/24')
subnetpool = self._test_create_subnetpool(
[subnet.cidr], name='sp1',
min_prefixlen='24', address_scope_id=as_id)
subnetpool_id = subnetpool['subnetpool']['id']
data = {'subnet': {
'network_id': ext_net['network']['id'],
'subnetpool_id': subnetpool_id,
'ip_version': 4,
'enable_dhcp': False,
'tenant_id': ext_net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
ext_subnet = self.deserialize(self.fmt, req.get_response(self.api))
# create a regular network on another address scope
with self.address_scope(name='as2') as addr_scope2, \
self.network() as net:
as_id2 = addr_scope2['address_scope']['id']
subnet2 = netaddr.IPNetwork('20.10.10.0/24')
subnetpool2 = self._test_create_subnetpool(
[subnet2.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as_id2)
subnetpool_id2 = subnetpool2['subnetpool']['id']
data = {'subnet': {
'network_id': net['network']['id'],
'subnetpool_id': subnetpool_id2,
'ip_version': 4,
'tenant_id': net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
int_subnet = self.deserialize(
self.fmt, req.get_response(self.api))
# create a no snat router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['subnet']['network_id'])
self._update_router_enable_snat(
r['router']['id'],
ext_subnet['subnet']['network_id'],
False)
# should fail adding the interface to the router
err_code = exc.HTTPBadRequest.code
self._router_interface_action('add',
r['router']['id'],
int_subnet['subnet']['id'],
None,
err_code)
def test_router_no_snat_with_same_address_scope(self):
"""Test that if the router has no snat, you can add an interface
from the same address scope as the gateway.
"""
# create an external network on one address scope
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
as_id = addr_scope['address_scope']['id']
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool(
[subnet.cidr], name='sp1',
min_prefixlen='24', address_scope_id=as_id)
subnetpool_id = subnetpool['subnetpool']['id']
data = {'subnet': {
'network_id': ext_net['network']['id'],
'subnetpool_id': subnetpool_id,
'ip_version': 4,
'enable_dhcp': False,
'tenant_id': ext_net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
ext_subnet = self.deserialize(self.fmt, req.get_response(self.api))
# create a regular network on the same address scope
with self.network() as net:
data = {'subnet': {
'network_id': net['network']['id'],
'subnetpool_id': subnetpool_id,
'ip_version': 4,
'tenant_id': net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
int_subnet = self.deserialize(
self.fmt, req.get_response(self.api))
# create a no snat router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['subnet']['network_id'])
self._update_router_enable_snat(
r['router']['id'],
ext_subnet['subnet']['network_id'],
False)
# should succeed adding the interface to the router
self._router_interface_action('add',
r['router']['id'],
int_subnet['subnet']['id'],
None)
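    # Helpers that patch the nsxlib SNAT and service-router calls so the
    # tests below can assert whether rules were added or removed.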
def _mock_add_snat_rule(self):
return mock.patch("vmware_nsxlib.v3.router.RouterLib."
"add_gw_snat_rule")
def _mock_add_remove_service_router(self):
return mock.patch("vmware_nsxlib.v3.core_resources."
"NsxLibLogicalRouter.update")
def _mock_del_snat_rule(self):
return mock.patch("vmware_nsxlib.v3.router.RouterLib."
"delete_gw_snat_rule_by_source")
def _prepare_external_subnet_on_address_scope(self,
ext_net,
address_scope):
as_id = address_scope['address_scope']['id']
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool(
[subnet.cidr], name='sp1',
min_prefixlen='24', address_scope_id=as_id)
subnetpool_id = subnetpool['subnetpool']['id']
data = {'subnet': {
'network_id': ext_net['network']['id'],
'subnetpool_id': subnetpool_id,
'ip_version': 4,
'enable_dhcp': False,
'tenant_id': ext_net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
ext_subnet = self.deserialize(self.fmt, req.get_response(self.api))
return ext_subnet['subnet']
def _create_subnet_and_assert_snat_rules(self, subnetpool_id,
router_id,
assert_snat_deleted=False,
assert_snat_added=False):
# create a regular network on the given subnet pool
with self.network() as net:
data = {'subnet': {
'network_id': net['network']['id'],
'subnetpool_id': subnetpool_id,
'ip_version': 4,
'tenant_id': net['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
int_subnet = self.deserialize(
self.fmt, req.get_response(self.api))
with self._mock_add_snat_rule() as add_nat,\
self._mock_del_snat_rule() as delete_nat:
# Add the interface
self._router_interface_action(
'add',
router_id,
int_subnet['subnet']['id'],
None)
if assert_snat_deleted:
delete_nat.assert_called()
else:
delete_nat.assert_not_called()
if assert_snat_added:
add_nat.assert_called()
else:
add_nat.assert_not_called()
def test_add_service_router_enable_snat(self):
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway
with self.router() as r, \
mock.patch("vmware_nsxlib.v3.router.RouterLib."
"has_service_router", return_value=False),\
self._mock_add_remove_service_router() as change_sr:
router_id = r['router']['id']
self._add_external_gateway_to_router(
router_id, ext_subnet['network_id'])
                # Check that the router update is called with the
                # edge_cluster_uuid, which creates a service router
change_sr.assert_any_call(
mock.ANY, edge_cluster_id=NSX_EDGE_CLUSTER_UUID,
enable_standby_relocation=True)
def test_remove_service_router_disable_snat(self):
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway, disable snat
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['network_id'])
with mock.patch("vmware_nsxlib.v3.router.RouterLib."
"has_service_router", return_value=True),\
self._mock_add_remove_service_router() as change_sr:
self._update_router_enable_snat(
r['router']['id'],
ext_subnet['network_id'],
False)
                    # Check that the router update is called with
                    # edge_cluster_uuid set to None, which removes the
                    # service router.
change_sr.assert_called_once_with(
mock.ANY, edge_cluster_id=None,
enable_standby_relocation=False)
def test_router_address_scope_snat_rules(self):
"""Test that if the router interface had the same address scope
as the gateway - snat rule is not added.
"""
# create an external network on one address scope
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['network_id'])
# create a regular network on same address scope
# and verify no snat change
as_id = addr_scope['address_scope']['id']
subnet = netaddr.IPNetwork('30.10.10.0/24')
subnetpool = self._test_create_subnetpool(
[subnet.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as_id)
as_id = addr_scope['address_scope']['id']
subnetpool_id = subnetpool['subnetpool']['id']
self._create_subnet_and_assert_snat_rules(
subnetpool_id, r['router']['id'])
# create a regular network on a different address scope
# and verify snat rules are added
with self.address_scope(name='as2') as addr_scope2:
as2_id = addr_scope2['address_scope']['id']
subnet2 = netaddr.IPNetwork('20.10.10.0/24')
subnetpool2 = self._test_create_subnetpool(
[subnet2.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as2_id)
subnetpool2_id = subnetpool2['subnetpool']['id']
self._create_subnet_and_assert_snat_rules(
subnetpool2_id, r['router']['id'],
assert_snat_added=True)
def _test_router_address_scope_change(self, change_gw=False):
"""When subnetpool address scope changes, and router that was
originally under same address scope, results having different
address scopes, relevant snat rules are added.
"""
# create an external network on one address scope
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['network_id'])
# create a regular network on same address scope
# and verify no snat change
as_id = addr_scope['address_scope']['id']
subnet2 = netaddr.IPNetwork('40.10.10.0/24')
subnetpool2 = self._test_create_subnetpool(
[subnet2.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as_id)
subnetpool2_id = subnetpool2['subnetpool']['id']
self._create_subnet_and_assert_snat_rules(
subnetpool2_id, r['router']['id'])
# change address scope of the first subnetpool
with self.address_scope(name='as2') as addr_scope2,\
self._mock_add_snat_rule() as add_nat:
as2_id = addr_scope2['address_scope']['id']
data = {'subnetpool': {
'address_scope_id': as2_id}}
if change_gw:
subnetpool_to_update = ext_subnet['subnetpool_id']
else:
subnetpool_to_update = subnetpool2_id
req = self.new_update_request('subnetpools', data,
subnetpool_to_update)
req.get_response(self.api)
add_nat.assert_called_once()
def test_router_address_scope_change(self):
self._test_router_address_scope_change()
def test_router_address_scope_gw_change(self):
self._test_router_address_scope_change(change_gw=True)
def _test_3leg_router_address_scope_change(self, change_gw=False,
change_2gw=False):
"""Test address scope change scenarios with router that covers
3 address scopes
"""
# create an external network on one address scope
with self.address_scope(name='as1') as as1, \
self.address_scope(name='as2') as as2, \
self.address_scope(name='as3') as as3, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, as1)
as1_id = as1['address_scope']['id']
# create a router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['network_id'])
# create a regular network on address scope 2
# and verify snat change
as2_id = as2['address_scope']['id']
subnet2 = netaddr.IPNetwork('20.10.10.0/24')
subnetpool2 = self._test_create_subnetpool(
[subnet2.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as2_id)
subnetpool2_id = subnetpool2['subnetpool']['id']
self._create_subnet_and_assert_snat_rules(
subnetpool2_id, r['router']['id'], assert_snat_added=True)
# create a regular network on address scope 3
# verify no snat change
as3_id = as3['address_scope']['id']
subnet3 = netaddr.IPNetwork('30.10.10.0/24')
subnetpool3 = self._test_create_subnetpool(
[subnet3.cidr], name='sp2',
min_prefixlen='24', address_scope_id=as3_id)
subnetpool3_id = subnetpool3['subnetpool']['id']
self._create_subnet_and_assert_snat_rules(
subnetpool3_id, r['router']['id'], assert_snat_added=True)
with self._mock_add_snat_rule() as add_nat, \
self._mock_del_snat_rule() as del_nat:
if change_gw:
# change address scope of GW subnet
subnetpool_to_update = ext_subnet['subnetpool_id']
else:
subnetpool_to_update = subnetpool2_id
if change_2gw:
# change subnet2 to be in GW address scope
target_as = as1_id
else:
target_as = as3_id
data = {'subnetpool': {
'address_scope_id': target_as}}
req = self.new_update_request('subnetpools', data,
subnetpool_to_update)
req.get_response(self.api)
if change_gw:
# The test changed address scope of gw subnet.
# Both previous rules should be deleted,
# and one new rule for subnet2 should be added
del_nat.assert_called()
self.assertEqual(2, del_nat.call_count)
add_nat.assert_called_once()
else:
if change_2gw:
# The test changed address scope of subnet2 to be
# same as GW address scope.
# Snat rule for as2 will be deleted. No effect on as3
# rule.
del_nat.assert_called_once()
else:
# The test changed address scope of subnet2 to
# as3. Affected snat rule should be re-created.
del_nat.assert_called_once()
add_nat.assert_called_once()
def test_3leg_router_address_scope_change(self):
self._test_3leg_router_address_scope_change()
def test_3leg_router_address_scope_change_to_gw(self):
self._test_3leg_router_address_scope_change(change_2gw=True)
def test_3leg_router_gw_address_scope_change(self):
self._test_3leg_router_address_scope_change(change_gw=True)
def test_subnetpool_router_address_scope_change_no_effect(self):
"""When all router interfaces are allocated from same subnetpool,
changing address scope on this subnetpool should not affect snat rules.
"""
# create an external network on one address scope
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
ext_subnet['network_id'])
# create a regular network on same address scope
# and verify no snat change
self._create_subnet_and_assert_snat_rules(
ext_subnet['subnetpool_id'], r['router']['id'])
with self.address_scope(name='as2') as addr_scope2,\
self._mock_add_snat_rule() as add_nat,\
self._mock_del_snat_rule() as delete_nat:
as2_id = addr_scope2['address_scope']['id']
# change address scope of the subnetpool
data = {'subnetpool': {
'address_scope_id': as2_id}}
req = self.new_update_request('subnetpools', data,
ext_subnet['subnetpool_id'])
req.get_response(self.api)
add_nat.assert_not_called()
delete_nat.assert_not_called()
def test_router_admin_state(self):
"""It is not allowed to set the router admin-state to down"""
with self.router() as r:
self._update('routers', r['router']['id'],
{'router': {'admin_state_up': False}},
expected_code=exc.HTTPBadRequest.code)
def test_router_dhcp_relay_dhcp_enabled(self):
"""Verify that the relay service is added to the router interface"""
self._enable_dhcp_relay()
with self.network() as network:
with mock.patch.object(self.plugin,
'validate_router_dhcp_relay'),\
self.subnet(network=network, enable_dhcp=True) as s1,\
self.router() as r1,\
mock.patch.object(self.plugin.nsxlib.logical_router_port,
'update') as mock_update_port:
self._router_interface_action('add', r1['router']['id'],
s1['subnet']['id'], None)
mock_update_port.assert_called_once_with(
mock.ANY,
relay_service_uuid=NSX_DHCP_RELAY_SRV,
subnets=mock.ANY)
def test_router_dhcp_relay_dhcp_disabled(self):
"""Verify that the relay service is not added to the router interface
If the subnet do not have enabled dhcp
"""
self._enable_dhcp_relay()
with self.network() as network:
with mock.patch.object(self.plugin,
'validate_router_dhcp_relay'),\
self.subnet(network=network, enable_dhcp=False) as s1,\
self.router() as r1,\
mock.patch.object(self.plugin.nsxlib.logical_router_port,
'update') as mock_update_port:
self._router_interface_action('add', r1['router']['id'],
s1['subnet']['id'], None)
mock_update_port.assert_called_once_with(
mock.ANY,
relay_service_uuid=None,
subnets=mock.ANY)
def test_router_dhcp_relay_no_ipam(self):
"""Verify that a router cannot be created with relay and no ipam"""
# Add the relay service to the config and availability zones
self._enable_dhcp_relay()
self.assertRaises(n_exc.InvalidInput,
self.plugin_instance.create_router,
context.get_admin_context(),
{'router': {'name': 'rtr'}})
def test_router_add_gateway_no_subnet_forbidden(self):
with self.router() as r:
with self._create_l3_ext_network() as n:
self._add_external_gateway_to_router(
r['router']['id'], n['network']['id'],
expected_code=exc.HTTPBadRequest.code)
def test_router_add_gateway_no_subnet(self):
self.skipTest('No support for no subnet gateway set')
@mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones')
def test_create_router_with_availability_zone(self, mock_validate_az):
name = 'rtr-with-zone'
zone = ['zone1']
mock_validate_az.return_value = None
with self.router(name=name, availability_zone_hints=zone) as rtr:
az_hints = rtr['router']['availability_zone_hints']
self.assertListEqual(zone, az_hints)
def _test_route_update_illegal(self, destination):
routes = [{'destination': destination, 'nexthop': '10.0.1.3'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
fixed_ip_data = [{'ip_address': '10.0.1.2'}]
with self.port(subnet=s, fixed_ips=fixed_ip_data) as p:
self._router_interface_action(
'add', r['router']['id'], None, p['port']['id'])
self._update('routers', r['router']['id'],
{'router': {'routes': routes}},
expected_code=400)
def test_route_update_illegal(self):
self._test_route_update_illegal('0.0.0.0/0')
self._test_route_update_illegal('0.0.0.0/16')
def test_update_router_distinct_edge_cluster(self):
self.mock_get_edge_cluster.stop()
edge_cluster = uuidutils.generate_uuid()
mock.patch(
"vmware_nsxlib.v3.core_resources.NsxLibEdgeCluster."
"get_id_by_name_or_id",
return_value=edge_cluster).start()
cfg.CONF.set_override('edge_cluster', edge_cluster, 'nsx_v3')
self._initialize_azs()
with self.address_scope(name='as1') as addr_scope, \
self._create_l3_ext_network() as ext_net:
ext_subnet = self._prepare_external_subnet_on_address_scope(
ext_net, addr_scope)
# create a router with this gateway
with self.router() as r, \
mock.patch("vmware_nsxlib.v3.router.RouterLib."
"has_service_router", return_value=False),\
self._mock_add_remove_service_router() as change_sr:
router_id = r['router']['id']
self._add_external_gateway_to_router(
router_id, ext_subnet['network_id'])
change_sr.assert_any_call(
mock.ANY, edge_cluster_id=edge_cluster,
enable_standby_relocation=True)
self.mock_get_edge_cluster.start()
def test_router_add_interface_cidr_overlapped_with_gateway(self):
with self.router() as r,\
self._create_l3_ext_network() as ext_net,\
self.subnet(cidr='10.0.1.0/24') as s1,\
self.subnet(network=ext_net, cidr='10.0.0.0/16',
enable_dhcp=False) as s2:
self._add_external_gateway_to_router(
r['router']['id'],
s2['subnet']['network_id'])
res = self._router_interface_action(
'add', r['router']['id'],
s1['subnet']['id'], None,
expected_code=exc.HTTPBadRequest.code)
self.assertIn('NeutronError', res)
def test_router_add_gateway_overlapped_with_interface_cidr(self):
with self.router() as r,\
self._create_l3_ext_network() as ext_net,\
self.subnet(cidr='10.0.1.0/24') as s1,\
self.subnet(network=ext_net, cidr='10.0.0.0/16',
enable_dhcp=False) as s2:
self._router_interface_action(
'add', r['router']['id'],
s1['subnet']['id'], None)
res = self._add_external_gateway_to_router(
r['router']['id'],
s2['subnet']['network_id'],
expected_code=exc.HTTPBadRequest.code)
self.assertIn('NeutronError', res)
def test_router_add_interface_by_port_cidr_overlapped_with_gateway(self):
with self.router() as r,\
self._create_l3_ext_network() as ext_net,\
self.subnet(cidr='10.0.1.0/24') as s1,\
self.subnet(network=ext_net, cidr='10.0.0.0/16',
enable_dhcp=False) as s2,\
self.port(subnet=s1) as p:
self._add_external_gateway_to_router(
r['router']['id'],
s2['subnet']['network_id'])
res = self._router_interface_action(
'add', r['router']['id'],
None,
p['port']['id'],
expected_code=exc.HTTPBadRequest.code)
self.assertIn('NeutronError', res)
def test_create_floatingip_invalid_fixed_ipv6_address_returns_400(self):
self.skipTest('Failed because of illegal port id')
def test_create_floatingip_with_router_interface_device_owner_fail(self):
        # This tests that an error is raised when trying to associate a
        # floating IP with a router interface port.
with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub:
with self.port(
subnet=private_sub,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p:
port_id = p['port']['id']
with self.router() as r:
self._router_interface_action('add', r['router']['id'],
None, port_id)
with self.external_network() as public_net,\
self.subnet(
network=public_net, cidr='12.0.0.0/24') as public_sub:
self._add_external_gateway_to_router(
r['router']['id'],
public_sub['subnet']['network_id'])
self._make_floatingip(
self.fmt, public_sub['subnet']['network_id'],
port_id=port_id,
http_status=exc.HTTPBadRequest.code)
def test_assign_floatingip_to_router_interface_device_owner_fail(self):
        # This tests that an error is raised when trying to associate a
        # floating IP with a router interface port.
with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub:
with self.port(
subnet=private_sub,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p:
port_id = p['port']['id']
with self.router() as r:
self._router_interface_action('add', r['router']['id'],
None, port_id)
with self.external_network() as public_net,\
self.subnet(
network=public_net, cidr='12.0.0.0/24') as public_sub:
self._add_external_gateway_to_router(
r['router']['id'],
public_sub['subnet']['network_id'])
fip = self._make_floatingip(self.fmt, public_sub[
'subnet']['network_id'])
self._update('floatingips', fip['floatingip'][
'id'], {'floatingip': {'port_id': port_id}},
expected_code=exc.HTTPBadRequest.code)
class ExtGwModeTestCase(test_ext_gw_mode.ExtGwModeIntTestCase,
L3NatTest):
def test_router_gateway_set_fail_after_port_create(self):
self.skipTest("TBD")
@common_v3.with_external_subnet
def _test_router_update_ext_gwinfo(self, snat_input_value,
snat_expected_value=False,
expected_http_code=exc.HTTPOk.code):
return super(ExtGwModeTestCase, self)._test_router_update_ext_gwinfo(
snat_input_value,
snat_expected_value=snat_expected_value,
expected_http_code=expected_http_code)
@common_v3.with_external_subnet
def test_router_gateway_set_retry(self):
super(ExtGwModeTestCase, self).test_router_gateway_set_retry()
@common_v3.with_external_subnet
def _test_router_create_show_ext_gwinfo(self, *args, **kwargs):
return super(ExtGwModeTestCase,
self)._test_router_create_show_ext_gwinfo(*args, **kwargs)
|
from eth_tester.exceptions import TransactionFailed
from pytest import fixture, raises
from utils import longTo32Bytes, PrintGasUsed, fix
from constants import BID, ASK, YES, NO
from datetime import timedelta
from old_eth_utils import ecsign, sha3, normalize_key, int_to_32bytearray, bytearray_to_bytestr, zpad
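# Tests for the ZeroXPoC proof-of-concept exchange contract: filling orders
# with deposited tokens or shares, cancelling orders, and deposit/withdraw
# bookkeeping.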
def test_fill_order_with_tokens(localFixture, zeroX, market, cash, augur):
expirationTimestampInSec = augur.getTimestamp() + 1
orderAddresses = [localFixture.accounts[0], market.address]
orderValues = [YES, BID, 10, 10, expirationTimestampInSec, 42]
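    # Order tuple passed to the ZeroXPoC helper: outcome and direction first,
    # then the numeric order parameters, the expiration timestamp and a
    # salt-like value (42); the exact field semantics are defined by the
    # contract.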
orderHash = zeroX.getOrderHash(orderAddresses, orderValues)
v, r, s = createOrder(orderHash)
fillAmount = 5
# Fail with no Cash deposited
with raises(TransactionFailed):
zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
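    # Fund and deposit cash for the maker (accounts[0]) and the taker
    # (accounts[1]) so the fill can succeed.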
assert cash.faucet(50)
assert cash.approve(zeroX.address, 50)
assert zeroX.deposit(cash.address, 50)
assert cash.faucet(450, sender=localFixture.accounts[1])
assert cash.approve(zeroX.address, 450, sender=localFixture.accounts[1])
assert zeroX.deposit(cash.address, 450, sender=localFixture.accounts[1])
with PrintGasUsed(localFixture, "FILL_0X"):
assert zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
yesShareAddress = market.getShareToken(YES)
noShareAddress = market.getShareToken(NO)
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[0]) == 0
assert zeroX.getTokenBalance(yesShareAddress, localFixture.accounts[0]) == fillAmount
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[1]) == 0
assert zeroX.getTokenBalance(noShareAddress, localFixture.accounts[1]) == fillAmount
assert zeroX.getUnavailableAmount(orderHash) == fillAmount
def test_fill_order_with_shares(localFixture, zeroX, market, cash, augur):
expirationTimestampInSec = augur.getTimestamp() + 1
orderAddresses = [localFixture.accounts[0], market.address]
orderValues = [YES, ASK, 10, 10, expirationTimestampInSec, 42]
orderHash = zeroX.getOrderHash(orderAddresses, orderValues)
v, r, s = createOrder(orderHash)
fillAmount = 5
# Fail with no shares deposited
with raises(TransactionFailed):
zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
yesShareAddress = market.getShareToken(YES)
noShareAddress = market.getShareToken(NO)
invalidShareAddress = market.getShareToken(0)
yesShareToken = localFixture.applySignature('ShareToken', yesShareAddress)
noShareToken = localFixture.applySignature('ShareToken', noShareAddress)
invalidShareToken = localFixture.applySignature('ShareToken', invalidShareAddress)
completeSets = localFixture.contracts['CompleteSets']
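    # Buy complete sets so the maker holds YES shares and the taker holds NO
    # (and invalid) shares, then deposit them into the exchange.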
cash.faucet(fix('20', market.getNumTicks()))
assert completeSets.publicBuyCompleteSets(market.address, fix(20))
assert noShareToken.transfer(localFixture.accounts[1], 10)
assert invalidShareToken.transfer(localFixture.accounts[1], 10)
assert yesShareToken.approve(zeroX.address, 10)
assert zeroX.deposit(yesShareAddress, 10)
assert noShareToken.approve(zeroX.address, 10, sender=localFixture.accounts[1])
assert zeroX.deposit(noShareAddress, 10, sender=localFixture.accounts[1])
assert invalidShareToken.approve(zeroX.address, 10, sender=localFixture.accounts[1])
assert zeroX.deposit(invalidShareAddress, 10, sender=localFixture.accounts[1])
with PrintGasUsed(localFixture, "FILL_0X"):
assert zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
yesShareAddress = market.getShareToken(YES)
noShareAddress = market.getShareToken(NO)
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[0]) == 49
assert zeroX.getTokenBalance(yesShareAddress, localFixture.accounts[0]) == 5
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[1]) == 441
assert zeroX.getTokenBalance(noShareAddress, localFixture.accounts[1]) == 5
assert zeroX.getUnavailableAmount(orderHash) == fillAmount
def test_maker_sell_shares_for_tokens(localFixture, zeroX, market, cash, augur):
expirationTimestampInSec = augur.getTimestamp() + 1
orderAddresses = [localFixture.accounts[0], market.address]
orderValues = [YES, ASK, 10, 10, expirationTimestampInSec, 42]
orderHash = zeroX.getOrderHash(orderAddresses, orderValues)
v, r, s = createOrder(orderHash)
fillAmount = 5
# Fail with no shares deposited
with raises(TransactionFailed):
zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
yesShareAddress = market.getShareToken(YES)
noShareAddress = market.getShareToken(NO)
yesShareToken = localFixture.applySignature('ShareToken', yesShareAddress)
completeSets = localFixture.contracts['CompleteSets']
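    # The maker buys complete sets and deposits YES shares; the taker deposits
    # cash to pay for them.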
assert cash.faucet(fix('20', market.getNumTicks()))
assert completeSets.publicBuyCompleteSets(market.address, fix(20))
assert yesShareToken.approve(zeroX.address, 10)
assert zeroX.deposit(yesShareAddress, 10)
assert cash.faucet(50, sender=localFixture.accounts[1])
assert cash.approve(zeroX.address, 50, sender=localFixture.accounts[1])
assert zeroX.deposit(cash.address, 50, sender=localFixture.accounts[1])
with PrintGasUsed(localFixture, "FILL_0X"):
assert zeroX.fillOrder(
orderAddresses,
orderValues,
fillAmount,
v,
r,
s,
sender = localFixture.accounts[1])
yesShareAddress = market.getShareToken(YES)
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[0]) == 50
assert zeroX.getTokenBalance(yesShareAddress, localFixture.accounts[0]) == 5
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[1]) == 0
assert zeroX.getTokenBalance(yesShareAddress, localFixture.accounts[1]) == 5
assert zeroX.getUnavailableAmount(orderHash) == fillAmount
def test_maker_buy_shares_for_tokens(localFixture, zeroX, market, cash, augur):
# TODO
pass
def test_cancel_order(localFixture, zeroX, market, cash, augur):
expirationTimestampInSec = augur.getTimestamp() + 1
orderAddresses = [localFixture.accounts[0], market.address]
orderValues = [YES, BID, 10, 10, expirationTimestampInSec, 42]
cancelAmount = 5
assert zeroX.cancelOrder(
orderAddresses,
orderValues,
cancelAmount)
orderHash = zeroX.getOrderHash(orderAddresses, orderValues)
assert zeroX.getUnavailableAmount(orderHash) == cancelAmount
def test_deposit_and_withdraw(localFixture, zeroX, cash):
assert cash.faucet(10)
assert cash.approve(zeroX.address, 10)
assert zeroX.deposit(cash.address, 9)
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[0]) == 9
assert cash.balanceOf(localFixture.accounts[0]) == 1
with raises(TransactionFailed):
zeroX.withdraw(cash.address, 10)
assert zeroX.withdraw(cash.address, 8)
assert zeroX.getTokenBalance(cash.address, localFixture.accounts[0]) == 1
assert cash.balanceOf(localFixture.accounts[0]) == 9
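# Pytest fixtures: upload the ZeroXPoC helper contract once per session and
# expose the contracts used by the tests above.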
@fixture(scope="session")
def localSnapshot(fixture, kitchenSinkSnapshot):
fixture.resetToSnapshot(kitchenSinkSnapshot)
augur = fixture.contracts["Augur"]
kitchenSinkSnapshot['zeroX'] = fixture.upload('solidity_test_helpers/ZeroX/ZeroXPoC.sol', "zeroX", constructorArgs=[augur.address])
return fixture.createSnapshot()
@fixture
def localFixture(fixture, localSnapshot):
fixture.resetToSnapshot(localSnapshot)
return fixture
@fixture
def zeroX(localFixture, kitchenSinkSnapshot):
return localFixture.applySignature(None, kitchenSinkSnapshot['zeroX'].address, kitchenSinkSnapshot['zeroX'].abi)
@fixture
def market(localFixture, kitchenSinkSnapshot):
return localFixture.applySignature(None, kitchenSinkSnapshot['yesNoMarket'].address, kitchenSinkSnapshot['yesNoMarket'].abi)
@fixture
def cash(localFixture, kitchenSinkSnapshot):
return localFixture.applySignature(None, kitchenSinkSnapshot['cash'].address, kitchenSinkSnapshot['cash'].abi)
@fixture
def augur(localFixture, kitchenSinkSnapshot):
return localFixture.contracts['Augur']
def createOrder(orderHash):
    # Sign the order hash with a fixed test private key so the maker
    # signature is deterministic across test runs.
    key = normalize_key('0x0000000000000000000000000000000000000000000000000000000000000001')
    # Prefix with the standard Ethereum signed-message header before hashing,
    # then left-pad r and s to 32 bytes for the bytes32 signature arguments.
    v, r, s = ecsign(sha3("\x19Ethereum Signed Message:\n32".encode('utf-8') + orderHash), key)
    return v, zpad(bytearray_to_bytestr(int_to_32bytearray(r)), 32), zpad(bytearray_to_bytestr(int_to_32bytearray(s)), 32)
|